code
stringlengths
0
23.9M
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * This file implements retrieval and configuration of eDP panel features such * as PSR and ABM and it also manages specs defined eDP panel power sequences. 
 */

#include "link_edp_panel_control.h"
#include "link_dpcd.h"
#include "link_dp_capability.h"
#include "dm_helpers.h"
#include "dal_asic_id.h"
#include "link_dp_phy.h"
#include "dce/dmub_psr.h"
#include "dc/dc_dmub_srv.h"
#include "dce/dmub_replay.h"
#include "abm.h"
#include "resource.h"
#define DC_LOGGER \
	link->ctx->logger
#define DC_LOGGER_INIT(logger)

/* DPCD address of the panel-replay enable/configuration register */
#define DP_SINK_PR_ENABLE_AND_CONFIGURATION		0x37B

/* Branch-device name strings used to recognize VGA/LVDS converter dongles.
 * The names are stored reversed in DPCD ("Travis" / "Almond").
 */
/* Travis */
static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT";
/* Nutmeg */
static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";

/* Program the sink's EDP_CONFIGURATION_SET DPCD register to match the
 * requested panel mode, then cache the mode on the link. The DPCD write is
 * skipped when the sink already reports the desired PANEL_MODE_EDP bit.
 */
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
{
	union dpcd_edp_config edp_config_set;
	bool panel_mode_edp = false;
	enum dc_status result;

	memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));

	switch (panel_mode) {
	case DP_PANEL_MODE_EDP:
	case DP_PANEL_MODE_SPECIAL:
		panel_mode_edp = true;
		break;

	default:
		break;
	}

	/*set edp panel mode in receiver*/
	result = core_link_read_dpcd(
		link,
		DP_EDP_CONFIGURATION_SET,
		&edp_config_set.raw,
		sizeof(edp_config_set.raw));

	if (result == DC_OK &&
		edp_config_set.bits.PANEL_MODE_EDP != panel_mode_edp) {

		edp_config_set.bits.PANEL_MODE_EDP = panel_mode_edp;
		result = core_link_write_dpcd(
			link,
			DP_EDP_CONFIGURATION_SET,
			&edp_config_set.raw,
			sizeof(edp_config_set.raw));

		ASSERT(result == DC_OK);
	}

	link->panel_mode = panel_mode;
	DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
		 "eDP panel mode enabled: %d \n",
		 link->link_index,
		 link->dpcd_caps.panel_mode_edp,
		 panel_mode_edp);
}

/* Decide which panel mode this link should use: SPECIAL for known
 * Travis/Nutmeg VGA/LVDS converter dongles (matched by branch device id and
 * reversed name string), EDP for eDP connectors or internal DP displays that
 * report eDP panel-mode support, DEFAULT otherwise.
 */
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
{
	/* We need to explicitly check that connector
	 * is not DP. Some Travis_VGA get reported
	 * by video bios as DP.
	 */
	if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
		switch (link->dpcd_caps.branch_dev_id) {
		case DP_BRANCH_DEVICE_ID_0022B9:
			/* alternate scrambler reset is required for Travis
			 * for the case when external chip does not
			 * provide sink device id, alternate scrambler
			 * scheme will be overriden later by querying
			 * Encoder features
			 */
			if (strncmp(
				link->dpcd_caps.branch_dev_name,
				DP_VGA_LVDS_CONVERTER_ID_2,
				sizeof(
					link->dpcd_caps.
					branch_dev_name)) == 0) {
				return DP_PANEL_MODE_SPECIAL;
			}
			break;
		case DP_BRANCH_DEVICE_ID_00001A:
			/* alternate scrambler reset is required for Travis
			 * for the case when external chip does not provide
			 * sink device id, alternate scrambler scheme will
			 * be overriden later by querying Encoder feature
			 */
			if (strncmp(link->dpcd_caps.branch_dev_name,
				DP_VGA_LVDS_CONVERTER_ID_3,
				sizeof(
					link->dpcd_caps.
					branch_dev_name)) == 0) {
				return DP_PANEL_MODE_SPECIAL;
			}
			break;
		default:
			break;
		}
	}

	if (link->dpcd_caps.panel_mode_edp &&
		(link->connector_signal == SIGNAL_TYPE_EDP ||
		 (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
		  link->is_internal_display))) {
		return DP_PANEL_MODE_EDP;
	}

	return DP_PANEL_MODE_DEFAULT;
}

/* Set the panel brightness in millinits over the DPCD AUX channel.
 * Two paths exist: the VESA luminance-control registers (0x721 target
 * luminance, used when DMUB AUX backlight support is absent) and the AMD
 * vendor-specific source-backlight registers. Returns false on any failed
 * DPCD write or when the link is not an embedded/internal display.
 */
bool edp_set_backlight_level_nits(struct dc_link *link,
		bool isHDR,
		uint32_t backlight_millinits,
		uint32_t transition_time_in_ms)
{
	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
			link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
		return false;

	// use internal backlight control if dmub capabilities are not present
	if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX &&
			!link->dc->caps.dmub_caps.aux_backlight_support) {
		uint8_t backlight_enable = 0;
		struct target_luminance_value *target_luminance = NULL;

		//if target luminance value is greater than 24 bits, clip the value to 24 bits
		if (backlight_millinits > 0xFFFFFF)
			backlight_millinits = 0xFFFFFF;

		/* NOTE(review): reinterprets the low 3 bytes of the uint32_t
		 * as the 24-bit target_luminance_value — assumes little-endian
		 * layout; confirm against struct definition.
		 */
		target_luminance = (struct target_luminance_value *)&backlight_millinits;

		core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
			&backlight_enable, sizeof(uint8_t));

		backlight_enable |= DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE;

		if (core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
			&backlight_enable,
			sizeof(backlight_enable)) != DC_OK)
			return false;

		if (core_link_write_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
			(uint8_t *)(target_luminance),
			sizeof(struct target_luminance_value)) != DC_OK)
			return false;
	} else if (link->backlight_control_type == BACKLIGHT_CONTROL_AMD_AUX) {
		struct dpcd_source_backlight_set dpcd_backlight_set;
		*(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
		*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;

		uint8_t backlight_control = isHDR ? 1 : 0;

		// OLEDs have no PWM, they can only use AUX
		if (link->dpcd_sink_ext_caps.bits.oled == 1)
			backlight_control = 1;

		if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
			(uint8_t *)(&dpcd_backlight_set),
			sizeof(dpcd_backlight_set)) != DC_OK)
			return false;

		if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
			&backlight_control, 1) != DC_OK)
			return false;
	}

	return true;
}

/* Read average and peak brightness (millinits) from the AMD source-backlight
 * DPCD registers. Returns false for non-embedded links, failed reads, or the
 * zero/inconsistent values unsupported panels report.
 */
bool edp_get_backlight_level_nits(struct dc_link *link,
		uint32_t *backlight_millinits_avg,
		uint32_t *backlight_millinits_peak)
{
	union dpcd_source_backlight_get dpcd_backlight_get;

	memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get));

	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
			link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
		return false;

	if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
			dpcd_backlight_get.raw,
			sizeof(union dpcd_source_backlight_get)))
		return false;

	*backlight_millinits_avg =
		dpcd_backlight_get.bytes.backlight_millinits_avg;
	*backlight_millinits_peak =
		dpcd_backlight_get.bytes.backlight_millinits_peak;

	/* On non-supported panels dpcd_read usually succeeds with 0 returned */
	if (*backlight_millinits_avg == 0 ||
			*backlight_millinits_avg > *backlight_millinits_peak)
		return false;

	return true;
}

/* Enable/disable the backlight via the AMD DP_SOURCE_BACKLIGHT_ENABLE DPCD
 * register. Only valid for eDP / internal DP links.
 */
bool edp_backlight_enable_aux(struct dc_link *link, bool enable)
{
	uint8_t backlight_enable = enable ? 1 : 0;

	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
		link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
		return false;

	if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
		&backlight_enable, 1) != DC_OK)
		return false;

	return true;
}

// we read default from 0x320 because we expect BIOS wrote it there
// regular get_backlight_nit reads from panel set at 0x326
static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits)
{
	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
		link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
		return false;

	if (!link->dpcd_caps.panel_luminance_control) {
		if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
			(uint8_t *)backlight_millinits,
			sizeof(uint32_t)))
			return false;
	} else {
		//setting to 0 as a precaution, since target_luminance_value is 3 bytes
		memset(backlight_millinits, 0, sizeof(uint32_t));

		if (!core_link_read_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
			(uint8_t *)backlight_millinits,
			sizeof(struct target_luminance_value)))
			return false;
	}

	return true;
}

/* For OLED panels only: read the BIOS-programmed default brightness and apply
 * it via AUX, clamping implausible readbacks (below 1 nit or above 5000 nits)
 * to a 150-nit fallback. Returns false for non-OLED links.
 */
bool set_default_brightness_aux(struct dc_link *link)
{
	uint32_t default_backlight;

	if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
		if (!read_default_bl_aux(link, &default_backlight))
			default_backlight = 150000;
		// if > 5000, it might be wrong readback. 0 nits is a valid default value for OLED panel.
		if (default_backlight < 1000 || default_backlight > 5000000)
			default_backlight = 150000;

		return edp_set_backlight_level_nits(link, true,
				default_backlight, 0);
	}
	return false;
}

/* ILR (Intermediate Link Rate) optimization is usable only when the sink
 * advertises eDP supported link rates and the panel config opts in.
 */
bool edp_is_ilr_optimization_enabled(struct dc_link *link)
{
	if (link->dpcd_caps.edp_supported_link_rates_count == 0 || !link->panel_config.ilr.optimize_edp_link_rate)
		return false;
	return true;
}

/* Return the larger of the highest advertised eDP intermediate link rate and
 * the standard max link capability.
 */
enum dc_link_rate get_max_edp_link_rate(struct dc_link *link)
{
	enum dc_link_rate max_ilr_rate = LINK_RATE_UNKNOWN;
	enum dc_link_rate max_non_ilr_rate = dp_get_max_link_cap(link).link_rate;

	for (int i = 0; i < link->dpcd_caps.edp_supported_link_rates_count; i++) {
		if (max_ilr_rate < link->dpcd_caps.edp_supported_link_rates[i])
			max_ilr_rate = link->dpcd_caps.edp_supported_link_rates[i];
	}

	return (max_ilr_rate > max_non_ilr_rate ? max_ilr_rate : max_non_ilr_rate);
}

/* Decide whether the link must be retrained to an optimal intermediate link
 * rate for the given timing: true when VBIOS used the legacy LINK_BW_SET path
 * or when the currently-set rate/lane-count differs from the computed optimum.
 */
bool edp_is_ilr_optimization_required(struct dc_link *link,
		struct dc_crtc_timing *crtc_timing)
{
	struct dc_link_settings link_setting;
	uint8_t link_bw_set = 0;
	uint8_t link_rate_set = 0;
	uint32_t req_bw;
	union lane_count_set lane_count_set = {0};

	/* NOTE(review): with ||, this assert only fires when BOTH pointers are
	 * NULL; the "invalid input" intent reads as though && was meant —
	 * confirm against upstream before changing.
	 */
	ASSERT(link || crtc_timing); // invalid input

	if (!edp_is_ilr_optimization_enabled(link))
		return false;

	// Read DPCD 00100h to find if standard link rates are set
	core_link_read_dpcd(link, DP_LINK_BW_SET,
			&link_bw_set, sizeof(link_bw_set));

	if (link_bw_set) {
		DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS used link_bw_set\n");
		return true;
	}

	// Read DPCD 00115h to find the edp link rate set used
	core_link_read_dpcd(link, DP_LINK_RATE_SET,
			&link_rate_set, sizeof(link_rate_set));

	// Read DPCD 00101h to find out the number of lanes currently set
	core_link_read_dpcd(link, DP_LANE_COUNT_SET,
			&lane_count_set.raw, sizeof(lane_count_set));

	req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing, dc_link_get_highest_encoding_format(link));

	if (!crtc_timing->flags.DSC)
		edp_decide_link_settings(link, &link_setting, req_bw);
	else
		decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN);

	if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate ||
			lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) {
		DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS link_rate_set not optimal\n");
		return true;
	}

	DC_LOG_EVENT_LINK_TRAINING("eDP ILR: No optimization required, VBIOS set optimal link_rate_set\n");
	return false;
}

/* Power up panel VDD (optionally waiting for HPD) and turn the backlight on.
 * No-op for non-eDP connectors.
 */
void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
{
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	link->dc->hwss.edp_power_control(link, true);
	if (wait_for_hpd)
		link->dc->hwss.edp_wait_for_hpd_ready(link, true);
	if (link->dc->hwss.edp_backlight_control)
		link->dc->hwss.edp_backlight_control(link, true);
}

/* Run the eDP panel power sequence: VDD -> backlight -> Rx power on power-up,
 * and the exact reverse order on power-down.
 */
void edp_set_panel_power(struct dc_link *link, bool powerOn)
{
	if (powerOn) {
		// 1. panel VDD on
		if (!link->dc->config.edp_no_power_sequencing)
			link->dc->hwss.edp_power_control(link, true);
		link->dc->hwss.edp_wait_for_hpd_ready(link, true);

		// 2. panel BL on
		if (link->dc->hwss.edp_backlight_control)
			link->dc->hwss.edp_backlight_control(link, true);

		// 3. Rx power on
		dpcd_write_rx_power_ctrl(link, true);
	} else {
		// 3. Rx power off
		dpcd_write_rx_power_ctrl(link, false);

		// 2. panel BL off
		if (link->dc->hwss.edp_backlight_control)
			link->dc->hwss.edp_backlight_control(link, false);

		// 1. panel VDD off
		if (!link->dc->config.edp_no_power_sequencing)
			link->dc->hwss.edp_power_control(link, false);
	}
}

/* Wait for the panel T12 (power-off to power-on) delay if the hwss hook
 * exists; returns whether the wait was performed.
 */
bool edp_wait_for_t12(struct dc_link *link)
{
	if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
		link->dc->hwss.edp_wait_for_T12(link);

		return true;
	}

	return false;
}

/* Apply any panel-config extra delay (ms) after backlight off, ahead of T9. */
void edp_add_delay_for_T9(struct dc_link *link)
{
	if (link && link->panel_config.pps.extra_delay_backlight_off > 0)
		fsleep(link->panel_config.pps.extra_delay_backlight_off * 1000);
}

/* Poll SINK_STATUS (eDP 1.2+) until the sink reports not-ready (0) or up to
 * ~50 * 100us, bounding the T9 interval.
 * NOTE(review): returns the last enum dc_status as bool — relies on DC_OK
 * being nonzero; a failed final read also returns "true"-ish. Confirm intent.
 */
bool edp_receiver_ready_T9(struct dc_link *link)
{
	unsigned int tries = 0;
	unsigned char sinkstatus = 0;
	unsigned char edpRev = 0;
	enum dc_status result = DC_OK;

	result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));

	/* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
	if (result == DC_OK && edpRev >= DP_EDP_12) {
		do {
			sinkstatus = 1;
			result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
			if (sinkstatus == 0)
				break;
			if (result != DC_OK)
				break;
			udelay(100); //MAx T9
		} while (++tries < 50);
	}

	return result;
}

/* Poll SINK_STATUS (eDP 1.2+) until the sink reports ready (1), bounded by an
 * absolute 50ms timestamp window (max T7), then apply any configured extra T7
 * delay. NOTE(review): same enum-as-bool return caveat as T9 above.
 */
bool edp_receiver_ready_T7(struct dc_link *link)
{
	unsigned char sinkstatus = 0;
	unsigned char edpRev = 0;
	enum dc_status result = DC_OK;

	/* use absolute time stamp to constrain max T7*/
	unsigned long long enter_timestamp = 0;
	unsigned long long finish_timestamp = 0;
	unsigned long long time_taken_in_ns = 0;

	result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));

	if (result == DC_OK && edpRev >= DP_EDP_12) {
		/* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
		enter_timestamp = dm_get_timestamp(link->ctx);
		do {
			sinkstatus = 0;
			result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
			if (sinkstatus == 1)
				break;
			if (result != DC_OK)
				break;
			udelay(25);
			finish_timestamp = dm_get_timestamp(link->ctx);
			time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
		} while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
	}

	if (link && link->panel_config.pps.extra_t7_ms > 0)
		fsleep(link->panel_config.pps.extra_t7_ms * 1000);

	return result;
}

/* Enable/disable ALPM in the sink via DP_RECEIVER_ALPM_CONFIG; only takes
 * effect for PSR-SU (PSR v2) capable configurations.
 */
bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable)
{
	bool ret = false;
	union dpcd_alpm_configuration alpm_config;

	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
		memset(&alpm_config, 0, sizeof(alpm_config));

		alpm_config.bits.ENABLE = (enable ? true : false);
		ret = dm_helpers_dp_write_dpcd(link->ctx, link,
			DP_RECEIVER_ALPM_CONFIG, &alpm_config.raw,
			sizeof(alpm_config.raw));
	}
	return ret;
}

/* Find the first pipe in the current state whose stream drives this link,
 * or NULL if none does.
 */
static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
{
	int i;
	struct dc *dc = link->ctx->dc;
	struct pipe_ctx *pipe_ctx = NULL;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
			if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) {
				pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
				break;
			}
		}
	}

	return pipe_ctx;
}

/* Apply a PWM backlight level through the hwss for embedded displays,
 * disabling the brightness ramp while the pipe is blanked (no plane_state).
 * Returns false only when no pipe is found for an embedded link.
 */
bool edp_set_backlight_level(const struct dc_link *link,
		struct set_backlight_level_params *backlight_level_params)
{
	struct dc *dc = link->ctx->dc;
	uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16;
	uint32_t frame_ramp = backlight_level_params->frame_ramp;
	DC_LOGGER_INIT(link->ctx->logger);
	DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
			backlight_pwm_u16_16, backlight_pwm_u16_16);

	if (dc_is_embedded_signal(link->connector_signal)) {
		struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);

		if (link->panel_cntl)
			link->panel_cntl->stored_backlight_registers.USER_LEVEL = backlight_pwm_u16_16;
		if (pipe_ctx) {
			/* Disable brightness ramping when the display is blanked
			 * as it can hang the DMCU
			 */
			if (pipe_ctx->plane_state == NULL)
				frame_ramp = 0;
		} else {
			return false;
		}

		backlight_level_params->frame_ramp = frame_ramp;

		dc->hwss.set_backlight_level(
				pipe_ctx,
				backlight_level_params);
	}
	return true;
}

/* Drive PSR state: optionally update power-opt flags, force a static-screen
 * entry, and enable/disable PSR through DMUB (preferred) or DMCU. NULL
 * pointer arguments mean "leave that setting unchanged".
 */
bool edp_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
		bool wait, bool force_static, const unsigned int *power_opts)
{
	struct dc *dc = link->ctx->dc;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dmub_psr *psr = dc->res_pool->psr;
	unsigned int panel_inst;

	if (psr == NULL && force_static)
		return false;

	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return false;

	if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) {
		// Don't enter PSR if panel is not connected
		return false;
	}

	/* Set power optimization flag */
	if (power_opts && link->psr_settings.psr_power_opt != *power_opts) {
		link->psr_settings.psr_power_opt = *power_opts;

		if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt)
			psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst);
	}

	if (psr != NULL && link->psr_settings.psr_feature_enabled &&
			force_static && psr->funcs->psr_force_static)
		psr->funcs->psr_force_static(psr, panel_inst);

	/* Enable or Disable PSR */
	if (allow_active && link->psr_settings.psr_allow_active != *allow_active) {
		link->psr_settings.psr_allow_active = *allow_active;

		/* Leaving PSR may require restoring Z10 state first */
		if (!link->psr_settings.psr_allow_active)
			dc_z10_restore(dc);

		if (psr != NULL && link->psr_settings.psr_feature_enabled) {
			psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst);
		} else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) &&
			link->psr_settings.psr_feature_enabled)
			dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait);
		else
			return false;
	}
	return true;
}

/* Query current PSR state from DMUB (preferred) or DMCU firmware. */
bool edp_get_psr_state(const struct dc_link *link, enum dc_psr_state *state)
{
	struct dc *dc = link->ctx->dc;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dmub_psr *psr = dc->res_pool->psr;
	unsigned int panel_inst;

	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return false;

	if (psr != NULL && link->psr_settings.psr_feature_enabled)
		psr->funcs->psr_get_state(psr, state, panel_inst);
	else if (dmcu != NULL && link->psr_settings.psr_feature_enabled)
		dmcu->funcs->get_psr_state(dmcu, state);

	return true;
}

/* Map the link's transmitter enum to the SMU physical PHY id. */
static inline enum physical_phy_id transmitter_to_phy_id(struct dc_link *link)
{
	struct dc_context *dc_ctx = link->ctx;
	enum transmitter transmitter_value = link->link_enc->transmitter;

	switch (transmitter_value) {
	case TRANSMITTER_UNIPHY_A:
		return PHYLD_0;
	case TRANSMITTER_UNIPHY_B:
		return PHYLD_1;
	case TRANSMITTER_UNIPHY_C:
		return PHYLD_2;
	case TRANSMITTER_UNIPHY_D:
		return PHYLD_3;
	case TRANSMITTER_UNIPHY_E:
		return PHYLD_4;
	case TRANSMITTER_UNIPHY_F:
		return PHYLD_5;
	case TRANSMITTER_NUTMEG_CRT:
		return PHYLD_6;
	case TRANSMITTER_TRAVIS_CRT:
		return PHYLD_7;
	case TRANSMITTER_TRAVIS_LCD:
		return PHYLD_8;
	case TRANSMITTER_UNIPHY_G:
		return PHYLD_9;
	case TRANSMITTER_COUNT:
		return PHYLD_COUNT;
	case TRANSMITTER_UNKNOWN:
		return PHYLD_UNKNOWN;
	default:
		DC_ERROR("Unknown transmitter value %d\n", transmitter_value);
		return PHYLD_UNKNOWN;
	}
}

/* Configure the sink's PSR DPCD registers and fill in the psr_context used
 * by DMUB/DMCU firmware (controller/transmitter ids, timing-derived values,
 * psr_level policy bits), then hand the settings to the firmware.
 */
bool edp_setup_psr(struct dc_link *link,
		const struct dc_stream_state *stream, struct psr_config *psr_config,
		struct psr_context *psr_context)
{
	struct dc *dc;
	struct dmcu *dmcu;
	struct dmub_psr *psr;
	int i;
	unsigned int panel_inst;
	/* updateSinkPsrDpcdConfig*/
	union dpcd_psr_configuration psr_configuration;
	union dpcd_sink_active_vtotal_control_mode vtotal_control = {0};

	psr_context->controllerId = CONTROLLER_ID_UNDEFINED;

	if (!link)
		return false;

	dc = link->ctx->dc;
	dmcu = dc->res_pool->dmcu;
	psr = dc->res_pool->psr;
	if (!dmcu && !psr)
		return false;

	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return false;

	memset(&psr_configuration, 0, sizeof(psr_configuration));

	psr_configuration.bits.ENABLE = 1;
	psr_configuration.bits.CRC_VERIFICATION = 1;
	psr_configuration.bits.FRAME_CAPTURE_INDICATION =
			psr_config->psr_frame_capture_indication_req;

	/* Check for PSR v2*/
	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
		/* For PSR v2 selective update.
		 * Indicates whether sink should start capturing
		 * immediately following active scan line,
		 * or starting with the 2nd active scan line.
		 */
		psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
		/*For PSR v2, determines whether Sink should generate
		 * IRQ_HPD when CRC mismatch is detected.
		 */
		psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
		/* For PSR v2, set the bit when the Source device will
		 * be enabling PSR2 operation.
		 */
		psr_configuration.bits.ENABLE_PSR2 = 1;
		/* For PSR v2, the Sink device must be able to receive
		 * SU region updates early in the frame time.
		 */
		psr_configuration.bits.EARLY_TRANSPORT_ENABLE = 1;
	}

	/* 368 decimal == 0x170, the DPCD PSR enable/configuration register */
	dm_helpers_dp_write_dpcd(
		link->ctx,
		link,
		368,
		&psr_configuration.raw,
		sizeof(psr_configuration.raw));

	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
		edp_power_alpm_dpcd_enable(link, true);
		psr_context->su_granularity_required =
			psr_config->su_granularity_required;
		psr_context->su_y_granularity =
			psr_config->su_y_granularity;
		psr_context->line_time_in_us = psr_config->line_time_in_us;

		/* linux must be able to expose AMD Source DPCD definition
		 * in order to support FreeSync PSR
		 */
		if (link->psr_settings.psr_vtotal_control_support) {
			psr_context->rate_control_caps = psr_config->rate_control_caps;
			vtotal_control.bits.ENABLE = true;
			core_link_write_dpcd(link,
					DP_SINK_PSR_ACTIVE_VTOTAL_CONTROL_MODE,
					&vtotal_control.raw,
					sizeof(vtotal_control.raw));
		}
	}

	psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
	psr_context->transmitterId = link->link_enc->transmitter;
	psr_context->engineId = link->link_enc->preferred_engine;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			/* dmcu -1 for all controller id values,
			 * therefore +1 here
			 */
			psr_context->controllerId =
				dc->current_state->res_ctx.
				pipe_ctx[i].stream_res.tg->inst + 1;
			break;
		}
	}

	/* Hardcoded for now.  Can be Pcie or Uniphy (or Unknown)*/
	psr_context->phyType = PHY_TYPE_UNIPHY;
	/*PhyId is associated with the transmitter id*/
	psr_context->smuPhyId = transmitter_to_phy_id(link);

	psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
	psr_context->vsync_rate_hz = div64_u64(div64_u64((stream->
					timing.pix_clk_100hz * (u64)100),
					stream->timing.v_total),
					stream->timing.h_total);

	psr_context->psrSupportedDisplayConfig = true;
	psr_context->psrExitLinkTrainingRequired =
		psr_config->psr_exit_link_training_required;
	psr_context->sdpTransmitLineNumDeadline =
		psr_config->psr_sdp_transmit_line_num_deadline;
	psr_context->psrFrameCaptureIndicationReq =
		psr_config->psr_frame_capture_indication_req;

	psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */

	psr_context->numberOfControllers =
			link->dc->res_pool->timing_generator_count;

	psr_context->rfb_update_auto_en = true;

	/* 2 frames before enter PSR. */
	psr_context->timehyst_frames = 2;
	/* half a frame
	 * (units in 100 lines, i.e. a value of 1 represents 100 lines)
	 */
	psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
	psr_context->aux_repeats = 10;

	psr_context->psr_level.u32all = 0;

	/*skip power down the single pipe since it blocks the cstate*/
	if (link->ctx->asic_id.chip_family >= FAMILY_RV) {
		switch (link->ctx->asic_id.chip_family) {
		case FAMILY_YELLOW_CARP:
		case AMDGPU_FAMILY_GC_10_3_6:
		case AMDGPU_FAMILY_GC_11_0_1:
			if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable)
				psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
			break;
		default:
			psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
			break;
		}
	}

	/* SMU will perform additional powerdown sequence.
	 * For unsupported ASICs, set psr_level flag to skip PSR
	 * static screen notification to SMU.
	 * (Always set for DAL2, did not check ASIC)
	 */
	psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations;
	psr_context->allow_multi_disp_optimizations = psr_config->allow_multi_disp_optimizations;

	/* Complete PSR entry before aborting to prevent intermittent
	 * freezes on certain eDPs
	 */
	psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;

	/* Disable ALPM first for compatible non-ALPM panel now */
	psr_context->psr_level.bits.DISABLE_ALPM = 0;
	psr_context->psr_level.bits.ALPM_DEFAULT_PD_MODE = 1;

	/* Controls additional delay after remote frame capture before
	 * continuing power down, default = 0
	 */
	psr_context->frame_delay = 0;

	psr_context->dsc_slice_height = psr_config->dsc_slice_height;

	if (psr) {
		link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr,
			link, psr_context, panel_inst);
		link->psr_settings.psr_power_opt = 0;
		link->psr_settings.psr_allow_active = 0;
	} else {
		link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
	}

	/* psr_enabled == 0 indicates setup_psr did not succeed, but this
	 * should not happen since firmware should be running at this point
	 */
	if (link->psr_settings.psr_feature_enabled == 0)
		ASSERT(0);

	return true;
}

/* Read PSR residency counters from DMUB; writes 0 when PSR is unavailable. */
void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency, enum psr_residency_mode mode)
{
	struct dc *dc = link->ctx->dc;
	struct dmub_psr *psr = dc->res_pool->psr;
	unsigned int panel_inst;

	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return;

	// PSR residency measurements only supported on DMCUB
	if (psr != NULL && link->psr_settings.psr_feature_enabled)
		psr->funcs->psr_get_residency(psr, residency, panel_inst, mode);
	else
		*residency = 0;
}

/* Forward idle/SU vtotal values to DMUB while PSR is active; requires PSR
 * feature and vtotal-control support.
 */
bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su)
{
	struct dc *dc = link->ctx->dc;
	struct dmub_psr *psr = dc->res_pool->psr;

	if (psr == NULL || !link->psr_settings.psr_feature_enabled || !link->psr_settings.psr_vtotal_control_support)
		return false;

	psr->funcs->psr_set_sink_vtotal_in_psr_active(psr, psr_vtotal_idle, psr_vtotal_su);

	return true;
}

/* Panel Replay counterpart of edp_set_psr_allow_active: update power-opt
 * flags and activate/deactivate Replay via DMUB. NULL pointer arguments mean
 * "leave that setting unchanged".
 */
bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
	bool wait, bool force_static, const unsigned int *power_opts)
{
	struct dc *dc = link->ctx->dc;
	struct dmub_replay *replay = dc->res_pool->replay;
	unsigned int panel_inst;

	if (replay == NULL && force_static)
		return false;

	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return false;

	/* Set power optimization flag */
	if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) {
		if (replay != NULL && link->replay_settings.replay_feature_enabled &&
		    replay->funcs->replay_set_power_opt) {
			replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst);
			link->replay_settings.replay_power_opt_active = *power_opts;
		}
	}

	/* Activate or deactivate Replay */
	if (allow_active && link->replay_settings.replay_allow_active != *allow_active) {
		// TODO: Handle mux change case if force_static is set
		// If force_static is set, just change the replay_allow_active state directly
		if (replay != NULL && link->replay_settings.replay_feature_enabled)
			replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst);
		link->replay_settings.replay_allow_active = *allow_active;
	}

	return true;
}

/* Query current Replay state from DMUB; REPLAY_STATE_0 when unavailable. */
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state)
{
	struct dc *dc = link->ctx->dc;
	struct dmub_replay *replay = dc->res_pool->replay;
	unsigned int panel_inst;
	enum replay_state pr_state = REPLAY_STATE_0;

	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return false;

	if (replay != NULL && link->replay_settings.replay_feature_enabled)
		replay->funcs->replay_get_state(replay, &pr_state, panel_inst);

	*state = pr_state;

	return true;
}

/* Fill in the replay_context (aux/encoder instances, controller id, line
 * time) and hand it to DMUB; on success, program the sink's panel-replay and
 * ALPM DPCD configuration.
 */
bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
{
	/* To-do: Setup Replay */
	struct dc *dc;
	struct dmub_replay *replay;
	int i;
	unsigned int panel_inst;
	struct replay_context replay_context = { 0 };
	unsigned int lineTimeInNs = 0;

	union replay_enable_and_configuration replay_config = { 0 };

	union dpcd_alpm_configuration alpm_config;

	replay_context.controllerId = CONTROLLER_ID_UNDEFINED;

	if (!link)
		return false;

	dc = link->ctx->dc;

	replay = dc->res_pool->replay;

	if (!replay)
		return false;

	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return false;

	replay_context.aux_inst = link->ddc->ddc_pin->hw_info.ddc_channel;
	replay_context.digbe_inst = link->link_enc->transmitter;
	replay_context.digfe_inst = link->link_enc->preferred_engine;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			/* dmcu -1 for all controller id values,
			 * therefore +1 here
			 */
			replay_context.controllerId =
				dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg->inst + 1;
			break;
		}
	}

	/* line time in ns = h_total / pixel clock, rounded up by the +1 */
	lineTimeInNs = ((stream->timing.h_total * 1000000) / (stream->timing.pix_clk_100hz / 10)) + 1;

	replay_context.line_time_in_ns = lineTimeInNs;

	link->replay_settings.replay_feature_enabled =
			replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
	if (link->replay_settings.replay_feature_enabled) {

		replay_config.bits.FREESYNC_PANEL_REPLAY_MODE = 1;
		replay_config.bits.TIMING_DESYNC_ERROR_VERIFICATION =
			link->replay_settings.config.replay_timing_sync_supported;
		replay_config.bits.STATE_TRANSITION_ERROR_DETECTION = 1;
		dm_helpers_dp_write_dpcd(link->ctx, link,
			DP_SINK_PR_ENABLE_AND_CONFIGURATION,
			(uint8_t *)&(replay_config.raw), sizeof(uint8_t));

		memset(&alpm_config, 0, sizeof(alpm_config));
		alpm_config.bits.ENABLE = 1;
		dm_helpers_dp_write_dpcd(
			link->ctx,
			link,
			DP_RECEIVER_ALPM_CONFIG,
			&alpm_config.raw,
			sizeof(alpm_config.raw));
	}
	return true;
}

/*
 * This is general Interface for Replay to set an 32 bit variable to dmub
 * replay_FW_Message_type: Indicates which instruction or variable pass to DMUB
 * cmd_data: Value of the config.
 */
bool edp_send_replay_cmd(struct dc_link *link,
			enum replay_FW_Message_type msg,
			union dmub_replay_cmd_set *cmd_data)
{
	struct dc *dc = link->ctx->dc;
	struct dmub_replay *replay = dc->res_pool->replay;
	unsigned int panel_inst;

	if (!replay)
		return false;

	DC_LOGGER_INIT(link->ctx->logger);

	if (dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		cmd_data->panel_inst = panel_inst;
	else {
		DC_LOG_DC("%s(): get edp panel inst fail ", __func__);
		return false;
	}

	replay->funcs->replay_send_cmd(replay, msg, cmd_data);

	return true;
}

/* Update the Replay coasting vtotal in DMUB when it is nonzero and differs
 * from the cached value.
 */
bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
{
	struct dc *dc = link->ctx->dc;
	struct dmub_replay *replay = dc->res_pool->replay;
	unsigned int panel_inst;

	if (!replay)
		return false;

	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return false;

	if (coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) {
		replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst);
		link->replay_settings.coasting_vtotal = coasting_vtotal;
	}

	return true;
}

/* Start or read a Replay residency measurement via DMUB; writes 0 when
 * Replay is unavailable.
 */
bool edp_replay_residency(const struct dc_link *link,
	unsigned int *residency, const bool is_start, const enum pr_residency_mode mode)
{
	struct dc *dc = link->ctx->dc;
	struct dmub_replay *replay = dc->res_pool->replay;
	unsigned int panel_inst;

	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return false;

	if (!residency)
		return false;

	if (replay != NULL && link->replay_settings.replay_feature_enabled)
		replay->funcs->replay_residency(replay, panel_inst, residency, is_start, mode);
	else
		*residency = 0;

	return true;
}

/* Atomically update both Replay power-opt and coasting vtotal in one DMUB
 * command; succeeds only when BOTH values actually change.
 */
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
	const unsigned int *power_opts, uint32_t coasting_vtotal)
{
	struct dc *dc = link->ctx->dc;
	struct dmub_replay *replay = dc->res_pool->replay;
	unsigned int panel_inst;

	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return false;

	/* Only both power and coasting vtotal changed, this func could return true */
	if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts &&
		coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) {
		if (link->replay_settings.replay_feature_enabled &&
		    replay->funcs->replay_set_power_opt_and_coasting_vtotal) {
			replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay,
				*power_opts, panel_inst, coasting_vtotal);
			link->replay_settings.replay_power_opt_active = *power_opts;
			link->replay_settings.coasting_vtotal = coasting_vtotal;
		} else
			return false;
	} else
		return false;

	return true;
}

/* Find the ABM instance of the pipe whose stream drives this link, or NULL.
 * NOTE(review): copies the whole pipe_ctx by value into a local — confirm
 * this is intentional versus taking a pointer.
 */
static struct abm *get_abm_from_stream_res(const struct dc_link *link)
{
	int i;
	struct dc *dc = link->ctx->dc;
	struct abm *abm = NULL;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
		struct dc_stream_state *stream = pipe_ctx.stream;

		if (stream && stream->link == link) {
			abm = pipe_ctx.stream_res.abm;
			break;
		}
	}
	return abm;
}

/* Read the current backlight level from the panel controller (when firmware
 * brightness control is inactive) or from ABM; DC_ERROR_UNEXPECTED otherwise.
 */
int edp_get_backlight_level(const struct dc_link *link)
{
	struct abm *abm = get_abm_from_stream_res(link);
	struct panel_cntl *panel_cntl = link->panel_cntl;
	struct dc *dc = link->ctx->dc;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	bool fw_set_brightness = true;

	if (dmcu)
		fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);

	if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
		return panel_cntl->funcs->get_current_backlight(panel_cntl);
	else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
		return (int) abm->funcs->get_current_backlight(abm);
	else
		return DC_ERROR_UNEXPECTED;
}

/* Read ABM's target backlight PWM value; DC_ERROR_UNEXPECTED without ABM. */
int edp_get_target_backlight_pwm(const struct dc_link *link)
{
	struct abm *abm = get_abm_from_stream_res(link);

	if (abm == NULL || abm->funcs->get_target_backlight == NULL)
		return DC_ERROR_UNEXPECTED;

	return (int) abm->funcs->get_target_backlight(abm);
}

/* Send the DMUB PSP command that enables/disables ASSR (alternate scrambler
 * seed reset) for the link's PHY/encoder. Only used when the config selects
 * the PSP-message path.
 */
static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link,
		struct link_resource *link_res, bool enable)
{
	union dmub_rb_cmd cmd;
	bool use_hpo_dp_link_enc = false;
	uint8_t link_enc_index = 0;
	uint8_t phy_type = 0;
	uint8_t phy_id = 0;

	if (!pDC->config.use_assr_psp_message)
		return;

	memset(&cmd, 0, sizeof(cmd));

	link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;

	if (link_res->hpo_dp_link_enc) {
		link_enc_index = link_res->hpo_dp_link_enc->inst;
		use_hpo_dp_link_enc = true;
	}

	/* phy_type 1 == eDP panel mode, 0 otherwise */
	if (enable)
		phy_type = ((dp_get_panel_mode(link) == DP_PANEL_MODE_EDP) ? 1 : 0);

	phy_id = resource_transmitter_to_phy_idx(pDC, link->link_enc->transmitter);

	cmd.assr_enable.header.type = DMUB_CMD__PSP;
	cmd.assr_enable.header.sub_type = DMUB_CMD__PSP_ASSR_ENABLE;
	cmd.assr_enable.assr_data.enable = enable;
	cmd.assr_enable.assr_data.phy_port_type = phy_type;
	cmd.assr_enable.assr_data.phy_port_id = phy_id;
	cmd.assr_enable.assr_data.link_enc_index = link_enc_index;
	cmd.assr_enable.assr_data.hpo_mode = use_hpo_dp_link_enc;

	dc_wake_and_execute_dmub_cmd(pDC->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

/* Enable ASSR for an eDP panel either via the DMUB PSP message or the legacy
 * cp_psp callback; on cp_psp failure, fall back to DEFAULT panel mode so link
 * training can still proceed.
 */
void edp_set_panel_assr(struct dc_link *link, struct pipe_ctx *pipe_ctx,
	enum dp_panel_mode *panel_mode, bool enable)
{
	struct link_resource *link_res = &pipe_ctx->link_res;
	struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;

	if (*panel_mode != DP_PANEL_MODE_EDP)
		return;

	if (link->dc->config.use_assr_psp_message) {
		edp_set_assr_enable(link->dc, link, link_res, enable);
	} else if (cp_psp && cp_psp->funcs.enable_assr && enable) {
		/* ASSR is bound to fail with unsigned PSP
		 * verstage used during devlopment phase.
		 * Report and continue with eDP panel mode to
		 * perform eDP link training with right settings
		 */
		bool result;

		result = cp_psp->funcs.enable_assr(cp_psp->handle, link);
		if (!result && link->panel_mode != DP_PANEL_MODE_EDP)
			*panel_mode = DP_PANEL_MODE_DEFAULT;
	}
}
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <[email protected]>
 */
#include "priv.h"

/*
 * Builder for a hardware sequencer (HWSQ) ucode program: commands are
 * accumulated into a byte buffer and submitted as a unit on fini.
 */
struct nvkm_hwsq {
	struct nvkm_subdev *subdev;
	/* last register address/data emitted; used below to emit the short
	 * 16-bit command forms when the upper 16 bits are unchanged
	 */
	u32 addr;
	u32 data;
	struct {
		u8 data[512];	/* accumulated ucode bytes */
		u16 size;	/* bytes used (words after fini's conversion) */
	} c;
};

/*
 * Append @size raw command bytes to the ucode buffer.
 * NOTE(review): no bounds check against sizeof(c.data); callers must keep
 * the total program within 512 bytes (oversize is only caught in fini).
 */
static void
hwsq_cmd(struct nvkm_hwsq *hwsq, int size, u8 data[])
{
	memcpy(&hwsq->c.data[hwsq->c.size], data, size * sizeof(data[0]));
	hwsq->c.size += size;
}

/* Allocate and reset a HWSQ builder; returns 0 or -ENOMEM. */
int
nvkm_hwsq_init(struct nvkm_subdev *subdev, struct nvkm_hwsq **phwsq)
{
	struct nvkm_hwsq *hwsq;

	hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
	if (hwsq) {
		hwsq->subdev = subdev;
		/* ~0 guarantees the first wr32 emits full 32-bit commands */
		hwsq->addr = ~0;
		hwsq->data = ~0;
		/* 0x7f fill — presumably a padding/no-op byte for unused
		 * buffer space; TODO confirm against HWSQ opcode docs
		 */
		memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data));
		hwsq->c.size = 0;
	}

	return hwsq ? 0 : -ENOMEM;
}

/*
 * Finalize the program, optionally execute it via the bus backend, then free
 * the builder.  On failure the assembled words are dumped for debugging.
 */
int
nvkm_hwsq_fini(struct nvkm_hwsq **phwsq, bool exec)
{
	struct nvkm_hwsq *hwsq = *phwsq;
	int ret = 0, i;

	if (hwsq) {
		struct nvkm_subdev *subdev = hwsq->subdev;
		struct nvkm_bus *bus = subdev->device->bus;

		/* convert byte count to word count.  NOTE(review): (size+4)/4
		 * rounds up but also adds a word when size is already a
		 * multiple of 4 — kept as-is, verify intent before changing
		 */
		hwsq->c.size = (hwsq->c.size + 4) / 4;
		if (hwsq->c.size <= bus->func->hwsq_size) {
			if (exec)
				ret = bus->func->hwsq_exec(bus, (u32 *)hwsq->c.data,
							   hwsq->c.size);
			if (ret)
				nvkm_error(subdev, "hwsq exec failed: %d\n", ret);
		} else {
			nvkm_error(subdev, "hwsq ucode too large\n");
			ret = -ENOSPC;
		}

		for (i = 0; ret && i < hwsq->c.size; i++)
			nvkm_error(subdev, "\t%08x\n", ((u32 *)hwsq->c.data)[i]);

		*phwsq = NULL;
		kfree(hwsq);
	}
	return ret;
}

/*
 * Emit a register write.  Short (16-bit) data/address command forms are used
 * when the upper 16 bits match the previously emitted value.
 */
void
nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data)
{
	nvkm_debug(hwsq->subdev, "R[%06x] = %08x\n", addr, data);

	if (hwsq->data != data) {
		if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
			/* full 32-bit data load */
			hwsq_cmd(hwsq, 5, (u8[]){ 0xe2, data, data >> 8,
						  data >> 16, data >> 24 });
		} else {
			/* low 16 bits only */
			hwsq_cmd(hwsq, 3, (u8[]){ 0x42, data, data >> 8 });
		}
	}

	if ((addr & 0xffff0000) != (hwsq->addr & 0xffff0000)) {
		/* full 32-bit address (triggers the write) */
		hwsq_cmd(hwsq, 5, (u8[]){ 0xe0, addr, addr >> 8,
					  addr >> 16, addr >> 24 });
	} else {
		hwsq_cmd(hwsq, 3, (u8[]){ 0x40, addr, addr >> 8 });
	}

	hwsq->addr = addr;
	hwsq->data = data;
}

/*
 * Emit a flag set/clear/test command; @data < 0 clears, 0 sets, >= 1 appears
 * to select a third mode — encoding kept as-is (two +0x20 steps).
 */
void
nvkm_hwsq_setf(struct nvkm_hwsq *hwsq, u8 flag, int data)
{
	nvkm_debug(hwsq->subdev, " FLAG[%02x] = %d\n", flag, data);
	flag += 0x80;
	if (data >= 0)
		flag += 0x20;
	if (data >= 1)
		flag += 0x20;
	hwsq_cmd(hwsq, 1, (u8[]){ flag });
}

/* Emit a wait-until-flag-equals-value command (opcode 0x5f). */
void
nvkm_hwsq_wait(struct nvkm_hwsq *hwsq, u8 flag, u8 data)
{
	nvkm_debug(hwsq->subdev, " WAIT[%02x] = %d\n", flag, data);
	hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
}

/*
 * Emit a wait for the next vblank edge.  The head to synchronise with is
 * chosen by reading the display registers and picking the head with the
 * largest active resolution; no-op (with a debug message) if no head is
 * active.  head_sync is only read when px > 0, i.e. after assignment.
 */
void
nvkm_hwsq_wait_vblank(struct nvkm_hwsq *hwsq)
{
	struct nvkm_subdev *subdev = hwsq->subdev;
	struct nvkm_device *device = subdev->device;
	u32 heads, x, y, px = 0;
	int i, head_sync;

	heads = nvkm_rd32(device, 0x610050);
	for (i = 0; i < 2; i++) {
		/* Heuristic: sync to head with biggest resolution */
		if (heads & (2 << (i << 3))) {
			x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
			y = (x & 0xffff0000) >> 16;
			x &= 0x0000ffff;
			if ((x * y) > px) {
				px = (x * y);
				head_sync = i;
			}
		}
	}

	if (px == 0) {
		nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
		return;
	}

	nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
	/* wait for not-vblank then vblank, i.e. the next rising edge */
	nvkm_hwsq_wait(hwsq, head_sync ? 0x3 : 0x1, 0x0);
	nvkm_hwsq_wait(hwsq, head_sync ? 0x3 : 0x1, 0x1);
}

/*
 * Emit a delay of (at least) @nsec nanoseconds, encoded as a microsecond
 * mantissa in the low 2 bits with a 2-bit-per-step shift count.
 */
void
nvkm_hwsq_nsec(struct nvkm_hwsq *hwsq, u32 nsec)
{
	u8 shift = 0, usec = nsec / 1000;
	while (usec & ~3) {
		usec >>= 2;
		shift++;
	}

	nvkm_debug(hwsq->subdev, " DELAY = %d ns\n", nsec);
	hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec });
}
/* SPDX-License-Identifier: GPL-2.0 */ /* * ppc_cbe_cpufreq.h * * This file contains the definitions used by the cbe_cpufreq driver. * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 * * Author: Christian Krafft <[email protected]> * */ #include <linux/cpufreq.h> #include <linux/types.h> int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode); int cbe_cpufreq_get_pmode(int cpu); int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode); #if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI) extern bool cbe_cpufreq_has_pmi; void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy); void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy); void cbe_cpufreq_pmi_init(void); void cbe_cpufreq_pmi_exit(void); #else #define cbe_cpufreq_has_pmi (0) static inline void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) {} static inline void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) {} static inline void cbe_cpufreq_pmi_init(void) {} static inline void cbe_cpufreq_pmi_exit(void) {} #endif
// SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #define MAX_STACK_RAWTP 10 SEC("raw_tracepoint/sys_enter") int bpf_prog2(void *ctx) { __u64 stack[MAX_STACK_RAWTP]; int error; /* set all the flags which should return -EINVAL */ error = bpf_get_stack(ctx, stack, 0, -1); if (error < 0) goto loop; return error; loop: while (1) { error++; } } char _license[] SEC("license") = "GPL";
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __NET_GENETLINK_H #define __NET_GENETLINK_H #include <linux/wait.h> /* for synchronisation between af_netlink and genetlink */ extern atomic_t genl_sk_destructing_cnt; extern wait_queue_head_t genl_sk_destructing_waitq; #endif /* __LINUX_GENERIC_NETLINK_H */
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017 Pengutronix, Jan Luebbe <[email protected]> */ #include <linux/kernel.h> #include <linux/edac.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <asm/hardware/cache-l2x0.h> #include <asm/hardware/cache-aurora-l2.h> #include "edac_mc.h" #include "edac_device.h" #include "edac_module.h" /************************ EDAC MC (DDR RAM) ********************************/ #define SDRAM_NUM_CS 4 #define SDRAM_CONFIG_REG 0x0 #define SDRAM_CONFIG_ECC_MASK BIT(18) #define SDRAM_CONFIG_REGISTERED_MASK BIT(17) #define SDRAM_CONFIG_BUS_WIDTH_MASK BIT(15) #define SDRAM_ADDR_CTRL_REG 0x10 #define SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs) (20+cs) #define SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(cs) (0x1 << SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs)) #define SDRAM_ADDR_CTRL_ADDR_SEL_MASK(cs) BIT(16+cs) #define SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs) (cs*4+2) #define SDRAM_ADDR_CTRL_SIZE_LOW_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs)) #define SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs) (cs*4) #define SDRAM_ADDR_CTRL_STRUCT_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs)) #define SDRAM_ERR_DATA_H_REG 0x40 #define SDRAM_ERR_DATA_L_REG 0x44 #define SDRAM_ERR_RECV_ECC_REG 0x48 #define SDRAM_ERR_RECV_ECC_VALUE_MASK 0xff #define SDRAM_ERR_CALC_ECC_REG 0x4c #define SDRAM_ERR_CALC_ECC_ROW_OFFSET 8 #define SDRAM_ERR_CALC_ECC_ROW_MASK (0xffff << SDRAM_ERR_CALC_ECC_ROW_OFFSET) #define SDRAM_ERR_CALC_ECC_VALUE_MASK 0xff #define SDRAM_ERR_ADDR_REG 0x50 #define SDRAM_ERR_ADDR_BANK_OFFSET 23 #define SDRAM_ERR_ADDR_BANK_MASK (0x7 << SDRAM_ERR_ADDR_BANK_OFFSET) #define SDRAM_ERR_ADDR_COL_OFFSET 8 #define SDRAM_ERR_ADDR_COL_MASK (0x7fff << SDRAM_ERR_ADDR_COL_OFFSET) #define SDRAM_ERR_ADDR_CS_OFFSET 1 #define SDRAM_ERR_ADDR_CS_MASK (0x3 << SDRAM_ERR_ADDR_CS_OFFSET) #define SDRAM_ERR_ADDR_TYPE_MASK BIT(0) #define SDRAM_ERR_CTRL_REG 0x54 #define SDRAM_ERR_CTRL_THR_OFFSET 16 #define SDRAM_ERR_CTRL_THR_MASK (0xff << 
SDRAM_ERR_CTRL_THR_OFFSET) #define SDRAM_ERR_CTRL_PROP_MASK BIT(9) #define SDRAM_ERR_SBE_COUNT_REG 0x58 #define SDRAM_ERR_DBE_COUNT_REG 0x5c #define SDRAM_ERR_CAUSE_ERR_REG 0xd0 #define SDRAM_ERR_CAUSE_MSG_REG 0xd8 #define SDRAM_ERR_CAUSE_DBE_MASK BIT(1) #define SDRAM_ERR_CAUSE_SBE_MASK BIT(0) #define SDRAM_RANK_CTRL_REG 0x1e0 #define SDRAM_RANK_CTRL_EXIST_MASK(cs) BIT(cs) struct axp_mc_drvdata { void __iomem *base; /* width in bytes */ unsigned int width; /* bank interleaving */ bool cs_addr_sel[SDRAM_NUM_CS]; char msg[128]; }; /* derived from "DRAM Address Multiplexing" in the ARMADA XP Functional Spec */ static uint32_t axp_mc_calc_address(struct axp_mc_drvdata *drvdata, uint8_t cs, uint8_t bank, uint16_t row, uint16_t col) { if (drvdata->width == 8) { /* 64 bit */ if (drvdata->cs_addr_sel[cs]) /* bank interleaved */ return (((row & 0xfff8) << 16) | ((bank & 0x7) << 16) | ((row & 0x7) << 13) | ((col & 0x3ff) << 3)); else return (((row & 0xffff << 16) | ((bank & 0x7) << 13) | ((col & 0x3ff)) << 3)); } else if (drvdata->width == 4) { /* 32 bit */ if (drvdata->cs_addr_sel[cs]) /* bank interleaved */ return (((row & 0xfff0) << 15) | ((bank & 0x7) << 16) | ((row & 0xf) << 12) | ((col & 0x3ff) << 2)); else return (((row & 0xffff << 15) | ((bank & 0x7) << 12) | ((col & 0x3ff)) << 2)); } else { /* 16 bit */ if (drvdata->cs_addr_sel[cs]) /* bank interleaved */ return (((row & 0xffe0) << 14) | ((bank & 0x7) << 16) | ((row & 0x1f) << 11) | ((col & 0x3ff) << 1)); else return (((row & 0xffff << 14) | ((bank & 0x7) << 11) | ((col & 0x3ff)) << 1)); } } static void axp_mc_check(struct mem_ctl_info *mci) { struct axp_mc_drvdata *drvdata = mci->pvt_info; uint32_t data_h, data_l, recv_ecc, calc_ecc, addr; uint32_t cnt_sbe, cnt_dbe, cause_err, cause_msg; uint32_t row_val, col_val, bank_val, addr_val; uint8_t syndrome_val, cs_val; char *msg = drvdata->msg; data_h = readl(drvdata->base + SDRAM_ERR_DATA_H_REG); data_l = readl(drvdata->base + SDRAM_ERR_DATA_L_REG); recv_ecc = 
readl(drvdata->base + SDRAM_ERR_RECV_ECC_REG); calc_ecc = readl(drvdata->base + SDRAM_ERR_CALC_ECC_REG); addr = readl(drvdata->base + SDRAM_ERR_ADDR_REG); cnt_sbe = readl(drvdata->base + SDRAM_ERR_SBE_COUNT_REG); cnt_dbe = readl(drvdata->base + SDRAM_ERR_DBE_COUNT_REG); cause_err = readl(drvdata->base + SDRAM_ERR_CAUSE_ERR_REG); cause_msg = readl(drvdata->base + SDRAM_ERR_CAUSE_MSG_REG); /* clear cause registers */ writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_ERR_REG); writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_MSG_REG); /* clear error counter registers */ if (cnt_sbe) writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG); if (cnt_dbe) writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG); if (!cnt_sbe && !cnt_dbe) return; if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) { if (cnt_sbe) cnt_sbe--; else dev_warn(mci->pdev, "inconsistent SBE count detected\n"); } else { if (cnt_dbe) cnt_dbe--; else dev_warn(mci->pdev, "inconsistent DBE count detected\n"); } /* report earlier errors */ if (cnt_sbe) edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, cnt_sbe, /* error count */ 0, 0, 0, /* pfn, offset, syndrome */ -1, -1, -1, /* top, mid, low layer */ mci->ctl_name, "details unavailable (multiple errors)"); if (cnt_dbe) edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, cnt_dbe, /* error count */ 0, 0, 0, /* pfn, offset, syndrome */ -1, -1, -1, /* top, mid, low layer */ mci->ctl_name, "details unavailable (multiple errors)"); /* report details for most recent error */ cs_val = (addr & SDRAM_ERR_ADDR_CS_MASK) >> SDRAM_ERR_ADDR_CS_OFFSET; bank_val = (addr & SDRAM_ERR_ADDR_BANK_MASK) >> SDRAM_ERR_ADDR_BANK_OFFSET; row_val = (calc_ecc & SDRAM_ERR_CALC_ECC_ROW_MASK) >> SDRAM_ERR_CALC_ECC_ROW_OFFSET; col_val = (addr & SDRAM_ERR_ADDR_COL_MASK) >> SDRAM_ERR_ADDR_COL_OFFSET; syndrome_val = (recv_ecc ^ calc_ecc) & 0xff; addr_val = axp_mc_calc_address(drvdata, cs_val, bank_val, row_val, col_val); 
msg += sprintf(msg, "row=0x%04x ", row_val); /* 11 chars */ msg += sprintf(msg, "bank=0x%x ", bank_val); /* 9 chars */ msg += sprintf(msg, "col=0x%04x ", col_val); /* 11 chars */ msg += sprintf(msg, "cs=%d", cs_val); /* 4 chars */ if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) { edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, /* error count */ addr_val >> PAGE_SHIFT, addr_val & ~PAGE_MASK, syndrome_val, cs_val, -1, -1, /* top, mid, low layer */ mci->ctl_name, drvdata->msg); } else { edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, /* error count */ addr_val >> PAGE_SHIFT, addr_val & ~PAGE_MASK, syndrome_val, cs_val, -1, -1, /* top, mid, low layer */ mci->ctl_name, drvdata->msg); } } static void axp_mc_read_config(struct mem_ctl_info *mci) { struct axp_mc_drvdata *drvdata = mci->pvt_info; uint32_t config, addr_ctrl, rank_ctrl; unsigned int i, cs_struct, cs_size; struct dimm_info *dimm; config = readl(drvdata->base + SDRAM_CONFIG_REG); if (config & SDRAM_CONFIG_BUS_WIDTH_MASK) /* 64 bit */ drvdata->width = 8; else /* 32 bit */ drvdata->width = 4; addr_ctrl = readl(drvdata->base + SDRAM_ADDR_CTRL_REG); rank_ctrl = readl(drvdata->base + SDRAM_RANK_CTRL_REG); for (i = 0; i < SDRAM_NUM_CS; i++) { dimm = mci->dimms[i]; if (!(rank_ctrl & SDRAM_RANK_CTRL_EXIST_MASK(i))) continue; drvdata->cs_addr_sel[i] = !!(addr_ctrl & SDRAM_ADDR_CTRL_ADDR_SEL_MASK(i)); cs_struct = (addr_ctrl & SDRAM_ADDR_CTRL_STRUCT_MASK(i)) >> SDRAM_ADDR_CTRL_STRUCT_OFFSET(i); cs_size = ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(i)) >> (SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(i) - 2) | ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_LOW_MASK(i)) >> SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(i))); switch (cs_size) { case 0: /* 2GBit */ dimm->nr_pages = 524288; break; case 1: /* 256MBit */ dimm->nr_pages = 65536; break; case 2: /* 512MBit */ dimm->nr_pages = 131072; break; case 3: /* 1GBit */ dimm->nr_pages = 262144; break; case 4: /* 4GBit */ dimm->nr_pages = 1048576; break; case 5: /* 8GBit */ dimm->nr_pages = 2097152; break; } 
dimm->grain = 8; dimm->dtype = cs_struct ? DEV_X16 : DEV_X8; dimm->mtype = (config & SDRAM_CONFIG_REGISTERED_MASK) ? MEM_RDDR3 : MEM_DDR3; dimm->edac_mode = EDAC_SECDED; } } static const struct of_device_id axp_mc_of_match[] = { {.compatible = "marvell,armada-xp-sdram-controller",}, {}, }; MODULE_DEVICE_TABLE(of, axp_mc_of_match); static int axp_mc_probe(struct platform_device *pdev) { struct axp_mc_drvdata *drvdata; struct edac_mc_layer layers[1]; const struct of_device_id *id; struct mem_ctl_info *mci; void __iomem *base; uint32_t config; base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) { dev_err(&pdev->dev, "Unable to map regs\n"); return PTR_ERR(base); } config = readl(base + SDRAM_CONFIG_REG); if (!(config & SDRAM_CONFIG_ECC_MASK)) { dev_warn(&pdev->dev, "SDRAM ECC is not enabled\n"); return -EINVAL; } layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; layers[0].size = SDRAM_NUM_CS; layers[0].is_virt_csrow = true; mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*drvdata)); if (!mci) return -ENOMEM; drvdata = mci->pvt_info; drvdata->base = base; mci->pdev = &pdev->dev; platform_set_drvdata(pdev, mci); id = of_match_device(axp_mc_of_match, &pdev->dev); mci->edac_check = axp_mc_check; mci->mtype_cap = MEM_FLAG_DDR3; mci->edac_cap = EDAC_FLAG_SECDED; mci->mod_name = pdev->dev.driver->name; mci->ctl_name = id ? 
id->compatible : "unknown"; mci->dev_name = dev_name(&pdev->dev); mci->scrub_mode = SCRUB_NONE; axp_mc_read_config(mci); /* These SoCs have a reduced width bus */ if (of_machine_is_compatible("marvell,armada380") || of_machine_is_compatible("marvell,armadaxp-98dx3236")) drvdata->width /= 2; /* configure SBE threshold */ /* it seems that SBEs are not captured otherwise */ writel(1 << SDRAM_ERR_CTRL_THR_OFFSET, drvdata->base + SDRAM_ERR_CTRL_REG); /* clear cause registers */ writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_ERR_REG); writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_MSG_REG); /* clear counter registers */ writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG); writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG); if (edac_mc_add_mc(mci)) { edac_mc_free(mci); return -EINVAL; } edac_op_state = EDAC_OPSTATE_POLL; return 0; } static void axp_mc_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); platform_set_drvdata(pdev, NULL); } static struct platform_driver axp_mc_driver = { .probe = axp_mc_probe, .remove = axp_mc_remove, .driver = { .name = "armada_xp_mc_edac", .of_match_table = of_match_ptr(axp_mc_of_match), }, }; /************************ EDAC Device (L2 Cache) ***************************/ struct aurora_l2_drvdata { void __iomem *base; char msg[128]; /* error injection via debugfs */ uint32_t inject_addr; uint32_t inject_mask; uint8_t inject_ctl; struct dentry *debugfs; }; #ifdef CONFIG_EDAC_DEBUG static void aurora_l2_inject(struct aurora_l2_drvdata *drvdata) { drvdata->inject_addr &= AURORA_ERR_INJECT_CTL_ADDR_MASK; drvdata->inject_ctl &= AURORA_ERR_INJECT_CTL_EN_MASK; writel(0, drvdata->base + AURORA_ERR_INJECT_CTL_REG); writel(drvdata->inject_mask, drvdata->base + AURORA_ERR_INJECT_MASK_REG); writel(drvdata->inject_addr | drvdata->inject_ctl, drvdata->base + 
AURORA_ERR_INJECT_CTL_REG); } #endif static void aurora_l2_check(struct edac_device_ctl_info *dci) { struct aurora_l2_drvdata *drvdata = dci->pvt_info; uint32_t cnt, src, txn, err, attr_cap, addr_cap, way_cap; unsigned int cnt_ce, cnt_ue; char *msg = drvdata->msg; size_t size = sizeof(drvdata->msg); size_t len = 0; cnt = readl(drvdata->base + AURORA_ERR_CNT_REG); attr_cap = readl(drvdata->base + AURORA_ERR_ATTR_CAP_REG); addr_cap = readl(drvdata->base + AURORA_ERR_ADDR_CAP_REG); way_cap = readl(drvdata->base + AURORA_ERR_WAY_CAP_REG); cnt_ce = (cnt & AURORA_ERR_CNT_CE_MASK) >> AURORA_ERR_CNT_CE_OFFSET; cnt_ue = (cnt & AURORA_ERR_CNT_UE_MASK) >> AURORA_ERR_CNT_UE_OFFSET; /* clear error counter registers */ if (cnt_ce || cnt_ue) writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG); if (!(attr_cap & AURORA_ERR_ATTR_CAP_VALID)) goto clear_remaining; src = (attr_cap & AURORA_ERR_ATTR_SRC_MSK) >> AURORA_ERR_ATTR_SRC_OFF; if (src <= 3) len += scnprintf(msg+len, size-len, "src=CPU%d ", src); else len += scnprintf(msg+len, size-len, "src=IO "); txn = (attr_cap & AURORA_ERR_ATTR_TXN_MSK) >> AURORA_ERR_ATTR_TXN_OFF; switch (txn) { case 0: len += scnprintf(msg+len, size-len, "txn=Data-Read "); break; case 1: len += scnprintf(msg+len, size-len, "txn=Isn-Read "); break; case 2: len += scnprintf(msg+len, size-len, "txn=Clean-Flush "); break; case 3: len += scnprintf(msg+len, size-len, "txn=Eviction "); break; case 4: len += scnprintf(msg+len, size-len, "txn=Read-Modify-Write "); break; } err = (attr_cap & AURORA_ERR_ATTR_ERR_MSK) >> AURORA_ERR_ATTR_ERR_OFF; switch (err) { case 0: len += scnprintf(msg+len, size-len, "err=CorrECC "); break; case 1: len += scnprintf(msg+len, size-len, "err=UnCorrECC "); break; case 2: len += scnprintf(msg+len, size-len, "err=TagParity "); break; } len += scnprintf(msg+len, size-len, "addr=0x%x ", addr_cap & AURORA_ERR_ADDR_CAP_ADDR_MASK); len += scnprintf(msg+len, size-len, "index=0x%x ", (way_cap & AURORA_ERR_WAY_IDX_MSK) >> 
AURORA_ERR_WAY_IDX_OFF); len += scnprintf(msg+len, size-len, "way=0x%x", (way_cap & AURORA_ERR_WAY_CAP_WAY_MASK) >> AURORA_ERR_WAY_CAP_WAY_OFFSET); /* clear error capture registers */ writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG); if (err) { /* UnCorrECC or TagParity */ if (cnt_ue) cnt_ue--; edac_device_handle_ue(dci, 0, 0, drvdata->msg); } else { if (cnt_ce) cnt_ce--; edac_device_handle_ce(dci, 0, 0, drvdata->msg); } clear_remaining: /* report remaining errors */ while (cnt_ue--) edac_device_handle_ue(dci, 0, 0, "details unavailable (multiple errors)"); while (cnt_ce--) edac_device_handle_ue(dci, 0, 0, "details unavailable (multiple errors)"); } static void aurora_l2_poll(struct edac_device_ctl_info *dci) { #ifdef CONFIG_EDAC_DEBUG struct aurora_l2_drvdata *drvdata = dci->pvt_info; #endif aurora_l2_check(dci); #ifdef CONFIG_EDAC_DEBUG aurora_l2_inject(drvdata); #endif } static const struct of_device_id aurora_l2_of_match[] = { {.compatible = "marvell,aurora-system-cache",}, {}, }; MODULE_DEVICE_TABLE(of, aurora_l2_of_match); static int aurora_l2_probe(struct platform_device *pdev) { struct aurora_l2_drvdata *drvdata; struct edac_device_ctl_info *dci; const struct of_device_id *id; uint32_t l2x0_aux_ctrl; void __iomem *base; base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) { dev_err(&pdev->dev, "Unable to map regs\n"); return PTR_ERR(base); } l2x0_aux_ctrl = readl(base + L2X0_AUX_CTRL); if (!(l2x0_aux_ctrl & AURORA_ACR_PARITY_EN)) dev_warn(&pdev->dev, "tag parity is not enabled\n"); if (!(l2x0_aux_ctrl & AURORA_ACR_ECC_EN)) dev_warn(&pdev->dev, "data ECC is not enabled\n"); dci = edac_device_alloc_ctl_info(sizeof(*drvdata), "cpu", 1, "L", 1, 2, 0); if (!dci) return -ENOMEM; drvdata = dci->pvt_info; drvdata->base = base; dci->dev = &pdev->dev; platform_set_drvdata(pdev, dci); id = of_match_device(aurora_l2_of_match, &pdev->dev); dci->edac_check = aurora_l2_poll; dci->mod_name = pdev->dev.driver->name; dci->ctl_name = id 
? id->compatible : "unknown"; dci->dev_name = dev_name(&pdev->dev); /* clear registers */ writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG); writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG); if (edac_device_add_device(dci)) { edac_device_free_ctl_info(dci); return -EINVAL; } #ifdef CONFIG_EDAC_DEBUG drvdata->debugfs = edac_debugfs_create_dir(dev_name(&pdev->dev)); if (drvdata->debugfs) { edac_debugfs_create_x32("inject_addr", 0644, drvdata->debugfs, &drvdata->inject_addr); edac_debugfs_create_x32("inject_mask", 0644, drvdata->debugfs, &drvdata->inject_mask); edac_debugfs_create_x8("inject_ctl", 0644, drvdata->debugfs, &drvdata->inject_ctl); } #endif return 0; } static void aurora_l2_remove(struct platform_device *pdev) { struct edac_device_ctl_info *dci = platform_get_drvdata(pdev); #ifdef CONFIG_EDAC_DEBUG struct aurora_l2_drvdata *drvdata = dci->pvt_info; edac_debugfs_remove_recursive(drvdata->debugfs); #endif edac_device_del_device(&pdev->dev); edac_device_free_ctl_info(dci); platform_set_drvdata(pdev, NULL); } static struct platform_driver aurora_l2_driver = { .probe = aurora_l2_probe, .remove = aurora_l2_remove, .driver = { .name = "aurora_l2_edac", .of_match_table = of_match_ptr(aurora_l2_of_match), }, }; /************************ Driver registration ******************************/ static struct platform_driver * const drivers[] = { &axp_mc_driver, &aurora_l2_driver, }; static int __init armada_xp_edac_init(void) { int res; if (ghes_get_devices()) return -EBUSY; /* only polling is supported */ edac_op_state = EDAC_OPSTATE_POLL; res = platform_register_drivers(drivers, ARRAY_SIZE(drivers)); if (res) pr_warn("Armada XP EDAC drivers fail to register\n"); return 0; } module_init(armada_xp_edac_init); static void __exit armada_xp_edac_exit(void) { platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } module_exit(armada_xp_edac_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Pengutronix"); MODULE_DESCRIPTION("EDAC 
Drivers for Marvell Armada XP SDRAM and L2 Cache Controller");
/* * Copyright (c) 2016 Laurent Pinchart <[email protected]> * * DRM core format related functions * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #include <linux/bug.h> #include <linux/ctype.h> #include <linux/export.h> #include <linux/kernel.h> #include <drm/drm_device.h> #include <drm/drm_fourcc.h> /** * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description * @bpp: bits per pixels * @depth: bit depth per pixel * * Computes a drm fourcc pixel format code for the given @bpp/@depth values. 
*/ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) { uint32_t fmt = DRM_FORMAT_INVALID; switch (bpp) { case 1: if (depth == 1) fmt = DRM_FORMAT_C1; break; case 2: if (depth == 2) fmt = DRM_FORMAT_C2; break; case 4: if (depth == 4) fmt = DRM_FORMAT_C4; break; case 8: if (depth == 8) fmt = DRM_FORMAT_C8; break; case 16: switch (depth) { case 15: fmt = DRM_FORMAT_XRGB1555; break; case 16: fmt = DRM_FORMAT_RGB565; break; default: break; } break; case 24: if (depth == 24) fmt = DRM_FORMAT_RGB888; break; case 32: switch (depth) { case 24: fmt = DRM_FORMAT_XRGB8888; break; case 30: fmt = DRM_FORMAT_XRGB2101010; break; case 32: fmt = DRM_FORMAT_ARGB8888; break; default: break; } break; default: break; } return fmt; } EXPORT_SYMBOL(drm_mode_legacy_fb_format); /** * drm_driver_legacy_fb_format - compute drm fourcc code from legacy description * @dev: DRM device * @bpp: bits per pixels * @depth: bit depth per pixel * * Computes a drm fourcc pixel format code for the given @bpp/@depth values. * Unlike drm_mode_legacy_fb_format() this looks at the drivers mode_config, * and depending on the &drm_mode_config.quirk_addfb_prefer_host_byte_order flag * it returns little endian byte order or host byte order framebuffer formats. 
*/ uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, uint32_t bpp, uint32_t depth) { uint32_t fmt = drm_mode_legacy_fb_format(bpp, depth); if (dev->mode_config.quirk_addfb_prefer_host_byte_order) { if (fmt == DRM_FORMAT_XRGB8888) fmt = DRM_FORMAT_HOST_XRGB8888; if (fmt == DRM_FORMAT_ARGB8888) fmt = DRM_FORMAT_HOST_ARGB8888; if (fmt == DRM_FORMAT_RGB565) fmt = DRM_FORMAT_HOST_RGB565; if (fmt == DRM_FORMAT_XRGB1555) fmt = DRM_FORMAT_HOST_XRGB1555; } if (dev->mode_config.quirk_addfb_prefer_xbgr_30bpp && fmt == DRM_FORMAT_XRGB2101010) fmt = DRM_FORMAT_XBGR2101010; return fmt; } EXPORT_SYMBOL(drm_driver_legacy_fb_format); /** * drm_driver_color_mode_format - Compute DRM 4CC code from color mode * @dev: DRM device * @color_mode: command-line color mode * * Computes a DRM 4CC pixel format code for the given color mode using * drm_driver_color_mode(). The color mode is in the format used and the * kernel command line. It specifies the number of bits per pixel * and color depth in a single value. * * Useful in fbdev emulation code, since that deals in those values. The * helper does not consider YUV or other complicated formats. This means * only legacy formats are supported (fmt->depth is a legacy field), but * the framebuffer emulation can only deal with such formats, specifically * RGB/BGA formats. */ uint32_t drm_driver_color_mode_format(struct drm_device *dev, unsigned int color_mode) { switch (color_mode) { case 15: return drm_driver_legacy_fb_format(dev, 16, 15); case 32: return drm_driver_legacy_fb_format(dev, 32, 24); default: return drm_driver_legacy_fb_format(dev, color_mode, color_mode); } } EXPORT_SYMBOL(drm_driver_color_mode_format); /* * Internal function to query information for a given format. See * drm_format_info() for the public API. 
*/
/*
 * __drm_format_info - look up the metadata record for a DRM fourcc code
 * @format: pixel format code (DRM_FORMAT_*)
 *
 * Performs a linear scan of the static format table below and returns a
 * pointer to the matching entry, or NULL when the fourcc is not known.
 * Unlike drm_format_info(), no warning is emitted for unknown formats.
 */
const struct drm_format_info *__drm_format_info(u32 format)
{
	/*
	 * One entry per supported fourcc. Byte-aligned formats describe their
	 * layout with .cpp (chars per pixel); sub-byte or block-packed formats
	 * use .char_per_block together with .block_w/.block_h instead.
	 * .hsub/.vsub are the chroma subsampling factors for YUV formats.
	 */
	static const struct drm_format_info formats[] = {
		/* Color-indexed (palette) formats, 1/2/4/8 bpp */
		{ .format = DRM_FORMAT_C1, .depth = 1, .num_planes = 1, .char_per_block = { 1, }, .block_w = { 8, }, .block_h = { 1, }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
		{ .format = DRM_FORMAT_C2, .depth = 2, .num_planes = 1, .char_per_block = { 1, }, .block_w = { 4, }, .block_h = { 1, }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
		{ .format = DRM_FORMAT_C4, .depth = 4, .num_planes = 1, .char_per_block = { 1, }, .block_w = { 2, }, .block_h = { 1, }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
		{ .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
		/* Darkness/luminance-only D* formats */
		{ .format = DRM_FORMAT_D1, .depth = 1, .num_planes = 1, .char_per_block = { 1, }, .block_w = { 8, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_D2, .depth = 2, .num_planes = 1, .char_per_block = { 1, }, .block_w = { 4, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_D4, .depth = 4, .num_planes = 1, .char_per_block = { 1, }, .block_w = { 2, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_D8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
		/* Single-channel R* formats */
		{ .format = DRM_FORMAT_R1, .depth = 1, .num_planes = 1, .char_per_block = { 1, }, .block_w = { 8, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_R2, .depth = 2, .num_planes = 1, .char_per_block = { 1, }, .block_w = { 4, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_R4, .depth = 4, .num_planes = 1, .char_per_block = { 1, }, .block_w = { 2, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_R8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_R10, .depth = 10, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_R12, .depth = 12, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
		/* Packed 8-bit RGB */
		{ .format = DRM_FORMAT_RGB332, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_BGR233, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
		/* 16-bit 4:4:4:4 formats */
		{ .format = DRM_FORMAT_XRGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_XBGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_RGBX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_BGRX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_ARGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_ABGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_RGBA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_BGRA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		/* 16-bit 1:5:5:5 / 5:5:5:1 formats */
		{ .format = DRM_FORMAT_XRGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_XBGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_RGBX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_BGRX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_ARGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_ABGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_RGBA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		/* 16-bit 5:6:5 formats */
		{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
#ifdef __BIG_ENDIAN
		/* Big-endian-tagged variants, only meaningful on BE hosts */
		{ .format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
#endif
		/* 24-bit packed RGB */
		{ .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
		/* 32-bit formats with unused alpha byte */
		{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_RGBX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_BGRX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
		/* RGB565 plus a separate 8-bit alpha plane */
		{ .format = DRM_FORMAT_RGB565_A8, .depth = 24, .num_planes = 2, .cpp = { 2, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_BGR565_A8, .depth = 24, .num_planes = 2, .cpp = { 2, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		/* 30-bit 2:10:10:10 formats */
		{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_RGBX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_BGRX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		/* 32-bit formats with alpha */
		{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		/* 64-bit half-float formats */
		{ .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_AXBXGXRX106106106106, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		/* 64-bit integer formats */
		{ .format = DRM_FORMAT_XRGB16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_XBGR16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
		{ .format = DRM_FORMAT_ARGB16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_ABGR16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		/* RGB plus a separate 8-bit alpha plane */
		{ .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_XBGR8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_RGBX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		{ .format = DRM_FORMAT_BGRX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
		/* Three-plane planar YUV */
		{ .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true },
		{ .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true },
		{ .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true },
		{ .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true },
		{ .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true },
		/* Two-plane semi-planar YUV (NV*) */
		{ .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
		{ .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
		{ .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
		/* Packed YUV 4:2:2 */
		{ .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
		/* Packed YUV 4:4:4 */
		{ .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_VUY888, .depth = 0, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
		/* Packed 10/12/16-bit YUV */
		{ .format = DRM_FORMAT_Y210, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_Y212, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_Y216, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_Y410, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
		{ .format = DRM_FORMAT_Y412, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
		{ .format = DRM_FORMAT_Y416, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
		{ .format = DRM_FORMAT_XVYU2101010, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_XVYU12_16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_XVYU16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
		/* 2x2-block packed YUV (block layout via char_per_block) */
		{ .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1, .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 }, .hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true },
		{ .format = DRM_FORMAT_X0L0, .depth = 0, .num_planes = 1, .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
		{ .format = DRM_FORMAT_Y0L2, .depth = 0, .num_planes = 1, .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 }, .hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true },
		{ .format = DRM_FORMAT_X0L2, .depth = 0, .num_planes = 1, .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
		/* 16-bit-per-component semi-planar YUV (P01x) */
		{ .format = DRM_FORMAT_P010, .depth = 0, .num_planes = 2, .char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true},
		{ .format = DRM_FORMAT_P012, .depth = 0, .num_planes = 2, .char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true},
		{ .format = DRM_FORMAT_P016, .depth = 0, .num_planes = 2, .char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true},
		{ .format = DRM_FORMAT_P210, .depth = 0, .num_planes = 2, .char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
		/* Non-byte-aligned packed formats: .cpp is deliberately 0 */
		{ .format = DRM_FORMAT_VUY101010, .depth = 0, .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_YUV420_8BIT, .depth = 0, .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
		{ .format = DRM_FORMAT_YUV420_10BIT, .depth = 0, .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
		/* 10-bit packed semi-planar YUV (NV15/NV20/NV30) */
		{ .format = DRM_FORMAT_NV15, .depth = 0, .num_planes = 2, .char_per_block = { 5, 5, 0 }, .block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
		{ .format = DRM_FORMAT_NV20, .depth = 0, .num_planes = 2, .char_per_block = { 5, 5, 0 }, .block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_NV30, .depth = 0, .num_planes = 2, .char_per_block = { 5, 5, 0 }, .block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
		/* 16-bit three-plane YUV 4:4:4 */
		{ .format = DRM_FORMAT_Q410, .depth = 0, .num_planes = 3, .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true },
		{ .format = DRM_FORMAT_Q401, .depth = 0, .num_planes = 3, .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true },
		/* 10-bit semi-planar, 3 pixels packed per 32-bit word */
		{ .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2, .char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true},
	};

	unsigned int i;

	/* Linear search; the table is small enough that this is fine. */
	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

/**
 * drm_format_info - query information for a given format
 * @format: pixel format (DRM_FORMAT_*)
 *
 * The caller should only pass a supported pixel format to this function.
 * Unsupported pixel formats will generate a warning in the kernel log.
* * Returns: * The instance of struct drm_format_info that describes the pixel format, or * NULL if the format is unsupported. */ const struct drm_format_info *drm_format_info(u32 format) { const struct drm_format_info *info; info = __drm_format_info(format); WARN_ON(!info); return info; } EXPORT_SYMBOL(drm_format_info); /** * drm_get_format_info - query information for a given framebuffer configuration * @dev: DRM device * @mode_cmd: metadata from the userspace fb creation request * * Returns: * The instance of struct drm_format_info that describes the pixel format, or * NULL if the format is unsupported. */ const struct drm_format_info * drm_get_format_info(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd) { const struct drm_format_info *info = NULL; if (dev->mode_config.funcs->get_format_info) info = dev->mode_config.funcs->get_format_info(mode_cmd); if (!info) info = drm_format_info(mode_cmd->pixel_format); return info; } EXPORT_SYMBOL(drm_get_format_info); /** * drm_format_info_block_width - width in pixels of block. * @info: pixel format info * @plane: plane index * * Returns: * The width in pixels of a block, depending on the plane index. */ unsigned int drm_format_info_block_width(const struct drm_format_info *info, int plane) { if (!info || plane < 0 || plane >= info->num_planes) return 0; if (!info->block_w[plane]) return 1; return info->block_w[plane]; } EXPORT_SYMBOL(drm_format_info_block_width); /** * drm_format_info_block_height - height in pixels of a block * @info: pixel format info * @plane: plane index * * Returns: * The height in pixels of a block, depending on the plane index. 
*/ unsigned int drm_format_info_block_height(const struct drm_format_info *info, int plane) { if (!info || plane < 0 || plane >= info->num_planes) return 0; if (!info->block_h[plane]) return 1; return info->block_h[plane]; } EXPORT_SYMBOL(drm_format_info_block_height); /** * drm_format_info_bpp - number of bits per pixel * @info: pixel format info * @plane: plane index * * Returns: * The actual number of bits per pixel, depending on the plane index. */ unsigned int drm_format_info_bpp(const struct drm_format_info *info, int plane) { if (!info || plane < 0 || plane >= info->num_planes) return 0; return info->char_per_block[plane] * 8 / (drm_format_info_block_width(info, plane) * drm_format_info_block_height(info, plane)); } EXPORT_SYMBOL(drm_format_info_bpp); /** * drm_format_info_min_pitch - computes the minimum required pitch in bytes * @info: pixel format info * @plane: plane index * @buffer_width: buffer width in pixels * * Returns: * The minimum required pitch in bytes for a buffer by taking into consideration * the pixel format information and the buffer width. */ uint64_t drm_format_info_min_pitch(const struct drm_format_info *info, int plane, unsigned int buffer_width) { if (!info || plane < 0 || plane >= info->num_planes) return 0; return DIV_ROUND_UP_ULL((u64)buffer_width * info->char_per_block[plane], drm_format_info_block_width(info, plane) * drm_format_info_block_height(info, plane)); } EXPORT_SYMBOL(drm_format_info_min_pitch);
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BSD Process Accounting for Linux - Definitions
 *
 * Author: Marco van Wieringen ([email protected])
 *
 * This header file contains the definitions needed to implement
 * BSD-style process accounting. The kernel accounting code and all
 * user-level programs that try to do something useful with the
 * process accounting log must include this file.
 *
 * Copyright (C) 1995 - 1997 Marco van Wieringen - ELM Consultancy B.V.
 *
 */

#ifndef _LINUX_ACCT_H
#define _LINUX_ACCT_H

#include <uapi/linux/acct.h>

#ifdef CONFIG_BSD_PROCESS_ACCT
struct pid_namespace;
/* Gather accounting data for the current task at exit time. */
extern void acct_collect(long exitcode, int group_dead);
/* Write the collected accounting record to the accounting file. */
extern void acct_process(void);
/* Tear down accounting state for a dying PID namespace. */
extern void acct_exit_ns(struct pid_namespace *);
#else
/* Accounting disabled: all hooks compile away to nothing. */
#define acct_collect(x,y) do { } while (0)
#define acct_process() do { } while (0)
#define acct_exit_ns(ns) do { } while (0)
#endif

/*
 * ACCT_VERSION numbers as yet defined:
 * 0: old format (until 2.6.7) with 16 bit uid/gid
 * 1: extended variant (binary compatible on M68K)
 * 2: extended variant (binary compatible on everything except M68K)
 * 3: new binary incompatible format (64 bytes)
 * 4: new binary incompatible format (128 bytes)
 * 5: new binary incompatible format (128 bytes, second half)
 *
 */

/* Undo any earlier definitions before selecting the active format below. */
#undef ACCT_VERSION
#undef AHZ

#ifdef CONFIG_BSD_PROCESS_ACCT_V3
#define ACCT_VERSION 3
#define AHZ 100
typedef struct acct_v3 acct_t;
#else
#ifdef CONFIG_M68K
#define ACCT_VERSION 1
#else
#define ACCT_VERSION 2
#endif
#define AHZ (USER_HZ)
typedef struct acct acct_t;
#endif

#include <linux/jiffies.h>

/*
 * Yet another set of HZ to *HZ helper functions.
 * See <linux/jiffies.h> for the original.
 */

/* Convert a jiffies count into accounting (AHZ) ticks. */
static inline u32 jiffies_to_AHZ(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / AHZ)) == 0
	/* HZ and AHZ divide evenly: a single multiply or divide is exact. */
# if HZ < AHZ
	return x * (AHZ / HZ);
# else
	return x / (HZ / AHZ);
# endif
#else
	/* Generic path: convert through nanoseconds. */
	u64 tmp = (u64)x * TICK_NSEC;

	do_div(tmp, (NSEC_PER_SEC / AHZ));
	return (long)tmp;
#endif
}

/* Convert a nanosecond count into accounting (AHZ) ticks. */
static inline u64 nsec_to_AHZ(u64 x)
{
#if (NSEC_PER_SEC % AHZ) == 0
	/* AHZ divides a second evenly: exact division. */
	do_div(x, (NSEC_PER_SEC / AHZ));
#elif (AHZ % 512) == 0
	/* AHZ is a multiple of 512: scale first to keep the divide exact. */
	x *= AHZ/512;
	do_div(x, (NSEC_PER_SEC / 512));
#else
	/*
	 * max relative error 5.7e-8 (1.8s per year) for AHZ <= 1024,
	 * overflow after 64.99 years.
	 * exact for AHZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
	 */
	x *= 9;
	do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (AHZ/2)) / AHZ));
#endif
	return x;
}

#endif	/* _LINUX_ACCT_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Driver for the Conexant CX25821 PCIe bridge
 *
 * Copyright (C) 2009 Conexant Systems Inc.
 * Authors <[email protected]>, <[email protected]>
 */
#ifndef __CX25821_REGISTERS__
#define __CX25821_REGISTERS__

/* Risc Instructions (opcode/flag bits for the on-chip RISC DMA engine) */
#define RISC_CNT_INC		 0x00010000
#define RISC_CNT_RESET		 0x00030000
#define RISC_IRQ1		 0x01000000
#define RISC_IRQ2		 0x02000000
#define RISC_EOL		 0x04000000
#define RISC_SOL		 0x08000000
#define RISC_WRITE		 0x10000000
#define RISC_SKIP		 0x20000000
#define RISC_JUMP		 0x70000000
#define RISC_SYNC		 0x80000000
#define RISC_RESYNC		 0x80008000
#define RISC_READ		 0x90000000
#define RISC_WRITERM		 0xB0000000
#define RISC_WRITECM		 0xC0000000
#define RISC_WRITECR		 0xD0000000
#define RISC_WRITEC		 0x50000000
#define RISC_READC		 0xA0000000

#define RISC_SYNC_ODD		 0x00000000
#define RISC_SYNC_EVEN		 0x00000200
#define RISC_SYNC_ODD_VBI	 0x00000006
#define RISC_SYNC_EVEN_VBI	 0x00000207
#define RISC_NOOP		 0xF0000000

/* ASB SRAM */
#define TX_SRAM			 0x000000	/* Transmit SRAM */
#define RX_RAM			 0x010000	/* Receive SRAM */

/* Application Layer (AL) */
#define DEV_CNTRL2		 0x040000	/* Device control */
#define FLD_RUN_RISC		 0x00000020

#define PCI_INT_MSK		 0x040010	/* PCI interrupt mask */
#define PCI_INT_STAT		 0x040014	/* PCI interrupt status */
#define PCI_INT_MSTAT		 0x040018	/* PCI interrupt masked status */

/*
 * NOTE(review): several of the FLD_* bit positions below overlap (e.g.
 * FLD_HAMMERHEAD_INT and FLD_I2C_3_RACK are both bit 27). Presumably they
 * apply to different interrupt registers - verify against the CX25821
 * datasheet before relying on them together.
 */
#define FLD_HAMMERHEAD_INT	 (1 << 27)
#define FLD_UART_INT		 (1 << 26)
#define FLD_IRQN_INT		 (1 << 25)
#define FLD_TM_INT		 (1 << 28)
#define FLD_I2C_3_RACK		 (1 << 27)
#define FLD_I2C_3_INT		 (1 << 26)
#define FLD_I2C_2_RACK		 (1 << 25)
#define FLD_I2C_2_INT		 (1 << 24)
#define FLD_I2C_1_RACK		 (1 << 23)
#define FLD_I2C_1_INT		 (1 << 22)
#define FLD_APB_DMA_BERR_INT	 (1 << 21)
#define FLD_AL_WR_BERR_INT	 (1 << 20)
#define FLD_AL_RD_BERR_INT	 (1 << 19)
#define FLD_RISC_WR_BERR_INT	 (1 << 18)
#define FLD_RISC_RD_BERR_INT	 (1 << 17)

#define FLD_VID_I_INT		 (1 << 8)
#define FLD_VID_H_INT		 (1 << 7)
#define FLD_VID_G_INT		 (1 << 6)
#define FLD_VID_F_INT		 (1 << 5)
#define FLD_VID_E_INT		 (1 << 4)
#define FLD_VID_D_INT		 (1 << 3)
#define FLD_VID_C_INT		 (1 << 2)
#define FLD_VID_B_INT		 (1 << 1)
#define FLD_VID_A_INT		 (1 << 0)

/* Per-channel video interrupt register blocks (mask/status/masked/set) */
#define VID_A_INT_MSK		 0x040020	/* Video A interrupt mask */
#define VID_A_INT_STAT		 0x040024	/* Video A interrupt status */
#define VID_A_INT_MSTAT		 0x040028	/* Video A interrupt masked status */
#define VID_A_INT_SSTAT		 0x04002C	/* Video A interrupt set status */

#define VID_B_INT_MSK		 0x040030	/* Video B interrupt mask */
#define VID_B_INT_STAT		 0x040034	/* Video B interrupt status */
#define VID_B_INT_MSTAT		 0x040038	/* Video B interrupt masked status */
#define VID_B_INT_SSTAT		 0x04003C	/* Video B interrupt set status */

#define VID_C_INT_MSK		 0x040040	/* Video C interrupt mask */
#define VID_C_INT_STAT		 0x040044	/* Video C interrupt status */
#define VID_C_INT_MSTAT		 0x040048	/* Video C interrupt masked status */
#define VID_C_INT_SSTAT		 0x04004C	/* Video C interrupt set status */

#define VID_D_INT_MSK		 0x040050	/* Video D interrupt mask */
#define VID_D_INT_STAT		 0x040054	/* Video D interrupt status */
#define VID_D_INT_MSTAT		 0x040058	/* Video D interrupt masked status */
#define VID_D_INT_SSTAT		 0x04005C	/* Video D interrupt set status */

#define VID_E_INT_MSK		 0x040060	/* Video E interrupt mask */
#define VID_E_INT_STAT		 0x040064	/* Video E interrupt status */
#define VID_E_INT_MSTAT		 0x040068	/* Video E interrupt masked status */
#define VID_E_INT_SSTAT		 0x04006C	/* Video E interrupt set status */

#define VID_F_INT_MSK		 0x040070	/* Video F interrupt mask */
#define VID_F_INT_STAT		 0x040074	/* Video F interrupt status */
#define VID_F_INT_MSTAT		 0x040078	/* Video F interrupt masked status */
#define VID_F_INT_SSTAT		 0x04007C	/* Video F interrupt set status */

#define VID_G_INT_MSK		 0x040080	/* Video G interrupt mask */
#define VID_G_INT_STAT		 0x040084	/* Video G interrupt status */
#define VID_G_INT_MSTAT		 0x040088	/* Video G interrupt masked status */
#define VID_G_INT_SSTAT		 0x04008C	/* Video G interrupt set status */

#define VID_H_INT_MSK		 0x040090	/* Video H interrupt mask */
#define VID_H_INT_STAT		 0x040094	/* Video H interrupt status */
#define VID_H_INT_MSTAT		 0x040098	/* Video H interrupt masked status */
#define VID_H_INT_SSTAT		 0x04009C	/* Video H interrupt set status */

#define VID_I_INT_MSK		 0x0400A0	/* Video I interrupt mask */
#define VID_I_INT_STAT		 0x0400A4	/* Video I interrupt status */
#define VID_I_INT_MSTAT		 0x0400A8	/* Video I interrupt masked status */
#define VID_I_INT_SSTAT		 0x0400AC	/* Video I interrupt set status */

#define VID_J_INT_MSK		 0x0400B0	/* Video J interrupt mask */
#define VID_J_INT_STAT		 0x0400B4	/* Video J interrupt status */
#define VID_J_INT_MSTAT		 0x0400B8	/* Video J interrupt masked status */
#define VID_J_INT_SSTAT		 0x0400BC	/* Video J interrupt set status */

/* Bits within the per-channel video interrupt registers */
#define FLD_VID_SRC_OPC_ERR	 0x00020000
#define FLD_VID_DST_OPC_ERR	 0x00010000
#define FLD_VID_SRC_SYNC	 0x00002000
#define FLD_VID_DST_SYNC	 0x00001000
#define FLD_VID_SRC_UF		 0x00000200
#define FLD_VID_DST_OF		 0x00000100
#define FLD_VID_SRC_RISC2	 0x00000020
#define FLD_VID_DST_RISC2	 0x00000010
#define FLD_VID_SRC_RISC1	 0x00000002
#define FLD_VID_DST_RISC1	 0x00000001

/* Convenience error masks for source and destination directions */
#define FLD_VID_SRC_ERRORS	 (FLD_VID_SRC_OPC_ERR | FLD_VID_SRC_SYNC | FLD_VID_SRC_UF)
#define FLD_VID_DST_ERRORS	 (FLD_VID_DST_OPC_ERR | FLD_VID_DST_SYNC | FLD_VID_DST_OF)

/* Per-channel audio interrupt register blocks */
#define AUD_A_INT_MSK		 0x0400C0	/* Audio Int interrupt mask */
#define AUD_A_INT_STAT		 0x0400C4	/* Audio Int interrupt status */
#define AUD_A_INT_MSTAT		 0x0400C8	/* Audio Int interrupt masked status */
#define AUD_A_INT_SSTAT		 0x0400CC	/* Audio Int interrupt set status */

#define AUD_B_INT_MSK		 0x0400D0	/* Audio Int interrupt mask */
#define AUD_B_INT_STAT		 0x0400D4	/* Audio Int interrupt status */
#define AUD_B_INT_MSTAT		 0x0400D8	/* Audio Int interrupt masked status */
#define AUD_B_INT_SSTAT		 0x0400DC	/* Audio Int interrupt set status */

#define AUD_C_INT_MSK		 0x0400E0	/* Audio Int interrupt mask */
#define AUD_C_INT_STAT		 0x0400E4	/* Audio Int interrupt status */
#define AUD_C_INT_MSTAT		 0x0400E8	/* Audio Int interrupt masked status */
#define AUD_C_INT_SSTAT		 0x0400EC	/* Audio Int interrupt set status */

#define AUD_D_INT_MSK		 0x0400F0	/* Audio Int interrupt mask */
#define AUD_D_INT_STAT		 0x0400F4	/* Audio Int interrupt status */
#define AUD_D_INT_MSTAT		 0x0400F8	/* Audio Int interrupt masked status */
#define AUD_D_INT_SSTAT		 0x0400FC	/* Audio Int interrupt set status */

#define AUD_E_INT_MSK		 0x040100	/* Audio Int interrupt mask */
#define AUD_E_INT_STAT		 0x040104	/* Audio Int interrupt status */
#define AUD_E_INT_MSTAT		 0x040108	/* Audio Int interrupt masked status */
#define AUD_E_INT_SSTAT		 0x04010C	/* Audio Int interrupt set status */

/* Bits within the per-channel audio interrupt registers */
#define FLD_AUD_SRC_OPC_ERR	 0x00020000
#define FLD_AUD_DST_OPC_ERR	 0x00010000
#define FLD_AUD_SRC_SYNC	 0x00002000
#define FLD_AUD_DST_SYNC	 0x00001000
#define FLD_AUD_SRC_OF		 0x00000200
#define FLD_AUD_DST_OF		 0x00000100
#define FLD_AUD_SRC_RISCI2	 0x00000020
#define FLD_AUD_DST_RISCI2	 0x00000010
#define FLD_AUD_SRC_RISCI1	 0x00000002
#define FLD_AUD_DST_RISCI1	 0x00000001

/* MBIF interrupt register blocks */
#define MBIF_A_INT_MSK		 0x040110	/* MBIF Int interrupt mask */
#define MBIF_A_INT_STAT		 0x040114	/* MBIF Int interrupt status */
#define MBIF_A_INT_MSTAT	 0x040118	/* MBIF Int interrupt masked status */
#define MBIF_A_INT_SSTAT	 0x04011C	/* MBIF Int interrupt set status */

#define MBIF_B_INT_MSK		 0x040120	/* MBIF Int interrupt mask */
#define MBIF_B_INT_STAT		 0x040124	/* MBIF Int interrupt status */
#define MBIF_B_INT_MSTAT	 0x040128	/* MBIF Int interrupt masked status */
#define MBIF_B_INT_SSTAT	 0x04012C	/* MBIF Int interrupt set status */

#define FLD_MBIF_DST_OPC_ERR	 0x00010000
#define FLD_MBIF_DST_SYNC	 0x00001000
#define FLD_MBIF_DST_OF		 0x00000100
#define FLD_MBIF_DST_RISCI2	 0x00000010
#define FLD_MBIF_DST_RISCI1	 0x00000001

/*
 * NOTE(review): the AUD_EXT_* addresses below are identical to the VID_E_*
 * interrupt block (0x040060-0x04006C). This matches the original header,
 * but looks suspicious - confirm against the datasheet.
 */
#define AUD_EXT_INT_MSK		 0x040060	/* Audio Ext interrupt mask */
#define AUD_EXT_INT_STAT	 0x040064	/* Audio Ext interrupt status */
#define AUD_EXT_INT_MSTAT	 0x040068	/* Audio Ext interrupt masked status */
#define AUD_EXT_INT_SSTAT	 0x04006C	/* Audio Ext interrupt set status */

#define FLD_AUD_EXT_OPC_ERR	 0x00010000
#define FLD_AUD_EXT_SYNC	 0x00001000
#define FLD_AUD_EXT_OF		 0x00000100
#define FLD_AUD_EXT_RISCI2	 0x00000010
#define FLD_AUD_EXT_RISCI1	 0x00000001

/* GPIO data / direction / interrupt registers */
#define GPIO_LO			 0x110010	/* Lower of GPIO pins [31:0] */
#define GPIO_HI			 0x110014	/* Upper WORD of GPIO pins [47:32] */
#define GPIO_LO_OE		 0x110018	/* Lower of GPIO output enable [31:0] */
#define GPIO_HI_OE		 0x11001C	/* Upper word of GPIO output enable [47:32] */

#define GPIO_LO_INT_MSK		 0x11003C	/* GPIO interrupt mask */
#define GPIO_LO_INT_STAT	 0x110044	/* GPIO interrupt status */
#define GPIO_LO_INT_MSTAT	 0x11004C	/* GPIO interrupt masked status */
#define GPIO_LO_ISM_SNS		 0x110054	/* GPIO interrupt sensitivity */
#define GPIO_LO_ISM_POL		 0x11005C	/* GPIO interrupt polarity */

#define GPIO_HI_INT_MSK		 0x110040	/* GPIO interrupt mask */
#define GPIO_HI_INT_STAT	 0x110048	/* GPIO interrupt status */
#define GPIO_HI_INT_MSTAT	 0x110050	/* GPIO interrupt masked status */
#define GPIO_HI_ISM_SNS		 0x110058	/* GPIO interrupt sensitivity */
#define GPIO_HI_ISM_POL		 0x110060	/* GPIO interrupt polarity */

#define FLD_GPIO43_INT		 (1 << 11)
#define FLD_GPIO42_INT		 (1 << 10)
#define FLD_GPIO41_INT		 (1 << 9)
#define FLD_GPIO40_INT		 (1 << 8)

#define FLD_GPIO9_INT		 (1 << 9)
#define FLD_GPIO8_INT		 (1 << 8)
#define FLD_GPIO7_INT		 (1 << 7)
#define FLD_GPIO6_INT		 (1 << 6)
#define FLD_GPIO5_INT		 (1 << 5)
#define FLD_GPIO4_INT		 (1 << 4)
#define FLD_GPIO3_INT		 (1 << 3)
#define FLD_GPIO2_INT		 (1 << 2)
#define FLD_GPIO1_INT		 (1 << 1)
#define FLD_GPIO0_INT		 (1 << 0)

#define TC_REQ			 0x040090	/* Rider PCI Express traFFic class request */
#define TC_REQ_SET		 0x040094	/* Rider PCI Express traFFic class request set */

/* Rider - PCI Compatible Header */
#define RDR_CFG0		 0x050000
#define RDR_VENDOR_DEVICE_ID_CFG 0x050000
#define RDR_CFG1		 0x050004
#define RDR_CFG2		 0x050008
#define RDR_CFG3		 0x05000C
#define RDR_CFG4		 0x050010
#define RDR_CFG5		 0x050014
#define RDR_CFG6		 0x050018
#define RDR_CFG7		 0x05001C
#define RDR_CFG8		 0x050020
#define RDR_CFG9		 0x050024
#define RDR_CFGA		 0x050028
#define RDR_CFGB		 0x05002C
#define RDR_SUSSYSTEM_ID_CFG	 0x05002C
#define RDR_CFGC		 0x050030
#define RDR_CFGD		 0x050034
#define RDR_CFGE		 0x050038
#define RDR_CFGF		 0x05003C

/* PCI-Express Capabilities */
#define RDR_PECAP		 0x050040
#define RDR_PEDEVCAP		 0x050044
#define RDR_PEDEVSC		 0x050048
#define RDR_PELINKCAP		 0x05004C
#define RDR_PELINKSC		 0x050050
#define RDR_PMICAP		 0x050080
#define RDR_PMCSR		 0x050084
#define RDR_VPDCAP		 0x050090
#define RDR_VPDDATA		 0x050094
#define RDR_MSICAP		 0x0500A0
#define RDR_MSIARL		 0x0500A4
#define RDR_MSIARU		 0x0500A8
#define RDR_MSIDATA		 0x0500AC

/* PCI Express Extended Capabilities */
#define RDR_AERXCAP		 0x050100
#define RDR_AERUESTA		 0x050104
#define RDR_AERUEMSK		 0x050108
#define RDR_AERUESEV		 0x05010C
#define RDR_AERCESTA		 0x050110
#define RDR_AERCEMSK		 0x050114
#define RDR_AERCC		 0x050118
#define RDR_AERHL0		 0x05011C
#define RDR_AERHL1		 0x050120
#define RDR_AERHL2		 0x050124
#define RDR_AERHL3		 0x050128

/* Virtual Channel capability */
#define RDR_VCXCAP		 0x050200
#define RDR_VCCAP1		 0x050204
#define RDR_VCCAP2		 0x050208
#define RDR_VCSC		 0x05020C
#define RDR_VCR0_CAP		 0x050210
#define RDR_VCR0_CTRL		 0x050214
#define RDR_VCR0_STAT		 0x050218
#define RDR_VCR1_CAP		 0x05021C
#define RDR_VCR1_CTRL		 0x050220
#define RDR_VCR1_STAT		 0x050224
#define RDR_VCR2_CAP		 0x050228
#define RDR_VCR2_CTRL		 0x05022C
#define RDR_VCR2_STAT		 0x050230
#define RDR_VCR3_CAP		 0x050234
#define RDR_VCR3_CTRL		 0x050238
#define RDR_VCR3_STAT		 0x05023C
#define RDR_VCARB0		 0x050240
#define RDR_VCARB1		 0x050244
#define RDR_VCARB2		 0x050248
#define RDR_VCARB3		 0x05024C
#define RDR_VCARB4		 0x050250
#define RDR_VCARB5		 0x050254
#define RDR_VCARB6		 0x050258
#define RDR_VCARB7		 0x05025C

/* Rider status/control */
#define RDR_RDRSTAT0		 0x050300
#define RDR_RDRSTAT1		 0x050304
#define RDR_RDRCTL0		 0x050308
#define RDR_RDRCTL1		 0x05030C

/* Transaction Layer Registers */
#define RDR_TLSTAT0		 0x050310
#define RDR_TLSTAT1		 0x050314
#define RDR_TLCTL0		 0x050318
#define FLD_CFG_UR_CPL_MODE	 0x00000040
#define FLD_CFG_CORR_ERR_QUITE	 0x00000020
#define FLD_CFG_RCB_CK_EN	 0x00000010
#define FLD_CFG_BNDRY_CK_EN	 0x00000008
#define FLD_CFG_BYTE_EN_CK_EN	 0x00000004
#define FLD_CFG_RELAX_ORDER_MSK	 0x00000002
#define FLD_CFG_TAG_ORDER_EN	 0x00000001
#define RDR_TLCTL1		 0x05031C
#define RDR_REQRCAL		 0x050320
#define RDR_REQRCAU		 0x050324
#define RDR_REQEPA		 0x050328
#define RDR_REQCTRL		 0x05032C
#define RDR_REQSTAT		 0x050330
#define RDR_TL_TEST		 0x050334
#define RDR_VCR01_CTL		 0x050348 /*
***************************************************************************** */ #define RDR_VCR23_CTL 0x05034C /* ***************************************************************************** */ #define RDR_RX_VCR0_FC 0x050350 /* ***************************************************************************** */ #define RDR_RX_VCR1_FC 0x050354 /* ***************************************************************************** */ #define RDR_RX_VCR2_FC 0x050358 /* ***************************************************************************** */ #define RDR_RX_VCR3_FC 0x05035C /* ***************************************************************************** */ /* Data Link Layer Registers */ /* ***************************************************************************** */ #define RDR_DLLSTAT 0x050360 /* ***************************************************************************** */ #define RDR_DLLCTRL 0x050364 /* ***************************************************************************** */ #define RDR_REPLAYTO 0x050368 /* ***************************************************************************** */ #define RDR_ACKLATTO 0x05036C /* ***************************************************************************** */ /* MAC Layer Registers */ /* ***************************************************************************** */ #define RDR_MACSTAT0 0x050380 /* ***************************************************************************** */ #define RDR_MACSTAT1 0x050384 /* ***************************************************************************** */ #define RDR_MACCTRL0 0x050388 /* ***************************************************************************** */ #define RDR_MACCTRL1 0x05038C /* ***************************************************************************** */ #define RDR_MACCTRL2 0x050390 /* ***************************************************************************** */ #define RDR_MAC_LB_DATA 0x050394 /* 
***************************************************************************** */ #define RDR_L0S_EXIT_LAT 0x050398 /* ***************************************************************************** */ /* DMAC */ /* ***************************************************************************** */ #define DMA1_PTR1 0x100000 /* DMA Current Ptr : Ch#1 */ /* ***************************************************************************** */ #define DMA2_PTR1 0x100004 /* DMA Current Ptr : Ch#2 */ /* ***************************************************************************** */ #define DMA3_PTR1 0x100008 /* DMA Current Ptr : Ch#3 */ /* ***************************************************************************** */ #define DMA4_PTR1 0x10000C /* DMA Current Ptr : Ch#4 */ /* ***************************************************************************** */ #define DMA5_PTR1 0x100010 /* DMA Current Ptr : Ch#5 */ /* ***************************************************************************** */ #define DMA6_PTR1 0x100014 /* DMA Current Ptr : Ch#6 */ /* ***************************************************************************** */ #define DMA7_PTR1 0x100018 /* DMA Current Ptr : Ch#7 */ /* ***************************************************************************** */ #define DMA8_PTR1 0x10001C /* DMA Current Ptr : Ch#8 */ /* ***************************************************************************** */ #define DMA9_PTR1 0x100020 /* DMA Current Ptr : Ch#9 */ /* ***************************************************************************** */ #define DMA10_PTR1 0x100024 /* DMA Current Ptr : Ch#10 */ /* ***************************************************************************** */ #define DMA11_PTR1 0x100028 /* DMA Current Ptr : Ch#11 */ /* ***************************************************************************** */ #define DMA12_PTR1 0x10002C /* DMA Current Ptr : Ch#12 */ /* ***************************************************************************** */ #define DMA13_PTR1 
0x100030 /* DMA Current Ptr : Ch#13 */ /* ***************************************************************************** */ #define DMA14_PTR1 0x100034 /* DMA Current Ptr : Ch#14 */ /* ***************************************************************************** */ #define DMA15_PTR1 0x100038 /* DMA Current Ptr : Ch#15 */ /* ***************************************************************************** */ #define DMA16_PTR1 0x10003C /* DMA Current Ptr : Ch#16 */ /* ***************************************************************************** */ #define DMA17_PTR1 0x100040 /* DMA Current Ptr : Ch#17 */ /* ***************************************************************************** */ #define DMA18_PTR1 0x100044 /* DMA Current Ptr : Ch#18 */ /* ***************************************************************************** */ #define DMA19_PTR1 0x100048 /* DMA Current Ptr : Ch#19 */ /* ***************************************************************************** */ #define DMA20_PTR1 0x10004C /* DMA Current Ptr : Ch#20 */ /* ***************************************************************************** */ #define DMA21_PTR1 0x100050 /* DMA Current Ptr : Ch#21 */ /* ***************************************************************************** */ #define DMA22_PTR1 0x100054 /* DMA Current Ptr : Ch#22 */ /* ***************************************************************************** */ #define DMA23_PTR1 0x100058 /* DMA Current Ptr : Ch#23 */ /* ***************************************************************************** */ #define DMA24_PTR1 0x10005C /* DMA Current Ptr : Ch#24 */ /* ***************************************************************************** */ #define DMA25_PTR1 0x100060 /* DMA Current Ptr : Ch#25 */ /* ***************************************************************************** */ #define DMA26_PTR1 0x100064 /* DMA Current Ptr : Ch#26 */ /* ***************************************************************************** */ #define DMA1_PTR2 0x100080 /* 
DMA Tab Ptr : Ch#1 */ /* ***************************************************************************** */ #define DMA2_PTR2 0x100084 /* DMA Tab Ptr : Ch#2 */ /* ***************************************************************************** */ #define DMA3_PTR2 0x100088 /* DMA Tab Ptr : Ch#3 */ /* ***************************************************************************** */ #define DMA4_PTR2 0x10008C /* DMA Tab Ptr : Ch#4 */ /* ***************************************************************************** */ #define DMA5_PTR2 0x100090 /* DMA Tab Ptr : Ch#5 */ /* ***************************************************************************** */ #define DMA6_PTR2 0x100094 /* DMA Tab Ptr : Ch#6 */ /* ***************************************************************************** */ #define DMA7_PTR2 0x100098 /* DMA Tab Ptr : Ch#7 */ /* ***************************************************************************** */ #define DMA8_PTR2 0x10009C /* DMA Tab Ptr : Ch#8 */ /* ***************************************************************************** */ #define DMA9_PTR2 0x1000A0 /* DMA Tab Ptr : Ch#9 */ /* ***************************************************************************** */ #define DMA10_PTR2 0x1000A4 /* DMA Tab Ptr : Ch#10 */ /* ***************************************************************************** */ #define DMA11_PTR2 0x1000A8 /* DMA Tab Ptr : Ch#11 */ /* ***************************************************************************** */ #define DMA12_PTR2 0x1000AC /* DMA Tab Ptr : Ch#12 */ /* ***************************************************************************** */ #define DMA13_PTR2 0x1000B0 /* DMA Tab Ptr : Ch#13 */ /* ***************************************************************************** */ #define DMA14_PTR2 0x1000B4 /* DMA Tab Ptr : Ch#14 */ /* ***************************************************************************** */ #define DMA15_PTR2 0x1000B8 /* DMA Tab Ptr : Ch#15 */ /* 
***************************************************************************** */ #define DMA16_PTR2 0x1000BC /* DMA Tab Ptr : Ch#16 */ /* ***************************************************************************** */ #define DMA17_PTR2 0x1000C0 /* DMA Tab Ptr : Ch#17 */ /* ***************************************************************************** */ #define DMA18_PTR2 0x1000C4 /* DMA Tab Ptr : Ch#18 */ /* ***************************************************************************** */ #define DMA19_PTR2 0x1000C8 /* DMA Tab Ptr : Ch#19 */ /* ***************************************************************************** */ #define DMA20_PTR2 0x1000CC /* DMA Tab Ptr : Ch#20 */ /* ***************************************************************************** */ #define DMA21_PTR2 0x1000D0 /* DMA Tab Ptr : Ch#21 */ /* ***************************************************************************** */ #define DMA22_PTR2 0x1000D4 /* DMA Tab Ptr : Ch#22 */ /* ***************************************************************************** */ #define DMA23_PTR2 0x1000D8 /* DMA Tab Ptr : Ch#23 */ /* ***************************************************************************** */ #define DMA24_PTR2 0x1000DC /* DMA Tab Ptr : Ch#24 */ /* ***************************************************************************** */ #define DMA25_PTR2 0x1000E0 /* DMA Tab Ptr : Ch#25 */ /* ***************************************************************************** */ #define DMA26_PTR2 0x1000E4 /* DMA Tab Ptr : Ch#26 */ /* ***************************************************************************** */ #define DMA1_CNT1 0x100100 /* DMA BuFFer Size : Ch#1 */ /* ***************************************************************************** */ #define DMA2_CNT1 0x100104 /* DMA BuFFer Size : Ch#2 */ /* ***************************************************************************** */ #define DMA3_CNT1 0x100108 /* DMA BuFFer Size : Ch#3 */ /* 
***************************************************************************** */ #define DMA4_CNT1 0x10010C /* DMA BuFFer Size : Ch#4 */ /* ***************************************************************************** */ #define DMA5_CNT1 0x100110 /* DMA BuFFer Size : Ch#5 */ /* ***************************************************************************** */ #define DMA6_CNT1 0x100114 /* DMA BuFFer Size : Ch#6 */ /* ***************************************************************************** */ #define DMA7_CNT1 0x100118 /* DMA BuFFer Size : Ch#7 */ /* ***************************************************************************** */ #define DMA8_CNT1 0x10011C /* DMA BuFFer Size : Ch#8 */ /* ***************************************************************************** */ #define DMA9_CNT1 0x100120 /* DMA BuFFer Size : Ch#9 */ /* ***************************************************************************** */ #define DMA10_CNT1 0x100124 /* DMA BuFFer Size : Ch#10 */ /* ***************************************************************************** */ #define DMA11_CNT1 0x100128 /* DMA BuFFer Size : Ch#11 */ /* ***************************************************************************** */ #define DMA12_CNT1 0x10012C /* DMA BuFFer Size : Ch#12 */ /* ***************************************************************************** */ #define DMA13_CNT1 0x100130 /* DMA BuFFer Size : Ch#13 */ /* ***************************************************************************** */ #define DMA14_CNT1 0x100134 /* DMA BuFFer Size : Ch#14 */ /* ***************************************************************************** */ #define DMA15_CNT1 0x100138 /* DMA BuFFer Size : Ch#15 */ /* ***************************************************************************** */ #define DMA16_CNT1 0x10013C /* DMA BuFFer Size : Ch#16 */ /* ***************************************************************************** */ #define DMA17_CNT1 0x100140 /* DMA BuFFer Size : Ch#17 */ /* 
***************************************************************************** */ #define DMA18_CNT1 0x100144 /* DMA BuFFer Size : Ch#18 */ /* ***************************************************************************** */ #define DMA19_CNT1 0x100148 /* DMA BuFFer Size : Ch#19 */ /* ***************************************************************************** */ #define DMA20_CNT1 0x10014C /* DMA BuFFer Size : Ch#20 */ /* ***************************************************************************** */ #define DMA21_CNT1 0x100150 /* DMA BuFFer Size : Ch#21 */ /* ***************************************************************************** */ #define DMA22_CNT1 0x100154 /* DMA BuFFer Size : Ch#22 */ /* ***************************************************************************** */ #define DMA23_CNT1 0x100158 /* DMA BuFFer Size : Ch#23 */ /* ***************************************************************************** */ #define DMA24_CNT1 0x10015C /* DMA BuFFer Size : Ch#24 */ /* ***************************************************************************** */ #define DMA25_CNT1 0x100160 /* DMA BuFFer Size : Ch#25 */ /* ***************************************************************************** */ #define DMA26_CNT1 0x100164 /* DMA BuFFer Size : Ch#26 */ /* ***************************************************************************** */ #define DMA1_CNT2 0x100180 /* DMA Table Size : Ch#1 */ /* ***************************************************************************** */ #define DMA2_CNT2 0x100184 /* DMA Table Size : Ch#2 */ /* ***************************************************************************** */ #define DMA3_CNT2 0x100188 /* DMA Table Size : Ch#3 */ /* ***************************************************************************** */ #define DMA4_CNT2 0x10018C /* DMA Table Size : Ch#4 */ /* ***************************************************************************** */ #define DMA5_CNT2 0x100190 /* DMA Table Size : Ch#5 */ /* 
***************************************************************************** */ #define DMA6_CNT2 0x100194 /* DMA Table Size : Ch#6 */ /* ***************************************************************************** */ #define DMA7_CNT2 0x100198 /* DMA Table Size : Ch#7 */ /* ***************************************************************************** */ #define DMA8_CNT2 0x10019C /* DMA Table Size : Ch#8 */ /* ***************************************************************************** */ #define DMA9_CNT2 0x1001A0 /* DMA Table Size : Ch#9 */ /* ***************************************************************************** */ #define DMA10_CNT2 0x1001A4 /* DMA Table Size : Ch#10 */ /* ***************************************************************************** */ #define DMA11_CNT2 0x1001A8 /* DMA Table Size : Ch#11 */ /* ***************************************************************************** */ #define DMA12_CNT2 0x1001AC /* DMA Table Size : Ch#12 */ /* ***************************************************************************** */ #define DMA13_CNT2 0x1001B0 /* DMA Table Size : Ch#13 */ /* ***************************************************************************** */ #define DMA14_CNT2 0x1001B4 /* DMA Table Size : Ch#14 */ /* ***************************************************************************** */ #define DMA15_CNT2 0x1001B8 /* DMA Table Size : Ch#15 */ /* ***************************************************************************** */ #define DMA16_CNT2 0x1001BC /* DMA Table Size : Ch#16 */ /* ***************************************************************************** */ #define DMA17_CNT2 0x1001C0 /* DMA Table Size : Ch#17 */ /* ***************************************************************************** */ #define DMA18_CNT2 0x1001C4 /* DMA Table Size : Ch#18 */ /* ***************************************************************************** */ #define DMA19_CNT2 0x1001C8 /* DMA Table Size : Ch#19 */ /* 
***************************************************************************** */ #define DMA20_CNT2 0x1001CC /* DMA Table Size : Ch#20 */ /* ***************************************************************************** */ #define DMA21_CNT2 0x1001D0 /* DMA Table Size : Ch#21 */ /* ***************************************************************************** */ #define DMA22_CNT2 0x1001D4 /* DMA Table Size : Ch#22 */ /* ***************************************************************************** */ #define DMA23_CNT2 0x1001D8 /* DMA Table Size : Ch#23 */ /* ***************************************************************************** */ #define DMA24_CNT2 0x1001DC /* DMA Table Size : Ch#24 */ /* ***************************************************************************** */ #define DMA25_CNT2 0x1001E0 /* DMA Table Size : Ch#25 */ /* ***************************************************************************** */ #define DMA26_CNT2 0x1001E4 /* DMA Table Size : Ch#26 */ /* ***************************************************************************** */ /* ITG */ /* ***************************************************************************** */ #define TM_CNT_LDW 0x110000 /* Timer : Counter low */ /* ***************************************************************************** */ #define TM_CNT_UW 0x110004 /* Timer : Counter high word */ /* ***************************************************************************** */ #define TM_LMT_LDW 0x110008 /* Timer : Limit low */ /* ***************************************************************************** */ #define TM_LMT_UW 0x11000C /* Timer : Limit high word */ /* ***************************************************************************** */ #define GP0_IO 0x110010 /* GPIO output enables data I/O */ #define FLD_GP_OE 0x00FF0000 /* GPIO: GP_OE output enable */ #define FLD_GP_IN 0x0000FF00 /* GPIO: GP_IN status */ #define FLD_GP_OUT 0x000000FF /* GPIO: GP_OUT control */ /* 
***************************************************************************** */ #define GPIO_ISM 0x110014 /* GPIO interrupt sensitivity mode */ #define FLD_GP_ISM_SNS 0x00000070 #define FLD_GP_ISM_POL 0x00000007 /* ***************************************************************************** */ #define SOFT_RESET 0x11001C /* Output system reset reg */ #define FLD_PECOS_SOFT_RESET 0x00000001 /* ***************************************************************************** */ #define MC416_RWD 0x110020 /* MC416 GPIO[18:3] pin */ #define MC416_OEN 0x110024 /* Output enable of GPIO[18:3] */ #define MC416_CTL 0x110028 /* ***************************************************************************** */ #define ALT_PIN_OUT_SEL 0x11002C /* Alternate GPIO output select */ #define FLD_ALT_GPIO_OUT_SEL 0xF0000000 /* 0 Disabled <-- default */ /* 1 GPIO[0] */ /* 2 GPIO[10] */ /* 3 VIP_656_DATA_VAL */ /* 4 VIP_656_DATA[0] */ /* 5 VIP_656_CLK */ /* 6 VIP_656_DATA_EXT[1] */ /* 7 VIP_656_DATA_EXT[0] */ /* 8 ATT_IF */ #define FLD_AUX_PLL_CLK_ALT_SEL 0x0F000000 /* 0 AUX_PLL_CLK<-- default */ /* 1 GPIO[2] */ /* 2 GPIO[10] */ /* 3 VIP_656_DATA_VAL */ /* 4 VIP_656_DATA[0] */ /* 5 VIP_656_CLK */ /* 6 VIP_656_DATA_EXT[1] */ /* 7 VIP_656_DATA_EXT[0] */ #define FLD_IR_TX_ALT_SEL 0x00F00000 /* 0 IR_TX <-- default */ /* 1 GPIO[1] */ /* 2 GPIO[10] */ /* 3 VIP_656_DATA_VAL */ /* 4 VIP_656_DATA[0] */ /* 5 VIP_656_CLK */ /* 6 VIP_656_DATA_EXT[1] */ /* 7 VIP_656_DATA_EXT[0] */ #define FLD_IR_RX_ALT_SEL 0x000F0000 /* 0 IR_RX <-- default */ /* 1 GPIO[0] */ /* 2 GPIO[10] */ /* 3 VIP_656_DATA_VAL */ /* 4 VIP_656_DATA[0] */ /* 5 VIP_656_CLK */ /* 6 VIP_656_DATA_EXT[1] */ /* 7 VIP_656_DATA_EXT[0] */ #define FLD_GPIO10_ALT_SEL 0x0000F000 /* 0 GPIO[10] <-- default */ /* 1 GPIO[0] */ /* 2 GPIO[10] */ /* 3 VIP_656_DATA_VAL */ /* 4 VIP_656_DATA[0] */ /* 5 VIP_656_CLK */ /* 6 VIP_656_DATA_EXT[1] */ /* 7 VIP_656_DATA_EXT[0] */ #define FLD_GPIO2_ALT_SEL 0x00000F00 /* 0 GPIO[2] <-- default */ /* 1 GPIO[1] */ /* 2 
GPIO[10] */ /* 3 VIP_656_DATA_VAL */ /* 4 VIP_656_DATA[0] */ /* 5 VIP_656_CLK */ /* 6 VIP_656_DATA_EXT[1] */ /* 7 VIP_656_DATA_EXT[0] */ #define FLD_GPIO1_ALT_SEL 0x000000F0 /* 0 GPIO[1] <-- default */ /* 1 GPIO[0] */ /* 2 GPIO[10] */ /* 3 VIP_656_DATA_VAL */ /* 4 VIP_656_DATA[0] */ /* 5 VIP_656_CLK */ /* 6 VIP_656_DATA_EXT[1] */ /* 7 VIP_656_DATA_EXT[0] */ #define FLD_GPIO0_ALT_SEL 0x0000000F /* 0 GPIO[0] <-- default */ /* 1 GPIO[1] */ /* 2 GPIO[10] */ /* 3 VIP_656_DATA_VAL */ /* 4 VIP_656_DATA[0] */ /* 5 VIP_656_CLK */ /* 6 VIP_656_DATA_EXT[1] */ /* 7 VIP_656_DATA_EXT[0] */ #define ALT_PIN_IN_SEL 0x110030 /* Alternate GPIO input select */ #define FLD_GPIO10_ALT_IN_SEL 0x0000F000 /* 0 GPIO[10] <-- default */ /* 1 IR_RX */ /* 2 IR_TX */ /* 3 AUX_PLL_CLK */ /* 4 IF_ATT_SEL */ /* 5 GPIO[0] */ /* 6 GPIO[1] */ /* 7 GPIO[2] */ #define FLD_GPIO2_ALT_IN_SEL 0x00000F00 /* 0 GPIO[2] <-- default */ /* 1 IR_RX */ /* 2 IR_TX */ /* 3 AUX_PLL_CLK */ /* 4 IF_ATT_SEL */ #define FLD_GPIO1_ALT_IN_SEL 0x000000F0 /* 0 GPIO[1] <-- default */ /* 1 IR_RX */ /* 2 IR_TX */ /* 3 AUX_PLL_CLK */ /* 4 IF_ATT_SEL */ #define FLD_GPIO0_ALT_IN_SEL 0x0000000F /* 0 GPIO[0] <-- default */ /* 1 IR_RX */ /* 2 IR_TX */ /* 3 AUX_PLL_CLK */ /* 4 IF_ATT_SEL */ /* ***************************************************************************** */ #define TEST_BUS_CTL1 0x110040 /* Test bus control register #1 */ /* ***************************************************************************** */ #define TEST_BUS_CTL2 0x110044 /* Test bus control register #2 */ /* ***************************************************************************** */ #define CLK_DELAY 0x110048 /* Clock delay */ #define FLD_MOE_CLK_DIS 0x80000000 /* Disable MoE clock */ /* ***************************************************************************** */ #define PAD_CTRL 0x110068 /* Pad drive strength control */ /* ***************************************************************************** */ #define MBIST_CTRL 0x110050 /* SRAM memory 
built-in self test control */
#define MBIST_STAT		0x110054	/* SRAM memory built-in self test status */

/* PLL registers */
#define PLL_A_INT_FRAC		0x110088
#define PLL_A_POST_STAT_BIST	0x11008C
#define PLL_B_INT_FRAC		0x110090
#define PLL_B_POST_STAT_BIST	0x110094
#define PLL_C_INT_FRAC		0x110098
#define PLL_C_POST_STAT_BIST	0x11009C
#define PLL_D_INT_FRAC		0x1100A0
#define PLL_D_POST_STAT_BIST	0x1100A4

/* NOTE(review): CLK_RST shares offset 0x11002C with ALT_PIN_OUT_SEL
 * (defined earlier in this header) - confirm the aliasing is intended. */
#define CLK_RST			0x11002C
#define FLD_VID_I_CLK_NOE	0x00001000
#define FLD_VID_J_CLK_NOE	0x00002000
#define FLD_USE_ALT_PLL_REF	0x00004000

#define VID_CH_MODE_SEL		0x110078
#define VID_CH_CLK_SEL		0x11007C

#define VBI_A_DMA		0x130008	/* VBI A DMA data port */

#define VID_A_VIP_CTL		0x130080	/* Video A VIP format control */
#define FLD_VIP_MODE		0x00000001

#define VID_A_PIXEL_FRMT	0x130084	/* Video A pixel format */
#define FLD_VID_A_GAMMA_DIS	0x00000008
#define FLD_VID_A_FORMAT	0x00000007
#define FLD_VID_A_GAMMA_FACTOR	0x00000010

#define VID_A_VBI_CTL		0x130088	/* Video A VBI miscellaneous control */
#define FLD_VID_A_VIP_EXT	0x00000003

#define VID_B_DMA		0x130100	/* Video B DMA data port */
#define VBI_B_DMA		0x130108	/* VBI B DMA data port */

#define VID_B_SRC_SEL		0x130144	/* Video B source select */
#define FLD_VID_B_SRC_SEL	0x00000000

#define VID_B_LNGTH		0x130150	/* Video B line length */
#define FLD_VID_B_LN_LNGTH	0x00000FFF

#define VID_B_VIP_CTL		0x130180	/* Video B VIP format control */

#define VID_B_PIXEL_FRMT	0x130184	/* Video B pixel format */
#define FLD_VID_B_GAMMA_DIS	0x00000008
#define FLD_VID_B_FORMAT	0x00000007
#define FLD_VID_B_GAMMA_FACTOR	0x00000010

#define VID_C_DMA		0x130200	/* Video C DMA data port */

#define VID_C_LNGTH		0x130250	/* Video C line length */
#define FLD_VID_C_LN_LNGTH	0x00000FFF

/* Video Destination Channels */
#define VID_DST_A_GPCNT		0x130020	/* Video A general purpose counter */
#define VID_DST_B_GPCNT		0x130120	/* Video B general purpose counter */
#define VID_DST_C_GPCNT		0x130220	/* Video C general purpose counter */
#define VID_DST_D_GPCNT		0x130320	/* Video D general purpose counter */
#define VID_DST_E_GPCNT		0x130420	/* Video E general purpose counter */
#define VID_DST_F_GPCNT		0x130520	/* Video F general purpose counter */
#define VID_DST_G_GPCNT		0x130620	/* Video G general purpose counter */
#define VID_DST_H_GPCNT		0x130720	/* Video H general purpose counter */

#define VID_DST_A_GPCNT_CTL	0x130030	/* Video A general purpose control */
#define VID_DST_B_GPCNT_CTL	0x130130	/* Video B general purpose control */
#define VID_DST_C_GPCNT_CTL 0x130230 /* Video C general purpose control */ #define VID_DST_D_GPCNT_CTL 0x130330 /* Video D general purpose control */ #define VID_DST_E_GPCNT_CTL 0x130430 /* Video E general purpose control */ #define VID_DST_F_GPCNT_CTL 0x130530 /* Video F general purpose control */ #define VID_DST_G_GPCNT_CTL 0x130630 /* Video G general purpose control */ #define VID_DST_H_GPCNT_CTL 0x130730 /* Video H general purpose control */ /* ***************************************************************************** */ #define VID_DST_A_DMA_CTL 0x130040 /* Video A DMA control */ #define VID_DST_B_DMA_CTL 0x130140 /* Video B DMA control */ #define VID_DST_C_DMA_CTL 0x130240 /* Video C DMA control */ #define VID_DST_D_DMA_CTL 0x130340 /* Video D DMA control */ #define VID_DST_E_DMA_CTL 0x130440 /* Video E DMA control */ #define VID_DST_F_DMA_CTL 0x130540 /* Video F DMA control */ #define VID_DST_G_DMA_CTL 0x130640 /* Video G DMA control */ #define VID_DST_H_DMA_CTL 0x130740 /* Video H DMA control */ #define FLD_VID_RISC_EN 0x00000010 #define FLD_VID_FIFO_EN 0x00000001 /* ***************************************************************************** */ #define VID_DST_A_VIP_CTL 0x130080 /* Video A VIP control */ #define VID_DST_B_VIP_CTL 0x130180 /* Video B VIP control */ #define VID_DST_C_VIP_CTL 0x130280 /* Video C VIP control */ #define VID_DST_D_VIP_CTL 0x130380 /* Video D VIP control */ #define VID_DST_E_VIP_CTL 0x130480 /* Video E VIP control */ #define VID_DST_F_VIP_CTL 0x130580 /* Video F VIP control */ #define VID_DST_G_VIP_CTL 0x130680 /* Video G VIP control */ #define VID_DST_H_VIP_CTL 0x130780 /* Video H VIP control */ /* ***************************************************************************** */ #define VID_DST_A_PIX_FRMT 0x130084 /* Video A Pixel format */ #define VID_DST_B_PIX_FRMT 0x130184 /* Video B Pixel format */ #define VID_DST_C_PIX_FRMT 0x130284 /* Video C Pixel format */ #define VID_DST_D_PIX_FRMT 0x130384 /* Video D Pixel format */ 
#define VID_DST_E_PIX_FRMT 0x130484 /* Video E Pixel format */ #define VID_DST_F_PIX_FRMT 0x130584 /* Video F Pixel format */ #define VID_DST_G_PIX_FRMT 0x130684 /* Video G Pixel format */ #define VID_DST_H_PIX_FRMT 0x130784 /* Video H Pixel format */ /* ***************************************************************************** */ /* Video Source Channels */ /* ***************************************************************************** */ #define VID_SRC_A_GPCNT_CTL 0x130804 /* Video A general purpose control */ #define VID_SRC_B_GPCNT_CTL 0x130904 /* Video B general purpose control */ #define VID_SRC_C_GPCNT_CTL 0x130A04 /* Video C general purpose control */ #define VID_SRC_D_GPCNT_CTL 0x130B04 /* Video D general purpose control */ #define VID_SRC_E_GPCNT_CTL 0x130C04 /* Video E general purpose control */ #define VID_SRC_F_GPCNT_CTL 0x130D04 /* Video F general purpose control */ #define VID_SRC_I_GPCNT_CTL 0x130E04 /* Video I general purpose control */ #define VID_SRC_J_GPCNT_CTL 0x130F04 /* Video J general purpose control */ /* ***************************************************************************** */ #define VID_SRC_A_GPCNT 0x130808 /* Video A general purpose counter */ #define VID_SRC_B_GPCNT 0x130908 /* Video B general purpose counter */ #define VID_SRC_C_GPCNT 0x130A08 /* Video C general purpose counter */ #define VID_SRC_D_GPCNT 0x130B08 /* Video D general purpose counter */ #define VID_SRC_E_GPCNT 0x130C08 /* Video E general purpose counter */ #define VID_SRC_F_GPCNT 0x130D08 /* Video F general purpose counter */ #define VID_SRC_I_GPCNT 0x130E08 /* Video I general purpose counter */ #define VID_SRC_J_GPCNT 0x130F08 /* Video J general purpose counter */ /* ***************************************************************************** */ #define VID_SRC_A_DMA_CTL 0x13080C /* Video A DMA control */ #define VID_SRC_B_DMA_CTL 0x13090C /* Video B DMA control */ #define VID_SRC_C_DMA_CTL 0x130A0C /* Video C DMA control */ #define VID_SRC_D_DMA_CTL 
0x130B0C /* Video D DMA control */ #define VID_SRC_E_DMA_CTL 0x130C0C /* Video E DMA control */ #define VID_SRC_F_DMA_CTL 0x130D0C /* Video F DMA control */ #define VID_SRC_I_DMA_CTL 0x130E0C /* Video I DMA control */ #define VID_SRC_J_DMA_CTL 0x130F0C /* Video J DMA control */ #define FLD_APB_RISC_EN 0x00000010 #define FLD_APB_FIFO_EN 0x00000001 /* ***************************************************************************** */ #define VID_SRC_A_FMT_CTL 0x130810 /* Video A format control */ #define VID_SRC_B_FMT_CTL 0x130910 /* Video B format control */ #define VID_SRC_C_FMT_CTL 0x130A10 /* Video C format control */ #define VID_SRC_D_FMT_CTL 0x130B10 /* Video D format control */ #define VID_SRC_E_FMT_CTL 0x130C10 /* Video E format control */ #define VID_SRC_F_FMT_CTL 0x130D10 /* Video F format control */ #define VID_SRC_I_FMT_CTL 0x130E10 /* Video I format control */ #define VID_SRC_J_FMT_CTL 0x130F10 /* Video J format control */ /* ***************************************************************************** */ #define VID_SRC_A_ACTIVE_CTL1 0x130814 /* Video A active control 1 */ #define VID_SRC_B_ACTIVE_CTL1 0x130914 /* Video B active control 1 */ #define VID_SRC_C_ACTIVE_CTL1 0x130A14 /* Video C active control 1 */ #define VID_SRC_D_ACTIVE_CTL1 0x130B14 /* Video D active control 1 */ #define VID_SRC_E_ACTIVE_CTL1 0x130C14 /* Video E active control 1 */ #define VID_SRC_F_ACTIVE_CTL1 0x130D14 /* Video F active control 1 */ #define VID_SRC_I_ACTIVE_CTL1 0x130E14 /* Video I active control 1 */ #define VID_SRC_J_ACTIVE_CTL1 0x130F14 /* Video J active control 1 */ /* ***************************************************************************** */ #define VID_SRC_A_ACTIVE_CTL2 0x130818 /* Video A active control 2 */ #define VID_SRC_B_ACTIVE_CTL2 0x130918 /* Video B active control 2 */ #define VID_SRC_C_ACTIVE_CTL2 0x130A18 /* Video C active control 2 */ #define VID_SRC_D_ACTIVE_CTL2 0x130B18 /* Video D active control 2 */ #define VID_SRC_E_ACTIVE_CTL2 0x130C18 /* 
Video E active control 2 */ #define VID_SRC_F_ACTIVE_CTL2 0x130D18 /* Video F active control 2 */ #define VID_SRC_I_ACTIVE_CTL2 0x130E18 /* Video I active control 2 */ #define VID_SRC_J_ACTIVE_CTL2 0x130F18 /* Video J active control 2 */ /* ***************************************************************************** */ #define VID_SRC_A_CDT_SZ 0x13081C /* Video A CDT size */ #define VID_SRC_B_CDT_SZ 0x13091C /* Video B CDT size */ #define VID_SRC_C_CDT_SZ 0x130A1C /* Video C CDT size */ #define VID_SRC_D_CDT_SZ 0x130B1C /* Video D CDT size */ #define VID_SRC_E_CDT_SZ 0x130C1C /* Video E CDT size */ #define VID_SRC_F_CDT_SZ 0x130D1C /* Video F CDT size */ #define VID_SRC_I_CDT_SZ 0x130E1C /* Video I CDT size */ #define VID_SRC_J_CDT_SZ 0x130F1C /* Video J CDT size */ /* ***************************************************************************** */ /* Audio I/F */ /* ***************************************************************************** */ #define AUD_DST_A_DMA 0x140000 /* Audio Int A DMA data port */ #define AUD_SRC_A_DMA 0x140008 /* Audio Int A DMA data port */ #define AUD_A_GPCNT 0x140010 /* Audio Int A gp counter */ #define FLD_AUD_A_GP_CNT 0x0000FFFF #define AUD_A_GPCNT_CTL 0x140014 /* Audio Int A gp control */ #define AUD_A_LNGTH 0x140018 /* Audio Int A line length */ #define AUD_A_CFG 0x14001C /* Audio Int A configuration */ /* ***************************************************************************** */ #define AUD_DST_B_DMA 0x140100 /* Audio Int B DMA data port */ #define AUD_SRC_B_DMA 0x140108 /* Audio Int B DMA data port */ #define AUD_B_GPCNT 0x140110 /* Audio Int B gp counter */ #define FLD_AUD_B_GP_CNT 0x0000FFFF #define AUD_B_GPCNT_CTL 0x140114 /* Audio Int B gp control */ #define AUD_B_LNGTH 0x140118 /* Audio Int B line length */ #define AUD_B_CFG 0x14011C /* Audio Int B configuration */ /* ***************************************************************************** */ #define AUD_DST_C_DMA 0x140200 /* Audio Int C DMA data port */ 
#define AUD_SRC_C_DMA 0x140208 /* Audio Int C DMA data port */ #define AUD_C_GPCNT 0x140210 /* Audio Int C gp counter */ #define FLD_AUD_C_GP_CNT 0x0000FFFF #define AUD_C_GPCNT_CTL 0x140214 /* Audio Int C gp control */ #define AUD_C_LNGTH 0x140218 /* Audio Int C line length */ #define AUD_C_CFG 0x14021C /* Audio Int C configuration */ /* ***************************************************************************** */ #define AUD_DST_D_DMA 0x140300 /* Audio Int D DMA data port */ #define AUD_SRC_D_DMA 0x140308 /* Audio Int D DMA data port */ #define AUD_D_GPCNT 0x140310 /* Audio Int D gp counter */ #define FLD_AUD_D_GP_CNT 0x0000FFFF #define AUD_D_GPCNT_CTL 0x140314 /* Audio Int D gp control */ #define AUD_D_LNGTH 0x140318 /* Audio Int D line length */ #define AUD_D_CFG 0x14031C /* Audio Int D configuration */ /* ***************************************************************************** */ #define AUD_SRC_E_DMA 0x140400 /* Audio Int E DMA data port */ #define AUD_E_GPCNT 0x140410 /* Audio Int E gp counter */ #define FLD_AUD_E_GP_CNT 0x0000FFFF #define AUD_E_GPCNT_CTL 0x140414 /* Audio Int E gp control */ #define AUD_E_CFG 0x14041C /* Audio Int E configuration */ /* ***************************************************************************** */ #define FLD_AUD_DST_LN_LNGTH 0x00000FFF #define FLD_AUD_DST_PK_MODE 0x00004000 #define FLD_AUD_CLK_ENABLE 0x00000200 #define FLD_AUD_MASTER_MODE 0x00000002 #define FLD_AUD_SONY_MODE 0x00000001 #define FLD_AUD_CLK_SELECT_PLL_D 0x00001800 #define FLD_AUD_DST_ENABLE 0x00020000 #define FLD_AUD_SRC_ENABLE 0x00010000 /* ***************************************************************************** */ #define AUD_INT_DMA_CTL 0x140500 /* Audio Int DMA control */ #define FLD_AUD_SRC_E_RISC_EN 0x00008000 #define FLD_AUD_SRC_C_RISC_EN 0x00004000 #define FLD_AUD_SRC_B_RISC_EN 0x00002000 #define FLD_AUD_SRC_A_RISC_EN 0x00001000 #define FLD_AUD_DST_D_RISC_EN 0x00000800 #define FLD_AUD_DST_C_RISC_EN 0x00000400 #define 
FLD_AUD_DST_B_RISC_EN 0x00000200 #define FLD_AUD_DST_A_RISC_EN 0x00000100 #define FLD_AUD_SRC_E_FIFO_EN 0x00000080 #define FLD_AUD_SRC_C_FIFO_EN 0x00000040 #define FLD_AUD_SRC_B_FIFO_EN 0x00000020 #define FLD_AUD_SRC_A_FIFO_EN 0x00000010 #define FLD_AUD_DST_D_FIFO_EN 0x00000008 #define FLD_AUD_DST_C_FIFO_EN 0x00000004 #define FLD_AUD_DST_B_FIFO_EN 0x00000002 #define FLD_AUD_DST_A_FIFO_EN 0x00000001 /* ***************************************************************************** */ /* */ /* Mobilygen Interface Registers */ /* */ /* ***************************************************************************** */ /* Mobilygen Interface A */ /* ***************************************************************************** */ #define MB_IF_A_DMA 0x150000 /* MBIF A DMA data port */ #define MB_IF_A_GPCN 0x150008 /* MBIF A GP counter */ #define MB_IF_A_GPCN_CTRL 0x15000C #define MB_IF_A_DMA_CTRL 0x150010 #define MB_IF_A_LENGTH 0x150014 #define MB_IF_A_HDMA_XFER_SZ 0x150018 #define MB_IF_A_HCMD 0x15001C #define MB_IF_A_HCONFIG 0x150020 #define MB_IF_A_DATA_STRUCT_0 0x150024 #define MB_IF_A_DATA_STRUCT_1 0x150028 #define MB_IF_A_DATA_STRUCT_2 0x15002C #define MB_IF_A_DATA_STRUCT_3 0x150030 #define MB_IF_A_DATA_STRUCT_4 0x150034 #define MB_IF_A_DATA_STRUCT_5 0x150038 #define MB_IF_A_DATA_STRUCT_6 0x15003C #define MB_IF_A_DATA_STRUCT_7 0x150040 #define MB_IF_A_DATA_STRUCT_8 0x150044 #define MB_IF_A_DATA_STRUCT_9 0x150048 #define MB_IF_A_DATA_STRUCT_A 0x15004C #define MB_IF_A_DATA_STRUCT_B 0x150050 #define MB_IF_A_DATA_STRUCT_C 0x150054 #define MB_IF_A_DATA_STRUCT_D 0x150058 #define MB_IF_A_DATA_STRUCT_E 0x15005C #define MB_IF_A_DATA_STRUCT_F 0x150060 /* ***************************************************************************** */ /* Mobilygen Interface B */ /* ***************************************************************************** */ #define MB_IF_B_DMA 0x160000 /* MBIF A DMA data port */ #define MB_IF_B_GPCN 0x160008 /* MBIF A GP counter */ #define MB_IF_B_GPCN_CTRL 
0x16000C #define MB_IF_B_DMA_CTRL 0x160010 #define MB_IF_B_LENGTH 0x160014 #define MB_IF_B_HDMA_XFER_SZ 0x160018 #define MB_IF_B_HCMD 0x16001C #define MB_IF_B_HCONFIG 0x160020 #define MB_IF_B_DATA_STRUCT_0 0x160024 #define MB_IF_B_DATA_STRUCT_1 0x160028 #define MB_IF_B_DATA_STRUCT_2 0x16002C #define MB_IF_B_DATA_STRUCT_3 0x160030 #define MB_IF_B_DATA_STRUCT_4 0x160034 #define MB_IF_B_DATA_STRUCT_5 0x160038 #define MB_IF_B_DATA_STRUCT_6 0x16003C #define MB_IF_B_DATA_STRUCT_7 0x160040 #define MB_IF_B_DATA_STRUCT_8 0x160044 #define MB_IF_B_DATA_STRUCT_9 0x160048 #define MB_IF_B_DATA_STRUCT_A 0x16004C #define MB_IF_B_DATA_STRUCT_B 0x160050 #define MB_IF_B_DATA_STRUCT_C 0x160054 #define MB_IF_B_DATA_STRUCT_D 0x160058 #define MB_IF_B_DATA_STRUCT_E 0x16005C #define MB_IF_B_DATA_STRUCT_F 0x160060 /* MB_DMA_CTRL */ #define FLD_MB_IF_RISC_EN 0x00000010 #define FLD_MB_IF_FIFO_EN 0x00000001 /* MB_LENGTH */ #define FLD_MB_IF_LN_LNGTH 0x00000FFF /* MB_HCMD register */ #define FLD_MB_HCMD_H_GO 0x80000000 #define FLD_MB_HCMD_H_BUSY 0x40000000 #define FLD_MB_HCMD_H_DMA_HOLD 0x10000000 #define FLD_MB_HCMD_H_DMA_BUSY 0x08000000 #define FLD_MB_HCMD_H_DMA_TYPE 0x04000000 #define FLD_MB_HCMD_H_DMA_XACT 0x02000000 #define FLD_MB_HCMD_H_RW_N 0x01000000 #define FLD_MB_HCMD_H_ADDR 0x00FF0000 #define FLD_MB_HCMD_H_DATA 0x0000FFFF /* ***************************************************************************** */ /* I2C #1 */ /* ***************************************************************************** */ #define I2C1_ADDR 0x180000 /* I2C #1 address */ #define FLD_I2C_DADDR 0xfe000000 /* RW [31:25] I2C Device Address */ /* RO [24] reserved */ /* ***************************************************************************** */ #define FLD_I2C_SADDR 0x00FFFFFF /* RW [23:0] I2C Sub-address */ /* ***************************************************************************** */ #define I2C1_WDATA 0x180004 /* I2C #1 write data */ #define FLD_I2C_WDATA 0xFFFFFFFF /* RW [31:0] */ /* 
***************************************************************************** */ #define I2C1_CTRL 0x180008 /* I2C #1 control */ #define FLD_I2C_PERIOD 0xFF000000 /* RW [31:24] */ #define FLD_I2C_SCL_IN 0x00200000 /* RW [21] */ #define FLD_I2C_SDA_IN 0x00100000 /* RW [20] */ /* RO [19:18] reserved */ #define FLD_I2C_SCL_OUT 0x00020000 /* RW [17] */ #define FLD_I2C_SDA_OUT 0x00010000 /* RW [16] */ /* RO [15] reserved */ #define FLD_I2C_DATA_LEN 0x00007000 /* RW [14:12] */ #define FLD_I2C_SADDR_INC 0x00000800 /* RW [11] */ /* RO [10:9] reserved */ #define FLD_I2C_SADDR_LEN 0x00000300 /* RW [9:8] */ /* RO [7:6] reserved */ #define FLD_I2C_SOFT 0x00000020 /* RW [5] */ #define FLD_I2C_NOSTOP 0x00000010 /* RW [4] */ #define FLD_I2C_EXTEND 0x00000008 /* RW [3] */ #define FLD_I2C_SYNC 0x00000004 /* RW [2] */ #define FLD_I2C_READ_SA 0x00000002 /* RW [1] */ #define FLD_I2C_READ_WRN 0x00000001 /* RW [0] */ /* ***************************************************************************** */ #define I2C1_RDATA 0x18000C /* I2C #1 read data */ #define FLD_I2C_RDATA 0xFFFFFFFF /* RO [31:0] */ /* ***************************************************************************** */ #define I2C1_STAT 0x180010 /* I2C #1 status */ #define FLD_I2C_XFER_IN_PROG 0x00000002 /* RO [1] */ #define FLD_I2C_RACK 0x00000001 /* RO [0] */ /* ***************************************************************************** */ /* I2C #2 */ /* ***************************************************************************** */ #define I2C2_ADDR 0x190000 /* I2C #2 address */ /* ***************************************************************************** */ #define I2C2_WDATA 0x190004 /* I2C #2 write data */ /* ***************************************************************************** */ #define I2C2_CTRL 0x190008 /* I2C #2 control */ /* ***************************************************************************** */ #define I2C2_RDATA 0x19000C /* I2C #2 read data */ /* 
***************************************************************************** */ #define I2C2_STAT 0x190010 /* I2C #2 status */ /* ***************************************************************************** */ /* I2C #3 */ /* ***************************************************************************** */ #define I2C3_ADDR 0x1A0000 /* I2C #3 address */ /* ***************************************************************************** */ #define I2C3_WDATA 0x1A0004 /* I2C #3 write data */ /* ***************************************************************************** */ #define I2C3_CTRL 0x1A0008 /* I2C #3 control */ /* ***************************************************************************** */ #define I2C3_RDATA 0x1A000C /* I2C #3 read data */ /* ***************************************************************************** */ #define I2C3_STAT 0x1A0010 /* I2C #3 status */ /* ***************************************************************************** */ /* UART */ /* ***************************************************************************** */ #define UART_CTL 0x1B0000 /* UART Control Register */ #define FLD_LOOP_BACK_EN (1 << 7) /* RW field - default 0 */ #define FLD_RX_TRG_SZ (3 << 2) /* RW field - default 0 */ #define FLD_RX_EN (1 << 1) /* RW field - default 0 */ #define FLD_TX_EN (1 << 0) /* RW field - default 0 */ /* ***************************************************************************** */ #define UART_BRD 0x1B0004 /* UART Baud Rate Divisor */ #define FLD_BRD 0x0000FFFF /* RW field - default 0x197 */ /* ***************************************************************************** */ #define UART_DBUF 0x1B0008 /* UART Tx/Rx Data BuFFer */ #define FLD_DB 0xFFFFFFFF /* RW field - default 0 */ /* ***************************************************************************** */ #define UART_ISR 0x1B000C /* UART Interrupt Status */ #define FLD_RXD_TIMEOUT_EN (1 << 7) /* RW field - default 0 */ #define FLD_FRM_ERR_EN (1 << 6) /* RW field - default 0 
*/ #define FLD_RXD_RDY_EN (1 << 5) /* RW field - default 0 */ #define FLD_TXD_EMPTY_EN (1 << 4) /* RW field - default 0 */ #define FLD_RXD_OVERFLOW (1 << 3) /* RW field - default 0 */ #define FLD_FRM_ERR (1 << 2) /* RW field - default 0 */ #define FLD_RXD_RDY (1 << 1) /* RW field - default 0 */ #define FLD_TXD_EMPTY (1 << 0) /* RW field - default 0 */ /* ***************************************************************************** */ #define UART_CNT 0x1B0010 /* UART Tx/Rx FIFO Byte Count */ #define FLD_TXD_CNT (0x1F << 8) /* RW field - default 0 */ #define FLD_RXD_CNT (0x1F << 0) /* RW field - default 0 */ /* ***************************************************************************** */ /* Motion Detection */ #define MD_CH0_GRID_BLOCK_YCNT 0x170014 #define MD_CH1_GRID_BLOCK_YCNT 0x170094 #define MD_CH2_GRID_BLOCK_YCNT 0x170114 #define MD_CH3_GRID_BLOCK_YCNT 0x170194 #define MD_CH4_GRID_BLOCK_YCNT 0x170214 #define MD_CH5_GRID_BLOCK_YCNT 0x170294 #define MD_CH6_GRID_BLOCK_YCNT 0x170314 #define MD_CH7_GRID_BLOCK_YCNT 0x170394 #define PIXEL_FRMT_422 4 #define PIXEL_FRMT_411 5 #define PIXEL_FRMT_Y8 6 #define PIXEL_ENGINE_VIP1 0 #define PIXEL_ENGINE_VIP2 1 #endif /* Athena_REGISTERS */
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_DISPLAY_H__
#define __AMDGPU_DISPLAY_H__

/*
 * Thin dispatch wrappers: each macro forwards to the per-ASIC callback
 * installed in (adev)->mode_info.funcs.  Callers must only invoke a wrapper
 * when the corresponding callback is populated for the current ASIC.
 */
/* Read the hardware vblank counter for the given CRTC. */
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
/* Set / get the backlight level on encoder (e). */
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
/* Hotplug-detect pin helpers: sense connection state, set IRQ polarity,
 * and fetch the HPD GPIO register. */
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
/* Recompute display watermarks/bandwidth after a mode change. */
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
/* Program a page flip to scanout address (base); (async) selects immediate
 * vs. vblank-synchronized flip. */
#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
/* Register an encoder / connector with the mode-setting core. */
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))

/* Deferred-work handler that services display hotplug interrupts. */
void amdgpu_display_hotplug_work_func(struct work_struct *work);
/* Recompute memory-controller priority settings for the display path. */
void amdgpu_display_update_priority(struct amdgpu_device *adev);
/* Return the memory domains a scanout BO with (bo_flags) may reside in. */
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
					  uint64_t bo_flags);
/* drm_mode_config_funcs.fb_create backend: wrap a user BO in a DRM
 * framebuffer object. */
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
				       struct drm_file *file_priv,
				       const struct drm_mode_fb_cmd2 *mode_cmd);
/* Look up format info for a fourcc (format) / (modifier) pair. */
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier);

/* Common suspend/resume plumbing shared by the display IP blocks. */
int amdgpu_display_suspend_helper(struct amdgpu_device *adev);
int amdgpu_display_resume_helper(struct amdgpu_device *adev);

#endif
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2022 MediaTek Inc. * Author: Garmin Chang <[email protected]> */ #include <linux/clk-provider.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <dt-bindings/clock/mediatek,mt8188-clk.h> #include "clk-gate.h" #include "clk-mtk.h" static const struct mtk_gate_regs venc1_cg_regs = { .set_ofs = 0x4, .clr_ofs = 0x8, .sta_ofs = 0x0, }; #define GATE_VENC1(_id, _name, _parent, _shift) \ GATE_MTK(_id, _name, _parent, &venc1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv) static const struct mtk_gate venc1_clks[] = { GATE_VENC1(CLK_VENC1_LARB, "venc1_larb", "top_venc", 0), GATE_VENC1(CLK_VENC1_VENC, "venc1_venc", "top_venc", 4), GATE_VENC1(CLK_VENC1_JPGENC, "venc1_jpgenc", "top_venc", 8), GATE_VENC1(CLK_VENC1_JPGDEC, "venc1_jpgdec", "top_venc", 12), GATE_VENC1(CLK_VENC1_JPGDEC_C1, "venc1_jpgdec_c1", "top_venc", 16), GATE_VENC1(CLK_VENC1_GALS, "venc1_gals", "top_venc", 28), GATE_VENC1(CLK_VENC1_GALS_SRAM, "venc1_gals_sram", "top_venc", 31), }; static const struct mtk_clk_desc venc1_desc = { .clks = venc1_clks, .num_clks = ARRAY_SIZE(venc1_clks), }; static const struct of_device_id of_match_clk_mt8188_venc1[] = { { .compatible = "mediatek,mt8188-vencsys", .data = &venc1_desc }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_venc1); static struct platform_driver clk_mt8188_venc1_drv = { .probe = mtk_clk_simple_probe, .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8188-venc1", .of_match_table = of_match_clk_mt8188_venc1, }, }; module_platform_driver(clk_mt8188_venc1_drv); MODULE_DESCRIPTION("MediaTek MT8188 Video Encoders clocks driver"); MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 */ /* * c8sectpfe-debugfs.h - C8SECTPFE STi DVB driver debugfs header * * Copyright (c) STMicroelectronics 2015 * * Authors: Peter Griffin <[email protected]> */ #ifndef __C8SECTPFE_DEBUG_H #define __C8SECTPFE_DEBUG_H #include "c8sectpfe-core.h" #if defined(CONFIG_DEBUG_FS) void c8sectpfe_debugfs_init(struct c8sectpfei *); void c8sectpfe_debugfs_exit(struct c8sectpfei *); #else static inline void c8sectpfe_debugfs_init(struct c8sectpfei *fei) {}; static inline void c8sectpfe_debugfs_exit(struct c8sectpfei *fei) {}; #endif #endif /* __C8SECTPFE_DEBUG_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/probes/kprobes/actions-thumb.c
 *
 * Copyright (C) 2011 Jon Medhurst <[email protected]>.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/kprobes.h>

#include "../decode-thumb.h"
#include "core.h"
#include "checkers.h"

/* These emulation encodings are functionally equivalent... */
#define t32_emulate_rd8rn16rm0ra12_noflags \
		t32_emulate_rdlo12rdhi8rn16rm0_noflags

/* t32 thumb actions */

/*
 * Simulate TBB/TBH (table branch): read a byte (TBB) or halfword (TBH)
 * offset from the table at Rn indexed by Rm and branch forward by twice
 * that value.  Rn == 15 means the table is PC-relative.
 */
static void __kprobes
t32_simulate_table_branch(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long pc = regs->ARM_pc;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	unsigned long rnv = (rn == 15) ? pc : regs->uregs[rn];
	unsigned long rmv = regs->uregs[rm];
	unsigned int halfwords;

	if (insn & 0x10) /* TBH */
		halfwords = ((u16 *)rnv)[rmv];
	else /* TBB */
		halfwords = ((u8 *)rnv)[rmv];

	regs->ARM_pc = pc + 2 * halfwords;
}

/* Simulate MRS Rd, CPSR: copy CPSR to Rd with execution-state bits cleared. */
static void __kprobes
t32_simulate_mrs(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	int rd = (insn >> 8) & 0xf;
	unsigned long mask = 0xf8ff03df; /* Mask out execution state */
	regs->uregs[rd] = regs->ARM_cpsr & mask;
}

/*
 * Simulate the T3 conditional branch (B<cond>.W): reassemble the signed
 * offset from the scattered imm11/imm6/J1/J2/sign fields and add twice the
 * offset to the PC.  The condition itself is checked by insn_check_cc,
 * installed by t32_decode_cond_branch() below.
 */
static void __kprobes
t32_simulate_cond_branch(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long pc = regs->ARM_pc;

	long offset = insn & 0x7ff;		/* imm11 */
	offset += (insn & 0x003f0000) >> 5;	/* imm6 */
	offset += (insn & 0x00002000) << 4;	/* J1 */
	offset += (insn & 0x00000800) << 7;	/* J2 */
	offset -= (insn & 0x04000000) >> 7;	/* Apply sign bit */

	regs->ARM_pc = pc + (offset * 2);
}

/* Decode B<cond>.W: hook up the condition-code check and the simulator. */
static enum probes_insn __kprobes
t32_decode_cond_branch(probes_opcode_t insn, struct arch_probes_insn *asi,
		const struct decode_header *d)
{
	int cc = (insn >> 22) & 0xf;
	asi->insn_check_cc = probes_condition_checks[cc];
	asi->insn_handler = t32_simulate_cond_branch;
	return INSN_GOOD_NO_SLOT;
}

/*
 * Simulate the T4 encodings of B.W / BL / BLX: rebuild the signed offset
 * (note J1/J2 are inverted relative to the sign bit), set LR for the link
 * variants, and for BLX clear the Thumb bit and word-align the base PC to
 * switch to ARM state.
 */
static void __kprobes
t32_simulate_branch(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long pc = regs->ARM_pc;

	long offset = insn & 0x7ff;		/* imm11 */
	offset += (insn & 0x03ff0000) >> 5;	/* imm10 */
	offset += (insn & 0x00002000) << 9;	/* J1 */
	offset += (insn & 0x00000800) << 10;	/* J2 */
	if (insn & 0x04000000)
		offset -= 0x00800000; /* Apply sign bit */
	else
		offset ^= 0x00600000; /* Invert J1 and J2 */

	if (insn & (1 << 14)) {
		/* BL or BLX */
		regs->ARM_lr = regs->ARM_pc | 1;
		if (!(insn & (1 << 12))) {
			/* BLX so switch to ARM mode */
			regs->ARM_cpsr &= ~PSR_T_BIT;
			pc &= ~3;
		}
	}

	regs->ARM_pc = pc + (offset * 2);
}

/*
 * Simulate PC-relative (literal) loads: LDR/LDRH/LDRSH/LDRB/LDRSB from a
 * word-aligned PC +/- imm12.  An LDR into PC is an interworking branch via
 * bx_write_pc().
 */
static void __kprobes
t32_simulate_ldr_literal(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long addr = regs->ARM_pc & ~3;
	int rt = (insn >> 12) & 0xf;
	unsigned long rtv;

	long offset = insn & 0xfff;
	if (insn & 0x00800000)
		addr += offset;
	else
		addr -= offset;

	if (insn & 0x00400000) {
		/* LDR */
		rtv = *(unsigned long *)addr;
		if (rt == 15) {
			bx_write_pc(rtv, regs);
			return;
		}
	} else if (insn & 0x00200000) {
		/* LDRH */
		if (insn & 0x01000000)
			rtv = *(s16 *)addr;
		else
			rtv = *(u16 *)addr;
	} else {
		/* LDRB */
		if (insn & 0x01000000)
			rtv = *(s8 *)addr;
		else
			rtv = *(u8 *)addr;
	}

	regs->uregs[rt] = rtv;
}

/*
 * Decode LDM/STM: reuse the common ldm/stm decoder, then swap the two
 * halfwords of the modified instruction stored in the probe slot so it has
 * the byte order the Thumb-2 instruction stream expects.
 */
static enum probes_insn __kprobes
t32_decode_ldmstm(probes_opcode_t insn, struct arch_probes_insn *asi,
		const struct decode_header *d)
{
	enum probes_insn ret = kprobe_decode_ldmstm(insn, asi, d);

	/* Fixup modified instruction to have halfwords in correct order...*/
	insn = __mem_to_opcode_arm(asi->insn[0]);
	((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn >> 16);
	((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0xffff);

	return ret;
}

/*
 * Emulate LDRD/STRD by executing the copied instruction in the probe slot
 * (via blx to asi->insn_fn) with Rt1/Rt2/Rn pre-bound to r0/r1/r2, then
 * write the possibly-updated registers back into pt_regs.
 */
static void __kprobes
t32_emulate_ldrdstrd(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long pc = regs->ARM_pc & ~3;
	int rt1 = (insn >> 12) & 0xf;
	int rt2 = (insn >> 8) & 0xf;
	int rn = (insn >> 16) & 0xf;

	register unsigned long rt1v asm("r0") = regs->uregs[rt1];
	register unsigned long rt2v asm("r1") = regs->uregs[rt2];
	register unsigned long rnv asm("r2") = (rn == 15) ? pc
							  : regs->uregs[rn];

	__asm__ __volatile__ (
		"blx %[fn]"
		: "=r" (rt1v), "=r" (rt2v), "=r" (rnv)
		: "0" (rt1v), "1" (rt2v), "2" (rnv), [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	if (rn != 15)
		regs->uregs[rn] = rnv; /* Writeback base register */
	regs->uregs[rt1] = rt1v;
	regs->uregs[rt2] = rt2v;
}

/*
 * Emulate single load/store (register offset) in the probe slot with
 * Rt/Rn/Rm bound to r0/r2/r3.  A load into PC is an interworking branch.
 */
static void __kprobes
t32_emulate_ldrstr(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	int rt = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	register unsigned long rtv asm("r0") = regs->uregs[rt];
	register unsigned long rnv asm("r2") = regs->uregs[rn];
	register unsigned long rmv asm("r3") = regs->uregs[rm];

	__asm__ __volatile__ (
		"blx %[fn]"
		: "=r" (rtv), "=r" (rnv)
		: "0" (rtv), "1" (rnv), "r" (rmv), [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rn] = rnv; /* Writeback base register */
	if (rt == 15) /* Can't be true for a STR as they aren't allowed */
		bx_write_pc(rtv, regs);
	else
		regs->uregs[rt] = rtv;
}

/*
 * Emulate a flag-reading/flag-setting data-processing instruction: load the
 * saved CPSR into the hardware flags, run the slot, then merge the APSR
 * bits the instruction produced back into pt_regs->ARM_cpsr.
 */
static void __kprobes
t32_emulate_rd8rn16rm0_rwflags(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	int rd = (insn >> 8) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	register unsigned long rdv asm("r1") = regs->uregs[rd];
	register unsigned long rnv asm("r2") = regs->uregs[rn];
	register unsigned long rmv asm("r3") = regs->uregs[rm];
	unsigned long cpsr = regs->ARM_cpsr;

	__asm__ __volatile__ (
		"msr cpsr_fs, %[cpsr] \n\t"
		"blx %[fn] \n\t"
		"mrs %[cpsr], cpsr \n\t"
		: "=r" (rdv), [cpsr] "=r" (cpsr)
		: "0" (rdv), "r" (rnv), "r" (rmv), "1" (cpsr),
		  [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rd] = rdv;
	regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
}

/*
 * Emulate an instruction that reads the word-aligned PC (e.g. ADR): run the
 * slot with PC & ~3 supplied in r2 and the result taken from r1.  Flags are
 * not transferred.
 */
static void __kprobes
t32_emulate_rd8pc16_noflags(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long pc = regs->ARM_pc;
	int rd = (insn >> 8) & 0xf;

	register unsigned long rdv asm("r1") = regs->uregs[rd];
	register unsigned long rnv asm("r2") = pc & ~3;

	__asm__ __volatile__ (
		"blx %[fn]"
		: "=r" (rdv)
		: "0" (rdv), "r" (rnv), [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rd] = rdv;
}

/* Emulate a two-register (Rd, Rn) instruction in the slot; no flag transfer. */
static void __kprobes
t32_emulate_rd8rn16_noflags(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	int rd = (insn >> 8) & 0xf;
	int rn = (insn >> 16) & 0xf;

	register unsigned long rdv asm("r1") = regs->uregs[rd];
	register unsigned long rnv asm("r2") = regs->uregs[rn];

	__asm__ __volatile__ (
		"blx %[fn]"
		: "=r" (rdv)
		: "0" (rdv), "r" (rnv), [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rd] = rdv;
}

/*
 * Emulate long multiply/accumulate style instructions producing a 64-bit
 * result in RdLo:RdHi, with RdLo/RdHi/Rn/Rm bound to r0-r3; no flag
 * transfer.  (Also serves rd8rn16rm0ra12_noflags via the #define above.)
 */
static void __kprobes
t32_emulate_rdlo12rdhi8rn16rm0_noflags(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	int rdlo = (insn >> 12) & 0xf;
	int rdhi = (insn >> 8) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	register unsigned long rdlov asm("r0") = regs->uregs[rdlo];
	register unsigned long rdhiv asm("r1") = regs->uregs[rdhi];
	register unsigned long rnv asm("r2") = regs->uregs[rn];
	register unsigned long rmv asm("r3") = regs->uregs[rm];

	__asm__ __volatile__ (
		"blx %[fn]"
		: "=r" (rdlov), "=r" (rdhiv)
		: "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv),
		  [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rdlo] = rdlov;
	regs->uregs[rdhi] = rdhiv;
}

/* t16 thumb actions */

/*
 * Simulate 16-bit BX/BLX Rm.  Note pc here is the address of the next
 * instruction (ARM_pc + 2); BLX records the return address with the Thumb
 * bit set in LR before the interworking branch.
 */
static void __kprobes
t16_simulate_bxblx(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long pc = regs->ARM_pc + 2;
	int rm = (insn >> 3) & 0xf;
	unsigned long rmv = (rm == 15) ? pc : regs->uregs[rm];

	if (insn & (1 << 7)) /* BLX ? */
		regs->ARM_lr = regs->ARM_pc | 1;

	bx_write_pc(rmv, regs);
}

/* Simulate 16-bit LDR Rt, [PC, #imm8*4] (literal load). */
static void __kprobes
t16_simulate_ldr_literal(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long *base = (unsigned long *)((regs->ARM_pc + 2) & ~3);
	long index = insn & 0xff;
	int rt = (insn >> 8) & 0x7;
	regs->uregs[rt] = base[index];
}

/* Simulate 16-bit LDR/STR Rt, [SP, #imm8*4]; bit 11 selects load vs store. */
static void __kprobes
t16_simulate_ldrstr_sp_relative(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long* base = (unsigned long *)regs->ARM_sp;
	long index = insn & 0xff;
	int rt = (insn >> 8) & 0x7;
	if (insn & 0x800) /* LDR */
		regs->uregs[rt] = base[index];
	else /* STR */
		base[index] = regs->uregs[rt];
}

/* Simulate ADR/ADD Rt, SP|PC, #imm8*4; bit 11 selects SP vs aligned PC base. */
static void __kprobes
t16_simulate_reladr(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long base = (insn & 0x800) ? regs->ARM_sp
					    : ((regs->ARM_pc + 2) & ~3);
	long offset = insn & 0xff;
	int rt = (insn >> 8) & 0x7;
	regs->uregs[rt] = base + offset * 4;
}

/* Simulate ADD/SUB SP, SP, #imm7*4; bit 7 selects SUB. */
static void __kprobes
t16_simulate_add_sp_imm(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	long imm = insn & 0x7f;
	if (insn & 0x80) /* SUB */
		regs->ARM_sp -= imm * 4;
	else /* ADD */
		regs->ARM_sp += imm * 4;
}

/*
 * Simulate CBZ/CBNZ Rn, label.  Inverting insn when Rn is non-zero folds
 * both variants into one test of the op bit (0x800); the branch target is
 * next-PC plus the i:imm5 offset (taken branches only).
 */
static void __kprobes
t16_simulate_cbz(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	int rn = insn & 0x7;
	probes_opcode_t nonzero = regs->uregs[rn] ? insn : ~insn;
	if (nonzero & 0x800) {
		long i = insn & 0x200;
		long imm5 = insn & 0xf8;
		unsigned long pc = regs->ARM_pc + 2;
		regs->ARM_pc = pc + (i >> 3) + (imm5 >> 2);
	}
}

static void __kprobes
t16_simulate_it(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	/*
	 * The 8 IT state bits are split into two parts in CPSR:
	 *	ITSTATE<1:0> are in CPSR<26:25>
	 *	ITSTATE<7:2> are in CPSR<15:10>
	 * The new IT state is in the lower byte of insn.
*/ unsigned long cpsr = regs->ARM_cpsr; cpsr &= ~PSR_IT_MASK; cpsr |= (insn & 0xfc) << 8; cpsr |= (insn & 0x03) << 25; regs->ARM_cpsr = cpsr; } static void __kprobes t16_singlestep_it(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { regs->ARM_pc += 2; t16_simulate_it(insn, asi, regs); } static enum probes_insn __kprobes t16_decode_it(probes_opcode_t insn, struct arch_probes_insn *asi, const struct decode_header *d) { asi->insn_singlestep = t16_singlestep_it; return INSN_GOOD_NO_SLOT; } static void __kprobes t16_simulate_cond_branch(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { unsigned long pc = regs->ARM_pc + 2; long offset = insn & 0x7f; offset -= insn & 0x80; /* Apply sign bit */ regs->ARM_pc = pc + (offset * 2); } static enum probes_insn __kprobes t16_decode_cond_branch(probes_opcode_t insn, struct arch_probes_insn *asi, const struct decode_header *d) { int cc = (insn >> 8) & 0xf; asi->insn_check_cc = probes_condition_checks[cc]; asi->insn_handler = t16_simulate_cond_branch; return INSN_GOOD_NO_SLOT; } static void __kprobes t16_simulate_branch(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { unsigned long pc = regs->ARM_pc + 2; long offset = insn & 0x3ff; offset -= insn & 0x400; /* Apply sign bit */ regs->ARM_pc = pc + (offset * 2); } static unsigned long __kprobes t16_emulate_loregs(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { unsigned long oldcpsr = regs->ARM_cpsr; unsigned long newcpsr; __asm__ __volatile__ ( "msr cpsr_fs, %[oldcpsr] \n\t" "mov r11, r7 \n\t" "ldmia %[regs], {r0-r7} \n\t" "blx %[fn] \n\t" "stmia %[regs], {r0-r7} \n\t" "mov r7, r11 \n\t" "mrs %[newcpsr], cpsr \n\t" : [newcpsr] "=r" (newcpsr) : [oldcpsr] "r" (oldcpsr), [regs] "r" (regs), [fn] "r" (asi->insn_fn) : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r11", "lr", "memory", "cc" ); return (oldcpsr & ~APSR_MASK) | (newcpsr & APSR_MASK); } static void __kprobes 
t16_emulate_loregs_rwflags(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { regs->ARM_cpsr = t16_emulate_loregs(insn, asi, regs); } static void __kprobes t16_emulate_loregs_noitrwflags(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { unsigned long cpsr = t16_emulate_loregs(insn, asi, regs); if (!in_it_block(cpsr)) regs->ARM_cpsr = cpsr; } static void __kprobes t16_emulate_hiregs(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { unsigned long pc = regs->ARM_pc + 2; int rdn = (insn & 0x7) | ((insn & 0x80) >> 4); int rm = (insn >> 3) & 0xf; register unsigned long rdnv asm("r1"); register unsigned long rmv asm("r0"); unsigned long cpsr = regs->ARM_cpsr; rdnv = (rdn == 15) ? pc : regs->uregs[rdn]; rmv = (rm == 15) ? pc : regs->uregs[rm]; __asm__ __volatile__ ( "msr cpsr_fs, %[cpsr] \n\t" "blx %[fn] \n\t" "mrs %[cpsr], cpsr \n\t" : "=r" (rdnv), [cpsr] "=r" (cpsr) : "0" (rdnv), "r" (rmv), "1" (cpsr), [fn] "r" (asi->insn_fn) : "lr", "memory", "cc" ); if (rdn == 15) rdnv &= ~1; regs->uregs[rdn] = rdnv; regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK); } static enum probes_insn __kprobes t16_decode_hiregs(probes_opcode_t insn, struct arch_probes_insn *asi, const struct decode_header *d) { insn &= ~0x00ff; insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn); asi->insn_handler = t16_emulate_hiregs; return INSN_GOOD; } static void __kprobes t16_emulate_push(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { __asm__ __volatile__ ( "mov r11, r7 \n\t" "ldr r9, [%[regs], #13*4] \n\t" "ldr r8, [%[regs], #14*4] \n\t" "ldmia %[regs], {r0-r7} \n\t" "blx %[fn] \n\t" "str r9, [%[regs], #13*4] \n\t" "mov r7, r11 \n\t" : : [regs] "r" (regs), [fn] "r" (asi->insn_fn) : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r8", "r9", "r11", "lr", "memory", "cc" ); } static enum probes_insn __kprobes t16_decode_push(probes_opcode_t 
insn, struct arch_probes_insn *asi, const struct decode_header *d) { /* * To simulate a PUSH we use a Thumb-2 "STMDB R9!, {registers}" * and call it with R9=SP and LR in the register list represented * by R8. */ /* 1st half STMDB R9!,{} */ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe929); /* 2nd half (register list) */ ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff); asi->insn_handler = t16_emulate_push; return INSN_GOOD; } static void __kprobes t16_emulate_pop_nopc(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { __asm__ __volatile__ ( "mov r11, r7 \n\t" "ldr r9, [%[regs], #13*4] \n\t" "ldmia %[regs], {r0-r7} \n\t" "blx %[fn] \n\t" "stmia %[regs], {r0-r7} \n\t" "str r9, [%[regs], #13*4] \n\t" "mov r7, r11 \n\t" : : [regs] "r" (regs), [fn] "r" (asi->insn_fn) : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r9", "r11", "lr", "memory", "cc" ); } static void __kprobes t16_emulate_pop_pc(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { register unsigned long pc asm("r8"); __asm__ __volatile__ ( "mov r11, r7 \n\t" "ldr r9, [%[regs], #13*4] \n\t" "ldmia %[regs], {r0-r7} \n\t" "blx %[fn] \n\t" "stmia %[regs], {r0-r7} \n\t" "str r9, [%[regs], #13*4] \n\t" "mov r7, r11 \n\t" : "=r" (pc) : [regs] "r" (regs), [fn] "r" (asi->insn_fn) : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r9", "r11", "lr", "memory", "cc" ); bx_write_pc(pc, regs); } static enum probes_insn __kprobes t16_decode_pop(probes_opcode_t insn, struct arch_probes_insn *asi, const struct decode_header *d) { /* * To simulate a POP we use a Thumb-2 "LDMDB R9!, {registers}" * and call it with R9=SP and PC in the register list represented * by R8. */ /* 1st half LDMIA R9!,{} */ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe8b9); /* 2nd half (register list) */ ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff); asi->insn_handler = insn & 0x100 ? 
t16_emulate_pop_pc : t16_emulate_pop_nopc; return INSN_GOOD; } const union decode_action kprobes_t16_actions[NUM_PROBES_T16_ACTIONS] = { [PROBES_T16_ADD_SP] = {.handler = t16_simulate_add_sp_imm}, [PROBES_T16_CBZ] = {.handler = t16_simulate_cbz}, [PROBES_T16_SIGN_EXTEND] = {.handler = t16_emulate_loregs_rwflags}, [PROBES_T16_PUSH] = {.decoder = t16_decode_push}, [PROBES_T16_POP] = {.decoder = t16_decode_pop}, [PROBES_T16_SEV] = {.handler = probes_emulate_none}, [PROBES_T16_WFE] = {.handler = probes_simulate_nop}, [PROBES_T16_IT] = {.decoder = t16_decode_it}, [PROBES_T16_CMP] = {.handler = t16_emulate_loregs_rwflags}, [PROBES_T16_ADDSUB] = {.handler = t16_emulate_loregs_noitrwflags}, [PROBES_T16_LOGICAL] = {.handler = t16_emulate_loregs_noitrwflags}, [PROBES_T16_LDR_LIT] = {.handler = t16_simulate_ldr_literal}, [PROBES_T16_BLX] = {.handler = t16_simulate_bxblx}, [PROBES_T16_HIREGOPS] = {.decoder = t16_decode_hiregs}, [PROBES_T16_LDRHSTRH] = {.handler = t16_emulate_loregs_rwflags}, [PROBES_T16_LDRSTR] = {.handler = t16_simulate_ldrstr_sp_relative}, [PROBES_T16_ADR] = {.handler = t16_simulate_reladr}, [PROBES_T16_LDMSTM] = {.handler = t16_emulate_loregs_rwflags}, [PROBES_T16_BRANCH_COND] = {.decoder = t16_decode_cond_branch}, [PROBES_T16_BRANCH] = {.handler = t16_simulate_branch}, }; const union decode_action kprobes_t32_actions[NUM_PROBES_T32_ACTIONS] = { [PROBES_T32_LDMSTM] = {.decoder = t32_decode_ldmstm}, [PROBES_T32_LDRDSTRD] = {.handler = t32_emulate_ldrdstrd}, [PROBES_T32_TABLE_BRANCH] = {.handler = t32_simulate_table_branch}, [PROBES_T32_TST] = {.handler = t32_emulate_rd8rn16rm0_rwflags}, [PROBES_T32_MOV] = {.handler = t32_emulate_rd8rn16rm0_rwflags}, [PROBES_T32_ADDSUB] = {.handler = t32_emulate_rd8rn16rm0_rwflags}, [PROBES_T32_LOGICAL] = {.handler = t32_emulate_rd8rn16rm0_rwflags}, [PROBES_T32_CMP] = {.handler = t32_emulate_rd8rn16rm0_rwflags}, [PROBES_T32_ADDWSUBW_PC] = {.handler = t32_emulate_rd8pc16_noflags,}, [PROBES_T32_ADDWSUBW] = {.handler = 
t32_emulate_rd8rn16_noflags}, [PROBES_T32_MOVW] = {.handler = t32_emulate_rd8rn16_noflags}, [PROBES_T32_SAT] = {.handler = t32_emulate_rd8rn16rm0_rwflags}, [PROBES_T32_BITFIELD] = {.handler = t32_emulate_rd8rn16_noflags}, [PROBES_T32_SEV] = {.handler = probes_emulate_none}, [PROBES_T32_WFE] = {.handler = probes_simulate_nop}, [PROBES_T32_MRS] = {.handler = t32_simulate_mrs}, [PROBES_T32_BRANCH_COND] = {.decoder = t32_decode_cond_branch}, [PROBES_T32_BRANCH] = {.handler = t32_simulate_branch}, [PROBES_T32_PLDI] = {.handler = probes_simulate_nop}, [PROBES_T32_LDR_LIT] = {.handler = t32_simulate_ldr_literal}, [PROBES_T32_LDRSTR] = {.handler = t32_emulate_ldrstr}, [PROBES_T32_SIGN_EXTEND] = {.handler = t32_emulate_rd8rn16rm0_rwflags}, [PROBES_T32_MEDIA] = {.handler = t32_emulate_rd8rn16rm0_rwflags}, [PROBES_T32_REVERSE] = {.handler = t32_emulate_rd8rn16_noflags}, [PROBES_T32_MUL_ADD] = {.handler = t32_emulate_rd8rn16rm0_rwflags}, [PROBES_T32_MUL_ADD2] = {.handler = t32_emulate_rd8rn16rm0ra12_noflags}, [PROBES_T32_MUL_ADD_LONG] = { .handler = t32_emulate_rdlo12rdhi8rn16rm0_noflags}, }; const struct decode_checker *kprobes_t32_checkers[] = {t32_stack_checker, NULL}; const struct decode_checker *kprobes_t16_checkers[] = {t16_stack_checker, NULL};
/***********************license start*************** * Author: Cavium Networks * * Contact: [email protected] * This file is part of the OCTEON SDK * * Copyright (c) 2003-2008 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ /* * Packet buffer defines. */ #ifndef __CVMX_PACKET_H__ #define __CVMX_PACKET_H__ /** * This structure defines a buffer pointer on Octeon */ union cvmx_buf_ptr { void *ptr; uint64_t u64; struct { #ifdef __BIG_ENDIAN_BITFIELD /* if set, invert the "free" pick of the overall * packet. HW always sets this bit to 0 on inbound * packet */ uint64_t i:1; /* Indicates the amount to back up to get to the * buffer start in cache lines. In most cases this is * less than one complete cache line, so the value is * zero */ uint64_t back:4; /* The pool that the buffer came from / goes to */ uint64_t pool:3; /* The size of the segment pointed to by addr (in bytes) */ uint64_t size:16; /* Pointer to the first byte of the data, NOT buffer */ uint64_t addr:40; #else uint64_t addr:40; uint64_t size:16; uint64_t pool:3; uint64_t back:4; uint64_t i:1; #endif } s; }; #endif /* __CVMX_PACKET_H__ */
// SPDX-License-Identifier: (GPL-2.0+ OR MIT) /dts-v1/; #include "rk3566-pinetab2.dtsi" / { model = "Pine64 PineTab2 v0.1"; compatible = "pine64,pinetab2-v0.1", "pine64,pinetab2", "rockchip,rk3566"; }; &lcd { reset-gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_LOW>; pinctrl-names = "default"; pinctrl-0 = <&lcd_pwren_h &lcd0_rst_l>; }; &pinctrl { lcd0 { lcd0_rst_l: lcd0-rst-l { rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>; }; }; }; &sdmmc1 { vmmc-supply = <&vcc3v3_sys>; };
// SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> static void test_task_fd_query_tp_core(const char *probe_name, const char *tp_name) { const char *file = "./test_tracepoint.bpf.o"; int err, bytes, efd, prog_fd, pmu_fd; struct perf_event_attr attr = {}; __u64 probe_offset, probe_addr; __u32 len, prog_id, fd_type; struct bpf_object *obj = NULL; __u32 duration = 0; char buf[256]; err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno)) goto close_prog; if (access("/sys/kernel/tracing/trace", F_OK) == 0) { snprintf(buf, sizeof(buf), "/sys/kernel/tracing/events/%s/id", probe_name); } else { snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/events/%s/id", probe_name); } efd = open(buf, O_RDONLY, 0); if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) goto close_prog; bytes = read(efd, buf, sizeof(buf)); close(efd); if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read", "bytes %d errno %d\n", bytes, errno)) goto close_prog; attr.config = strtol(buf, NULL, 0); attr.type = PERF_TYPE_TRACEPOINT; attr.sample_type = PERF_SAMPLE_RAW; attr.sample_period = 1; attr.wakeup_events = 1; pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu 0 */, -1 /* group id */, 0 /* flags */); if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno)) goto close_pmu; err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err, errno)) goto close_pmu; err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err, errno)) goto close_pmu; /* query (getpid(), pmu_fd) */ len = sizeof(buf); err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id, &fd_type, &probe_offset, &probe_addr); if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err, errno)) goto close_pmu; err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name); if 
(CHECK(!err, "check_results", "fd_type %d tp_name %s\n", fd_type, buf)) goto close_pmu; close_pmu: close(pmu_fd); close_prog: bpf_object__close(obj); } void test_task_fd_query_tp(void) { test_task_fd_query_tp_core("sched/sched_switch", "sched_switch"); test_task_fd_query_tp_core("syscalls/sys_enter_read", "sys_enter_read"); }
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Maxim Integrated MAX5432-MAX5435 digital potentiometer driver
 * Copyright (C) 2019 Martin Kaiser <[email protected]>
 *
 * Datasheet:
 * https://datasheets.maximintegrated.com/en/ds/MAX5432-MAX5435.pdf
 */

#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>

/* All chip variants have 32 wiper positions. */
#define MAX5432_MAX_POS    31

#define MAX5432_OHM_50K   (50  * 1000)
#define MAX5432_OHM_100K  (100 * 1000)

/* Update the volatile (currently active) setting. */
#define MAX5432_CMD_VREG  0x11

/* Per-device state: the I2C client and the variant's total resistance. */
struct max5432_data {
	struct i2c_client *client;
	unsigned long ohm;
};

/* Single output channel: the wiper position, exposed as a resistance. */
static const struct iio_chan_spec max5432_channels[] = {
	{
		.type = IIO_RESISTANCE,
		.indexed = 1,
		.output = 1,
		.channel = 0,
		.address = MAX5432_CMD_VREG,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
	}
};

/*
 * Only SCALE is readable: ohm / MAX5432_MAX_POS (fractional value),
 * i.e. the resistance step per raw wiper increment.
 */
static int max5432_read_raw(struct iio_dev *indio_dev,
		struct iio_chan_spec const *chan,
		int *val, int *val2, long mask)
{
	struct max5432_data *data = iio_priv(indio_dev);

	if (mask != IIO_CHAN_INFO_SCALE)
		return -EINVAL;

	/* data->ohm is returned through an int; guard the conversion. */
	if (unlikely(data->ohm > INT_MAX))
		return -ERANGE;

	*val = data->ohm;
	*val2 = MAX5432_MAX_POS;

	return IIO_VAL_FRACTIONAL;
}

/*
 * Write a new raw wiper position (0..MAX5432_MAX_POS) to the volatile
 * register selected by chan->address.
 */
static int max5432_write_raw(struct iio_dev *indio_dev,
		struct iio_chan_spec const *chan,
		int val, int val2, long mask)
{
	struct max5432_data *data = iio_priv(indio_dev);
	u8 data_byte;

	if (mask != IIO_CHAN_INFO_RAW)
		return -EINVAL;

	if (val < 0 || val > MAX5432_MAX_POS)
		return -EINVAL;

	if (val2 != 0)
		return -EINVAL;

	/* Wiper position is in bits D7-D3. (D2-D0 are don't care bits.) */
	data_byte = val << 3;
	return i2c_smbus_write_byte_data(data->client, chan->address,
			data_byte);
}

static const struct iio_info max5432_info = {
	.read_raw = max5432_read_raw,
	.write_raw = max5432_write_raw,
};

/*
 * Probe: allocate the IIO device, record the variant's full-scale
 * resistance from the OF match data, and register the device.  All
 * resources are devm-managed, so there is no remove callback.
 */
static int max5432_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct iio_dev *indio_dev;
	struct max5432_data *data;

	indio_dev = devm_iio_device_alloc(dev, sizeof(struct max5432_data));
	if (!indio_dev)
		return -ENOMEM;

	i2c_set_clientdata(client, indio_dev);

	data = iio_priv(indio_dev);
	data->client = client;
	data->ohm = (unsigned long)device_get_match_data(dev);

	indio_dev->info = &max5432_info;
	indio_dev->channels = max5432_channels;
	indio_dev->num_channels = ARRAY_SIZE(max5432_channels);
	indio_dev->name = client->name;

	return devm_iio_device_register(dev, indio_dev);
}

/* Match data encodes the end-to-end resistance of each chip variant. */
static const struct of_device_id max5432_dt_ids[] = {
	{ .compatible = "maxim,max5432", .data = (void *)MAX5432_OHM_50K  },
	{ .compatible = "maxim,max5433", .data = (void *)MAX5432_OHM_100K },
	{ .compatible = "maxim,max5434", .data = (void *)MAX5432_OHM_50K  },
	{ .compatible = "maxim,max5435", .data = (void *)MAX5432_OHM_100K },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, max5432_dt_ids);

static struct i2c_driver max5432_driver = {
	.driver = {
		.name = "max5432",
		.of_match_table = max5432_dt_ids,
	},
	.probe = max5432_probe,
};

module_i2c_driver(max5432_driver);

MODULE_AUTHOR("Martin Kaiser <[email protected]>");
MODULE_DESCRIPTION("max5432-max5435 digital potentiometers");
MODULE_LICENSE("GPL v2");
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#ifndef __RTW89_8852A_RFK_TABLE_H__
#define __RTW89_8852A_RFK_TABLE_H__

#include "phy.h"

/*
 * Declarations for the RTW8852A RF calibration (RFK) register tables.
 * The tables themselves are defined in the matching .c file; suffixes
 * _a/_b select RF path A/B, _2g/_5g select the band, and _ab applies
 * to both paths.
 */

/* TSSI (transmit signal strength indicator) calibration tables */
extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_2g_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_5g_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_defs_2g_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_defs_5g_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_dck_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_dck_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_dac_gain_tbl_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_dac_gain_tbl_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_slope_cal_org_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_slope_cal_org_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_rf_gap_tbl_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_rf_gap_tbl_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_slope_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_slope_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_track_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_track_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_txagc_ofst_mv_avg_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_txagc_ofst_mv_avg_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_a_2g_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_a_5g_1_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_a_5g_3_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_a_5g_4_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_b_2g_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_b_5g_1_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_b_5g_3_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_b_5g_4_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_enable_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_enable_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_enable_defs_ab_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_disable_defs_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_tracking_defs_tbl;

/* AFE init and DACK (DAC calibration) tables */
extern const struct rtw89_rfk_tbl rtw8852a_rfk_afe_init_defs_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_reload_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_reload_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_addc_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_addc_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_reset_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_trigger_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_restore_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_reset_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_trigger_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_restore_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_dadc_defs_f_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_dadc_defs_f_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_dadc_defs_r_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_dadc_defs_r_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_f_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_m_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_r_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_f_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_m_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_r_b_tbl;

/* DPK (digital pre-distortion) tables */
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_sf_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_sr_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_sf_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_sr_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_s_defs_ab_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_r_defs_a_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_r_defs_b_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_r_defs_ab_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_lbk_rxiqk_defs_f_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_lbk_rxiqk_defs_r_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_pas_read_defs_tbl;

/* IQK (IQ imbalance calibration) tables */
extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_set_defs_nondbcc_path01_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_set_defs_dbcc_path0_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_set_defs_dbcc_path1_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_restore_defs_nondbcc_path01_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_restore_defs_dbcc_path0_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_restore_defs_dbcc_path1_tbl;

#endif
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef SMU14_DRIVER_IF_V14_0_0_H
#define SMU14_DRIVER_IF_V14_0_0_H

/*
 * Driver <-> SMU firmware shared-memory interface for SMU v14.0.0.
 * NOTE(review): these layouts are shared with firmware — field order,
 * widths and padding must not be changed.
 */

/* Fixed-point value: 'value' scaled by 2^numFractionalBits. */
typedef struct {
  int32_t value;
  uint32_t numFractionalBits;
} FloatInIntFormat_t;

typedef enum {
  DSPCLK_DCFCLK = 0,
  DSPCLK_DISPCLK,
  DSPCLK_PIXCLK,
  DSPCLK_PHYCLK,
  DSPCLK_COUNT,
} DSPCLK_e;

typedef struct {
  uint16_t Freq; // in MHz
  uint16_t Vid;  // min voltage in SVI3 VID
} DisplayClockTable_t;

typedef struct {
  uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz)
  uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz)
  uint16_t MinMclk;
  uint16_t MaxMclk;

  uint8_t  WmSetting;
  uint8_t  WmType;  // Used for normal pstate change or memory retraining
  uint8_t  Padding[2];
} WatermarkRowGeneric_t;

#define NUM_WM_RANGES 4
#define WM_PSTATE_CHG 0
#define WM_RETRAINING 1

typedef enum {
  WM_SOCCLK = 0,
  WM_DCFCLK,
  WM_COUNT,
} WM_CLOCK_e;

typedef struct {
  // Watermarks
  WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];

  uint32_t MmHubPadding[7]; // SMU internal use
} Watermarks_t;

typedef enum {
  CUSTOM_DPM_SETTING_GFXCLK,
  CUSTOM_DPM_SETTING_CCLK,
  CUSTOM_DPM_SETTING_FCLK_CCX,
  CUSTOM_DPM_SETTING_FCLK_GFX,
  CUSTOM_DPM_SETTING_FCLK_STALLS,
  CUSTOM_DPM_SETTING_LCLK,
  CUSTOM_DPM_SETTING_COUNT,
} CUSTOM_DPM_SETTING_e;

/* Per-setting activity monitor coefficients for custom DPM tuning. */
typedef struct {
  uint8_t             ActiveHystLimit;
  uint8_t             IdleHystLimit;
  uint8_t             FPS;
  uint8_t             MinActiveFreqType;
  FloatInIntFormat_t  MinActiveFreq;
  FloatInIntFormat_t  PD_Data_limit;
  FloatInIntFormat_t  PD_Data_time_constant;
  FloatInIntFormat_t  PD_Data_error_coeff;
  FloatInIntFormat_t  PD_Data_error_rate_coeff;
} DpmActivityMonitorCoeffExt_t;

typedef struct {
  DpmActivityMonitorCoeffExt_t DpmActivityMonitorCoeff[CUSTOM_DPM_SETTING_COUNT];
} CustomDpmSettings_t;

#define NUM_DCFCLK_DPM_LEVELS     8
#define NUM_DISPCLK_DPM_LEVELS    8
#define NUM_DPPCLK_DPM_LEVELS     8
#define NUM_SOCCLK_DPM_LEVELS     8
#define NUM_VCN_DPM_LEVELS        8
#define NUM_SOC_VOLTAGE_LEVELS    8
#define NUM_VPE_DPM_LEVELS        8
#define NUM_FCLK_DPM_LEVELS       8
#define NUM_MEM_PSTATE_LEVELS     4

/* One memory P-state entry. */
typedef struct {
  uint32_t UClk;
  uint32_t MemClk;
  uint32_t Voltage;
  uint8_t  WckRatio;
  uint8_t  Spare[3];
} MemPstateTable_t;

//Freq in MHz
//Voltage in milli volts with 2 fractional bits
typedef struct {
  uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
  uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
  uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
  uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
  uint32_t VClocks[NUM_VCN_DPM_LEVELS];
  uint32_t DClocks[NUM_VCN_DPM_LEVELS];
  uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
  uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
  uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
  uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
  MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];

  uint8_t  NumDcfClkLevelsEnabled;
  uint8_t  NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
  uint8_t  NumSocClkLevelsEnabled;
  uint8_t  VcnClkLevelsEnabled;     //Applies to both Vclk and Dclk
  uint8_t  VpeClkLevelsEnabled;
  uint8_t  NumMemPstatesEnabled;
  uint8_t  NumFclkLevelsEnabled;
  uint8_t  spare[2];

  uint32_t MinGfxClk;
  uint32_t MaxGfxClk;
} DpmClocks_t;

//Freq in MHz
//Voltage in milli volts with 2 fractional bits
/* v14.0.1 layout: dual-VCN variant with separate Vclk/Dclk 0/1 arrays. */
typedef struct {
  uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
  uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
  uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
  uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
  uint32_t VClocks0[NUM_VCN_DPM_LEVELS];
  uint32_t VClocks1[NUM_VCN_DPM_LEVELS];
  uint32_t DClocks0[NUM_VCN_DPM_LEVELS];
  uint32_t DClocks1[NUM_VCN_DPM_LEVELS];
  uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
  uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
  uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
  uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
  MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];

  uint8_t  NumDcfClkLevelsEnabled;
  uint8_t  NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
  uint8_t  NumSocClkLevelsEnabled;
  uint8_t  Vcn0ClkLevelsEnabled;    //Applies to both Vclk0 and Dclk0
  uint8_t  Vcn1ClkLevelsEnabled;    //Applies to both Vclk1 and Dclk1
  uint8_t  VpeClkLevelsEnabled;
  uint8_t  NumMemPstatesEnabled;
  uint8_t  NumFclkLevelsEnabled;

  uint32_t MinGfxClk;
  uint32_t MaxGfxClk;
} DpmClocks_t_v14_0_1;

/* Telemetry snapshot read back from SMU firmware. */
typedef struct {
  uint16_t CoreFrequency[16];        //Target core frequency [MHz]
  uint16_t CorePower[16];            //CAC calculated core power [mW]
  uint16_t CoreTemperature[16];      //TSEN measured core temperature [centi-C]
  uint16_t GfxTemperature;           //TSEN measured GFX temperature [centi-C]
  uint16_t SocTemperature;           //TSEN measured SOC temperature [centi-C]
  uint16_t StapmOpnLimit;            //Maximum IRM defined STAPM power limit [mW]
  uint16_t StapmCurrentLimit;        //Time filtered STAPM power limit [mW]
  uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz]
  uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz]
  uint16_t SkinTemp;                 //Maximum skin temperature reported by APU and HS2 chassis sensors [centi-C]
  uint16_t GfxclkFrequency;          //Time filtered target GFXCLK frequency [MHz]
  uint16_t FclkFrequency;            //Time filtered target FCLK frequency [MHz]
  uint16_t GfxActivity;              //Time filtered GFX busy % [0-100]
  uint16_t SocclkFrequency;          //Time filtered target SOCCLK frequency [MHz]
  uint16_t VclkFrequency;            //Time filtered target VCLK frequency [MHz]
  uint16_t VcnActivity;              //Time filtered VCN busy % [0-100]
  uint16_t VpeclkFrequency;          //Time filtered target VPECLK frequency [MHz]
  uint16_t IpuclkFrequency;          //Time filtered target IPUCLK frequency [MHz]
  uint16_t IpuBusy[8];               //Time filtered IPU per-column busy % [0-100]
  uint16_t DRAMReads;                //Time filtered DRAM read bandwidth [MB/sec]
  uint16_t DRAMWrites;               //Time filtered DRAM write bandwidth [MB/sec]
  uint16_t CoreC0Residency[16];      //Time filtered per-core C0 residency % [0-100]
  uint16_t IpuPower;                 //Time filtered IPU power [mW]
  uint32_t ApuPower;                 //Time filtered APU power [mW]
  uint32_t GfxPower;                 //Time filtered GFX power [mW]
  uint32_t dGpuPower;                //Time filtered dGPU power [mW]
  uint32_t SocketPower;              //Time filtered power used for PPT/STAPM [APU+dGPU] [mW]
  uint32_t AllCorePower;             //Time filtered sum of core power across all cores in the socket [mW]
  uint32_t FilterAlphaValue;         //Metrics table alpha filter time constant [us]
  uint32_t MetricsCounter;           //Counter that is incremented on every metrics table update [PM_TIMER cycles]
  uint16_t MemclkFrequency;          //Time filtered target MEMCLK frequency [MHz]
  uint16_t MpipuclkFrequency;        //Time filtered target MPIPUCLK frequency [MHz]
  uint16_t IpuReads;                 //Time filtered IPU read bandwidth [MB/sec]
  uint16_t IpuWrites;                //Time filtered IPU write bandwidth [MB/sec]
  uint32_t ThrottleResidency_PROCHOT;  //Counter that is incremented on every metrics table update when PROCHOT was engaged [PM_TIMER cycles]
  uint32_t ThrottleResidency_SPL;      //Counter that is incremented on every metrics table update when SPL was engaged [PM_TIMER cycles]
  uint32_t ThrottleResidency_FPPT;     //Counter that is incremented on every metrics table update when fast PPT was engaged [PM_TIMER cycles]
  uint32_t ThrottleResidency_SPPT;     //Counter that is incremented on every metrics table update when slow PPT was engaged [PM_TIMER cycles]
  uint32_t ThrottleResidency_THM_CORE; //Counter that is incremented on every metrics table update when CORE thermal throttling was engaged [PM_TIMER cycles]
  uint32_t ThrottleResidency_THM_GFX;  //Counter that is incremented on every metrics table update when GFX thermal throttling was engaged [PM_TIMER cycles]
  uint32_t ThrottleResidency_THM_SOC;  //Counter that is incremented on every metrics table update when SOC thermal throttling was engaged [PM_TIMER cycles]
  uint16_t Psys;                       //Time filtered Psys power [mW]
  uint16_t spare1;
  uint32_t spare[6];
} SmuMetrics_t;

//ISP tile definitions
typedef enum {
  TILE_XTILE = 0, //ONO0
  TILE_MTILE,     //ONO1
  TILE_PDP,       //ONO2
  TILE_CSTAT,     //ONO2
  TILE_LME,       //ONO3
  TILE_BYRP,      //ONO4
  TILE_GRBP,      //ONO4
  TILE_MCFP,      //ONO4
  TILE_YUVP,      //ONO4
  TILE_MCSC,      //ONO4
  TILE_GDC,       //ONO5
  TILE_MAX
} TILE_NUM_e;

// Tile Selection (Based on arguments)
#define ISP_TILE_SEL(tile)   (1<<tile)
#define ISP_TILE_SEL_ALL     0x7FF

// Workload bits
#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0
#define WORKLOAD_PPLIB_VIDEO_BIT          2
#define WORKLOAD_PPLIB_VR_BIT             3
#define WORKLOAD_PPLIB_COMPUTE_BIT        4
#define WORKLOAD_PPLIB_CUSTOM_BIT         5
#define WORKLOAD_PPLIB_COUNT              6

/* Shared-memory table IDs used when exchanging tables with the SMU. */
#define TABLE_BIOS_IF               0 // Called by BIOS
#define TABLE_WATERMARKS            1 // Called by DAL through VBIOS
#define TABLE_CUSTOM_DPM            2 // Called by Driver
#define TABLE_BIOS_GPIO_CONFIG      3 // Called by BIOS
#define TABLE_DPMCLOCKS             4 // Called by Driver and VBIOS
#define TABLE_MOMENTARY_PM          5 // Called by Tools
#define TABLE_MODERN_STDBY          6 // Called by Tools for Modern Standby Log
#define TABLE_SMU_METRICS           7 // Called by Driver and SMF/PMF
#define TABLE_COUNT                 8

#endif
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * max14577.h - Driver for the Maxim 14577/77836
 *
 * Copyright (C) 2014 Samsung Electrnoics
 * Chanwoo Choi <[email protected]>
 * Krzysztof Kozlowski <[email protected]>
 *
 * This driver is based on max8997.h
 *
 * MAX14577 has MUIC, Charger devices.
 * The devices share the same I2C bus and interrupt line
 * included in this mfd driver.
 *
 * MAX77836 has additional PMIC and Fuel-Gauge on different I2C slave
 * addresses.
 */

#ifndef __MAX14577_H__
#define __MAX14577_H__

#include <linux/regulator/consumer.h>

/* MAX14577 regulator IDs */
enum max14577_regulators {
        MAX14577_SAFEOUT = 0,
        MAX14577_CHARGER,

        MAX14577_REGULATOR_NUM,
};

/* MAX77836 regulator IDs */
enum max77836_regulators {
        MAX77836_SAFEOUT = 0,
        MAX77836_CHARGER,
        MAX77836_LDO1,
        MAX77836_LDO2,

        MAX77836_REGULATOR_NUM,
};

/* Board-supplied init data for one regulator on the chip. */
struct max14577_regulator_platform_data {
        int id;
        struct regulator_init_data *initdata;
        struct device_node *of_node;
};

/* Charger limits, all in micro-units (uV / uA). */
struct max14577_charger_platform_data {
        u32 constant_uvolt;
        u32 fast_charge_uamp;
        u32 eoc_uamp;
        u32 ovp_uvolt;
};

/*
 * MAX14577 MFD platform data
 */
struct max14577_platform_data {
        /* IRQ */
        int irq_base;

        /* current control GPIOs */
        int gpio_pogo_vbatt_en;
        int gpio_pogo_vbus_en;

        /* current control GPIO control function */
        int (*set_gpio_pogo_vbatt_en)(int gpio_val);
        int (*set_gpio_pogo_vbus_en)(int gpio_val);

        int (*set_gpio_pogo_cb)(int new_dev);

        struct max14577_regulator_platform_data *regulators;
};

/*
 * Valid limits of current for max14577 and max77836 chargers.
 * They must correspond to MBCICHWRCL and MBCICHWRCH fields in CHGCTRL4
 * register for given chipset.
 */
struct maxim_charger_current {
        /* Minimal current, set in CHGCTRL4/MBCICHWRCL, uA */
        unsigned int min;
        /*
         * Minimal current when high setting is active,
         * set in CHGCTRL4/MBCICHWRCH, uA
         */
        unsigned int high_start;
        /* Value of one step in high setting, uA */
        unsigned int high_step;
        /* Maximum current of high setting, uA */
        unsigned int max;
};

/* Per-chipset limit tables, indexed by device type; defined in the driver. */
extern const struct maxim_charger_current maxim_charger_currents[];
/* Translate a [min_ua, max_ua] request into a CHGCTRL4 register value. */
extern int maxim_charger_calc_reg_current(const struct maxim_charger_current *limits,
                unsigned int min_ua, unsigned int max_ua, u8 *dst);

#endif /* __MAX14577_H__ */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Network interface table.
 *
 * Network interfaces (devices) do not have a security field, so we
 * maintain a table associating each interface with a SID.
 *
 * Author: James Morris <[email protected]>
 *
 * Copyright (C) 2003 Red Hat, Inc., James Morris <[email protected]>
 * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
 *		      Paul Moore <[email protected]>
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>

#include "security.h"
#include "objsec.h"
#include "netif.h"

#define SEL_NETIF_HASH_SIZE	64
#define SEL_NETIF_HASH_MAX	1024

struct sel_netif {
	struct list_head list;
	struct netif_security_struct nsec;
	struct rcu_head rcu_head;
};

/* Readers use RCU; all writers serialize on sel_netif_lock. */
static u32 sel_netif_total;
static DEFINE_SPINLOCK(sel_netif_lock);
static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];

/**
 * sel_netif_hashfn - Hashing function for the interface table
 * @ns: the network namespace
 * @ifindex: the network interface
 *
 * Description:
 * This is the hashing function for the network interface table, it returns the
 * bucket number for the given interface.
 *
 */
static inline u32 sel_netif_hashfn(const struct net *ns, int ifindex)
{
	/* fold the namespace pointer into the index so identical ifindex
	 * values in different namespaces land in different buckets */
	return (((uintptr_t)ns + ifindex) & (SEL_NETIF_HASH_SIZE - 1));
}

/**
 * sel_netif_find - Search for an interface record
 * @ns: the network namespace
 * @ifindex: the network interface
 *
 * Description:
 * Search the network interface table and return the record matching @ifindex.
 * If an entry can not be found in the table return NULL.
 *
 * Caller must hold either rcu_read_lock() or sel_netif_lock.
 */
static inline struct sel_netif *sel_netif_find(const struct net *ns,
					       int ifindex)
{
	u32 idx = sel_netif_hashfn(ns, ifindex);
	struct sel_netif *netif;

	list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list)
		if (net_eq(netif->nsec.ns, ns) &&
		    netif->nsec.ifindex == ifindex)
			return netif;

	return NULL;
}

/**
 * sel_netif_insert - Insert a new interface into the table
 * @netif: the new interface record
 *
 * Description:
 * Add a new interface record to the network interface hash table.  Returns
 * zero on success, negative values on failure.
 *
 * Caller must hold sel_netif_lock.
 */
static int sel_netif_insert(struct sel_netif *netif)
{
	u32 idx;

	/* cap the table so a flood of interfaces can't exhaust memory */
	if (sel_netif_total >= SEL_NETIF_HASH_MAX)
		return -ENOSPC;

	idx = sel_netif_hashfn(netif->nsec.ns, netif->nsec.ifindex);
	list_add_rcu(&netif->list, &sel_netif_hash[idx]);
	sel_netif_total++;

	return 0;
}

/**
 * sel_netif_destroy - Remove an interface record from the table
 * @netif: the existing interface record
 *
 * Description:
 * Remove an existing interface record from the network interface table.
 *
 * Caller must hold sel_netif_lock; the record is freed after an RCU
 * grace period so concurrent readers remain safe.
 */
static void sel_netif_destroy(struct sel_netif *netif)
{
	list_del_rcu(&netif->list);
	sel_netif_total--;
	kfree_rcu(netif, rcu_head);
}

/**
 * sel_netif_sid_slow - Lookup the SID of a network interface using the policy
 * @ns: the network namespace
 * @ifindex: the network interface
 * @sid: interface SID
 *
 * Description:
 * This function determines the SID of a network interface by querying the
 * security policy.  The result is added to the network interface table to
 * speedup future queries.  Returns zero on success, negative values on
 * failure.
 *
 */
static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
{
	int ret = 0;
	struct sel_netif *netif;
	struct sel_netif *new;
	struct net_device *dev;

	/* the device lookup uses the caller-supplied namespace @ns
	 * (NOTE(review): an older comment here claimed init's namespace
	 * was always used — the code below does not do that) */
	dev = dev_get_by_index(ns, ifindex);
	if (unlikely(dev == NULL)) {
		pr_warn("SELinux: failure in %s(), invalid network interface (%d)\n",
			__func__, ifindex);
		return -ENOENT;
	}

	spin_lock_bh(&sel_netif_lock);
	/* recheck under the lock: another CPU may have inserted it since
	 * the caller's lockless lookup failed */
	netif = sel_netif_find(ns, ifindex);
	if (netif != NULL) {
		*sid = netif->nsec.sid;
		goto out;
	}

	ret = security_netif_sid(dev->name, sid);
	if (ret != 0)
		goto out;
	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (new) {
		new->nsec.ns = ns;
		new->nsec.ifindex = ifindex;
		new->nsec.sid = *sid;
		/* caching is best-effort: if insertion fails we still
		 * return the SID computed above */
		if (sel_netif_insert(new))
			kfree(new);
	}

out:
	spin_unlock_bh(&sel_netif_lock);
	dev_put(dev);
	if (unlikely(ret))
		pr_warn("SELinux: failure in %s(), unable to determine network interface label (%d)\n",
			__func__, ifindex);
	return ret;
}

/**
 * sel_netif_sid - Lookup the SID of a network interface
 * @ns: the network namespace
 * @ifindex: the network interface
 * @sid: interface SID
 *
 * Description:
 * This function determines the SID of a network interface using the fastest
 * method possible.  First the interface table is queried, but if an entry
 * can't be found then the policy is queried and the result is added to the
 * table to speedup future queries.  Returns zero on success, negative values
 * on failure.
 *
 */
int sel_netif_sid(struct net *ns, int ifindex, u32 *sid)
{
	struct sel_netif *netif;

	rcu_read_lock();
	netif = sel_netif_find(ns, ifindex);
	if (likely(netif != NULL)) {
		*sid = netif->nsec.sid;
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	return sel_netif_sid_slow(ns, ifindex, sid);
}

/**
 * sel_netif_kill - Remove an entry from the network interface table
 * @ns: the network namespace
 * @ifindex: the network interface
 *
 * Description:
 * This function removes the entry matching @ifindex from the network interface
 * table if it exists.
 *
 */
static void sel_netif_kill(const struct net *ns, int ifindex)
{
	struct sel_netif *netif;

	rcu_read_lock();
	spin_lock_bh(&sel_netif_lock);
	netif = sel_netif_find(ns, ifindex);
	if (netif)
		sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
	rcu_read_unlock();
}

/**
 * sel_netif_flush - Flush the entire network interface table
 *
 * Description:
 * Remove all entries from the network interface table.
 *
 */
void sel_netif_flush(void)
{
	int idx;
	struct sel_netif *netif;

	spin_lock_bh(&sel_netif_lock);
	for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++)
		/* plain (non-_safe) iteration works here because
		 * list_del_rcu() keeps the removed entry's forward
		 * pointer intact until the grace period ends */
		list_for_each_entry(netif, &sel_netif_hash[idx], list)
			sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
}

/* Drop a cached entry when its device goes down so a reused ifindex
 * can't inherit a stale SID. */
static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_DOWN)
		sel_netif_kill(dev_net(dev), dev->ifindex);

	return NOTIFY_DONE;
}

static struct notifier_block sel_netif_netdev_notifier = {
	.notifier_call = sel_netif_netdev_notifier_handler,
};

static __init int sel_netif_init(void)
{
	int i;

	if (!selinux_enabled_boot)
		return 0;

	for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
		INIT_LIST_HEAD(&sel_netif_hash[i]);

	register_netdevice_notifier(&sel_netif_netdev_notifier);

	return 0;
}

__initcall(sel_netif_init);
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 - 2015 Linaro Ltd.
 * Copyright (c) 2013 HiSilicon Limited.
 *
 * HiSilicon K3 DMA engine driver: virtual channels (struct k3_dma_chan)
 * are multiplexed onto a limited set of physical channels
 * (struct k3_dma_phy) by a tasklet; transfers are described by chained
 * hardware link-list items (struct k3_desc_hw).
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME		"k3-dma"
#define DMA_MAX_SIZE		0x1ffc
#define DMA_CYCLIC_MAX_PERIOD	0x1000
#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)

/* Controller register offsets */
#define INT_STAT		0x00
#define INT_TC1			0x04
#define INT_TC2			0x08
#define INT_ERR1		0x0c
#define INT_ERR2		0x10
#define INT_TC1_MASK		0x18
#define INT_TC2_MASK		0x1c
#define INT_ERR1_MASK		0x20
#define INT_ERR2_MASK		0x24
#define INT_TC1_RAW		0x600
#define INT_TC2_RAW		0x608
#define INT_ERR1_RAW		0x610
#define INT_ERR2_RAW		0x618
#define CH_PRI			0x688
#define CH_STAT			0x690
#define CX_CUR_CNT		0x704
/* Per-channel register offsets (relative to the channel base) */
#define CX_LLI			0x800
#define CX_CNT1			0x80c
#define CX_CNT0			0x810
#define CX_SRC			0x814
#define CX_DST			0x818
#define CX_CFG			0x81c

#define CX_LLI_CHAIN_EN		0x2
#define CX_CFG_EN		0x1
#define CX_CFG_NODEIRQ		BIT(1)
#define CX_CFG_MEM2PER		(0x1 << 2)
#define CX_CFG_PER2MEM		(0x2 << 2)
#define CX_CFG_SRCINCR		(0x1 << 31)
#define CX_CFG_DSTINCR		(0x1 << 30)

/* Hardware link-list item; layout and 32-byte alignment are fixed by HW. */
struct k3_desc_hw {
	u32 lli;
	u32 reserved[3];
	u32 count;
	u32 saddr;
	u32 daddr;
	u32 config;
} __aligned(32);

struct k3_dma_desc_sw {
	struct virt_dma_desc	vd;
	dma_addr_t		desc_hw_lli;
	size_t			desc_num;
	size_t			size;
	struct k3_desc_hw	*desc_hw;
};

struct k3_dma_phy;

struct k3_dma_chan {
	u32			ccfg;
	struct virt_dma_chan	vc;
	struct k3_dma_phy	*phy;
	struct list_head	node;
	dma_addr_t		dev_addr;
	enum dma_status		status;
	bool			cyclic;
	struct dma_slave_config	slave_config;
};

struct k3_dma_phy {
	u32			idx;
	void __iomem		*base;
	struct k3_dma_chan	*vchan;
	struct k3_dma_desc_sw	*ds_run;
	struct k3_dma_desc_sw	*ds_done;
};

struct k3_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct tasklet_struct	task;
	spinlock_t		lock;
	struct list_head	chan_pending;
	struct k3_dma_phy	*phy;
	struct k3_dma_chan	*chans;
	struct clk		*clk;
	struct dma_pool		*pool;
	u32			dma_channels;
	u32			dma_requests;
	u32			dma_channel_mask;
	unsigned int		irq;
};


#define K3_FLAG_NOCLK	BIT(1)

struct k3dma_soc_data {
	unsigned long flags;
};


#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static int k3_dma_config_write(struct dma_chan *chan,
			       enum dma_transfer_direction dir,
			       struct dma_slave_config *cfg);

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
	return container_of(chan, struct k3_dma_chan, vc.chan);
}

/* Set/clear the channel enable bit; @on == false pauses the channel. */
static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
	u32 val = 0;

	if (on) {
		val = readl_relaxed(phy->base + CX_CFG);
		val |= CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	} else {
		val = readl_relaxed(phy->base + CX_CFG);
		val &= ~CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	}
}

/* Stop a physical channel and acknowledge any pending raw interrupts. */
static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
{
	u32 val = 0;

	k3_dma_pause_dma(phy, false);

	val = 0x1 << phy->idx;
	writel_relaxed(val, d->base + INT_TC1_RAW);
	writel_relaxed(val, d->base + INT_TC2_RAW);
	writel_relaxed(val, d->base + INT_ERR1_RAW);
	writel_relaxed(val, d->base + INT_ERR2_RAW);
}

/* Program one hardware descriptor; writing CX_CFG last starts the channel. */
static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
	writel_relaxed(hw->lli, phy->base + CX_LLI);
	writel_relaxed(hw->count, phy->base + CX_CNT0);
	writel_relaxed(hw->saddr, phy->base + CX_SRC);
	writel_relaxed(hw->daddr, phy->base + CX_DST);
	writel_relaxed(hw->config, phy->base + CX_CFG);
}

/* Remaining byte count of the descriptor currently in flight. */
static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
	u32 cnt = 0;

	cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
	cnt &= 0xffff;
	return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
	return readl_relaxed(phy->base + CX_LLI);
}

/* Bitmap of physical channels that are currently busy. */
static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
	return readl_relaxed(d->base + CH_STAT);
}

static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
	if (on) {
		/* set same priority */
		writel_relaxed(0x0, d->base + CH_PRI);

		/* unmask irq */
		writel_relaxed(0xffff, d->base + INT_TC1_MASK);
		writel_relaxed(0xffff, d->base + INT_TC2_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
	} else {
		/* mask irq */
		writel_relaxed(0x0, d->base + INT_TC1_MASK);
		writel_relaxed(0x0, d->base + INT_TC2_MASK);
		writel_relaxed(0x0, d->base + INT_ERR1_MASK);
		writel_relaxed(0x0, d->base + INT_ERR2_MASK);
	}
}

/*
 * Interrupt handler: TC1 signals completion of a whole descriptor chain,
 * TC2 signals a per-period interrupt for cyclic transfers.  Completed
 * channels are collected in @irq_chan and the tasklet is scheduled to
 * reassign the freed physical channels.
 */
static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c;
	u32 stat = readl_relaxed(d->base + INT_STAT);
	u32 tc1  = readl_relaxed(d->base + INT_TC1);
	u32 tc2  = readl_relaxed(d->base + INT_TC2);
	u32 err1 = readl_relaxed(d->base + INT_ERR1);
	u32 err2 = readl_relaxed(d->base + INT_ERR2);
	u32 i, irq_chan = 0;

	while (stat) {
		i = __ffs(stat);
		stat &= ~BIT(i);
		if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {

			p = &d->phy[i];
			c = p->vchan;
			if (c && (tc1 & BIT(i))) {
				spin_lock(&c->vc.lock);
				if (p->ds_run != NULL) {
					vchan_cookie_complete(&p->ds_run->vd);
					p->ds_done = p->ds_run;
					p->ds_run = NULL;
				}
				spin_unlock(&c->vc.lock);
			}
			if (c && (tc2 & BIT(i))) {
				spin_lock(&c->vc.lock);
				if (p->ds_run != NULL)
					vchan_cyclic_callback(&p->ds_run->vd);
				spin_unlock(&c->vc.lock);
			}
			irq_chan |= BIT(i);
		}
		if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
			dev_warn(d->slave.dev, "DMA ERR\n");
	}

	writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
	writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
	writel_relaxed(err1, d->base + INT_ERR1_RAW);
	writel_relaxed(err2, d->base + INT_ERR2_RAW);

	if (irq_chan)
		tasklet_schedule(&d->task);

	if (irq_chan || err1 || err2)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

/*
 * Start the next issued descriptor on the channel's physical channel.
 * Returns 0 on success, -EAGAIN if nothing can be started right now.
 * Caller must hold c->vc.lock.
 */
static int k3_dma_start_txd(struct k3_dma_chan *c)
{
	struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!c->phy)
		return -EAGAIN;

	if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
		return -EAGAIN;

	/* Avoid losing track of ds_run if a transaction is in flight */
	if (c->phy->ds_run)
		return -EAGAIN;

	if (vd) {
		struct k3_dma_desc_sw *ds =
			container_of(vd, struct k3_dma_desc_sw, vd);
		/*
		 * fetch and remove request from vc->desc_issued
		 * so vc->desc_issued only contains desc pending
		 */
		list_del(&ds->vd.node);

		c->phy->ds_run = ds;
		c->phy->ds_done = NULL;
		/* start dma */
		k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
		return 0;
	}
	c->phy->ds_run = NULL;
	c->phy->ds_done = NULL;
	return -EAGAIN;
}

/*
 * Scheduler tasklet: restarts finished channels, frees idle physical
 * channels, and hands free physical channels to waiting virtual channels.
 * Lock order here is vc.lock (per channel) and d->lock (device), never
 * nested inside each other in this function.
 */
static void k3_dma_tasklet(struct tasklet_struct *t)
{
	struct k3_dma_dev *d = from_tasklet(d, t, task);
	struct k3_dma_phy *p;
	struct k3_dma_chan *c, *cn;
	unsigned pch, pch_alloc = 0;

	/* check new dma request of running channel in vc->desc_issued */
	list_for_each_entry_safe(c, cn, &d->slave.channels,
				 vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && p->ds_done) {
			if (k3_dma_start_txd(c)) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	/* check new channel request in d->chan_pending */
	spin_lock_irq(&d->lock);
	for (pch = 0; pch < d->dma_channels; pch++) {
		if (!(d->dma_channel_mask & (1 << pch)))
			continue;

		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct k3_dma_chan, node);
			/* remove from d->chan_pending */
			list_del_init(&c->node);
			pch_alloc |= 1 << pch;
			/* Mark this channel allocated */
			p->vchan = c;
			c->phy = p;
			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < d->dma_channels; pch++) {
		if (!(d->dma_channel_mask & (1 << pch)))
			continue;

		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;
			if (c) {
				spin_lock_irq(&c->vc.lock);
				k3_dma_start_txd(c);
				spin_unlock_irq(&c->vc.lock);
			}
		}
	}
}

static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
	c->ccfg = 0;
}

/*
 * Report transfer status and residue.  For an in-flight descriptor the
 * residue is the hardware's current count plus the counts of all link
 * list items not yet reached.
 */
static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd && !c->cyclic) {
		bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
	} else if ((!p) || (!p->ds_run)) {
		bytes = 0;
	} else {
		struct k3_dma_desc_sw *ds = p->ds_run;
		u32 clli = 0, index = 0;

		bytes = k3_dma_get_curr_cnt(d, p);
		clli = k3_dma_get_curr_lli(p);
		index = ((clli - ds->desc_hw_lli) /
				sizeof(struct k3_desc_hw)) + 1;
		for (; index < ds->desc_num; index++) {
			bytes += ds->desc_hw[index].count;
			/* end of lli */
			if (!ds->desc_hw[index].lli)
				break;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	dma_set_residue(state, bytes);
	return ret;
}

static void k3_dma_issue_pending(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* add request to vc->desc_issued */
	if (vchan_issue_pending(&c->vc)) {
		spin_lock(&d->lock);
		if (!c->phy) {
			if (list_empty(&c->node)) {
				/* if new channel, add chan_pending */
				list_add_tail(&c->node, &d->chan_pending);
				/* check in tasklet */
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
		}
		spin_unlock(&d->lock);
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/* Fill link-list item @num; every item except the last chains to num+1. */
static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
			dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
	if (num != ds->desc_num - 1)
		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
			sizeof(struct k3_desc_hw);

	ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
	ds->desc_hw[num].count = len;
	ds->desc_hw[num].saddr = src;
	ds->desc_hw[num].daddr = dst;
	ds->desc_hw[num].config = ccfg;
}

/* Allocate a software descriptor plus @num hardware LLIs from the pool. */
static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
							struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);

	if (num > lli_limit) {
		dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
			&c->vc, num, lli_limit);
		return NULL;
	}

	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
	if (!ds)
		return NULL;

	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
		kfree(ds);
		return NULL;
	}
	ds->desc_num = num;
	return ds;
}

static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
	struct dma_chan *chan,	dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t copy = 0;
	int num = 0;

	if (!len)
		return NULL;

	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 0;
	ds->size = len;
	num = 0;

	if (!c->ccfg) {
		/* default is memtomem, without calling device_config */
		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
	}

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);
		k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

		src += copy;
		dst += copy;
		len -= copy;
	} while (len);

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

	if (sgl == NULL)
		return NULL;

	c->cyclic = 0;

	/* each sg entry may need several LLIs if it exceeds DMA_MAX_SIZE */
	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;
	num = 0;
	k3_dma_config_write(chan, dir, &c->slave_config);

	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			if (dir == DMA_MEM_TO_DEV) {
				src = addr;
				dst = c->dev_addr;
			} else if (dir == DMA_DEV_TO_MEM) {
				src = c->dev_addr;
				dst = addr;
			}

			k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	ds->size = total;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static struct dma_async_tx_descriptor *
k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
		       size_t buf_len, size_t period_len,
		       enum dma_transfer_direction dir,
		       unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	dma_addr_t addr, src = 0, dst = 0;
	int num = 1, since = 0;
	size_t modulo = DMA_CYCLIC_MAX_PERIOD;
	u32 en_tc2 = 0;

	dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
	       __func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
	       buf_len, period_len, (int)dir);

	avail = buf_len;
	if (avail > modulo)
		num += DIV_ROUND_UP(avail, modulo) - 1;

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 1;
	addr = buf_addr;
	avail = buf_len;
	total = avail;
	num = 0;
	k3_dma_config_write(chan, dir, &c->slave_config);

	if (period_len < modulo)
		modulo = period_len;

	do {
		len = min_t(size_t, avail, modulo);

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = c->dev_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = c->dev_addr;
			dst = addr;
		}
		since += len;
		if (since >= period_len) {
			/* descriptor asks for TC2 interrupt on completion */
			en_tc2 = CX_CFG_NODEIRQ;
			since -= period_len;
		} else
			en_tc2 = 0;

		k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);

		addr += len;
		avail -= len;
	} while (avail);

	/* "Cyclic" == end of link points back to start of link */
	ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;

	ds->size = total;

	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

/* Cache the slave config; it is applied per-direction at prep time. */
static int k3_dma_config(struct dma_chan *chan,
			 struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);

	memcpy(&c->slave_config, cfg, sizeof(*cfg));

	return 0;
}

/* Translate the cached slave config into the channel's CX_CFG value. */
static int k3_dma_config_write(struct dma_chan *chan,
			       enum dma_transfer_direction dir,
			       struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	u32 maxburst = 0, val = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (dir == DMA_DEV_TO_MEM) {
		c->ccfg = CX_CFG_DSTINCR;
		c->dev_addr = cfg->src_addr;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
	} else if (dir == DMA_MEM_TO_DEV) {
		c->ccfg = CX_CFG_SRCINCR;
		c->dev_addr = cfg->dst_addr;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
	}
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		val =  __ffs(width);
		break;
	default:
		val = 3;
		break;
	}
	c->ccfg |= (val << 12) | (val << 16);

	if ((maxburst == 0) || (maxburst > 16))
		val = 15;
	else
		val = maxburst - 1;
	c->ccfg |= (val << 20) | (val << 24);
	c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;

	/* specific request line */
	c->ccfg |= c->vc.chan.chan_id << 4;

	return 0;
}

static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
	struct k3_dma_desc_sw *ds =
		container_of(vd, struct k3_dma_desc_sw, vd);
	struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);

	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
	kfree(ds);
}

static int k3_dma_terminate_all(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;
	LIST_HEAD(head);

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	if (p) {
		/* vchan is assigned to a pchan - stop the channel */
		k3_dma_terminate_chan(p, d);
		c->phy = NULL;
		p->vchan = NULL;
		if (p->ds_run) {
			vchan_terminate_vdesc(&p->ds_run->vd);
			p->ds_run = NULL;
		}
		p->ds_done = NULL;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static void k3_dma_synchronize(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);

	vchan_synchronize(&c->vc);
}

static int k3_dma_transfer_pause(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;
		if (p) {
			k3_dma_pause_dma(p, false);
		} else {
			/* not yet running: just drop it from the pending list */
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}

	return 0;
}

static int k3_dma_transfer_resume(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;
		if (p) {
			k3_dma_pause_dma(p, true);
		} else if (!list_empty(&c->vc.desc_issued)) {
			/* re-queue so the tasklet can assign a pchan */
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static const struct k3dma_soc_data k3_v1_dma_data = {
	.flags = 0,
};

static const struct k3dma_soc_data asp_v1_dma_data = {
	.flags = K3_FLAG_NOCLK,
};

static const struct of_device_id k3_pdma_dt_ids[] = {
	{ .compatible = "hisilicon,k3-dma-1.0",
	  .data = &k3_v1_dma_data
	},
	{ .compatible = "hisilicon,hisi-pcm-asp-dma-1.0",
	  .data = &asp_v1_dma_data
	},
	{}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);

/* #dma-cells translation: args[0] is the request line / channel index. */
static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	struct k3_dma_dev *d = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];

	if (request >= d->dma_requests)
		return NULL;

	return dma_get_slave_channel(&(d->chans[request].vc.chan));
}

static int k3_dma_probe(struct platform_device *op)
{
	const struct k3dma_soc_data *soc_data;
	struct k3_dma_dev *d;
	int i, ret, irq = 0;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	soc_data = device_get_match_data(&op->dev);
	if (!soc_data)
		return -EINVAL;

	d->base = devm_platform_ioremap_resource(op, 0);
	if (IS_ERR(d->base))
		return PTR_ERR(d->base);

	of_property_read_u32((&op->dev)->of_node,
			"dma-channels", &d->dma_channels);
	of_property_read_u32((&op->dev)->of_node,
			"dma-requests", &d->dma_requests);
	ret = of_property_read_u32((&op->dev)->of_node,
			"dma-channel-mask", &d->dma_channel_mask);
	if (ret) {
		dev_warn(&op->dev,
			 "dma-channel-mask doesn't exist, considering all as available.\n");
		d->dma_channel_mask = (u32)~0UL;
	}

	if (!(soc_data->flags & K3_FLAG_NOCLK)) {
		d->clk = devm_clk_get(&op->dev, NULL);
		if (IS_ERR(d->clk)) {
			dev_err(&op->dev, "no dma clk\n");
			return PTR_ERR(d->clk);
		}
	}

	irq = platform_get_irq(op, 0);
	ret = devm_request_irq(&op->dev, irq,
			k3_dma_int_handler, 0, DRIVER_NAME, d);
	if (ret)
		return ret;

	d->irq = irq;

	/* A DMA memory pool for LLIs, align on 32-byte boundary */
	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
					LLI_BLOCK_SIZE, 32, 0);
	if (!d->pool)
		return -ENOMEM;

	/* init phy channel */
	d->phy = devm_kcalloc(&op->dev,
		d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL);
	if (d->phy == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_channels; i++) {
		struct k3_dma_phy *p;

		if (!(d->dma_channel_mask & BIT(i)))
			continue;

		p = &d->phy[i];
		p->idx = i;
		p->base = d->base + i * 0x40;
	}

	INIT_LIST_HEAD(&d->slave.channels);
	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.dev = &op->dev;
	d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
	d->slave.device_tx_status = k3_dma_tx_status;
	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
	d->slave.device_issue_pending = k3_dma_issue_pending;
	d->slave.device_config = k3_dma_config;
	d->slave.device_pause = k3_dma_transfer_pause;
	d->slave.device_resume = k3_dma_transfer_resume;
	d->slave.device_terminate_all = k3_dma_terminate_all;
	d->slave.device_synchronize = k3_dma_synchronize;
	d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;

	/* init virtual channel */
	d->chans = devm_kcalloc(&op->dev,
		d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL);
	if (d->chans == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_requests; i++) {
		struct k3_dma_chan *c = &d->chans[i];

		c->status = DMA_IN_PROGRESS;
		INIT_LIST_HEAD(&c->node);
		c->vc.desc_free = k3_dma_free_desc;
		vchan_init(&c->vc, &d->slave);
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	k3_dma_enable_dma(d, true);

	ret = dma_async_device_register(&d->slave);
	if (ret)
		goto dma_async_register_fail;

	ret = of_dma_controller_register((&op->dev)->of_node,
					k3_of_dma_simple_xlate, d);
	if (ret)
		goto of_dma_register_fail;

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	tasklet_setup(&d->task, k3_dma_tasklet);
	platform_set_drvdata(op, d);
	dev_info(&op->dev, "initialized\n");

	return 0;

of_dma_register_fail:
	dma_async_device_unregister(&d->slave);
dma_async_register_fail:
	clk_disable_unprepare(d->clk);
	return ret;
}

static void k3_dma_remove(struct platform_device *op)
{
	struct k3_dma_chan *c, *cn;
	struct k3_dma_dev *d = platform_get_drvdata(op);

	dma_async_device_unregister(&d->slave);
	of_dma_controller_free((&op->dev)->of_node);

	devm_free_irq(&op->dev, d->irq, d);

	list_for_each_entry_safe(c, cn, &d->slave.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
	tasklet_kill(&d->task);
	clk_disable_unprepare(d->clk);
}

#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	u32 stat = 0;

	/* refuse to suspend while any physical channel is still busy */
	stat = k3_dma_get_chan_stat(d);
	if (stat) {
		dev_warn(d->slave.dev,
			"chan %d is running fail to suspend\n", stat);
		return -1;
	}
	k3_dma_enable_dma(d, false);
	clk_disable_unprepare(d->clk);
	return 0;
}

static int k3_dma_resume_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	int ret = 0;

	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	k3_dma_enable_dma(d, true);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

static struct platform_driver k3_pdma_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &k3_dma_pmops,
		.of_match_table = k3_pdma_dt_ids,
	},
	.probe		= k3_dma_probe,
	.remove		= k3_dma_remove,
};

module_platform_driver(k3_pdma_driver);

MODULE_DESCRIPTION("HiSilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Z8536 CIO Internal registers
 */

#ifndef _Z8536_H
#define _Z8536_H

/* Master Interrupt Control register */
#define Z8536_INT_CTRL_REG		0x00
#define Z8536_INT_CTRL_MIE		BIT(7)	/* Master Interrupt Enable */
#define Z8536_INT_CTRL_DLC		BIT(6)	/* Disable Lower Chain */
#define Z8536_INT_CTRL_NV		BIT(5)	/* No Vector */
#define Z8536_INT_CTRL_PA_VIS		BIT(4)	/* Port A Vect Inc Status */
#define Z8536_INT_CTRL_PB_VIS		BIT(3)	/* Port B Vect Inc Status */
#define Z8536_INT_CTRL_VT_VIS		BIT(2)	/* C/T Vect Inc Status */
#define Z8536_INT_CTRL_RJA		BIT(1)	/* Right Justified Addresses */
#define Z8536_INT_CTRL_RESET		BIT(0)	/* Reset */

/* Master Configuration Control register */
#define Z8536_CFG_CTRL_REG		0x01
#define Z8536_CFG_CTRL_PBE		BIT(7)	/* Port B Enable */
#define Z8536_CFG_CTRL_CT1E		BIT(6)	/* C/T 1 Enable */
#define Z8536_CFG_CTRL_CT2E		BIT(5)	/* C/T 2 Enable */
#define Z8536_CFG_CTRL_PCE_CT3E		BIT(4)	/* Port C & C/T 3 Enable */
#define Z8536_CFG_CTRL_PLC		BIT(3)	/* Port A/B Link Control */
#define Z8536_CFG_CTRL_PAE		BIT(2)	/* Port A Enable */
#define Z8536_CFG_CTRL_LC(x)		(((x) & 0x3) << 0) /* Link Control */
#define Z8536_CFG_CTRL_LC_INDEP		Z8536_CFG_CTRL_LC(0)/* Independent */
#define Z8536_CFG_CTRL_LC_GATE		Z8536_CFG_CTRL_LC(1)/* 1 Gates 2 */
#define Z8536_CFG_CTRL_LC_TRIG		Z8536_CFG_CTRL_LC(2)/* 1 Triggers 2 */
#define Z8536_CFG_CTRL_LC_CLK		Z8536_CFG_CTRL_LC(3)/* 1 Clocks 2 */
#define Z8536_CFG_CTRL_LC_MASK		Z8536_CFG_CTRL_LC(3)

/* Interrupt Vector registers */
#define Z8536_PA_INT_VECT_REG		0x02
#define Z8536_PB_INT_VECT_REG		0x03
#define Z8536_CT_INT_VECT_REG		0x04
#define Z8536_CURR_INT_VECT_REG		0x1f

/* Port A/B & Counter/Timer 1/2/3 Command and Status registers */
#define Z8536_PA_CMDSTAT_REG		0x08
#define Z8536_PB_CMDSTAT_REG		0x09
#define Z8536_CT1_CMDSTAT_REG		0x0a
#define Z8536_CT2_CMDSTAT_REG		0x0b
#define Z8536_CT3_CMDSTAT_REG		0x0c
#define Z8536_CT_CMDSTAT_REG(x)		(0x0a + (x))
#define Z8536_CMD(x)			(((x) & 0x7) << 5)
#define Z8536_CMD_NULL			Z8536_CMD(0)	/* Null Code */
#define Z8536_CMD_CLR_IP_IUS		Z8536_CMD(1)	/* Clear IP & IUS */
#define Z8536_CMD_SET_IUS		Z8536_CMD(2)	/* Set IUS */
#define Z8536_CMD_CLR_IUS		Z8536_CMD(3)	/* Clear IUS */
#define Z8536_CMD_SET_IP		Z8536_CMD(4)	/* Set IP */
#define Z8536_CMD_CLR_IP		Z8536_CMD(5)	/* Clear IP */
#define Z8536_CMD_SET_IE		Z8536_CMD(6)	/* Set IE */
#define Z8536_CMD_CLR_IE		Z8536_CMD(7)	/* Clear IE */
#define Z8536_CMD_MASK			Z8536_CMD(7)

#define Z8536_STAT_IUS			BIT(7)	/* Interrupt Under Service */
#define Z8536_STAT_IE			BIT(6)	/* Interrupt Enable */
#define Z8536_STAT_IP			BIT(5)	/* Interrupt Pending */
#define Z8536_STAT_ERR			BIT(4)	/* Interrupt Error */
#define Z8536_STAT_IE_IP		(Z8536_STAT_IE | Z8536_STAT_IP)

#define Z8536_PAB_STAT_ORE		BIT(3)	/* Output Register Empty */
#define Z8536_PAB_STAT_IRF		BIT(2)	/* Input Register Full */
#define Z8536_PAB_STAT_PMF		BIT(1)	/* Pattern Match Flag */
#define Z8536_PAB_CMDSTAT_IOE		BIT(0)	/* Interrupt On Error */

#define Z8536_CT_CMD_RCC		BIT(3)	/* Read Counter Control */
#define Z8536_CT_CMDSTAT_GCB		BIT(2)	/* Gate Command Bit */
#define Z8536_CT_CMD_TCB		BIT(1)	/* Trigger Command Bit */
#define Z8536_CT_STAT_CIP		BIT(0)	/* Count In Progress */

/* Port Data registers */
#define Z8536_PA_DATA_REG		0x0d
#define Z8536_PB_DATA_REG		0x0e
#define Z8536_PC_DATA_REG		0x0f

/* Counter/Timer 1/2/3 Current Count registers */
#define Z8536_CT1_VAL_MSB_REG		0x10
#define Z8536_CT1_VAL_LSB_REG		0x11
#define Z8536_CT2_VAL_MSB_REG		0x12
#define Z8536_CT2_VAL_LSB_REG		0x13
#define Z8536_CT3_VAL_MSB_REG		0x14
#define Z8536_CT3_VAL_LSB_REG		0x15
#define Z8536_CT_VAL_MSB_REG(x)		(0x10 + ((x) * 2))
#define Z8536_CT_VAL_LSB_REG(x)		(0x11 + ((x) * 2))

/* Counter/Timer 1/2/3 Time Constant registers */
#define Z8536_CT1_RELOAD_MSB_REG	0x16
#define Z8536_CT1_RELOAD_LSB_REG	0x17
#define Z8536_CT2_RELOAD_MSB_REG	0x18
#define Z8536_CT2_RELOAD_LSB_REG	0x19
#define Z8536_CT3_RELOAD_MSB_REG	0x1a
#define Z8536_CT3_RELOAD_LSB_REG	0x1b
#define Z8536_CT_RELOAD_MSB_REG(x)	(0x16 + ((x) * 2))
#define Z8536_CT_RELOAD_LSB_REG(x)	(0x17 + ((x) * 2))

/* Counter/Timer 1/2/3 Mode Specification registers */
#define Z8536_CT1_MODE_REG		0x1c
#define Z8536_CT2_MODE_REG		0x1d
#define Z8536_CT3_MODE_REG		0x1e
#define Z8536_CT_MODE_REG(x)		(0x1c + (x))
#define Z8536_CT_MODE_CSC		BIT(7)	/* Continuous/Single Cycle */
#define Z8536_CT_MODE_EOE		BIT(6)	/* External Output Enable */
#define Z8536_CT_MODE_ECE		BIT(5)	/* External Count Enable */
#define Z8536_CT_MODE_ETE		BIT(4)	/* External Trigger Enable */
#define Z8536_CT_MODE_EGE		BIT(3)	/* External Gate Enable */
#define Z8536_CT_MODE_REB		BIT(2)	/* Retrigger Enable Bit */
#define Z8536_CT_MODE_DCS(x)		(((x) & 0x3) << 0) /* Duty Cycle */
#define Z8536_CT_MODE_DCS_PULSE		Z8536_CT_MODE_DCS(0)	/* Pulse */
#define Z8536_CT_MODE_DCS_ONESHOT	Z8536_CT_MODE_DCS(1)	/* One-Shot */
#define Z8536_CT_MODE_DCS_SQRWAVE	Z8536_CT_MODE_DCS(2)	/* Square Wave */
#define Z8536_CT_MODE_DCS_DO_NOT_USE	Z8536_CT_MODE_DCS(3)	/* Do Not Use */
#define Z8536_CT_MODE_DCS_MASK		Z8536_CT_MODE_DCS(3)

/* Port A/B Mode Specification registers */
#define Z8536_PA_MODE_REG		0x20
#define Z8536_PB_MODE_REG		0x28
/*
 * FIX(review): these selectors were previously written as
 * Z8536_PAB_MODE_PTS(0 << 6) .. Z8536_PAB_MODE_PTS(3 << 6), double-applying
 * the << 6 shift.  Since PTS() masks its argument with 0x3 before shifting,
 * every value evaluated to 0.  The argument must be the raw 2-bit selector,
 * matching the LC()/DCS()/PMS()/HST() macros in this header.
 */
#define Z8536_PAB_MODE_PTS(x)		(((x) & 0x3) << 6)	/* Port type */
#define Z8536_PAB_MODE_PTS_BIT		Z8536_PAB_MODE_PTS(0)	/* Bit */
#define Z8536_PAB_MODE_PTS_INPUT	Z8536_PAB_MODE_PTS(1)	/* Input */
#define Z8536_PAB_MODE_PTS_OUTPUT	Z8536_PAB_MODE_PTS(2)	/* Output */
#define Z8536_PAB_MODE_PTS_BIDIR	Z8536_PAB_MODE_PTS(3)	/* Bidir */
#define Z8536_PAB_MODE_PTS_MASK		Z8536_PAB_MODE_PTS(3)
#define Z8536_PAB_MODE_ITB		BIT(5)	/* Interrupt on Two Bytes */
#define Z8536_PAB_MODE_SB		BIT(4)	/* Single Buffered mode */
#define Z8536_PAB_MODE_IMO		BIT(3)	/* Interrupt on Match Only */
#define Z8536_PAB_MODE_PMS(x)		(((x) & 0x3) << 1)	/* Pattern Mode */
#define Z8536_PAB_MODE_PMS_DISABLE	Z8536_PAB_MODE_PMS(0)	/* Disabled */
#define Z8536_PAB_MODE_PMS_AND		Z8536_PAB_MODE_PMS(1)	/* "AND" */
#define Z8536_PAB_MODE_PMS_OR		Z8536_PAB_MODE_PMS(2)	/* "OR" */
#define Z8536_PAB_MODE_PMS_OR_PEV	Z8536_PAB_MODE_PMS(3)	/* "OR-Priority" */
#define Z8536_PAB_MODE_PMS_MASK		Z8536_PAB_MODE_PMS(3)
#define Z8536_PAB_MODE_LPM		BIT(0)	/* Latch on Pattern Match */
#define Z8536_PAB_MODE_DTE		BIT(0)	/* Deskew Timer Enabled */

/* Port A/B Handshake Specification registers */
#define Z8536_PA_HANDSHAKE_REG		0x21
#define Z8536_PB_HANDSHAKE_REG		0x29
#define Z8536_PAB_HANDSHAKE_HST(x)	(((x) & 0x3) << 6)	/* Handshake Type */
#define Z8536_PAB_HANDSHAKE_HST_INTER	Z8536_PAB_HANDSHAKE_HST(0)/*Interlock*/
#define Z8536_PAB_HANDSHAKE_HST_STROBED	Z8536_PAB_HANDSHAKE_HST(1)/* Strobed */
#define Z8536_PAB_HANDSHAKE_HST_PULSED	Z8536_PAB_HANDSHAKE_HST(2)/* Pulsed */
#define Z8536_PAB_HANDSHAKE_HST_3WIRE	Z8536_PAB_HANDSHAKE_HST(3)/* 3-Wire */
#define Z8536_PAB_HANDSHAKE_HST_MASK	Z8536_PAB_HANDSHAKE_HST(3)
#define Z8536_PAB_HANDSHAKE_RWS(x)	(((x) & 0x7) << 3)	/* Req/Wait */
#define Z8536_PAB_HANDSHAKE_RWS_DISABLE	Z8536_PAB_HANDSHAKE_RWS(0)/* Disabled */
#define Z8536_PAB_HANDSHAKE_RWS_OUTWAIT	Z8536_PAB_HANDSHAKE_RWS(1)/* Out Wait */
#define Z8536_PAB_HANDSHAKE_RWS_INWAIT	Z8536_PAB_HANDSHAKE_RWS(3)/* In Wait */
#define Z8536_PAB_HANDSHAKE_RWS_SPREQ	Z8536_PAB_HANDSHAKE_RWS(4)/* Special */
#define Z8536_PAB_HANDSHAKE_RWS_OUTREQ	Z8536_PAB_HANDSHAKE_RWS(5)/* Out Req */
#define Z8536_PAB_HANDSHAKE_RWS_INREQ	Z8536_PAB_HANDSHAKE_RWS(7)/* In Req */
#define Z8536_PAB_HANDSHAKE_RWS_MASK	Z8536_PAB_HANDSHAKE_RWS(7)
#define Z8536_PAB_HANDSHAKE_DESKEW(x)	((x) << 0)/* Deskew Time */
#define Z8536_PAB_HANDSHAKE_DESKEW_MASK	(3 << 0)/* Deskew Time mask */

/*
 * Port A/B/C Data Path Polarity registers
 *
 *	0 = Non-Inverting
 *	1 = Inverting
 */
#define Z8536_PA_DPP_REG		0x22
#define Z8536_PB_DPP_REG		0x2a
#define Z8536_PC_DPP_REG		0x05

/*
 * Port A/B/C Data Direction registers
 *
 *	0 = Output bit
 *	1 = Input bit
 */
#define Z8536_PA_DD_REG			0x23
#define Z8536_PB_DD_REG			0x2b
#define Z8536_PC_DD_REG			0x06

/*
 * Port A/B/C Special I/O Control registers
 *
 *	0 = Normal Input or Output
 *	1 = Output with open drain or Input with 1's catcher
 */
#define Z8536_PA_SIO_REG		0x24
#define Z8536_PB_SIO_REG		0x2c
#define Z8536_PC_SIO_REG		0x07

/*
 * Port A/B Pattern Polarity/Transition/Mask registers
 *
 *	PM PT PP  Pattern Specification
 *	-- -- --  -------------------------------------
 *	0  0  x   Bit masked off
 *	0  1  x   Any transition
 *	1  0  0   Zero (low-level)
 *	1  0  1   One (high-level)
 *	1  1  0   One-to-zero transition (falling-edge)
 *	1  1  1   Zero-to-one transition (rising-edge)
 */
#define Z8536_PA_PP_REG			0x25
#define Z8536_PB_PP_REG			0x2d
#define Z8536_PA_PT_REG			0x26
#define Z8536_PB_PT_REG			0x2e
#define Z8536_PA_PM_REG			0x27
#define Z8536_PB_PM_REG			0x2f

#endif	/* _Z8536_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI LP8788 MFD - ADC driver
 *
 * Copyright 2012 Texas Instruments
 *
 * Author: Milo(Woogyom) Kim <[email protected]>
 */

#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/iio/driver.h>
#include <linux/iio/machine.h>
#include <linux/mfd/lp8788.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* register address */
#define LP8788_ADC_CONF			0x60
#define LP8788_ADC_RAW			0x61
#define LP8788_ADC_DONE			0x63

#define ADC_CONV_START			1	/* CONF bit 0: start a conversion */

/*
 * Per-device state: parent MFD handle, the registered consumer map, and a
 * lock serializing the start/poll/read-result sequence in read_raw.
 */
struct lp8788_adc {
	struct lp8788 *lp;
	const struct iio_map *map;
	struct mutex lock;
};

/*
 * Fixed per-channel scale factors; read_raw splits each into the
 * IIO_VAL_INT_PLUS_MICRO integer/micro parts (units per datasheet --
 * TODO confirm against the LP8788 datasheet).
 */
static const int lp8788_scale[LPADC_MAX] = {
	[LPADC_VBATT_5P5] = 1343101,
	[LPADC_VIN_CHG] = 3052503,
	[LPADC_IBATT] = 610500,
	[LPADC_IC_TEMP] = 61050,
	[LPADC_VBATT_6P0] = 1465201,
	[LPADC_VBATT_5P0] = 1221001,
	[LPADC_ADC1] = 610500,
	[LPADC_ADC2] = 610500,
	[LPADC_VDD] = 1025641,
	[LPADC_VCOIN] = 757020,
	[LPADC_ADC3] = 610500,
	[LPADC_ADC4] = 610500,
};

/*
 * lp8788_get_adc_result - start one conversion for @id, poll the DONE
 * register, then assemble the 12-bit raw result into *val.
 * Returns 0 on success or a negative I/O error code.
 */
static int lp8788_get_adc_result(struct lp8788_adc *adc, enum lp8788_adc_id id,
				 int *val)
{
	unsigned int msb;
	unsigned int lsb;
	unsigned int result;
	u8 data;
	u8 rawdata[2];
	int size = ARRAY_SIZE(rawdata);
	int retry = 5;
	int ret;

	/* channel id in bits 7:1, start bit in bit 0 */
	data = (id << 1) | ADC_CONV_START;
	ret = lp8788_write_byte(adc->lp, LP8788_ADC_CONF, data);
	if (ret)
		goto err_io;

	/* retry until adc conversion is done */
	data = 0;
	while (retry--) {
		usleep_range(100, 200);

		ret = lp8788_read_byte(adc->lp, LP8788_ADC_DONE, &data);
		if (ret)
			goto err_io;

		/* conversion done */
		if (data)
			break;
	}

	/* NOTE(review): falls through to the read even if DONE never set */
	ret = lp8788_read_multi_bytes(adc->lp, LP8788_ADC_RAW, rawdata, size);
	if (ret)
		goto err_io;

	/* 12-bit result: 8 MSBs in rawdata[0], 4 LSBs in rawdata[1] bits 7:4 */
	msb = (rawdata[0] << 4) & 0x00000ff0;
	lsb = (rawdata[1] >> 4) & 0x0000000f;
	result = msb | lsb;
	*val = result;

	return 0;

err_io:
	return ret;
}

/* IIO read_raw: raw conversion or fixed per-channel scale, under the lock */
static int lp8788_adc_read_raw(struct iio_dev *indio_dev,
				struct iio_chan_spec const *chan,
				int *val, int *val2, long mask)
{
	struct lp8788_adc *adc = iio_priv(indio_dev);
	enum lp8788_adc_id id = chan->channel;
	int ret;

	mutex_lock(&adc->lock);

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		ret = lp8788_get_adc_result(adc, id, val) ? -EIO : IIO_VAL_INT;
		break;
	case IIO_CHAN_INFO_SCALE:
		*val = lp8788_scale[id] / 1000000;
		*val2 = lp8788_scale[id] % 1000000;
		ret = IIO_VAL_INT_PLUS_MICRO;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&adc->lock);

	return ret;
}

static const struct iio_info lp8788_adc_info = {
	.read_raw = &lp8788_adc_read_raw,
};

/* Channel template: every channel exposes RAW and SCALE */
#define LP8788_CHAN(_id, _type) {				\
		.type = _type,					\
		.indexed = 1,					\
		.channel = LPADC_##_id,				\
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |	\
			BIT(IIO_CHAN_INFO_SCALE),		\
		.datasheet_name = #_id,				\
}

static const struct iio_chan_spec lp8788_adc_channels[] = {
	[LPADC_VBATT_5P5] = LP8788_CHAN(VBATT_5P5, IIO_VOLTAGE),
	[LPADC_VIN_CHG] = LP8788_CHAN(VIN_CHG, IIO_VOLTAGE),
	[LPADC_IBATT] = LP8788_CHAN(IBATT, IIO_CURRENT),
	[LPADC_IC_TEMP] = LP8788_CHAN(IC_TEMP, IIO_TEMP),
	[LPADC_VBATT_6P0] = LP8788_CHAN(VBATT_6P0, IIO_VOLTAGE),
	[LPADC_VBATT_5P0] = LP8788_CHAN(VBATT_5P0, IIO_VOLTAGE),
	[LPADC_ADC1] = LP8788_CHAN(ADC1, IIO_VOLTAGE),
	[LPADC_ADC2] = LP8788_CHAN(ADC2, IIO_VOLTAGE),
	[LPADC_VDD] = LP8788_CHAN(VDD, IIO_VOLTAGE),
	[LPADC_VCOIN] = LP8788_CHAN(VCOIN, IIO_VOLTAGE),
	[LPADC_ADC3] = LP8788_CHAN(ADC3, IIO_VOLTAGE),
	[LPADC_ADC4] = LP8788_CHAN(ADC4, IIO_VOLTAGE),
};

/* default maps used by iio consumer (lp8788-charger driver) */
static const struct iio_map lp8788_default_iio_maps[] = {
	IIO_MAP("VBATT_5P0", "lp8788-charger", "lp8788_vbatt_5p0"),
	IIO_MAP("ADC1", "lp8788-charger", "lp8788_adc1"),
	{ }
};

/* Register the consumer map; platform data overrides the default table */
static int lp8788_iio_map_register(struct device *dev,
				struct iio_dev *indio_dev,
				struct lp8788_platform_data *pdata,
				struct lp8788_adc *adc)
{
	const struct iio_map *map;
	int ret;

	map = (!pdata || !pdata->adc_pdata) ?
		lp8788_default_iio_maps : pdata->adc_pdata;

	ret = devm_iio_map_array_register(dev, indio_dev, map);
	if (ret) {
		dev_err(&indio_dev->dev, "iio map err: %d\n", ret);
		return ret;
	}

	adc->map = map;
	return 0;
}

/* Allocate the IIO device, wire up channels and ops, register (all devm) */
static int lp8788_adc_probe(struct platform_device *pdev)
{
	struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
	struct iio_dev *indio_dev;
	struct lp8788_adc *adc;
	int ret;

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
	if (!indio_dev)
		return -ENOMEM;

	adc = iio_priv(indio_dev);
	adc->lp = lp;

	ret = lp8788_iio_map_register(&pdev->dev, indio_dev, lp->pdata, adc);
	if (ret)
		return ret;

	mutex_init(&adc->lock);

	indio_dev->name = pdev->name;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &lp8788_adc_info;
	indio_dev->channels = lp8788_adc_channels;
	indio_dev->num_channels = ARRAY_SIZE(lp8788_adc_channels);

	return devm_iio_device_register(&pdev->dev, indio_dev);
}

static struct platform_driver lp8788_adc_driver = {
	.probe = lp8788_adc_probe,
	.driver = {
		.name = LP8788_DEV_ADC,
	},
};
module_platform_driver(lp8788_adc_driver);

MODULE_DESCRIPTION("Texas Instruments LP8788 ADC Driver");
MODULE_AUTHOR("Milo Kim");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:lp8788-adc");
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   (Tentative) USB Audio Driver for ALSA
 *
 *   Mixer control part
 *
 *   Copyright (c) 2002 by Takashi Iwai <[email protected]>
 *
 *   Many codes borrowed from audio.c by
 *	    Alan Cox ([email protected])
 *	    Thomas Sailer ([email protected])
 */

/*
 * TODOs, for both the mixer and the streaming interfaces:
 *
 *  - support for UAC2 effect units
 *  - support for graphical equalizers
 *  - RANGE and MEM set commands (UAC2)
 *  - RANGE and MEM interrupt dispatchers (UAC2)
 *  - audio channel clustering (UAC2)
 *  - audio sample rate converter units (UAC2)
 *  - proper handling of clock multipliers (UAC2)
 *  - dispatch clock change notifications (UAC2)
 *  - stop PCM streams which use a clock that became invalid
 *  - stop PCM streams which use a clock selector that has changed
 *  - parse available sample rates again when clock sources changed
 */

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
#include <linux/usb/audio-v3.h>

#include <sound/core.h>
#include <sound/control.h>
#include <sound/hwdep.h>
#include <sound/info.h>
#include <sound/tlv.h>

#include "usbaudio.h"
#include "mixer.h"
#include "helper.h"
#include "mixer_quirks.h"
#include "power.h"

#define MAX_ID_ELEMS	256

/* Properties of one terminal/unit gathered while walking the topology */
struct usb_audio_term {
	int id;
	int type;
	int channels;
	unsigned int chconfig;
	int name;	/* string index (iTerminal etc.) -- see parse_term_* below */
};

struct usbmix_name_map;

/* Transient state for parsing one mixer interface's class descriptors */
struct mixer_build {
	struct snd_usb_audio *chip;
	struct usb_mixer_interface *mixer;
	unsigned char *buffer;		/* raw class-specific descriptors */
	unsigned int buflen;
	DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
	DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
	struct usb_audio_term oterm;
	const struct usbmix_name_map *map;
	const struct usbmix_selector_map *selector_map;
};

/*E-mu 0202/0404/0204 eXtension Unit(XU) control*/
enum {
	USB_XU_CLOCK_RATE		= 0xe301,
	USB_XU_CLOCK_SOURCE		= 0xe302,
	USB_XU_DIGITAL_IO_STATUS	= 0xe303,
	USB_XU_DEVICE_OPTIONS		= 0xe304,
	USB_XU_DIRECT_MONITORING	= 0xe305,
	USB_XU_METERING			= 0xe306
};
enum {
	USB_XU_CLOCK_SOURCE_SELECTOR = 0x02,	/* clock source*/
	USB_XU_CLOCK_RATE_SELECTOR = 0x03,	/* clock rate */
	USB_XU_DIGITAL_FORMAT_SELECTOR = 0x01,	/* the spdif format */
	USB_XU_SOFT_LIMIT_SELECTOR = 0x03	/* soft limiter */
};

/*
 * manual mapping of mixer names
 * if the mixer topology is too complicated and the parsed names are
 * ambiguous, add the entries in usbmixer_maps.c.
 */
#include "mixer_maps.c"

/*
 * Find the quirk-map entry matching @unitid; a zero @control (or a map
 * entry with no control) matches any control of that unit.
 */
static const struct usbmix_name_map *
find_map(const struct usbmix_name_map *p, int unitid, int control)
{
	if (!p)
		return NULL;

	for (; p->id; p++) {
		if (p->id == unitid &&
		    (!control || !p->control || control == p->control))
			return p;
	}
	return NULL;
}

/* get the mapped name if the unit matches */
static int
check_mapped_name(const struct usbmix_name_map *p, char *buf, int buflen)
{
	int len;

	if (!p || !p->name)
		return 0;

	buflen--;
	/* strscpy() returns -E2BIG on truncation; report the capacity then */
	len = strscpy(buf, p->name, buflen);
	return len < 0 ? buflen : len;
}

/* ignore the error value if ignore_ctl_error flag is set */
#define filter_error(cval, err) \
	((cval)->head.mixer->ignore_ctl_error ? 0 : (err))

/* check whether the control should be ignored */
static inline int
check_ignored_ctl(const struct usbmix_name_map *p)
{
	/* a map entry with neither a name nor a dB override means "hide it" */
	if (!p || p->name || p->dB)
		return 0;
	return 1;
}

/* dB mapping */
static inline void check_mapped_dB(const struct usbmix_name_map *p,
				   struct usb_mixer_elem_info *cval)
{
	if (p && p->dB) {
		/* dB range supplied by the quirk table */
		cval->dBmin = p->dB->min;
		cval->dBmax = p->dB->max;
		cval->min_mute = p->dB->min_mute;
		cval->initialized = 1;
	}
}

/* get the mapped selector source name */
static int check_mapped_selector_name(struct mixer_build *state, int unitid,
				      int index, char *buf, int buflen)
{
	const struct usbmix_selector_map *p;
	int len;

	if (!state->selector_map)
		return 0;
	for (p = state->selector_map; p->id; p++) {
		if (p->id == unitid && index < p->count) {
			len = strscpy(buf, p->names[index], buflen);
			return len < 0 ?
buflen : len; } } return 0; } /* * find an audio control unit with the given unit id */ static void *find_audio_control_unit(struct mixer_build *state, unsigned char unit) { /* we just parse the header */ struct uac_feature_unit_descriptor *hdr = NULL; while ((hdr = snd_usb_find_desc(state->buffer, state->buflen, hdr, USB_DT_CS_INTERFACE)) != NULL) { if (hdr->bLength >= 4 && hdr->bDescriptorSubtype >= UAC_INPUT_TERMINAL && hdr->bDescriptorSubtype <= UAC3_SAMPLE_RATE_CONVERTER && hdr->bUnitID == unit) return hdr; } return NULL; } /* * copy a string with the given id */ static int snd_usb_copy_string_desc(struct snd_usb_audio *chip, int index, char *buf, int maxlen) { int len = usb_string(chip->dev, index, buf, maxlen - 1); if (len < 0) return 0; buf[len] = 0; return len; } /* * convert from the byte/word on usb descriptor to the zero-based integer */ static int convert_signed_value(struct usb_mixer_elem_info *cval, int val) { switch (cval->val_type) { case USB_MIXER_BOOLEAN: return !!val; case USB_MIXER_INV_BOOLEAN: return !val; case USB_MIXER_U8: val &= 0xff; break; case USB_MIXER_S8: val &= 0xff; if (val >= 0x80) val -= 0x100; break; case USB_MIXER_U16: val &= 0xffff; break; case USB_MIXER_S16: val &= 0xffff; if (val >= 0x8000) val -= 0x10000; break; } return val; } /* * convert from the zero-based int to the byte/word for usb descriptor */ static int convert_bytes_value(struct usb_mixer_elem_info *cval, int val) { switch (cval->val_type) { case USB_MIXER_BOOLEAN: return !!val; case USB_MIXER_INV_BOOLEAN: return !val; case USB_MIXER_S8: case USB_MIXER_U8: return val & 0xff; case USB_MIXER_S16: case USB_MIXER_U16: return val & 0xffff; } return 0; /* not reached */ } static int get_relative_value(struct usb_mixer_elem_info *cval, int val) { if (!cval->res) cval->res = 1; if (val < cval->min) return 0; else if (val >= cval->max) return DIV_ROUND_UP(cval->max - cval->min, cval->res); else return (val - cval->min) / cval->res; } static int get_abs_value(struct 
usb_mixer_elem_info *cval, int val) { if (val < 0) return cval->min; if (!cval->res) cval->res = 1; val *= cval->res; val += cval->min; if (val > cval->max) return cval->max; return val; } static int uac2_ctl_value_size(int val_type) { switch (val_type) { case USB_MIXER_S32: case USB_MIXER_U32: return 4; case USB_MIXER_S16: case USB_MIXER_U16: return 2; default: return 1; } return 0; /* unreachable */ } /* * retrieve a mixer value */ static inline int mixer_ctrl_intf(struct usb_mixer_interface *mixer) { return get_iface_desc(mixer->hostif)->bInterfaceNumber; } static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret) { struct snd_usb_audio *chip = cval->head.mixer->chip; unsigned char buf[2]; int val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1; int timeout = 10; int idx = 0, err; err = snd_usb_lock_shutdown(chip); if (err < 0) return -EIO; while (timeout-- > 0) { idx = mixer_ctrl_intf(cval->head.mixer) | (cval->head.id << 8); err = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, validx, idx, buf, val_len); if (err >= val_len) { *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(buf, val_len)); err = 0; goto out; } else if (err == -ETIMEDOUT) { goto out; } } usb_audio_dbg(chip, "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n", request, validx, idx, cval->val_type); err = -EINVAL; out: snd_usb_unlock_shutdown(chip); return err; } static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret) { struct snd_usb_audio *chip = cval->head.mixer->chip; /* enough space for one range */ unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)]; unsigned char *val; int idx = 0, ret, val_size, size; __u8 bRequest; val_size = uac2_ctl_value_size(cval->val_type); if (request == UAC_GET_CUR) { bRequest = UAC2_CS_CUR; size = val_size; } else { bRequest = UAC2_CS_RANGE; size = sizeof(__u16) + 3 * 
val_size; } memset(buf, 0, sizeof(buf)); if (snd_usb_lock_shutdown(chip)) return -EIO; idx = mixer_ctrl_intf(cval->head.mixer) | (cval->head.id << 8); ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, validx, idx, buf, size); snd_usb_unlock_shutdown(chip); if (ret < 0) { usb_audio_dbg(chip, "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n", request, validx, idx, cval->val_type); return ret; } /* FIXME: how should we handle multiple triplets here? */ switch (request) { case UAC_GET_CUR: val = buf; break; case UAC_GET_MIN: val = buf + sizeof(__u16); break; case UAC_GET_MAX: val = buf + sizeof(__u16) + val_size; break; case UAC_GET_RES: val = buf + sizeof(__u16) + val_size * 2; break; default: return -EINVAL; } *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, val_size)); return 0; } static int get_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret) { validx += cval->idx_off; return (cval->head.mixer->protocol == UAC_VERSION_1) ? 
get_ctl_value_v1(cval, request, validx, value_ret) : get_ctl_value_v2(cval, request, validx, value_ret); } static int get_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int *value) { return get_ctl_value(cval, UAC_GET_CUR, validx, value); } /* channel = 0: master, 1 = first channel */ static inline int get_cur_mix_raw(struct usb_mixer_elem_info *cval, int channel, int *value) { return get_ctl_value(cval, UAC_GET_CUR, (cval->control << 8) | channel, value); } int snd_usb_get_cur_mix_value(struct usb_mixer_elem_info *cval, int channel, int index, int *value) { int err; if (cval->cached & BIT(channel)) { *value = cval->cache_val[index]; return 0; } err = get_cur_mix_raw(cval, channel, value); if (err < 0) { if (!cval->head.mixer->ignore_ctl_error) usb_audio_dbg(cval->head.mixer->chip, "cannot get current value for control %d ch %d: err = %d\n", cval->control, channel, err); return err; } cval->cached |= BIT(channel); cval->cache_val[index] = *value; return 0; } /* * set a mixer value */ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int value_set) { struct snd_usb_audio *chip = cval->head.mixer->chip; unsigned char buf[4]; int idx = 0, val_len, err, timeout = 10; validx += cval->idx_off; if (cval->head.mixer->protocol == UAC_VERSION_1) { val_len = cval->val_type >= USB_MIXER_S16 ? 
2 : 1; } else { /* UAC_VERSION_2/3 */ val_len = uac2_ctl_value_size(cval->val_type); /* FIXME */ if (request != UAC_SET_CUR) { usb_audio_dbg(chip, "RANGE setting not yet supported\n"); return -EINVAL; } request = UAC2_CS_CUR; } value_set = convert_bytes_value(cval, value_set); buf[0] = value_set & 0xff; buf[1] = (value_set >> 8) & 0xff; buf[2] = (value_set >> 16) & 0xff; buf[3] = (value_set >> 24) & 0xff; err = snd_usb_lock_shutdown(chip); if (err < 0) return -EIO; while (timeout-- > 0) { idx = mixer_ctrl_intf(cval->head.mixer) | (cval->head.id << 8); err = snd_usb_ctl_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0), request, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT, validx, idx, buf, val_len); if (err >= 0) { err = 0; goto out; } else if (err == -ETIMEDOUT) { goto out; } } usb_audio_dbg(chip, "cannot set ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d, data = %#x/%#x\n", request, validx, idx, cval->val_type, buf[0], buf[1]); err = -EINVAL; out: snd_usb_unlock_shutdown(chip); return err; } static int set_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int value) { return snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, validx, value); } int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel, int index, int value) { int err; unsigned int read_only = (channel == 0) ? 
cval->master_readonly : cval->ch_readonly & BIT(channel - 1); if (read_only) { usb_audio_dbg(cval->head.mixer->chip, "%s(): channel %d of control %d is read_only\n", __func__, channel, cval->control); return 0; } err = snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, (cval->control << 8) | channel, value); if (err < 0) return err; cval->cached |= BIT(channel); cval->cache_val[index] = value; return 0; } /* * TLV callback for mixer volume controls */ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int size, unsigned int __user *_tlv) { struct usb_mixer_elem_info *cval = kcontrol->private_data; DECLARE_TLV_DB_MINMAX(scale, 0, 0); if (size < sizeof(scale)) return -ENOMEM; if (cval->min_mute) scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE; scale[2] = cval->dBmin; scale[3] = cval->dBmax; if (copy_to_user(_tlv, scale, sizeof(scale))) return -EFAULT; return 0; } /* * parser routines begin here... */ static int parse_audio_unit(struct mixer_build *state, int unitid); /* * check if the input/output channel routing is enabled on the given bitmap. * used for mixer unit parser */ static int check_matrix_bitmap(unsigned char *bmap, int ich, int och, int num_outs) { int idx = ich * num_outs + och; return bmap[idx >> 3] & (0x80 >> (idx & 7)); } /* * add an alsa control element * search and increment the index until an empty slot is found. * * if failed, give up and free the control instance. 
*/
/* Register a mixer element's kcontrol on the card and link the element
 * into the mixer's per-unit-id list (used for resume and quirk lookup).
 */
int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
			   struct snd_kcontrol *kctl,
			   bool is_std_info)
{
	struct usb_mixer_interface *mixer = list->mixer;
	int err;

	/* bump the control index until the (name, index) pair is unique */
	while (snd_ctl_find_id(mixer->chip->card, &kctl->id))
		kctl->id.index++;
	err = snd_ctl_add(mixer->chip->card, kctl);
	if (err < 0) {
		usb_audio_dbg(mixer->chip, "cannot add control (err = %d)\n",
			      err);
		return err;
	}
	list->kctl = kctl;
	list->is_std_info = is_std_info;
	/* prepend to the singly-linked list hanging off this unit id */
	list->next_id_elem = mixer->id_elems[list->id];
	mixer->id_elems[list->id] = list;
	return 0;
}

/*
 * get a terminal name string
 */

/* USB audio terminal-type code -> display-name table */
static struct iterm_name_combo {
	int type;
	char *name;
} iterm_names[] = {
	{ 0x0300, "Output" },
	{ 0x0301, "Speaker" },
	{ 0x0302, "Headphone" },
	{ 0x0303, "HMD Audio" },
	{ 0x0304, "Desktop Speaker" },
	{ 0x0305, "Room Speaker" },
	{ 0x0306, "Com Speaker" },
	{ 0x0307, "LFE" },
	{ 0x0600, "External In" },
	{ 0x0601, "Analog In" },
	{ 0x0602, "Digital In" },
	{ 0x0603, "Line" },
	{ 0x0604, "Legacy In" },
	{ 0x0605, "IEC958 In" },
	{ 0x0606, "1394 DA Stream" },
	{ 0x0607, "1394 DV Stream" },
	{ 0x0700, "Embedded" },
	{ 0x0701, "Noise Source" },
	{ 0x0702, "Equalization Noise" },
	{ 0x0703, "CD" },
	{ 0x0704, "DAT" },
	{ 0x0705, "DCC" },
	{ 0x0706, "MiniDisk" },
	{ 0x0707, "Analog Tape" },
	{ 0x0708, "Phonograph" },
	{ 0x0709, "VCR Audio" },
	{ 0x070a, "Video Disk Audio" },
	{ 0x070b, "DVD Audio" },
	{ 0x070c, "TV Tuner Audio" },
	{ 0x070d, "Satellite Rec Audio" },
	{ 0x070e, "Cable Tuner Audio" },
	{ 0x070f, "DSS Audio" },
	{ 0x0710, "Radio Receiver" },
	{ 0x0711, "Radio Transmitter" },
	{ 0x0712, "Multi-Track Recorder" },
	{ 0x0713, "Synthesizer" },
	{ 0 },
};

/* Build a printable name for a terminal into name[maxlen]; tries the
 * device's string descriptor first, then falls back to fixed names.
 * Returns the name length, or 0 when nothing suitable was found.
 * NOTE(review): the strcpy() fallbacks assume maxlen is large enough for
 * the fixed strings — callers pass sizeof(kctl->id.name); confirm.
 */
static int get_term_name(struct snd_usb_audio *chip, struct usb_audio_term *iterm,
			 unsigned char *name, int maxlen, int term_only)
{
	struct iterm_name_combo *names;
	int len;

	if (iterm->name) {
		len = snd_usb_copy_string_desc(chip, iterm->name,
					       name, maxlen);
		if (len)
			return len;
	}

	/* virtual type - not a real terminal */
	if (iterm->type >> 16) {
		if (term_only)
			return 0;
		switch (iterm->type >> 16) {
		case UAC3_SELECTOR_UNIT:
			strcpy(name, "Selector");
			return 8;
		case UAC3_PROCESSING_UNIT:
			strcpy(name, "Process Unit");
			return 12;
		case UAC3_EXTENSION_UNIT:
			strcpy(name, "Ext Unit");
			return 8;
		case UAC3_MIXER_UNIT:
			strcpy(name, "Mixer");
			return 5;
		default:
			return sprintf(name, "Unit %d", iterm->id);
		}
	}

	/* coarse terminal-type classes before the exact-match table */
	switch (iterm->type & 0xff00) {
	case 0x0100:
		strcpy(name, "PCM");
		return 3;
	case 0x0200:
		strcpy(name, "Mic");
		return 3;
	case 0x0400:
		strcpy(name, "Headset");
		return 7;
	case 0x0500:
		strcpy(name, "Phone");
		return 5;
	}

	for (names = iterm_names; names->type; names++) {
		if (names->type == iterm->type) {
			strcpy(name, names->name);
			return strlen(names->name);
		}
	}

	return 0;
}

/*
 * Get logical cluster information for UAC3 devices.
 */
static int get_cluster_channels_v3(struct mixer_build *state, unsigned int cluster_id)
{
	struct uac3_cluster_header_descriptor c_header;
	int err;

	err = snd_usb_ctl_msg(state->chip->dev,
			      usb_rcvctrlpipe(state->chip->dev, 0),
			      UAC3_CS_REQ_HIGH_CAPABILITY_DESCRIPTOR,
			      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
			      cluster_id,
			      snd_usb_ctrl_intf(state->mixer->hostif),
			      &c_header, sizeof(c_header));
	if (err < 0)
		goto error;
	if (err != sizeof(c_header)) {
		/* short transfer: treat as an I/O error */
		err = -EIO;
		goto error;
	}

	return c_header.bNrChannels;

error:
	usb_audio_err(state->chip,
		      "cannot request logical cluster ID: %d (err: %d)\n",
		      cluster_id, err);
	return err;
}

/*
 * Get number of channels for a Mixer Unit.
 */
static int uac_mixer_unit_get_channels(struct mixer_build *state,
				       struct uac_mixer_unit_descriptor *desc)
{
	int mu_channels;

	switch (state->mixer->protocol) {
	case UAC_VERSION_1:
	case UAC_VERSION_2:
	default:
		if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1)
			return 0; /* no bmControls -> skip */
		mu_channels = uac_mixer_unit_bNrChannels(desc);
		break;
	case UAC_VERSION_3:
		mu_channels = get_cluster_channels_v3(state,
				uac3_mixer_unit_wClusterDescrID(desc));
		break;
	}

	return mu_channels;
}

/*
 * Parse Input Terminal Unit
 */
static int __check_input_term(struct mixer_build *state, int id,
			      struct usb_audio_term *term);

/* UAC1 Input Terminal: copy type/channels/config/name from descriptor */
static int parse_term_uac1_iterm_unit(struct mixer_build *state,
				      struct usb_audio_term *term,
				      void *p1, int id)
{
	struct uac_input_terminal_descriptor *d = p1;

	term->type = le16_to_cpu(d->wTerminalType);
	term->channels = d->bNrChannels;
	term->chconfig = le16_to_cpu(d->wChannelConfig);
	term->name = d->iTerminal;
	return 0;
}

/* UAC2 Input Terminal: also validates the referenced clock entity */
static int parse_term_uac2_iterm_unit(struct mixer_build *state,
				      struct usb_audio_term *term,
				      void *p1, int id)
{
	struct uac2_input_terminal_descriptor *d = p1;
	int err;

	/* call recursively to verify the referenced clock entity */
	err = __check_input_term(state, d->bCSourceID, term);
	if (err < 0)
		return err;

	/* save input term properties after recursion,
	 * to ensure they are not overridden by the recursion calls
	 */
	term->id = id;
	term->type = le16_to_cpu(d->wTerminalType);
	term->channels = d->bNrChannels;
	term->chconfig = le32_to_cpu(d->bmChannelConfig);
	term->name = d->iTerminal;
	return 0;
}

/* UAC3 Input Terminal: channel count comes from the logical cluster */
static int parse_term_uac3_iterm_unit(struct mixer_build *state,
				      struct usb_audio_term *term,
				      void *p1, int id)
{
	struct uac3_input_terminal_descriptor *d = p1;
	int err;

	/* call recursively to verify the referenced clock entity */
	err = __check_input_term(state, d->bCSourceID, term);
	if (err < 0)
		return err;

	/* save input term properties after recursion,
	 * to ensure they are not overridden by the recursion calls
	 */
	term->id = id;
	term->type = le16_to_cpu(d->wTerminalType);

	err = get_cluster_channels_v3(state, le16_to_cpu(d->wClusterDescrID));
	if (err < 0)
		return err;
	term->channels = err;

	/* REVISIT: UAC3 IT doesn't have channels cfg */
	term->chconfig = 0;

	term->name = le16_to_cpu(d->wTerminalDescrStr);
	return 0;
}

/* Mixer Unit reached while walking a source chain: report a virtual type */
static int parse_term_mixer_unit(struct mixer_build *state,
				 struct usb_audio_term *term,
				 void *p1, int id)
{
	struct uac_mixer_unit_descriptor *d = p1;
	int protocol = state->mixer->protocol;
	int err;

	err = uac_mixer_unit_get_channels(state, d);
	if (err <= 0)
		return err;

	term->type = UAC3_MIXER_UNIT << 16; /* virtual type */
	term->channels = err;
	if (protocol != UAC_VERSION_3) {
		term->chconfig = uac_mixer_unit_wChannelConfig(d, protocol);
		term->name = uac_mixer_unit_iMixer(d);
	}
	return 0;
}

/* Selector (or clock selector) unit: recurse through its first input */
static int parse_term_selector_unit(struct mixer_build *state,
				    struct usb_audio_term *term,
				    void *p1, int id)
{
	struct uac_selector_unit_descriptor *d = p1;
	int err;

	/* call recursively to retrieve the channel info */
	err = __check_input_term(state, d->baSourceID[0], term);
	if (err < 0)
		return err;

	term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
	term->id = id;
	if (state->mixer->protocol != UAC_VERSION_3)
		term->name = uac_selector_unit_iSelector(d);
	return 0;
}

/* Processing/extension unit: vtype selects the reported virtual type */
static int parse_term_proc_unit(struct mixer_build *state,
				struct usb_audio_term *term,
				void *p1, int id, int vtype)
{
	struct uac_processing_unit_descriptor *d = p1;
	int protocol = state->mixer->protocol;
	int err;

	if (d->bNrInPins) {
		/* call recursively to retrieve the channel info */
		err = __check_input_term(state, d->baSourceID[0], term);
		if (err < 0)
			return err;
	}

	term->type = vtype << 16; /* virtual type */
	term->id = id;

	if (protocol == UAC_VERSION_3)
		return 0;

	if (!term->channels) {
		term->channels = uac_processing_unit_bNrChannels(d);
		term->chconfig = uac_processing_unit_wChannelConfig(d, protocol);
	}
	term->name = uac_processing_unit_iProcessing(d, protocol);
	return 0;
}

/* Effect unit: pass through its single source, report a virtual type */
static int parse_term_effect_unit(struct mixer_build *state,
				  struct usb_audio_term *term,
				  void *p1, int id)
{
	struct uac2_effect_unit_descriptor *d = p1;
	int err;

	err = __check_input_term(state, d->bSourceID, term);
	if (err < 0)
		return err;

	term->type = UAC3_EFFECT_UNIT << 16; /* virtual type */
	term->id = id;
	return 0;
}

static int parse_term_uac2_clock_source(struct mixer_build *state,
					struct usb_audio_term *term,
					void *p1, int id)
{
	struct uac_clock_source_descriptor *d = p1;

	term->type = UAC3_CLOCK_SOURCE << 16; /* virtual type */
	term->id = id;
	term->name = d->iClockSource;
	return 0;
}

static int parse_term_uac3_clock_source(struct mixer_build *state,
					struct usb_audio_term *term,
					void *p1, int id)
{
	struct uac3_clock_source_descriptor *d = p1;

	term->type = UAC3_CLOCK_SOURCE << 16; /* virtual type */
	term->id = id;
	term->name = le16_to_cpu(d->wClockSourceStr);
	return 0;
}

/* combine (protocol, descriptor subtype) into one switch key */
#define PTYPE(a, b)	((a) << 8 | (b))

/*
 * parse the source unit recursively until it reaches to a terminal
 * or a branched unit.
 */
static int __check_input_term(struct mixer_build *state, int id,
			      struct usb_audio_term *term)
{
	int protocol = state->mixer->protocol;
	void *p1;
	unsigned char *hdr;

	for (;;) {
		/* a loop in the terminal chain? */
		if (test_and_set_bit(id, state->termbitmap))
			return -EINVAL;

		p1 = find_audio_control_unit(state, id);
		if (!p1)
			break;
		if (!snd_usb_validate_audio_desc(p1, protocol))
			break; /* bad descriptor */

		hdr = p1;
		term->id = id;

		switch (PTYPE(protocol, hdr[2])) {
		case PTYPE(UAC_VERSION_1, UAC_FEATURE_UNIT):
		case PTYPE(UAC_VERSION_2, UAC_FEATURE_UNIT):
		case PTYPE(UAC_VERSION_3, UAC3_FEATURE_UNIT): {
			/* the header is the same for all versions */
			struct uac_feature_unit_descriptor *d = p1;

			id = d->bSourceID;
			break; /* continue to parse */
		}
		case PTYPE(UAC_VERSION_1, UAC_INPUT_TERMINAL):
			return parse_term_uac1_iterm_unit(state, term, p1, id);
		case PTYPE(UAC_VERSION_2, UAC_INPUT_TERMINAL):
			return parse_term_uac2_iterm_unit(state, term, p1, id);
		case PTYPE(UAC_VERSION_3, UAC_INPUT_TERMINAL):
			return parse_term_uac3_iterm_unit(state, term, p1, id);
		case PTYPE(UAC_VERSION_1, UAC_MIXER_UNIT):
		case PTYPE(UAC_VERSION_2, UAC_MIXER_UNIT):
		case PTYPE(UAC_VERSION_3, UAC3_MIXER_UNIT):
			return parse_term_mixer_unit(state, term, p1, id);
		case PTYPE(UAC_VERSION_1, UAC_SELECTOR_UNIT):
		case PTYPE(UAC_VERSION_2, UAC_SELECTOR_UNIT):
		case PTYPE(UAC_VERSION_2, UAC2_CLOCK_SELECTOR):
		case PTYPE(UAC_VERSION_3, UAC3_SELECTOR_UNIT):
		case PTYPE(UAC_VERSION_3, UAC3_CLOCK_SELECTOR):
			return parse_term_selector_unit(state, term, p1, id);
		case PTYPE(UAC_VERSION_1, UAC1_PROCESSING_UNIT):
		case PTYPE(UAC_VERSION_2, UAC2_PROCESSING_UNIT_V2):
		case PTYPE(UAC_VERSION_3, UAC3_PROCESSING_UNIT):
			return parse_term_proc_unit(state, term, p1, id,
						    UAC3_PROCESSING_UNIT);
		case PTYPE(UAC_VERSION_2, UAC2_EFFECT_UNIT):
		case PTYPE(UAC_VERSION_3, UAC3_EFFECT_UNIT):
			return parse_term_effect_unit(state, term, p1, id);
		case PTYPE(UAC_VERSION_1, UAC1_EXTENSION_UNIT):
		case PTYPE(UAC_VERSION_2, UAC2_EXTENSION_UNIT_V2):
		case PTYPE(UAC_VERSION_3, UAC3_EXTENSION_UNIT):
			return parse_term_proc_unit(state, term, p1, id,
						    UAC3_EXTENSION_UNIT);
		case PTYPE(UAC_VERSION_2, UAC2_CLOCK_SOURCE):
			return parse_term_uac2_clock_source(state, term, p1, id);
		case PTYPE(UAC_VERSION_3, UAC3_CLOCK_SOURCE):
			return parse_term_uac3_clock_source(state, term, p1, id);
		default:
			return -ENODEV;
		}
	}
	return -ENODEV;
}

/* entry point: reset term and the loop-detection bitmap, then walk */
static int check_input_term(struct mixer_build *state, int id,
			    struct usb_audio_term *term)
{
	memset(term, 0, sizeof(*term));
	memset(state->termbitmap, 0, sizeof(state->termbitmap));
	return __check_input_term(state, id, term);
}

/*
 * Feature Unit
 */

/* feature unit control information */
struct usb_feature_control_info {
	int control;
	const char *name;
	int type;	/* data type for uac1 */
	int type_uac2;	/* data type for uac2 if different from uac1, else -1 */
};

static const struct usb_feature_control_info audio_feature_info[] = {
	{ UAC_FU_MUTE,			"Mute",			USB_MIXER_INV_BOOLEAN, -1 },
	{ UAC_FU_VOLUME,		"Volume",		USB_MIXER_S16, -1 },
	{ UAC_FU_BASS,			"Tone Control - Bass",	USB_MIXER_S8, -1 },
	{ UAC_FU_MID,			"Tone Control - Mid",	USB_MIXER_S8, -1 },
	{ UAC_FU_TREBLE,		"Tone Control - Treble", USB_MIXER_S8, -1 },
	{ UAC_FU_GRAPHIC_EQUALIZER,	"Graphic Equalizer",	USB_MIXER_S8, -1 }, /* FIXME: not implemented yet */
	{ UAC_FU_AUTOMATIC_GAIN,	"Auto Gain Control",	USB_MIXER_BOOLEAN, -1 },
	{ UAC_FU_DELAY,			"Delay Control",	USB_MIXER_U16, USB_MIXER_U32 },
	{ UAC_FU_BASS_BOOST,		"Bass Boost",		USB_MIXER_BOOLEAN, -1 },
	{ UAC_FU_LOUDNESS,		"Loudness",		USB_MIXER_BOOLEAN, -1 },
	/* UAC2 specific */
	{ UAC2_FU_INPUT_GAIN,		"Input Gain Control",	USB_MIXER_S16, -1 },
	{ UAC2_FU_INPUT_GAIN_PAD,	"Input Gain Pad Control", USB_MIXER_S16, -1 },
	{ UAC2_FU_PHASE_INVERTER,	"Phase Inverter Control", USB_MIXER_BOOLEAN, -1 },
};

static void usb_mixer_elem_info_free(struct usb_mixer_elem_info *cval)
{
	kfree(cval);
}

/* private_free callback */
void snd_usb_mixer_elem_free(struct snd_kcontrol *kctl)
{
	usb_mixer_elem_info_free(kctl->private_data);
	kctl->private_data = NULL;
}

/*
 * interface to ALSA control for feature/mixer units
 */

/* volume control quirks */
static void volume_control_quirks(struct usb_mixer_elem_info *cval,
				  struct snd_kcontrol *kctl)
{
	struct
snd_usb_audio *chip = cval->head.mixer->chip;

	/* mic resolution overrides selected via per-device quirk flags */
	if (chip->quirk_flags & QUIRK_FLAG_MIC_RES_384) {
		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
			usb_audio_info(chip,
				"set resolution quirk: cval->res = 384\n");
			cval->res = 384;
		}
	} else if (chip->quirk_flags & QUIRK_FLAG_MIC_RES_16) {
		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
			usb_audio_info(chip,
				"set resolution quirk: cval->res = 16\n");
			cval->res = 16;
		}
	}

	/* per-device range/resolution fixups keyed by USB vendor:product id */
	switch (chip->usb_id) {
	case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
	case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
		if (strcmp(kctl->id.name, "Effect Duration") == 0) {
			cval->min = 0x0000;
			cval->max = 0xffff;
			cval->res = 0x00e6;
			break;
		}
		if (strcmp(kctl->id.name, "Effect Volume") == 0 ||
		    strcmp(kctl->id.name, "Effect Feedback Volume") == 0) {
			cval->min = 0x00;
			cval->max = 0xff;
			break;
		}
		if (strstr(kctl->id.name, "Effect Return") != NULL) {
			cval->min = 0xb706;
			cval->max = 0xff7b;
			cval->res = 0x0073;
			break;
		}
		if ((strstr(kctl->id.name, "Playback Volume") != NULL) ||
			(strstr(kctl->id.name, "Effect Send") != NULL)) {
			cval->min = 0xb5fb; /* -73 dB = 0xb6ff */
			cval->max = 0xfcfe;
			cval->res = 0x0073;
		}
		break;

	case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */
	case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra */
		if (strcmp(kctl->id.name, "Effect Duration") == 0) {
			usb_audio_info(chip,
				       "set quirk for FTU Effect Duration\n");
			cval->min = 0x0000;
			cval->max = 0x7f00;
			cval->res = 0x0100;
			break;
		}
		if (strcmp(kctl->id.name, "Effect Volume") == 0 ||
		    strcmp(kctl->id.name, "Effect Feedback Volume") == 0) {
			usb_audio_info(chip,
				       "set quirks for FTU Effect Feedback/Volume\n");
			cval->min = 0x00;
			cval->max = 0x7f;
			break;
		}
		break;

	case USB_ID(0x0d8c, 0x0103):
		if (!strcmp(kctl->id.name, "PCM Playback Volume")) {
			usb_audio_info(chip,
				 "set volume quirk for CM102-A+/102S+\n");
			cval->min = -256;
		}
		break;

	case USB_ID(0x0471, 0x0101):
	case USB_ID(0x0471, 0x0104):
	case USB_ID(0x0471, 0x0105):
	case USB_ID(0x0672, 0x1041):
	/* quirk for UDA1321/N101.
	 * note that detection between firmware 2.1.1.7 (N101)
	 * and later 2.1.1.21 is not very clear from datasheets.
	 * I hope that the min value is -15360 for newer firmware --jk
	 */
		if (!strcmp(kctl->id.name, "PCM Playback Volume") &&
		    cval->min == -15616) {
			usb_audio_info(chip,
				 "set volume quirk for UDA1321/N101 chip\n");
			cval->max = -256;
		}
		break;

	case USB_ID(0x046d, 0x09a4):
		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
			usb_audio_info(chip,
				"set volume quirk for QuickCam E3500\n");
			cval->min = 6080;
			cval->max = 8768;
			cval->res = 192;
		}
		break;

	case USB_ID(0x0495, 0x3042): /* ESS Technology Asus USB DAC */
		if ((strstr(kctl->id.name, "Playback Volume") != NULL) ||
			strstr(kctl->id.name, "Capture Volume") != NULL) {
			cval->min >>= 8;
			cval->max = 0;
			cval->res = 1;
		}
		break;
	}
}

/* forcibly initialize the current mixer value; if GET_CUR fails, set to
 * the minimum as default
 */
static void init_cur_mix_raw(struct usb_mixer_elem_info *cval, int ch, int idx)
{
	int val, err;

	err = snd_usb_get_cur_mix_value(cval, ch, idx, &val);
	if (!err)
		return;
	if (!cval->head.mixer->ignore_ctl_error)
		usb_audio_warn(cval->head.mixer->chip,
			       "%d:%d: failed to get current value for ch %d (%d)\n",
			       cval->head.id, mixer_ctrl_intf(cval->head.mixer),
			       ch, err);
	snd_usb_set_cur_mix_value(cval, ch, idx, cval->min);
}

/*
 * retrieve the minimum and maximum values for the specified control
 */
static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
				   int default_min, struct snd_kcontrol *kctl)
{
	int i, idx;

	/* for failsafe */
	cval->min = default_min;
	cval->max = cval->min + 1;
	cval->res = 1;
	cval->dBmin = cval->dBmax = 0;

	if (cval->val_type == USB_MIXER_BOOLEAN ||
	    cval->val_type == USB_MIXER_INV_BOOLEAN) {
		cval->initialized = 1;
	} else {
		/* minchn = lowest channel present in the mask (wValue LSB) */
		int minchn = 0;

		if (cval->cmask) {
			for (i = 0; i < MAX_CHANNELS; i++)
				if (cval->cmask & BIT(i)) {
					minchn = i + 1;
					break;
				}
		}
		if (get_ctl_value(cval, UAC_GET_MAX,
				  (cval->control << 8) | minchn,
				  &cval->max) < 0 ||
		    get_ctl_value(cval, UAC_GET_MIN,
				  (cval->control << 8) | minchn,
				  &cval->min) < 0) {
			usb_audio_err(cval->head.mixer->chip,
				      "%d:%d: cannot get min/max values for control %d (id %d)\n",
				      cval->head.id,
				      mixer_ctrl_intf(cval->head.mixer),
				      cval->control, cval->head.id);
			return -EINVAL;
		}
		if (get_ctl_value(cval, UAC_GET_RES,
				  (cval->control << 8) | minchn,
				  &cval->res) < 0) {
			cval->res = 1;
		} else if (cval->head.mixer->protocol == UAC_VERSION_1) {
			/* UAC1: probe for the finest resolution the device
			 * accepts by repeatedly halving and SET_RES'ing it
			 */
			int last_valid_res = cval->res;

			while (cval->res > 1) {
				if (snd_usb_mixer_set_ctl_value(cval, UAC_SET_RES,
								(cval->control << 8) | minchn,
								cval->res / 2) < 0)
					break;
				cval->res /= 2;
			}
			if (get_ctl_value(cval, UAC_GET_RES,
					  (cval->control << 8) | minchn,
					  &cval->res) < 0)
				cval->res = last_valid_res;
		}
		if (cval->res == 0)
			cval->res = 1;

		/* Additional checks for the proper resolution
		 *
		 * Some devices report smaller resolutions than actually
		 * reacting.  They don't return errors but simply clip
		 * to the lower aligned value.
		 */
		if (cval->min + cval->res < cval->max) {
			int last_valid_res = cval->res;
			int saved, test, check;

			if (get_cur_mix_raw(cval, minchn, &saved) < 0)
				goto no_res_check;
			for (;;) {
				test = saved;
				if (test < cval->max)
					test += cval->res;
				else
					test -= cval->res;
				if (test < cval->min || test > cval->max ||
				    snd_usb_set_cur_mix_value(cval, minchn, 0, test) ||
				    get_cur_mix_raw(cval, minchn, &check)) {
					cval->res = last_valid_res;
					break;
				}
				if (test == check)
					break;
				cval->res *= 2;
			}
			/* restore the original current value */
			snd_usb_set_cur_mix_value(cval, minchn, 0, saved);
		}

no_res_check:
		cval->initialized = 1;
	}

	if (kctl)
		volume_control_quirks(cval, kctl);

	/* USB descriptions contain the dB scale in 1/256 dB unit
	 * while ALSA TLV contains in 1/100 dB unit
	 */
	cval->dBmin = (convert_signed_value(cval, cval->min) * 100) / 256;
	cval->dBmax = (convert_signed_value(cval, cval->max) * 100) / 256;
	if (cval->dBmin > cval->dBmax) {
		/* something is wrong; assume it's either from/to 0dB */
		if (cval->dBmin < 0)
			cval->dBmax = 0;
		else if (cval->dBmin > 0)
			cval->dBmin = 0;
		if (cval->dBmin > cval->dBmax) {
			/* totally crap, return an error */
			return -EINVAL;
		}
	} else {
		/* if the max volume is too low, it's likely a bogus range;
		 * here we use -96dB as the threshold
		 */
		if (cval->dBmax <= -9600) {
			usb_audio_info(cval->head.mixer->chip,
				       "%d:%d: bogus dB values (%d/%d), disabling dB reporting\n",
				       cval->head.id,
				       mixer_ctrl_intf(cval->head.mixer),
				       cval->dBmin, cval->dBmax);
			cval->dBmin = cval->dBmax = 0;
		}
	}

	/* initialize all elements */
	if (!cval->cmask) {
		init_cur_mix_raw(cval, 0, 0);
	} else {
		idx = 0;
		for (i = 0; i < MAX_CHANNELS; i++) {
			if (cval->cmask & BIT(i)) {
				init_cur_mix_raw(cval, i + 1, idx);
				idx++;
			}
		}
	}

	return 0;
}

#define get_min_max(cval, def)	get_min_max_with_quirks(cval, def, NULL)

/* get the max value advertised via control API */
static int get_max_exposed(struct usb_mixer_elem_info *cval)
{
	if (!cval->max_exposed) {
		if (cval->res)
			cval->max_exposed =
				DIV_ROUND_UP(cval->max - cval->min, cval->res);
		else
			cval->max_exposed = cval->max - cval->min;
	}
	return cval->max_exposed;
}

/* get a feature/mixer unit info */
static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_info *uinfo)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;

	if (cval->val_type == USB_MIXER_BOOLEAN ||
	    cval->val_type == USB_MIXER_INV_BOOLEAN)
		uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	else
		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = cval->channels;
	if (cval->val_type != USB_MIXER_BOOLEAN &&
	    cval->val_type != USB_MIXER_INV_BOOLEAN) {
		if (!cval->initialized) {
			/* lazy min/max probing; drop TLV access if the dB
			 * range turned out to be unusable
			 */
			get_min_max_with_quirks(cval, 0, kcontrol);
			if (cval->initialized && cval->dBmin >= cval->dBmax) {
				kcontrol->vd[0].access &=
					~(SNDRV_CTL_ELEM_ACCESS_TLV_READ |
					  SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK);
				snd_ctl_notify(cval->head.mixer->chip->card,
					       SNDRV_CTL_EVENT_MASK_INFO,
					       &kcontrol->id);
			}
		}
	}
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = get_max_exposed(cval);
	return 0;
}

/* get the current value from feature/mixer unit */
static int mixer_ctl_feature_get(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	int c, cnt, val, err;

	ucontrol->value.integer.value[0] = cval->min;
	if (cval->cmask) {
		cnt = 0;
		for (c = 0; c < MAX_CHANNELS; c++) {
			if (!(cval->cmask & BIT(c)))
				continue;
			err = snd_usb_get_cur_mix_value(cval, c + 1, cnt, &val);
			if (err < 0)
				return filter_error(cval, err);
			val = get_relative_value(cval, val);
			ucontrol->value.integer.value[cnt] = val;
			cnt++;
		}
		return 0;
	} else {
		/* master channel */
		err = snd_usb_get_cur_mix_value(cval, 0, 0, &val);
		if (err < 0)
			return filter_error(cval, err);
		val = get_relative_value(cval, val);
		ucontrol->value.integer.value[0] = val;
	}
	return 0;
}

/* put the current value to feature/mixer unit */
static int mixer_ctl_feature_put(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	int max_val = get_max_exposed(cval);
	int c, cnt, val, oval, err;
	int changed = 0;

	if (cval->cmask) {
		cnt = 0;
		for (c = 0; c < MAX_CHANNELS; c++) {
			if (!(cval->cmask & BIT(c)))
				continue;
			err = snd_usb_get_cur_mix_value(cval, c + 1, cnt, &oval);
			if (err < 0)
				return filter_error(cval, err);
			val = ucontrol->value.integer.value[cnt];
			if (val < 0 || val > max_val)
				return -EINVAL;
			val = get_abs_value(cval, val);
			if (oval != val) {
				snd_usb_set_cur_mix_value(cval, c + 1, cnt, val);
				changed = 1;
			}
			cnt++;
		}
	} else {
		/* master channel */
		err = snd_usb_get_cur_mix_value(cval, 0, 0, &oval);
		if (err < 0)
			return filter_error(cval, err);
		val = ucontrol->value.integer.value[0];
		if (val < 0 || val > max_val)
			return -EINVAL;
		val = get_abs_value(cval, val);
		if (val != oval) {
			snd_usb_set_cur_mix_value(cval, 0, 0, val);
			changed = 1;
		}
	}
	return changed;
}

/* get the boolean value from the master channel of a UAC control */
static int mixer_ctl_master_bool_get(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	int val, err;

	err = snd_usb_get_cur_mix_value(cval, 0, 0, &val);
	if (err < 0)
		return filter_error(cval, err);
	val = (val != 0);
	ucontrol->value.integer.value[0] = val;
	return 0;
}

/* Read a UAC2/UAC3 connector (jack-detect) control; *val becomes 0/1.
 * On failure, "Speaker" controls report connected as a safe fallback.
 */
static int get_connector_value(struct usb_mixer_elem_info *cval,
			       char *name, int *val)
{
	struct snd_usb_audio *chip = cval->head.mixer->chip;
	int idx = 0, validx, ret;

	validx = cval->control << 8 | 0;

	ret = snd_usb_lock_shutdown(chip) ? -EIO : 0;
	if (ret)
		goto error;

	idx = mixer_ctrl_intf(cval->head.mixer) | (cval->head.id << 8);
	if (cval->head.mixer->protocol == UAC_VERSION_2) {
		struct uac2_connectors_ctl_blk uac2_conn;

		ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0),
				      UAC2_CS_CUR,
				      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
				      validx, idx, &uac2_conn, sizeof(uac2_conn));
		if (val)
			*val = !!uac2_conn.bNrChannels;
	} else { /* UAC_VERSION_3 */
		struct uac3_insertion_ctl_blk uac3_conn;

		ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0),
				      UAC2_CS_CUR,
				      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
				      validx, idx, &uac3_conn, sizeof(uac3_conn));
		if (val)
			*val = !!uac3_conn.bmConInserted;
	}

	snd_usb_unlock_shutdown(chip);

	if (ret < 0) {
		if (name && strstr(name, "Speaker")) {
			if (val)
				*val = 1;
			return 0;
		}
error:
		usb_audio_err(chip,
			      "cannot get connectors status: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
			      UAC_GET_CUR, validx, idx, cval->val_type);

		if (val)
			*val = 0;

		return filter_error(cval, ret);
	}

	return ret;
}

/* get the connectors status and report it as boolean type */
static int mixer_ctl_connector_get(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	int ret, val;

	ret = get_connector_value(cval, kcontrol->id.name, &val);
	if (ret < 0)
		return ret;
	ucontrol->value.integer.value[0] = val;
	return 0;
}

static const struct snd_kcontrol_new usb_feature_unit_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "", /* will be filled later manually */
	.info = mixer_ctl_feature_info,
	.get = mixer_ctl_feature_get,
	.put = mixer_ctl_feature_put,
};

/* the read-only variant */
static const struct snd_kcontrol_new usb_feature_unit_ctl_ro = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "", /* will be filled later manually */
	.info = mixer_ctl_feature_info,
	.get = mixer_ctl_feature_get,
	.put = NULL,
};

/*
 * A control which shows the boolean value from reading a UAC control on
 * the master channel.
 */
static const struct snd_kcontrol_new usb_bool_master_control_ctl_ro = {
	.iface = SNDRV_CTL_ELEM_IFACE_CARD,
	.name = "", /* will be filled later manually */
	.access = SNDRV_CTL_ELEM_ACCESS_READ,
	.info = snd_ctl_boolean_mono_info,
	.get = mixer_ctl_master_bool_get,
	.put = NULL,
};

static const struct snd_kcontrol_new usb_connector_ctl_ro = {
	.iface = SNDRV_CTL_ELEM_IFACE_CARD,
	.name = "", /* will be filled later manually */
	.access = SNDRV_CTL_ELEM_ACCESS_READ,
	.info = snd_ctl_boolean_mono_info,
	.get = mixer_ctl_connector_get,
	.put = NULL,
};

/*
 * This symbol is exported in order to allow the mixer quirks to
 * hook up to the standard feature unit control mechanism
 */
const struct snd_kcontrol_new *snd_usb_feature_unit_ctl = &usb_feature_unit_ctl;

/*
 * build a feature control
 */
static size_t append_ctl_name(struct snd_kcontrol *kctl, const char *str)
{
	return strlcat(kctl->id.name, str, sizeof(kctl->id.name));
}

/*
 * A lot of headsets/headphones have a "Speaker" mixer. Make sure we
 * rename it to "Headphone". We determine if something is a headphone
 * similar to how udev determines form factor.
*/
static void check_no_speaker_on_headset(struct snd_kcontrol *kctl,
					struct snd_card *card)
{
	static const char * const names_to_check[] = {
		"Headset", "headset", "Headphone", "headphone", NULL};
	const char * const *s;
	bool found = false;

	/* only "Speaker" controls are candidates for renaming */
	if (strcmp("Speaker", kctl->id.name))
		return;

	for (s = names_to_check; *s; s++)
		if (strstr(card->shortname, *s)) {
			found = true;
			break;
		}

	if (!found)
		return;

	snd_ctl_rename(card, kctl, "Headphone");
}

/* look up the control-info table entry for a feature-unit control id */
static const struct usb_feature_control_info *get_feature_control_info(int control)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(audio_feature_info); ++i) {
		if (audio_feature_info[i].control == control)
			return &audio_feature_info[i];
	}
	return NULL;
}

/* Common worker creating a kcontrol for one feature-unit control,
 * naming it from maps / string descriptors / terminal names, applying
 * volume quirks, and registering it on the card.
 */
static void __build_feature_ctl(struct usb_mixer_interface *mixer,
				const struct usbmix_name_map *imap,
				unsigned int ctl_mask, int control,
				struct usb_audio_term *iterm,
				struct usb_audio_term *oterm,
				int unitid, int nameid, int readonly_mask)
{
	const struct usb_feature_control_info *ctl_info;
	unsigned int len = 0;
	int mapped_name = 0;
	struct snd_kcontrol *kctl;
	struct usb_mixer_elem_info *cval;
	const struct usbmix_name_map *map;
	unsigned int range;

	if (control == UAC_FU_GRAPHIC_EQUALIZER) {
		/* FIXME: not supported yet */
		return;
	}

	map = find_map(imap, unitid, control);
	if (check_ignored_ctl(map))
		return;

	cval = kzalloc(sizeof(*cval), GFP_KERNEL);
	if (!cval)
		return;
	snd_usb_mixer_elem_init_std(&cval->head, mixer, unitid);
	cval->control = control;
	cval->cmask = ctl_mask;

	ctl_info = get_feature_control_info(control);
	if (!ctl_info) {
		usb_mixer_elem_info_free(cval);
		return;
	}
	if (mixer->protocol == UAC_VERSION_1)
		cval->val_type = ctl_info->type;
	else /* UAC_VERSION_2 */
		cval->val_type = ctl_info->type_uac2 >= 0 ?
			ctl_info->type_uac2 : ctl_info->type;

	if (ctl_mask == 0) {
		cval->channels = 1;	/* master channel */
		cval->master_readonly = readonly_mask;
	} else {
		int i, c = 0;

		for (i = 0; i < 16; i++)
			if (ctl_mask & BIT(i))
				c++;
		cval->channels = c;
		cval->ch_readonly = readonly_mask;
	}

	/*
	 * If all channels in the mask are marked read-only, make the control
	 * read-only. snd_usb_set_cur_mix_value() will check the mask again and won't
	 * issue write commands to read-only channels.
	 */
	if (cval->channels == readonly_mask)
		kctl = snd_ctl_new1(&usb_feature_unit_ctl_ro, cval);
	else
		kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval);

	if (!kctl) {
		usb_audio_err(mixer->chip, "cannot malloc kcontrol\n");
		usb_mixer_elem_info_free(cval);
		return;
	}
	kctl->private_free = snd_usb_mixer_elem_free;

	len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
	mapped_name = len != 0;
	if (!len && nameid)
		len = snd_usb_copy_string_desc(mixer->chip, nameid,
				kctl->id.name, sizeof(kctl->id.name));

	switch (control) {
	case UAC_FU_MUTE:
	case UAC_FU_VOLUME:
		/*
		 * determine the control name. the rule is:
		 * - if a name id is given in descriptor, use it.
		 * - if the connected input can be determined, then use the name
		 *   of terminal type.
		 * - if the connected output can be determined, use it.
		 * - otherwise, anonymous name.
		 */
		if (!len) {
			if (iterm)
				len = get_term_name(mixer->chip, iterm,
						    kctl->id.name,
						    sizeof(kctl->id.name), 1);
			if (!len && oterm)
				len = get_term_name(mixer->chip, oterm,
						    kctl->id.name,
						    sizeof(kctl->id.name), 1);
			if (!len)
				snprintf(kctl->id.name, sizeof(kctl->id.name),
					 "Feature %d", unitid);
		}

		if (!mapped_name)
			check_no_speaker_on_headset(kctl, mixer->chip->card);

		/*
		 * determine the stream direction:
		 * if the connected output is USB stream, then it's likely a
		 * capture stream.  otherwise it should be playback (hopefully :)
		 */
		if (!mapped_name && oterm && !(oterm->type >> 16)) {
			if ((oterm->type & 0xff00) == 0x0100)
				append_ctl_name(kctl, " Capture");
			else
				append_ctl_name(kctl, " Playback");
		}
		append_ctl_name(kctl, control == UAC_FU_MUTE ?
				" Switch" : " Volume");
		break;
	default:
		if (!len)
			strscpy(kctl->id.name, audio_feature_info[control-1].name,
				sizeof(kctl->id.name));
		break;
	}

	/* get min/max values */
	get_min_max_with_quirks(cval, 0, kctl);

	/* skip a bogus volume range */
	if (cval->max <= cval->min) {
		usb_audio_dbg(mixer->chip,
			      "[%d] FU [%s] skipped due to invalid volume\n",
			      cval->head.id, kctl->id.name);
		snd_ctl_free_one(kctl);
		return;
	}

	if (control == UAC_FU_VOLUME) {
		check_mapped_dB(map, cval);
		if (cval->dBmin < cval->dBmax || !cval->initialized) {
			kctl->tlv.c = snd_usb_mixer_vol_tlv;
			kctl->vd[0].access |=
				SNDRV_CTL_ELEM_ACCESS_TLV_READ |
				SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
		}
	}

	snd_usb_mixer_fu_apply_quirk(mixer, cval, unitid, kctl);

	range = (cval->max - cval->min) / cval->res;
	/*
	 * Are there devices with volume range more than 255? I use a bit more
	 * to be sure. 384 is a resolution magic number found on Logitech
	 * devices. It will definitively catch all buggy Logitech devices.
	 */
	if (range > 384) {
		usb_audio_warn(mixer->chip,
			       "Warning! Unlikely big volume range (=%u), cval->res is probably wrong.",
			       range);
		usb_audio_warn(mixer->chip,
			       "[%d] FU [%s] ch = %d, val = %d/%d/%d",
			       cval->head.id, kctl->id.name, cval->channels,
			       cval->min, cval->max, cval->res);
	}

	usb_audio_dbg(mixer->chip, "[%d] FU [%s] ch = %d, val = %d/%d/%d\n",
		      cval->head.id, kctl->id.name, cval->channels,
		      cval->min, cval->max, cval->res);
	snd_usb_mixer_add_control(&cval->head, kctl);
}

/* wrapper used while parsing a full descriptor tree (has map and oterm) */
static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
			      unsigned int ctl_mask, int control,
			      struct usb_audio_term *iterm, int unitid,
			      int readonly_mask)
{
	struct uac_feature_unit_descriptor *desc = raw_desc;
	int nameid = uac_feature_unit_iFeature(desc);

	__build_feature_ctl(state->mixer, state->map, ctl_mask, control,
			iterm, &state->oterm, unitid, nameid, readonly_mask);
}

/* wrapper for BADD profiles: no terminals or string ids available */
static void build_feature_ctl_badd(struct usb_mixer_interface *mixer,
			      unsigned int ctl_mask, int control, int unitid,
			      const struct usbmix_name_map *badd_map)
{
	__build_feature_ctl(mixer, badd_map, ctl_mask, control,
			NULL, NULL, unitid, 0, 0);
}

/* derive a jack-control name from the terminal, tagged by direction */
static void get_connector_control_name(struct usb_mixer_interface *mixer,
				       struct usb_audio_term *term,
				       bool is_input, char *name, int name_size)
{
	int name_len = get_term_name(mixer->chip, term, name, name_size, 0);

	if (name_len == 0)
		strscpy(name, "Unknown", name_size);

	/*
	 * sound/core/ctljack.c has a convention of naming jack controls
	 * by ending in " Jack". Make it slightly more useful by
	 * indicating Input or Output after the terminal name.
	 */
	if (is_input)
		strlcat(name, " - Input Jack", name_size);
	else
		strlcat(name, " - Output Jack", name_size);
}

/* get connector value to "wake up" the USB audio */
static int connector_mixer_resume(struct usb_mixer_elem_list *list)
{
	struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);

	get_connector_value(cval, NULL, NULL);
	return 0;
}

/* Build a mixer control for a UAC connector control (jack-detect) */
static void build_connector_control(struct usb_mixer_interface *mixer,
				    const struct usbmix_name_map *imap,
				    struct usb_audio_term *term, bool is_input)
{
	struct snd_kcontrol *kctl;
	struct usb_mixer_elem_info *cval;
	const struct usbmix_name_map *map;

	map = find_map(imap, term->id, 0);
	if (check_ignored_ctl(map))
		return;

	cval = kzalloc(sizeof(*cval), GFP_KERNEL);
	if (!cval)
		return;
	snd_usb_mixer_elem_init_std(&cval->head, mixer, term->id);

	/* set up a specific resume callback */
	cval->head.resume = connector_mixer_resume;

	/*
	 * UAC2: The first byte from reading the UAC2_TE_CONNECTOR control returns the
	 * number of channels connected.
	 *
	 * UAC3: The first byte specifies size of bitmap for the inserted controls. The
	 * following byte(s) specifies which connectors are inserted.
	 *
	 * This boolean ctl will simply report if any channels are connected
	 * or not.
	 */
	if (mixer->protocol == UAC_VERSION_2)
		cval->control = UAC2_TE_CONNECTOR;
	else /* UAC_VERSION_3 */
		cval->control = UAC3_TE_INSERTION;

	cval->val_type = USB_MIXER_BOOLEAN;
	cval->channels = 1; /* report true if any channel is connected */
	cval->min = 0;
	cval->max = 1;
	kctl = snd_ctl_new1(&usb_connector_ctl_ro, cval);
	if (!kctl) {
		usb_audio_err(mixer->chip, "cannot malloc kcontrol\n");
		usb_mixer_elem_info_free(cval);
		return;
	}

	if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)))
		strlcat(kctl->id.name, " Jack", sizeof(kctl->id.name));
	else
		get_connector_control_name(mixer, term, is_input, kctl->id.name,
					   sizeof(kctl->id.name));
	kctl->private_free = snd_usb_mixer_elem_free;
	snd_usb_mixer_add_control(&cval->head, kctl);
}

/* UAC2 clock source: expose the clock-validity bit as a read-only bool */
static int parse_clock_source_unit(struct mixer_build *state, int unitid,
				   void *_ftr)
{
	struct uac_clock_source_descriptor *hdr = _ftr;
	struct usb_mixer_elem_info *cval;
	struct snd_kcontrol *kctl;
	int ret;

	if (state->mixer->protocol != UAC_VERSION_2)
		return -EINVAL;

	/*
	 * The only property of this unit we are interested in is the
	 * clock source validity. If that isn't readable, just bail out.
	 */
	if (!uac_v2v3_control_is_readable(hdr->bmControls,
				      UAC2_CS_CONTROL_CLOCK_VALID))
		return 0;

	cval = kzalloc(sizeof(*cval), GFP_KERNEL);
	if (!cval)
		return -ENOMEM;

	snd_usb_mixer_elem_init_std(&cval->head, state->mixer, hdr->bClockID);

	cval->min = 0;
	cval->max = 1;
	cval->channels = 1;
	cval->val_type = USB_MIXER_BOOLEAN;
	cval->control = UAC2_CS_CONTROL_CLOCK_VALID;

	cval->master_readonly = 1;
	/* From UAC2 5.2.5.1.2 "Only the get request is supported." */
	kctl = snd_ctl_new1(&usb_bool_master_control_ctl_ro, cval);

	if (!kctl) {
		usb_mixer_elem_info_free(cval);
		return -ENOMEM;
	}

	kctl->private_free = snd_usb_mixer_elem_free;
	ret = snd_usb_copy_string_desc(state->chip, hdr->iClockSource,
				       kctl->id.name, sizeof(kctl->id.name));
	if (ret > 0)
		append_ctl_name(kctl, " Validity");
	else
		snprintf(kctl->id.name, sizeof(kctl->id.name),
			 "Clock Source %d Validity", hdr->bClockID);

	return snd_usb_mixer_add_control(&cval->head, kctl);
}

/*
 * parse a feature unit
 *
 * most of controls are defined here.
 */
static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
				    void *_ftr)
{
	int channels, i, j;
	struct usb_audio_term iterm;
	unsigned int master_bits;
	int err, csize;
	struct uac_feature_unit_descriptor *hdr = _ftr;
	__u8 *bmaControls;

	/* locate bmaControls and derive the per-channel control size and
	 * logical channel count, which are laid out per protocol version
	 */
	if (state->mixer->protocol == UAC_VERSION_1) {
		csize = hdr->bControlSize;
		channels = (hdr->bLength - 7) / csize - 1;
		bmaControls = hdr->bmaControls;
	} else if (state->mixer->protocol == UAC_VERSION_2) {
		struct uac2_feature_unit_descriptor *ftr = _ftr;

		csize = 4;
		channels = (hdr->bLength - 6) / 4 - 1;
		bmaControls = ftr->bmaControls;
	} else { /* UAC_VERSION_3 */
		struct uac3_feature_unit_descriptor *ftr = _ftr;

		csize = 4;
		channels = (ftr->bLength - 7) / 4 - 1;
		bmaControls = ftr->bmaControls;
	}

	if (channels > 32) {
		usb_audio_info(state->chip,
			       "usbmixer: too many channels (%d) in unit %d\n",
			       channels, unitid);
		return -EINVAL;
	}

	/* parse the source unit */
	err = parse_audio_unit(state, hdr->bSourceID);
	if (err < 0)
		return err;

	/* determine the input source type and name */
	err = check_input_term(state, hdr->bSourceID, &iterm);
	if (err < 0)
		return err;

	master_bits = snd_usb_combine_bytes(bmaControls, csize);
	/* master configuration quirks */
	switch (state->chip->usb_id) {
	case USB_ID(0x08bb, 0x2702):
		usb_audio_info(state->chip,
			       "usbmixer: master volume quirk for PCM2702 chip\n");
		/* disable non-functional volume control */
		master_bits &= ~UAC_CONTROL_BIT(UAC_FU_VOLUME);
		break;
	case USB_ID(0x1130, 0xf211):
		usb_audio_info(state->chip,
			       "usbmixer: volume control quirk for Tenx TP6911 Audio Headset\n");
		/* disable non-functional volume control */
		channels = 0;
		break;
	}

	if (state->mixer->protocol == UAC_VERSION_1) {
		/* check all control types */
		for (i = 0; i < 10; i++) {
			unsigned int ch_bits = 0;
			int control = audio_feature_info[i].control;

			for (j = 0; j < channels; j++) {
				unsigned int mask;

				mask = snd_usb_combine_bytes(bmaControls +
							     csize * (j+1), csize);
				if (mask & BIT(i))
					ch_bits |= BIT(j);
			}
			/* audio class v1 controls are never read-only */

			/*
			 * The first channel must be set
			 * (for ease of programming).
			 */
			if (ch_bits & 1)
				build_feature_ctl(state, _ftr, ch_bits, control,
						  &iterm, unitid, 0);
			if (master_bits & BIT(i))
				build_feature_ctl(state, _ftr, 0, control,
						  &iterm, unitid, 0);
		}
	} else { /* UAC_VERSION_2/3 */
		for (i = 0; i < ARRAY_SIZE(audio_feature_info); i++) {
			unsigned int ch_bits = 0;
			unsigned int ch_read_only = 0;
			int control = audio_feature_info[i].control;

			for (j = 0; j < channels; j++) {
				unsigned int mask;

				mask = snd_usb_combine_bytes(bmaControls +
							     csize * (j+1), csize);
				if (uac_v2v3_control_is_readable(mask, control)) {
					ch_bits |= BIT(j);
					if (!uac_v2v3_control_is_writeable(mask, control))
						ch_read_only |= BIT(j);
				}
			}

			/*
			 * NOTE: build_feature_ctl() will mark the control
			 * read-only if all channels are marked read-only in
			 * the descriptors. Otherwise, the control will be
			 * reported as writeable, but the driver will not
			 * actually issue a write command for read-only
			 * channels.
			 */

			/*
			 * The first channel must be set
			 * (for ease of programming).
			 */
			if (ch_bits & 1)
				build_feature_ctl(state, _ftr, ch_bits, control,
						  &iterm, unitid, ch_read_only);
			if (uac_v2v3_control_is_readable(master_bits, control))
				build_feature_ctl(state, _ftr, 0, control,
						  &iterm, unitid,
						  !uac_v2v3_control_is_writeable(master_bits,
										 control));
		}
	}

	return 0;
}

/*
 * Mixer Unit
 */

/* check whether the given in/out overflows bmMixerControls matrix */
static bool mixer_bitmap_overflow(struct uac_mixer_unit_descriptor *desc,
				  int protocol, int num_ins, int num_outs)
{
	u8 *hdr = (u8 *)desc;
	u8 *c = uac_mixer_unit_bmControls(desc, protocol);
	size_t rest; /* remaining bytes after bmMixerControls */

	switch (protocol) {
	case UAC_VERSION_1:
	default:
		rest = 1; /* iMixer */
		break;
	case UAC_VERSION_2:
		rest = 2; /* bmControls + iMixer */
		break;
	case UAC_VERSION_3:
		rest = 6; /* bmControls + wMixerDescrStr */
		break;
	}

	/* overflow? */
	return c + (num_ins * num_outs + 7) / 8 + rest > hdr + hdr[0];
}

/*
 * build a mixer unit control
 *
 * the callbacks are identical with feature unit.
 * input channel number (zero based) is given in control field instead.
/*
 * Create one ALSA "Mixer Source N Volume" control for a single input
 * channel of a mixer unit.
 *
 * @state:    parser state
 * @desc:     raw mixer unit descriptor
 * @in_pin:   input pin index (not referenced in this function)
 * @in_ch:    input channel number, zero based
 * @num_outs: number of output channels of the mixer unit
 * @unitid:   unit id, used for quirk-map lookup and element init
 * @iterm:    input terminal, used to derive the control name
 *
 * Failures are not propagated: on allocation error the control is simply
 * not registered and the function returns silently.
 */
static void build_mixer_unit_ctl(struct mixer_build *state,
				 struct uac_mixer_unit_descriptor *desc,
				 int in_pin, int in_ch, int num_outs,
				 int unitid, struct usb_audio_term *iterm)
{
	struct usb_mixer_elem_info *cval;
	unsigned int i, len;
	struct snd_kcontrol *kctl;
	const struct usbmix_name_map *map;

	/* honor the per-device quirk table: some controls are ignored */
	map = find_map(state->map, unitid, 0);
	if (check_ignored_ctl(map))
		return;

	cval = kzalloc(sizeof(*cval), GFP_KERNEL);
	if (!cval)
		return;

	snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
	cval->control = in_ch + 1; /* based on 1 */
	cval->val_type = USB_MIXER_S16;
	/* collect the set of output channels this input channel feeds */
	for (i = 0; i < num_outs; i++) {
		__u8 *c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);

		if (check_matrix_bitmap(c, in_ch, i, num_outs)) {
			cval->cmask |= BIT(i);
			cval->channels++;
		}
	}

	/* get min/max values */
	get_min_max(cval, 0);

	kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval);
	if (!kctl) {
		usb_audio_err(state->chip, "cannot malloc kcontrol\n");
		usb_mixer_elem_info_free(cval);
		return;
	}
	kctl->private_free = snd_usb_mixer_elem_free;

	/* name priority: quirk map, then input terminal, then fallback */
	len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
	if (!len)
		len = get_term_name(state->chip, iterm, kctl->id.name,
				    sizeof(kctl->id.name), 0);
	if (!len)
		snprintf(kctl->id.name, sizeof(kctl->id.name),
			 "Mixer Source %d", in_ch + 1);
	append_ctl_name(kctl, " Volume");

	usb_audio_dbg(state->chip, "[%d] MU [%s] ch = %d, val = %d/%d\n",
		      cval->head.id, kctl->id.name, cval->channels,
		      cval->min, cval->max);
	snd_usb_mixer_add_control(&cval->head, kctl);
}
/*
 * Parse a mixer unit and build one volume control per input channel that
 * has at least one control bit set in the bmMixerControls matrix.
 *
 * Note the accumulation: num_ins/ich are running totals across all input
 * pins, so the inner `for (; ich < num_ins; ...)` loop visits exactly the
 * channels contributed by the current pin.
 */
static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
				  void *raw_desc)
{
	struct uac_mixer_unit_descriptor *desc = raw_desc;
	struct usb_audio_term iterm;
	int input_pins, num_ins, num_outs;
	int pin, ich, err;

	err = uac_mixer_unit_get_channels(state, desc);
	if (err < 0) {
		usb_audio_err(state->chip,
			      "invalid MIXER UNIT descriptor %d\n",
			      unitid);
		return err;
	}

	num_outs = err;
	input_pins = desc->bNrInPins;

	num_ins = 0;
	ich = 0;
	for (pin = 0; pin < input_pins; pin++) {
		/* parse the upstream unit first (recursive descent) */
		err = parse_audio_unit(state, desc->baSourceID[pin]);
		if (err < 0)
			continue; /* skip unparsable sources, keep going */
		/* no bmControls field (e.g. Maya44) -> ignore */
		if (!num_outs)
			continue;
		err = check_input_term(state, desc->baSourceID[pin], &iterm);
		if (err < 0)
			return err;
		num_ins += iterm.channels;
		/* stop before the matrix would read past the descriptor */
		if (mixer_bitmap_overflow(desc, state->mixer->protocol,
					  num_ins, num_outs))
			break;
		for (; ich < num_ins; ich++) {
			int och, ich_has_controls = 0;

			/* does this input channel drive any output? */
			for (och = 0; och < num_outs; och++) {
				__u8 *c = uac_mixer_unit_bmControls(desc,
						state->mixer->protocol);

				if (check_matrix_bitmap(c, ich, och, num_outs)) {
					ich_has_controls = 1;
					break;
				}
			}
			if (ich_has_controls)
				build_mixer_unit_ctl(state, desc, pin, ich,
						     num_outs, unitid, &iterm);
		}
	}
	return 0;
}
val = get_relative_value(cval, val); ucontrol->value.integer.value[0] = val; return 0; } /* put callback for processing/extension unit */ static int mixer_ctl_procunit_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = kcontrol->private_data; int val, oval, err; err = get_cur_ctl_value(cval, cval->control << 8, &oval); if (err < 0) return filter_error(cval, err); val = ucontrol->value.integer.value[0]; if (val < 0 || val > get_max_exposed(cval)) return -EINVAL; val = get_abs_value(cval, val); if (val != oval) { set_cur_ctl_value(cval, cval->control << 8, val); return 1; } return 0; } /* alsa control interface for processing/extension unit */ static const struct snd_kcontrol_new mixer_procunit_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "", /* will be filled later */ .info = mixer_ctl_feature_info, .get = mixer_ctl_procunit_get, .put = mixer_ctl_procunit_put, }; /* * predefined data for processing units */ struct procunit_value_info { int control; const char *suffix; int val_type; int min_value; }; struct procunit_info { int type; char *name; const struct procunit_value_info *values; }; static const struct procunit_value_info undefined_proc_info[] = { { 0x00, "Control Undefined", 0 }, { 0 } }; static const struct procunit_value_info updown_proc_info[] = { { UAC_UD_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; static const struct procunit_value_info prologic_proc_info[] = { { UAC_DP_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_DP_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; static const struct procunit_value_info threed_enh_proc_info[] = { { UAC_3D_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_3D_SPACE, "Spaciousness", USB_MIXER_U8 }, { 0 } }; static const struct procunit_value_info reverb_proc_info[] = { { UAC_REVERB_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_REVERB_LEVEL, "Level", USB_MIXER_U8 }, { UAC_REVERB_TIME, "Time", 
USB_MIXER_U16 }, { UAC_REVERB_FEEDBACK, "Feedback", USB_MIXER_U8 }, { 0 } }; static const struct procunit_value_info chorus_proc_info[] = { { UAC_CHORUS_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_CHORUS_LEVEL, "Level", USB_MIXER_U8 }, { UAC_CHORUS_RATE, "Rate", USB_MIXER_U16 }, { UAC_CHORUS_DEPTH, "Depth", USB_MIXER_U16 }, { 0 } }; static const struct procunit_value_info dcr_proc_info[] = { { UAC_DCR_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_DCR_RATE, "Ratio", USB_MIXER_U16 }, { UAC_DCR_MAXAMPL, "Max Amp", USB_MIXER_S16 }, { UAC_DCR_THRESHOLD, "Threshold", USB_MIXER_S16 }, { UAC_DCR_ATTACK_TIME, "Attack Time", USB_MIXER_U16 }, { UAC_DCR_RELEASE_TIME, "Release Time", USB_MIXER_U16 }, { 0 } }; static const struct procunit_info procunits[] = { { UAC_PROCESS_UP_DOWNMIX, "Up Down", updown_proc_info }, { UAC_PROCESS_DOLBY_PROLOGIC, "Dolby Prologic", prologic_proc_info }, { UAC_PROCESS_STEREO_EXTENDER, "3D Stereo Extender", threed_enh_proc_info }, { UAC_PROCESS_REVERB, "Reverb", reverb_proc_info }, { UAC_PROCESS_CHORUS, "Chorus", chorus_proc_info }, { UAC_PROCESS_DYN_RANGE_COMP, "DCR", dcr_proc_info }, { 0 }, }; static const struct procunit_value_info uac3_updown_proc_info[] = { { UAC3_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; static const struct procunit_value_info uac3_stereo_ext_proc_info[] = { { UAC3_EXT_WIDTH_CONTROL, "Width Control", USB_MIXER_U8 }, { 0 } }; static const struct procunit_info uac3_procunits[] = { { UAC3_PROCESS_UP_DOWNMIX, "Up Down", uac3_updown_proc_info }, { UAC3_PROCESS_STEREO_EXTENDER, "3D Stereo Extender", uac3_stereo_ext_proc_info }, { UAC3_PROCESS_MULTI_FUNCTION, "Multi-Function", undefined_proc_info }, { 0 }, }; /* * predefined data for extension units */ static const struct procunit_value_info clock_rate_xu_info[] = { { USB_XU_CLOCK_RATE_SELECTOR, "Selector", USB_MIXER_U8, 0 }, { 0 } }; static const struct procunit_value_info clock_source_xu_info[] = { { USB_XU_CLOCK_SOURCE_SELECTOR, "External", USB_MIXER_BOOLEAN }, { 0 
} }; static const struct procunit_value_info spdif_format_xu_info[] = { { USB_XU_DIGITAL_FORMAT_SELECTOR, "SPDIF/AC3", USB_MIXER_BOOLEAN }, { 0 } }; static const struct procunit_value_info soft_limit_xu_info[] = { { USB_XU_SOFT_LIMIT_SELECTOR, " ", USB_MIXER_BOOLEAN }, { 0 } }; static const struct procunit_info extunits[] = { { USB_XU_CLOCK_RATE, "Clock rate", clock_rate_xu_info }, { USB_XU_CLOCK_SOURCE, "DigitalIn CLK source", clock_source_xu_info }, { USB_XU_DIGITAL_IO_STATUS, "DigitalOut format:", spdif_format_xu_info }, { USB_XU_DEVICE_OPTIONS, "AnalogueIn Soft Limit", soft_limit_xu_info }, { 0 } }; /* * build a processing/extension unit */ static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw_desc, const struct procunit_info *list, bool extension_unit) { struct uac_processing_unit_descriptor *desc = raw_desc; int num_ins; struct usb_mixer_elem_info *cval; struct snd_kcontrol *kctl; int i, err, nameid, type, len, val; const struct procunit_info *info; const struct procunit_value_info *valinfo; const struct usbmix_name_map *map; static const struct procunit_value_info default_value_info[] = { { 0x01, "Switch", USB_MIXER_BOOLEAN }, { 0 } }; static const struct procunit_info default_info = { 0, NULL, default_value_info }; const char *name = extension_unit ? 
"Extension Unit" : "Processing Unit"; num_ins = desc->bNrInPins; for (i = 0; i < num_ins; i++) { err = parse_audio_unit(state, desc->baSourceID[i]); if (err < 0) return err; } type = le16_to_cpu(desc->wProcessType); for (info = list; info && info->type; info++) if (info->type == type) break; if (!info || !info->type) info = &default_info; for (valinfo = info->values; valinfo->control; valinfo++) { __u8 *controls = uac_processing_unit_bmControls(desc, state->mixer->protocol); if (state->mixer->protocol == UAC_VERSION_1) { if (!(controls[valinfo->control / 8] & BIT((valinfo->control % 8) - 1))) continue; } else { /* UAC_VERSION_2/3 */ if (!uac_v2v3_control_is_readable(controls[valinfo->control / 8], valinfo->control)) continue; } map = find_map(state->map, unitid, valinfo->control); if (check_ignored_ctl(map)) continue; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (!cval) return -ENOMEM; snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid); cval->control = valinfo->control; cval->val_type = valinfo->val_type; cval->channels = 1; if (state->mixer->protocol > UAC_VERSION_1 && !uac_v2v3_control_is_writeable(controls[valinfo->control / 8], valinfo->control)) cval->master_readonly = 1; /* get min/max values */ switch (type) { case UAC_PROCESS_UP_DOWNMIX: { bool mode_sel = false; switch (state->mixer->protocol) { case UAC_VERSION_1: case UAC_VERSION_2: default: if (cval->control == UAC_UD_MODE_SELECT) mode_sel = true; break; case UAC_VERSION_3: if (cval->control == UAC3_UD_MODE_SELECT) mode_sel = true; break; } if (mode_sel) { __u8 *control_spec = uac_processing_unit_specific(desc, state->mixer->protocol); cval->min = 1; cval->max = control_spec[0]; cval->res = 1; cval->initialized = 1; break; } get_min_max(cval, valinfo->min_value); break; } case USB_XU_CLOCK_RATE: /* * E-Mu USB 0404/0202/TrackerPre/0204 * samplerate control quirk */ cval->min = 0; cval->max = 5; cval->res = 1; cval->initialized = 1; break; default: get_min_max(cval, valinfo->min_value); break; 
} err = get_cur_ctl_value(cval, cval->control << 8, &val); if (err < 0) { usb_mixer_elem_info_free(cval); return -EINVAL; } kctl = snd_ctl_new1(&mixer_procunit_ctl, cval); if (!kctl) { usb_mixer_elem_info_free(cval); return -ENOMEM; } kctl->private_free = snd_usb_mixer_elem_free; if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name))) { /* nothing */ ; } else if (info->name) { strscpy(kctl->id.name, info->name, sizeof(kctl->id.name)); } else { if (extension_unit) nameid = uac_extension_unit_iExtension(desc, state->mixer->protocol); else nameid = uac_processing_unit_iProcessing(desc, state->mixer->protocol); len = 0; if (nameid) len = snd_usb_copy_string_desc(state->chip, nameid, kctl->id.name, sizeof(kctl->id.name)); if (!len) strscpy(kctl->id.name, name, sizeof(kctl->id.name)); } append_ctl_name(kctl, " "); append_ctl_name(kctl, valinfo->suffix); usb_audio_dbg(state->chip, "[%d] PU [%s] ch = %d, val = %d/%d\n", cval->head.id, kctl->id.name, cval->channels, cval->min, cval->max); err = snd_usb_mixer_add_control(&cval->head, kctl); if (err < 0) return err; } return 0; } static int parse_audio_processing_unit(struct mixer_build *state, int unitid, void *raw_desc) { switch (state->mixer->protocol) { case UAC_VERSION_1: case UAC_VERSION_2: default: return build_audio_procunit(state, unitid, raw_desc, procunits, false); case UAC_VERSION_3: return build_audio_procunit(state, unitid, raw_desc, uac3_procunits, false); } } static int parse_audio_extension_unit(struct mixer_build *state, int unitid, void *raw_desc) { /* * Note that we parse extension units with processing unit descriptors. * That's ok as the layout is the same. 
*/ return build_audio_procunit(state, unitid, raw_desc, extunits, true); } /* * Selector Unit */ /* * info callback for selector unit * use an enumerator type for routing */ static int mixer_ctl_selector_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct usb_mixer_elem_info *cval = kcontrol->private_data; const char **itemlist = (const char **)kcontrol->private_value; if (snd_BUG_ON(!itemlist)) return -EINVAL; return snd_ctl_enum_info(uinfo, 1, cval->max, itemlist); } /* get callback for selector unit */ static int mixer_ctl_selector_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = kcontrol->private_data; int val, err; err = get_cur_ctl_value(cval, cval->control << 8, &val); if (err < 0) { ucontrol->value.enumerated.item[0] = 0; return filter_error(cval, err); } val = get_relative_value(cval, val); ucontrol->value.enumerated.item[0] = val; return 0; } /* put callback for selector unit */ static int mixer_ctl_selector_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = kcontrol->private_data; int val, oval, err; err = get_cur_ctl_value(cval, cval->control << 8, &oval); if (err < 0) return filter_error(cval, err); val = ucontrol->value.enumerated.item[0]; if (val < 0 || val >= cval->max) /* here cval->max = # elements */ return -EINVAL; val = get_abs_value(cval, val); if (val != oval) { set_cur_ctl_value(cval, cval->control << 8, val); return 1; } return 0; } /* alsa control interface for selector unit */ static const struct snd_kcontrol_new mixer_selectunit_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "", /* will be filled later */ .info = mixer_ctl_selector_info, .get = mixer_ctl_selector_get, .put = mixer_ctl_selector_put, }; /* * private free callback. 
* free both private_data and private_value */ static void usb_mixer_selector_elem_free(struct snd_kcontrol *kctl) { int i, num_ins = 0; if (kctl->private_data) { struct usb_mixer_elem_info *cval = kctl->private_data; num_ins = cval->max; usb_mixer_elem_info_free(cval); kctl->private_data = NULL; } if (kctl->private_value) { char **itemlist = (char **)kctl->private_value; for (i = 0; i < num_ins; i++) kfree(itemlist[i]); kfree(itemlist); kctl->private_value = 0; } } /* * parse a selector unit */ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, void *raw_desc) { struct uac_selector_unit_descriptor *desc = raw_desc; unsigned int i, nameid, len; int err; struct usb_mixer_elem_info *cval; struct snd_kcontrol *kctl; const struct usbmix_name_map *map; char **namelist; for (i = 0; i < desc->bNrInPins; i++) { err = parse_audio_unit(state, desc->baSourceID[i]); if (err < 0) return err; } if (desc->bNrInPins == 1) /* only one ? nonsense! */ return 0; map = find_map(state->map, unitid, 0); if (check_ignored_ctl(map)) return 0; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (!cval) return -ENOMEM; snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid); cval->val_type = USB_MIXER_U8; cval->channels = 1; cval->min = 1; cval->max = desc->bNrInPins; cval->res = 1; cval->initialized = 1; switch (state->mixer->protocol) { case UAC_VERSION_1: default: cval->control = 0; break; case UAC_VERSION_2: case UAC_VERSION_3: if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR || desc->bDescriptorSubtype == UAC3_CLOCK_SELECTOR) cval->control = UAC2_CX_CLOCK_SELECTOR; else /* UAC2/3_SELECTOR_UNIT */ cval->control = UAC2_SU_SELECTOR; break; } namelist = kcalloc(desc->bNrInPins, sizeof(char *), GFP_KERNEL); if (!namelist) { err = -ENOMEM; goto error_cval; } #define MAX_ITEM_NAME_LEN 64 for (i = 0; i < desc->bNrInPins; i++) { struct usb_audio_term iterm; namelist[i] = kmalloc(MAX_ITEM_NAME_LEN, GFP_KERNEL); if (!namelist[i]) { err = -ENOMEM; goto error_name; } len 
= check_mapped_selector_name(state, unitid, i, namelist[i], MAX_ITEM_NAME_LEN); if (! len && check_input_term(state, desc->baSourceID[i], &iterm) >= 0) len = get_term_name(state->chip, &iterm, namelist[i], MAX_ITEM_NAME_LEN, 0); if (! len) sprintf(namelist[i], "Input %u", i); } kctl = snd_ctl_new1(&mixer_selectunit_ctl, cval); if (! kctl) { usb_audio_err(state->chip, "cannot malloc kcontrol\n"); err = -ENOMEM; goto error_name; } kctl->private_value = (unsigned long)namelist; kctl->private_free = usb_mixer_selector_elem_free; /* check the static mapping table at first */ len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); if (!len) { /* no mapping ? */ switch (state->mixer->protocol) { case UAC_VERSION_1: case UAC_VERSION_2: default: /* if iSelector is given, use it */ nameid = uac_selector_unit_iSelector(desc); if (nameid) len = snd_usb_copy_string_desc(state->chip, nameid, kctl->id.name, sizeof(kctl->id.name)); break; case UAC_VERSION_3: /* TODO: Class-Specific strings not yet supported */ break; } /* ... or pick up the terminal name at next */ if (!len) len = get_term_name(state->chip, &state->oterm, kctl->id.name, sizeof(kctl->id.name), 0); /* ... 
or use the fixed string "USB" as the last resort */ if (!len) strscpy(kctl->id.name, "USB", sizeof(kctl->id.name)); /* and add the proper suffix */ if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR || desc->bDescriptorSubtype == UAC3_CLOCK_SELECTOR) append_ctl_name(kctl, " Clock Source"); else if ((state->oterm.type & 0xff00) == 0x0100) append_ctl_name(kctl, " Capture Source"); else append_ctl_name(kctl, " Playback Source"); } usb_audio_dbg(state->chip, "[%d] SU [%s] items = %d\n", cval->head.id, kctl->id.name, desc->bNrInPins); return snd_usb_mixer_add_control(&cval->head, kctl); error_name: for (i = 0; i < desc->bNrInPins; i++) kfree(namelist[i]); kfree(namelist); error_cval: usb_mixer_elem_info_free(cval); return err; } /* * parse an audio unit recursively */ static int parse_audio_unit(struct mixer_build *state, int unitid) { unsigned char *p1; int protocol = state->mixer->protocol; if (test_and_set_bit(unitid, state->unitbitmap)) return 0; /* the unit already visited */ p1 = find_audio_control_unit(state, unitid); if (!p1) { usb_audio_err(state->chip, "unit %d not found!\n", unitid); return -EINVAL; } if (!snd_usb_validate_audio_desc(p1, protocol)) { usb_audio_dbg(state->chip, "invalid unit %d\n", unitid); return 0; /* skip invalid unit */ } switch (PTYPE(protocol, p1[2])) { case PTYPE(UAC_VERSION_1, UAC_INPUT_TERMINAL): case PTYPE(UAC_VERSION_2, UAC_INPUT_TERMINAL): case PTYPE(UAC_VERSION_3, UAC_INPUT_TERMINAL): return parse_audio_input_terminal(state, unitid, p1); case PTYPE(UAC_VERSION_1, UAC_MIXER_UNIT): case PTYPE(UAC_VERSION_2, UAC_MIXER_UNIT): case PTYPE(UAC_VERSION_3, UAC3_MIXER_UNIT): return parse_audio_mixer_unit(state, unitid, p1); case PTYPE(UAC_VERSION_2, UAC2_CLOCK_SOURCE): case PTYPE(UAC_VERSION_3, UAC3_CLOCK_SOURCE): return parse_clock_source_unit(state, unitid, p1); case PTYPE(UAC_VERSION_1, UAC_SELECTOR_UNIT): case PTYPE(UAC_VERSION_2, UAC_SELECTOR_UNIT): case PTYPE(UAC_VERSION_3, UAC3_SELECTOR_UNIT): case PTYPE(UAC_VERSION_2, 
/*
 * Final teardown of a mixer interface: stop URB traffic first, then
 * release every buffer owned by the mixer object, and the object itself.
 */
static void snd_usb_mixer_free(struct usb_mixer_interface *mixer)
{
	/* kill pending URBs */
	snd_usb_mixer_disconnect(mixer);

	kfree(mixer->id_elems);
	if (mixer->urb) {
		/* the transfer buffer is allocated separately from the URB */
		kfree(mixer->urb->transfer_buffer);
		usb_free_urb(mixer->urb);
	}
	/* NULL-safe: usb_free_urb() and kfree() both accept NULL */
	usb_free_urb(mixer->rc_urb);
	kfree(mixer->rc_setup_packet);
	kfree(mixer);
}
-1, /* dynamic channels */ }, { /* BAOF; Stereo only cfg, Mono alt possible */ .subclass = UAC3_FUNCTION_SUBCLASS_HEADPHONE, .name = "HEADPHONE", .p_chmask = 3, }, { /* BAOF; Mono or Stereo cfg, Mono alt possible */ .subclass = UAC3_FUNCTION_SUBCLASS_SPEAKER, .name = "SPEAKER", .p_chmask = -1, /* dynamic channels */ }, { /* BAIF; Mono or Stereo cfg, Mono alt possible */ .subclass = UAC3_FUNCTION_SUBCLASS_MICROPHONE, .name = "MICROPHONE", .c_chmask = -1, /* dynamic channels */ }, { /* * BAIOF topology * IN: Mono only * OUT: Mono or Stereo cfg, Mono alt possible */ .subclass = UAC3_FUNCTION_SUBCLASS_HEADSET, .name = "HEADSET", .c_chmask = 1, .p_chmask = -1, /* dynamic channels */ .st_chmask = 1, }, { /* BAIOF; IN: Mono only; OUT: Stereo only, Mono alt possible */ .subclass = UAC3_FUNCTION_SUBCLASS_HEADSET_ADAPTER, .name = "HEADSET ADAPTER", .c_chmask = 1, .p_chmask = 3, .st_chmask = 1, }, { /* BAIF + BAOF; IN: Mono only; OUT: Mono only */ .subclass = UAC3_FUNCTION_SUBCLASS_SPEAKERPHONE, .name = "SPEAKERPHONE", .c_chmask = 1, .p_chmask = 1, }, { 0 } /* terminator */ }; static bool uac3_badd_func_has_valid_channels(struct usb_mixer_interface *mixer, const struct uac3_badd_profile *f, int c_chmask, int p_chmask) { /* * If both playback/capture channels are dynamic, make sure * at least one channel is present */ if (f->c_chmask < 0 && f->p_chmask < 0) { if (!c_chmask && !p_chmask) { usb_audio_warn(mixer->chip, "BAAD %s: no channels?", f->name); return false; } return true; } if ((f->c_chmask < 0 && !c_chmask) || (f->c_chmask >= 0 && f->c_chmask != c_chmask)) { usb_audio_warn(mixer->chip, "BAAD %s c_chmask mismatch", f->name); return false; } if ((f->p_chmask < 0 && !p_chmask) || (f->p_chmask >= 0 && f->p_chmask != p_chmask)) { usb_audio_warn(mixer->chip, "BAAD %s p_chmask mismatch", f->name); return false; } return true; } /* * create mixer controls for UAC3 BADD profiles * * UAC3 BADD device doesn't contain CS descriptors thus we will guess everything * * BADD device 
may contain Mixer Unit, which doesn't have any controls, skip it */ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer, int ctrlif) { struct usb_device *dev = mixer->chip->dev; struct usb_interface_assoc_descriptor *assoc; int badd_profile = mixer->chip->badd_profile; const struct uac3_badd_profile *f; const struct usbmix_ctl_map *map; int p_chmask = 0, c_chmask = 0, st_chmask = 0; int i; assoc = usb_ifnum_to_if(dev, ctrlif)->intf_assoc; /* Detect BADD capture/playback channels from AS EP descriptors */ for (i = 0; i < assoc->bInterfaceCount; i++) { int intf = assoc->bFirstInterface + i; struct usb_interface *iface; struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; unsigned int maxpacksize; char dir_in; int chmask, num; if (intf == ctrlif) continue; iface = usb_ifnum_to_if(dev, intf); if (!iface) continue; num = iface->num_altsetting; if (num < 2) return -EINVAL; /* * The number of Channels in an AudioStreaming interface * and the audio sample bit resolution (16 bits or 24 * bits) can be derived from the wMaxPacketSize field in * the Standard AS Audio Data Endpoint descriptor in * Alternate Setting 1 */ alts = &iface->altsetting[1]; altsd = get_iface_desc(alts); if (altsd->bNumEndpoints < 1) return -EINVAL; /* check direction */ dir_in = (get_endpoint(alts, 0)->bEndpointAddress & USB_DIR_IN); maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize); switch (maxpacksize) { default: usb_audio_err(mixer->chip, "incorrect wMaxPacketSize 0x%x for BADD profile\n", maxpacksize); return -EINVAL; case UAC3_BADD_EP_MAXPSIZE_SYNC_MONO_16: case UAC3_BADD_EP_MAXPSIZE_ASYNC_MONO_16: case UAC3_BADD_EP_MAXPSIZE_SYNC_MONO_24: case UAC3_BADD_EP_MAXPSIZE_ASYNC_MONO_24: chmask = 1; break; case UAC3_BADD_EP_MAXPSIZE_SYNC_STEREO_16: case UAC3_BADD_EP_MAXPSIZE_ASYNC_STEREO_16: case UAC3_BADD_EP_MAXPSIZE_SYNC_STEREO_24: case UAC3_BADD_EP_MAXPSIZE_ASYNC_STEREO_24: chmask = 3; break; } if (dir_in) c_chmask = chmask; else p_chmask = 
chmask; } usb_audio_dbg(mixer->chip, "UAC3 BADD profile 0x%x: detected c_chmask=%d p_chmask=%d\n", badd_profile, c_chmask, p_chmask); /* check the mapping table */ for (map = uac3_badd_usbmix_ctl_maps; map->id; map++) { if (map->id == badd_profile) break; } if (!map->id) return -EINVAL; for (f = uac3_badd_profiles; f->name; f++) { if (badd_profile == f->subclass) break; } if (!f->name) return -EINVAL; if (!uac3_badd_func_has_valid_channels(mixer, f, c_chmask, p_chmask)) return -EINVAL; st_chmask = f->st_chmask; /* Playback */ if (p_chmask) { /* Master channel, always writable */ build_feature_ctl_badd(mixer, 0, UAC_FU_MUTE, UAC3_BADD_FU_ID2, map->map); /* Mono/Stereo volume channels, always writable */ build_feature_ctl_badd(mixer, p_chmask, UAC_FU_VOLUME, UAC3_BADD_FU_ID2, map->map); } /* Capture */ if (c_chmask) { /* Master channel, always writable */ build_feature_ctl_badd(mixer, 0, UAC_FU_MUTE, UAC3_BADD_FU_ID5, map->map); /* Mono/Stereo volume channels, always writable */ build_feature_ctl_badd(mixer, c_chmask, UAC_FU_VOLUME, UAC3_BADD_FU_ID5, map->map); } /* Side tone-mixing */ if (st_chmask) { /* Master channel, always writable */ build_feature_ctl_badd(mixer, 0, UAC_FU_MUTE, UAC3_BADD_FU_ID7, map->map); /* Mono volume channel, always writable */ build_feature_ctl_badd(mixer, 1, UAC_FU_VOLUME, UAC3_BADD_FU_ID7, map->map); } /* Insertion Control */ if (f->subclass == UAC3_FUNCTION_SUBCLASS_HEADSET_ADAPTER) { struct usb_audio_term iterm, oterm; /* Input Term - Insertion control */ memset(&iterm, 0, sizeof(iterm)); iterm.id = UAC3_BADD_IT_ID4; iterm.type = UAC_BIDIR_TERMINAL_HEADSET; build_connector_control(mixer, map->map, &iterm, true); /* Output Term - Insertion control */ memset(&oterm, 0, sizeof(oterm)); oterm.id = UAC3_BADD_OT_ID3; oterm.type = UAC_BIDIR_TERMINAL_HEADSET; build_connector_control(mixer, map->map, &oterm, false); } return 0; } /* * create mixer controls * * walk through all UAC_OUTPUT_TERMINAL descriptors to search for mixers */ static int 
snd_usb_mixer_controls(struct usb_mixer_interface *mixer) { struct mixer_build state; int err; const struct usbmix_ctl_map *map; void *p; memset(&state, 0, sizeof(state)); state.chip = mixer->chip; state.mixer = mixer; state.buffer = mixer->hostif->extra; state.buflen = mixer->hostif->extralen; /* check the mapping table */ for (map = usbmix_ctl_maps; map->id; map++) { if (map->id == state.chip->usb_id) { state.map = map->map; state.selector_map = map->selector_map; mixer->connector_map = map->connector_map; break; } } p = NULL; while ((p = snd_usb_find_csint_desc(mixer->hostif->extra, mixer->hostif->extralen, p, UAC_OUTPUT_TERMINAL)) != NULL) { if (!snd_usb_validate_audio_desc(p, mixer->protocol)) continue; /* skip invalid descriptor */ if (mixer->protocol == UAC_VERSION_1) { struct uac1_output_terminal_descriptor *desc = p; /* mark terminal ID as visited */ set_bit(desc->bTerminalID, state.unitbitmap); state.oterm.id = desc->bTerminalID; state.oterm.type = le16_to_cpu(desc->wTerminalType); state.oterm.name = desc->iTerminal; err = parse_audio_unit(&state, desc->bSourceID); if (err < 0 && err != -EINVAL) return err; } else if (mixer->protocol == UAC_VERSION_2) { struct uac2_output_terminal_descriptor *desc = p; /* mark terminal ID as visited */ set_bit(desc->bTerminalID, state.unitbitmap); state.oterm.id = desc->bTerminalID; state.oterm.type = le16_to_cpu(desc->wTerminalType); state.oterm.name = desc->iTerminal; err = parse_audio_unit(&state, desc->bSourceID); if (err < 0 && err != -EINVAL) return err; /* * For UAC2, use the same approach to also add the * clock selectors */ err = parse_audio_unit(&state, desc->bCSourceID); if (err < 0 && err != -EINVAL) return err; if ((state.oterm.type & 0xff00) != 0x0100 && uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls), UAC2_TE_CONNECTOR)) { build_connector_control(state.mixer, state.map, &state.oterm, false); } } else { /* UAC_VERSION_3 */ struct uac3_output_terminal_descriptor *desc = p; /* mark terminal ID as 
			   visited */
			set_bit(desc->bTerminalID, state.unitbitmap);
			state.oterm.id = desc->bTerminalID;
			state.oterm.type = le16_to_cpu(desc->wTerminalType);
			state.oterm.name = le16_to_cpu(desc->wTerminalDescrStr);
			err = parse_audio_unit(&state, desc->bSourceID);
			if (err < 0 && err != -EINVAL)
				return err;

			/*
			 * For UAC3, use the same approach to also add the
			 * clock selectors
			 */
			err = parse_audio_unit(&state, desc->bCSourceID);
			if (err < 0 && err != -EINVAL)
				return err;

			if ((state.oterm.type & 0xff00) != 0x0100 &&
			    uac_v2v3_control_is_readable(le32_to_cpu(desc->bmControls),
							 UAC3_TE_INSERTION)) {
				build_connector_control(state.mixer, state.map,
							&state.oterm, false);
			}
		}
	}

	return 0;
}

/*
 * Translate a notification unit ID through the device's connector map.
 * When a map entry matches, *control and *channel are overridden (if the
 * caller passed them and the entry specifies them) and the delegated ID
 * is returned; otherwise the original unitid is returned unchanged.
 */
static int delegate_notify(struct usb_mixer_interface *mixer, int unitid,
			   u8 *control, u8 *channel)
{
	const struct usbmix_connector_map *map = mixer->connector_map;

	if (!map)
		return unitid;

	for (; map->id; map++) {
		if (map->id == unitid) {
			if (control && map->control)
				*control = map->control;
			if (channel && map->channel)
				*channel = map->channel;
			return map->delegated_id;
		}
	}
	return unitid;
}

/*
 * Invalidate the cached values of every standard mixer element bound to
 * the given unit and emit a value-change notification for each kcontrol.
 */
void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
{
	struct usb_mixer_elem_list *list;

	unitid = delegate_notify(mixer, unitid, NULL, NULL);

	for_each_mixer_elem(list, mixer, unitid) {
		struct usb_mixer_elem_info *info;

		if (!list->is_std_info)
			continue;
		info = mixer_elem_list_to_info(list);
		/* invalidate cache, so the value is read from the device */
		info->cached = 0;
		snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &list->kctl->id);
	}
}

/* proc-file dump helper: print one element's metadata and value range */
static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
				    struct usb_mixer_elem_list *list)
{
	struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
	static const char * const val_types[] = {
		[USB_MIXER_BOOLEAN] = "BOOLEAN",
		[USB_MIXER_INV_BOOLEAN] = "INV_BOOLEAN",
		[USB_MIXER_S8] = "S8",
		[USB_MIXER_U8] = "U8",
		[USB_MIXER_S16] = "S16",
		[USB_MIXER_U16] = "U16",
		[USB_MIXER_S32] = "S32",
		[USB_MIXER_U32] = "U32",
		[USB_MIXER_BESPOKEN] = "BESPOKEN",
	};

	snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, "
		    "channels=%i, type=\"%s\"\n", cval->head.id,
		    cval->control, cval->cmask, cval->channels,
		    val_types[cval->val_type]);
	snd_iprintf(buffer, " Volume: min=%i, max=%i, dBmin=%i, dBmax=%i\n",
		    cval->min, cval->max, cval->dBmin, cval->dBmax);
}

/* proc read callback: dump every element of every mixer on this card */
static void snd_usb_mixer_proc_read(struct snd_info_entry *entry,
				    struct snd_info_buffer *buffer)
{
	struct snd_usb_audio *chip = entry->private_data;
	struct usb_mixer_interface *mixer;
	struct usb_mixer_elem_list *list;
	int unitid;

	list_for_each_entry(mixer, &chip->mixer_list, list) {
		snd_iprintf(buffer,
			    "USB Mixer: usb_id=0x%08x, ctrlif=%i, ctlerr=%i\n",
			    chip->usb_id, mixer_ctrl_intf(mixer),
			    mixer->ignore_ctl_error);
		snd_iprintf(buffer, "Card: %s\n", chip->card->longname);
		for (unitid = 0; unitid < MAX_ID_ELEMS; unitid++) {
			for_each_mixer_elem(list, mixer, unitid) {
				snd_iprintf(buffer, " Unit: %i\n", list->id);
				if (list->kctl)
					snd_iprintf(buffer,
						    " Control: name=\"%s\", index=%i\n",
						    list->kctl->id.name,
						    list->kctl->id.index);
				if (list->dump)
					list->dump(buffer, list);
			}
		}
	}
}

/*
 * Handle one UAC2 interrupt data message: the unit ID is carried in the
 * high byte of wIndex, the control selector in the high byte of wValue
 * and the channel in its low byte.  Matching elements get their cache
 * invalidated and a kcontrol notification.
 */
static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
				       int attribute, int value, int index)
{
	struct usb_mixer_elem_list *list;
	__u8 unitid = (index >> 8) & 0xff;
	__u8 control = (value >> 8) & 0xff;
	__u8 channel = value & 0xff;
	unsigned int count = 0;

	if (channel >= MAX_CHANNELS) {
		usb_audio_dbg(mixer->chip,
			      "%s(): bogus channel number %d\n",
			      __func__, channel);
		return;
	}

	unitid = delegate_notify(mixer, unitid, &control, &channel);

	/* count the elements on this unit first; with more than one, only
	 * the element whose control selector matches is notified */
	for_each_mixer_elem(list, mixer, unitid)
		count++;

	if (count == 0)
		return;

	for_each_mixer_elem(list, mixer, unitid) {
		struct usb_mixer_elem_info *info;

		if (!list->kctl)
			continue;
		if (!list->is_std_info)
			continue;

		info = mixer_elem_list_to_info(list);
		if (count > 1 && info->control != control)
			continue;

		switch (attribute) {
		case UAC2_CS_CUR:
			/* invalidate cache, so the value is read from the device */
			if (channel)
				info->cached &= ~BIT(channel);
			else /* master channel */
				info->cached = 0;

			snd_ctl_notify(mixer->chip->card,
				       SNDRV_CTL_EVENT_MASK_VALUE,
				       &info->head.kctl->id);
			break;

		case UAC2_CS_RANGE:
			/* TODO */
			break;

		case UAC2_CS_MEM:
			/* TODO */
			break;

		default:
			usb_audio_dbg(mixer->chip,
				      "unknown attribute %d in interrupt\n",
				      attribute);
			break;
		} /* switch */
	}
}

/*
 * URB completion handler for the status interrupt endpoint.  Decodes the
 * UAC1 status words or UAC2 interrupt data messages in the transfer
 * buffer and requeues the URB unless it was killed (-ENOENT/-ECONNRESET/
 * -ESHUTDOWN).
 */
static void snd_usb_mixer_interrupt(struct urb *urb)
{
	struct usb_mixer_interface *mixer = urb->context;
	int len = urb->actual_length;
	int ustatus = urb->status;

	if (ustatus != 0)
		goto requeue;

	if (mixer->protocol == UAC_VERSION_1) {
		struct uac1_status_word *status;

		for (status = urb->transfer_buffer;
		     len >= sizeof(*status);
		     len -= sizeof(*status), status++) {
			dev_dbg(&urb->dev->dev, "status interrupt: %02x %02x\n",
				status->bStatusType,
				status->bOriginator);

			/* ignore any notifications not from the control interface */
			if ((status->bStatusType & UAC1_STATUS_TYPE_ORIG_MASK) !=
			    UAC1_STATUS_TYPE_ORIG_AUDIO_CONTROL_IF)
				continue;

			if (status->bStatusType & UAC1_STATUS_TYPE_MEM_CHANGED)
				snd_usb_mixer_rc_memory_change(mixer,
							       status->bOriginator);
			else
				snd_usb_mixer_notify_id(mixer,
							status->bOriginator);
		}
	} else { /* UAC_VERSION_2 */
		struct uac2_interrupt_data_msg *msg;

		for (msg = urb->transfer_buffer;
		     len >= sizeof(*msg);
		     len -= sizeof(*msg), msg++) {
			/* drop vendor specific and endpoint requests */
			if ((msg->bInfo & UAC2_INTERRUPT_DATA_MSG_VENDOR) ||
			    (msg->bInfo & UAC2_INTERRUPT_DATA_MSG_EP))
				continue;

			snd_usb_mixer_interrupt_v2(mixer, msg->bAttribute,
						   le16_to_cpu(msg->wValue),
						   le16_to_cpu(msg->wIndex));
		}
	}

requeue:
	if (ustatus != -ENOENT &&
	    ustatus != -ECONNRESET &&
	    ustatus != -ESHUTDOWN) {
		urb->dev = mixer->chip->dev;
		usb_submit_urb(urb, GFP_ATOMIC);
	}
}

/* create the handler for the optional status interrupt endpoint */
static int snd_usb_mixer_status_create(struct usb_mixer_interface *mixer)
{
	struct usb_endpoint_descriptor *ep;
	void *transfer_buffer;
	int buffer_length;
	unsigned int epnum;

	/* we need one interrupt
	   input endpoint */
	if (get_iface_desc(mixer->hostif)->bNumEndpoints < 1)
		return 0;
	ep = get_endpoint(mixer->hostif, 0);
	/* silently succeed when the endpoint is not an interrupt-IN one */
	if (!usb_endpoint_dir_in(ep) || !usb_endpoint_xfer_int(ep))
		return 0;

	epnum = usb_endpoint_num(ep);
	buffer_length = le16_to_cpu(ep->wMaxPacketSize);
	transfer_buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!transfer_buffer)
		return -ENOMEM;
	mixer->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!mixer->urb) {
		kfree(transfer_buffer);
		return -ENOMEM;
	}
	usb_fill_int_urb(mixer->urb, mixer->chip->dev,
			 usb_rcvintpipe(mixer->chip->dev, epnum),
			 transfer_buffer, buffer_length,
			 snd_usb_mixer_interrupt, mixer, ep->bInterval);
	usb_submit_urb(mixer->urb, GFP_KERNEL);
	return 0;
}

/*
 * Create a mixer object for the given control interface: determine the
 * UAC protocol version, build all controls (BADD or full descriptor
 * parsing), set up the optional status endpoint, apply quirks and
 * register the mixer as a sound-card device.
 */
int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif)
{
	static const struct snd_device_ops dev_ops = {
		.dev_free = snd_usb_mixer_dev_free
	};
	struct usb_mixer_interface *mixer;
	int err;

	strcpy(chip->card->mixername, "USB Mixer");

	mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
	if (!mixer)
		return -ENOMEM;
	mixer->chip = chip;
	mixer->ignore_ctl_error = !!(chip->quirk_flags &
				     QUIRK_FLAG_IGNORE_CTL_ERROR);
	mixer->id_elems = kcalloc(MAX_ID_ELEMS, sizeof(*mixer->id_elems),
				  GFP_KERNEL);
	if (!mixer->id_elems) {
		kfree(mixer);
		return -ENOMEM;
	}
	mixer->hostif = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0];
	switch (get_iface_desc(mixer->hostif)->bInterfaceProtocol) {
	case UAC_VERSION_1:
	default:
		mixer->protocol = UAC_VERSION_1;
		break;
	case UAC_VERSION_2:
		mixer->protocol = UAC_VERSION_2;
		break;
	case UAC_VERSION_3:
		mixer->protocol = UAC_VERSION_3;
		break;
	}

	if (mixer->protocol == UAC_VERSION_3 &&
	    chip->badd_profile >= UAC3_FUNCTION_SUBCLASS_GENERIC_IO) {
		err = snd_usb_mixer_controls_badd(mixer, ctrlif);
		if (err < 0)
			goto _error;
	} else {
		err = snd_usb_mixer_controls(mixer);
		if (err < 0)
			goto _error;
	}

	err = snd_usb_mixer_status_create(mixer);
	if (err < 0)
		goto _error;

	err = snd_usb_mixer_apply_create_quirk(mixer);
	if (err < 0)
		goto _error;

	err = snd_device_new(chip->card, SNDRV_DEV_CODEC, mixer,
			     &dev_ops);
	if (err < 0)
		goto _error;

	/* create the proc file only once, for the first mixer on the card */
	if (list_empty(&chip->mixer_list))
		snd_card_ro_proc_new(chip->card, "usbmixer", chip,
				     snd_usb_mixer_proc_read);

	list_add(&mixer->list, &chip->mixer_list);
	return 0;

_error:
	snd_usb_mixer_free(mixer);
	return err;
}

/* tear a mixer down at device disconnect; guarded against double calls */
void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer)
{
	if (mixer->disconnected)
		return;
	if (mixer->urb)
		usb_kill_urb(mixer->urb);
	if (mixer->rc_urb)
		usb_kill_urb(mixer->rc_urb);
	if (mixer->private_free)
		mixer->private_free(mixer);
	mixer->disconnected = true;
}

/* stop any bus activity of a mixer */
static void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer)
{
	usb_kill_urb(mixer->urb);
	usb_kill_urb(mixer->rc_urb);
}

/* restart the status interrupt URB, if the mixer has one */
static int snd_usb_mixer_activate(struct usb_mixer_interface *mixer)
{
	int err;

	if (mixer->urb) {
		err = usb_submit_urb(mixer->urb, GFP_NOIO);
		if (err < 0)
			return err;
	}

	return 0;
}

/* suspend: kill URBs, then run the mixer's private suspend hook */
int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer)
{
	snd_usb_mixer_inactivate(mixer);
	if (mixer->private_suspend)
		mixer->private_suspend(mixer);
	return 0;
}

/*
 * Write one element's cached value(s) back to the device (resume path).
 * A per-channel write error only stops the restore of this element; the
 * function always returns 0.
 */
static int restore_mixer_value(struct usb_mixer_elem_list *list)
{
	struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
	int c, err, idx;

	if (cval->val_type == USB_MIXER_BESPOKEN)
		return 0;

	if (cval->cmask) {
		idx = 0;
		for (c = 0; c < MAX_CHANNELS; c++) {
			if (!(cval->cmask & BIT(c)))
				continue;
			/* cached bit c+1 corresponds to channel c+1 */
			if (cval->cached & BIT(c + 1)) {
				err = snd_usb_set_cur_mix_value(cval, c + 1,
								idx,
								cval->cache_val[idx]);
				if (err < 0)
					break;
			}
			idx++;
		}
	} else {
		/* master */
		if (cval->cached)
			snd_usb_set_cur_mix_value(cval, 0, 0, *cval->cache_val);
	}

	return 0;
}

/* resume: replay each element's resume hook, then reactivate URBs */
int snd_usb_mixer_resume(struct usb_mixer_interface *mixer)
{
	struct usb_mixer_elem_list *list;
	int id, err;

	/* restore cached mixer values */
	for (id = 0; id < MAX_ID_ELEMS; id++) {
		for_each_mixer_elem(list, mixer, id) {
			if (list->resume) {
				err = list->resume(list);
				if (err < 0)
					return err;
			}
		}
	}

	snd_usb_mixer_resume_quirk(mixer);

	return snd_usb_mixer_activate(mixer);
}

/* common initialization of a standard mixer element list entry */
void
snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
			    struct usb_mixer_interface *mixer,
			    int unitid)
{
	/* wire up the default proc-dump and resume callbacks */
	list->mixer = mixer;
	list->id = unitid;
	list->dump = snd_usb_mixer_dump_cval;
	list->resume = restore_mixer_value;
}
// SPDX-License-Identifier: GPL-2.0-only /*x * Copyright (c) 2015, The Linux Foundation. All rights reserved. */ #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/of.h> #include <linux/clk-provider.h> #include <linux/regmap.h> #include <linux/reset-controller.h> #include <linux/clk.h> #include <dt-bindings/clock/qcom,mmcc-msm8996.h> #include "common.h" #include "clk-regmap.h" #include "clk-regmap-divider.h" #include "clk-alpha-pll.h" #include "clk-rcg.h" #include "clk-branch.h" #include "reset.h" #include "gdsc.h" enum { P_XO, P_MMPLL0, P_GPLL0, P_GPLL0_DIV, P_MMPLL1, P_MMPLL9, P_MMPLL2, P_MMPLL8, P_MMPLL3, P_DSI0PLL, P_DSI1PLL, P_MMPLL5, P_HDMIPLL, P_DSI0PLL_BYTE, P_DSI1PLL_BYTE, P_MMPLL4, }; static struct clk_fixed_factor gpll0_div = { .mult = 1, .div = 2, .hw.init = &(struct clk_init_data){ .name = "gpll0_div", .parent_data = (const struct clk_parent_data[]){ { .fw_name = "gpll0", .name = "gpll0" }, }, .num_parents = 1, .ops = &clk_fixed_factor_ops, }, }; static const struct pll_vco mmpll_p_vco[] = { { 250000000, 500000000, 3 }, { 500000000, 1000000000, 2 }, { 1000000000, 1500000000, 1 }, { 1500000000, 2000000000, 0 }, }; static const struct pll_vco mmpll_gfx_vco[] = { { 400000000, 1000000000, 2 }, { 1000000000, 1500000000, 1 }, { 1500000000, 2000000000, 0 }, }; static const struct pll_vco mmpll_t_vco[] = { { 500000000, 1500000000, 0 }, }; static struct clk_alpha_pll mmpll0_early = { .offset = 0x0, .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], .vco_table = mmpll_p_vco, .num_vco = ARRAY_SIZE(mmpll_p_vco), .clkr = { .enable_reg = 0x100, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "mmpll0_early", .parent_data = (const struct clk_parent_data[]){ { .fw_name = "xo", .name = "xo_board" }, }, .num_parents = 1, .ops = &clk_alpha_pll_ops, }, }, }; static struct clk_alpha_pll_postdiv mmpll0 = { .offset = 0x0, .regs = 
		clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.width = 4,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mmpll0",
		.parent_hws = (const struct clk_hw*[]){ &mmpll0_early.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

/* mmpll1: enabled via bit 1 of the shared enable register at 0x100 */
static struct clk_alpha_pll mmpll1_early = {
	.offset = 0x30,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.vco_table = mmpll_p_vco,
	.num_vco = ARRAY_SIZE(mmpll_p_vco),
	.clkr = {
		.enable_reg = 0x100,
		.enable_mask = BIT(1),
		.hw.init = &(struct clk_init_data){
			.name = "mmpll1_early",
			.parent_data = (const struct clk_parent_data[]){
				{ .fw_name = "xo", .name = "xo_board" },
			},
			.num_parents = 1,
			.ops = &clk_alpha_pll_ops,
		}
	},
};

static struct clk_alpha_pll_postdiv mmpll1 = {
	.offset = 0x30,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.width = 4,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mmpll1",
		.parent_hws = (const struct clk_hw*[]){ &mmpll1_early.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

/* mmpll2: graphics PLL (gfx VCO table) */
static struct clk_alpha_pll mmpll2_early = {
	.offset = 0x4100,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.vco_table = mmpll_gfx_vco,
	.num_vco = ARRAY_SIZE(mmpll_gfx_vco),
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mmpll2_early",
		.parent_data = (const struct clk_parent_data[]){
			{ .fw_name = "xo", .name = "xo_board" },
		},
		.num_parents = 1,
		.ops = &clk_alpha_pll_ops,
	},
};

static struct clk_alpha_pll_postdiv mmpll2 = {
	.offset = 0x4100,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.width = 4,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mmpll2",
		.parent_hws = (const struct clk_hw*[]){ &mmpll2_early.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_alpha_pll mmpll3_early = {
	.offset = 0x60,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.vco_table = mmpll_p_vco,
	.num_vco = ARRAY_SIZE(mmpll_p_vco),
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mmpll3_early",
		.parent_data = (const struct clk_parent_data[]){
			{ .fw_name = "xo", .name = "xo_board" },
		},
		.num_parents = 1,
		.ops = &clk_alpha_pll_ops,
	},
};

static struct clk_alpha_pll_postdiv mmpll3 = {
	.offset = 0x60,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.width = 4,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mmpll3",
		.parent_hws = (const struct clk_hw*[]){ &mmpll3_early.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

/* mmpll4: uses the single-range VCO table */
static struct clk_alpha_pll mmpll4_early = {
	.offset = 0x90,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.vco_table = mmpll_t_vco,
	.num_vco = ARRAY_SIZE(mmpll_t_vco),
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mmpll4_early",
		.parent_data = (const struct clk_parent_data[]){
			{ .fw_name = "xo", .name = "xo_board" },
		},
		.num_parents = 1,
		.ops = &clk_alpha_pll_ops,
	},
};

static struct clk_alpha_pll_postdiv mmpll4 = {
	.offset = 0x90,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.width = 2,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mmpll4",
		.parent_hws = (const struct clk_hw*[]){ &mmpll4_early.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_alpha_pll mmpll5_early = {
	.offset = 0xc0,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.vco_table = mmpll_p_vco,
	.num_vco = ARRAY_SIZE(mmpll_p_vco),
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mmpll5_early",
		.parent_data = (const struct clk_parent_data[]){
			{ .fw_name = "xo", .name = "xo_board" },
		},
		.num_parents = 1,
		.ops = &clk_alpha_pll_ops,
	},
};

static struct clk_alpha_pll_postdiv mmpll5 = {
	.offset = 0xc0,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.width = 4,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mmpll5",
		.parent_hws = (const struct clk_hw*[]){ &mmpll5_early.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static
       struct clk_alpha_pll mmpll8_early = {
	.offset = 0x4130,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.vco_table = mmpll_gfx_vco,
	.num_vco = ARRAY_SIZE(mmpll_gfx_vco),
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mmpll8_early",
		.parent_data = (const struct clk_parent_data[]){
			{ .fw_name = "xo", .name = "xo_board" },
		},
		.num_parents = 1,
		.ops = &clk_alpha_pll_ops,
	},
};

static struct clk_alpha_pll_postdiv mmpll8 = {
	.offset = 0x4130,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.width = 4,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mmpll8",
		.parent_hws = (const struct clk_hw*[]){ &mmpll8_early.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_alpha_pll mmpll9_early = {
	.offset = 0x4200,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.vco_table = mmpll_t_vco,
	.num_vco = ARRAY_SIZE(mmpll_t_vco),
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mmpll9_early",
		.parent_data = (const struct clk_parent_data[]){
			{ .fw_name = "xo", .name = "xo_board" },
		},
		.num_parents = 1,
		.ops = &clk_alpha_pll_ops,
	},
};

static struct clk_alpha_pll_postdiv mmpll9 = {
	.offset = 0x4200,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.width = 2,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mmpll9",
		.parent_hws = (const struct clk_hw*[]){ &mmpll9_early.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

/*
 * parent_map tables pair a P_* parent index with the mux select value;
 * each has a matching clk_parent_data array in the same order.
 */
static const struct parent_map mmss_xo_hdmi_map[] = {
	{ P_XO, 0 },
	{ P_HDMIPLL, 1 }
};

static const struct clk_parent_data mmss_xo_hdmi[] = {
	{ .fw_name = "xo", .name = "xo_board" },
	{ .fw_name = "hdmipll", .name = "hdmipll" }
};

static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
	{ P_XO, 0 },
	{ P_DSI0PLL, 1 },
	{ P_DSI1PLL, 2 }
};

static const struct clk_parent_data mmss_xo_dsi0pll_dsi1pll[] = {
	{ .fw_name = "xo", .name = "xo_board" },
	{ .fw_name = "dsi0pll", .name = "dsi0pll" },
	{ .fw_name = "dsi1pll", .name = "dsi1pll" }
};

static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
	{ P_XO, 0 },
	{ P_GPLL0, 5 },
	{ P_GPLL0_DIV, 6 }
};

static const struct clk_parent_data mmss_xo_gpll0_gpll0_div[] = {
	{ .fw_name = "xo", .name = "xo_board" },
	{ .fw_name = "gpll0", .name = "gpll0" },
	{ .hw = &gpll0_div.hw }
};

static const struct parent_map mmss_xo_dsibyte_map[] = {
	{ P_XO, 0 },
	{ P_DSI0PLL_BYTE, 1 },
	{ P_DSI1PLL_BYTE, 2 }
};

static const struct clk_parent_data mmss_xo_dsibyte[] = {
	{ .fw_name = "xo", .name = "xo_board" },
	{ .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" },
	{ .fw_name = "dsi1pllbyte", .name = "dsi1pllbyte" }
};

static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
	{ P_XO, 0 },
	{ P_MMPLL0, 1 },
	{ P_GPLL0, 5 },
	{ P_GPLL0_DIV, 6 }
};

static const struct clk_parent_data mmss_xo_mmpll0_gpll0_gpll0_div[] = {
	{ .fw_name = "xo", .name = "xo_board" },
	{ .hw = &mmpll0.clkr.hw },
	{ .fw_name = "gpll0", .name = "gpll0" },
	{ .hw = &gpll0_div.hw }
};

static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
	{ P_XO, 0 },
	{ P_MMPLL0, 1 },
	{ P_MMPLL1, 2 },
	{ P_GPLL0, 5 },
	{ P_GPLL0_DIV, 6 }
};

static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
	{ .fw_name = "xo", .name = "xo_board" },
	{ .hw = &mmpll0.clkr.hw },
	{ .hw = &mmpll1.clkr.hw },
	{ .fw_name = "gpll0", .name = "gpll0" },
	{ .hw = &gpll0_div.hw }
};

static const struct parent_map mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div_map[] = {
	{ P_XO, 0 },
	{ P_MMPLL0, 1 },
	{ P_MMPLL3, 3 },
	{ P_GPLL0, 5 },
	{ P_GPLL0_DIV, 6 }
};

static const struct clk_parent_data mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div[] = {
	{ .fw_name = "xo", .name = "xo_board" },
	{ .hw = &mmpll0.clkr.hw },
	{ .hw = &mmpll3.clkr.hw },
	{ .fw_name = "gpll0", .name = "gpll0" },
	{ .hw = &gpll0_div.hw }
};

static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
	{ P_XO, 0 },
	{ P_MMPLL0, 1 },
	{ P_MMPLL5, 2 },
	{ P_GPLL0, 5 },
	{ P_GPLL0_DIV, 6 }
};

static const struct clk_parent_data mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
	{ .fw_name = "xo", .name = "xo_board" },
	{ .hw = &mmpll0.clkr.hw },
	{ .hw = &mmpll5.clkr.hw },
	{ .fw_name = "gpll0", .name = "gpll0" },
	{ .hw = &gpll0_div.hw }
};

static const struct parent_map mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map[] = {
	{ P_XO, 0 },
	{ P_MMPLL0, 1 },
	{ P_MMPLL4, 3 },
	{ P_GPLL0, 5 },
	{ P_GPLL0_DIV, 6 }
};

static const struct clk_parent_data mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div[] = {
	{ .fw_name = "xo", .name = "xo_board" },
	{ .hw = &mmpll0.clkr.hw },
	{ .hw = &mmpll4.clkr.hw },
	{ .fw_name = "gpll0", .name = "gpll0" },
	{ .hw = &gpll0_div.hw }
};

static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map[] = {
	{ P_XO, 0 },
	{ P_MMPLL0, 1 },
	{ P_MMPLL9, 2 },
	{ P_MMPLL2, 3 },
	{ P_MMPLL8, 4 },
	{ P_GPLL0, 5 }
};

static const struct clk_parent_data mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0[] = {
	{ .fw_name = "xo", .name = "xo_board" },
	{ .hw = &mmpll0.clkr.hw },
	{ .hw = &mmpll9.clkr.hw },
	{ .hw = &mmpll2.clkr.hw },
	{ .hw = &mmpll8.clkr.hw },
	{ .fw_name = "gpll0", .name = "gpll0" },
};

static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div_map[] = {
	{ P_XO, 0 },
	{ P_MMPLL0, 1 },
	{ P_MMPLL9, 2 },
	{ P_MMPLL2, 3 },
	{ P_MMPLL8, 4 },
	{ P_GPLL0, 5 },
	{ P_GPLL0_DIV, 6 }
};

static const struct clk_parent_data mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div[] = {
	{ .fw_name = "xo", .name = "xo_board" },
	{ .hw = &mmpll0.clkr.hw },
	{ .hw = &mmpll9.clkr.hw },
	{ .hw = &mmpll2.clkr.hw },
	{ .hw = &mmpll8.clkr.hw },
	{ .fw_name = "gpll0", .name = "gpll0" },
	{ .hw = &gpll0_div.hw }
};

static const struct parent_map mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map[] = {
	{ P_XO, 0 },
	{ P_MMPLL0, 1 },
	{ P_MMPLL1, 2 },
	{ P_MMPLL4, 3 },
	{ P_MMPLL3, 4 },
	{ P_GPLL0, 5 },
	{ P_GPLL0_DIV, 6 }
};

static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div[] = {
	{ .fw_name = "xo", .name = "xo_board" },
	{ .hw = &mmpll0.clkr.hw },
	{ .hw = &mmpll1.clkr.hw },
	{
	  .hw = &mmpll4.clkr.hw },
	{ .hw = &mmpll3.clkr.hw },
	{ .fw_name = "gpll0", .name = "gpll0" },
	{ .hw = &gpll0_div.hw }
};

/* F(rate, src, div, m, n) — NOTE(review): qcom F() macro convention, confirm */
static const struct freq_tbl ftbl_ahb_clk_src[] = {
	F(19200000, P_XO, 1, 0, 0),
	F(40000000, P_GPLL0_DIV, 7.5, 0, 0),
	F(80000000, P_MMPLL0, 10, 0, 0),
	{ }
};

static struct clk_rcg2 ahb_clk_src = {
	.cmd_rcgr = 0x5000,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_ahb_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "ahb_clk_src",
		.parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static const struct freq_tbl ftbl_axi_clk_src[] = {
	F(19200000, P_XO, 1, 0, 0),
	F(75000000, P_GPLL0_DIV, 4, 0, 0),
	F(100000000, P_GPLL0, 6, 0, 0),
	F(171430000, P_GPLL0, 3.5, 0, 0),
	F(200000000, P_GPLL0, 3, 0, 0),
	F(320000000, P_MMPLL0, 2.5, 0, 0),
	F(400000000, P_MMPLL0, 2, 0, 0),
	{ }
};

static struct clk_rcg2 axi_clk_src = {
	.cmd_rcgr = 0x5040,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_axi_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "axi_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

/* maxi reuses the axi frequency table */
static struct clk_rcg2 maxi_clk_src = {
	.cmd_rcgr = 0x5090,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_axi_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "maxi_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2_gfx3d gfx3d_clk_src = {
	.rcg = {
		.cmd_rcgr = 0x4000,
		.hid_width = 5,
		.parent_map = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map,
		.clkr.hw.init = &(struct clk_init_data){
			.name = "gfx3d_clk_src",
			.parent_data = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0,
			.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0),
			.ops = &clk_gfx3d_ops,
			.flags = CLK_SET_RATE_PARENT,
		},
	},
	.hws = (struct clk_hw*[]) {
		&mmpll9.clkr.hw,
		&mmpll2.clkr.hw,
		&mmpll8.clkr.hw
	},
};

static const struct freq_tbl ftbl_rbbmtimer_clk_src[] = {
	F(19200000, P_XO, 1, 0, 0),
	{ }
};

static struct clk_rcg2 rbbmtimer_clk_src = {
	.cmd_rcgr = 0x4090,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_rbbmtimer_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "rbbmtimer_clk_src",
		.parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

/* no freq_tbl here in the source data */
static struct clk_rcg2 isense_clk_src = {
	.cmd_rcgr = 0x4010,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div_map,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "isense_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static const struct freq_tbl ftbl_rbcpr_clk_src[] = {
	F(19200000, P_XO, 1, 0, 0),
	F(50000000, P_GPLL0, 12, 0, 0),
	{ }
};

static struct clk_rcg2 rbcpr_clk_src = {
	.cmd_rcgr = 0x4060,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_rbcpr_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "rbcpr_clk_src",
		.parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static const struct freq_tbl ftbl_video_core_clk_src[] = {
	F(75000000, P_GPLL0_DIV, 4, 0, 0),
	F(150000000, P_GPLL0, 4, 0, 0),
	F(346666667, P_MMPLL3, 3, 0, 0),
	F(520000000, P_MMPLL3, 2, 0, 0),
	{ }
};

static struct clk_rcg2 video_core_clk_src = {
	.cmd_rcgr = 0x1000,
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_video_core_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "video_core_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

/* the two video subcores share the core frequency table */
static struct clk_rcg2 video_subcore0_clk_src = {
	.cmd_rcgr = 0x1060,
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_video_core_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "video_subcore0_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 video_subcore1_clk_src = {
	.cmd_rcgr = 0x1080,
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_video_core_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "video_subcore1_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 pclk0_clk_src = {
	.cmd_rcgr = 0x2000,
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = mmss_xo_dsi0pll_dsi1pll_map,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "pclk0_clk_src",
		.parent_data = mmss_xo_dsi0pll_dsi1pll,
		.num_parents = ARRAY_SIZE(mmss_xo_dsi0pll_dsi1pll),
		.ops = &clk_pixel_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_rcg2 pclk1_clk_src = {
	.cmd_rcgr = 0x2020,
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = mmss_xo_dsi0pll_dsi1pll_map,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "pclk1_clk_src",
		.parent_data = mmss_xo_dsi0pll_dsi1pll,
		.num_parents = ARRAY_SIZE(mmss_xo_dsi0pll_dsi1pll),
		.ops = &clk_pixel_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static const struct freq_tbl ftbl_mdp_clk_src[] = {
	F(85714286, P_GPLL0, 7, 0, 0),
	F(100000000, P_GPLL0, 6, 0, 0),
	F(150000000, P_GPLL0, 4, 0, 0),
	F(171428571, P_GPLL0, 3.5, 0, 0),
	F(200000000, P_GPLL0, 3, 0, 0),
	F(275000000, P_MMPLL5, 3, 0, 0),
	F(300000000, P_GPLL0, 2, 0, 0),
	F(330000000, P_MMPLL5, 2.5, 0, 0),
	F(412500000, P_MMPLL5, 2, 0, 0),
	{ }
};

static struct clk_rcg2 mdp_clk_src = {
	.cmd_rcgr = 0x2040,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_mdp_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mdp_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

/* rate is entirely dictated by the HDMI PLL parent */
static const struct freq_tbl extpclk_freq_tbl[] = {
	{ .src = P_HDMIPLL },
	{ }
};

static struct clk_rcg2 extpclk_clk_src = {
	.cmd_rcgr = 0x2060,
	.hid_width = 5,
	.parent_map = mmss_xo_hdmi_map,
	.freq_tbl = extpclk_freq_tbl,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "extpclk_clk_src",
		.parent_data = mmss_xo_hdmi,
		.num_parents = ARRAY_SIZE(mmss_xo_hdmi),
		.ops = &clk_byte_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static const struct freq_tbl ftbl_mdss_vsync_clk[] = {
	F(19200000, P_XO, 1, 0, 0),
	{ }
};

static struct clk_rcg2 vsync_clk_src = {
	.cmd_rcgr = 0x2080,
	.hid_width = 5,
	.parent_map = mmss_xo_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_mdss_vsync_clk,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "vsync_clk_src",
		.parent_data = mmss_xo_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static const struct freq_tbl ftbl_mdss_hdmi_clk[] = {
	F(19200000, P_XO, 1, 0, 0),
	{ }
};

static struct clk_rcg2 hdmi_clk_src = {
	.cmd_rcgr = 0x2100,
	.hid_width = 5,
	.parent_map = mmss_xo_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_mdss_hdmi_clk,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "hdmi_clk_src",
		.parent_data = mmss_xo_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 byte0_clk_src = {
	.cmd_rcgr = 0x2120,
	.hid_width = 5,
	.parent_map = mmss_xo_dsibyte_map,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "byte0_clk_src",
		.parent_data = mmss_xo_dsibyte,
		.num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
		.ops = &clk_byte2_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_rcg2 byte1_clk_src = {
	.cmd_rcgr = 0x2140,
	.hid_width = 5,
	.parent_map = mmss_xo_dsibyte_map,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "byte1_clk_src",
		.parent_data = mmss_xo_dsibyte,
		.num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
		.ops = &clk_byte2_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static const struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
	F(19200000, P_XO, 1, 0, 0),
	{ }
};

static struct clk_rcg2 esc0_clk_src = {
	.cmd_rcgr = 0x2160,
	.hid_width = 5,
	.parent_map = mmss_xo_dsibyte_map,
	.freq_tbl = ftbl_mdss_esc0_1_clk,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "esc0_clk_src",
		.parent_data = mmss_xo_dsibyte,
		.num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 esc1_clk_src = {
	.cmd_rcgr = 0x2180,
	.hid_width = 5,
	.parent_map = mmss_xo_dsibyte_map,
	.freq_tbl = ftbl_mdss_esc0_1_clk,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "esc1_clk_src",
		.parent_data = mmss_xo_dsibyte,
		.num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
		.ops = &clk_rcg2_ops,
	},
};

static const struct freq_tbl ftbl_camss_gp0_clk_src[] = {
	F(10000, P_XO, 16, 1, 120),
	F(24000, P_XO, 16, 1, 50),
	F(6000000, P_GPLL0_DIV, 10, 1, 5),
	F(12000000, P_GPLL0_DIV, 1, 1, 25),
	F(13000000, P_GPLL0_DIV, 2, 13, 150),
	F(24000000, P_GPLL0_DIV, 1, 2, 25),
	{ }
};

static struct clk_rcg2 camss_gp0_clk_src = {
	.cmd_rcgr = 0x3420,
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_camss_gp0_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "camss_gp0_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 camss_gp1_clk_src = {
	.cmd_rcgr = 0x3450,
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_camss_gp0_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "camss_gp1_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

/* camera sensor master clocks; all four share this table */
static const struct freq_tbl ftbl_mclk0_clk_src[] = {
	F(4800000, P_XO, 4, 0, 0),
	F(6000000, P_GPLL0_DIV, 10, 1, 5),
	F(8000000, P_GPLL0_DIV, 1, 2, 75),
	F(9600000, P_XO, 2, 0, 0),
	F(16666667, P_GPLL0_DIV, 2, 1, 9),
	F(19200000, P_XO, 1, 0, 0),
	F(24000000, P_GPLL0_DIV, 1, 2, 25),
	F(33333333, P_GPLL0_DIV, 1, 1, 9),
	F(48000000, P_GPLL0, 1, 2, 25),
	F(66666667, P_GPLL0, 1, 1, 9),
	{ }
};

static struct clk_rcg2 mclk0_clk_src = {
	.cmd_rcgr = 0x3360,
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_mclk0_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mclk0_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 mclk1_clk_src = {
	.cmd_rcgr = 0x3390,
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_mclk0_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mclk1_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 mclk2_clk_src = {
	.cmd_rcgr = 0x33c0,
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_mclk0_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mclk2_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 mclk3_clk_src = {
	.cmd_rcgr = 0x33f0,
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_mclk0_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "mclk3_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static const struct freq_tbl ftbl_cci_clk_src[] = {
	F(19200000, P_XO, 1, 0, 0),
	F(37500000, P_GPLL0, 16, 0, 0),
	F(50000000, P_GPLL0, 12, 0, 0),
	F(100000000, P_GPLL0, 6, 0, 0),
	{ }
};

static struct clk_rcg2 cci_clk_src = {
	.cmd_rcgr = 0x3300,
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_cci_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "cci_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

/* CSI PHY timer clocks; the three instances share this table */
static const struct freq_tbl ftbl_csi0phytimer_clk_src[] = {
	F(100000000, P_GPLL0_DIV, 3, 0, 0),
	F(200000000, P_GPLL0, 3, 0, 0),
	F(266666667, P_MMPLL0, 3, 0, 0),
	{ }
};

static struct clk_rcg2 csi0phytimer_clk_src = {
	.cmd_rcgr = 0x3000,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_csi0phytimer_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "csi0phytimer_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 csi1phytimer_clk_src = {
	.cmd_rcgr = 0x3030,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_csi0phytimer_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "csi1phytimer_clk_src",
		.parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
		.num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 csi2phytimer_clk_src = {
	.cmd_rcgr = 0x3060,
	.hid_width = 5,
	.parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
	.freq_tbl = ftbl_csi0phytimer_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
/*
 * Camera pipeline RCG sources: CSI PHY 3-phase, JPEG, VFE, CPP, CSI and
 * face-detect core.  Fractional divider literals such as 2.5 / 3.5 / 1.5
 * appear in several F() entries; presumably the F() macro (defined
 * earlier in the file) encodes dividers as 2*h-1 so half-steps are
 * representable -- confirm against the macro definition.
 */

/* Tail of csi2phytimer_clk_src, whose head lies on the previous lines. */
        .name = "csi2phytimer_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

/* CSI PHY 3-phase clock rates, shared by csiphy0/1/2_3p. */
static const struct freq_tbl ftbl_csiphy0_3p_clk_src[] = {
    F(100000000, P_GPLL0_DIV, 3, 0, 0),
    F(200000000, P_GPLL0, 3, 0, 0),
    F(320000000, P_MMPLL4, 3, 0, 0),
    F(384000000, P_MMPLL4, 2.5, 0, 0),
    { }
};

static struct clk_rcg2 csiphy0_3p_clk_src = {
    .cmd_rcgr = 0x3240,
    .hid_width = 5,
    .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
    .freq_tbl = ftbl_csiphy0_3p_clk_src,
    .clkr.hw.init = &(struct clk_init_data){
        .name = "csiphy0_3p_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

static struct clk_rcg2 csiphy1_3p_clk_src = {
    .cmd_rcgr = 0x3260,
    .hid_width = 5,
    .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
    .freq_tbl = ftbl_csiphy0_3p_clk_src,
    .clkr.hw.init = &(struct clk_init_data){
        .name = "csiphy1_3p_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

static struct clk_rcg2 csiphy2_3p_clk_src = {
    .cmd_rcgr = 0x3280,
    .hid_width = 5,
    .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
    .freq_tbl = ftbl_csiphy0_3p_clk_src,
    .clkr.hw.init = &(struct clk_init_data){
        .name = "csiphy2_3p_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

/* JPEG engine clock rates (also reused by jpeg_dma below). */
static const struct freq_tbl ftbl_jpeg0_clk_src[] = {
    F(75000000, P_GPLL0_DIV, 4, 0, 0),
    F(150000000, P_GPLL0, 4, 0, 0),
    F(228571429, P_MMPLL0, 3.5, 0, 0),
    F(266666667, P_MMPLL0, 3, 0, 0),
    F(320000000, P_MMPLL0, 2.5, 0, 0),
    F(480000000, P_MMPLL4, 2, 0, 0),
    { }
};

static struct clk_rcg2 jpeg0_clk_src = {
    .cmd_rcgr = 0x3500,
    .hid_width = 5,
    .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
    .freq_tbl = ftbl_jpeg0_clk_src,
    .clkr.hw.init = &(struct clk_init_data){
        .name = "jpeg0_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

/* Same as jpeg0's table minus the top 480 MHz entry. */
static const struct freq_tbl ftbl_jpeg2_clk_src[] = {
    F(75000000, P_GPLL0_DIV, 4, 0, 0),
    F(150000000, P_GPLL0, 4, 0, 0),
    F(228571429, P_MMPLL0, 3.5, 0, 0),
    F(266666667, P_MMPLL0, 3, 0, 0),
    F(320000000, P_MMPLL0, 2.5, 0, 0),
    { }
};

static struct clk_rcg2 jpeg2_clk_src = {
    .cmd_rcgr = 0x3540,
    .hid_width = 5,
    .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
    .freq_tbl = ftbl_jpeg2_clk_src,
    .clkr.hw.init = &(struct clk_init_data){
        .name = "jpeg2_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

static struct clk_rcg2 jpeg_dma_clk_src = {
    .cmd_rcgr = 0x3560,
    .hid_width = 5,
    .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
    .freq_tbl = ftbl_jpeg0_clk_src,
    .clkr.hw.init = &(struct clk_init_data){
        .name = "jpeg_dma_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

/* Video front end (VFE) clock rates, shared by vfe0 and vfe1. */
static const struct freq_tbl ftbl_vfe0_clk_src[] = {
    F(75000000, P_GPLL0_DIV, 4, 0, 0),
    F(100000000, P_GPLL0_DIV, 3, 0, 0),
    F(300000000, P_GPLL0, 2, 0, 0),
    F(320000000, P_MMPLL0, 2.5, 0, 0),
    F(480000000, P_MMPLL4, 2, 0, 0),
    F(600000000, P_GPLL0, 1, 0, 0),
    { }
};

static struct clk_rcg2 vfe0_clk_src = {
    .cmd_rcgr = 0x3600,
    .hid_width = 5,
    .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
    .freq_tbl = ftbl_vfe0_clk_src,
    .clkr.hw.init = &(struct clk_init_data){
        .name = "vfe0_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

static struct clk_rcg2 vfe1_clk_src = {
    .cmd_rcgr = 0x3620,
    .hid_width = 5,
    .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
    .freq_tbl = ftbl_vfe0_clk_src,
    .clkr.hw.init = &(struct clk_init_data){
        .name = "vfe1_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

/* Camera post-processor (CPP) clock rates. */
static const struct freq_tbl ftbl_cpp_clk_src[] = {
    F(100000000, P_GPLL0_DIV, 3, 0, 0),
    F(200000000, P_GPLL0, 3, 0, 0),
    F(320000000, P_MMPLL0, 2.5, 0, 0),
    F(480000000, P_MMPLL4, 2, 0, 0),
    F(640000000, P_MMPLL4, 1.5, 0, 0),
    { }
};

static struct clk_rcg2 cpp_clk_src = {
    .cmd_rcgr = 0x3640,
    .hid_width = 5,
    .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
    .freq_tbl = ftbl_cpp_clk_src,
    .clkr.hw.init = &(struct clk_init_data){
        .name = "cpp_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

/* CSI interface clock rates, shared by csi0..csi3. */
static const struct freq_tbl ftbl_csi0_clk_src[] = {
    F(100000000, P_GPLL0_DIV, 3, 0, 0),
    F(200000000, P_GPLL0, 3, 0, 0),
    F(266666667, P_MMPLL0, 3, 0, 0),
    F(480000000, P_MMPLL4, 2, 0, 0),
    F(600000000, P_GPLL0, 1, 0, 0),
    { }
};

static struct clk_rcg2 csi0_clk_src = {
    .cmd_rcgr = 0x3090,
    .hid_width = 5,
    .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
    .freq_tbl = ftbl_csi0_clk_src,
    .clkr.hw.init = &(struct clk_init_data){
        .name = "csi0_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

static struct clk_rcg2 csi1_clk_src = {
    .cmd_rcgr = 0x3100,
    .hid_width = 5,
    .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
    .freq_tbl = ftbl_csi0_clk_src,
    .clkr.hw.init = &(struct clk_init_data){
        .name = "csi1_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

static struct clk_rcg2 csi2_clk_src = {
    .cmd_rcgr = 0x3160,
    .hid_width = 5,
    .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
    .freq_tbl = ftbl_csi0_clk_src,
    .clkr.hw.init = &(struct clk_init_data){
        .name = "csi2_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

static struct clk_rcg2 csi3_clk_src = {
    .cmd_rcgr = 0x31c0,
    .hid_width = 5,
    .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
    .freq_tbl = ftbl_csi0_clk_src,
    .clkr.hw.init = &(struct clk_init_data){
        .name = "csi3_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

/* Face-detect core clock rates. */
static const struct freq_tbl ftbl_fd_core_clk_src[] = {
    F(100000000, P_GPLL0_DIV, 3, 0, 0),
    F(200000000, P_GPLL0, 3, 0, 0),
    F(400000000, P_MMPLL0, 2, 0, 0),
    { }
};

static struct clk_rcg2 fd_core_clk_src = {
    .cmd_rcgr = 0x3b00,
    .hid_width = 5,
    .parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
    .freq_tbl = ftbl_fd_core_clk_src,
    .clkr.hw.init = &(struct clk_init_data){
        .name = "fd_core_clk_src",
        .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
        .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
        .ops = &clk_rcg2_ops,
    },
};

/*
 * Branch (gate) clocks start here.  mmss_mmagic_ahb_clk's .flags value
 * continues on the following chunk lines.
 */
static struct clk_branch mmss_mmagic_ahb_clk = {
    .halt_reg = 0x5024,
    .clkr = {
        .enable_reg = 0x5024,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "mmss_mmagic_ahb_clk",
            .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw },
            .num_parents = 1,
            .flags =
/*
 * Interconnect ("mmagic") and SMMU branch (gate) clocks.  Each entry
 * gates via BIT(0) of its enable register and checks the matching halt
 * register.  Branches marked CLK_IS_CRITICAL are never disabled by the
 * framework; AHB/AXI branches derive from the ahb/axi RCG sources, NoC
 * config branches reference the external gcc_mmss_noc_cfg_ahb_clk by
 * fw_name.
 */

/* Continuation of mmss_mmagic_ahb_clk from the previous lines. */
CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch mmss_mmagic_cfg_ahb_clk = {
    .halt_reg = 0x5054,
    .clkr = {
        .enable_reg = 0x5054,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "mmss_mmagic_cfg_ahb_clk",
            .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch mmss_misc_ahb_clk = {
    .halt_reg = 0x5018,
    .clkr = {
        .enable_reg = 0x5018,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "mmss_misc_ahb_clk",
            .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

/* Gates the board XO; parent referenced by fw_name with legacy fallback. */
static struct clk_branch mmss_misc_cxo_clk = {
    .halt_reg = 0x5014,
    .clkr = {
        .enable_reg = 0x5014,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "mmss_misc_cxo_clk",
            .parent_data = (const struct clk_parent_data[]){
                { .fw_name = "xo", .name = "xo_board" },
            },
            .num_parents = 1,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch mmss_mmagic_maxi_clk = {
    .halt_reg = 0x5074,
    .clkr = {
        .enable_reg = 0x5074,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "mmss_mmagic_maxi_clk",
            .parent_hws = (const struct clk_hw*[]){ &maxi_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch mmagic_camss_axi_clk = {
    .halt_reg = 0x3c44,
    .clkr = {
        .enable_reg = 0x3c44,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "mmagic_camss_axi_clk",
            .parent_hws = (const struct clk_hw*[]){ &axi_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch mmagic_camss_noc_cfg_ahb_clk = {
    .halt_reg = 0x3c48,
    .clkr = {
        .enable_reg = 0x3c48,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "mmagic_camss_noc_cfg_ahb_clk",
            .parent_data = (const struct clk_parent_data[]){
                { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" },
            },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch smmu_vfe_ahb_clk = {
    .halt_reg = 0x3c04,
    .clkr = {
        .enable_reg = 0x3c04,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "smmu_vfe_ahb_clk",
            .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch smmu_vfe_axi_clk = {
    .halt_reg = 0x3c08,
    .clkr = {
        .enable_reg = 0x3c08,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "smmu_vfe_axi_clk",
            .parent_hws = (const struct clk_hw*[]){ &axi_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch smmu_cpp_ahb_clk = {
    .halt_reg = 0x3c14,
    .clkr = {
        .enable_reg = 0x3c14,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "smmu_cpp_ahb_clk",
            .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch smmu_cpp_axi_clk = {
    .halt_reg = 0x3c18,
    .clkr = {
        .enable_reg = 0x3c18,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "smmu_cpp_axi_clk",
            .parent_hws = (const struct clk_hw*[]){ &axi_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch smmu_jpeg_ahb_clk = {
    .halt_reg = 0x3c24,
    .clkr = {
        .enable_reg = 0x3c24,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "smmu_jpeg_ahb_clk",
            .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch smmu_jpeg_axi_clk = {
    .halt_reg = 0x3c28,
    .clkr = {
        .enable_reg = 0x3c28,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "smmu_jpeg_axi_clk",
            .parent_hws = (const struct clk_hw*[]){ &axi_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch mmagic_mdss_axi_clk = {
    .halt_reg = 0x2474,
    .clkr = {
        .enable_reg = 0x2474,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "mmagic_mdss_axi_clk",
            .parent_hws = (const struct clk_hw*[]){ &axi_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch mmagic_mdss_noc_cfg_ahb_clk = {
    .halt_reg = 0x2478,
    .clkr = {
        .enable_reg = 0x2478,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "mmagic_mdss_noc_cfg_ahb_clk",
            .parent_data = (const struct clk_parent_data[]){
                { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" },
            },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch smmu_rot_ahb_clk = {
    .halt_reg = 0x2444,
    .clkr = {
        .enable_reg = 0x2444,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "smmu_rot_ahb_clk",
            .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch smmu_rot_axi_clk = {
    .halt_reg = 0x2448,
    .clkr = {
        .enable_reg = 0x2448,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "smmu_rot_axi_clk",
            .parent_hws = (const struct clk_hw*[]){ &axi_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

/* smmu_mdp_ahb_clk's .flags value continues on the following lines. */
static struct clk_branch smmu_mdp_ahb_clk = {
    .halt_reg = 0x2454,
    .clkr = {
        .enable_reg = 0x2454,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "smmu_mdp_ahb_clk",
            .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw },
            .num_parents = 1,
            .flags =
&(struct clk_init_data){ .name = "mmagic_bimc_noc_cfg_ahb_clk", .parent_data = (const struct clk_parent_data[]){ { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" }, }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch gpu_gx_gfx3d_clk = { .halt_reg = 0x4028, .clkr = { .enable_reg = 0x4028, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gpu_gx_gfx3d_clk", .parent_hws = (const struct clk_hw*[]){ &gfx3d_clk_src.rcg.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch gpu_gx_rbbmtimer_clk = { .halt_reg = 0x40b0, .clkr = { .enable_reg = 0x40b0, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gpu_gx_rbbmtimer_clk", .parent_hws = (const struct clk_hw*[]){ &rbbmtimer_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch gpu_ahb_clk = { .halt_reg = 0x403c, .clkr = { .enable_reg = 0x403c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gpu_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch gpu_aon_isense_clk = { .halt_reg = 0x4044, .clkr = { .enable_reg = 0x4044, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gpu_aon_isense_clk", .parent_hws = (const struct clk_hw*[]){ &isense_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch vmem_maxi_clk = { .halt_reg = 0x1204, .clkr = { .enable_reg = 0x1204, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "vmem_maxi_clk", .parent_hws = (const struct clk_hw*[]){ &maxi_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch vmem_ahb_clk = { .halt_reg = 
0x1208, .clkr = { .enable_reg = 0x1208, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "vmem_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mmss_rbcpr_clk = { .halt_reg = 0x4084, .clkr = { .enable_reg = 0x4084, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "mmss_rbcpr_clk", .parent_hws = (const struct clk_hw*[]){ &rbcpr_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mmss_rbcpr_ahb_clk = { .halt_reg = 0x4088, .clkr = { .enable_reg = 0x4088, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "mmss_rbcpr_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch video_core_clk = { .halt_reg = 0x1028, .clkr = { .enable_reg = 0x1028, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "video_core_clk", .parent_hws = (const struct clk_hw*[]){ &video_core_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch video_axi_clk = { .halt_reg = 0x1034, .clkr = { .enable_reg = 0x1034, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "video_axi_clk", .parent_hws = (const struct clk_hw*[]){ &axi_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch video_maxi_clk = { .halt_reg = 0x1038, .clkr = { .enable_reg = 0x1038, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "video_maxi_clk", .parent_hws = (const struct clk_hw*[]){ &maxi_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch video_ahb_clk = { .halt_reg = 0x1030, .clkr = { 
.enable_reg = 0x1030, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "video_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch video_subcore0_clk = { .halt_reg = 0x1048, .clkr = { .enable_reg = 0x1048, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "video_subcore0_clk", .parent_hws = (const struct clk_hw*[]){ &video_subcore0_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch video_subcore1_clk = { .halt_reg = 0x104c, .clkr = { .enable_reg = 0x104c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "video_subcore1_clk", .parent_hws = (const struct clk_hw*[]){ &video_subcore1_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_ahb_clk = { .halt_reg = 0x2308, .clkr = { .enable_reg = 0x2308, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "mdss_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_hdmi_ahb_clk = { .halt_reg = 0x230c, .clkr = { .enable_reg = 0x230c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "mdss_hdmi_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_axi_clk = { .halt_reg = 0x2310, .clkr = { .enable_reg = 0x2310, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "mdss_axi_clk", .parent_hws = (const struct clk_hw*[]){ &axi_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_pclk0_clk = { .halt_reg = 0x2314, .clkr = { 
.enable_reg = 0x2314, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "mdss_pclk0_clk", .parent_hws = (const struct clk_hw*[]){ &pclk0_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_pclk1_clk = { .halt_reg = 0x2318, .clkr = { .enable_reg = 0x2318, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "mdss_pclk1_clk", .parent_hws = (const struct clk_hw*[]){ &pclk1_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_mdp_clk = { .halt_reg = 0x231c, .clkr = { .enable_reg = 0x231c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "mdss_mdp_clk", .parent_hws = (const struct clk_hw*[]){ &mdp_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_extpclk_clk = { .halt_reg = 0x2324, .clkr = { .enable_reg = 0x2324, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "mdss_extpclk_clk", .parent_hws = (const struct clk_hw*[]){ &extpclk_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_vsync_clk = { .halt_reg = 0x2328, .clkr = { .enable_reg = 0x2328, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "mdss_vsync_clk", .parent_hws = (const struct clk_hw*[]){ &vsync_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_hdmi_clk = { .halt_reg = 0x2338, .clkr = { .enable_reg = 0x2338, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "mdss_hdmi_clk", .parent_hws = (const struct clk_hw*[]){ &hdmi_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch mdss_byte0_clk = { .halt_reg = 0x233c, .clkr = { .enable_reg = 0x233c, 
/*
 * Remaining MDSS (DSI byte/esc) and CAMSS branch (gate) clocks: AHB
 * buses, GP timers, sensor MCLKs, CCI, CSI PHY timers, 3-phase PHYs,
 * JPEG, and VFE.  Same pattern as the preceding branches.
 */

/* Continuation of mdss_byte0_clk from the previous lines. */
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "mdss_byte0_clk",
            .parent_hws = (const struct clk_hw*[]){ &byte0_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch mdss_byte1_clk = {
    .halt_reg = 0x2340,
    .clkr = {
        .enable_reg = 0x2340,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "mdss_byte1_clk",
            .parent_hws = (const struct clk_hw*[]){ &byte1_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch mdss_esc0_clk = {
    .halt_reg = 0x2344,
    .clkr = {
        .enable_reg = 0x2344,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "mdss_esc0_clk",
            .parent_hws = (const struct clk_hw*[]){ &esc0_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch mdss_esc1_clk = {
    .halt_reg = 0x2348,
    .clkr = {
        .enable_reg = 0x2348,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "mdss_esc1_clk",
            .parent_hws = (const struct clk_hw*[]){ &esc1_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_top_ahb_clk = {
    .halt_reg = 0x3484,
    .clkr = {
        .enable_reg = 0x3484,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_top_ahb_clk",
            .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_ahb_clk = {
    .halt_reg = 0x348c,
    .clkr = {
        .enable_reg = 0x348c,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_ahb_clk",
            .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_micro_ahb_clk = {
    .halt_reg = 0x3494,
    .clkr = {
        .enable_reg = 0x3494,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_micro_ahb_clk",
            .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_gp0_clk = {
    .halt_reg = 0x3444,
    .clkr = {
        .enable_reg = 0x3444,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_gp0_clk",
            .parent_hws = (const struct clk_hw*[]){ &camss_gp0_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_gp1_clk = {
    .halt_reg = 0x3474,
    .clkr = {
        .enable_reg = 0x3474,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_gp1_clk",
            .parent_hws = (const struct clk_hw*[]){ &camss_gp1_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_mclk0_clk = {
    .halt_reg = 0x3384,
    .clkr = {
        .enable_reg = 0x3384,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_mclk0_clk",
            .parent_hws = (const struct clk_hw*[]){ &mclk0_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_mclk1_clk = {
    .halt_reg = 0x33b4,
    .clkr = {
        .enable_reg = 0x33b4,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_mclk1_clk",
            .parent_hws = (const struct clk_hw*[]){ &mclk1_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_mclk2_clk = {
    .halt_reg = 0x33e4,
    .clkr = {
        .enable_reg = 0x33e4,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_mclk2_clk",
            .parent_hws = (const struct clk_hw*[]){ &mclk2_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_mclk3_clk = {
    .halt_reg = 0x3414,
    .clkr = {
        .enable_reg = 0x3414,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_mclk3_clk",
            .parent_hws = (const struct clk_hw*[]){ &mclk3_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_cci_clk = {
    .halt_reg = 0x3344,
    .clkr = {
        .enable_reg = 0x3344,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_cci_clk",
            .parent_hws = (const struct clk_hw*[]){ &cci_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_cci_ahb_clk = {
    .halt_reg = 0x3348,
    .clkr = {
        .enable_reg = 0x3348,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_cci_ahb_clk",
            .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_csi0phytimer_clk = {
    .halt_reg = 0x3024,
    .clkr = {
        .enable_reg = 0x3024,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_csi0phytimer_clk",
            .parent_hws = (const struct clk_hw*[]){ &csi0phytimer_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_csi1phytimer_clk = {
    .halt_reg = 0x3054,
    .clkr = {
        .enable_reg = 0x3054,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_csi1phytimer_clk",
            .parent_hws = (const struct clk_hw*[]){ &csi1phytimer_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_csi2phytimer_clk = {
    .halt_reg = 0x3084,
    .clkr = {
        .enable_reg = 0x3084,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_csi2phytimer_clk",
            .parent_hws = (const struct clk_hw*[]){ &csi2phytimer_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_csiphy0_3p_clk = {
    .halt_reg = 0x3234,
    .clkr = {
        .enable_reg = 0x3234,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_csiphy0_3p_clk",
            .parent_hws = (const struct clk_hw*[]){ &csiphy0_3p_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_csiphy1_3p_clk = {
    .halt_reg = 0x3254,
    .clkr = {
        .enable_reg = 0x3254,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_csiphy1_3p_clk",
            .parent_hws = (const struct clk_hw*[]){ &csiphy1_3p_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_csiphy2_3p_clk = {
    .halt_reg = 0x3274,
    .clkr = {
        .enable_reg = 0x3274,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_csiphy2_3p_clk",
            .parent_hws = (const struct clk_hw*[]){ &csiphy2_3p_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_jpeg0_clk = {
    .halt_reg = 0x35a8,
    .clkr = {
        .enable_reg = 0x35a8,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_jpeg0_clk",
            .parent_hws = (const struct clk_hw*[]){ &jpeg0_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_jpeg2_clk = {
    .halt_reg = 0x35b0,
    .clkr = {
        .enable_reg = 0x35b0,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_jpeg2_clk",
            .parent_hws = (const struct clk_hw*[]){ &jpeg2_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_jpeg_dma_clk = {
    .halt_reg = 0x35c0,
    .clkr = {
        .enable_reg = 0x35c0,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_jpeg_dma_clk",
            .parent_hws = (const struct clk_hw*[]){ &jpeg_dma_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_jpeg_ahb_clk = {
    .halt_reg = 0x35b4,
    .clkr = {
        .enable_reg = 0x35b4,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_jpeg_ahb_clk",
            .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_jpeg_axi_clk = {
    .halt_reg = 0x35b8,
    .clkr = {
        .enable_reg = 0x35b8,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_jpeg_axi_clk",
            .parent_hws = (const struct clk_hw*[]){ &axi_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_vfe_ahb_clk = {
    .halt_reg = 0x36b8,
    .clkr = {
        .enable_reg = 0x36b8,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_vfe_ahb_clk",
            .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_vfe_axi_clk = {
    .halt_reg = 0x36bc,
    .clkr = {
        .enable_reg = 0x36bc,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_vfe_axi_clk",
            .parent_hws = (const struct clk_hw*[]){ &axi_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

static struct clk_branch camss_vfe0_clk = {
    .halt_reg = 0x36a8,
    .clkr = {
        .enable_reg = 0x36a8,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_vfe0_clk",
            .parent_hws = (const struct clk_hw*[]){ &vfe0_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

/* Also fed by vfe0_clk_src, like camss_vfe0_clk above. */
static struct clk_branch camss_vfe0_stream_clk = {
    .halt_reg = 0x3720,
    .clkr = {
        .enable_reg = 0x3720,
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
            .name = "camss_vfe0_stream_clk",
            .parent_hws = (const struct clk_hw*[]){ &vfe0_clk_src.clkr.hw },
            .num_parents = 1,
            .flags = CLK_SET_RATE_PARENT,
            .ops = &clk_branch2_ops,
        },
    },
};

/* Head of a definition that continues beyond this chunk. */
static struct
clk_branch camss_vfe0_ahb_clk = { .halt_reg = 0x3668, .clkr = { .enable_reg = 0x3668, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_vfe0_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch camss_vfe1_clk = { .halt_reg = 0x36ac, .clkr = { .enable_reg = 0x36ac, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_vfe1_clk", .parent_hws = (const struct clk_hw*[]){ &vfe1_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch camss_vfe1_stream_clk = { .halt_reg = 0x3724, .clkr = { .enable_reg = 0x3724, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_vfe1_stream_clk", .parent_hws = (const struct clk_hw*[]){ &vfe1_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch camss_vfe1_ahb_clk = { .halt_reg = 0x3678, .clkr = { .enable_reg = 0x3678, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_vfe1_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch camss_csi_vfe0_clk = { .halt_reg = 0x3704, .clkr = { .enable_reg = 0x3704, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi_vfe0_clk", .parent_hws = (const struct clk_hw*[]){ &vfe0_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch camss_csi_vfe1_clk = { .halt_reg = 0x3714, .clkr = { .enable_reg = 0x3714, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi_vfe1_clk", .parent_hws = (const struct clk_hw*[]){ &vfe1_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; 
/*
 * CPP (camera post-processor) clocks.  Same gate pattern as the other
 * CAMSS branches: halt_reg == enable_reg, BIT(0) gate, single parent,
 * CLK_SET_RATE_PARENT.
 */
static struct clk_branch camss_cpp_vbif_ahb_clk = { .halt_reg = 0x36c8, .clkr = { .enable_reg = 0x36c8, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_cpp_vbif_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_cpp_axi_clk = { .halt_reg = 0x36c4, .clkr = { .enable_reg = 0x36c4, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_cpp_axi_clk", .parent_hws = (const struct clk_hw*[]){ &axi_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_cpp_clk = { .halt_reg = 0x36b0, .clkr = { .enable_reg = 0x36b0, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_cpp_clk", .parent_hws = (const struct clk_hw*[]){ &cpp_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_cpp_ahb_clk = { .halt_reg = 0x36b4, .clkr = { .enable_reg = 0x36b4, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_cpp_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

/*
 * CSI0..CSI3 receiver pipelines.  Each pipeline has five gates fed from
 * the same csiN RCG (core, PHY, RDI, PIX) plus an AHB gate off the
 * shared ahb_clk_src.
 */
static struct clk_branch camss_csi0_clk = { .halt_reg = 0x30b4, .clkr = { .enable_reg = 0x30b4, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi0_clk", .parent_hws = (const struct clk_hw*[]){ &csi0_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi0_ahb_clk = { .halt_reg = 0x30bc, .clkr = { .enable_reg = 0x30bc, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi0_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi0phy_clk = { .halt_reg = 0x30c4, .clkr = { .enable_reg = 0x30c4, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi0phy_clk", .parent_hws = (const struct clk_hw*[]){ &csi0_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi0rdi_clk = { .halt_reg = 0x30d4, .clkr = { .enable_reg = 0x30d4, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi0rdi_clk", .parent_hws = (const struct clk_hw*[]){ &csi0_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi0pix_clk = { .halt_reg = 0x30e4, .clkr = { .enable_reg = 0x30e4, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi0pix_clk", .parent_hws = (const struct clk_hw*[]){ &csi0_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi1_clk = { .halt_reg = 0x3124, .clkr = { .enable_reg = 0x3124, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi1_clk", .parent_hws = (const struct clk_hw*[]){ &csi1_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi1_ahb_clk = { .halt_reg = 0x3128, .clkr = { .enable_reg = 0x3128, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi1_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi1phy_clk = { .halt_reg = 0x3134, .clkr = { .enable_reg = 0x3134, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi1phy_clk", .parent_hws = (const struct clk_hw*[]){ &csi1_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi1rdi_clk = { .halt_reg = 0x3144, .clkr = { .enable_reg = 0x3144, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi1rdi_clk", .parent_hws = (const struct clk_hw*[]){ &csi1_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi1pix_clk = { .halt_reg = 0x3154, .clkr = { .enable_reg = 0x3154, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi1pix_clk", .parent_hws = (const struct clk_hw*[]){ &csi1_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi2_clk = { .halt_reg = 0x3184, .clkr = { .enable_reg = 0x3184, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi2_clk", .parent_hws = (const struct clk_hw*[]){ &csi2_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi2_ahb_clk = { .halt_reg = 0x3188, .clkr = { .enable_reg = 0x3188, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi2_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi2phy_clk = { .halt_reg = 0x3194, .clkr = { .enable_reg = 0x3194, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi2phy_clk", .parent_hws = (const struct clk_hw*[]){ &csi2_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi2rdi_clk = { .halt_reg = 0x31a4, .clkr = { .enable_reg = 0x31a4, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi2rdi_clk", .parent_hws = (const struct clk_hw*[]){ &csi2_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi2pix_clk = { .halt_reg = 0x31b4, .clkr = { .enable_reg = 0x31b4, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi2pix_clk", .parent_hws = (const struct clk_hw*[]){ &csi2_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi3_clk = { .halt_reg = 0x31e4, .clkr = { .enable_reg = 0x31e4, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi3_clk", .parent_hws = (const struct clk_hw*[]){ &csi3_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi3_ahb_clk = { .halt_reg = 0x31e8, .clkr = { .enable_reg = 0x31e8, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi3_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi3phy_clk = { .halt_reg = 0x31f4, .clkr = { .enable_reg = 0x31f4, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi3phy_clk", .parent_hws = (const struct clk_hw*[]){ &csi3_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi3rdi_clk = { .halt_reg = 0x3204, .clkr = { .enable_reg = 0x3204, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi3rdi_clk", .parent_hws = (const struct clk_hw*[]){ &csi3_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch camss_csi3pix_clk = { .halt_reg = 0x3214, .clkr = { .enable_reg = 0x3214, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_csi3pix_clk", .parent_hws = (const struct clk_hw*[]){ &csi3_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* ISPIF register interface clock */
static struct clk_branch camss_ispif_ahb_clk = { .halt_reg = 0x3224, .clkr = { .enable_reg = 0x3224, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "camss_ispif_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

/* FD (face detection) block clocks */
static struct clk_branch fd_core_clk = { .halt_reg = 0x3b68, .clkr = { .enable_reg = 0x3b68, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "fd_core_clk", .parent_hws = (const struct clk_hw*[]){ &fd_core_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

static struct clk_branch fd_core_uar_clk = { .halt_reg = 0x3b6c, .clkr = { .enable_reg = 0x3b6c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "fd_core_uar_clk", .parent_hws = (const struct clk_hw*[]){ &fd_core_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

/*
 * NOTE(review): 0x3ba74 does not match the 0x3bXX pattern of the other FD
 * registers and exceeds the regmap max_register (0xb008) declared later in
 * this file — looks like a possible typo for a 0x3bXX offset, but it is
 * kept as-is; verify against the hardware programming guide before changing.
 */
static struct clk_branch fd_ahb_clk = { .halt_reg = 0x3ba74, .clkr = { .enable_reg = 0x3ba74, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "fd_ahb_clk", .parent_hws = (const struct clk_hw*[]){ &ahb_clk_src.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };

/* Bare clk_hw clocks (not regmap-backed) registered alongside the regmap clocks */
static struct clk_hw *mmcc_msm8996_hws[] = { &gpll0_div.hw, };

/*
 * GDSC power domains.  The four mmagic_* domains are bus "gatekeeper"
 * domains marked ALWAYS_ON; the leaf domains below hang off them via
 * .parent.  .cxcs lists branch-clock CBCRs the GDSC driver force-enables
 * while switching the domain.
 */
static struct gdsc mmagic_bimc_gdsc = { .gdscr = 0x529c, .pd = { .name = "mmagic_bimc", }, .pwrsts = PWRSTS_OFF_ON, .flags = ALWAYS_ON, };

static struct gdsc mmagic_video_gdsc = { .gdscr = 0x119c, .gds_hw_ctrl = 0x120c, .pd = { .name = "mmagic_video", }, .pwrsts = PWRSTS_OFF_ON, .flags = VOTABLE | ALWAYS_ON, };

static struct gdsc mmagic_mdss_gdsc = { .gdscr = 0x247c, .gds_hw_ctrl = 0x2480, .pd = { .name = "mmagic_mdss", }, .pwrsts = PWRSTS_OFF_ON, .flags = VOTABLE | ALWAYS_ON, };

static struct gdsc mmagic_camss_gdsc = { .gdscr = 0x3c4c, .gds_hw_ctrl = 0x3c50, .pd = { .name = "mmagic_camss", }, .pwrsts = PWRSTS_OFF_ON, .flags = VOTABLE | ALWAYS_ON, };

static struct gdsc venus_gdsc = { .gdscr = 0x1024, .cxcs = (unsigned int []){ 0x1028, 0x1034, 0x1038 }, .cxc_count = 3, .pd = { .name = "venus", }, .parent = &mmagic_video_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, };

/* Venus sub-core domains are hardware-controlled (HW_CTRL) under venus_gdsc */
static struct gdsc venus_core0_gdsc = { .gdscr = 0x1040, .cxcs = (unsigned int []){ 0x1048 }, .cxc_count = 1, .pd = { .name = "venus_core0", }, .parent = &venus_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, .flags = HW_CTRL, };

static struct gdsc venus_core1_gdsc = { .gdscr = 0x1044, .cxcs = (unsigned int []){ 0x104c }, .cxc_count = 1, .pd = { .name = "venus_core1", }, .parent = &venus_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, .flags = HW_CTRL, };

static struct gdsc camss_gdsc = { .gdscr = 0x34a0, .cxcs = (unsigned int []){ 0x36bc, 0x36c4 }, .cxc_count = 2, .pd = { .name = "camss", }, .parent = &mmagic_camss_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, };

static struct gdsc vfe0_gdsc = { .gdscr = 0x3664, .cxcs = (unsigned int []){ 0x36a8 }, .cxc_count = 1, .pd = { .name = "vfe0", }, .parent = &camss_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, };

static struct gdsc vfe1_gdsc = { .gdscr = 0x3674, .cxcs = (unsigned int []){ 0x36ac }, .cxc_count = 1, .pd = { .name = "vfe1", }, .parent = &camss_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, };

static struct gdsc jpeg_gdsc = { .gdscr = 0x35a4, .cxcs = (unsigned int []){ 0x35a8, 0x35b0, 0x35c0, 0x35b8 }, .cxc_count = 4, .pd = { .name = "jpeg", }, .parent = &camss_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, };

static struct gdsc cpp_gdsc = { .gdscr = 0x36d4, .cxcs = (unsigned int []){ 0x36b0 }, .cxc_count = 1, .pd = { .name = "cpp", }, .parent = &camss_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, };

static struct gdsc fd_gdsc = { .gdscr = 0x3b64, .cxcs = (unsigned int []){ 0x3b68, 0x3b6c }, .cxc_count = 2, .pd = { .name = "fd", }, .parent = &camss_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, };

/* mdss_gdsc continues on the next line of the file (.parent completed there) */
static struct gdsc mdss_gdsc = { .gdscr = 0x2304, .cxcs = (unsigned int []){ 0x2310, 0x231c }, .cxc_count = 2, .pd = { .name = "mdss", }, .parent =
/* tail of mdss_gdsc — the initializer opens on the previous line */
&mmagic_mdss_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, };

/* GPU domains: gpu_gx nests under gpu and needs I/O clamping and an external supply */
static struct gdsc gpu_gdsc = { .gdscr = 0x4034, .gds_hw_ctrl = 0x4038, .pd = { .name = "gpu", }, .pwrsts = PWRSTS_OFF_ON, .flags = VOTABLE, };

static struct gdsc gpu_gx_gdsc = { .gdscr = 0x4024, .clamp_io_ctrl = 0x4300, .cxcs = (unsigned int []){ 0x4028 }, .cxc_count = 1, .pd = { .name = "gpu_gx", }, .pwrsts = PWRSTS_OFF_ON, .parent = &gpu_gdsc.pd, .flags = CLAMP_IO, .supply = "vdd-gfx", };

/*
 * Lookup table from the mmcc-msm8996 DT binding clock indices to the
 * regmap clock definitions above.  Note GFX3D uses the .rcg member of
 * its composite source, unlike every other RCG entry.
 */
static struct clk_regmap *mmcc_msm8996_clocks[] = {
	[MMPLL0_EARLY] = &mmpll0_early.clkr,
	[MMPLL0_PLL] = &mmpll0.clkr,
	[MMPLL1_EARLY] = &mmpll1_early.clkr,
	[MMPLL1_PLL] = &mmpll1.clkr,
	[MMPLL2_EARLY] = &mmpll2_early.clkr,
	[MMPLL2_PLL] = &mmpll2.clkr,
	[MMPLL3_EARLY] = &mmpll3_early.clkr,
	[MMPLL3_PLL] = &mmpll3.clkr,
	[MMPLL4_EARLY] = &mmpll4_early.clkr,
	[MMPLL4_PLL] = &mmpll4.clkr,
	[MMPLL5_EARLY] = &mmpll5_early.clkr,
	[MMPLL5_PLL] = &mmpll5.clkr,
	[MMPLL8_EARLY] = &mmpll8_early.clkr,
	[MMPLL8_PLL] = &mmpll8.clkr,
	[MMPLL9_EARLY] = &mmpll9_early.clkr,
	[MMPLL9_PLL] = &mmpll9.clkr,
	[AHB_CLK_SRC] = &ahb_clk_src.clkr,
	[AXI_CLK_SRC] = &axi_clk_src.clkr,
	[MAXI_CLK_SRC] = &maxi_clk_src.clkr,
	[GFX3D_CLK_SRC] = &gfx3d_clk_src.rcg.clkr,
	[RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
	[ISENSE_CLK_SRC] = &isense_clk_src.clkr,
	[RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
	[VIDEO_CORE_CLK_SRC] = &video_core_clk_src.clkr,
	[VIDEO_SUBCORE0_CLK_SRC] = &video_subcore0_clk_src.clkr,
	[VIDEO_SUBCORE1_CLK_SRC] = &video_subcore1_clk_src.clkr,
	[PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
	[PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
	[MDP_CLK_SRC] = &mdp_clk_src.clkr,
	[EXTPCLK_CLK_SRC] = &extpclk_clk_src.clkr,
	[VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
	[HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
	[BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
	[BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
	[ESC0_CLK_SRC] = &esc0_clk_src.clkr,
	[ESC1_CLK_SRC] = &esc1_clk_src.clkr,
	[CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
	[CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
	[MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
	[MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
	[MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
	[MCLK3_CLK_SRC] = &mclk3_clk_src.clkr,
	[CCI_CLK_SRC] = &cci_clk_src.clkr,
	[CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
	[CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
	[CSI2PHYTIMER_CLK_SRC] = &csi2phytimer_clk_src.clkr,
	[CSIPHY0_3P_CLK_SRC] = &csiphy0_3p_clk_src.clkr,
	[CSIPHY1_3P_CLK_SRC] = &csiphy1_3p_clk_src.clkr,
	[CSIPHY2_3P_CLK_SRC] = &csiphy2_3p_clk_src.clkr,
	[JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
	[JPEG2_CLK_SRC] = &jpeg2_clk_src.clkr,
	[JPEG_DMA_CLK_SRC] = &jpeg_dma_clk_src.clkr,
	[VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
	[VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
	[CPP_CLK_SRC] = &cpp_clk_src.clkr,
	[CSI0_CLK_SRC] = &csi0_clk_src.clkr,
	[CSI1_CLK_SRC] = &csi1_clk_src.clkr,
	[CSI2_CLK_SRC] = &csi2_clk_src.clkr,
	[CSI3_CLK_SRC] = &csi3_clk_src.clkr,
	[FD_CORE_CLK_SRC] = &fd_core_clk_src.clkr,
	[MMSS_MMAGIC_AHB_CLK] = &mmss_mmagic_ahb_clk.clkr,
	[MMSS_MMAGIC_CFG_AHB_CLK] = &mmss_mmagic_cfg_ahb_clk.clkr,
	[MMSS_MISC_AHB_CLK] = &mmss_misc_ahb_clk.clkr,
	[MMSS_MISC_CXO_CLK] = &mmss_misc_cxo_clk.clkr,
	[MMSS_MMAGIC_MAXI_CLK] = &mmss_mmagic_maxi_clk.clkr,
	[MMAGIC_CAMSS_AXI_CLK] = &mmagic_camss_axi_clk.clkr,
	[MMAGIC_CAMSS_NOC_CFG_AHB_CLK] = &mmagic_camss_noc_cfg_ahb_clk.clkr,
	[SMMU_VFE_AHB_CLK] = &smmu_vfe_ahb_clk.clkr,
	[SMMU_VFE_AXI_CLK] = &smmu_vfe_axi_clk.clkr,
	[SMMU_CPP_AHB_CLK] = &smmu_cpp_ahb_clk.clkr,
	[SMMU_CPP_AXI_CLK] = &smmu_cpp_axi_clk.clkr,
	[SMMU_JPEG_AHB_CLK] = &smmu_jpeg_ahb_clk.clkr,
	[SMMU_JPEG_AXI_CLK] = &smmu_jpeg_axi_clk.clkr,
	[MMAGIC_MDSS_AXI_CLK] = &mmagic_mdss_axi_clk.clkr,
	[MMAGIC_MDSS_NOC_CFG_AHB_CLK] = &mmagic_mdss_noc_cfg_ahb_clk.clkr,
	[SMMU_ROT_AHB_CLK] = &smmu_rot_ahb_clk.clkr,
	[SMMU_ROT_AXI_CLK] = &smmu_rot_axi_clk.clkr,
	[SMMU_MDP_AHB_CLK] = &smmu_mdp_ahb_clk.clkr,
	[SMMU_MDP_AXI_CLK] = &smmu_mdp_axi_clk.clkr,
	[MMAGIC_VIDEO_AXI_CLK] = &mmagic_video_axi_clk.clkr,
	[MMAGIC_VIDEO_NOC_CFG_AHB_CLK] = &mmagic_video_noc_cfg_ahb_clk.clkr,
	[SMMU_VIDEO_AHB_CLK] = &smmu_video_ahb_clk.clkr,
	[SMMU_VIDEO_AXI_CLK] = &smmu_video_axi_clk.clkr,
	[MMAGIC_BIMC_NOC_CFG_AHB_CLK] = &mmagic_bimc_noc_cfg_ahb_clk.clkr,
	[GPU_GX_GFX3D_CLK] = &gpu_gx_gfx3d_clk.clkr,
	[GPU_GX_RBBMTIMER_CLK] = &gpu_gx_rbbmtimer_clk.clkr,
	[GPU_AHB_CLK] = &gpu_ahb_clk.clkr,
	[GPU_AON_ISENSE_CLK] = &gpu_aon_isense_clk.clkr,
	[VMEM_MAXI_CLK] = &vmem_maxi_clk.clkr,
	[VMEM_AHB_CLK] = &vmem_ahb_clk.clkr,
	[MMSS_RBCPR_CLK] = &mmss_rbcpr_clk.clkr,
	[MMSS_RBCPR_AHB_CLK] = &mmss_rbcpr_ahb_clk.clkr,
	[VIDEO_CORE_CLK] = &video_core_clk.clkr,
	[VIDEO_AXI_CLK] = &video_axi_clk.clkr,
	[VIDEO_MAXI_CLK] = &video_maxi_clk.clkr,
	[VIDEO_AHB_CLK] = &video_ahb_clk.clkr,
	[VIDEO_SUBCORE0_CLK] = &video_subcore0_clk.clkr,
	[VIDEO_SUBCORE1_CLK] = &video_subcore1_clk.clkr,
	[MDSS_AHB_CLK] = &mdss_ahb_clk.clkr,
	[MDSS_HDMI_AHB_CLK] = &mdss_hdmi_ahb_clk.clkr,
	[MDSS_AXI_CLK] = &mdss_axi_clk.clkr,
	[MDSS_PCLK0_CLK] = &mdss_pclk0_clk.clkr,
	[MDSS_PCLK1_CLK] = &mdss_pclk1_clk.clkr,
	[MDSS_MDP_CLK] = &mdss_mdp_clk.clkr,
	[MDSS_EXTPCLK_CLK] = &mdss_extpclk_clk.clkr,
	[MDSS_VSYNC_CLK] = &mdss_vsync_clk.clkr,
	[MDSS_HDMI_CLK] = &mdss_hdmi_clk.clkr,
	[MDSS_BYTE0_CLK] = &mdss_byte0_clk.clkr,
	[MDSS_BYTE1_CLK] = &mdss_byte1_clk.clkr,
	[MDSS_ESC0_CLK] = &mdss_esc0_clk.clkr,
	[MDSS_ESC1_CLK] = &mdss_esc1_clk.clkr,
	[CAMSS_TOP_AHB_CLK] = &camss_top_ahb_clk.clkr,
	[CAMSS_AHB_CLK] = &camss_ahb_clk.clkr,
	[CAMSS_MICRO_AHB_CLK] = &camss_micro_ahb_clk.clkr,
	[CAMSS_GP0_CLK] = &camss_gp0_clk.clkr,
	[CAMSS_GP1_CLK] = &camss_gp1_clk.clkr,
	[CAMSS_MCLK0_CLK] = &camss_mclk0_clk.clkr,
	[CAMSS_MCLK1_CLK] = &camss_mclk1_clk.clkr,
	[CAMSS_MCLK2_CLK] = &camss_mclk2_clk.clkr,
	[CAMSS_MCLK3_CLK] = &camss_mclk3_clk.clkr,
	[CAMSS_CCI_CLK] = &camss_cci_clk.clkr,
	[CAMSS_CCI_AHB_CLK] = &camss_cci_ahb_clk.clkr,
	[CAMSS_CSI0PHYTIMER_CLK] = &camss_csi0phytimer_clk.clkr,
	[CAMSS_CSI1PHYTIMER_CLK] = &camss_csi1phytimer_clk.clkr,
	[CAMSS_CSI2PHYTIMER_CLK] = &camss_csi2phytimer_clk.clkr,
	[CAMSS_CSIPHY0_3P_CLK] = &camss_csiphy0_3p_clk.clkr,
	[CAMSS_CSIPHY1_3P_CLK] = &camss_csiphy1_3p_clk.clkr,
	[CAMSS_CSIPHY2_3P_CLK] = &camss_csiphy2_3p_clk.clkr,
	[CAMSS_JPEG0_CLK] = &camss_jpeg0_clk.clkr,
	[CAMSS_JPEG2_CLK] = &camss_jpeg2_clk.clkr,
	[CAMSS_JPEG_DMA_CLK] = &camss_jpeg_dma_clk.clkr,
	[CAMSS_JPEG_AHB_CLK] = &camss_jpeg_ahb_clk.clkr,
	[CAMSS_JPEG_AXI_CLK] = &camss_jpeg_axi_clk.clkr,
	[CAMSS_VFE_AHB_CLK] = &camss_vfe_ahb_clk.clkr,
	[CAMSS_VFE_AXI_CLK] = &camss_vfe_axi_clk.clkr,
	[CAMSS_VFE0_CLK] = &camss_vfe0_clk.clkr,
	[CAMSS_VFE0_STREAM_CLK] = &camss_vfe0_stream_clk.clkr,
	[CAMSS_VFE0_AHB_CLK] = &camss_vfe0_ahb_clk.clkr,
	[CAMSS_VFE1_CLK] = &camss_vfe1_clk.clkr,
	[CAMSS_VFE1_STREAM_CLK] = &camss_vfe1_stream_clk.clkr,
	[CAMSS_VFE1_AHB_CLK] = &camss_vfe1_ahb_clk.clkr,
	[CAMSS_CSI_VFE0_CLK] = &camss_csi_vfe0_clk.clkr,
	[CAMSS_CSI_VFE1_CLK] = &camss_csi_vfe1_clk.clkr,
	[CAMSS_CPP_VBIF_AHB_CLK] = &camss_cpp_vbif_ahb_clk.clkr,
	[CAMSS_CPP_AXI_CLK] = &camss_cpp_axi_clk.clkr,
	[CAMSS_CPP_CLK] = &camss_cpp_clk.clkr,
	[CAMSS_CPP_AHB_CLK] = &camss_cpp_ahb_clk.clkr,
	[CAMSS_CSI0_CLK] = &camss_csi0_clk.clkr,
	[CAMSS_CSI0_AHB_CLK] = &camss_csi0_ahb_clk.clkr,
	[CAMSS_CSI0PHY_CLK] = &camss_csi0phy_clk.clkr,
	[CAMSS_CSI0RDI_CLK] = &camss_csi0rdi_clk.clkr,
	[CAMSS_CSI0PIX_CLK] = &camss_csi0pix_clk.clkr,
	[CAMSS_CSI1_CLK] = &camss_csi1_clk.clkr,
	[CAMSS_CSI1_AHB_CLK] = &camss_csi1_ahb_clk.clkr,
	[CAMSS_CSI1PHY_CLK] = &camss_csi1phy_clk.clkr,
	[CAMSS_CSI1RDI_CLK] = &camss_csi1rdi_clk.clkr,
	[CAMSS_CSI1PIX_CLK] = &camss_csi1pix_clk.clkr,
	[CAMSS_CSI2_CLK] = &camss_csi2_clk.clkr,
	[CAMSS_CSI2_AHB_CLK] = &camss_csi2_ahb_clk.clkr,
	[CAMSS_CSI2PHY_CLK] = &camss_csi2phy_clk.clkr,
	[CAMSS_CSI2RDI_CLK] = &camss_csi2rdi_clk.clkr,
	[CAMSS_CSI2PIX_CLK] = &camss_csi2pix_clk.clkr,
	[CAMSS_CSI3_CLK] = &camss_csi3_clk.clkr,
	[CAMSS_CSI3_AHB_CLK] = &camss_csi3_ahb_clk.clkr,
	[CAMSS_CSI3PHY_CLK] = &camss_csi3phy_clk.clkr,
	[CAMSS_CSI3RDI_CLK] = &camss_csi3rdi_clk.clkr,
	[CAMSS_CSI3PIX_CLK] = &camss_csi3pix_clk.clkr,
	[CAMSS_ISPIF_AHB_CLK] = &camss_ispif_ahb_clk.clkr,
	[FD_CORE_CLK] = &fd_core_clk.clkr,
	[FD_CORE_UAR_CLK] = &fd_core_uar_clk.clkr,
	[FD_AHB_CLK] = &fd_ahb_clk.clkr,
};

/* GDSC lookup table, indexed by the DT binding's power-domain constants */
static struct gdsc *mmcc_msm8996_gdscs[] = {
	[MMAGIC_BIMC_GDSC] = &mmagic_bimc_gdsc,
	[MMAGIC_VIDEO_GDSC] = &mmagic_video_gdsc,
	[MMAGIC_MDSS_GDSC] = &mmagic_mdss_gdsc,
	[MMAGIC_CAMSS_GDSC] = &mmagic_camss_gdsc,
	[VENUS_GDSC] = &venus_gdsc,
	[VENUS_CORE0_GDSC] = &venus_core0_gdsc,
	[VENUS_CORE1_GDSC] = &venus_core1_gdsc,
	[CAMSS_GDSC] = &camss_gdsc,
	[VFE0_GDSC] = &vfe0_gdsc,
	[VFE1_GDSC] = &vfe1_gdsc,
	[JPEG_GDSC] = &jpeg_gdsc,
	[CPP_GDSC] = &cpp_gdsc,
	[FD_GDSC] = &fd_gdsc,
	[MDSS_GDSC] = &mdss_gdsc,
	[GPU_GDSC] = &gpu_gdsc,
	[GPU_GX_GDSC] = &gpu_gx_gdsc,
};

/* Block reset (BCR) register offsets; the table continues on the next line */
static const struct qcom_reset_map mmcc_msm8996_resets[] = {
	[MMAGICAHB_BCR] = { 0x5020 },
	[MMAGIC_CFG_BCR] = { 0x5050 },
	[MISC_BCR] = { 0x5010 },
	[BTO_BCR] = { 0x5030 },
	[MMAGICAXI_BCR] = { 0x5060 },
	[MMAGICMAXI_BCR] = { 0x5070 },
	[DSA_BCR] = { 0x50a0 },
	[MMAGIC_CAMSS_BCR] = { 0x3c40 },
	[THROTTLE_CAMSS_BCR] = { 0x3c30 },
	[SMMU_VFE_BCR] = { 0x3c00 },
	[SMMU_CPP_BCR] = { 0x3c10 },
	[SMMU_JPEG_BCR] = { 0x3c20 },
	[MMAGIC_MDSS_BCR] = { 0x2470 },
	[THROTTLE_MDSS_BCR] = { 0x2460 },
	[SMMU_ROT_BCR] = { 0x2440 },
	[SMMU_MDP_BCR] = { 0x2450 },
	[MMAGIC_VIDEO_BCR] = { 0x1190 },
	[THROTTLE_VIDEO_BCR] = { 0x1180 },
	[SMMU_VIDEO_BCR] = { 0x1170 },
	[MMAGIC_BIMC_BCR] = { 0x5290 },
	[GPU_GX_BCR] = { 0x4020 },
	[GPU_BCR] = { 0x4030 },
	[GPU_AON_BCR] = { 0x4040 },
	[VMEM_BCR] = { 0x1200 },
	[MMSS_RBCPR_BCR] = { 0x4080 },
	[VIDEO_BCR] = { 0x1020 },
	[MDSS_BCR] = { 0x2300 },
	[CAMSS_TOP_BCR] = { 0x3480 },
	[CAMSS_AHB_BCR] = { 0x3488 },
	[CAMSS_MICRO_BCR] = { 0x3490 },
	[CAMSS_CCI_BCR] = { 0x3340 },
	[CAMSS_PHY0_BCR] = { 0x3020 },
	[CAMSS_PHY1_BCR] = { 0x3050 },
	[CAMSS_PHY2_BCR] = { 0x3080 },
	[CAMSS_CSIPHY0_3P_BCR] = { 0x3230 },
	[CAMSS_CSIPHY1_3P_BCR] = { 0x3250 },
	[CAMSS_CSIPHY2_3P_BCR] = { 0x3270 },
	[CAMSS_JPEG_BCR] = { 0x35a0 },
	[CAMSS_VFE_BCR] = { 0x36a0 },
	[CAMSS_VFE0_BCR] = { 0x3660 },
	[CAMSS_VFE1_BCR] = { 0x3670 },
	[CAMSS_CSI_VFE0_BCR] = { 0x3700 },
	[CAMSS_CSI_VFE1_BCR] = { 0x3710 },
	/* remainder of mmcc_msm8996_resets[] — the array opens on the previous line */
	[CAMSS_CPP_TOP_BCR] = { 0x36c0 },
	[CAMSS_CPP_BCR] = { 0x36d0 },
	[CAMSS_CSI0_BCR] = { 0x30b0 },
	[CAMSS_CSI0RDI_BCR] = { 0x30d0 },
	[CAMSS_CSI0PIX_BCR] = { 0x30e0 },
	[CAMSS_CSI1_BCR] = { 0x3120 },
	[CAMSS_CSI1RDI_BCR] = { 0x3140 },
	[CAMSS_CSI1PIX_BCR] = { 0x3150 },
	[CAMSS_CSI2_BCR] = { 0x3180 },
	[CAMSS_CSI2RDI_BCR] = { 0x31a0 },
	[CAMSS_CSI2PIX_BCR] = { 0x31b0 },
	[CAMSS_CSI3_BCR] = { 0x31e0 },
	[CAMSS_CSI3RDI_BCR] = { 0x3200 },
	[CAMSS_CSI3PIX_BCR] = { 0x3210 },
	[CAMSS_ISPIF_BCR] = { 0x3220 },
	[FD_BCR] = { 0x3b60 },
	[MMSS_SPDM_RM_BCR] = { 0x300 },
};

/* 32-bit MMIO register map covering the whole MMCC block */
static const struct regmap_config mmcc_msm8996_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.max_register = 0xb008,
	.fast_io = true,
};

/* Bundles all clock, reset and GDSC tables for qcom_cc_really_probe() */
static const struct qcom_cc_desc mmcc_msm8996_desc = {
	.config = &mmcc_msm8996_regmap_config,
	.clks = mmcc_msm8996_clocks,
	.num_clks = ARRAY_SIZE(mmcc_msm8996_clocks),
	.resets = mmcc_msm8996_resets,
	.num_resets = ARRAY_SIZE(mmcc_msm8996_resets),
	.gdscs = mmcc_msm8996_gdscs,
	.num_gdscs = ARRAY_SIZE(mmcc_msm8996_gdscs),
	.clk_hws = mmcc_msm8996_hws,
	.num_clk_hws = ARRAY_SIZE(mmcc_msm8996_hws),
};

static const struct of_device_id mmcc_msm8996_match_table[] = {
	{ .compatible = "qcom,mmcc-msm8996" },
	{ }
};
MODULE_DEVICE_TABLE(of, mmcc_msm8996_match_table);

/*
 * mmcc_msm8996_probe() - map the MMCC register block, apply two one-time
 * register tweaks (AHB DCD and NoC FSM disables, done before any clocks
 * are registered), then hand everything to the common qcom_cc probe.
 *
 * Returns 0 on success or a negative errno from regmap/common probe.
 */
static int mmcc_msm8996_probe(struct platform_device *pdev)
{
	struct regmap *regmap;

	regmap = qcom_cc_map(pdev, &mmcc_msm8996_desc);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* Disable the AHB DCD */
	regmap_update_bits(regmap, 0x50d8, BIT(31), 0);
	/* Disable the NoC FSM for mmss_mmagic_cfg_ahb_clk */
	regmap_update_bits(regmap, 0x5054, BIT(15), 0);

	return qcom_cc_really_probe(&pdev->dev, &mmcc_msm8996_desc, regmap);
}

static struct platform_driver mmcc_msm8996_driver = {
	.probe = mmcc_msm8996_probe,
	.driver = {
		.name = "mmcc-msm8996",
		.of_match_table = mmcc_msm8996_match_table,
	},
};
module_platform_driver(mmcc_msm8996_driver);

MODULE_DESCRIPTION("QCOM MMCC MSM8996 Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mmcc-msm8996");
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright 2019 Hans de Goede <[email protected]> */ #include <linux/module.h> #include <linux/pm.h> #include <linux/usb.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_file.h> #include <drm/drm_format_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> static bool eco_mode; module_param(eco_mode, bool, 0644); MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)"); #define DRIVER_NAME "gm12u320" #define DRIVER_DESC "Grain Media GM12U320 USB projector display" #define DRIVER_DATE "2019" #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 /* * The DLP has an actual width of 854 pixels, but that is not a multiple * of 8, breaking things left and right, so we export a width of 848. */ #define GM12U320_USER_WIDTH 848 #define GM12U320_REAL_WIDTH 854 #define GM12U320_HEIGHT 480 #define GM12U320_BLOCK_COUNT 20 #define GM12U320_ERR(fmt, ...) 
\ DRM_DEV_ERROR(gm12u320->dev.dev, fmt, ##__VA_ARGS__) #define MISC_RCV_EPT 1 #define DATA_RCV_EPT 2 #define DATA_SND_EPT 3 #define MISC_SND_EPT 4 #define DATA_BLOCK_HEADER_SIZE 84 #define DATA_BLOCK_CONTENT_SIZE 64512 #define DATA_BLOCK_FOOTER_SIZE 20 #define DATA_BLOCK_SIZE (DATA_BLOCK_HEADER_SIZE + \ DATA_BLOCK_CONTENT_SIZE + \ DATA_BLOCK_FOOTER_SIZE) #define DATA_LAST_BLOCK_CONTENT_SIZE 4032 #define DATA_LAST_BLOCK_SIZE (DATA_BLOCK_HEADER_SIZE + \ DATA_LAST_BLOCK_CONTENT_SIZE + \ DATA_BLOCK_FOOTER_SIZE) #define CMD_SIZE 31 #define READ_STATUS_SIZE 13 #define MISC_VALUE_SIZE 4 #define CMD_TIMEOUT 200 #define DATA_TIMEOUT 1000 #define IDLE_TIMEOUT 2000 #define FIRST_FRAME_TIMEOUT 2000 #define MISC_REQ_GET_SET_ECO_A 0xff #define MISC_REQ_GET_SET_ECO_B 0x35 /* Windows driver does once every second, with arg d = 1, other args 0 */ #define MISC_REQ_UNKNOWN1_A 0xff #define MISC_REQ_UNKNOWN1_B 0x38 /* Windows driver does this on init, with arg a, b = 0, c = 0xa0, d = 4 */ #define MISC_REQ_UNKNOWN2_A 0xa5 #define MISC_REQ_UNKNOWN2_B 0x00 struct gm12u320_device { struct drm_device dev; struct device *dmadev; struct drm_simple_display_pipe pipe; struct drm_connector conn; unsigned char *cmd_buf; unsigned char *data_buf[GM12U320_BLOCK_COUNT]; struct { struct delayed_work work; struct mutex lock; struct drm_framebuffer *fb; struct drm_rect rect; int frame; int draw_status_timeout; struct iosys_map src_map; } fb_update; }; #define to_gm12u320(__dev) container_of(__dev, struct gm12u320_device, dev) static const char cmd_data[CMD_SIZE] = { 0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00, 0x68, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x10, 0xff, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const char cmd_draw[CMD_SIZE] = { 0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0xfe, 0x00, 0x00, 0x00, 0xc0, 0xd1, 0x05, 0x00, 0x40, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const char 
cmd_misc[CMD_SIZE] = { 0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x80, 0x01, 0x10, 0xfd, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const char data_block_header[DATA_BLOCK_HEADER_SIZE] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x15, 0x00, 0x00, 0xfc, 0x00, 0x00, 0x01, 0x00, 0x00, 0xdb }; static const char data_last_block_header[DATA_BLOCK_HEADER_SIZE] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x00, 0x20, 0x00, 0xc0, 0x0f, 0x00, 0x00, 0x01, 0x00, 0x00, 0xd7 }; static const char data_block_footer[DATA_BLOCK_FOOTER_SIZE] = { 0xfb, 0x14, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x4f }; static inline struct usb_device *gm12u320_to_usb_device(struct gm12u320_device *gm12u320) { return interface_to_usbdev(to_usb_interface(gm12u320->dev.dev)); } static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320) { int i, block_size; const char *hdr; gm12u320->cmd_buf = drmm_kmalloc(&gm12u320->dev, CMD_SIZE, GFP_KERNEL); if (!gm12u320->cmd_buf) return -ENOMEM; for (i = 0; i < GM12U320_BLOCK_COUNT; i++) { if (i == GM12U320_BLOCK_COUNT - 1) { 
block_size = DATA_LAST_BLOCK_SIZE; hdr = data_last_block_header; } else { block_size = DATA_BLOCK_SIZE; hdr = data_block_header; } gm12u320->data_buf[i] = drmm_kzalloc(&gm12u320->dev, block_size, GFP_KERNEL); if (!gm12u320->data_buf[i]) return -ENOMEM; memcpy(gm12u320->data_buf[i], hdr, DATA_BLOCK_HEADER_SIZE); memcpy(gm12u320->data_buf[i] + (block_size - DATA_BLOCK_FOOTER_SIZE), data_block_footer, DATA_BLOCK_FOOTER_SIZE); } return 0; } static int gm12u320_misc_request(struct gm12u320_device *gm12u320, u8 req_a, u8 req_b, u8 arg_a, u8 arg_b, u8 arg_c, u8 arg_d) { struct usb_device *udev = gm12u320_to_usb_device(gm12u320); int ret, len; memcpy(gm12u320->cmd_buf, &cmd_misc, CMD_SIZE); gm12u320->cmd_buf[20] = req_a; gm12u320->cmd_buf[21] = req_b; gm12u320->cmd_buf[22] = arg_a; gm12u320->cmd_buf[23] = arg_b; gm12u320->cmd_buf[24] = arg_c; gm12u320->cmd_buf[25] = arg_d; /* Send request */ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, MISC_SND_EPT), gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); if (ret || len != CMD_SIZE) { GM12U320_ERR("Misc. req. error %d\n", ret); return -EIO; } /* Read value */ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, MISC_RCV_EPT), gm12u320->cmd_buf, MISC_VALUE_SIZE, &len, DATA_TIMEOUT); if (ret || len != MISC_VALUE_SIZE) { GM12U320_ERR("Misc. value error %d\n", ret); return -EIO; } /* cmd_buf[0] now contains the read value, which we don't use */ /* Read status */ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, MISC_RCV_EPT), gm12u320->cmd_buf, READ_STATUS_SIZE, &len, CMD_TIMEOUT); if (ret || len != READ_STATUS_SIZE) { GM12U320_ERR("Misc. 
status error %d\n", ret); return -EIO; } return 0; } static void gm12u320_32bpp_to_24bpp_packed(u8 *dst, u8 *src, int len) { while (len--) { *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; src++; } } static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320) { int block, dst_offset, len, remain, ret, x1, x2, y1, y2; struct drm_framebuffer *fb; void *vaddr; u8 *src; mutex_lock(&gm12u320->fb_update.lock); if (!gm12u320->fb_update.fb) goto unlock; fb = gm12u320->fb_update.fb; x1 = gm12u320->fb_update.rect.x1; x2 = gm12u320->fb_update.rect.x2; y1 = gm12u320->fb_update.rect.y1; y2 = gm12u320->fb_update.rect.y2; vaddr = gm12u320->fb_update.src_map.vaddr; /* TODO: Use mapping abstraction properly */ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); if (ret) { GM12U320_ERR("drm_gem_fb_begin_cpu_access err: %d\n", ret); goto put_fb; } src = vaddr + y1 * fb->pitches[0] + x1 * 4; x1 += (GM12U320_REAL_WIDTH - GM12U320_USER_WIDTH) / 2; x2 += (GM12U320_REAL_WIDTH - GM12U320_USER_WIDTH) / 2; for (; y1 < y2; y1++) { remain = 0; len = (x2 - x1) * 3; dst_offset = (y1 * GM12U320_REAL_WIDTH + x1) * 3; block = dst_offset / DATA_BLOCK_CONTENT_SIZE; dst_offset %= DATA_BLOCK_CONTENT_SIZE; if ((dst_offset + len) > DATA_BLOCK_CONTENT_SIZE) { remain = dst_offset + len - DATA_BLOCK_CONTENT_SIZE; len = DATA_BLOCK_CONTENT_SIZE - dst_offset; } dst_offset += DATA_BLOCK_HEADER_SIZE; len /= 3; gm12u320_32bpp_to_24bpp_packed( gm12u320->data_buf[block] + dst_offset, src, len); if (remain) { block++; dst_offset = DATA_BLOCK_HEADER_SIZE; gm12u320_32bpp_to_24bpp_packed( gm12u320->data_buf[block] + dst_offset, src + len * 4, remain / 3); } src += fb->pitches[0]; } drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); put_fb: drm_framebuffer_put(fb); gm12u320->fb_update.fb = NULL; unlock: mutex_unlock(&gm12u320->fb_update.lock); } static void gm12u320_fb_update_work(struct work_struct *work) { struct gm12u320_device *gm12u320 = container_of(to_delayed_work(work), struct gm12u320_device, 
fb_update.work); struct usb_device *udev = gm12u320_to_usb_device(gm12u320); int block, block_size, len; int ret = 0; gm12u320_copy_fb_to_blocks(gm12u320); for (block = 0; block < GM12U320_BLOCK_COUNT; block++) { if (block == GM12U320_BLOCK_COUNT - 1) block_size = DATA_LAST_BLOCK_SIZE; else block_size = DATA_BLOCK_SIZE; /* Send data command to device */ memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE); gm12u320->cmd_buf[8] = block_size & 0xff; gm12u320->cmd_buf[9] = block_size >> 8; gm12u320->cmd_buf[20] = 0xfc - block * 4; gm12u320->cmd_buf[21] = block | (gm12u320->fb_update.frame << 7); ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, DATA_SND_EPT), gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); if (ret || len != CMD_SIZE) goto err; /* Send data block to device */ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, DATA_SND_EPT), gm12u320->data_buf[block], block_size, &len, DATA_TIMEOUT); if (ret || len != block_size) goto err; /* Read status */ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, DATA_RCV_EPT), gm12u320->cmd_buf, READ_STATUS_SIZE, &len, CMD_TIMEOUT); if (ret || len != READ_STATUS_SIZE) goto err; } /* Send draw command to device */ memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE); ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, DATA_SND_EPT), gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); if (ret || len != CMD_SIZE) goto err; /* Read status */ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, DATA_RCV_EPT), gm12u320->cmd_buf, READ_STATUS_SIZE, &len, gm12u320->fb_update.draw_status_timeout); if (ret || len != READ_STATUS_SIZE) goto err; gm12u320->fb_update.draw_status_timeout = CMD_TIMEOUT; gm12u320->fb_update.frame = !gm12u320->fb_update.frame; /* * We must draw a frame every 2s otherwise the projector * switches back to showing its logo. 
*/ queue_delayed_work(system_long_wq, &gm12u320->fb_update.work, msecs_to_jiffies(IDLE_TIMEOUT)); return; err: /* Do not log errors caused by module unload or device unplug */ if (ret != -ENODEV && ret != -ECONNRESET && ret != -ESHUTDOWN) GM12U320_ERR("Frame update error: %d\n", ret); } static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb, const struct iosys_map *map, struct drm_rect *dirty) { struct gm12u320_device *gm12u320 = to_gm12u320(fb->dev); struct drm_framebuffer *old_fb = NULL; bool wakeup = false; mutex_lock(&gm12u320->fb_update.lock); if (gm12u320->fb_update.fb != fb) { old_fb = gm12u320->fb_update.fb; drm_framebuffer_get(fb); gm12u320->fb_update.fb = fb; gm12u320->fb_update.rect = *dirty; gm12u320->fb_update.src_map = *map; wakeup = true; } else { struct drm_rect *rect = &gm12u320->fb_update.rect; rect->x1 = min(rect->x1, dirty->x1); rect->y1 = min(rect->y1, dirty->y1); rect->x2 = max(rect->x2, dirty->x2); rect->y2 = max(rect->y2, dirty->y2); } mutex_unlock(&gm12u320->fb_update.lock); if (wakeup) mod_delayed_work(system_long_wq, &gm12u320->fb_update.work, 0); if (old_fb) drm_framebuffer_put(old_fb); } static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320) { struct drm_framebuffer *old_fb; cancel_delayed_work_sync(&gm12u320->fb_update.work); mutex_lock(&gm12u320->fb_update.lock); old_fb = gm12u320->fb_update.fb; gm12u320->fb_update.fb = NULL; iosys_map_clear(&gm12u320->fb_update.src_map); mutex_unlock(&gm12u320->fb_update.lock); drm_framebuffer_put(old_fb); } static int gm12u320_set_ecomode(struct gm12u320_device *gm12u320) { return gm12u320_misc_request(gm12u320, MISC_REQ_GET_SET_ECO_A, MISC_REQ_GET_SET_ECO_B, 0x01 /* set */, eco_mode ? 0x01 : 0x00, 0x00, 0x01); } /* ------------------------------------------------------------------ */ /* gm12u320 connector */ /* * We use fake EDID info so that userspace know that it is dealing with * an Acer projector, rather then listing this as an "unknown" monitor. 
* Note this assumes this driver is only ever used with the Acer C120, if we * add support for other devices the vendor and model should be parameterized. */ static const struct edid gm12u320_edid = { .header = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }, .mfg_id = { 0x04, 0x72 }, /* "ACR" */ .prod_code = { 0x20, 0xc1 }, /* C120h */ .serial = 0xaa55aa55, .mfg_week = 1, .mfg_year = 16, .version = 1, /* EDID 1.3 */ .revision = 3, /* EDID 1.3 */ .input = 0x08, /* Analog input */ .features = 0x0a, /* Pref timing in DTD 1 */ .standard_timings = { { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 } }, .detailed_timings = { { .pixel_clock = 3383, /* hactive = 848, hblank = 256 */ .data.pixel_data.hactive_lo = 0x50, .data.pixel_data.hblank_lo = 0x00, .data.pixel_data.hactive_hblank_hi = 0x31, /* vactive = 480, vblank = 28 */ .data.pixel_data.vactive_lo = 0xe0, .data.pixel_data.vblank_lo = 0x1c, .data.pixel_data.vactive_vblank_hi = 0x10, /* hsync offset 40 pw 128, vsync offset 1 pw 4 */ .data.pixel_data.hsync_offset_lo = 0x28, .data.pixel_data.hsync_pulse_width_lo = 0x80, .data.pixel_data.vsync_offset_pulse_width_lo = 0x14, .data.pixel_data.hsync_vsync_offset_pulse_width_hi = 0x00, /* Digital separate syncs, hsync+, vsync+ */ .data.pixel_data.misc = 0x1e, }, { .pixel_clock = 0, .data.other_data.type = 0xfd, /* Monitor ranges */ .data.other_data.data.range.min_vfreq = 59, .data.other_data.data.range.max_vfreq = 61, .data.other_data.data.range.min_hfreq_khz = 29, .data.other_data.data.range.max_hfreq_khz = 32, .data.other_data.data.range.pixel_clock_mhz = 4, /* 40 MHz */ .data.other_data.data.range.flags = 0, .data.other_data.data.range.formula.cvt = { 0xa0, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 }, }, { .pixel_clock = 0, .data.other_data.type = 0xfc, /* Model string */ .data.other_data.data.str.str = { 'P', 'r', 'o', 'j', 'e', 'c', 't', 'o', 'r', '\n', ' ', ' ', ' ' }, }, { .pixel_clock = 0, .data.other_data.type = 0xfe, /* Unspecified text / 
padding */ .data.other_data.data.str.str = { '\n', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ' }, } }, .checksum = 0x13, }; static int gm12u320_conn_get_modes(struct drm_connector *connector) { const struct drm_edid *drm_edid; int count; drm_edid = drm_edid_alloc(&gm12u320_edid, sizeof(gm12u320_edid)); drm_edid_connector_update(connector, drm_edid); count = drm_edid_connector_add_modes(connector); drm_edid_free(drm_edid); return count; } static const struct drm_connector_helper_funcs gm12u320_conn_helper_funcs = { .get_modes = gm12u320_conn_get_modes, }; static const struct drm_connector_funcs gm12u320_conn_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static int gm12u320_conn_init(struct gm12u320_device *gm12u320) { drm_connector_helper_add(&gm12u320->conn, &gm12u320_conn_helper_funcs); return drm_connector_init(&gm12u320->dev, &gm12u320->conn, &gm12u320_conn_funcs, DRM_MODE_CONNECTOR_VGA); } /* ------------------------------------------------------------------ */ /* gm12u320 (simple) display pipe */ static void gm12u320_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { struct drm_rect rect = { 0, 0, GM12U320_USER_WIDTH, GM12U320_HEIGHT }; struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev); struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); gm12u320->fb_update.draw_status_timeout = FIRST_FRAME_TIMEOUT; gm12u320_fb_mark_dirty(plane_state->fb, &shadow_plane_state->data[0], &rect); } static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe) { struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev); gm12u320_stop_fb_update(gm12u320); } static void gm12u320_pipe_update(struct 
drm_simple_display_pipe *pipe, struct drm_plane_state *old_state) { struct drm_plane_state *state = pipe->plane.state; struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state); struct drm_rect rect; if (drm_atomic_helper_damage_merged(old_state, state, &rect)) gm12u320_fb_mark_dirty(state->fb, &shadow_plane_state->data[0], &rect); } static const struct drm_simple_display_pipe_funcs gm12u320_pipe_funcs = { .enable = gm12u320_pipe_enable, .disable = gm12u320_pipe_disable, .update = gm12u320_pipe_update, DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS, }; static const uint32_t gm12u320_pipe_formats[] = { DRM_FORMAT_XRGB8888, }; static const uint64_t gm12u320_pipe_modifiers[] = { DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID }; /* * FIXME: Dma-buf sharing requires DMA support by the importing device. * This function is a workaround to make USB devices work as well. * See todo.rst for how to fix the issue in the dma-buf framework. */ static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) { struct gm12u320_device *gm12u320 = to_gm12u320(dev); if (!gm12u320->dmadev) return ERR_PTR(-ENODEV); return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev); } DEFINE_DRM_GEM_FOPS(gm12u320_fops); static const struct drm_driver gm12u320_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .fops = &gm12u320_fops, DRM_GEM_SHMEM_DRIVER_OPS, .gem_prime_import = gm12u320_gem_prime_import, DRM_FBDEV_SHMEM_DRIVER_OPS, }; static const struct drm_mode_config_funcs gm12u320_mode_config_funcs = { .fb_create = drm_gem_fb_create_with_dirty, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; static int gm12u320_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct gm12u320_device *gm12u320; struct drm_device *dev; int 
ret; /* * The gm12u320 presents itself to the system as 2 usb mass-storage * interfaces, we only care about / need the first one. */ if (interface->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; gm12u320 = devm_drm_dev_alloc(&interface->dev, &gm12u320_drm_driver, struct gm12u320_device, dev); if (IS_ERR(gm12u320)) return PTR_ERR(gm12u320); dev = &gm12u320->dev; gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev)); if (!gm12u320->dmadev) drm_warn(dev, "buffer sharing not supported"); /* not an error */ INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work); mutex_init(&gm12u320->fb_update.lock); ret = drmm_mode_config_init(dev); if (ret) goto err_put_device; dev->mode_config.min_width = GM12U320_USER_WIDTH; dev->mode_config.max_width = GM12U320_USER_WIDTH; dev->mode_config.min_height = GM12U320_HEIGHT; dev->mode_config.max_height = GM12U320_HEIGHT; dev->mode_config.funcs = &gm12u320_mode_config_funcs; ret = gm12u320_usb_alloc(gm12u320); if (ret) goto err_put_device; ret = gm12u320_set_ecomode(gm12u320); if (ret) goto err_put_device; ret = gm12u320_conn_init(gm12u320); if (ret) goto err_put_device; ret = drm_simple_display_pipe_init(&gm12u320->dev, &gm12u320->pipe, &gm12u320_pipe_funcs, gm12u320_pipe_formats, ARRAY_SIZE(gm12u320_pipe_formats), gm12u320_pipe_modifiers, &gm12u320->conn); if (ret) goto err_put_device; drm_mode_config_reset(dev); usb_set_intfdata(interface, dev); ret = drm_dev_register(dev, 0); if (ret) goto err_put_device; drm_client_setup(dev, NULL); return 0; err_put_device: put_device(gm12u320->dmadev); return ret; } static void gm12u320_usb_disconnect(struct usb_interface *interface) { struct drm_device *dev = usb_get_intfdata(interface); struct gm12u320_device *gm12u320 = to_gm12u320(dev); put_device(gm12u320->dmadev); gm12u320->dmadev = NULL; drm_dev_unplug(dev); drm_atomic_helper_shutdown(dev); } static int gm12u320_suspend(struct usb_interface *interface, pm_message_t message) { struct drm_device 
*dev = usb_get_intfdata(interface); return drm_mode_config_helper_suspend(dev); } static int gm12u320_resume(struct usb_interface *interface) { struct drm_device *dev = usb_get_intfdata(interface); struct gm12u320_device *gm12u320 = to_gm12u320(dev); gm12u320_set_ecomode(gm12u320); return drm_mode_config_helper_resume(dev); } static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1de1, 0xc102) }, {}, }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_driver gm12u320_usb_driver = { .name = "gm12u320", .probe = gm12u320_usb_probe, .disconnect = gm12u320_usb_disconnect, .id_table = id_table, .suspend = pm_ptr(gm12u320_suspend), .resume = pm_ptr(gm12u320_resume), .reset_resume = pm_ptr(gm12u320_resume), }; module_usb_driver(gm12u320_usb_driver); MODULE_AUTHOR("Hans de Goede <[email protected]>"); MODULE_DESCRIPTION("GM12U320 driver for USB projectors"); MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 MediaTek Inc.
 * Author: Sam Shih <[email protected]>
 * Author: Wenzhen Yu <[email protected]>
 */

#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "clk-gate.h"
#include "clk-mtk.h"
#include "clk-mux.h"
#include "clk-pll.h"

#include <dt-bindings/clock/mt7986-clk.h>
#include <linux/clk.h>

/* Maximum output frequency the MT7986 PLLs may be programmed to. */
#define MT7986_PLL_FMAX (2500UL * MHZ)
#define CON0_MT7986_RST_BAR BIT(27)

/*
 * Describe one apmixedsys PLL for the common MTK PLL code. The RST_BAR
 * mask and fmax are identical for all MT7986 PLLs and filled in here.
 */
#define PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,       \
		 _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift,         \
		 _div_table, _parent_name)                                     \
	{                                                                      \
		.id = _id, .name = _name, .reg = _reg, .pwr_reg = _pwr_reg,    \
		.en_mask = _en_mask, .flags = _flags,                          \
		.rst_bar_mask = CON0_MT7986_RST_BAR, .fmax = MT7986_PLL_FMAX,  \
		.pcwbits = _pcwbits, .pd_reg = _pd_reg, .pd_shift = _pd_shift, \
		.tuner_reg = _tuner_reg, .pcw_reg = _pcw_reg,                  \
		.pcw_shift = _pcw_shift, .div_table = _div_table,              \
		.parent_name = _parent_name,                                   \
	}

/* All MT7986 PLLs are fed from the external crystal ("clkxtal"). */
#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg,   \
	    _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift)                       \
	PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,       \
		 _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, NULL,   \
		 "clkxtal")

static const struct mtk_pll_data plls[] = {
	/* armpll feeds the CPU and must never be gated, hence PLL_AO */
	PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x0, PLL_AO, 32,
	    0x0200, 4, 0, 0x0204, 0),
	PLL(CLK_APMIXED_NET2PLL, "net2pll", 0x0210, 0x021C, 0x0, 0, 32,
	    0x0210, 4, 0, 0x0214, 0),
	PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0220, 0x022C, 0x0, 0, 32,
	    0x0220, 4, 0, 0x0224, 0),
	PLL(CLK_APMIXED_SGMPLL, "sgmpll", 0x0230, 0x023c, 0x0, 0, 32,
	    0x0230, 4, 0, 0x0234, 0),
	PLL(CLK_APMIXED_WEDMCUPLL, "wedmcupll", 0x0240, 0x024c, 0x0, 0, 32,
	    0x0240, 4, 0, 0x0244, 0),
	PLL(CLK_APMIXED_NET1PLL, "net1pll", 0x0250, 0x025c, 0x0, 0, 32,
	    0x0250, 4, 0, 0x0254, 0),
	PLL(CLK_APMIXED_MPLL, "mpll", 0x0260, 0x0270, 0x0, 0, 32,
	    0x0260, 4, 0, 0x0264, 0),
	PLL(CLK_APMIXED_APLL2, "apll2", 0x0278, 0x0288, 0x0, 0, 32,
	    0x0278, 4, 0, 0x027c, 0),
};

static const struct of_device_id of_match_clk_mt7986_apmixed[] = {
	{ .compatible = "mediatek,mt7986-apmixedsys", },
	{ }
};
MODULE_DEVICE_TABLE(of, of_match_clk_mt7986_apmixed);

/*
 * Register all apmixedsys PLLs and expose them to DT consumers through a
 * onecell clock provider.
 */
static int clk_mt7986_apmixed_probe(struct platform_device *pdev)
{
	struct clk_hw_onecell_data *clk_data;
	struct device_node *node = pdev->dev.of_node;
	int r;

	clk_data = mtk_alloc_clk_data(ARRAY_SIZE(plls));
	if (!clk_data)
		return -ENOMEM;

	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);

	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
	if (r) {
		pr_err("%s(): could not register clock provider: %d\n",
		       __func__, r);
		/*
		 * NOTE(review): the PLLs registered above are not unregistered
		 * on this path; only the clk_data container is freed — verify
		 * whether that matters for this (built-in) driver.
		 */
		goto free_apmixed_data;
	}
	return r;

free_apmixed_data:
	mtk_free_clk_data(clk_data);
	return r;
}

static struct platform_driver clk_mt7986_apmixed_drv = {
	.probe = clk_mt7986_apmixed_probe,
	.driver = {
		.name = "clk-mt7986-apmixed",
		.of_match_table = of_match_clk_mt7986_apmixed,
	},
};
builtin_platform_driver(clk_mt7986_apmixed_drv);

MODULE_DESCRIPTION("MediaTek MT7986 apmixedsys clocks driver");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0
/****************************************************************************/
/*
 *	linux/fs/binfmt_flat.c
 *
 *	Copyright (C) 2000-2003 David McCullough <[email protected]>
 *	Copyright (C) 2002 Greg Ungerer <[email protected]>
 *	Copyright (C) 2002 SnapGear, by Paul Dale <[email protected]>
 *	Copyright (C) 2000, 2001 Lineo, by David McCullough <[email protected]>
 *  based heavily on:
 *
 *  linux/fs/binfmt_aout.c:
 *      Copyright (C) 1991, 1992, 1996  Linus Torvalds
 *  linux/fs/binfmt_flat.c for 2.0 kernel
 *      Copyright (C) 1998  Kenneth Albanowski <[email protected]>
 *	JAN/99 -- coded full program relocation ([email protected])
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/init.h>
#include <linux/flat.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/byteorder.h>
#include <linux/unaligned.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/flat.h>

#ifndef flat_get_relocate_addr
#define flat_get_relocate_addr(rel)	(rel)
#endif

/****************************************************************************/

/*
 * User data (data section and bss) needs to be aligned.
 * We pick 0x20 here because it is the max value elf2flt has always
 * used in producing FLAT files, and because it seems to be large
 * enough to make all the gcc alignment related tests happy.
 */
#define FLAT_DATA_ALIGN	(0x20)

/*
 * User data (stack) also needs to be aligned.
 * Here we can be a bit looser than the data sections since this
 * needs to only meet arch ABI requirements.
 */
#define FLAT_STACK_ALIGN	max_t(unsigned long, sizeof(void *), ARCH_SLAB_MINALIGN)

#define RELOC_FAILED 0xff00ff01		/* Relocation incorrect somewhere */
#define UNLOADED_LIB 0x7ff000ff		/* Placeholder for unused library */

#define MAX_SHARED_LIBS			(1)

#ifdef CONFIG_BINFMT_FLAT_NO_DATA_START_OFFSET
#define DATA_START_OFFSET_WORDS		(0)
#define MAX_SHARED_LIBS_UPDATE		(0)
#else
#define DATA_START_OFFSET_WORDS		(MAX_SHARED_LIBS)
#define MAX_SHARED_LIBS_UPDATE		(MAX_SHARED_LIBS)
#endif

/* Per-binary load record; with MAX_SHARED_LIBS == 1 only slot 0 is used. */
struct lib_info {
	struct {
		unsigned long start_code;		/* Start of text segment */
		unsigned long start_data;		/* Start of data segment */
		unsigned long start_brk;		/* End of data segment */
		unsigned long text_len;			/* Length of text segment */
		unsigned long entry;			/* Start address for this module */
		unsigned long build_date;		/* When this one was compiled */
		bool loaded;				/* Has this library been loaded? */
	} lib_list[MAX_SHARED_LIBS];
};

static int load_flat_binary(struct linux_binprm *);

static struct linux_binfmt flat_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_flat_binary,
};

/****************************************************************************/
/*
 * create_flat_tables() parses the env- and arg-strings in new user
 * memory and creates the pointer tables from them, and puts their
 * addresses on the "stack", recording the new stack pointer value.
 */

static int create_flat_tables(struct linux_binprm *bprm, unsigned long arg_start)
{
	char __user *p;
	unsigned long __user *sp;
	long i, len;

	p = (char __user *)arg_start;
	sp = (unsigned long __user *)current->mm->start_stack;

	/* Reserve room for argv/envp pointer arrays before aligning. */
	sp -= bprm->envc + 1;
	sp -= bprm->argc + 1;
	if (IS_ENABLED(CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK))
		sp -= 2; /* argvp + envp */
	sp -= 1;  /* &argc */

	current->mm->start_stack = (unsigned long)sp & -FLAT_STACK_ALIGN;
	sp = (unsigned long __user *)current->mm->start_stack;

	if (put_user(bprm->argc, sp++))
		return -EFAULT;
	if (IS_ENABLED(CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK)) {
		unsigned long argv, envp;
		argv = (unsigned long)(sp + 2);
		envp = (unsigned long)(sp + 2 + bprm->argc + 1);
		if (put_user(argv, sp++) || put_user(envp, sp++))
			return -EFAULT;
	}

	/* argv entries: p walks the NUL-separated strings already in place. */
	current->mm->arg_start = (unsigned long)p;
	for (i = bprm->argc; i > 0; i--) {
		if (put_user((unsigned long)p, sp++))
			return -EFAULT;
		len = strnlen_user(p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (put_user(0, sp++))
		return -EFAULT;
	current->mm->arg_end = (unsigned long)p;

	/* envp entries follow immediately after the args. */
	current->mm->env_start = (unsigned long) p;
	for (i = bprm->envc; i > 0; i--) {
		if (put_user((unsigned long)p, sp++))
			return -EFAULT;
		len = strnlen_user(p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (put_user(0, sp++))
		return -EFAULT;
	current->mm->env_end = (unsigned long)p;

	return 0;
}

/****************************************************************************/

#ifdef CONFIG_BINFMT_ZFLAT

#include <linux/zlib.h>

#define LBUFSIZE	4000

/* gzip flag byte */
#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
#define COMMENT      0x10 /* bit 4 set: file comment present */
#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
#define RESERVED     0xC0 /* bit 6,7:   reserved */

/*
 * Inflate a gzip-compressed region of the binary at file offset fpos into
 * dst (len bytes). Validates and skips the gzip header by hand, then runs
 * zlib in raw (-MAX_WBITS) mode. Returns 0 on success, -ENOMEM/-ENOEXEC
 * on failure. The fd argument is unused.
 */
static int decompress_exec(struct linux_binprm *bprm, loff_t fpos, char *dst,
		long len, int fd)
{
	unsigned char *buf;
	z_stream strm;
	int ret, retval;

	pr_debug("decompress_exec(offset=%llx,buf=%p,len=%lx)\n", fpos, dst, len);

	memset(&strm, 0, sizeof(strm));
	strm.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (!strm.workspace)
		return -ENOMEM;

	buf = kmalloc(LBUFSIZE, GFP_KERNEL);
	if (!buf) {
		retval = -ENOMEM;
		goto out_free;
	}

	/* Read in first chunk of data and parse gzip header. */
	ret = kernel_read(bprm->file, buf, LBUFSIZE, &fpos);

	strm.next_in = buf;
	strm.avail_in = ret;
	strm.total_in = 0;

	retval = -ENOEXEC;

	/* Check minimum size -- gzip header */
	if (ret < 10) {
		pr_debug("file too small?\n");
		goto out_free_buf;
	}

	/* Check gzip magic number */
	if ((buf[0] != 037) || ((buf[1] != 0213) && (buf[1] != 0236))) {
		pr_debug("unknown compression magic?\n");
		goto out_free_buf;
	}

	/* Check gzip method */
	if (buf[2] != 8) {
		pr_debug("unknown compression method?\n");
		goto out_free_buf;
	}
	/* Check gzip flags */
	if ((buf[3] & ENCRYPTED) || (buf[3] & CONTINUATION) ||
	    (buf[3] & RESERVED)) {
		pr_debug("unknown flags?\n");
		goto out_free_buf;
	}

	/* ret now tracks the header length to skip before the deflate data. */
	ret = 10;
	if (buf[3] & EXTRA_FIELD) {
		ret += 2 + buf[10] + (buf[11] << 8);
		if (unlikely(ret >= LBUFSIZE)) {
			pr_debug("buffer overflow (EXTRA)?\n");
			goto out_free_buf;
		}
	}
	if (buf[3] & ORIG_NAME) {
		while (ret < LBUFSIZE && buf[ret++] != 0)
			;
		if (unlikely(ret == LBUFSIZE)) {
			pr_debug("buffer overflow (ORIG_NAME)?\n");
			goto out_free_buf;
		}
	}
	if (buf[3] & COMMENT) {
		while (ret < LBUFSIZE && buf[ret++] != 0)
			;
		if (unlikely(ret == LBUFSIZE)) {
			pr_debug("buffer overflow (COMMENT)?\n");
			goto out_free_buf;
		}
	}

	strm.next_in += ret;
	strm.avail_in -= ret;

	strm.next_out = dst;
	strm.avail_out = len;
	strm.total_out = 0;

	if (zlib_inflateInit2(&strm, -MAX_WBITS) != Z_OK) {
		pr_debug("zlib init failed?\n");
		goto out_free_buf;
	}

	/* Feed the rest of the file through zlib chunk by chunk. */
	while ((ret = zlib_inflate(&strm, Z_NO_FLUSH)) == Z_OK) {
		ret = kernel_read(bprm->file, buf, LBUFSIZE, &fpos);
		if (ret <= 0)
			break;
		len -= ret;

		strm.next_in = buf;
		strm.avail_in = ret;
		strm.total_in = 0;
	}

	if (ret < 0) {
		pr_debug("decompression failed (%d), %s\n",
			ret, strm.msg);
		goto out_zlib;
	}

	retval = 0;
out_zlib:
	zlib_inflateEnd(&strm);
out_free_buf:
	kfree(buf);
out_free:
	kfree(strm.workspace);
	return retval;
}

#endif /* CONFIG_BINFMT_ZFLAT */

/****************************************************************************/

/*
 * Translate a file-relative relocation value r into the final in-memory
 * address, using the text/data layout recorded in lib_list[0]. On an
 * out-of-range value the current task is killed with SIGSEGV and
 * RELOC_FAILED is returned.
 */
static unsigned long
calc_reloc(unsigned long r, struct lib_info *p)
{
	unsigned long addr;
	unsigned long start_brk;
	unsigned long start_data;
	unsigned long text_len;
	unsigned long start_code;

	start_brk = p->lib_list[0].start_brk;
	start_data = p->lib_list[0].start_data;
	start_code = p->lib_list[0].start_code;
	text_len = p->lib_list[0].text_len;

	if (r > start_brk - start_data + text_len) {
		pr_err("reloc outside program 0x%lx (0 - 0x%lx/0x%lx)",
		       r, start_brk-start_data+text_len, text_len);
		goto failed;
	}

	if (r < text_len)			/* In text segment */
		addr = r + start_code;
	else					/* In data segment */
		addr = r - text_len + start_data;

	/* Range checked already above so doing the range tests is redundant...*/
	return addr;

failed:
	pr_cont(", killing %s!\n", current->comm);
	send_sig(SIGSEGV, current, 0);

	return RELOC_FAILED;
}

/****************************************************************************/

#ifdef CONFIG_BINFMT_FLAT_OLD
/*
 * Apply one old-style (FLAT v2) relocation: the packed value encodes a
 * segment type and an offset; the word at that offset is rebased onto the
 * matching segment start of the current process.
 */
static void old_reloc(unsigned long rl)
{
	static const char *segment[] = { "TEXT", "DATA", "BSS", "*UNKNOWN*" };
	flat_v2_reloc_t	r;
	unsigned long __user *ptr;
	unsigned long val;

	r.value = rl;
#if defined(CONFIG_COLDFIRE)
	ptr = (unsigned long __user *)(current->mm->start_code + r.reloc.offset);
#else
	ptr = (unsigned long __user *)(current->mm->start_data + r.reloc.offset);
#endif
	get_user(val, ptr);

	pr_debug("Relocation of variable at DATASEG+%x "
		 "(address %p, currently %lx) into segment %s\n",
		 r.reloc.offset, ptr, val, segment[r.reloc.type]);

	switch (r.reloc.type) {
	case OLD_FLAT_RELOC_TYPE_TEXT:
		val += current->mm->start_code;
		break;
	case OLD_FLAT_RELOC_TYPE_DATA:
		val += current->mm->start_data;
		break;
	case OLD_FLAT_RELOC_TYPE_BSS:
		val += current->mm->end_data;
		break;
	default:
		pr_err("Unknown relocation type=%x\n", r.reloc.type);
		break;
	}
	put_user(val, ptr);

	pr_debug("Relocation became %lx\n", val);
}
#endif /* CONFIG_BINFMT_FLAT_OLD */

/****************************************************************************/

static inline u32 __user *skip_got_header(u32 __user *rp)
{
	if (IS_ENABLED(CONFIG_RISCV)) {
		/*
		 * RISC-V has a 16 byte GOT PLT header for elf64-riscv
		 * and 8 byte GOT PLT header for elf32-riscv.
		 * Skip the whole GOT PLT header, since it is reserved
		 * for the dynamic linker (ld.so).
		 */
		u32 rp_val0, rp_val1;

		if (get_user(rp_val0, rp))
			return rp;
		if (get_user(rp_val1, rp + 1))
			return rp;

		if (rp_val0 == 0xffffffff && rp_val1 == 0xffffffff)
			rp += 4;
		else if (rp_val0 == 0xffffffff)
			rp += 2;
	}
	return rp;
}

/*
 * Load one FLAT image (the main binary; shared-lib support is compiled
 * down to a single slot). Parses the header, maps text/data/bss and
 * applies relocations. (Definition continues beyond this chunk.)
 */
static int load_flat_file(struct linux_binprm *bprm,
		struct lib_info *libinfo, unsigned long *extra_stack)
{
	struct flat_hdr *hdr;
	unsigned long textpos, datapos, realdatastart;
	u32 text_len, data_len, bss_len, stack_len, full_data, flags;
	unsigned long len, memp, memp_size, extra, rlim;
	__be32 __user *reloc;
	u32 __user *rp;
	int i, rev, relocs;
	loff_t fpos;
	unsigned long start_code, end_code;
	ssize_t result;
	int ret;

	hdr = ((struct flat_hdr *) bprm->buf);		/* exec-header */

	/* All on-disk header fields are stored big-endian. */
	text_len  = ntohl(hdr->data_start);
	data_len  = ntohl(hdr->data_end) - ntohl(hdr->data_start);
	bss_len   = ntohl(hdr->bss_end) - ntohl(hdr->data_end);
	stack_len = ntohl(hdr->stack_size);
	if (extra_stack) {
		stack_len += *extra_stack;
		*extra_stack = stack_len;
	}
	relocs    = ntohl(hdr->reloc_count);
	flags     = ntohl(hdr->flags);
	rev       = ntohl(hdr->rev);
	full_data = data_len + relocs * sizeof(unsigned long);

	if (strncmp(hdr->magic, "bFLT", 4)) {
		/*
		 * Previously, here was a printk to tell people
		 *   "BINFMT_FLAT: bad header magic".
* But for the kernel which also use ELF FD-PIC format, this * error message is confusing. * because a lot of people do not manage to produce good */ ret = -ENOEXEC; goto err; } if (flags & FLAT_FLAG_KTRACE) pr_info("Loading file: %s\n", bprm->filename); #ifdef CONFIG_BINFMT_FLAT_OLD if (rev != FLAT_VERSION && rev != OLD_FLAT_VERSION) { pr_err("bad flat file version 0x%x (supported 0x%lx and 0x%lx)\n", rev, FLAT_VERSION, OLD_FLAT_VERSION); ret = -ENOEXEC; goto err; } /* * fix up the flags for the older format, there were all kinds * of endian hacks, this only works for the simple cases */ if (rev == OLD_FLAT_VERSION && (flags || IS_ENABLED(CONFIG_BINFMT_FLAT_OLD_ALWAYS_RAM))) flags = FLAT_FLAG_RAM; #else /* CONFIG_BINFMT_FLAT_OLD */ if (rev != FLAT_VERSION) { pr_err("bad flat file version 0x%x (supported 0x%lx)\n", rev, FLAT_VERSION); ret = -ENOEXEC; goto err; } #endif /* !CONFIG_BINFMT_FLAT_OLD */ /* * Make sure the header params are sane. * 28 bits (256 MB) is way more than reasonable in this case. * If some top bits are set we have probable binary corruption. */ if ((text_len | data_len | bss_len | stack_len | full_data) >> 28) { pr_err("bad header\n"); ret = -ENOEXEC; goto err; } #ifndef CONFIG_BINFMT_ZFLAT if (flags & (FLAT_FLAG_GZIP|FLAT_FLAG_GZDATA)) { pr_err("Support for ZFLAT executables is not enabled.\n"); ret = -ENOEXEC; goto err; } #endif /* * Check initial limits. This avoids letting people circumvent * size limits imposed on them by creating programs with large * arrays in the data or bss. 
*/ rlim = rlimit(RLIMIT_DATA); if (rlim >= RLIM_INFINITY) rlim = ~0; if (data_len + bss_len > rlim) { ret = -ENOMEM; goto err; } /* Flush all traces of the currently running executable */ ret = begin_new_exec(bprm); if (ret) goto err; /* OK, This is the point of no return */ set_personality(PER_LINUX_32BIT); setup_new_exec(bprm); /* * calculate the extra space we need to map in */ extra = max_t(unsigned long, bss_len + stack_len, relocs * sizeof(unsigned long)); /* * there are a couple of cases here, the separate code/data * case, and then the fully copied to RAM case which lumps * it all together. */ if (!IS_ENABLED(CONFIG_MMU) && !(flags & (FLAT_FLAG_RAM|FLAT_FLAG_GZIP))) { /* * this should give us a ROM ptr, but if it doesn't we don't * really care */ pr_debug("ROM mapping of file (we hope)\n"); textpos = vm_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, MAP_PRIVATE, 0); if (!textpos || IS_ERR_VALUE(textpos)) { ret = textpos; if (!textpos) ret = -ENOMEM; pr_err("Unable to mmap process text, errno %d\n", ret); goto err; } len = data_len + extra + DATA_START_OFFSET_WORDS * sizeof(unsigned long); len = PAGE_ALIGN(len); realdatastart = vm_mmap(NULL, 0, len, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0); if (realdatastart == 0 || IS_ERR_VALUE(realdatastart)) { ret = realdatastart; if (!realdatastart) ret = -ENOMEM; pr_err("Unable to allocate RAM for process data, " "errno %d\n", ret); vm_munmap(textpos, text_len); goto err; } datapos = ALIGN(realdatastart + DATA_START_OFFSET_WORDS * sizeof(unsigned long), FLAT_DATA_ALIGN); pr_debug("Allocated data+bss+stack (%u bytes): %lx\n", data_len + bss_len + stack_len, datapos); fpos = ntohl(hdr->data_start); #ifdef CONFIG_BINFMT_ZFLAT if (flags & FLAT_FLAG_GZDATA) { result = decompress_exec(bprm, fpos, (char *)datapos, full_data, 0); } else #endif { result = read_code(bprm->file, datapos, fpos, full_data); } if (IS_ERR_VALUE(result)) { ret = result; pr_err("Unable to read data+bss, errno %d\n", ret); vm_munmap(textpos, 
text_len); vm_munmap(realdatastart, len); goto err; } reloc = (__be32 __user *) (datapos + (ntohl(hdr->reloc_start) - text_len)); memp = realdatastart; memp_size = len; } else { len = text_len + data_len + extra + DATA_START_OFFSET_WORDS * sizeof(u32); len = PAGE_ALIGN(len); textpos = vm_mmap(NULL, 0, len, PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0); if (!textpos || IS_ERR_VALUE(textpos)) { ret = textpos; if (!textpos) ret = -ENOMEM; pr_err("Unable to allocate RAM for process text/data, " "errno %d\n", ret); goto err; } realdatastart = textpos + ntohl(hdr->data_start); datapos = ALIGN(realdatastart + DATA_START_OFFSET_WORDS * sizeof(u32), FLAT_DATA_ALIGN); reloc = (__be32 __user *) (datapos + (ntohl(hdr->reloc_start) - text_len)); memp = textpos; memp_size = len; #ifdef CONFIG_BINFMT_ZFLAT /* * load it all in and treat it like a RAM load from now on */ if (flags & FLAT_FLAG_GZIP) { #ifndef CONFIG_MMU result = decompress_exec(bprm, sizeof(struct flat_hdr), (((char *)textpos) + sizeof(struct flat_hdr)), (text_len + full_data - sizeof(struct flat_hdr)), 0); memmove((void *) datapos, (void *) realdatastart, full_data); #else /* * This is used on MMU systems mainly for testing. * Let's use a kernel buffer to simplify things. 
*/ long unz_text_len = text_len - sizeof(struct flat_hdr); long unz_len = unz_text_len + full_data; char *unz_data = vmalloc(unz_len); if (!unz_data) { result = -ENOMEM; } else { result = decompress_exec(bprm, sizeof(struct flat_hdr), unz_data, unz_len, 0); if (result == 0 && (copy_to_user((void __user *)textpos + sizeof(struct flat_hdr), unz_data, unz_text_len) || copy_to_user((void __user *)datapos, unz_data + unz_text_len, full_data))) result = -EFAULT; vfree(unz_data); } #endif } else if (flags & FLAT_FLAG_GZDATA) { result = read_code(bprm->file, textpos, 0, text_len); if (!IS_ERR_VALUE(result)) { #ifndef CONFIG_MMU result = decompress_exec(bprm, text_len, (char *) datapos, full_data, 0); #else char *unz_data = vmalloc(full_data); if (!unz_data) { result = -ENOMEM; } else { result = decompress_exec(bprm, text_len, unz_data, full_data, 0); if (result == 0 && copy_to_user((void __user *)datapos, unz_data, full_data)) result = -EFAULT; vfree(unz_data); } #endif } } else #endif /* CONFIG_BINFMT_ZFLAT */ { result = read_code(bprm->file, textpos, 0, text_len); if (!IS_ERR_VALUE(result)) result = read_code(bprm->file, datapos, ntohl(hdr->data_start), full_data); } if (IS_ERR_VALUE(result)) { ret = result; pr_err("Unable to read code+data+bss, errno %d\n", ret); vm_munmap(textpos, text_len + data_len + extra + DATA_START_OFFSET_WORDS * sizeof(u32)); goto err; } } start_code = textpos + sizeof(struct flat_hdr); end_code = textpos + text_len; text_len -= sizeof(struct flat_hdr); /* the real code len */ /* The main program needs a little extra setup in the task structure */ current->mm->start_code = start_code; current->mm->end_code = end_code; current->mm->start_data = datapos; current->mm->end_data = datapos + data_len; /* * set up the brk stuff, uses any slack left in data/bss/stack * allocation. We put the brk after the bss (between the bss * and stack) like other platforms. 
* Userspace code relies on the stack pointer starting out at * an address right at the end of a page. */ current->mm->start_brk = datapos + data_len + bss_len; current->mm->brk = (current->mm->start_brk + 3) & ~3; #ifndef CONFIG_MMU current->mm->context.end_brk = memp + memp_size - stack_len; #endif if (flags & FLAT_FLAG_KTRACE) { pr_info("Mapping is %lx, Entry point is %x, data_start is %x\n", textpos, 0x00ffffff&ntohl(hdr->entry), ntohl(hdr->data_start)); pr_info("%s %s: TEXT=%lx-%lx DATA=%lx-%lx BSS=%lx-%lx\n", "Load", bprm->filename, start_code, end_code, datapos, datapos + data_len, datapos + data_len, (datapos + data_len + bss_len + 3) & ~3); } /* Store the current module values into the global library structure */ libinfo->lib_list[0].start_code = start_code; libinfo->lib_list[0].start_data = datapos; libinfo->lib_list[0].start_brk = datapos + data_len + bss_len; libinfo->lib_list[0].text_len = text_len; libinfo->lib_list[0].loaded = 1; libinfo->lib_list[0].entry = (0x00ffffff & ntohl(hdr->entry)) + textpos; libinfo->lib_list[0].build_date = ntohl(hdr->build_date); /* * We just load the allocations into some temporary memory to * help simplify all this mumbo jumbo * * We've got two different sections of relocation entries. * The first is the GOT which resides at the beginning of the data segment * and is terminated with a -1. This one can be relocated in place. * The second is the extra relocation entries tacked after the image's * data segment. These require a little more processing as the entry is * really an offset into the image which contains an offset into the * image. */ if (flags & FLAT_FLAG_GOTPIC) { rp = skip_got_header((u32 __user *) datapos); for (; ; rp++) { u32 addr, rp_val; if (get_user(rp_val, rp)) return -EFAULT; if (rp_val == 0xffffffff) break; if (rp_val) { addr = calc_reloc(rp_val, libinfo); if (addr == RELOC_FAILED) { ret = -ENOEXEC; goto err; } if (put_user(addr, rp)) return -EFAULT; } } } /* * Now run through the relocation entries. 
* We've got to be careful here as C++ produces relocatable zero * entries in the constructor and destructor tables which are then * tested for being not zero (which will always occur unless we're * based from address zero). This causes an endless loop as __start * is at zero. The solution used is to not relocate zero addresses. * This has the negative side effect of not allowing a global data * reference to be statically initialised to _stext (I've moved * __start to address 4 so that is okay). */ if (rev > OLD_FLAT_VERSION) { for (i = 0; i < relocs; i++) { u32 addr, relval; __be32 tmp; /* * Get the address of the pointer to be * relocated (of course, the address has to be * relocated first). */ if (get_user(tmp, reloc + i)) return -EFAULT; relval = ntohl(tmp); addr = flat_get_relocate_addr(relval); rp = (u32 __user *)calc_reloc(addr, libinfo); if (rp == (u32 __user *)RELOC_FAILED) { ret = -ENOEXEC; goto err; } /* Get the pointer's value. */ ret = flat_get_addr_from_rp(rp, relval, flags, &addr); if (unlikely(ret)) goto err; if (addr != 0) { /* * Do the relocation. PIC relocs in the data section are * already in target order */ if ((flags & FLAT_FLAG_GOTPIC) == 0) { /* * Meh, the same value can have a different * byte order based on a flag.. */ addr = ntohl((__force __be32)addr); } addr = calc_reloc(addr, libinfo); if (addr == RELOC_FAILED) { ret = -ENOEXEC; goto err; } /* Write back the relocated pointer. 
*/ ret = flat_put_addr_at_rp(rp, addr, relval); if (unlikely(ret)) goto err; } } #ifdef CONFIG_BINFMT_FLAT_OLD } else { for (i = 0; i < relocs; i++) { __be32 relval; if (get_user(relval, reloc + i)) return -EFAULT; old_reloc(ntohl(relval)); } #endif /* CONFIG_BINFMT_FLAT_OLD */ } flush_icache_user_range(start_code, end_code); /* zero the BSS, BRK and stack areas */ if (clear_user((void __user *)(datapos + data_len), bss_len + (memp + memp_size - stack_len - /* end brk */ libinfo->lib_list[0].start_brk) + /* start brk */ stack_len)) return -EFAULT; return 0; err: return ret; } /****************************************************************************/ /* * These are the functions used to load flat style executables and shared * libraries. There is no binary dependent code anywhere else. */ static int load_flat_binary(struct linux_binprm *bprm) { struct lib_info libinfo; struct pt_regs *regs = current_pt_regs(); unsigned long stack_len = 0; unsigned long start_addr; int res; int i, j; memset(&libinfo, 0, sizeof(libinfo)); /* * We have to add the size of our arguments to our stack size * otherwise it's too easy for users to create stack overflows * by passing in a huge argument list. And yes, we have to be * pedantic and include space for the argv/envp array as it may have * a lot of entries. */ #ifndef CONFIG_MMU stack_len += PAGE_SIZE * MAX_ARG_PAGES - bprm->p; /* the strings */ #endif stack_len += (bprm->argc + 1) * sizeof(char *); /* the argv array */ stack_len += (bprm->envc + 1) * sizeof(char *); /* the envp array */ stack_len = ALIGN(stack_len, FLAT_STACK_ALIGN); res = load_flat_file(bprm, &libinfo, &stack_len); if (res < 0) return res; /* Update data segment pointers for all libraries */ for (i = 0; i < MAX_SHARED_LIBS_UPDATE; i++) { if (!libinfo.lib_list[i].loaded) continue; for (j = 0; j < MAX_SHARED_LIBS; j++) { unsigned long val = libinfo.lib_list[j].loaded ? 
libinfo.lib_list[j].start_data : UNLOADED_LIB; unsigned long __user *p = (unsigned long __user *) libinfo.lib_list[i].start_data; p -= j + 1; if (put_user(val, p)) return -EFAULT; } } set_binfmt(&flat_format); #ifdef CONFIG_MMU res = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); if (!res) res = create_flat_tables(bprm, bprm->p); #else /* Stash our initial stack pointer into the mm structure */ current->mm->start_stack = ((current->mm->context.end_brk + stack_len + 3) & ~3) - 4; pr_debug("sp=%lx\n", current->mm->start_stack); /* copy the arg pages onto the stack */ res = transfer_args_to_stack(bprm, &current->mm->start_stack); if (!res) res = create_flat_tables(bprm, current->mm->start_stack); #endif if (res) return res; /* Fake some return addresses to ensure the call chain will * initialise library in order for us. We are required to call * lib 1 first, then 2, ... and finally the main program (id 0). */ start_addr = libinfo.lib_list[0].entry; #ifdef FLAT_PLAT_INIT FLAT_PLAT_INIT(regs); #endif finalize_exec(bprm); pr_debug("start_thread(regs=0x%p, entry=0x%lx, start_stack=0x%lx)\n", regs, start_addr, current->mm->start_stack); start_thread(regs, start_addr, current->mm->start_stack); return 0; } /****************************************************************************/ static int __init init_flat_binfmt(void) { register_binfmt(&flat_format); return 0; } core_initcall(init_flat_binfmt); /****************************************************************************/
// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Backend Operations - respond to PCI requests from Frontend
 *
 * Author: Ryan Wilson <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#include <linux/moduleparam.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <xen/events.h>
#include <linux/sched.h>
#include "pciback.h"

static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id);

/* Ensure a device has the fake IRQ handler "turned on/off" and is
 * ready to be exported. This MUST be run after xen_pcibk_reset_device
 * which does the actual PCI device enable/disable.
 */
static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
{
	struct xen_pcibk_dev_data *dev_data;
	int rc;
	int enable = 0;

	dev_data = pci_get_drvdata(dev);
	if (!dev_data)
		return;

	/* We don't deal with bridges */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return;

	if (reset) {
		dev_data->enable_intx = 0;
		dev_data->ack_intr = 0;
	}
	enable = dev_data->enable_intx;

	/* Asked to disable, but ISR isn't running */
	if (!enable && !dev_data->isr_on)
		return;

	/* Squirrel away the IRQs in the dev_data. We need this
	 * b/c when device transitions to MSI, the dev->irq is
	 * overwritten with the MSI vector.
	 */
	if (enable)
		dev_data->irq = dev->irq;

	/*
	 * SR-IOV devices in all use MSI-X and have no legacy
	 * interrupts, so inhibit creating a fake IRQ handler for them.
	 */
	if (dev_data->irq == 0)
		goto out;

	dev_dbg(&dev->dev, "%s: #%d %s %s%s %s-> %s\n",
		dev_data->irq_name,
		dev_data->irq,
		pci_is_enabled(dev) ? "on" : "off",
		dev->msi_enabled ? "MSI" : "",
		dev->msix_enabled ? "MSI/X" : "",
		dev_data->isr_on ? "enable" : "disable",
		enable ? "enable" : "disable");

	if (enable) {
		/*
		 * The MSI or MSI-X should not have an IRQ handler. Otherwise
		 * if the guest terminates we BUG_ON in free_msi_irqs.
		 */
		if (dev->msi_enabled || dev->msix_enabled)
			goto out;

		rc = request_irq(dev_data->irq, xen_pcibk_guest_interrupt,
				 IRQF_SHARED, dev_data->irq_name, dev);
		if (rc) {
			dev_err(&dev->dev, "%s: failed to install fake IRQ " \
				"handler for IRQ %d! (rc:%d)\n",
				dev_data->irq_name, dev_data->irq, rc);
			goto out;
		}
	} else {
		free_irq(dev_data->irq, dev);
		dev_data->irq = 0;
	}
	dev_data->isr_on = enable;
	dev_data->ack_intr = enable;
out:
	dev_dbg(&dev->dev, "%s: #%d %s %s%s %s\n",
		dev_data->irq_name,
		dev_data->irq,
		pci_is_enabled(dev) ? "on" : "off",
		dev->msi_enabled ? "MSI" : "",
		dev->msix_enabled ? "MSI/X" : "",
		enable ? (dev_data->isr_on ? "enabled" : "failed to enable") :
			(dev_data->isr_on ? "failed to disable" : "disabled"));
}

/* Ensure a device is "turned off" and ready to be exported.
 * (Also see xen_pcibk_config_reset to ensure virtual configuration space is
 * ready to be re-exported)
 */
void xen_pcibk_reset_device(struct pci_dev *dev)
{
	u16 cmd;

	xen_pcibk_control_isr(dev, 1 /* reset device */);

	/* Disable devices (but not bridges) */
	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
#ifdef CONFIG_PCI_MSI
		/* The guest could have been abruptly killed without
		 * disabling MSI/MSI-X interrupts.*/
		if (dev->msix_enabled)
			pci_disable_msix(dev);
		if (dev->msi_enabled)
			pci_disable_msi(dev);
#endif
		if (pci_is_enabled(dev))
			pci_disable_device(dev);

		dev->is_busmaster = 0;
	} else {
		/* Bridges: just stop memory-write-invalidate bus mastering. */
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_INVALIDATE)) {
			cmd &= ~(PCI_COMMAND_INVALIDATE);
			pci_write_config_word(dev, PCI_COMMAND, cmd);

			dev->is_busmaster = 0;
		}
	}
}

#ifdef CONFIG_PCI_MSI
/*
 * Enable MSI for the exported device on behalf of the guest and report the
 * pirq the guest should use in op->value.  Fails if MSI or MSI-X is already
 * enabled.  Returns 0 or XEN_PCI_ERR_op_failed.
 */
static
int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
			 struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;
	int status;

	if (dev->msi_enabled)
		status = -EALREADY;
	else if (dev->msix_enabled)
		status = -ENXIO;
	else
		status = pci_enable_msi(dev);

	if (status) {
		dev_warn_ratelimited(&dev->dev, "error enabling MSI for guest %u: err %d\n",
				     pdev->xdev->otherend_id, status);
		op->value = 0;
		return XEN_PCI_ERR_op_failed;
	}

	/* The value the guest needs is actually the IDT vector, not
	 * the local domain's IRQ number. */

	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;

	dev_dbg(&dev->dev, "MSI: %d\n", op->value);

	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 0;

	return 0;
}

/* Disable MSI (if enabled) and report the remaining IRQ's pirq in op->value. */
static
int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
			  struct pci_dev *dev, struct xen_pci_op *op)
{
	if (dev->msi_enabled) {
		struct xen_pcibk_dev_data *dev_data;

		pci_disable_msi(dev);

		dev_data = pci_get_drvdata(dev);
		if (dev_data)
			dev_data->ack_intr = 1;
	}
	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;

	dev_dbg(&dev->dev, "MSI: %d\n", op->value);

	return 0;
}

/*
 * Enable op->value MSI-X vectors using the entries the guest supplied in
 * op->msix_entries, writing the resulting pirqs back into the same array.
 * Returns 0 on success or a negative errno (also stored in op->value).
 */
static
int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
			  struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;
	int i, result;
	struct msix_entry *entries;
	u16 cmd;

	dev_dbg(&dev->dev, "enable MSI-X\n");

	if (op->value > SH_INFO_MAX_VEC)
		return -EINVAL;

	if (dev->msix_enabled)
		return -EALREADY;

	/*
	 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
	 * to access the BARs where the MSI-X entries reside.
	 * But VF devices are unique in which the PF needs to be checked.
	 */
	pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
	if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
		return -ENXIO;

	entries = kmalloc_array(op->value, sizeof(*entries), GFP_KERNEL);
	if (entries == NULL)
		return -ENOMEM;

	for (i = 0; i < op->value; i++) {
		entries[i].entry = op->msix_entries[i].entry;
		entries[i].vector = op->msix_entries[i].vector;
	}

	result = pci_enable_msix_exact(dev, entries, op->value);
	if (result == 0) {
		for (i = 0; i < op->value; i++) {
			op->msix_entries[i].entry = entries[i].entry;
			if (entries[i].vector) {
				op->msix_entries[i].vector =
					xen_pirq_from_irq(entries[i].vector);
				dev_dbg(&dev->dev, "MSI-X[%d]: %d\n", i,
					op->msix_entries[i].vector);
			}
		}
	} else
		dev_warn_ratelimited(&dev->dev, "error enabling MSI-X for guest %u: err %d!\n",
				     pdev->xdev->otherend_id, result);
	kfree(entries);

	op->value = result;
	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 0;

	return result > 0 ? 0 : result;
}

/* Disable MSI-X (if enabled) and report the legacy IRQ's pirq in op->value. */
static
int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
			   struct pci_dev *dev, struct xen_pci_op *op)
{
	if (dev->msix_enabled) {
		struct xen_pcibk_dev_data *dev_data;

		pci_disable_msix(dev);

		dev_data = pci_get_drvdata(dev);
		if (dev_data)
			dev_data->ack_intr = 1;
	}
	/*
	 * SR-IOV devices (which don't have any legacy IRQ) have
	 * an undefined IRQ value of zero.
	 */
	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;

	dev_dbg(&dev->dev, "MSI-X: %d\n", op->value);

	return 0;
}
#endif

/*
 * True when the frontend has posted a request and no other context has
 * claimed it yet (claims it via the _PDEVF_op_active bit as a side effect).
 */
static inline bool xen_pcibk_test_op_pending(struct xen_pcibk_device *pdev)
{
	return test_bit(_XEN_PCIF_active,
			(unsigned long *)&pdev->sh_info->flags) &&
	       !test_and_set_bit(_PDEVF_op_active, &pdev->flags);
}

/*
 * Now the same evtchn is used for both pcifront conf_read_write request
 * as well as pcie aer front end ack. We use a new work_queue to schedule
 * xen_pcibk conf_read_write service for avoiding conflict with aer_core
 * do_recovery job which also use the system default work_queue
 */
static void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
{
	bool eoi = true;

	/* Check that frontend is requesting an operation and that we are not
	 * already processing a request */
	if (xen_pcibk_test_op_pending(pdev)) {
		schedule_work(&pdev->op_work);
		eoi = false;
	}
	/*_XEN_PCIB_active should have been cleared by pcifront. And also make
	sure xen_pcibk is waiting for ack by checking _PCIB_op_pending*/
	if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
	    && test_bit(_PCIB_op_pending, &pdev->flags)) {
		wake_up(&xen_pcibk_aer_wait_queue);
		eoi = false;
	}

	/* EOI if there was nothing to do. */
	if (eoi)
		xen_pcibk_lateeoi(pdev, XEN_EOI_FLAG_SPURIOUS);
}

/* Performing the configuration space reads/writes must not be done in atomic
 * context because some of the pci_* functions can sleep (mostly due to ACPI
 * use of semaphores). This function is intended to be called from a work
 * queue in process context taking a struct xen_pcibk_device as a parameter */

static void xen_pcibk_do_one_op(struct xen_pcibk_device *pdev)
{
	struct pci_dev *dev;
	struct xen_pcibk_dev_data *dev_data = NULL;
	struct xen_pci_op *op = &pdev->op;
	int test_intx = 0;
#ifdef CONFIG_PCI_MSI
	unsigned int nr = 0;
#endif

	/* Snapshot the shared-ring request before acting on it. */
	*op = pdev->sh_info->op;
	barrier();
	dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);

	if (dev == NULL)
		op->err = XEN_PCI_ERR_dev_not_found;
	else {
		dev_data = pci_get_drvdata(dev);
		if (dev_data)
			test_intx = dev_data->enable_intx;
		switch (op->cmd) {
		case XEN_PCI_OP_conf_read:
			op->err = xen_pcibk_config_read(dev,
				  op->offset, op->size, &op->value);
			break;
		case XEN_PCI_OP_conf_write:
			op->err = xen_pcibk_config_write(dev,
				  op->offset, op->size, op->value);
			break;
#ifdef CONFIG_PCI_MSI
		case XEN_PCI_OP_enable_msi:
			op->err = xen_pcibk_enable_msi(pdev, dev, op);
			break;
		case XEN_PCI_OP_disable_msi:
			op->err = xen_pcibk_disable_msi(pdev, dev, op);
			break;
		case XEN_PCI_OP_enable_msix:
			nr = op->value;
			op->err = xen_pcibk_enable_msix(pdev, dev, op);
			break;
		case XEN_PCI_OP_disable_msix:
			op->err = xen_pcibk_disable_msix(pdev, dev, op);
			break;
#endif
		default:
			op->err = XEN_PCI_ERR_not_implemented;
			break;
		}
	}
	if (!op->err && dev && dev_data) {
		/* Transition detected */
		if ((dev_data->enable_intx != test_intx))
			xen_pcibk_control_isr(dev, 0 /* no reset */);
	}
	pdev->sh_info->op.err = op->err;
	pdev->sh_info->op.value = op->value;
#ifdef CONFIG_PCI_MSI
	if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
		unsigned int i;

		for (i = 0; i < nr; i++)
			pdev->sh_info->op.msix_entries[i].vector =
				op->msix_entries[i].vector;
	}
#endif
	/* Tell the driver domain that we're done. */
	wmb();
	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_irq(pdev->evtchn_irq);

	/* Mark that we're done. */
	smp_mb__before_atomic(); /* /after/ clearing PCIF_active */
	clear_bit(_PDEVF_op_active, &pdev->flags);
	smp_mb__after_atomic(); /* /before/ final check for work */
}

/* Work-queue entry point: drain all pending frontend requests, then EOI. */
void xen_pcibk_do_op(struct work_struct *data)
{
	struct xen_pcibk_device *pdev =
		container_of(data, struct xen_pcibk_device, op_work);

	do {
		xen_pcibk_do_one_op(pdev);
	} while (xen_pcibk_test_op_pending(pdev));

	xen_pcibk_lateeoi(pdev, 0);
}

/* Event-channel interrupt handler for frontend requests. */
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id)
{
	struct xen_pcibk_device *pdev = dev_id;
	bool eoi;

	/* IRQs might come in before pdev->evtchn_irq is written. */
	if (unlikely(pdev->evtchn_irq != irq))
		pdev->evtchn_irq = irq;

	eoi = test_and_set_bit(_EOI_pending, &pdev->flags);
	WARN(eoi, "IRQ while EOI pending\n");

	xen_pcibk_test_and_schedule_op(pdev);

	return IRQ_HANDLED;
}

/* Fake legacy-INTx handler installed on behalf of the guest. */
static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id)
{
	struct pci_dev *dev = (struct pci_dev *)dev_id;
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);

	if (dev_data->isr_on && dev_data->ack_intr) {
		dev_data->handled++;
		if ((dev_data->handled % 1000) == 0) {
			if (xen_test_irq_shared(irq)) {
				dev_info(&dev->dev, "%s IRQ line is not shared "
					"with other domains. Turning ISR off\n",
					 dev_data->irq_name);
				dev_data->ack_intr = 0;
			}
		}
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ #define barrier_var(var) /**/ /* undef #define UNROLL */ #define INLINE /**/ #include "profiler.inc.h"
// SPDX-License-Identifier: GPL-2.0 #include <libunwind-aarch64.h> #include <stdlib.h> extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as, unw_word_t ip, unw_dyn_info_t *di, unw_proc_info_t *pi, int need_unwind_info, void *arg); #define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table) static unw_accessors_t accessors; int main(void) { unw_addr_space_t addr_space; addr_space = unw_create_addr_space(&accessors, 0); if (addr_space) return 0; unw_init_remote(NULL, addr_space, NULL); dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL); return 0; }
// SPDX-License-Identifier: GPL-2.0 /* * Marvell MV98DX3236 SoC clocks * * Copyright (C) 2012 Marvell * * Gregory CLEMENT <[email protected]> * Sebastian Hesselbarth <[email protected]> * Andrew Lunn <[email protected]> * */ #include <linux/kernel.h> #include <linux/clk-provider.h> #include <linux/io.h> #include <linux/of.h> #include "common.h" /* * For 98DX4251 Sample At Reset the CPU, DDR and Main PLL clocks are all * defined at the same time * * SAR1[20:18] : CPU frequency DDR frequency MPLL frequency * 0 = 400 MHz 400 MHz 800 MHz * 2 = 667 MHz 667 MHz 2000 MHz * 3 = 800 MHz 800 MHz 1600 MHz * others reserved. * * For 98DX3236 Sample At Reset the CPU, DDR and Main PLL clocks are all * defined at the same time * * SAR1[20:18] : CPU frequency DDR frequency MPLL frequency * 1 = 667 MHz 667 MHz 2000 MHz * 2 = 400 MHz 400 MHz 400 MHz * 3 = 800 MHz 800 MHz 800 MHz * 5 = 800 MHz 400 MHz 800 MHz * others reserved. */ #define SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT 18 #define SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT_MASK 0x7 static u32 __init mv98dx3236_get_tclk_freq(void __iomem *sar) { /* Tclk = 200MHz, no SaR dependency */ return 200000000; } static const u32 mv98dx3236_cpu_frequencies[] __initconst = { 0, 667000000, 400000000, 800000000, 0, 800000000, 0, 0, }; static const u32 mv98dx4251_cpu_frequencies[] __initconst = { 400000000, 0, 667000000, 800000000, 0, 0, 0, 0, }; static u32 __init mv98dx3236_get_cpu_freq(void __iomem *sar) { u32 cpu_freq = 0; u8 cpu_freq_select = 0; cpu_freq_select = ((readl(sar) >> SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT) & SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT_MASK); if (of_machine_is_compatible("marvell,armadaxp-98dx4251")) cpu_freq = mv98dx4251_cpu_frequencies[cpu_freq_select]; else if (of_machine_is_compatible("marvell,armadaxp-98dx3236")) cpu_freq = mv98dx3236_cpu_frequencies[cpu_freq_select]; if (!cpu_freq) pr_err("CPU freq select unsupported %d\n", cpu_freq_select); return cpu_freq; } enum { MV98DX3236_CPU_TO_DDR, MV98DX3236_CPU_TO_MPLL }; 
static const struct coreclk_ratio mv98dx3236_core_ratios[] __initconst = {
	{ .id = MV98DX3236_CPU_TO_DDR, .name = "ddrclk" },
	{ .id = MV98DX3236_CPU_TO_MPLL, .name = "mpll" },
};

/*
 * {mult, div} pairs relative to the CPU clock, indexed by the SAR1
 * frequency-option field; {0, 1} marks reserved encodings.
 */
static const int __initconst mv98dx3236_cpu_mpll_ratios[8][2] = {
	{0, 1}, {3, 1}, {1, 1}, {1, 1},
	{0, 1}, {1, 1}, {0, 1}, {0, 1},
};

static const int __initconst mv98dx3236_cpu_ddr_ratios[8][2] = {
	{0, 1}, {1, 1}, {1, 1}, {1, 1},
	{0, 1}, {1, 2}, {0, 1}, {0, 1},
};

static const int __initconst mv98dx4251_cpu_mpll_ratios[8][2] = {
	{2, 1}, {0, 1}, {3, 1}, {2, 1},
	{0, 1}, {0, 1}, {0, 1}, {0, 1},
};

static const int __initconst mv98dx4251_cpu_ddr_ratios[8][2] = {
	{1, 1}, {0, 1}, {1, 1}, {1, 1},
	{0, 1}, {0, 1}, {0, 1}, {0, 1},
};

/*
 * Report the mult/div pair for core clock @id, selected by the SAR1
 * frequency-option field read from @sar.  On an unknown machine or
 * unknown @id, *mult/*div are left untouched.
 */
static void __init mv98dx3236_get_clk_ratio(
	void __iomem *sar, int id, int *mult, int *div)
{
	u32 opt = ((readl(sar) >> SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT) &
		   SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT_MASK);

	switch (id) {
	case MV98DX3236_CPU_TO_DDR:
		if (of_machine_is_compatible("marvell,armadaxp-98dx4251")) {
			*mult = mv98dx4251_cpu_ddr_ratios[opt][0];
			*div = mv98dx4251_cpu_ddr_ratios[opt][1];
		} else if (of_machine_is_compatible("marvell,armadaxp-98dx3236")) {
			*mult = mv98dx3236_cpu_ddr_ratios[opt][0];
			*div = mv98dx3236_cpu_ddr_ratios[opt][1];
		}
		break;
	case MV98DX3236_CPU_TO_MPLL:
		if (of_machine_is_compatible("marvell,armadaxp-98dx4251")) {
			*mult = mv98dx4251_cpu_mpll_ratios[opt][0];
			*div = mv98dx4251_cpu_mpll_ratios[opt][1];
		} else if (of_machine_is_compatible("marvell,armadaxp-98dx3236")) {
			*mult = mv98dx3236_cpu_mpll_ratios[opt][0];
			*div = mv98dx3236_cpu_mpll_ratios[opt][1];
		}
		break;
	}
}

static const struct coreclk_soc_desc mv98dx3236_core_clocks = {
	.get_tclk_freq = mv98dx3236_get_tclk_freq,
	.get_cpu_freq = mv98dx3236_get_cpu_freq,
	.get_clk_ratio = mv98dx3236_get_clk_ratio,
	.ratios = mv98dx3236_core_ratios,
	.num_ratios = ARRAY_SIZE(mv98dx3236_core_ratios),
};

/*
 * Clock Gating Control
 */
/* Entries are { name, parent, gating bit index, flags }. */
static const struct clk_gating_soc_desc mv98dx3236_gating_desc[] __initconst = {
	{ "ge1", NULL, 3, 0 },
	{ "ge0", NULL, 4, 0 },
	{ "pex00", NULL, 5, 0 },
	{ "sdio", NULL, 17, 0 },
	{ "usb0", NULL, 18, 0 },
	{ "xor0", NULL, 22, 0 },
	{ }
};

/*
 * OF init hook: register the core clocks and, when the gating-clock node
 * is present in the devicetree, the gated peripheral clocks as well.
 */
static void __init mv98dx3236_clk_init(struct device_node *np)
{
	struct device_node *cgnp =
		of_find_compatible_node(NULL, NULL, "marvell,mv98dx3236-gating-clock");

	mvebu_coreclk_setup(np, &mv98dx3236_core_clocks);

	if (cgnp) {
		mvebu_clk_gating_setup(cgnp, mv98dx3236_gating_desc);
		of_node_put(cgnp);
	}
}
CLK_OF_DECLARE(mv98dx3236_clk, "marvell,mv98dx3236-core-clock", mv98dx3236_clk_init);
// SPDX-License-Identifier: GPL-2.0+
/*
 * HID driver for gaming keys on Razer Blackwidow gaming keyboards
 * Macro Key Keycodes: M1 = 191, M2 = 192, M3 = 193, M4 = 194, M5 = 195
 *
 * Copyright (c) 2021 Jelle van der Waa <[email protected]>
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/usb.h>
#include <linux/wait.h>

#include "hid-ids.h"

/* Map the current HID usage to EV_KEY code @c on the input device. */
#define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))

#define RAZER_BLACKWIDOW_TRANSFER_BUF_SIZE	91

static bool macro_key_remapping = 1;
module_param(macro_key_remapping, bool, 0644);
MODULE_PARM_DESC(macro_key_remapping, " on (Y) off (N)");

/*
 * Opaque vendor feature-report payload sent once at probe time to switch
 * the keyboard's macro keys on (exact byte meanings are not documented).
 */
static unsigned char blackwidow_init[RAZER_BLACKWIDOW_TRANSFER_BUF_SIZE] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x04, 0x02, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
};

/*
 * Remap the vendor macro-key usages (0x68..0x6c in the keyboard usage page)
 * to KEY_MACRO1..KEY_MACRO5 so userspace sees dedicated keycodes.  Returns 1
 * when a mapping was made, 0 to fall back to the default handling.
 */
static int razer_input_mapping(struct hid_device *hdev,
		struct hid_input *hi, struct hid_field *field,
		struct hid_usage *usage, unsigned long **bit, int *max)
{
	if (!macro_key_remapping)
		return 0;

	if ((usage->hid & HID_UP_KEYBOARD) != HID_UP_KEYBOARD)
		return 0;

	switch (usage->hid & ~HID_UP_KEYBOARD) {
	case 0x68:
		map_key_clear(KEY_MACRO1);
		return 1;
	case 0x69:
		map_key_clear(KEY_MACRO2);
		return 1;
	case 0x6a:
		map_key_clear(KEY_MACRO3);
		return 1;
	case 0x6b:
		map_key_clear(KEY_MACRO4);
		return 1;
	case 0x6c:
		map_key_clear(KEY_MACRO5);
		return 1;
	}

	return 0;
}

/*
 * Probe: parse the report descriptors, enable the macro keys via a vendor
 * feature report (sent through the mouse interface only), then start the
 * HID device.  A failed enable is logged but does not abort the probe.
 */
static int razer_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	char *buf;
	int ret = 0;

	ret = hid_parse(hdev);
	if (ret)
		return ret;

	/*
	 * Only send the enable macro keys command for the third device
	 * identified as mouse input.
	 */
	if (hdev->type == HID_TYPE_USBMOUSE) {
		/* hid_hw_raw_request() may rewrite the buffer; use a copy. */
		buf = kmemdup(blackwidow_init, RAZER_BLACKWIDOW_TRANSFER_BUF_SIZE,
			      GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;

		ret = hid_hw_raw_request(hdev, 0, buf,
				RAZER_BLACKWIDOW_TRANSFER_BUF_SIZE,
				HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
		if (ret != RAZER_BLACKWIDOW_TRANSFER_BUF_SIZE)
			hid_err(hdev, "failed to enable macro keys: %d\n", ret);

		kfree(buf);
	}

	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}

static const struct hid_device_id razer_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER,
		USB_DEVICE_ID_RAZER_BLACKWIDOW) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER,
		USB_DEVICE_ID_RAZER_BLACKWIDOW_CLASSIC) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER,
		USB_DEVICE_ID_RAZER_BLACKWIDOW_ULTIMATE) },
	{ }
};
MODULE_DEVICE_TABLE(hid, razer_devices);

static struct hid_driver razer_driver = {
	.name = "razer",
	.id_table = razer_devices,
	.input_mapping = razer_input_mapping,
	.probe = razer_probe,
};
module_hid_driver(razer_driver);

MODULE_AUTHOR("Jelle van der Waa <[email protected]>");
MODULE_DESCRIPTION("HID driver for gaming keys on Razer Blackwidow gaming keyboards");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM GIC v2m MSI(-X) support
 * Support for Message Signaled Interrupts for systems that
 * implement ARM Generic Interrupt Controller: GICv2m.
 *
 * Copyright (C) 2014 Advanced Micro Devices, Inc.
 * Authors: Suravee Suthikulpanit <[email protected]>
 *	    Harish Kasiviswanathan <[email protected]>
 *	    Brandon Anderson <[email protected]>
 */

#define pr_fmt(fmt) "GICv2m: " fmt

#include <linux/acpi.h>
#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-common.h>

#include "irq-msi-lib.h"

/*
 * MSI_TYPER:
 *	[31:26]	Reserved
 *	[25:16]	lowest SPI assigned to MSI
 *	[15:10]	Reserved
 *	[9:0]	Number of SPIs assigned to MSI
 */
#define V2M_MSI_TYPER		       0x008
#define V2M_MSI_TYPER_BASE_SHIFT       16
#define V2M_MSI_TYPER_BASE_MASK	       0x3FF
#define V2M_MSI_TYPER_NUM_MASK	       0x3FF
#define V2M_MSI_SETSPI_NS	       0x040
#define V2M_MIN_SPI		       32
#define V2M_MAX_SPI		       1019
#define V2M_MSI_IIDR		       0xFCC

#define V2M_MSI_TYPER_BASE_SPI(x)      \
		(((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)

#define V2M_MSI_TYPER_NUM_SPI(x)       ((x) & V2M_MSI_TYPER_NUM_MASK)

/* APM X-Gene with GICv2m MSI_IIDR register value */
#define XGENE_GICV2M_MSI_IIDR		0x06000170

/* Broadcom NS2 GICv2m MSI_IIDR register value */
#define BCM_NS2_GICV2M_MSI_IIDR		0x0000013f

/* List of flags for specific v2m implementation */
#define GICV2M_NEEDS_SPI_OFFSET		0x00000001
#define GICV2M_GRAVITON_ADDRESS_ONLY	0x00000002

/* All discovered v2m frames; v2m_lock protects the list and the bitmaps. */
static LIST_HEAD(v2m_nodes);
static DEFINE_SPINLOCK(v2m_lock);

/* Per-frame state for one GICv2m MSI frame. */
struct v2m_data {
	struct list_head entry;
	struct fwnode_handle *fwnode;
	struct resource res;	/* GICv2m resource */
	void __iomem *base;	/* GICv2m virt address */
	u32 spi_start;		/* The SPI number that MSIs start */
	u32 nr_spis;		/* The number of SPIs for MSIs */
	u32 spi_offset;		/* offset to be subtracted from SPI number */
	unsigned long *bm;	/* MSI vector bitmap */
	u32 flags;		/* v2m flags for specific implementation */
};

/*
 * Doorbell address a device writes to raise @hwirq.  Graviton frames
 * encode the SPI in the address instead of the data payload.
 */
static phys_addr_t gicv2m_get_msi_addr(struct v2m_data *v2m, int hwirq)
{
	if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
		return v2m->res.start | ((hwirq - 32) << 3);
	else
		return v2m->res.start + V2M_MSI_SETSPI_NS;
}

/* Fill in the MSI address/data pair for @data, applying per-SoC quirks. */
static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = gicv2m_get_msi_addr(v2m, data->hwirq);

	msg->address_hi = upper_32_bits(addr);
	msg->address_lo = lower_32_bits(addr);

	if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
		msg->data = 0;
	else
		msg->data = data->hwirq;
	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
		msg->data -= v2m->spi_offset;

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}

static struct irq_chip gicv2m_irq_chip = {
	.name			= "GICv2m",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
};

/*
 * Allocate @hwirq in the parent GIC domain, building the fwspec in the
 * format the parent expects (3 cells for DT GICs, 2 for ACPI fwnodes).
 */
static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
				       unsigned int virq,
				       irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int err;

	if (is_of_node(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = 0;
		fwspec.param[1] = hwirq - 32;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 2;
		fwspec.param[0] = hwirq;
		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		return err;

	/* Configure the interrupt line to be edge */
	d = irq_domain_get_irq_data(domain->parent, virq);
	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
	return 0;
}

/* Return a contiguous range of SPIs to the frame's allocation bitmap. */
static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
			       int nr_irqs)
{
	spin_lock(&v2m_lock);
	bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
			      get_count_order(nr_irqs));
	spin_unlock(&v2m_lock);
}

/*
 * Allocate @nr_irqs SPIs from the first frame with a free aligned region,
 * then wire each one into the parent GIC domain.
 */
static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;
	struct v2m_data *v2m = NULL, *tmp;
	int hwirq, offset, i, err = 0;

	spin_lock(&v2m_lock);
	list_for_each_entry(tmp, &v2m_nodes, entry) {
		offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
						 get_count_order(nr_irqs));
		if (offset >= 0) {
			v2m = tmp;
			break;
		}
	}
	spin_unlock(&v2m_lock);

	if (!v2m)
		return -ENOSPC;

	hwirq = v2m->spi_start + offset;

	err = iommu_dma_prepare_msi(info->desc,
				    gicv2m_get_msi_addr(v2m, hwirq));
	if (err)
		return err;

	for (i = 0; i < nr_irqs; i++) {
		err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
		if (err)
			goto fail;

		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &gicv2m_irq_chip, v2m);
	}

	return 0;

fail:
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
	gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
	return err;
}

static void gicv2m_irq_domain_free(struct irq_domain *domain,
				   unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);

	gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops gicv2m_domain_ops = {
	.select	= msi_lib_irq_domain_select,
	.alloc	= gicv2m_irq_domain_alloc,
	.free	= gicv2m_irq_domain_free,
};

/* Sanity-check an SPI range against the architectural SPI space. */
static bool is_msi_spi_valid(u32 base, u32 num)
{
	if (base < V2M_MIN_SPI) {
		pr_err("Invalid MSI base SPI (base:%u)\n", base);
		return false;
	}

	if ((num == 0) || (base + num > V2M_MAX_SPI)) {
		pr_err("Number of SPIs (%u) exceed maximum (%u)\n",
		       num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
		return false;
	}

	return true;
}

/* Undo gicv2m_init_one() for every registered frame (init-failure path). */
static void __init gicv2m_teardown(void)
{
	struct v2m_data *v2m, *tmp;

	list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
		list_del(&v2m->entry);
		bitmap_free(v2m->bm);
		iounmap(v2m->base);
		of_node_put(to_of_node(v2m->fwnode));
		if (is_fwnode_irqchip(v2m->fwnode))
			irq_domain_free_fwnode(v2m->fwnode);
		kfree(v2m);
	}
}

#define GICV2M_MSI_FLAGS_REQUIRED	(MSI_FLAG_USE_DEF_DOM_OPS |	\
					 MSI_FLAG_USE_DEF_CHIP_OPS |	\
					 MSI_FLAG_PCI_MSI_MASK_PARENT)

#define GICV2M_MSI_FLAGS_SUPPORTED	(MSI_GENERIC_FLAGS_MASK |	\
					 MSI_FLAG_PCI_MSIX |		\
					 MSI_FLAG_MULTI_PCI_MSI)

/* Not const: the Graviton quirk clears MSI_FLAG_MULTI_PCI_MSI at runtime. */
static struct msi_parent_ops gicv2m_msi_parent_ops = {
	.supported_flags	= GICV2M_MSI_FLAGS_SUPPORTED,
	.required_flags		= GICV2M_MSI_FLAGS_REQUIRED,
	.bus_select_token	= DOMAIN_BUS_NEXUS,
	.bus_select_mask	= MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
	.prefix			= "GICv2m-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

/*
 * Create the single MSI parent domain beneath @parent.  One domain serves
 * all frames; allocation walks the v2m_nodes list.  Returns 0 when no
 * frame was discovered (nothing to do).
 */
static __init int gicv2m_allocate_domains(struct irq_domain *parent)
{
	struct irq_domain *inner_domain;
	struct v2m_data *v2m;

	v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!v2m)
		return 0;

	inner_domain = irq_domain_create_hierarchy(parent, 0, 0, v2m->fwnode,
						   &gicv2m_domain_ops, v2m);
	if (!inner_domain) {
		pr_err("Failed to create GICv2m domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
	inner_domain->msi_parent_ops = &gicv2m_msi_parent_ops;
	return 0;
}

/*
 * Register one v2m frame: map its registers, determine its SPI range
 * (explicit @spi_start/@nr_spis, or read from MSI_TYPER), apply per-SoC
 * quirks, and add it to v2m_nodes.
 */
static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
				  u32 spi_start, u32 nr_spis,
				  struct resource *res, u32 flags)
{
	int ret;
	struct v2m_data *v2m;

	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
	if (!v2m)
		return -ENOMEM;

	INIT_LIST_HEAD(&v2m->entry);
	v2m->fwnode = fwnode;
	v2m->flags = flags;

	memcpy(&v2m->res, res, sizeof(struct resource));

	v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
	if (!v2m->base) {
		pr_err("Failed to map GICv2m resource\n");
		ret = -ENOMEM;
		goto err_free_v2m;
	}

	if (spi_start && nr_spis) {
		v2m->spi_start = spi_start;
		v2m->nr_spis = nr_spis;
	} else {
		u32 typer;

		/* Graviton should always have explicit spi_start/nr_spis */
		if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY) {
			ret = -EINVAL;
			goto err_iounmap;
		}
		typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);

		v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
		v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
	}

	if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
		ret = -EINVAL;
		goto err_iounmap;
	}

	/*
	 * APM X-Gene GICv2m implementation has an erratum where
	 * the MSI data needs to be the offset from the spi_start
	 * in order to trigger the correct MSI interrupt. This is
	 * different from the standard GICv2m implementation where
	 * the MSI data is the absolute value within the range from
	 * spi_start to (spi_start + num_spis).
	 *
	 * Broadcom NS2 GICv2m implementation has an erratum where the MSI data
	 * is 'spi_number - 32'
	 *
	 * Reading that register fails on the Graviton implementation
	 */
	if (!(v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)) {
		switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
		case XGENE_GICV2M_MSI_IIDR:
			v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
			v2m->spi_offset = v2m->spi_start;
			break;
		case BCM_NS2_GICV2M_MSI_IIDR:
			v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
			v2m->spi_offset = 32;
			break;
		}
	}
	v2m->bm = bitmap_zalloc(v2m->nr_spis, GFP_KERNEL);
	if (!v2m->bm) {
		ret = -ENOMEM;
		goto err_iounmap;
	}

	list_add_tail(&v2m->entry, &v2m_nodes);
	pr_info("range%pR, SPI[%d:%d]\n", res,
		v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));

	return 0;

err_iounmap:
	iounmap(v2m->base);
err_free_v2m:
	kfree(v2m);
	return ret;
}

static __initconst struct of_device_id gicv2m_device_id[] = {
	{	.compatible	= "arm,gic-v2m-frame", },
	{},
};

/*
 * DT probe path: register every "arm,gic-v2m-frame" child of the GIC node
 * that is marked as an msi-controller, then create the MSI domain.
 */
static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
				 struct irq_domain *parent)
{
	int ret = 0;
	struct device_node *node = to_of_node(parent_handle);
	struct device_node *child;

	for (child = of_find_matching_node(node, gicv2m_device_id); child;
	     child = of_find_matching_node(child, gicv2m_device_id)) {
		u32 spi_start = 0, nr_spis = 0;
		struct resource res;

		if (!of_property_read_bool(child, "msi-controller"))
			continue;

		ret = of_address_to_resource(child, 0, &res);
		if (ret) {
			pr_err("Failed to allocate v2m resource.\n");
			break;
		}

		/* Optional DT override of the SPI range read from MSI_TYPER. */
		if (!of_property_read_u32(child, "arm,msi-base-spi",
					  &spi_start) &&
		    !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
			pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
				spi_start, nr_spis);

		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
				      &res, 0);
		if (ret)
			break;
	}

	if (ret && child)
		of_node_put(child);
	if (!ret)
		ret = gicv2m_allocate_domains(parent);
	if (ret)
		gicv2m_teardown();
	return ret;
}

#ifdef CONFIG_ACPI
static int acpi_num_msi;

static __init struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
{
	struct v2m_data *data;

	if (WARN_ON(acpi_num_msi <= 0))
		return NULL;

	/* We only return the fwnode of the first MSI frame. */
	data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!data)
		return NULL;

	return data->fwnode;
}

/* Detect Amazon Graviton firmware by the MADT OEM ID. */
static __init bool acpi_check_amazon_graviton_quirks(void)
{
	static struct acpi_table_madt *madt;
	acpi_status status;
	bool rc = false;

#define ACPI_AMZN_OEM_ID		"AMAZON"

	status = acpi_get_table(ACPI_SIG_MADT, 0,
				(struct acpi_table_header **)&madt);

	if (ACPI_FAILURE(status) || !madt)
		return rc;
	rc = !memcmp(madt->header.oem_id, ACPI_AMZN_OEM_ID, ACPI_OEM_ID_SIZE);
	acpi_put_table((struct acpi_table_header *)madt);

	return rc;
}

/* MADT walker: register one GENERIC_MSI_FRAME entry as a v2m frame. */
static int __init
acpi_parse_madt_msi(union acpi_subtable_headers *header,
		    const unsigned long end)
{
	int ret;
	struct resource res;
	u32 spi_start = 0, nr_spis = 0;
	struct acpi_madt_generic_msi_frame *m;
	struct fwnode_handle *fwnode;
	u32 flags = 0;

	m = (struct acpi_madt_generic_msi_frame *)header;
	if (BAD_MADT_ENTRY(m, end))
		return -EINVAL;

	res.start = m->base_address;
	res.end = m->base_address + SZ_4K - 1;
	res.flags = IORESOURCE_MEM;

	if (acpi_check_amazon_graviton_quirks()) {
		pr_info("applying Amazon Graviton quirk\n");
		res.end = res.start + SZ_8K - 1;
		flags |= GICV2M_GRAVITON_ADDRESS_ONLY;
		gicv2m_msi_parent_ops.supported_flags &= ~MSI_FLAG_MULTI_PCI_MSI;
	}

	if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
		spi_start = m->spi_base;
		nr_spis = m->spi_count;

		pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
			spi_start, nr_spis);
	}

	fwnode = irq_domain_alloc_fwnode(&res.start);
	if (!fwnode) {
		pr_err("Unable to allocate GICv2m domain token\n");
		return -EINVAL;
	}

	ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res, flags);
	if (ret)
		irq_domain_free_fwnode(fwnode);

	return ret;
}

/* ACPI probe path; idempotent — subsequent calls are no-ops. */
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	int ret;

	if (acpi_num_msi > 0)
		return 0;

	acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
					     acpi_parse_madt_msi, 0);

	if (acpi_num_msi <= 0)
		goto err_out;

	ret = gicv2m_allocate_domains(parent);
	if (ret)
		goto err_out;

	pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);

	return 0;

err_out:
	gicv2m_teardown();
	return -EINVAL;
}
#else /* CONFIG_ACPI */
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	return -EINVAL;
}
#endif /* CONFIG_ACPI */

/* Entry point called by the GIC driver; dispatches on firmware type. */
int __init gicv2m_init(struct fwnode_handle *parent_handle,
		       struct irq_domain *parent)
{
	if (is_of_node(parent_handle))
		return gicv2m_of_init(parent_handle, parent);

	return gicv2m_acpi_init(parent);
}
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_FLAGS_H
#define _ASM_X86_PROCESSOR_FLAGS_H

#include <uapi/asm/processor-flags.h>
#include <linux/mem_encrypt.h>

#ifdef CONFIG_VM86
#define X86_VM_MASK	X86_EFLAGS_VM
#else
#define X86_VM_MASK	0 /* No VM86 support */
#endif

/*
 * CR3's layout varies depending on several things.
 *
 * If CR4.PCIDE is set (64-bit only), then CR3[11:0] is the address space ID.
 * If PAE is enabled, then CR3[11:5] is part of the PDPT address
 * (i.e. it's 32-byte aligned, not page-aligned) and CR3[4:0] is ignored.
 * Otherwise (non-PAE, non-PCID), CR3[3] is PWT, CR3[4] is PCD, and
 * CR3[2:0] and CR3[11:5] are ignored.
 *
 * In all cases, Linux puts zeros in the low ignored bits and in PWT and PCD.
 *
 * CR3[63] is always read as zero.  If CR4.PCIDE is set, then CR3[63] may be
 * written as 1 to prevent the write to CR3 from flushing the TLB.
 *
 * On systems with SME, one bit (in a variable position!) is stolen to indicate
 * that the top-level paging structure is encrypted.
 *
 * On systems with LAM, bits 61 and 62 are used to indicate LAM mode.
 *
 * All of the remaining bits indicate the physical address of the top-level
 * paging structure.
 *
 * CR3_ADDR_MASK is the mask used by read_cr3_pa().
 */
#ifdef CONFIG_X86_64
/* Mask off the address space ID and SME encryption bits. */
#define CR3_ADDR_MASK	__sme_clr(PHYSICAL_PAGE_MASK)
#define CR3_PCID_MASK	0xFFFull
#define CR3_NOFLUSH	BIT_ULL(63)
#else
/*
 * CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save
 * a tiny bit of code size by setting all the bits.
 */
#define CR3_ADDR_MASK	0xFFFFFFFFull
#define CR3_PCID_MASK	0ull
#define CR3_NOFLUSH	0
#endif

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/* Bit of the per-CPU user PCID inside CR3 when PTI is enabled. */
# define X86_CR3_PTI_PCID_USER_BIT	11
#endif

#endif /* _ASM_X86_PROCESSOR_FLAGS_H */
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

/*
 * Final kref release for an mmap region: unlink it from the device's
 * pending list (if still there) and free the vmalloc'ed buffer.
 */
void rxe_mmap_release(struct kref *ref)
{
	struct rxe_mmap_info *ip = container_of(ref,
					struct rxe_mmap_info, ref);
	struct rxe_dev *rxe = to_rdev(ip->context->device);

	spin_lock_bh(&rxe->pending_lock);

	if (!list_empty(&ip->pending_mmaps))
		list_del(&ip->pending_mmaps);

	spin_unlock_bh(&rxe->pending_lock);

	vfree(ip->obj);		/* buf */
	kfree(ip);
}

/*
 * open and close keep track of how many times the memory region is mapped,
 * to avoid releasing it.
 */
static void rxe_vma_open(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_get(&ip->ref);
}

static void rxe_vma_close(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_put(&ip->ref, rxe_mmap_release);
}

static const struct vm_operations_struct rxe_vm_ops = {
	.open = rxe_vma_open,
	.close = rxe_vma_close,
};

/**
 * rxe_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 *
 * Matches the VMA's page offset against the device's list of objects that
 * are waiting to be mapped, then remaps the object's vmalloc buffer into
 * the caller's address space.
 *
 * Return zero if the mmap is OK. Otherwise, return an errno.
 */
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rxe_dev *rxe = to_rdev(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rxe_mmap_info *ip, *pp;
	int ret;

	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_bh(&rxe->pending_lock);
	list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
		if (context != ip->context || (__u64)offset != ip->info.offset)
			continue;

		/* Don't allow a mmap larger than the object. */
		if (size > ip->info.size) {
			rxe_dbg_dev(rxe, "mmap region is larger than the object!\n");
			spin_unlock_bh(&rxe->pending_lock);
			ret = -EINVAL;
			goto done;
		}

		goto found_it;
	}
	rxe_dbg_dev(rxe, "unable to find pending mmap info\n");
	spin_unlock_bh(&rxe->pending_lock);
	ret = -EINVAL;
	goto done;

found_it:
	list_del_init(&ip->pending_mmaps);
	spin_unlock_bh(&rxe->pending_lock);

	ret = remap_vmalloc_range(vma, ip->obj, 0);
	if (ret) {
		rxe_dbg_dev(rxe, "err %d from remap_vmalloc_range\n", ret);
		goto done;
	}

	vma->vm_ops = &rxe_vm_ops;
	vma->vm_private_data = ip;

	/* Take the initial mapping reference. */
	rxe_vma_open(vma);
done:
	return ret;
}

/*
 * Allocate information for rxe_mmap: reserves a unique SHMLBA-aligned
 * offset in the device's mmap space and records the object to be mapped.
 * The caller later queues the result on rxe->pending_mmaps.
 */
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
					   struct ib_udata *udata, void *obj)
{
	struct rxe_mmap_info *ip;

	if (!udata)
		return ERR_PTR(-EINVAL);

	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
	if (!ip)
		return ERR_PTR(-ENOMEM);

	size = PAGE_ALIGN(size);

	spin_lock_bh(&rxe->mmap_offset_lock);

	/* Never hand out offset 0 — it is indistinguishable from "no offset". */
	if (rxe->mmap_offset == 0)
		rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);

	ip->info.offset = rxe->mmap_offset;
	rxe->mmap_offset += ALIGN(size, SHMLBA);

	spin_unlock_bh(&rxe->mmap_offset_lock);

	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->info.size = size;

	ip->context =
		container_of(udata, struct uverbs_attr_bundle, driver_udata)
			->context;
	ip->obj = obj;
	kref_init(&ip->ref);

	return ip;
}
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Google Trogdor dts fragment for detachables
 *
 * Copyright 2024 Google LLC.
 */

/* This file must be included after sc7180-trogdor.dtsi to modify cros_ec */
&cros_ec {
	keyboard-controller {
		/*
		 * Switches-only variant of the EC keyboard binding — the
		 * matrix keyboard lives in the detachable lid, not the EC.
		 */
		compatible = "google,cros-ec-keyb-switches";
	};
};
// SPDX-License-Identifier: GPL-2.0-only
/*
 * PCI glue for ISHTP provider device (ISH) driver
 *
 * Copyright (c) 2014-2016, Intel Corporation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#define CREATE_TRACE_POINTS
#include <trace/events/intel_ish.h>
#include "ishtp-dev.h"
#include "hw-ish.h"

/* Index into ishtp_driver_data[], carried in pci_device_id.driver_data. */
enum ishtp_driver_data_index {
	ISHTP_DRIVER_DATA_NONE,
	ISHTP_DRIVER_DATA_LNL_M,
};

#define ISH_FW_GEN_LNL_M "lnlm"

#define ISH_FIRMWARE_PATH(gen) "intel/ish/ish_" gen ".bin"
#define ISH_FIRMWARE_PATH_ALL "intel/ish/ish_*.bin"

static struct ishtp_driver_data ishtp_driver_data[] = {
	[ISHTP_DRIVER_DATA_LNL_M] = {
		.fw_generation = ISH_FW_GEN_LNL_M,
	},
};

static const struct pci_device_id ish_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_CHV)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_BXT_Ax)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_BXT_Bx)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_APL_Ax)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_SPT_Ax)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_CNL_Ax)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_GLK_Ax)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_CNL_H)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ICL_MOBILE)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_SPT_H)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_CML_LP)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_CMP_H)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_EHL_Ax)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_TGL_LP)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_TGL_H)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ADL_S)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ADL_P)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ADL_N)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_RPL_S)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_MTL_P)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_H)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_S)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_LNL_M), .driver_data = ISHTP_DRIVER_DATA_LNL_M},
	{}
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);

/**
 * ish_event_tracer() - Callback function to dump trace messages
 * @dev: ishtp device
 * @format: printf style format
 *
 * Callback to direct log messages to Linux trace buffers
 */
static __printf(2, 3)
void ish_event_tracer(struct ishtp_device *dev, const char *format, ...)
{
	if (trace_ishtp_dump_enabled()) {
		va_list args;
		char tmp_buf[100];

		va_start(args, format);
		vsnprintf(tmp_buf, sizeof(tmp_buf), format, args);
		va_end(args);

		trace_ishtp_dump(tmp_buf);
	}
}

/**
 * ish_init() - Init function
 * @dev: ishtp device
 *
 * This function initializes wait queues for suspend/resume and calls
 * the hardware initialization function. This will initiate the
 * startup sequence.
 *
 * Return: 0 for success or error code for failure
 */
static int ish_init(struct ishtp_device *dev)
{
	int ret;

	/* Set the state of ISH HW to start */
	ret = ish_hw_start(dev);
	if (ret) {
		dev_err(dev->devc, "ISH: hw start failed.\n");
		return ret;
	}

	/* Start the inter process communication to ISH processor */
	ret = ishtp_start(dev);
	if (ret) {
		dev_err(dev->devc, "ISHTP: Protocol init failed.\n");
		return ret;
	}

	return 0;
}

static const struct pci_device_id ish_invalid_pci_ids[] = {
	/* Mehlow platform special pci ids */
	{PCI_VDEVICE(INTEL, 0xA309)},
	{PCI_VDEVICE(INTEL, 0xA30A)},
	{}
};

/* D0i3 is used when S2Idle suspends, or always on Cherry Trail. */
static inline bool ish_should_enter_d0i3(struct pci_dev *pdev)
{
	return !pm_suspend_via_firmware() || pdev->device == PCI_DEVICE_ID_INTEL_ISH_CHV;
}

static inline bool ish_should_leave_d0i3(struct pci_dev *pdev)
{
	return !pm_resume_via_firmware() || pdev->device == PCI_DEVICE_ID_INTEL_ISH_CHV;
}

/**
 * ish_probe() - PCI driver probe callback
 * @pdev: pci device
 * @ent: pci device id
 *
 * Initialize PCI function, setup interrupt and call for ISH initialization
 *
 * Return: 0 for success or error code for failure
 */
static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;
	struct ish_hw *hw;
	unsigned long irq_flag = 0;
	struct ishtp_device *ishtp;
	struct device *dev = &pdev->dev;

	/* Check for invalid platforms for ISH support */
	if (pci_dev_present(ish_invalid_pci_ids))
		return -ENODEV;

	/* enable pci dev */
	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "ISH: Failed to enable PCI device\n");
		return ret;
	}

	/* set PCI host mastering */
	pci_set_master(pdev);

	/* pci request regions for ISH driver */
	ret = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
	if (ret) {
		dev_err(dev, "ISH: Failed to get PCI regions\n");
		return ret;
	}

	/* allocates and initializes the ISH dev structure */
	ishtp = ish_dev_init(pdev);
	if (!ishtp) {
		ret = -ENOMEM;
		return ret;
	}
	hw = to_ish_hw(ishtp);
	ishtp->print_log = ish_event_tracer;
	ishtp->driver_data = &ishtp_driver_data[ent->driver_data];

	/* mapping IO device memory */
	hw->mem_addr = pcim_iomap_table(pdev)[0];
	ishtp->pdev = pdev;

	/* request and enable interrupt */
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0) {
		dev_err(dev, "ISH: Failed to allocate IRQ vectors\n");
		return ret;
	}

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		irq_flag = IRQF_SHARED;

	ret = devm_request_irq(dev, pdev->irq, ish_irq_handler,
			       irq_flag, KBUILD_MODNAME, ishtp);
	if (ret) {
		dev_err(dev, "ISH: request IRQ %d failed\n", pdev->irq);
		return ret;
	}
	dev_set_drvdata(ishtp->devc, ishtp);

	init_waitqueue_head(&ishtp->suspend_wait);
	init_waitqueue_head(&ishtp->resume_wait);

	/* Enable PME for EHL */
	if (pdev->device == PCI_DEVICE_ID_INTEL_ISH_EHL_Ax)
		device_init_wakeup(dev, true);

	ret = ish_init(ishtp);
	if (ret)
		return ret;

	return 0;
}

/**
 * ish_remove() - PCI driver remove callback
 * @pdev: pci device
 *
 * This function does cleanup of ISH on pci remove callback
 */
static void ish_remove(struct pci_dev *pdev)
{
	struct ishtp_device *ishtp_dev = pci_get_drvdata(pdev);

	ishtp_bus_remove_all_clients(ishtp_dev, false);
	ish_device_disable(ishtp_dev);
}

/**
 * ish_shutdown() - PCI driver shutdown callback
 * @pdev: pci device
 *
 * This function sets up wakeup for S5
 */
static void ish_shutdown(struct pci_dev *pdev)
{
	if (pdev->device == PCI_DEVICE_ID_INTEL_ISH_EHL_Ax)
		pci_prepare_to_sleep(pdev);
}

/* Device being resumed; stashed for the resume work item below. */
static struct device __maybe_unused *ish_resume_device;

/* 50ms to get resume response */
#define WAIT_FOR_RESUME_ACK_MS		50

/**
 * ish_resume_handler() - Work function to complete resume
 * @work: work struct
 *
 * The resume work function to complete resume function asynchronously.
 * There are two resume paths, one where ISH is not powered off,
 * in that case a simple resume message is enough, others we need
 * a reset sequence.
 */
static void __maybe_unused ish_resume_handler(struct work_struct *work)
{
	struct pci_dev *pdev = to_pci_dev(ish_resume_device);
	struct ishtp_device *dev = pci_get_drvdata(pdev);
	uint32_t fwsts = dev->ops->get_fw_status(dev);

	if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag
			&& IPC_IS_ISH_ILUP(fwsts)) {
		if (device_may_wakeup(&pdev->dev))
			disable_irq_wake(pdev->irq);

		ish_set_host_ready(dev);

		ishtp_send_resume(dev);

		/* Waiting to get resume response */
		if (dev->resume_flag)
			wait_event_interruptible_timeout(dev->resume_wait,
				!dev->resume_flag,
				msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));

		/*
		 * If the flag is not cleared, something is wrong with ISH FW.
		 * So on resume, need to go through init sequence again.
		 */
		if (dev->resume_flag)
			ish_init(dev);
	} else {
		/*
		 * Resume from the D3, full reboot of ISH processor will happen,
		 * so need to go through init sequence again.
		 */
		ish_init(dev);
	}
}

/**
 * ish_suspend() - ISH suspend callback
 * @device: device pointer
 *
 * ISH suspend callback
 *
 * Return: 0 to the pm core
 */
static int __maybe_unused ish_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct ishtp_device *dev = pci_get_drvdata(pdev);

	if (ish_should_enter_d0i3(pdev)) {
		/*
		 * If previous suspend hasn't been answered then ISH is likely
		 * dead, don't attempt nested notification
		 */
		if (dev->suspend_flag)
			return	0;

		dev->resume_flag = 0;
		dev->suspend_flag = 1;
		ishtp_send_suspend(dev);

		/* 25 ms should be enough for live ISH to flush all IPC buf */
		if (dev->suspend_flag)
			wait_event_interruptible_timeout(dev->suspend_wait,
					!dev->suspend_flag,
					msecs_to_jiffies(25));

		if (dev->suspend_flag) {
			/*
			 * It looks like FW halt, clear the DMA bit, and put
			 * ISH into D3, and FW would reset on resume.
			 */
			ish_disable_dma(dev);
		} else {
			/*
			 * Save state so PCI core will keep the device at D0,
			 * the ISH would enter D0i3
			 */
			pci_save_state(pdev);

			if (device_may_wakeup(&pdev->dev))
				enable_irq_wake(pdev->irq);
		}
	} else {
		/*
		 * Clear the DMA bit before putting ISH into D3,
		 * or ISH FW would reset automatically.
		 */
		ish_disable_dma(dev);
	}

	return 0;
}

static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
/**
 * ish_resume() - ISH resume callback
 * @device: device pointer
 *
 * ISH resume callback
 *
 * Return: 0 to the pm core
 */
static int __maybe_unused ish_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct ishtp_device *dev = pci_get_drvdata(pdev);

	/* The bulk of the resume runs asynchronously in ish_resume_handler(). */
	ish_resume_device = device;
	dev->resume_flag = 1;

	schedule_work(&resume_work);

	return 0;
}

static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);

static ssize_t base_version_show(struct device *cdev,
				 struct device_attribute *attr, char *buf)
{
	struct ishtp_device *dev = dev_get_drvdata(cdev);

	return sysfs_emit(buf, "%u.%u.%u.%u\n", dev->base_ver.major,
			  dev->base_ver.minor, dev->base_ver.hotfix,
			  dev->base_ver.build);
}
static DEVICE_ATTR_RO(base_version);

static ssize_t project_version_show(struct device *cdev,
				    struct device_attribute *attr, char *buf)
{
	struct ishtp_device *dev = dev_get_drvdata(cdev);

	return sysfs_emit(buf, "%u.%u.%u.%u\n", dev->prj_ver.major,
			  dev->prj_ver.minor, dev->prj_ver.hotfix,
			  dev->prj_ver.build);
}
static DEVICE_ATTR_RO(project_version);

static struct attribute *ish_firmware_attrs[] = {
	&dev_attr_base_version.attr,
	&dev_attr_project_version.attr,
	NULL
};

static umode_t firmware_is_visible(struct kobject *kobj, struct attribute *attr,
				   int i)
{
	struct ishtp_device *dev = dev_get_drvdata(kobj_to_dev(kobj));

	return dev->driver_data->fw_generation ?
attr->mode : 0; } static const struct attribute_group ish_firmware_group = { .name = "firmware", .attrs = ish_firmware_attrs, .is_visible = firmware_is_visible, }; __ATTRIBUTE_GROUPS(ish_firmware); static struct pci_driver ish_driver = { .name = KBUILD_MODNAME, .id_table = ish_pci_tbl, .probe = ish_probe, .remove = ish_remove, .shutdown = ish_shutdown, .driver.pm = &ish_pm_ops, .dev_groups = ish_firmware_groups, }; module_pci_driver(ish_driver); /* Original author */ MODULE_AUTHOR("Daniel Drubin <[email protected]>"); /* Adoption to upstream Linux kernel */ MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>"); MODULE_DESCRIPTION("Intel(R) Integrated Sensor Hub PCI Device Driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(ISH_FIRMWARE_PATH(ISH_FW_GEN_LNL_M)); MODULE_FIRMWARE(ISH_FIRMWARE_PATH_ALL);
/* SPDX-License-Identifier: GPL-2.0 */
/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller ([email protected])
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>
#include <asm/barrier.h>
#include <asm/processor.h> /* for cpu_relax */

/* Lock byte zero means unlocked; any non-zero value means held. */
#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

/*
 * Spin until the lock byte is acquired.  ldstub atomically sets the
 * byte to 0xff and returns the old value; a non-zero old value means
 * somebody else holds the lock, so spin reading it (in .subsection 2,
 * off the hot path) until it drops to zero, then retry the ldstub.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2f\n\t"
	" ldub	[%0], %%g2\n\t"
	".subsection	2\n"
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2b\n\t"
	" ldub	[%0], %%g2\n\t"
	"b,a	1b\n\t"
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}

/* One-shot attempt: ldstub returns 0 iff we took the lock. */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}

/* Release: a plain byte store of zero; "memory" clobber orders it. */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}

/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  arch_rwlock_t
 *	------------------------------------
 *	 31                       8 7      0
 *
 * wlock signifies the one writer is in or somebody is updating
 * counter. For a writer, if he successfully acquires the wlock,
 * but counter is non-zero, he has to release the lock and wait,
 * till both counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */

/*
 * Grab the wlock byte (offset 3, LSB on big-endian Sparc) with ldstub,
 * then let the out-of-line helper ___rw_read_enter bump the reader
 * count.  The helper's return address is stashed in %g4 by hand.
 */
static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

/* Drop one reader: mirror of __arch_read_lock via ___rw_read_exit. */
static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

/*
 * Writer: take wlock, wait (in ___rw_write_enter) until the reader
 * count drains, then mark the whole word ~0 to signal "writer in".
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;
}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
"	st	%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

/*
 * Try to take the write lock: grab wlock with ldstub; if readers are
 * still present (counter bits set) back out by clearing wlock again,
 * otherwise set the whole word to ~0 as arch_write_lock does.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val;

	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			((volatile u8*)&rw->lock)[3] = 0;
		else
			*(volatile u32*)&rw->lock = ~0U;
	}

	return (val == 0);
}

/* Non-blocking reader attempt; result comes back in %o0 from the helper. */
static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_try\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define arch_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for AK8813 / AK8814 TV-ecoders from Asahi Kasei Microsystems Co., Ltd. (AKM)
 *
 * Copyright (C) 2010, Guennadi Liakhovetski <[email protected]>
 */

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/module.h>

#include <media/i2c/ak881x.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>

/* Chip register map (offsets 0x00..0x26) */
#define AK881X_INTERFACE_MODE		0
#define AK881X_VIDEO_PROCESS1		1
#define AK881X_VIDEO_PROCESS2		2
#define AK881X_VIDEO_PROCESS3		3
#define AK881X_DAC_MODE			5
#define AK881X_STATUS			0x24
#define AK881X_DEVICE_ID		0x25
#define AK881X_DEVICE_REVISION		0x26

struct ak881x {
	struct v4l2_subdev subdev;
	struct ak881x_pdata *pdata;
	/* Active lines of the configured TV standard: 480 (NTSC) or 576 (PAL) */
	unsigned int lines;
	char revision;	/* DEVICE_REVISION content */
};

/* SMBus byte read; returns the register value or a negative errno. */
static int reg_read(struct i2c_client *client, const u8 reg)
{
	return i2c_smbus_read_byte_data(client, reg);
}

/* SMBus byte write; returns 0 or a negative errno. */
static int reg_write(struct i2c_client *client, const u8 reg,
		     const u8 data)
{
	return i2c_smbus_write_byte_data(client, reg, data);
}

/* Read-modify-write: update only the bits selected by @mask. */
static int reg_set(struct i2c_client *client, const u8 reg,
		   const u8 data, u8 mask)
{
	int ret;

	ret = reg_read(client, reg);
	if (ret < 0)
		return ret;
	return reg_write(client, reg, (ret & ~mask) | (data & mask));
}

static struct ak881x *to_ak881x(const struct i2c_client *client)
{
	return container_of(i2c_get_clientdata(client), struct ak881x, subdev);
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Debug ioctl: read one chip register (valid range 0..0x26). */
static int ak881x_g_register(struct v4l2_subdev *sd,
			     struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (reg->reg > 0x26)
		return -EINVAL;

	reg->size = 1;
	reg->val = reg_read(client, reg->reg);

	/* Negative errno from reg_read shows up here as a huge u64 value */
	if (reg->val > 0xffff)
		return -EIO;

	return 0;
}

/* Debug ioctl: write one chip register (valid range 0..0x26). */
static int ak881x_s_register(struct v4l2_subdev *sd,
			     const struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (reg->reg > 0x26)
		return -EINVAL;

	if (reg_write(client, reg->reg, reg->val) < 0)
		return -EIO;

	return 0;
}
#endif

/*
 * get_fmt/set_fmt handler: clamp the requested size to the chip limits
 * (width <= 720, height <= lines of the current standard) and fix the
 * bus code / field / colorspace to the only supported configuration.
 */
static int ak881x_fill_fmt(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state,
			   struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *mf = &format->format;
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct ak881x *ak881x = to_ak881x(client);

	if (format->pad)
		return -EINVAL;

	v4l_bound_align_image(&mf->width, 0, 720, 2,
			      &mf->height, 0, ak881x->lines, 1, 0);
	mf->field	= V4L2_FIELD_INTERLACED;
	mf->code	= MEDIA_BUS_FMT_YUYV8_2X8;
	mf->colorspace	= V4L2_COLORSPACE_SMPTE170M;

	return 0;
}

/* Only one media bus format is supported: YUYV8_2X8. */
static int ak881x_enum_mbus_code(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_mbus_code_enum *code)
{
	if (code->pad || code->index)
		return -EINVAL;

	code->code = MEDIA_BUS_FMT_YUYV8_2X8;
	return 0;
}

/* Report the crop bounds: full active picture of the current standard. */
static int ak881x_get_selection(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *sd_state,
				struct v4l2_subdev_selection *sel)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct ak881x *ak881x = to_ak881x(client);

	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP_BOUNDS:
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = 720;
		sel->r.height = ak881x->lines;
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * Select the TV output standard; programs VIDEO_PROCESS1[3:0] and
 * caches the matching active-line count.
 */
static int ak881x_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct ak881x *ak881x = to_ak881x(client);
	u8 vp1;

	if (std == V4L2_STD_NTSC_443) {
		vp1 = 3;
		ak881x->lines = 480;
	} else if (std == V4L2_STD_PAL_M) {
		vp1 = 5;
		ak881x->lines = 480;
	} else if (std == V4L2_STD_PAL_60) {
		vp1 = 7;
		ak881x->lines = 480;
	} else if (std & V4L2_STD_NTSC) {
		vp1 = 0;
		ak881x->lines = 480;
	} else if (std & V4L2_STD_PAL) {
		vp1 = 0xf;
		ak881x->lines = 576;
	} else {
		/* No SECAM or PAL_N/Nc supported */
		return -EINVAL;
	}

	reg_set(client, AK881X_VIDEO_PROCESS1, vp1, 0xf);

	return 0;
}

/* Start/stop video output by switching the DAC(s) on or off. */
static int ak881x_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct ak881x *ak881x = to_ak881x(client);

	if (enable) {
		u8 dac;
		/* For colour-bar testing set bit 6 of AK881X_VIDEO_PROCESS1 */
		/* Default: composite output */
		if (ak881x->pdata->flags & AK881X_COMPONENT)
			dac = 3;
		else
			dac = 4;
		/* Turn on the DAC(s) */
		reg_write(client, AK881X_DAC_MODE, dac);
		dev_dbg(&client->dev, "chip status 0x%x\n",
			reg_read(client, AK881X_STATUS));
	} else {
		/* ...and clear bit 6 of AK881X_VIDEO_PROCESS1 here */
		reg_write(client, AK881X_DAC_MODE, 0);
		dev_dbg(&client->dev, "chip status 0x%x\n",
			reg_read(client, AK881X_STATUS));
	}

	return 0;
}

static const struct v4l2_subdev_core_ops ak881x_subdev_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = ak881x_g_register,
	.s_register = ak881x_s_register,
#endif
};

static const struct v4l2_subdev_video_ops ak881x_subdev_video_ops = {
	.s_std_output = ak881x_s_std_output,
	.s_stream = ak881x_s_stream,
};

static const struct v4l2_subdev_pad_ops ak881x_subdev_pad_ops = {
	.enum_mbus_code = ak881x_enum_mbus_code,
	.get_selection = ak881x_get_selection,
	.set_fmt = ak881x_fill_fmt,
	.get_fmt = ak881x_fill_fmt,
};

static const struct v4l2_subdev_ops ak881x_subdev_ops = {
	.core	= &ak881x_subdev_core_ops,
	.video	= &ak881x_subdev_video_ops,
	.pad	= &ak881x_subdev_pad_ops,
};

/*
 * Probe: verify SMBus capability and chip ID (0x13 = AK8813,
 * 0x14 = AK8814), then apply the platform interface configuration.
 */
static int ak881x_probe(struct i2c_client *client)
{
	struct i2c_adapter *adapter = client->adapter;
	struct ak881x *ak881x;
	u8 ifmode, data;

	/*
	 * NOTE(review): the driver actually requires I2C_FUNC_SMBUS_BYTE_DATA;
	 * the "WORD" in the message text below looks stale.
	 */
	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_warn(&adapter->dev,
			 "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
		return -EIO;
	}

	ak881x = devm_kzalloc(&client->dev, sizeof(*ak881x), GFP_KERNEL);
	if (!ak881x)
		return -ENOMEM;

	v4l2_i2c_subdev_init(&ak881x->subdev, client, &ak881x_subdev_ops);

	data = reg_read(client, AK881X_DEVICE_ID);

	switch (data) {
	case 0x13:
	case 0x14:
		break;
	default:
		dev_err(&client->dev,
			"No ak881x chip detected, register read %x\n", data);
		return -ENODEV;
	}

	ak881x->revision = reg_read(client, AK881X_DEVICE_REVISION);
	ak881x->pdata = client->dev.platform_data;

	if (ak881x->pdata) {
		if (ak881x->pdata->flags & AK881X_FIELD)
			ifmode = 4;
		else
			ifmode = 0;

		switch (ak881x->pdata->flags & AK881X_IF_MODE_MASK) {
		case AK881X_IF_MODE_BT656:
			ifmode |= 1;
			break;
		case AK881X_IF_MODE_MASTER:
			ifmode |= 2;
			break;
		case AK881X_IF_MODE_SLAVE:
		default:
			break;
		}

		dev_dbg(&client->dev, "IF mode %x\n", ifmode);

		/*
		 * "Line Blanking No." seems to be the same as the number of
		 * "black" lines on, e.g., SuperH VOU, whose default value of 20
		 * "incidentally" matches ak881x' default
		 */
		reg_write(client, AK881X_INTERFACE_MODE, ifmode | (20 << 3));
	}

	/* Hardware default: NTSC-M */
	ak881x->lines = 480;

	dev_info(&client->dev, "Detected an ak881x chip ID %x, revision %x\n",
		 data, ak881x->revision);

	return 0;
}

static void ak881x_remove(struct i2c_client *client)
{
	struct ak881x *ak881x = to_ak881x(client);

	v4l2_device_unregister_subdev(&ak881x->subdev);
}

static const struct i2c_device_id ak881x_id[] = {
	{ "ak8813" },
	{ "ak8814" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, ak881x_id);

static struct i2c_driver ak881x_i2c_driver = {
	.driver = {
		.name = "ak881x",
	},
	.probe = ak881x_probe,
	.remove = ak881x_remove,
	.id_table = ak881x_id,
};

module_i2c_driver(ak881x_i2c_driver);

MODULE_DESCRIPTION("TV-output driver for ak8813/ak8814");
MODULE_AUTHOR("Guennadi Liakhovetski <[email protected]>");
MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved
 */

#include <drm/drm_managed.h>

#include <drm/display/drm_dsc_helper.h>

#include "dpu_kms.h"
#include "dpu_hw_catalog.h"
#include "dpu_hwio.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_dsc.h"

#define DSC_CMN_MAIN_CNF           0x00

/* DPU_DSC_ENC register offsets */
#define ENC_DF_CTRL                0x00
#define ENC_GENERAL_STATUS         0x04
#define ENC_HSLICE_STATUS          0x08
#define ENC_OUT_STATUS             0x0C
#define ENC_INT_STAT               0x10
#define ENC_INT_CLR                0x14
#define ENC_INT_MASK               0x18
#define DSC_MAIN_CONF              0x30
#define DSC_PICTURE_SIZE           0x34
#define DSC_SLICE_SIZE             0x38
#define DSC_MISC_SIZE              0x3C
#define DSC_HRD_DELAYS             0x40
#define DSC_RC_SCALE               0x44
#define DSC_RC_SCALE_INC_DEC       0x48
#define DSC_RC_OFFSETS_1           0x4C
#define DSC_RC_OFFSETS_2           0x50
#define DSC_RC_OFFSETS_3           0x54
#define DSC_RC_OFFSETS_4           0x58
#define DSC_FLATNESS_QP            0x5C
#define DSC_RC_MODEL_SIZE          0x60
#define DSC_RC_CONFIG              0x64
#define DSC_RC_BUF_THRESH_0        0x68
#define DSC_RC_BUF_THRESH_1        0x6C
#define DSC_RC_BUF_THRESH_2        0x70
#define DSC_RC_BUF_THRESH_3        0x74
#define DSC_RC_MIN_QP_0            0x78
#define DSC_RC_MIN_QP_1            0x7C
#define DSC_RC_MIN_QP_2            0x80
#define DSC_RC_MAX_QP_0            0x84
#define DSC_RC_MAX_QP_1            0x88
#define DSC_RC_MAX_QP_2            0x8C
#define DSC_RC_RANGE_BPG_OFFSETS_0 0x90
#define DSC_RC_RANGE_BPG_OFFSETS_1 0x94
#define DSC_RC_RANGE_BPG_OFFSETS_2 0x98

/* DPU_DSC_CTL register offsets */
#define DSC_CTL                    0x00
#define DSC_CFG                    0x04
#define DSC_DATA_IN_SWAP           0x08
#define DSC_CLK_CTRL               0x0C

/*
 * Compute the last valid output-buffer address for the given number of
 * soft slices.  The 2400-entry buffer is shared among slices, and is
 * effectively halved when the native 4:2:x datapath is enabled.
 */
static int _dsc_calc_output_buf_max_addr(struct dpu_hw_dsc *hw_dsc, int num_softslice)
{
	int max_addr = 2400 / num_softslice;

	if (hw_dsc->caps->features & BIT(DPU_DSC_NATIVE_42x_EN))
		max_addr /= 2;

	return max_addr - 1;
};

/* Disable the DSC block: clear the wrapper config, DF control and main conf. */
static void dpu_hw_dsc_disable_1_2(struct dpu_hw_dsc *hw_dsc)
{
	struct dpu_hw_blk_reg_map *hw;
	const struct dpu_dsc_sub_blks *sblk;

	if (!hw_dsc)
		return;

	hw = &hw_dsc->hw;
	sblk = hw_dsc->caps->sblk;
	DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CFG, 0);

	DPU_REG_WRITE(hw, sblk->enc.base + ENC_DF_CTRL, 0);
	DPU_REG_WRITE(hw, sblk->enc.base + DSC_MAIN_CONF, 0);
}

/*
 * Program the v1.2 DSC encoder and its wrapper from a drm_dsc_config.
 * @mode is a bitmask of DSC_MODE_* flags (split panel, multiplex, video).
 * Register fields are packed exactly per the hardware layout; do not
 * reorder the writes.
 */
static void dpu_hw_dsc_config_1_2(struct dpu_hw_dsc *hw_dsc,
				  struct drm_dsc_config *dsc,
				  u32 mode,
				  u32 initial_lines)
{
	struct dpu_hw_blk_reg_map *hw;
	const struct dpu_dsc_sub_blks *sblk;
	u32 data = 0;
	u32 det_thresh_flatness;
	u32 num_active_slice_per_enc;
	u32 bpp;

	if (!hw_dsc || !dsc)
		return;

	hw = &hw_dsc->hw;

	sblk = hw_dsc->caps->sblk;

	if (mode & DSC_MODE_SPLIT_PANEL)
		data |= BIT(0);

	if (mode & DSC_MODE_MULTIPLEX)
		data |= BIT(1);

	/* In multiplex mode each encoder handles half of the slices */
	num_active_slice_per_enc = dsc->slice_count;
	if (mode & DSC_MODE_MULTIPLEX)
		num_active_slice_per_enc = dsc->slice_count / 2;

	data |= (num_active_slice_per_enc & 0x3) << 7;

	DPU_REG_WRITE(hw, DSC_CMN_MAIN_CNF, data);

	data = (initial_lines & 0xff);

	if (mode & DSC_MODE_VIDEO)
		data |= BIT(9);

	data |= (_dsc_calc_output_buf_max_addr(hw_dsc, num_active_slice_per_enc) << 18);

	DPU_REG_WRITE(hw, sblk->enc.base + ENC_DF_CTRL, data);

	data = (dsc->dsc_version_minor & 0xf) << 28;
	if (dsc->dsc_version_minor == 0x2) {
		if (dsc->native_422)
			data |= BIT(22);
		if (dsc->native_420)
			data |= BIT(21);
	}

	bpp = dsc->bits_per_pixel;
	/* as per hw requirement bpp should be programmed
	 * twice the actual value in case of 420 or 422 encoding
	 */
	if (dsc->native_422 || dsc->native_420)
		bpp = 2 * bpp;

	data |= bpp << 10;

	if (dsc->block_pred_enable)
		data |= BIT(20);

	if (dsc->convert_rgb)
		data |= BIT(4);

	data |= (dsc->line_buf_depth & 0xf) << 6;
	data |= dsc->bits_per_component & 0xf;

	DPU_REG_WRITE(hw, sblk->enc.base + DSC_MAIN_CONF, data);

	data = (dsc->pic_width & 0xffff) |
		((dsc->pic_height & 0xffff) << 16);

	DPU_REG_WRITE(hw, sblk->enc.base + DSC_PICTURE_SIZE, data);

	data = (dsc->slice_width & 0xffff) |
		((dsc->slice_height & 0xffff) << 16);

	DPU_REG_WRITE(hw, sblk->enc.base + DSC_SLICE_SIZE, data);

	DPU_REG_WRITE(hw, sblk->enc.base + DSC_MISC_SIZE,
		      (dsc->slice_chunk_size) & 0xffff);

	data = (dsc->initial_xmit_delay & 0xffff) |
		((dsc->initial_dec_delay & 0xffff) << 16);

	DPU_REG_WRITE(hw, sblk->enc.base + DSC_HRD_DELAYS, data);

	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_SCALE,
		      dsc->initial_scale_value & 0x3f);

	data = (dsc->scale_increment_interval & 0xffff) |
		((dsc->scale_decrement_interval & 0x7ff) << 16);

	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_SCALE_INC_DEC, data);

	data = (dsc->first_line_bpg_offset & 0x1f) |
		((dsc->second_line_bpg_offset & 0x1f) << 5);

	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_1, data);

	data = (dsc->nfl_bpg_offset & 0xffff) |
		((dsc->slice_bpg_offset & 0xffff) << 16);

	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_2, data);

	data = (dsc->initial_offset & 0xffff) |
		((dsc->final_offset & 0xffff) << 16);

	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_3, data);

	data = (dsc->nsl_bpg_offset & 0xffff) |
		((dsc->second_line_offset_adj & 0xffff) << 16);

	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_4, data);

	det_thresh_flatness = drm_dsc_flatness_det_thresh(dsc);
	data = (dsc->flatness_min_qp & 0x1f) |
		((dsc->flatness_max_qp & 0x1f) << 5) |
		((det_thresh_flatness & 0xff) << 10);

	DPU_REG_WRITE(hw, sblk->enc.base + DSC_FLATNESS_QP, data);

	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MODEL_SIZE,
		      (dsc->rc_model_size) & 0xffff);

	data = dsc->rc_edge_factor & 0xf;
	data |= (dsc->rc_quant_incr_limit0 & 0x1f) << 8;
	data |= (dsc->rc_quant_incr_limit1 & 0x1f) << 13;
	data |= (dsc->rc_tgt_offset_high & 0xf) << 20;
	data |= (dsc->rc_tgt_offset_low & 0xf) << 24;

	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_CONFIG, data);

	/* program the dsc wrapper */
	data = BIT(0); /* encoder enable */
	if (dsc->native_422)
		data |= BIT(8);
	else if (dsc->native_420)
		data |= BIT(9);
	if (!dsc->convert_rgb)
		data |= BIT(10);
	if (dsc->bits_per_component == 8)
		data |= BIT(11);
	if (mode & DSC_MODE_SPLIT_PANEL)
		data |= BIT(12);
	if (mode & DSC_MODE_MULTIPLEX)
		data |= BIT(13);
	if (!(mode & DSC_MODE_VIDEO))
		data |= BIT(17);

	DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CFG, data);
}

/*
 * Program the rate-control lookup tables: 14 buffer thresholds and the
 * 15 RC range parameters (min QP / max QP / BPG offset), packed 4 or 5
 * entries per 32-bit register.
 */
static void dpu_hw_dsc_config_thresh_1_2(struct dpu_hw_dsc *hw_dsc,
					 struct drm_dsc_config *dsc)
{
	struct dpu_hw_blk_reg_map *hw;
	const struct dpu_dsc_sub_blks *sblk;
	struct drm_dsc_rc_range_parameters *rc;

	if (!hw_dsc || !dsc)
		return;

	hw = &hw_dsc->hw;

	sblk = hw_dsc->caps->sblk;

	rc = dsc->rc_range_params;

	/*
	 * With BUF_THRESH -- 14 in total
	 * each register contains 4 thresh values with the last register
	 * containing only 2 thresh values
	 */
	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_0,
		      (dsc->rc_buf_thresh[0] << 0) |
		      (dsc->rc_buf_thresh[1] << 8) |
		      (dsc->rc_buf_thresh[2] << 16) |
		      (dsc->rc_buf_thresh[3] << 24));
	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_1,
		      (dsc->rc_buf_thresh[4] << 0) |
		      (dsc->rc_buf_thresh[5] << 8) |
		      (dsc->rc_buf_thresh[6] << 16) |
		      (dsc->rc_buf_thresh[7] << 24));
	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_2,
		      (dsc->rc_buf_thresh[8] << 0) |
		      (dsc->rc_buf_thresh[9] << 8) |
		      (dsc->rc_buf_thresh[10] << 16) |
		      (dsc->rc_buf_thresh[11] << 24));
	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_3,
		      (dsc->rc_buf_thresh[12] << 0) |
		      (dsc->rc_buf_thresh[13] << 8));

	/*
	 * with min/max_QP -- 5 bits
	 * each register contains 5 min_qp or max_qp for total of 15
	 *
	 * With BPG_OFFSET -- 6 bits
	 * each register contains 5 BPG_offset for total of 15
	 */
	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MIN_QP_0,
		      (rc[0].range_min_qp << 0) |
		      (rc[1].range_min_qp << 5) |
		      (rc[2].range_min_qp << 10) |
		      (rc[3].range_min_qp << 15) |
		      (rc[4].range_min_qp << 20));
	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MAX_QP_0,
		      (rc[0].range_max_qp << 0) |
		      (rc[1].range_max_qp << 5) |
		      (rc[2].range_max_qp << 10) |
		      (rc[3].range_max_qp << 15) |
		      (rc[4].range_max_qp << 20));
	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_RANGE_BPG_OFFSETS_0,
		      (rc[0].range_bpg_offset << 0) |
		      (rc[1].range_bpg_offset << 6) |
		      (rc[2].range_bpg_offset << 12) |
		      (rc[3].range_bpg_offset << 18) |
		      (rc[4].range_bpg_offset << 24));
	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MIN_QP_1,
		      (rc[5].range_min_qp << 0) |
		      (rc[6].range_min_qp << 5) |
		      (rc[7].range_min_qp << 10) |
		      (rc[8].range_min_qp << 15) |
		      (rc[9].range_min_qp << 20));
	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MAX_QP_1,
		      (rc[5].range_max_qp << 0) |
		      (rc[6].range_max_qp << 5) |
		      (rc[7].range_max_qp << 10) |
		      (rc[8].range_max_qp << 15) |
		      (rc[9].range_max_qp << 20));
	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_RANGE_BPG_OFFSETS_1,
		      (rc[5].range_bpg_offset << 0) |
		      (rc[6].range_bpg_offset << 6) |
		      (rc[7].range_bpg_offset << 12) |
		      (rc[8].range_bpg_offset << 18) |
		      (rc[9].range_bpg_offset << 24));
	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MIN_QP_2,
		      (rc[10].range_min_qp << 0) |
		      (rc[11].range_min_qp << 5) |
		      (rc[12].range_min_qp << 10) |
		      (rc[13].range_min_qp << 15) |
		      (rc[14].range_min_qp << 20));
	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MAX_QP_2,
		      (rc[10].range_max_qp << 0) |
		      (rc[11].range_max_qp << 5) |
		      (rc[12].range_max_qp << 10) |
		      (rc[13].range_max_qp << 15) |
		      (rc[14].range_max_qp << 20));
	DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_RANGE_BPG_OFFSETS_2,
		      (rc[10].range_bpg_offset << 0) |
		      (rc[11].range_bpg_offset << 6) |
		      (rc[12].range_bpg_offset << 12) |
		      (rc[13].range_bpg_offset << 18) |
		      (rc[14].range_bpg_offset << 24));
}

/*
 * Route this DSC block to a PINGPONG block via the CTL mux.
 * pp == 0 (PINGPONG_NONE) writes the "disabled" mux value 0xf.
 */
static void dpu_hw_dsc_bind_pingpong_blk_1_2(struct dpu_hw_dsc *hw_dsc,
					     const enum dpu_pingpong pp)
{
	struct dpu_hw_blk_reg_map *hw;
	const struct dpu_dsc_sub_blks *sblk;
	int mux_cfg = 0xf; /* Disabled */

	hw = &hw_dsc->hw;

	sblk = hw_dsc->caps->sblk;

	if (pp)
		mux_cfg = (pp - PINGPONG_0) & 0x7;

	DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CTL, mux_cfg);
}

/* Populate the ops vtable with the v1.2 implementations. */
static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops,
			       const unsigned long features)
{
	ops->dsc_disable = dpu_hw_dsc_disable_1_2;
	ops->dsc_config = dpu_hw_dsc_config_1_2;
	ops->dsc_config_thresh = dpu_hw_dsc_config_thresh_1_2;
	ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk_1_2;
}

/**
 * dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object
 * @dev:  Corresponding device for devres management
 * @cfg:  DSC catalog entry for which driver object is required
 * @addr: Mapped register io address of MDP
 * Returns: Error code or allocated dpu_hw_dsc context
 */
struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
				       const struct dpu_dsc_cfg *cfg,
				       void __iomem *addr)
{
	struct dpu_hw_dsc *c;

	/* drmm allocation: freed automatically with the drm_device */
	c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	c->hw.blk_addr = addr + cfg->base;
	c->hw.log_mask = DPU_DBG_MASK_DSC;

	c->idx = cfg->id;
	c->caps = cfg;
	_setup_dcs_ops_1_2(&c->ops, c->caps->features);

	return c;
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 Glider bvba
 */
#ifndef __DT_BINDINGS_POWER_R8A77995_SYSC_H__
#define __DT_BINDINGS_POWER_R8A77995_SYSC_H__

/*
 * These power domain indices match the numbers of the interrupt bits
 * representing the power areas in the various Interrupt Registers
 * (e.g. SYSCISR, Interrupt Status Register)
 */

/* Cortex-A53 CPU core and Snoop Control Unit power areas */
#define R8A77995_PD_CA53_CPU0		 5
#define R8A77995_PD_CA53_SCU		21

/* Always-on power area */
#define R8A77995_PD_ALWAYS_ON		32

#endif /* __DT_BINDINGS_POWER_R8A77995_SYSC_H__ */
// SPDX-License-Identifier: GPL-2.0 /* * Mellanox BlueField I2C bus driver * * Copyright (C) 2020 Mellanox Technologies, Ltd. */ #include <linux/acpi.h> #include <linux/bitfield.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/string.h> /* Defines what functionality is present. */ #define MLXBF_I2C_FUNC_SMBUS_BLOCK \ (I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL) #define MLXBF_I2C_FUNC_SMBUS_DEFAULT \ (I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | \ I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_I2C_BLOCK | \ I2C_FUNC_SMBUS_PROC_CALL) #define MLXBF_I2C_FUNC_ALL \ (MLXBF_I2C_FUNC_SMBUS_DEFAULT | MLXBF_I2C_FUNC_SMBUS_BLOCK | \ I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SLAVE) /* Shared resources info in BlueField platforms. */ #define MLXBF_I2C_COALESCE_TYU_ADDR 0x02801300 #define MLXBF_I2C_COALESCE_TYU_SIZE 0x010 #define MLXBF_I2C_GPIO_TYU_ADDR 0x02802000 #define MLXBF_I2C_GPIO_TYU_SIZE 0x100 #define MLXBF_I2C_COREPLL_TYU_ADDR 0x02800358 #define MLXBF_I2C_COREPLL_TYU_SIZE 0x008 #define MLXBF_I2C_COREPLL_YU_ADDR 0x02800c30 #define MLXBF_I2C_COREPLL_YU_SIZE 0x00c #define MLXBF_I2C_COREPLL_RSH_YU_ADDR 0x13409824 #define MLXBF_I2C_COREPLL_RSH_YU_SIZE 0x00c #define MLXBF_I2C_SHARED_RES_MAX 3 /* * Note that the following SMBus, CAUSE, GPIO and PLL register addresses * refer to their respective offsets relative to the corresponding * memory-mapped region whose addresses are specified in either the DT or * the ACPI tables or above. */ /* * SMBus Master core clock frequency. Timing configurations are * strongly dependent on the core clock frequency of the SMBus * Master. Default value is set to 400MHz. */ #define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000) /* Reference clock for Bluefield - 156 MHz. 
*/ #define MLXBF_I2C_PLL_IN_FREQ 156250000ULL /* Constant used to determine the PLL frequency. */ #define MLNXBF_I2C_COREPLL_CONST 16384ULL #define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL /* PLL registers. */ #define MLXBF_I2C_CORE_PLL_REG1 0x4 #define MLXBF_I2C_CORE_PLL_REG2 0x8 /* OR cause register. */ #define MLXBF_I2C_CAUSE_OR_EVTEN0 0x14 #define MLXBF_I2C_CAUSE_OR_CLEAR 0x18 /* Arbiter Cause Register. */ #define MLXBF_I2C_CAUSE_ARBITER 0x1c /* * Cause Status flags. Note that those bits might be considered * as interrupt enabled bits. */ /* Transaction ended with STOP. */ #define MLXBF_I2C_CAUSE_TRANSACTION_ENDED BIT(0) /* Master arbitration lost. */ #define MLXBF_I2C_CAUSE_M_ARBITRATION_LOST BIT(1) /* Unexpected start detected. */ #define MLXBF_I2C_CAUSE_UNEXPECTED_START BIT(2) /* Unexpected stop detected. */ #define MLXBF_I2C_CAUSE_UNEXPECTED_STOP BIT(3) /* Wait for transfer continuation. */ #define MLXBF_I2C_CAUSE_WAIT_FOR_FW_DATA BIT(4) /* Failed to generate STOP. */ #define MLXBF_I2C_CAUSE_PUT_STOP_FAILED BIT(5) /* Failed to generate START. */ #define MLXBF_I2C_CAUSE_PUT_START_FAILED BIT(6) /* Clock toggle completed. */ #define MLXBF_I2C_CAUSE_CLK_TOGGLE_DONE BIT(7) /* Transfer timeout occurred. */ #define MLXBF_I2C_CAUSE_M_FW_TIMEOUT BIT(8) /* Master busy bit reset. */ #define MLXBF_I2C_CAUSE_M_GW_BUSY_FALL BIT(9) #define MLXBF_I2C_CAUSE_MASTER_ARBITER_BITS_MASK GENMASK(9, 0) #define MLXBF_I2C_CAUSE_MASTER_STATUS_ERROR \ (MLXBF_I2C_CAUSE_M_ARBITRATION_LOST | \ MLXBF_I2C_CAUSE_UNEXPECTED_START | \ MLXBF_I2C_CAUSE_UNEXPECTED_STOP | \ MLXBF_I2C_CAUSE_PUT_STOP_FAILED | \ MLXBF_I2C_CAUSE_PUT_START_FAILED | \ MLXBF_I2C_CAUSE_CLK_TOGGLE_DONE | \ MLXBF_I2C_CAUSE_M_FW_TIMEOUT) /* * Slave cause status flags. Note that those bits might be considered * as interrupt enabled bits. */ /* Write transaction received successfully. */ #define MLXBF_I2C_CAUSE_WRITE_SUCCESS BIT(0) /* Read transaction received, waiting for response. 
*/ #define MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE BIT(13) /* Slave busy bit reset. */ #define MLXBF_I2C_CAUSE_S_GW_BUSY_FALL BIT(18) /* Cause coalesce registers. */ #define MLXBF_I2C_CAUSE_COALESCE_0 0x00 #define MLXBF_I2C_CAUSE_TYU_SLAVE_BIT 3 #define MLXBF_I2C_CAUSE_YU_SLAVE_BIT 1 /* Functional enable register. */ #define MLXBF_I2C_GPIO_0_FUNC_EN_0 0x28 /* Force OE enable register. */ #define MLXBF_I2C_GPIO_0_FORCE_OE_EN 0x30 /* * Note that Smbus GWs are on GPIOs 30:25. Two pins are used to control * SDA/SCL lines: * * SMBUS GW0 -> bits[26:25] * SMBUS GW1 -> bits[28:27] * SMBUS GW2 -> bits[30:29] */ #define MLXBF_I2C_GPIO_SMBUS_GW_PINS(num) (25 + ((num) << 1)) /* Note that gw_id can be 0,1 or 2. */ #define MLXBF_I2C_GPIO_SMBUS_GW_MASK(num) \ (0xffffffff & (~(0x3 << MLXBF_I2C_GPIO_SMBUS_GW_PINS(num)))) #define MLXBF_I2C_GPIO_SMBUS_GW_RESET_PINS(num, val) \ ((val) & MLXBF_I2C_GPIO_SMBUS_GW_MASK(num)) #define MLXBF_I2C_GPIO_SMBUS_GW_ASSERT_PINS(num, val) \ ((val) | (0x3 << MLXBF_I2C_GPIO_SMBUS_GW_PINS(num))) /* * Defines SMBus operating frequency and core clock frequency. * According to ADB files, default values are compliant to 100KHz SMBus * @ 400MHz core clock. The driver should be able to calculate core * frequency based on PLL parameters. */ #define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ /* Core PLL TYU configuration. */ #define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3) #define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16) #define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20) /* Core PLL YU configuration. */ #define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0) #define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0) #define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26) /* SMBus timing parameters. 
*/ #define MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH 0x00 #define MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE 0x04 #define MLXBF_I2C_SMBUS_TIMER_THOLD 0x08 #define MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP 0x0c #define MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA 0x10 #define MLXBF_I2C_SMBUS_THIGH_MAX_TBUF 0x14 #define MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT 0x18 #define MLXBF_I2C_SHIFT_0 0 #define MLXBF_I2C_SHIFT_8 8 #define MLXBF_I2C_SHIFT_16 16 #define MLXBF_I2C_SHIFT_24 24 #define MLXBF_I2C_MASK_8 GENMASK(7, 0) #define MLXBF_I2C_MASK_16 GENMASK(15, 0) #define MLXBF_I2C_MST_ADDR_OFFSET 0x200 /* SMBus Master GW. */ #define MLXBF_I2C_SMBUS_MASTER_GW 0x0 /* Number of bytes received and sent. */ #define MLXBF_I2C_YU_SMBUS_RS_BYTES 0x100 #define MLXBF_I2C_RSH_YU_SMBUS_RS_BYTES 0x10c /* Packet error check (PEC) value. */ #define MLXBF_I2C_SMBUS_MASTER_PEC 0x104 /* Status bits (ACK/NACK/FW Timeout). */ #define MLXBF_I2C_SMBUS_MASTER_STATUS 0x108 /* SMbus Master Finite State Machine. */ #define MLXBF_I2C_YU_SMBUS_MASTER_FSM 0x110 #define MLXBF_I2C_RSH_YU_SMBUS_MASTER_FSM 0x100 /* SMBus master GW control bits offset in MLXBF_I2C_SMBUS_MASTER_GW[31:3]. */ #define MLXBF_I2C_MASTER_LOCK_BIT BIT(31) /* Lock bit. */ #define MLXBF_I2C_MASTER_BUSY_BIT BIT(30) /* Busy bit. */ #define MLXBF_I2C_MASTER_START_BIT BIT(29) /* Control start. */ #define MLXBF_I2C_MASTER_CTL_WRITE_BIT BIT(28) /* Control write phase. */ #define MLXBF_I2C_MASTER_CTL_READ_BIT BIT(19) /* Control read phase. */ #define MLXBF_I2C_MASTER_STOP_BIT BIT(3) /* Control stop. 
 */
#define MLXBF_I2C_MASTER_ENABLE \
	(MLXBF_I2C_MASTER_LOCK_BIT | MLXBF_I2C_MASTER_BUSY_BIT | \
	 MLXBF_I2C_MASTER_START_BIT | MLXBF_I2C_MASTER_STOP_BIT)

#define MLXBF_I2C_MASTER_ENABLE_WRITE \
	(MLXBF_I2C_MASTER_ENABLE | MLXBF_I2C_MASTER_CTL_WRITE_BIT)

#define MLXBF_I2C_MASTER_ENABLE_READ \
	(MLXBF_I2C_MASTER_ENABLE | MLXBF_I2C_MASTER_CTL_READ_BIT)

#define MLXBF_I2C_MASTER_WRITE_SHIFT		21 /* Control write bytes */
#define MLXBF_I2C_MASTER_SEND_PEC_SHIFT		20 /* Send PEC byte when set to 1 */
#define MLXBF_I2C_MASTER_PARSE_EXP_SHIFT	11 /* Control parse expected bytes */
#define MLXBF_I2C_MASTER_SLV_ADDR_SHIFT		12 /* Slave address */
#define MLXBF_I2C_MASTER_READ_SHIFT		4  /* Control read bytes */

/* SMBus master GW Data descriptor. */
#define MLXBF_I2C_MASTER_DATA_DESC_ADDR	0x80
#define MLXBF_I2C_MASTER_DATA_DESC_SIZE	0x80 /* Size in bytes. */

/* Maximum bytes to read/write per SMBus transaction. */
#define MLXBF_I2C_MASTER_DATA_R_LENGTH	MLXBF_I2C_MASTER_DATA_DESC_SIZE
#define MLXBF_I2C_MASTER_DATA_W_LENGTH	(MLXBF_I2C_MASTER_DATA_DESC_SIZE - 1)

/* All bytes were transmitted. */
#define MLXBF_I2C_SMBUS_STATUS_BYTE_CNT_DONE	BIT(0)
/* NACK received. */
#define MLXBF_I2C_SMBUS_STATUS_NACK_RCV		BIT(1)
/* Slave's byte count >128 bytes. */
#define MLXBF_I2C_SMBUS_STATUS_READ_ERR		BIT(2)
/* Timeout occurred. */
#define MLXBF_I2C_SMBUS_STATUS_FW_TIMEOUT	BIT(3)

#define MLXBF_I2C_SMBUS_MASTER_STATUS_MASK	GENMASK(3, 0)

#define MLXBF_I2C_SMBUS_MASTER_STATUS_ERROR \
	(MLXBF_I2C_SMBUS_STATUS_NACK_RCV | \
	 MLXBF_I2C_SMBUS_STATUS_READ_ERR | \
	 MLXBF_I2C_SMBUS_STATUS_FW_TIMEOUT)

#define MLXBF_I2C_SMBUS_MASTER_FSM_STOP_MASK		BIT(31)
#define MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK	BIT(15)

#define MLXBF_I2C_SLV_ADDR_OFFSET	0x400

/* SMBus slave GW. */
#define MLXBF_I2C_SMBUS_SLAVE_GW	0x0
/* Number of bytes received and sent from/to master. */
#define MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES	0x100
/* Packet error check (PEC) value. */
#define MLXBF_I2C_SMBUS_SLAVE_PEC	0x104
/* SMBus slave Finite State Machine (FSM). */
#define MLXBF_I2C_SMBUS_SLAVE_FSM	0x110
/*
 * Should be set when all raised causes handled, and cleared by HW on
 * every new cause.
 */
#define MLXBF_I2C_SMBUS_SLAVE_READY	0x12c

/* SMBus slave GW control bits offset in MLXBF_I2C_SMBUS_SLAVE_GW[31:19]. */
#define MLXBF_I2C_SLAVE_BUSY_BIT	BIT(30)	/* Busy bit. */
#define MLXBF_I2C_SLAVE_WRITE_BIT	BIT(29)	/* Control write enable. */

#define MLXBF_I2C_SLAVE_ENABLE \
	(MLXBF_I2C_SLAVE_BUSY_BIT | MLXBF_I2C_SLAVE_WRITE_BIT)

#define MLXBF_I2C_SLAVE_WRITE_BYTES_SHIFT	22 /* Number of bytes to write. */
#define MLXBF_I2C_SLAVE_SEND_PEC_SHIFT		21 /* Send PEC byte shift. */

/* SMBus slave GW Data descriptor. */
#define MLXBF_I2C_SLAVE_DATA_DESC_ADDR	0x80
#define MLXBF_I2C_SLAVE_DATA_DESC_SIZE	0x80 /* Size in bytes. */

/* SMbus slave configuration registers. */
#define MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG		0x114
#define MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT		16
#define MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT	BIT(7)
#define MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK		GENMASK(6, 0)

/*
 * Timeout is given in microseconds. Note also that timeout handling is not
 * exact.
 */
#define MLXBF_I2C_SMBUS_TIMEOUT			(300 * 1000) /* 300ms */
#define MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT	(300 * 1000) /* 300ms */

/* Polling frequency in microseconds. */
#define MLXBF_I2C_POLL_FREQ_IN_USEC	200

/* Operation counts per SMBus request (command byte, data, read phase). */
#define MLXBF_I2C_SMBUS_OP_CNT_1	1
#define MLXBF_I2C_SMBUS_OP_CNT_2	2
#define MLXBF_I2C_SMBUS_OP_CNT_3	3
#define MLXBF_I2C_SMBUS_MAX_OP_CNT	MLXBF_I2C_SMBUS_OP_CNT_3

/* Helper macro to define an I2C resource parameters.
 */
#define MLXBF_I2C_RES_PARAMS(addr, size, str) \
	{ \
		.start = (addr), \
		.end = (addr) + (size) - 1, \
		.name = (str) \
	}

/* Supported SMBus operating frequencies, in Hz. */
enum {
	MLXBF_I2C_TIMING_100KHZ = 100000,
	MLXBF_I2C_TIMING_400KHZ = 400000,
	MLXBF_I2C_TIMING_1000KHZ = 1000000,
};

/* Per-operation flags carried in mlxbf_i2c_smbus_operation.flags. */
enum {
	MLXBF_I2C_F_READ = BIT(0),
	MLXBF_I2C_F_WRITE = BIT(1),
	MLXBF_I2C_F_NORESTART = BIT(3),
	MLXBF_I2C_F_SMBUS_OPERATION = BIT(4),
	MLXBF_I2C_F_SMBUS_BLOCK = BIT(5),
	MLXBF_I2C_F_SMBUS_PEC = BIT(6),
	MLXBF_I2C_F_SMBUS_PROCESS_CALL = BIT(7),
};

/* Mellanox BlueField chip type. */
enum mlxbf_i2c_chip_type {
	MLXBF_I2C_CHIP_TYPE_1, /* Mellanox BlueField-1 chip. */
	MLXBF_I2C_CHIP_TYPE_2, /* Mellanox BlueField-2 chip. */
	MLXBF_I2C_CHIP_TYPE_3  /* Mellanox BlueField-3 chip. */
};

/* List of chip resources that are being accessed by the driver. */
enum {
	MLXBF_I2C_SMBUS_RES,
	MLXBF_I2C_MST_CAUSE_RES,
	MLXBF_I2C_SLV_CAUSE_RES,
	MLXBF_I2C_COALESCE_RES,
	MLXBF_I2C_SMBUS_TIMER_RES,
	MLXBF_I2C_SMBUS_MST_RES,
	MLXBF_I2C_SMBUS_SLV_RES,
	MLXBF_I2C_COREPLL_RES,
	MLXBF_I2C_GPIO_RES,
	MLXBF_I2C_END_RES
};

/* Encapsulates timing parameters. */
struct mlxbf_i2c_timings {
	u16 scl_high;		/* Clock high period. */
	u16 scl_low;		/* Clock low period. */
	u8 sda_rise;		/* Data rise time. */
	u8 sda_fall;		/* Data fall time. */
	u8 scl_rise;		/* Clock rise time. */
	u8 scl_fall;		/* Clock fall time. */
	u16 hold_start;		/* Hold time after (REPEATED) START. */
	u16 hold_data;		/* Data hold time. */
	u16 setup_start;	/* REPEATED START condition setup time. */
	u16 setup_stop;		/* STOP condition setup time. */
	u16 setup_data;		/* Data setup time. */
	u16 pad;		/* Padding. */
	u16 buf;		/* Bus free time between STOP and START. */
	u16 thigh_max;		/* Thigh max. */
	u32 timeout;		/* Detect clock low timeout. */
};

/* One read or write phase of an SMBus transaction. */
struct mlxbf_i2c_smbus_operation {
	u32 flags;
	u32 length; /* Buffer length in bytes. */
	u8 *buffer;
};

/* A full SMBus request: target address plus up to three operations. */
struct mlxbf_i2c_smbus_request {
	u8 slave;
	u8 operation_cnt;
	struct mlxbf_i2c_smbus_operation operation[MLXBF_I2C_SMBUS_MAX_OP_CNT];
};

/* A mapped chip resource (register page) and its protection. */
struct mlxbf_i2c_resource {
	void __iomem *io;
	struct resource *params;
	struct mutex *lock; /* Mutex to protect mlxbf_i2c_resource. */
	u8 type;
};

struct mlxbf_i2c_chip_info {
	enum mlxbf_i2c_chip_type type;
	/* Chip shared resources that are being used by the I2C controller. */
	struct mlxbf_i2c_resource *shared_res[MLXBF_I2C_SHARED_RES_MAX];

	/* Callback to calculate the core PLL frequency. */
	u64 (*calculate_freq)(struct mlxbf_i2c_resource *corepll_res);

	/* Registers' address offset */
	u32 smbus_master_rs_bytes_off;
	u32 smbus_master_fsm_off;
};

/* Per-bus driver state. */
struct mlxbf_i2c_priv {
	const struct mlxbf_i2c_chip_info *chip;
	struct i2c_adapter adap;
	struct mlxbf_i2c_resource *smbus;
	struct mlxbf_i2c_resource *timer;
	struct mlxbf_i2c_resource *mst;
	struct mlxbf_i2c_resource *slv;
	struct mlxbf_i2c_resource *mst_cause;
	struct mlxbf_i2c_resource *slv_cause;
	struct mlxbf_i2c_resource *coalesce;
	u64 frequency; /* Core frequency in Hz. */
	int bus; /* Physical bus identifier. */
	int irq;
	struct i2c_client *slave[MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT];
	u32 resource_version;
};

/* Core PLL frequency. */
static u64 mlxbf_i2c_corepll_frequency;

static struct resource mlxbf_i2c_coalesce_tyu_params =
		MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COALESCE_TYU_ADDR,
				     MLXBF_I2C_COALESCE_TYU_SIZE,
				     "COALESCE_MEM");
static struct resource mlxbf_i2c_corepll_tyu_params =
		MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COREPLL_TYU_ADDR,
				     MLXBF_I2C_COREPLL_TYU_SIZE,
				     "COREPLL_MEM");
static struct resource mlxbf_i2c_corepll_yu_params =
		MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COREPLL_YU_ADDR,
				     MLXBF_I2C_COREPLL_YU_SIZE,
				     "COREPLL_MEM");
static struct resource mlxbf_i2c_corepll_rsh_yu_params =
		MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COREPLL_RSH_YU_ADDR,
				     MLXBF_I2C_COREPLL_RSH_YU_SIZE,
				     "COREPLL_MEM");
static struct resource mlxbf_i2c_gpio_tyu_params =
		MLXBF_I2C_RES_PARAMS(MLXBF_I2C_GPIO_TYU_ADDR,
				     MLXBF_I2C_GPIO_TYU_SIZE,
				     "GPIO_MEM");

/* Locks serializing access to the shared (cross-bus) register pages. */
static struct mutex mlxbf_i2c_coalesce_lock;
static struct mutex mlxbf_i2c_corepll_lock;
static struct mutex mlxbf_i2c_gpio_lock;

static struct mlxbf_i2c_resource mlxbf_i2c_coalesce_res[] = {
	[MLXBF_I2C_CHIP_TYPE_1] = {
		.params = &mlxbf_i2c_coalesce_tyu_params,
		.lock = &mlxbf_i2c_coalesce_lock,
		.type = MLXBF_I2C_COALESCE_RES
	},
	{}
};

static struct mlxbf_i2c_resource mlxbf_i2c_corepll_res[] = {
	[MLXBF_I2C_CHIP_TYPE_1] = {
		.params = &mlxbf_i2c_corepll_tyu_params,
		.lock = &mlxbf_i2c_corepll_lock,
		.type = MLXBF_I2C_COREPLL_RES
	},
	[MLXBF_I2C_CHIP_TYPE_2] = {
		.params = &mlxbf_i2c_corepll_yu_params,
		.lock = &mlxbf_i2c_corepll_lock,
		.type = MLXBF_I2C_COREPLL_RES,
	},
	[MLXBF_I2C_CHIP_TYPE_3] = {
		.params = &mlxbf_i2c_corepll_rsh_yu_params,
		.lock = &mlxbf_i2c_corepll_lock,
		.type = MLXBF_I2C_COREPLL_RES,
	}
};

static struct mlxbf_i2c_resource mlxbf_i2c_gpio_res[] = {
	[MLXBF_I2C_CHIP_TYPE_1] = {
		.params = &mlxbf_i2c_gpio_tyu_params,
		.lock = &mlxbf_i2c_gpio_lock,
		.type = MLXBF_I2C_GPIO_RES
	},
	{}
};

static u8 mlxbf_i2c_bus_count;

static struct mutex mlxbf_i2c_bus_lock;

/*
 * Function to poll a set of bits at a specific address; it checks whether
 * the bits are equal to zero when eq_zero is set to 'true', and not equal
to zero when eq_zero is set to 'false'.
 * Note that the timeout is given in microseconds.
 */
static u32 mlxbf_i2c_poll(void __iomem *io, u32 addr, u32 mask,
			  bool eq_zero, u32 timeout)
{
	u32 bits;

	/* Convert the microsecond budget into a number of poll iterations. */
	timeout = (timeout / MLXBF_I2C_POLL_FREQ_IN_USEC) + 1;

	do {
		bits = readl(io + addr) & mask;
		if (eq_zero ? bits == 0 : bits != 0)
			return eq_zero ? 1 : bits;
		udelay(MLXBF_I2C_POLL_FREQ_IN_USEC);
	} while (timeout-- != 0);

	/* 0 means the expected condition was never observed. */
	return 0;
}

/*
 * SW must make sure that the SMBus Master GW is idle before starting
 * a transaction. Accordingly, this function polls the Master FSM stop
 * bit; it returns false when the bit is asserted, true if not.
 */
static bool mlxbf_i2c_smbus_master_wait_for_idle(struct mlxbf_i2c_priv *priv)
{
	u32 mask = MLXBF_I2C_SMBUS_MASTER_FSM_STOP_MASK;
	u32 addr = priv->chip->smbus_master_fsm_off;
	u32 timeout = MLXBF_I2C_SMBUS_TIMEOUT;

	if (mlxbf_i2c_poll(priv->mst->io, addr, mask, true, timeout))
		return true;

	return false;
}

/*
 * Wait for the lock to be released before acquiring it.
 * Returns true once the lock bit is observed clear within the poll budget.
 */
static bool mlxbf_i2c_smbus_master_lock(struct mlxbf_i2c_priv *priv)
{
	if (mlxbf_i2c_poll(priv->mst->io, MLXBF_I2C_SMBUS_MASTER_GW,
			   MLXBF_I2C_MASTER_LOCK_BIT, true,
			   MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT))
		return true;

	return false;
}

static void mlxbf_i2c_smbus_master_unlock(struct mlxbf_i2c_priv *priv)
{
	/* Clear the gw to clear the lock */
	writel(0, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_GW);
}

static bool mlxbf_i2c_smbus_transaction_success(u32 master_status,
						u32 cause_status)
{
	/*
	 * When transaction ended with STOP, all bytes were transmitted,
	 * and no NACK received, then the transaction ended successfully.
	 * On the other hand, when the GW is configured with the stop bit
	 * de-asserted then the SMBus expects the following GW configuration
	 * for transfer continuation.
	 */
	if ((cause_status & MLXBF_I2C_CAUSE_WAIT_FOR_FW_DATA) ||
	    ((cause_status & MLXBF_I2C_CAUSE_TRANSACTION_ENDED) &&
	     (master_status & MLXBF_I2C_SMBUS_STATUS_BYTE_CNT_DONE) &&
	     !(master_status & MLXBF_I2C_SMBUS_STATUS_NACK_RCV)))
		return true;

	return false;
}

/*
 * Poll SMBus master status and return transaction status,
 * i.e. whether succeeded or failed. I2C and SMBus fault codes
 * are returned as negative numbers from most calls, with zero
 * or some positive number indicating a non-fault return.
 */
static int mlxbf_i2c_smbus_check_status(struct mlxbf_i2c_priv *priv)
{
	u32 master_status_bits;
	u32 cause_status_bits;

	/*
	 * GW busy bit is raised by the driver and cleared by the HW
	 * when the transaction is completed. The busy bit is a good
	 * indicator of transaction status. So poll the busy bit, and
	 * then read the cause and master status bits to determine if
	 * errors occurred during the transaction.
	 */
	mlxbf_i2c_poll(priv->mst->io, MLXBF_I2C_SMBUS_MASTER_GW,
		       MLXBF_I2C_MASTER_BUSY_BIT, true,
		       MLXBF_I2C_SMBUS_TIMEOUT);

	/* Read cause status bits. */
	cause_status_bits = readl(priv->mst_cause->io +
					MLXBF_I2C_CAUSE_ARBITER);
	cause_status_bits &= MLXBF_I2C_CAUSE_MASTER_ARBITER_BITS_MASK;

	/*
	 * Parse both Cause and Master GW bits, then return transaction status.
	 */
	master_status_bits = readl(priv->mst->io +
					MLXBF_I2C_SMBUS_MASTER_STATUS);
	master_status_bits &= MLXBF_I2C_SMBUS_MASTER_STATUS_MASK;

	if (mlxbf_i2c_smbus_transaction_success(master_status_bits,
						cause_status_bits))
		return 0;

	/*
	 * In case of timeout on GW busy, the ISR will clear busy bit but
	 * transaction ended bits cause will not be set so the transaction
	 * fails. Then, we must check Master GW status bits.
	 */
	if ((master_status_bits & MLXBF_I2C_SMBUS_MASTER_STATUS_ERROR) &&
	    (cause_status_bits & (MLXBF_I2C_CAUSE_TRANSACTION_ENDED |
				  MLXBF_I2C_CAUSE_M_GW_BUSY_FALL)))
		return -EIO;

	if (cause_status_bits & MLXBF_I2C_CAUSE_MASTER_STATUS_ERROR)
		return -EAGAIN;

	return -ETIMEDOUT;
}

/* Copy 'length' bytes into the master (or slave) GW data descriptor. */
static void mlxbf_i2c_smbus_write_data(struct mlxbf_i2c_priv *priv,
				       const u8 *data, u8 length, u32 addr,
				       bool is_master)
{
	u8 offset, aligned_length;
	u32 data32;

	aligned_length = round_up(length, 4);

	/*
	 * Copy data bytes from 4-byte aligned source buffer.
	 * Data copied to the Master GW Data Descriptor MUST be shifted
	 * left so the data starts at the MSB of the descriptor registers
	 * as required by the underlying hardware. Enable byte swapping
	 * when writing data bytes to the 32 * 32-bit HW Data registers
	 * a.k.a Master GW Data Descriptor.
	 */
	for (offset = 0; offset < aligned_length; offset += sizeof(u32)) {
		data32 = *((u32 *)(data + offset));
		if (is_master)
			iowrite32be(data32, priv->mst->io + addr + offset);
		else
			iowrite32be(data32, priv->slv->io + addr + offset);
	}
}

/* Copy 'length' bytes out of the master (or slave) GW data descriptor. */
static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv,
				      u8 *data, u8 length, u32 addr,
				      bool is_master)
{
	u32 data32, mask;
	u8 byte, offset;

	mask = sizeof(u32) - 1;

	/*
	 * Data bytes in the Master GW Data Descriptor are shifted left
	 * so the data starts at the MSB of the descriptor registers as
	 * set by the underlying hardware. Enable byte swapping while
	 * reading data bytes from the 32 * 32-bit HW Data registers
	 * a.k.a Master GW Data Descriptor.
	 */
	for (offset = 0; offset < (length & ~mask); offset += sizeof(u32)) {
		if (is_master)
			data32 = ioread32be(priv->mst->io + addr + offset);
		else
			data32 = ioread32be(priv->slv->io + addr + offset);
		*((u32 *)(data + offset)) = data32;
	}

	if (!(length & mask))
		return;

	/* Handle the trailing partial word, one byte at a time. */
	if (is_master)
		data32 = ioread32be(priv->mst->io + addr + offset);
	else
		data32 = ioread32be(priv->slv->io + addr + offset);

	for (byte = 0; byte < (length & mask); byte++) {
		data[offset + byte] = data32 & GENMASK(7, 0);
		data32 = ror32(data32, MLXBF_I2C_SHIFT_8);
	}
}

/*
 * Build the Master GW control word for a read or write phase and kick
 * off the gateway, then poll for completion status.
 */
static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
				  u8 len, u8 block_en, u8 pec_en, bool read)
{
	u32 command;

	/* Set Master GW control word. */
	if (read) {
		command = MLXBF_I2C_MASTER_ENABLE_READ;
		command |= rol32(len, MLXBF_I2C_MASTER_READ_SHIFT);
	} else {
		command = MLXBF_I2C_MASTER_ENABLE_WRITE;
		command |= rol32(len, MLXBF_I2C_MASTER_WRITE_SHIFT);
	}
	command |= rol32(slave, MLXBF_I2C_MASTER_SLV_ADDR_SHIFT);
	command |= rol32(block_en, MLXBF_I2C_MASTER_PARSE_EXP_SHIFT);
	command |= rol32(pec_en, MLXBF_I2C_MASTER_SEND_PEC_SHIFT);

	/* Clear status bits. */
	writel(0x0, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
	/* Set the cause data. */
	writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
	/* Zero PEC byte. */
	writel(0x0, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_PEC);
	/* Zero byte count. */
	writel(0x0, priv->mst->io + priv->chip->smbus_master_rs_bytes_off);

	/* GW activation. */
	writel(command, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_GW);

	/*
	 * Poll master status and check status bits. An ACK is sent when
	 * completing writing data to the bus (Master 'byte_count_done' bit
	 * is set to 1).
	 */
	return mlxbf_i2c_smbus_check_status(priv);
}

/*
 * Execute a full SMBus request: acquire the GW lock, stage the write
 * payload in the data descriptor, run the write and/or read phase, and
 * copy read data back to the caller's buffer.
 */
static int mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
					     struct mlxbf_i2c_smbus_request *request)
{
	u8 data_desc[MLXBF_I2C_MASTER_DATA_DESC_SIZE] = { 0 };
	u8 op_idx, data_idx, data_len, write_len, read_len;
	struct mlxbf_i2c_smbus_operation *operation;
	u8 read_en, write_en, block_en, pec_en;
	u8 slave, flags, addr;
	u8 *read_buf;
	int ret = 0;

	if (request->operation_cnt > MLXBF_I2C_SMBUS_MAX_OP_CNT)
		return -EINVAL;

	read_buf = NULL;
	data_idx = 0;
	read_en = 0;
	write_en = 0;
	write_len = 0;
	read_len = 0;
	block_en = 0;
	pec_en = 0;
	slave = request->slave & GENMASK(6, 0);
	addr = slave << 1;

	/*
	 * Try to acquire the smbus gw lock before any reads of the GW register since
	 * a read sets the lock.
	 */
	if (WARN_ON(!mlxbf_i2c_smbus_master_lock(priv)))
		return -EBUSY;

	/* Check whether the HW is idle */
	if (WARN_ON(!mlxbf_i2c_smbus_master_wait_for_idle(priv))) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/* Set first byte. */
	data_desc[data_idx++] = addr;

	for (op_idx = 0; op_idx < request->operation_cnt; op_idx++) {
		operation = &request->operation[op_idx];
		flags = operation->flags;

		/*
		 * Note that read and write operations might be handled by a
		 * single command. If the MLXBF_I2C_F_SMBUS_OPERATION is set
		 * then write command byte and set the optional SMBus specific
		 * bits such as block_en and pec_en. These bits MUST be
		 * submitted by the first operation only.
		 */
		if (op_idx == 0 && flags & MLXBF_I2C_F_SMBUS_OPERATION) {
			block_en = flags & MLXBF_I2C_F_SMBUS_BLOCK;
			pec_en = flags & MLXBF_I2C_F_SMBUS_PEC;
		}

		if (flags & MLXBF_I2C_F_WRITE) {
			write_en = 1;
			write_len += operation->length;
			if (data_idx + operation->length >
					MLXBF_I2C_MASTER_DATA_DESC_SIZE) {
				ret = -ENOBUFS;
				goto out_unlock;
			}
			memcpy(data_desc + data_idx,
			       operation->buffer, operation->length);
			data_idx += operation->length;
		}

		/*
		 * We assume that read operations are performed only once per
		 * SMBus transaction. *TBD* protect this statement so it won't
		 * be executed twice? or return an error if we try to read more
		 * than once?
		 */
		if (flags & MLXBF_I2C_F_READ) {
			read_en = 1;
			/* Subtract 1 as required by HW. */
			read_len = operation->length - 1;
			read_buf = operation->buffer;
		}
	}

	/* Set Master GW data descriptor. */
	data_len = write_len + 1; /* Add one byte of the slave address. */
	/*
	 * Note that data_len cannot be 0. Indeed, the slave address byte
	 * must be written to the data registers.
	 */
	mlxbf_i2c_smbus_write_data(priv, (const u8 *)data_desc, data_len,
				   MLXBF_I2C_MASTER_DATA_DESC_ADDR, true);

	if (write_en) {
		ret = mlxbf_i2c_smbus_enable(priv, slave, write_len, block_en,
					     pec_en, 0);
		if (ret)
			goto out_unlock;
	}

	if (read_en) {
		/* Write slave address to Master GW data descriptor. */
		mlxbf_i2c_smbus_write_data(priv, (const u8 *)&addr, 1,
					   MLXBF_I2C_MASTER_DATA_DESC_ADDR,
					   true);
		ret = mlxbf_i2c_smbus_enable(priv, slave, read_len, block_en,
					     pec_en, 1);
		if (!ret) {
			/* Get Master GW data descriptor. */
			mlxbf_i2c_smbus_read_data(priv, data_desc,
						  read_len + 1,
						  MLXBF_I2C_MASTER_DATA_DESC_ADDR,
						  true);

			/* Get data from Master GW data descriptor. */
			memcpy(read_buf, data_desc, read_len + 1);
		}

		/*
		 * After a read operation the SMBus FSM ps (present state)
		 * needs to be 'manually' reset. This should be removed in
		 * next tag integration.
		 */
		writel(MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK,
		       priv->mst->io + priv->chip->smbus_master_fsm_off);
	}

out_unlock:
	mlxbf_i2c_smbus_master_unlock(priv);

	return ret;
}

/* I2C SMBus protocols. */

/* Build a request for an SMBus Quick command (no data bytes). */
static void mlxbf_i2c_smbus_quick_command(struct mlxbf_i2c_smbus_request *request,
					  u8 read)
{
	request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_1;

	request->operation[0].length = 0;
	request->operation[0].flags = MLXBF_I2C_F_WRITE;
	request->operation[0].flags |= read ?
				       MLXBF_I2C_F_READ : 0;
}

/* Build a request for SMBus Receive/Send Byte. */
static void mlxbf_i2c_smbus_byte_func(struct mlxbf_i2c_smbus_request *request,
				      u8 *data, bool read, bool pec_check)
{
	request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_1;

	request->operation[0].length = 1;
	request->operation[0].length += pec_check;

	request->operation[0].flags = MLXBF_I2C_F_SMBUS_OPERATION;
	request->operation[0].flags |= read ?
				MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE;
	request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;

	request->operation[0].buffer = data;
}

/* Build a request for SMBus Read/Write Byte Data (command + 1 data byte). */
static void
mlxbf_i2c_smbus_data_byte_func(struct mlxbf_i2c_smbus_request *request,
			       u8 *command, u8 *data, bool read, bool pec_check)
{
	request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_2;

	request->operation[0].length = 1;
	request->operation[0].flags =
			MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE;
	request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
	request->operation[0].buffer = command;

	request->operation[1].length = 1;
	request->operation[1].length += pec_check;
	request->operation[1].flags = read ?
				MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE;
	request->operation[1].buffer = data;
}

/* Build a request for SMBus Read/Write Word Data (command + 2 data bytes). */
static void
mlxbf_i2c_smbus_data_word_func(struct mlxbf_i2c_smbus_request *request,
			       u8 *command, u8 *data, bool read, bool pec_check)
{
	request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_2;

	request->operation[0].length = 1;
	request->operation[0].flags =
			MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE;
	request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
	request->operation[0].buffer = command;

	request->operation[1].length = 2;
	request->operation[1].length += pec_check;
	request->operation[1].flags = read ?
				MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE;
	request->operation[1].buffer = data;
}

/* Build a request for an I2C-style block transfer (no leading count byte). */
static void
mlxbf_i2c_smbus_i2c_block_func(struct mlxbf_i2c_smbus_request *request,
			       u8 *command, u8 *data, u8 *data_len, bool read,
			       bool pec_check)
{
	request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_2;

	request->operation[0].length = 1;
	request->operation[0].flags =
			MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE;
	request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
	request->operation[0].buffer = command;

	/*
	 * As specified in the standard, the max number of bytes to read/write
	 * per block operation is 32 bytes. In Golan code, the controller can
	 * read up to 128 bytes and write up to 127 bytes.
	 */
	request->operation[1].length =
	    (*data_len + pec_check > I2C_SMBUS_BLOCK_MAX) ?
	    I2C_SMBUS_BLOCK_MAX : *data_len + pec_check;
	request->operation[1].flags = read ?
				MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE;
	/*
	 * Skip the first data byte, which corresponds to the number of bytes
	 * to read/write.
	 */
	request->operation[1].buffer = data + 1;

	*data_len = request->operation[1].length;

	/* Set the number of byte to read. This will be used by userspace. */
	if (read)
		data[0] = *data_len;
}

/* Build a request for SMBus Block Read/Write (with leading count byte). */
static void mlxbf_i2c_smbus_block_func(struct mlxbf_i2c_smbus_request *request,
				       u8 *command, u8 *data, u8 *data_len,
				       bool read, bool pec_check)
{
	request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_2;

	request->operation[0].length = 1;
	request->operation[0].flags =
			MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE;
	request->operation[0].flags |= MLXBF_I2C_F_SMBUS_BLOCK;
	request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
	request->operation[0].buffer = command;

	request->operation[1].length =
	    (*data_len + pec_check > I2C_SMBUS_BLOCK_MAX) ?
	    I2C_SMBUS_BLOCK_MAX : *data_len + pec_check;
	request->operation[1].flags = read ?
				MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE;
	request->operation[1].buffer = data + 1;

	*data_len = request->operation[1].length;

	/* Set the number of bytes to read. This will be used by userspace. */
	if (read)
		data[0] = *data_len;
}

/* Build a request for SMBus Process Call (write word, read word back). */
static void
mlxbf_i2c_smbus_process_call_func(struct mlxbf_i2c_smbus_request *request,
				  u8 *command, u8 *data, bool pec_check)
{
	request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_3;

	request->operation[0].length = 1;
	request->operation[0].flags =
			MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE;
	request->operation[0].flags |= MLXBF_I2C_F_SMBUS_BLOCK;
	request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
	request->operation[0].buffer = command;

	request->operation[1].length = 2;
	request->operation[1].flags = MLXBF_I2C_F_WRITE;
	request->operation[1].buffer = data;

	request->operation[2].length = 3;
	request->operation[2].flags = MLXBF_I2C_F_READ;
	request->operation[2].buffer = data;
}

/* Build a request for SMBus Block Process Call (write block, read block). */
static void
mlxbf_i2c_smbus_blk_process_call_func(struct mlxbf_i2c_smbus_request *request,
				      u8 *command, u8 *data, u8 *data_len,
				      bool pec_check)
{
	u32 length;

	request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_3;

	request->operation[0].length = 1;
	request->operation[0].flags =
			MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE;
	request->operation[0].flags |= MLXBF_I2C_F_SMBUS_BLOCK;
	request->operation[0].flags |= (pec_check) ? MLXBF_I2C_F_SMBUS_PEC : 0;
	request->operation[0].buffer = command;

	length = (*data_len + pec_check > I2C_SMBUS_BLOCK_MAX) ?
	    I2C_SMBUS_BLOCK_MAX : *data_len + pec_check;

	request->operation[1].length = length - pec_check;
	request->operation[1].flags = MLXBF_I2C_F_WRITE;
	request->operation[1].buffer = data;

	request->operation[2].length = length;
	request->operation[2].flags = MLXBF_I2C_F_READ;
	request->operation[2].buffer = data;

	*data_len = length; /* including PEC byte. */
}

/* Initialization functions.
 */

/* Return true when this bus's chip matches the given chip type. */
static bool mlxbf_i2c_has_chip_type(struct mlxbf_i2c_priv *priv, u8 type)
{
	return priv->chip->type == type;
}

/* Look up a shared (cross-bus) resource of the given type, or NULL. */
static struct mlxbf_i2c_resource *
mlxbf_i2c_get_shared_resource(struct mlxbf_i2c_priv *priv, u8 type)
{
	const struct mlxbf_i2c_chip_info *chip = priv->chip;
	struct mlxbf_i2c_resource *res;
	u8 res_idx = 0;

	for (res_idx = 0; res_idx < MLXBF_I2C_SHARED_RES_MAX; res_idx++) {
		res = chip->shared_res[res_idx];
		if (res && res->type == type)
			return res;
	}

	return NULL;
}

/*
 * Allocate and io-map the platform resource at index 'type', storing the
 * new descriptor in *res. Fails if *res is already populated.
 */
static int mlxbf_i2c_init_resource(struct platform_device *pdev,
				   struct mlxbf_i2c_resource **res,
				   u8 type)
{
	struct mlxbf_i2c_resource *tmp_res;
	struct device *dev = &pdev->dev;

	if (!res || *res || type >= MLXBF_I2C_END_RES)
		return -EINVAL;

	tmp_res = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource),
			       GFP_KERNEL);
	if (!tmp_res)
		return -ENOMEM;

	tmp_res->io = devm_platform_get_and_ioremap_resource(pdev, type,
							     &tmp_res->params);
	if (IS_ERR(tmp_res->io)) {
		devm_kfree(dev, tmp_res);
		return PTR_ERR(tmp_res->io);
	}

	tmp_res->type = type;

	*res = tmp_res;

	return 0;
}

/* Convert a duration in nanoseconds into core-clock ticks. */
static u32 mlxbf_i2c_get_ticks(struct mlxbf_i2c_priv *priv, u64 nanoseconds,
			       bool minimum)
{
	u64 frequency;
	u32 ticks;

	/*
	 * Compute ticks as follow:
	 *
	 *           Ticks
	 * Time = --------- x 10^9    =>    Ticks = Time x Frequency x 10^-9
	 *         Frequency
	 */
	frequency = priv->frequency;
	ticks = (nanoseconds * frequency) / MLXBF_I2C_FREQUENCY_1GHZ;
	/*
	 * The number of ticks is rounded down and if minimum is equal to 1
	 * then add one tick.
	 */
	if (minimum)
		ticks++;

	return ticks;
}

/* Convert nanoseconds to ticks, then mask and shift into register position. */
static u32 mlxbf_i2c_set_timer(struct mlxbf_i2c_priv *priv, u64 nsec, bool opt,
			       u32 mask, u8 shift)
{
	u32 val = (mlxbf_i2c_get_ticks(priv, nsec, opt) & mask) << shift;

	return val;
}

/* Program the bus timing registers from the given timing table. */
static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv,
				  const struct mlxbf_i2c_timings *timings)
{
	u32 timer;

	timer = mlxbf_i2c_set_timer(priv, timings->scl_high,
				    false, MLXBF_I2C_MASK_16,
				    MLXBF_I2C_SHIFT_0);
	timer |= mlxbf_i2c_set_timer(priv, timings->scl_low,
				     false, MLXBF_I2C_MASK_16,
				     MLXBF_I2C_SHIFT_16);
	writel(timer, priv->timer->io +
		MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH);

	timer = mlxbf_i2c_set_timer(priv, timings->sda_rise, false,
				    MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_0);
	timer |= mlxbf_i2c_set_timer(priv, timings->sda_fall, false,
				     MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_8);
	timer |= mlxbf_i2c_set_timer(priv, timings->scl_rise, false,
				     MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_16);
	timer |= mlxbf_i2c_set_timer(priv, timings->scl_fall, false,
				     MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_24);
	writel(timer, priv->timer->io +
		MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE);

	timer = mlxbf_i2c_set_timer(priv, timings->hold_start, true,
				    MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
	timer |= mlxbf_i2c_set_timer(priv, timings->hold_data, true,
				     MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
	writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_TIMER_THOLD);

	timer = mlxbf_i2c_set_timer(priv, timings->setup_start, true,
				    MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
	timer |= mlxbf_i2c_set_timer(priv, timings->setup_stop, true,
				     MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
	writel(timer, priv->timer->io +
		MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP);

	timer = mlxbf_i2c_set_timer(priv, timings->setup_data, true,
				    MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
	writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA);

	timer = mlxbf_i2c_set_timer(priv, timings->buf, false,
				    MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
	timer |= mlxbf_i2c_set_timer(priv, timings->thigh_max, false,
				     MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
	writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF);

	timer = timings->timeout;
	writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT);
}

enum mlxbf_i2c_timings_config {
	MLXBF_I2C_TIMING_CONFIG_100KHZ,
	MLXBF_I2C_TIMING_CONFIG_400KHZ,
	MLXBF_I2C_TIMING_CONFIG_1000KHZ,
};

/*
 * Note that the mlxbf_i2c_timings->timeout value is not related to the
 * bus frequency, it is impacted by the time it takes the driver to
 * complete data transmission before transaction abort.
 */
static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = {
	[MLXBF_I2C_TIMING_CONFIG_100KHZ] = {
		.scl_high = 4810,
		.scl_low = 5000,
		.hold_start = 4000,
		.setup_start = 4800,
		.setup_stop = 4000,
		.setup_data = 250,
		.sda_rise = 50,
		.sda_fall = 50,
		.scl_rise = 50,
		.scl_fall = 50,
		.hold_data = 300,
		.buf = 20000,
		.thigh_max = 5000,
		.timeout = 106500
	},
	[MLXBF_I2C_TIMING_CONFIG_400KHZ] = {
		.scl_high = 1011,
		.scl_low = 1300,
		.hold_start = 600,
		.setup_start = 700,
		.setup_stop = 600,
		.setup_data = 100,
		.sda_rise = 50,
		.sda_fall = 50,
		.scl_rise = 50,
		.scl_fall = 50,
		.hold_data = 300,
		.buf = 20000,
		.thigh_max = 5000,
		.timeout = 106500
	},
	[MLXBF_I2C_TIMING_CONFIG_1000KHZ] = {
		.scl_high = 600,
		.scl_low = 1300,
		.hold_start = 600,
		.setup_start = 600,
		.setup_stop = 600,
		.setup_data = 100,
		.sda_rise = 50,
		.sda_fall = 50,
		.scl_rise = 50,
		.scl_fall = 50,
		.hold_data = 300,
		.buf = 20000,
		.thigh_max = 5000,
		.timeout = 106500
	}
};

/* Select and program bus timings from the "clock-frequency" property. */
static int mlxbf_i2c_init_timings(struct platform_device *pdev,
				  struct mlxbf_i2c_priv *priv)
{
	enum mlxbf_i2c_timings_config config_idx;
	struct device *dev = &pdev->dev;
	u32 config_khz;
	int ret;

	ret = device_property_read_u32(dev, "clock-frequency", &config_khz);
	if (ret < 0)
		config_khz = I2C_MAX_STANDARD_MODE_FREQ;

	switch (config_khz) {
	default:
		/* Default setting is 100 KHz.
*/ pr_warn("Illegal value %d: defaulting to 100 KHz\n", config_khz); fallthrough; case I2C_MAX_STANDARD_MODE_FREQ: config_idx = MLXBF_I2C_TIMING_CONFIG_100KHZ; break; case I2C_MAX_FAST_MODE_FREQ: config_idx = MLXBF_I2C_TIMING_CONFIG_400KHZ; break; case I2C_MAX_FAST_MODE_PLUS_FREQ: config_idx = MLXBF_I2C_TIMING_CONFIG_1000KHZ; break; } mlxbf_i2c_set_timings(priv, &mlxbf_i2c_timings[config_idx]); return 0; } static int mlxbf_i2c_get_gpio(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct mlxbf_i2c_resource *gpio_res; struct device *dev = &pdev->dev; struct resource *params; resource_size_t size; gpio_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_GPIO_RES); if (!gpio_res) return -EPERM; /* * The GPIO region in TYU space is shared among I2C busses. * This function MUST be serialized to avoid racing when * claiming the memory region and/or setting up the GPIO. */ lockdep_assert_held(gpio_res->lock); /* Check whether the memory map exist. */ if (gpio_res->io) return 0; params = gpio_res->params; size = resource_size(params); if (!devm_request_mem_region(dev, params->start, size, params->name)) return -EFAULT; gpio_res->io = devm_ioremap(dev, params->start, size); if (!gpio_res->io) { devm_release_mem_region(dev, params->start, size); return -ENOMEM; } return 0; } static int mlxbf_i2c_release_gpio(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct mlxbf_i2c_resource *gpio_res; struct device *dev = &pdev->dev; struct resource *params; gpio_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_GPIO_RES); if (!gpio_res) return 0; mutex_lock(gpio_res->lock); if (gpio_res->io) { /* Release the GPIO resource. 
*/ params = gpio_res->params; devm_iounmap(dev, gpio_res->io); devm_release_mem_region(dev, params->start, resource_size(params)); } mutex_unlock(gpio_res->lock); return 0; } static int mlxbf_i2c_get_corepll(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct mlxbf_i2c_resource *corepll_res; struct device *dev = &pdev->dev; struct resource *params; resource_size_t size; corepll_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_COREPLL_RES); if (!corepll_res) return -EPERM; /* * The COREPLL region in TYU space is shared among I2C busses. * This function MUST be serialized to avoid racing when * claiming the memory region. */ lockdep_assert_held(corepll_res->lock); /* Check whether the memory map exist. */ if (corepll_res->io) return 0; params = corepll_res->params; size = resource_size(params); if (!devm_request_mem_region(dev, params->start, size, params->name)) return -EFAULT; corepll_res->io = devm_ioremap(dev, params->start, size); if (!corepll_res->io) { devm_release_mem_region(dev, params->start, size); return -ENOMEM; } return 0; } static int mlxbf_i2c_release_corepll(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct mlxbf_i2c_resource *corepll_res; struct device *dev = &pdev->dev; struct resource *params; corepll_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_COREPLL_RES); mutex_lock(corepll_res->lock); if (corepll_res->io) { /* Release the CorePLL resource. */ params = corepll_res->params; devm_iounmap(dev, corepll_res->io); devm_release_mem_region(dev, params->start, resource_size(params)); } mutex_unlock(corepll_res->lock); return 0; } static int mlxbf_i2c_init_master(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct mlxbf_i2c_resource *gpio_res; struct device *dev = &pdev->dev; u32 config_reg; int ret; /* This configuration is only needed for BlueField 1. 
*/ if (!mlxbf_i2c_has_chip_type(priv, MLXBF_I2C_CHIP_TYPE_1)) return 0; gpio_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_GPIO_RES); if (!gpio_res) return -EPERM; /* * The GPIO region in TYU space is shared among I2C busses. * This function MUST be serialized to avoid racing when * claiming the memory region and/or setting up the GPIO. */ mutex_lock(gpio_res->lock); ret = mlxbf_i2c_get_gpio(pdev, priv); if (ret < 0) { dev_err(dev, "Failed to get gpio resource"); mutex_unlock(gpio_res->lock); return ret; } /* * TYU - Configuration for GPIO pins. Those pins must be asserted in * MLXBF_I2C_GPIO_0_FUNC_EN_0, i.e. GPIO 0 is controlled by HW, and must * be reset in MLXBF_I2C_GPIO_0_FORCE_OE_EN, i.e. GPIO_OE will be driven * instead of HW_OE. * For now, we do not reset the GPIO state when the driver is removed. * First, it is not necessary to disable the bus since we are using * the same busses. Then, some busses might be shared among Linux and * platform firmware; disabling the bus might compromise the system * functionality. */ config_reg = readl(gpio_res->io + MLXBF_I2C_GPIO_0_FUNC_EN_0); config_reg = MLXBF_I2C_GPIO_SMBUS_GW_ASSERT_PINS(priv->bus, config_reg); writel(config_reg, gpio_res->io + MLXBF_I2C_GPIO_0_FUNC_EN_0); config_reg = readl(gpio_res->io + MLXBF_I2C_GPIO_0_FORCE_OE_EN); config_reg = MLXBF_I2C_GPIO_SMBUS_GW_RESET_PINS(priv->bus, config_reg); writel(config_reg, gpio_res->io + MLXBF_I2C_GPIO_0_FORCE_OE_EN); mutex_unlock(gpio_res->lock); return 0; } static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) { u64 core_frequency; u8 core_od, core_r; u32 corepll_val; u16 core_f; corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); /* Get Core PLL configuration bits. 
*/ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val); core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val); core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val); /* * Compute PLL output frequency as follow: * * CORE_F + 1 * PLL_OUT_FREQ = PLL_IN_FREQ * ---------------------------- * (CORE_R + 1) * (CORE_OD + 1) * * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency * and PadFrequency, respectively. */ core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f); core_frequency /= (++core_r) * (++core_od); return core_frequency; } static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) { u32 corepll_reg1_val, corepll_reg2_val; u64 corepll_frequency; u8 core_od, core_r; u32 core_f; corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2); /* Get Core PLL configuration bits */ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val); core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val); core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val); /* * Compute PLL output frequency as follow: * * CORE_F / 16384 * PLL_OUT_FREQ = PLL_IN_FREQ * ---------------------------- * (CORE_R + 1) * (CORE_OD + 1) * * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency * and PadFrequency, respectively. 
*/ corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST; corepll_frequency /= (++core_r) * (++core_od); return corepll_frequency; } static int mlxbf_i2c_calculate_corepll_freq(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { const struct mlxbf_i2c_chip_info *chip = priv->chip; struct mlxbf_i2c_resource *corepll_res; struct device *dev = &pdev->dev; u64 *freq = &priv->frequency; int ret; corepll_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_COREPLL_RES); if (!corepll_res) return -EPERM; /* * First, check whether the TYU core Clock frequency is set. * The TYU core frequency is the same for all I2C busses; when * the first device gets probed the frequency is determined and * stored into a globally visible variable. So, first of all, * check whether the frequency is already set. Here, we assume * that the frequency is expected to be greater than 0. */ mutex_lock(corepll_res->lock); if (!mlxbf_i2c_corepll_frequency) { if (!chip->calculate_freq) { mutex_unlock(corepll_res->lock); return -EPERM; } ret = mlxbf_i2c_get_corepll(pdev, priv); if (ret < 0) { dev_err(dev, "Failed to get corePLL resource"); mutex_unlock(corepll_res->lock); return ret; } mlxbf_i2c_corepll_frequency = chip->calculate_freq(corepll_res); } mutex_unlock(corepll_res->lock); *freq = mlxbf_i2c_corepll_frequency; return 0; } static int mlxbf_i2c_slave_enable(struct mlxbf_i2c_priv *priv, struct i2c_client *slave) { u8 reg, reg_cnt, byte, addr_tmp; u32 slave_reg, slave_reg_tmp; if (!priv) return -EPERM; reg_cnt = MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT >> 2; /* * Read the slave registers. There are 4 * 32-bit slave registers. * Each slave register can hold up to 4 * 8-bit slave configuration: * 1) A 7-bit address * 2) And a status bit (1 if enabled, 0 if not). * Look for the next available slave register slot. */ for (reg = 0; reg < reg_cnt; reg++) { slave_reg = readl(priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4); /* * Each register holds 4 slave addresses. 
So, we have to keep * the byte order consistent with the value read in order to * update the register correctly, if needed. */ slave_reg_tmp = slave_reg; for (byte = 0; byte < 4; byte++) { addr_tmp = slave_reg_tmp & GENMASK(7, 0); /* * If an enable bit is not set in the * MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG register, then the * slave address slot associated with that bit is * free. So set the enable bit and write the * slave address bits. */ if (!(addr_tmp & MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT)) { slave_reg &= ~(MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK << (byte * 8)); slave_reg |= (slave->addr << (byte * 8)); slave_reg |= MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT << (byte * 8); writel(slave_reg, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + (reg * 0x4)); /* * Set the slave at the corresponding index. */ priv->slave[(reg * 4) + byte] = slave; return 0; } /* Parse next byte. */ slave_reg_tmp >>= 8; } } return -EBUSY; } static int mlxbf_i2c_slave_disable(struct mlxbf_i2c_priv *priv, u8 addr) { u8 addr_tmp, reg, reg_cnt, byte; u32 slave_reg, slave_reg_tmp; reg_cnt = MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT >> 2; /* * Read the slave registers. There are 4 * 32-bit slave registers. * Each slave register can hold up to 4 * 8-bit slave configuration: * 1) A 7-bit address * 2) And a status bit (1 if enabled, 0 if not). * Check if addr is present in the registers. */ for (reg = 0; reg < reg_cnt; reg++) { slave_reg = readl(priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4); /* Check whether the address slots are empty. */ if (!slave_reg) continue; /* * Check if addr matches any of the 4 slave addresses * in the register. */ slave_reg_tmp = slave_reg; for (byte = 0; byte < 4; byte++) { addr_tmp = slave_reg_tmp & MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK; /* * Parse slave address bytes and check whether the * slave address already exists. */ if (addr_tmp == addr) { /* Clear the slave address slot. 
*/ slave_reg &= ~(GENMASK(7, 0) << (byte * 8)); writel(slave_reg, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + (reg * 0x4)); /* Free slave at the corresponding index */ priv->slave[(reg * 4) + byte] = NULL; return 0; } /* Parse next byte. */ slave_reg_tmp >>= 8; } } return -ENXIO; } static int mlxbf_i2c_init_coalesce(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct mlxbf_i2c_resource *coalesce_res; struct resource *params; resource_size_t size; int ret = 0; /* * Unlike BlueField-1 platform, the coalesce registers is a dedicated * resource in the next generations of BlueField. */ if (mlxbf_i2c_has_chip_type(priv, MLXBF_I2C_CHIP_TYPE_1)) { coalesce_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_COALESCE_RES); if (!coalesce_res) return -EPERM; /* * The Cause Coalesce group in TYU space is shared among * I2C busses. This function MUST be serialized to avoid * racing when claiming the memory region. */ lockdep_assert_held(mlxbf_i2c_gpio_res->lock); /* Check whether the memory map exist. 
*/ if (coalesce_res->io) { priv->coalesce = coalesce_res; return 0; } params = coalesce_res->params; size = resource_size(params); if (!request_mem_region(params->start, size, params->name)) return -EFAULT; coalesce_res->io = ioremap(params->start, size); if (!coalesce_res->io) { release_mem_region(params->start, size); return -ENOMEM; } priv->coalesce = coalesce_res; } else { ret = mlxbf_i2c_init_resource(pdev, &priv->coalesce, MLXBF_I2C_COALESCE_RES); } return ret; } static int mlxbf_i2c_release_coalesce(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct mlxbf_i2c_resource *coalesce_res; struct device *dev = &pdev->dev; struct resource *params; resource_size_t size; coalesce_res = priv->coalesce; if (coalesce_res->io) { params = coalesce_res->params; size = resource_size(params); if (mlxbf_i2c_has_chip_type(priv, MLXBF_I2C_CHIP_TYPE_1)) { mutex_lock(coalesce_res->lock); iounmap(coalesce_res->io); release_mem_region(params->start, size); mutex_unlock(coalesce_res->lock); } else { devm_release_mem_region(dev, params->start, size); } } return 0; } static int mlxbf_i2c_init_slave(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct device *dev = &pdev->dev; u32 int_reg; int ret; /* Reset FSM. */ writel(0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_FSM); /* * Enable slave cause interrupt bits. Drive * MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE and * MLXBF_I2C_CAUSE_WRITE_SUCCESS, these are enabled when an external * masters issue a Read and Write, respectively. But, clear all * interrupts first. */ writel(~0, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR); int_reg = MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE; int_reg |= MLXBF_I2C_CAUSE_WRITE_SUCCESS; writel(int_reg, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_EVTEN0); /* Finally, set the 'ready' bit to start handling transactions. */ writel(0x1, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_READY); /* Initialize the cause coalesce resource. 
*/ ret = mlxbf_i2c_init_coalesce(pdev, priv); if (ret < 0) { dev_err(dev, "failed to initialize cause coalesce\n"); return ret; } return 0; } static bool mlxbf_i2c_has_coalesce(struct mlxbf_i2c_priv *priv, bool *read, bool *write) { const struct mlxbf_i2c_chip_info *chip = priv->chip; u32 coalesce0_reg, cause_reg; u8 slave_shift, is_set; *write = false; *read = false; slave_shift = chip->type != MLXBF_I2C_CHIP_TYPE_1 ? MLXBF_I2C_CAUSE_YU_SLAVE_BIT : priv->bus + MLXBF_I2C_CAUSE_TYU_SLAVE_BIT; coalesce0_reg = readl(priv->coalesce->io + MLXBF_I2C_CAUSE_COALESCE_0); is_set = coalesce0_reg & (1 << slave_shift); if (!is_set) return false; /* Check the source of the interrupt, i.e. whether a Read or Write. */ cause_reg = readl(priv->slv_cause->io + MLXBF_I2C_CAUSE_ARBITER); if (cause_reg & MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE) *read = true; else if (cause_reg & MLXBF_I2C_CAUSE_WRITE_SUCCESS) *write = true; /* Clear cause bits. */ writel(~0x0, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR); return true; } static bool mlxbf_i2c_slave_wait_for_idle(struct mlxbf_i2c_priv *priv, u32 timeout) { u32 mask = MLXBF_I2C_CAUSE_S_GW_BUSY_FALL; u32 addr = MLXBF_I2C_CAUSE_ARBITER; if (mlxbf_i2c_poll(priv->slv_cause->io, addr, mask, false, timeout)) return true; return false; } static struct i2c_client *mlxbf_i2c_get_slave_from_addr( struct mlxbf_i2c_priv *priv, u8 addr) { int i; for (i = 0; i < MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT; i++) { if (!priv->slave[i]) continue; if (priv->slave[i]->addr == addr) return priv->slave[i]; } return NULL; } /* * Send byte to 'external' smbus master. This function is executed when * an external smbus master wants to read data from the BlueField. 
*/
static int mlxbf_i2c_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
{
	u8 data_desc[MLXBF_I2C_SLAVE_DATA_DESC_SIZE] = { 0 };
	u8 write_size, pec_en, addr, value, byte_cnt;
	struct i2c_client *slave;
	u32 control32, data32;
	int ret = 0;

	/*
	 * Read the first byte received from the external master to
	 * determine the slave address. This byte is located in the
	 * first data descriptor register of the slave GW.
	 */
	data32 = ioread32be(priv->slv->io + MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
	addr = (data32 & GENMASK(7, 0)) >> 1;

	/*
	 * Check if the slave address received in the data descriptor register
	 * matches any of the slave addresses registered. If there is a match,
	 * set the slave.
	 */
	slave = mlxbf_i2c_get_slave_from_addr(priv, addr);
	if (!slave) {
		ret = -ENXIO;
		goto clear_csr;
	}

	/*
	 * An I2C read can consist of a WRITE bit transaction followed by
	 * a READ bit transaction. Indeed, slave devices often expect
	 * the slave address to be followed by the internal address.
	 * So, write the internal address byte first, and then, send the
	 * requested data to the master.
	 */
	if (recv_bytes > 1) {
		i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
		value = (data32 >> 8) & GENMASK(7, 0);
		ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED,
				      &value);
		i2c_slave_event(slave, I2C_SLAVE_STOP, &value);

		if (ret < 0)
			goto clear_csr;
	}

	/*
	 * Send data to the master. Currently, the driver supports
	 * READ_BYTE, READ_WORD and BLOCK READ protocols. The
	 * hardware can send up to 128 bytes per transfer which is
	 * the total size of the data registers.
	 */
	i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value);

	for (byte_cnt = 0; byte_cnt < MLXBF_I2C_SLAVE_DATA_DESC_SIZE; byte_cnt++) {
		data_desc[byte_cnt] = value;
		i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value);
	}

	/* Send a stop condition to the backend. */
	i2c_slave_event(slave, I2C_SLAVE_STOP, &value);

	/* Set the number of bytes to write to master. */
	write_size = (byte_cnt - 1) & 0x7f;

	/* Write data to Slave GW data descriptor.
*/ mlxbf_i2c_smbus_write_data(priv, data_desc, byte_cnt, MLXBF_I2C_SLAVE_DATA_DESC_ADDR, false); pec_en = 0; /* Disable PEC since it is not supported. */ /* Prepare control word. */ control32 = MLXBF_I2C_SLAVE_ENABLE; control32 |= rol32(write_size, MLXBF_I2C_SLAVE_WRITE_BYTES_SHIFT); control32 |= rol32(pec_en, MLXBF_I2C_SLAVE_SEND_PEC_SHIFT); writel(control32, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_GW); /* * Wait until the transfer is completed; the driver will wait * until the GW is idle, a cause will rise on fall of GW busy. */ mlxbf_i2c_slave_wait_for_idle(priv, MLXBF_I2C_SMBUS_TIMEOUT); clear_csr: /* Release the Slave GW. */ writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES); writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_PEC); writel(0x1, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_READY); return ret; } /* * Receive bytes from 'external' smbus master. This function is executed when * an external smbus master wants to write data to the BlueField. */ static int mlxbf_i2c_irq_recv(struct mlxbf_i2c_priv *priv, u8 recv_bytes) { u8 data_desc[MLXBF_I2C_SLAVE_DATA_DESC_SIZE] = { 0 }; struct i2c_client *slave; u8 value, byte, addr; int ret = 0; /* Read data from Slave GW data descriptor. */ mlxbf_i2c_smbus_read_data(priv, data_desc, recv_bytes, MLXBF_I2C_SLAVE_DATA_DESC_ADDR, false); addr = data_desc[0] >> 1; /* * Check if the slave address received in the data descriptor register * matches any of the slave addresses registered. */ slave = mlxbf_i2c_get_slave_from_addr(priv, addr); if (!slave) { ret = -EINVAL; goto clear_csr; } /* * Notify the slave backend that an smbus master wants to write data * to the BlueField. */ i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value); /* Send the received data to the slave backend. 
*/ for (byte = 1; byte < recv_bytes; byte++) { value = data_desc[byte]; ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value); if (ret < 0) break; } /* * Send a stop event to the slave backend, to signal * the end of the write transactions. */ i2c_slave_event(slave, I2C_SLAVE_STOP, &value); clear_csr: /* Release the Slave GW. */ writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES); writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_PEC); writel(0x1, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_READY); return ret; } static irqreturn_t mlxbf_i2c_irq(int irq, void *ptr) { struct mlxbf_i2c_priv *priv = ptr; bool read, write, irq_is_set; u32 rw_bytes_reg; u8 recv_bytes; /* * Read TYU interrupt register and determine the source of the * interrupt. Based on the source of the interrupt one of the * following actions are performed: * - Receive data and send response to master. * - Send data and release slave GW. * * Handle read/write transaction only. CRmaster and Iarp requests * are ignored for now. */ irq_is_set = mlxbf_i2c_has_coalesce(priv, &read, &write); if (!irq_is_set || (!read && !write)) { /* Nothing to do here, interrupt was not from this device. */ return IRQ_NONE; } /* * The MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES includes the number of * bytes from/to master. These are defined by 8-bits each. If the lower * 8 bits are set, then the master expect to read N bytes from the * slave, if the higher 8 bits are sent then the slave expect N bytes * from the master. */ rw_bytes_reg = readl(priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES); recv_bytes = (rw_bytes_reg >> 8) & GENMASK(7, 0); /* * For now, the slave supports 128 bytes transfer. Discard remaining * data bytes if the master wrote more than * MLXBF_I2C_SLAVE_DATA_DESC_SIZE, i.e, the actual size of the slave * data descriptor. * * Note that we will never expect to transfer more than 128 bytes; as * specified in the SMBus standard, block transactions cannot exceed * 32 bytes. 
*/ recv_bytes = recv_bytes > MLXBF_I2C_SLAVE_DATA_DESC_SIZE ? MLXBF_I2C_SLAVE_DATA_DESC_SIZE : recv_bytes; if (read) mlxbf_i2c_irq_send(priv, recv_bytes); else mlxbf_i2c_irq_recv(priv, recv_bytes); return IRQ_HANDLED; } /* Return negative errno on error. */ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct mlxbf_i2c_smbus_request request = { 0 }; struct mlxbf_i2c_priv *priv; bool read, pec; u8 byte_cnt; request.slave = addr; read = (read_write == I2C_SMBUS_READ); pec = flags & I2C_FUNC_SMBUS_PEC; switch (size) { case I2C_SMBUS_QUICK: mlxbf_i2c_smbus_quick_command(&request, read); dev_dbg(&adap->dev, "smbus quick, slave 0x%02x\n", addr); break; case I2C_SMBUS_BYTE: mlxbf_i2c_smbus_byte_func(&request, read ? &data->byte : &command, read, pec); dev_dbg(&adap->dev, "smbus %s byte, slave 0x%02x.\n", read ? "read" : "write", addr); break; case I2C_SMBUS_BYTE_DATA: mlxbf_i2c_smbus_data_byte_func(&request, &command, &data->byte, read, pec); dev_dbg(&adap->dev, "smbus %s byte data at 0x%02x, slave 0x%02x.\n", read ? "read" : "write", command, addr); break; case I2C_SMBUS_WORD_DATA: mlxbf_i2c_smbus_data_word_func(&request, &command, (u8 *)&data->word, read, pec); dev_dbg(&adap->dev, "smbus %s word data at 0x%02x, slave 0x%02x.\n", read ? "read" : "write", command, addr); break; case I2C_SMBUS_I2C_BLOCK_DATA: byte_cnt = data->block[0]; mlxbf_i2c_smbus_i2c_block_func(&request, &command, data->block, &byte_cnt, read, pec); dev_dbg(&adap->dev, "i2c %s block data, %d bytes at 0x%02x, slave 0x%02x.\n", read ? "read" : "write", byte_cnt, command, addr); break; case I2C_SMBUS_BLOCK_DATA: byte_cnt = read ? I2C_SMBUS_BLOCK_MAX : data->block[0]; mlxbf_i2c_smbus_block_func(&request, &command, data->block, &byte_cnt, read, pec); dev_dbg(&adap->dev, "smbus %s block data, %d bytes at 0x%02x, slave 0x%02x.\n", read ? 
"read" : "write", byte_cnt, command, addr); break; case I2C_FUNC_SMBUS_PROC_CALL: mlxbf_i2c_smbus_process_call_func(&request, &command, (u8 *)&data->word, pec); dev_dbg(&adap->dev, "process call, wr/rd at 0x%02x, slave 0x%02x.\n", command, addr); break; case I2C_FUNC_SMBUS_BLOCK_PROC_CALL: byte_cnt = data->block[0]; mlxbf_i2c_smbus_blk_process_call_func(&request, &command, data->block, &byte_cnt, pec); dev_dbg(&adap->dev, "block process call, wr/rd %d bytes, slave 0x%02x.\n", byte_cnt, addr); break; default: dev_dbg(&adap->dev, "Unsupported I2C/SMBus command %d\n", size); return -EOPNOTSUPP; } priv = i2c_get_adapdata(adap); return mlxbf_i2c_smbus_start_transaction(priv, &request); } static int mlxbf_i2c_reg_slave(struct i2c_client *slave) { struct mlxbf_i2c_priv *priv = i2c_get_adapdata(slave->adapter); struct device *dev = &slave->dev; int ret; /* * Do not support ten bit chip address and do not use Packet Error * Checking (PEC). */ if (slave->flags & (I2C_CLIENT_TEN | I2C_CLIENT_PEC)) { dev_err(dev, "SMBus PEC and 10 bit address not supported\n"); return -EAFNOSUPPORT; } ret = mlxbf_i2c_slave_enable(priv, slave); if (ret) dev_err(dev, "Surpassed max number of registered slaves allowed\n"); return 0; } static int mlxbf_i2c_unreg_slave(struct i2c_client *slave) { struct mlxbf_i2c_priv *priv = i2c_get_adapdata(slave->adapter); struct device *dev = &slave->dev; int ret; /* * Unregister slave by: * 1) Disabling the slave address in hardware * 2) Freeing priv->slave at the corresponding index */ ret = mlxbf_i2c_slave_disable(priv, slave->addr); if (ret) dev_err(dev, "Unable to find slave 0x%x\n", slave->addr); return ret; } static u32 mlxbf_i2c_functionality(struct i2c_adapter *adap) { return MLXBF_I2C_FUNC_ALL; } static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = { [MLXBF_I2C_CHIP_TYPE_1] = { .type = MLXBF_I2C_CHIP_TYPE_1, .shared_res = { [0] = &mlxbf_i2c_coalesce_res[MLXBF_I2C_CHIP_TYPE_1], [1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1], [2] = 
&mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1] }, .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu, .smbus_master_rs_bytes_off = MLXBF_I2C_YU_SMBUS_RS_BYTES, .smbus_master_fsm_off = MLXBF_I2C_YU_SMBUS_MASTER_FSM }, [MLXBF_I2C_CHIP_TYPE_2] = { .type = MLXBF_I2C_CHIP_TYPE_2, .shared_res = { [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2] }, .calculate_freq = mlxbf_i2c_calculate_freq_from_yu, .smbus_master_rs_bytes_off = MLXBF_I2C_YU_SMBUS_RS_BYTES, .smbus_master_fsm_off = MLXBF_I2C_YU_SMBUS_MASTER_FSM }, [MLXBF_I2C_CHIP_TYPE_3] = { .type = MLXBF_I2C_CHIP_TYPE_3, .shared_res = { [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_3] }, .calculate_freq = mlxbf_i2c_calculate_freq_from_yu, .smbus_master_rs_bytes_off = MLXBF_I2C_RSH_YU_SMBUS_RS_BYTES, .smbus_master_fsm_off = MLXBF_I2C_RSH_YU_SMBUS_MASTER_FSM } }; static const struct i2c_algorithm mlxbf_i2c_algo = { .smbus_xfer = mlxbf_i2c_smbus_xfer, .functionality = mlxbf_i2c_functionality, .reg_slave = mlxbf_i2c_reg_slave, .unreg_slave = mlxbf_i2c_unreg_slave, }; static struct i2c_adapter_quirks mlxbf_i2c_quirks = { .max_read_len = MLXBF_I2C_MASTER_DATA_R_LENGTH, .max_write_len = MLXBF_I2C_MASTER_DATA_W_LENGTH, }; static const struct acpi_device_id mlxbf_i2c_acpi_ids[] = { { "MLNXBF03", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_1] }, { "MLNXBF23", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_2] }, { "MLNXBF31", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_3] }, {}, }; MODULE_DEVICE_TABLE(acpi, mlxbf_i2c_acpi_ids); static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv) { const struct acpi_device_id *aid; u64 bus_id; int ret; if (acpi_disabled) return -ENOENT; aid = acpi_match_device(mlxbf_i2c_acpi_ids, dev); if (!aid) return -ENODEV; priv->chip = (struct mlxbf_i2c_chip_info *)aid->driver_data; ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &bus_id); if (ret) { dev_err(dev, "Cannot retrieve UID\n"); return ret; } priv->bus = bus_id; return 0; } static int 
mlxbf_i2c_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mlxbf_i2c_priv *priv; struct i2c_adapter *adap; u32 resource_version; int irq, ret; priv = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_priv), GFP_KERNEL); if (!priv) return -ENOMEM; ret = mlxbf_i2c_acpi_probe(dev, priv); if (ret < 0) return ret; /* This property allows the driver to stay backward compatible with older * ACPI tables. * Starting BlueField-3 SoC, the "smbus" resource was broken down into 3 * separate resources "timer", "master" and "slave". */ if (device_property_read_u32(dev, "resource_version", &resource_version)) resource_version = 0; priv->resource_version = resource_version; if (priv->chip->type < MLXBF_I2C_CHIP_TYPE_3 && resource_version == 0) { priv->timer = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource), GFP_KERNEL); if (!priv->timer) return -ENOMEM; priv->mst = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource), GFP_KERNEL); if (!priv->mst) return -ENOMEM; priv->slv = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource), GFP_KERNEL); if (!priv->slv) return -ENOMEM; ret = mlxbf_i2c_init_resource(pdev, &priv->smbus, MLXBF_I2C_SMBUS_RES); if (ret < 0) return dev_err_probe(dev, ret, "Cannot fetch smbus resource info"); priv->timer->io = priv->smbus->io; priv->mst->io = priv->smbus->io + MLXBF_I2C_MST_ADDR_OFFSET; priv->slv->io = priv->smbus->io + MLXBF_I2C_SLV_ADDR_OFFSET; } else { ret = mlxbf_i2c_init_resource(pdev, &priv->timer, MLXBF_I2C_SMBUS_TIMER_RES); if (ret < 0) return dev_err_probe(dev, ret, "Cannot fetch timer resource info"); ret = mlxbf_i2c_init_resource(pdev, &priv->mst, MLXBF_I2C_SMBUS_MST_RES); if (ret < 0) return dev_err_probe(dev, ret, "Cannot fetch master resource info"); ret = mlxbf_i2c_init_resource(pdev, &priv->slv, MLXBF_I2C_SMBUS_SLV_RES); if (ret < 0) return dev_err_probe(dev, ret, "Cannot fetch slave resource info"); } ret = mlxbf_i2c_init_resource(pdev, &priv->mst_cause, MLXBF_I2C_MST_CAUSE_RES); if (ret < 0) return 
dev_err_probe(dev, ret, "Cannot fetch cause master resource info"); ret = mlxbf_i2c_init_resource(pdev, &priv->slv_cause, MLXBF_I2C_SLV_CAUSE_RES); if (ret < 0) return dev_err_probe(dev, ret, "Cannot fetch cause slave resource info"); adap = &priv->adap; adap->owner = THIS_MODULE; adap->class = I2C_CLASS_HWMON; adap->algo = &mlxbf_i2c_algo; adap->quirks = &mlxbf_i2c_quirks; adap->dev.parent = dev; adap->dev.of_node = dev->of_node; adap->nr = priv->bus; snprintf(adap->name, sizeof(adap->name), "i2c%d", adap->nr); i2c_set_adapdata(adap, priv); /* Read Core PLL frequency. */ ret = mlxbf_i2c_calculate_corepll_freq(pdev, priv); if (ret < 0) { dev_err(dev, "cannot get core clock frequency\n"); /* Set to default value. */ priv->frequency = MLXBF_I2C_COREPLL_FREQ; } /* * Initialize master. * Note that a physical bus might be shared among Linux and firmware * (e.g., ATF). Thus, the bus should be initialized and ready and * bus initialization would be unnecessary. This requires additional * knowledge about physical busses. But, since an extra initialization * does not really hurt, then keep the code as is. 
*/ ret = mlxbf_i2c_init_master(pdev, priv); if (ret < 0) return dev_err_probe(dev, ret, "failed to initialize smbus master %d", priv->bus); mlxbf_i2c_init_timings(pdev, priv); mlxbf_i2c_init_slave(pdev, priv); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_irq(dev, irq, mlxbf_i2c_irq, IRQF_SHARED | IRQF_PROBE_SHARED, dev_name(dev), priv); if (ret < 0) return dev_err_probe(dev, ret, "Cannot get irq %d\n", irq); priv->irq = irq; platform_set_drvdata(pdev, priv); ret = i2c_add_numbered_adapter(adap); if (ret < 0) return ret; mutex_lock(&mlxbf_i2c_bus_lock); mlxbf_i2c_bus_count++; mutex_unlock(&mlxbf_i2c_bus_lock); return 0; } static void mlxbf_i2c_remove(struct platform_device *pdev) { struct mlxbf_i2c_priv *priv = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; struct resource *params; if (priv->chip->type < MLXBF_I2C_CHIP_TYPE_3 && priv->resource_version == 0) { params = priv->smbus->params; devm_release_mem_region(dev, params->start, resource_size(params)); } else { params = priv->timer->params; devm_release_mem_region(dev, params->start, resource_size(params)); params = priv->mst->params; devm_release_mem_region(dev, params->start, resource_size(params)); params = priv->slv->params; devm_release_mem_region(dev, params->start, resource_size(params)); } params = priv->mst_cause->params; devm_release_mem_region(dev, params->start, resource_size(params)); params = priv->slv_cause->params; devm_release_mem_region(dev, params->start, resource_size(params)); /* * Release shared resources. This should be done when releasing * the I2C controller. 
*/ mutex_lock(&mlxbf_i2c_bus_lock); if (--mlxbf_i2c_bus_count == 0) { mlxbf_i2c_release_coalesce(pdev, priv); mlxbf_i2c_release_corepll(pdev, priv); mlxbf_i2c_release_gpio(pdev, priv); } mutex_unlock(&mlxbf_i2c_bus_lock); devm_free_irq(dev, priv->irq, priv); i2c_del_adapter(&priv->adap); } static struct platform_driver mlxbf_i2c_driver = { .probe = mlxbf_i2c_probe, .remove = mlxbf_i2c_remove, .driver = { .name = "i2c-mlxbf", .acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids), }, }; static int __init mlxbf_i2c_init(void) { mutex_init(&mlxbf_i2c_coalesce_lock); mutex_init(&mlxbf_i2c_corepll_lock); mutex_init(&mlxbf_i2c_gpio_lock); mutex_init(&mlxbf_i2c_bus_lock); return platform_driver_register(&mlxbf_i2c_driver); } module_init(mlxbf_i2c_init); static void __exit mlxbf_i2c_exit(void) { platform_driver_unregister(&mlxbf_i2c_driver); mutex_destroy(&mlxbf_i2c_bus_lock); mutex_destroy(&mlxbf_i2c_gpio_lock); mutex_destroy(&mlxbf_i2c_corepll_lock); mutex_destroy(&mlxbf_i2c_coalesce_lock); } module_exit(mlxbf_i2c_exit); MODULE_DESCRIPTION("Mellanox BlueField I2C bus driver"); MODULE_AUTHOR("Khalil Blaiech <[email protected]>"); MODULE_AUTHOR("Asmaa Mnebhi <[email protected]>"); MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0
/*
 * ddbridge-ci.c: Digital Devices bridge CI (DuoFlex, CI Bridge) support
 *
 * Copyright (C) 2010-2017 Digital Devices GmbH
 *                         Marcus Metzler <[email protected]>
 *                         Ralph Metzler <[email protected]>
 */

#include "ddbridge.h"
#include "ddbridge-regs.h"
#include "ddbridge-ci.h"
#include "ddbridge-io.h"
#include "ddbridge-i2c.h"

#include "cxd2099.h"

/* Octopus CI internal CI interface */

/*
 * Poll the CI_READY bit of the slot's control register.
 * Returns 0 when ready, -1 after ~10 polls without the bit coming up.
 */
static int wait_ci_ready(struct ddb_ci *ci)
{
	u32 count = 10;

	ndelay(500);
	do {
		if (ddbreadl(ci->port->dev,
			     CI_CONTROL(ci->nr)) & CI_READY)
			break;
		usleep_range(1, 2);
		if ((--count) == 0)
			return -1;
	} while (1);
	return 0;
}

/*
 * EN50221 op: read one byte of CAM attribute memory through the
 * bridge's attribute-read command register and readback buffer.
 */
static int read_attribute_mem(struct dvb_ca_en50221 *ca,
			      int slot, int address)
{
	struct ddb_ci *ci = ca->data;
	/* Attribute memory is byte-addressed on even addresses only. */
	u32 val, off = (address >> 1) & (CI_BUFFER_SIZE - 1);

	/* NOTE(review): bound looks like it should be >= — confirm against HW spec. */
	if (address > CI_BUFFER_SIZE)
		return -1;
	ddbwritel(ci->port->dev, CI_READ_CMD | (1 << 16) | address,
		  CI_DO_READ_ATTRIBUTES(ci->nr));
	wait_ci_ready(ci);
	val = 0xff & ddbreadl(ci->port->dev, CI_BUFFER(ci->nr) + off);
	return val;
}

/* EN50221 op: write one byte of CAM attribute memory. */
static int write_attribute_mem(struct dvb_ca_en50221 *ca, int slot,
			       int address, u8 value)
{
	struct ddb_ci *ci = ca->data;

	ddbwritel(ci->port->dev, CI_WRITE_CMD | (value << 16) | address,
		  CI_DO_ATTRIBUTE_RW(ci->nr));
	wait_ci_ready(ci);
	return 0;
}

/*
 * EN50221 op: read a CAM control register; polls CI_READY in the
 * read-data register up to ~100 times before giving up with -1.
 */
static int read_cam_control(struct dvb_ca_en50221 *ca,
			    int slot, u8 address)
{
	u32 count = 100;
	struct ddb_ci *ci = ca->data;
	u32 res;

	ddbwritel(ci->port->dev, CI_READ_CMD | address,
		  CI_DO_IO_RW(ci->nr));
	ndelay(500);
	do {
		res = ddbreadl(ci->port->dev, CI_READDATA(ci->nr));
		if (res & CI_READY)
			break;
		usleep_range(1, 2);
		if ((--count) == 0)
			return -1;
	} while (1);
	return 0xff & res;
}

/* EN50221 op: write a CAM control register. */
static int write_cam_control(struct dvb_ca_en50221 *ca, int slot,
			     u8 address, u8 value)
{
	struct ddb_ci *ci = ca->data;

	ddbwritel(ci->port->dev, CI_WRITE_CMD | (value << 16) | address,
		  CI_DO_IO_RW(ci->nr));
	wait_ci_ready(ci);
	return 0;
}

/*
 * EN50221 op: power-cycle and reset the CAM.
 * The sequence (power on, assert reset, enable, deassert reset) and the
 * delays between steps follow the hardware's required timing.
 */
static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
{
	struct ddb_ci *ci = ca->data;

	ddbwritel(ci->port->dev, CI_POWER_ON,
		  CI_CONTROL(ci->nr));
	msleep(100);
	ddbwritel(ci->port->dev, CI_POWER_ON | CI_RESET_CAM,
		  CI_CONTROL(ci->nr));
	ddbwritel(ci->port->dev, CI_ENABLE | CI_POWER_ON | CI_RESET_CAM,
		  CI_CONTROL(ci->nr));
	usleep_range(20, 25);
	ddbwritel(ci->port->dev, CI_ENABLE | CI_POWER_ON,
		  CI_CONTROL(ci->nr));
	return 0;
}

/* EN50221 op: power the slot down completely. */
static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
{
	struct ddb_ci *ci = ca->data;

	ddbwritel(ci->port->dev, 0, CI_CONTROL(ci->nr));
	msleep(300);
	return 0;
}

/* EN50221 op: route the transport stream through the CAM (disable bypass). */
static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
{
	struct ddb_ci *ci = ca->data;
	u32 val = ddbreadl(ci->port->dev, CI_CONTROL(ci->nr));

	ddbwritel(ci->port->dev, val | CI_BYPASS_DISABLE,
		  CI_CONTROL(ci->nr));
	return 0;
}

/* EN50221 op: report CAM presence/readiness from the control register. */
static int poll_slot_status(struct dvb_ca_en50221 *ca,
			    int slot, int open)
{
	struct ddb_ci *ci = ca->data;
	u32 val = ddbreadl(ci->port->dev, CI_CONTROL(ci->nr));
	int stat = 0;

	if (val & CI_CAM_DETECT)
		stat |= DVB_CA_EN50221_POLL_CAM_PRESENT;
	if (val & CI_CAM_READY)
		stat |= DVB_CA_EN50221_POLL_CAM_READY;
	return stat;
}

/* Op template for the internal (register-mapped) CI interface. */
static struct dvb_ca_en50221 en_templ = {
	.read_attribute_mem = read_attribute_mem,
	.write_attribute_mem = write_attribute_mem,
	.read_cam_control = read_cam_control,
	.write_cam_control = write_cam_control,
	.slot_reset = slot_reset,
	.slot_shutdown = slot_shutdown,
	.slot_ts_enable = slot_ts_enable,
	.poll_slot_status = poll_slot_status,
};

/*
 * Allocate and wire up a ddb_ci for an internal CI port.
 * On allocation failure the port is simply left without CI
 * (port->en stays NULL; the caller checks for that).
 */
static void ci_attach(struct ddb_port *port)
{
	struct ddb_ci *ci;

	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
	if (!ci)
		return;
	memcpy(&ci->en, &en_templ, sizeof(en_templ));
	ci->en.data = ci;
	port->en = &ci->en;
	port->en_freedata = 1;
	ci->port = port;
	ci->nr = port->nr - 2;
}

/* DuoFlex Dual CI support */

/*
 * Read-modify-write the cached control register of an XO2 module
 * over I2C. I2C address depends on the XO2 flavor (0x12 vs 0x13).
 */
static int write_creg(struct ddb_ci *ci, u8 data, u8 mask)
{
	struct i2c_adapter *i2c = &ci->port->i2c->adap;
	u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13;

	ci->port->creg = (ci->port->creg & ~mask) | data;
	return i2c_write_reg(i2c, adr, 0x02, ci->port->creg);
}

/* EN50221 op (XO2): attribute memory is mapped at I2C register 0x8000+addr. */
static int read_attribute_mem_xo2(struct dvb_ca_en50221 *ca,
				  int slot, int address)
{
	struct ddb_ci *ci = ca->data;
	struct i2c_adapter *i2c = &ci->port->i2c->adap;
	u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13;
	int res;
	u8 val;

	res = i2c_read_reg16(i2c, adr, 0x8000 | address, &val);
	return res ? res : val;
}

static int write_attribute_mem_xo2(struct dvb_ca_en50221 *ca, int slot,
				   int address, u8 value)
{
	struct ddb_ci *ci = ca->data;
	struct i2c_adapter *i2c = &ci->port->i2c->adap;
	u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13;

	return i2c_write_reg16(i2c, adr, 0x8000 | address, value);
}

/* EN50221 op (XO2): CAM control registers live at I2C regs 0x20..0x23. */
static int read_cam_control_xo2(struct dvb_ca_en50221 *ca,
				int slot, u8 address)
{
	struct ddb_ci *ci = ca->data;
	struct i2c_adapter *i2c = &ci->port->i2c->adap;
	u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13;
	u8 val;
	int res;

	res = i2c_read_reg(i2c, adr, 0x20 | (address & 3), &val);
	return res ? res : val;
}

static int write_cam_control_xo2(struct dvb_ca_en50221 *ca, int slot,
				 u8 address, u8 value)
{
	struct ddb_ci *ci = ca->data;
	struct i2c_adapter *i2c = &ci->port->i2c->adap;
	u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13;

	return i2c_write_reg(i2c, adr, 0x20 | (address & 3), value);
}

/*
 * EN50221 op (XO2): reset sequence expressed as control-register
 * bit toggles; exact bit meanings are module-firmware defined.
 */
static int slot_reset_xo2(struct dvb_ca_en50221 *ca, int slot)
{
	struct ddb_ci *ci = ca->data;

	dev_dbg(ci->port->dev->dev, "%s\n", __func__);
	write_creg(ci, 0x01, 0x01);
	write_creg(ci, 0x04, 0x04);
	msleep(20);
	write_creg(ci, 0x02, 0x02);
	write_creg(ci, 0x00, 0x04);
	write_creg(ci, 0x18, 0x18);
	return 0;
}

static int slot_shutdown_xo2(struct dvb_ca_en50221 *ca, int slot)
{
	struct ddb_ci *ci = ca->data;

	dev_dbg(ci->port->dev->dev, "%s\n", __func__);
	write_creg(ci, 0x10, 0xff);
	write_creg(ci, 0x08, 0x08);
	return 0;
}

static int slot_ts_enable_xo2(struct dvb_ca_en50221 *ca, int slot)
{
	struct ddb_ci *ci = ca->data;

	dev_dbg(ci->port->dev->dev, "%s\n", __func__);
	write_creg(ci, 0x00, 0x10);
	return 0;
}

/* EN50221 op (XO2): status bits come from I2C status register 0x01. */
static int poll_slot_status_xo2(struct dvb_ca_en50221 *ca,
				int slot, int open)
{
	struct ddb_ci *ci = ca->data;
	struct i2c_adapter *i2c = &ci->port->i2c->adap;
	u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13;
	u8 val = 0;
	int stat = 0;

	i2c_read_reg(i2c, adr, 0x01, &val);
	if (val & 2)
		stat |= DVB_CA_EN50221_POLL_CAM_PRESENT;
	if (val & 1)
		stat |= DVB_CA_EN50221_POLL_CAM_READY;
	return stat;
}

/* Op template for the XO2 (I2C-attached) CI interface. */
static struct dvb_ca_en50221 en_xo2_templ = {
	.read_attribute_mem = read_attribute_mem_xo2,
	.write_attribute_mem = write_attribute_mem_xo2,
	.read_cam_control = read_cam_control_xo2,
	.write_cam_control = write_cam_control_xo2,
	.slot_reset = slot_reset_xo2,
	.slot_shutdown = slot_shutdown_xo2,
	.slot_ts_enable = slot_ts_enable_xo2,
	.poll_slot_status = poll_slot_status_xo2,
};

/*
 * Allocate and wire up a ddb_ci for an XO2 port and put the module
 * into its shut-down/idle state until the first slot_reset.
 */
static void ci_xo2_attach(struct ddb_port *port)
{
	struct ddb_ci *ci;

	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
	if (!ci)
		return;
	memcpy(&ci->en, &en_xo2_templ, sizeof(en_xo2_templ));
	ci->en.data = ci;
	port->en = &ci->en;
	port->en_freedata = 1;
	ci->port = port;
	ci->nr = port->nr - 2;
	ci->port->creg = 0;
	write_creg(ci, 0x10, 0xff);
	write_creg(ci, 0x08, 0x08);
}

/* Default CXD2099 configuration; bitrate is overridden per-attach. */
static const struct cxd2099_cfg cxd_cfgtmpl = {
	.bitrate = 72000,
	.polarity = 1,
	.clock_mode = 1,
	.max_i2c = 512,
};

/*
 * Probe a Sony CXD2099AR CI controller at I2C address 0x40.
 * On success the dvb_module owns the en50221 data (en_freedata = 0).
 */
static int ci_cxd2099_attach(struct ddb_port *port, u32 bitrate)
{
	struct cxd2099_cfg cxd_cfg = cxd_cfgtmpl;
	struct i2c_client *client;

	cxd_cfg.bitrate = bitrate;
	cxd_cfg.en = &port->en;

	client = dvb_module_probe("cxd2099", NULL, &port->i2c->adap,
				  0x40, &cxd_cfg);
	if (!client)
		goto err;

	port->dvb[0].i2c_client[0] = client;
	port->en_freedata = 0;
	return 0;

err:
	dev_err(port->dev->dev, "CXD2099AR attach failed\n");
	return -ENODEV;
}

/*
 * ddb_ci_attach - attach the right CI backend for the port type and
 * register it with the DVB CA core. Returns 0 or -ENODEV.
 */
int ddb_ci_attach(struct ddb_port *port, u32 bitrate)
{
	int ret;

	switch (port->type) {
	case DDB_CI_EXTERNAL_SONY:
		ret = ci_cxd2099_attach(port, bitrate);
		if (ret)
			return -ENODEV;
		break;
	case DDB_CI_EXTERNAL_XO2:
	case DDB_CI_EXTERNAL_XO2_B:
		ci_xo2_attach(port);
		break;
	case DDB_CI_INTERNAL:
		ci_attach(port);
		break;
	default:
		return -ENODEV;
	}
	/* Attach helpers leave port->en NULL on allocation failure. */
	if (!port->en)
		return -ENODEV;

	dvb_ca_en50221_init(port->dvb[0].adap, port->en, 0, 1);
	return 0;
}

/* Undo ddb_ci_attach: release CA core, module client, and owned data. */
void ddb_ci_detach(struct ddb_port *port)
{
	if (port->dvb[0].dev)
		dvb_unregister_device(port->dvb[0].dev);
	if (port->en) {
		dvb_ca_en50221_release(port->en);

		dvb_module_release(port->dvb[0].i2c_client[0]);
		port->dvb[0].i2c_client[0] = NULL;

		/* free alloc'ed memory if needed */
		if (port->en_freedata)
			kfree(port->en->data);
		port->en = NULL;
	}
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * mtk-afe-fe-dais.h  --  Mediatek afe fe dai operator definition
 *
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Garlic Tseng <[email protected]>
 */

#ifndef _MTK_AFE_FE_DAI_H_
#define _MTK_AFE_FE_DAI_H_

struct snd_soc_dai_ops;
struct mtk_base_afe;
struct mtk_base_afe_memif;

/* Front-end DAI PCM lifecycle callbacks (plugged into snd_soc_dai_ops). */
int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai);
void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *dai);
int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params,
			 struct snd_soc_dai *dai);
int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai);
int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai);
int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
		       struct snd_soc_dai *dai);

/* Ready-made ops table wiring the callbacks above. */
extern const struct snd_soc_dai_ops mtk_afe_fe_ops;

/* Dynamic IRQ slot management for the AFE. */
int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe);
int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id);

/* Component-level PM hooks. */
int mtk_afe_suspend(struct snd_soc_component *component);
int mtk_afe_resume(struct snd_soc_component *component);

/* Memory-interface (memif) configuration helpers, keyed by memif id. */
int mtk_memif_set_enable(struct mtk_base_afe *afe, int id);
int mtk_memif_set_disable(struct mtk_base_afe *afe, int id);
int mtk_memif_set_addr(struct mtk_base_afe *afe, int id,
		       unsigned char *dma_area,
		       dma_addr_t dma_addr,
		       size_t dma_bytes);
int mtk_memif_set_channel(struct mtk_base_afe *afe,
			  int id, unsigned int channel);
int mtk_memif_set_rate(struct mtk_base_afe *afe,
		       int id, unsigned int rate);
int mtk_memif_set_rate_substream(struct snd_pcm_substream *substream,
				 int id, unsigned int rate);
int mtk_memif_set_format(struct mtk_base_afe *afe,
			 int id, snd_pcm_format_t format);
int mtk_memif_set_pbuf_size(struct mtk_base_afe *afe,
			    int id, int pbuf_size);

#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ispcsi2.c
 *
 * TI OMAP3 ISP - CSI2 module
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Contacts: Laurent Pinchart <[email protected]>
 *	     Sakari Ailus <[email protected]>
 */
#include <linux/delay.h>
#include <media/v4l2-common.h>
#include <linux/v4l2-mediabus.h>
#include <linux/mm.h>

#include "isp.h"
#include "ispreg.h"
#include "ispcsi2.h"

/*
 * csi2_if_enable - Enable CSI2 Receiver interface.
 * @enable: enable flag
 *
 */
static void csi2_if_enable(struct isp_device *isp,
			   struct isp_csi2_device *csi2, u8 enable)
{
	struct isp_csi2_ctrl_cfg *currctrl = &csi2->ctrl;

	isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_CTRL, ISPCSI2_CTRL_IF_EN,
			enable ? ISPCSI2_CTRL_IF_EN : 0);

	currctrl->if_enable = enable;
}

/*
 * csi2_recv_config - CSI2 receiver module configuration.
 * @currctrl: isp_csi2_ctrl_cfg structure
 *
 * Read-modify-write of ISPCSI2_CTRL from the cached configuration.
 */
static void csi2_recv_config(struct isp_device *isp,
			     struct isp_csi2_device *csi2,
			     struct isp_csi2_ctrl_cfg *currctrl)
{
	u32 reg;

	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTRL);

	if (currctrl->frame_mode)
		reg |= ISPCSI2_CTRL_FRAME;
	else
		reg &= ~ISPCSI2_CTRL_FRAME;

	if (currctrl->vp_clk_enable)
		reg |= ISPCSI2_CTRL_VP_CLK_EN;
	else
		reg &= ~ISPCSI2_CTRL_VP_CLK_EN;

	if (currctrl->vp_only_enable)
		reg |= ISPCSI2_CTRL_VP_ONLY_EN;
	else
		reg &= ~ISPCSI2_CTRL_VP_ONLY_EN;

	reg &= ~ISPCSI2_CTRL_VP_OUT_CTRL_MASK;
	reg |= currctrl->vp_out_ctrl << ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT;

	if (currctrl->ecc_enable)
		reg |= ISPCSI2_CTRL_ECC_EN;
	else
		reg &= ~ISPCSI2_CTRL_ECC_EN;

	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTRL);
}

/* Media bus codes accepted on the CSI2 sink pad. */
static const unsigned int csi2_input_fmts[] = {
	MEDIA_BUS_FMT_SGRBG10_1X10,
	MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
	MEDIA_BUS_FMT_SRGGB10_1X10,
	MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
	MEDIA_BUS_FMT_SBGGR10_1X10,
	MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
	MEDIA_BUS_FMT_SGBRG10_1X10,
	MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
	MEDIA_BUS_FMT_YUYV8_2X8,
};

/* To set the format on the CSI2 requires a mapping function that takes
 * the following inputs:
 * - 3 different formats (at this time)
 * - 2 destinations (mem, vp+mem) (vp only handled separately)
 * - 2 decompression options (on, off)
 * - 2 isp revisions (certain format must be handled differently on OMAP3630)
 * Output should be CSI2 frame format code
 * Array indices as follows: [format][dest][decompr][is_3630]
 * Not all combinations are valid. 0 means invalid.
 */
static const u16 __csi2_fmt_map[3][2][2][2] = {
	/* RAW10 formats */
	{
		/* Output to memory */
		{
			/* No DPCM decompression */
			{ CSI2_PIX_FMT_RAW10_EXP16, CSI2_PIX_FMT_RAW10_EXP16 },
			/* DPCM decompression */
			{ 0, 0 },
		},
		/* Output to both */
		{
			/* No DPCM decompression */
			{ CSI2_PIX_FMT_RAW10_EXP16_VP,
			  CSI2_PIX_FMT_RAW10_EXP16_VP },
			/* DPCM decompression */
			{ 0, 0 },
		},
	},
	/* RAW10 DPCM8 formats */
	{
		/* Output to memory */
		{
			/* No DPCM decompression */
			{ CSI2_PIX_FMT_RAW8, CSI2_USERDEF_8BIT_DATA1 },
			/* DPCM decompression */
			{ CSI2_PIX_FMT_RAW8_DPCM10_EXP16,
			  CSI2_USERDEF_8BIT_DATA1_DPCM10 },
		},
		/* Output to both */
		{
			/* No DPCM decompression */
			{ CSI2_PIX_FMT_RAW8_VP, CSI2_PIX_FMT_RAW8_VP },
			/* DPCM decompression */
			{ CSI2_PIX_FMT_RAW8_DPCM10_VP,
			  CSI2_USERDEF_8BIT_DATA1_DPCM10_VP },
		},
	},
	/* YUYV8 2X8 formats */
	{
		/* Output to memory */
		{
			/* No DPCM decompression */
			{ CSI2_PIX_FMT_YUV422_8BIT,
			  CSI2_PIX_FMT_YUV422_8BIT },
			/* DPCM decompression */
			{ 0, 0 },
		},
		/* Output to both */
		{
			/* No DPCM decompression */
			{ CSI2_PIX_FMT_YUV422_8BIT_VP,
			  CSI2_PIX_FMT_YUV422_8BIT_VP },
			/* DPCM decompression */
			{ 0, 0 },
		},
	},
};

/*
 * csi2_ctx_map_format - Map CSI2 sink media bus format to CSI2 format ID
 * @csi2: ISP CSI2 device
 *
 * Returns CSI2 physical format id (0 for an unsupported media bus code,
 * CSI2_PIX_FMT_OTHERS when no output is enabled).
 */
static u16 csi2_ctx_map_format(struct isp_csi2_device *csi2)
{
	const struct v4l2_mbus_framefmt *fmt = &csi2->formats[CSI2_PAD_SINK];
	int fmtidx, destidx, is_3630;

	switch (fmt->code) {
	case MEDIA_BUS_FMT_SGRBG10_1X10:
	case MEDIA_BUS_FMT_SRGGB10_1X10:
	case MEDIA_BUS_FMT_SBGGR10_1X10:
	case MEDIA_BUS_FMT_SGBRG10_1X10:
		fmtidx = 0;
		break;
	case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
	case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
	case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
	case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
		fmtidx = 1;
		break;
	case MEDIA_BUS_FMT_YUYV8_2X8:
		fmtidx = 2;
		break;
	default:
		WARN(1, KERN_ERR "CSI2: pixel format %08x unsupported!\n",
		     fmt->code);
		return 0;
	}

	if (!(csi2->output & CSI2_OUTPUT_CCDC) &&
	    !(csi2->output & CSI2_OUTPUT_MEMORY)) {
		/* Neither output enabled is a valid combination */
		return CSI2_PIX_FMT_OTHERS;
	}

	/* If we need to skip frames at the beginning of the stream disable the
	 * video port to avoid sending the skipped frames to the CCDC.
	 */
	destidx = csi2->frame_skip ? 0 : !!(csi2->output & CSI2_OUTPUT_CCDC);
	is_3630 = csi2->isp->revision == ISP_REVISION_15_0;

	return __csi2_fmt_map[fmtidx][destidx][csi2->dpcm_decompress][is_3630];
}

/*
 * csi2_set_outaddr - Set memory address to save output image
 * @csi2: Pointer to ISP CSI2a device.
 * @addr: ISP MMU Mapped 32-bit memory address aligned on 32 byte boundary.
 *
 * Sets the memory address where the output will be saved.  Both ping and
 * pong addresses are set to the same buffer; the caller is responsible
 * for passing an address on a 32 byte boundary.
 */
static void csi2_set_outaddr(struct isp_csi2_device *csi2, u32 addr)
{
	struct isp_device *isp = csi2->isp;
	struct isp_csi2_ctx_cfg *ctx = &csi2->contexts[0];

	ctx->ping_addr = addr;
	ctx->pong_addr = addr;
	isp_reg_writel(isp, ctx->ping_addr,
		       csi2->regs1, ISPCSI2_CTX_DAT_PING_ADDR(ctx->ctxnum));
	isp_reg_writel(isp, ctx->pong_addr,
		       csi2->regs1, ISPCSI2_CTX_DAT_PONG_ADDR(ctx->ctxnum));
}

/*
 * is_usr_def_mapping - Checks whether USER_DEF_MAPPING should
 *			be enabled by CSI2.
 * @format_id: mapped format id
 *
 */
static inline int is_usr_def_mapping(u32 format_id)
{
	return (format_id & 0x40) ? 1 : 0;
}

/*
 * csi2_ctx_enable - Enable specified CSI2 context
 * @ctxnum: Context number, valid between 0 and 7 values.
 * @enable: enable
 *
 */
static void csi2_ctx_enable(struct isp_device *isp,
			    struct isp_csi2_device *csi2, u8 ctxnum, u8 enable)
{
	struct isp_csi2_ctx_cfg *ctx = &csi2->contexts[ctxnum];
	unsigned int skip = 0;
	u32 reg;

	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL1(ctxnum));

	if (enable) {
		/* Program the frame-skip count into the context counter. */
		if (csi2->frame_skip)
			skip = csi2->frame_skip;
		else if (csi2->output & CSI2_OUTPUT_MEMORY)
			skip = 1;

		reg &= ~ISPCSI2_CTX_CTRL1_COUNT_MASK;
		reg |= ISPCSI2_CTX_CTRL1_COUNT_UNLOCK
		     | (skip << ISPCSI2_CTX_CTRL1_COUNT_SHIFT)
		     | ISPCSI2_CTX_CTRL1_CTX_EN;
	} else {
		reg &= ~ISPCSI2_CTX_CTRL1_CTX_EN;
	}

	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL1(ctxnum));
	ctx->enabled = enable;
}

/*
 * csi2_ctx_config - CSI2 context configuration.
 * @ctx: context configuration
 *
 * Programs CTRL1/CTRL2/CTRL3, the data offset, and the ping/pong
 * addresses for one receive context.
 */
static void csi2_ctx_config(struct isp_device *isp,
			    struct isp_csi2_device *csi2,
			    struct isp_csi2_ctx_cfg *ctx)
{
	u32 reg;

	/* Set up CSI2_CTx_CTRL1 */
	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL1(ctx->ctxnum));

	if (ctx->eof_enabled)
		reg |= ISPCSI2_CTX_CTRL1_EOF_EN;
	else
		reg &= ~ISPCSI2_CTX_CTRL1_EOF_EN;

	if (ctx->eol_enabled)
		reg |= ISPCSI2_CTX_CTRL1_EOL_EN;
	else
		reg &= ~ISPCSI2_CTX_CTRL1_EOL_EN;

	if (ctx->checksum_enabled)
		reg |= ISPCSI2_CTX_CTRL1_CS_EN;
	else
		reg &= ~ISPCSI2_CTX_CTRL1_CS_EN;

	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL1(ctx->ctxnum));

	/* Set up CSI2_CTx_CTRL2 */
	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL2(ctx->ctxnum));

	reg &= ~(ISPCSI2_CTX_CTRL2_VIRTUAL_ID_MASK);
	reg |= ctx->virtual_id << ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT;

	reg &= ~(ISPCSI2_CTX_CTRL2_FORMAT_MASK);
	reg |= ctx->format_id << ISPCSI2_CTX_CTRL2_FORMAT_SHIFT;

	if (ctx->dpcm_decompress) {
		if (ctx->dpcm_predictor)
			reg |= ISPCSI2_CTX_CTRL2_DPCM_PRED;
		else
			reg &= ~ISPCSI2_CTX_CTRL2_DPCM_PRED;
	}

	if (is_usr_def_mapping(ctx->format_id)) {
		reg &= ~ISPCSI2_CTX_CTRL2_USER_DEF_MAP_MASK;
		reg |= 2 << ISPCSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT;
	}

	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL2(ctx->ctxnum));

	/* Set up CSI2_CTx_CTRL3 */
	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL3(ctx->ctxnum));
	reg &= ~(ISPCSI2_CTX_CTRL3_ALPHA_MASK);
	reg |= (ctx->alpha << ISPCSI2_CTX_CTRL3_ALPHA_SHIFT);

	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL3(ctx->ctxnum));

	/* Set up CSI2_CTx_DAT_OFST */
	reg = isp_reg_readl(isp, csi2->regs1,
			    ISPCSI2_CTX_DAT_OFST(ctx->ctxnum));
	reg &= ~ISPCSI2_CTX_DAT_OFST_OFST_MASK;
	reg |= ctx->data_offset << ISPCSI2_CTX_DAT_OFST_OFST_SHIFT;
	isp_reg_writel(isp, reg, csi2->regs1,
		       ISPCSI2_CTX_DAT_OFST(ctx->ctxnum));

	isp_reg_writel(isp, ctx->ping_addr,
		       csi2->regs1, ISPCSI2_CTX_DAT_PING_ADDR(ctx->ctxnum));

	isp_reg_writel(isp, ctx->pong_addr,
		       csi2->regs1, ISPCSI2_CTX_DAT_PONG_ADDR(ctx->ctxnum));
}

/*
 * csi2_timing_config - CSI2 timing configuration.
 * @timing: csi2_timing_cfg structure
 */
static void csi2_timing_config(struct isp_device *isp,
			       struct isp_csi2_device *csi2,
			       struct isp_csi2_timing_cfg *timing)
{
	u32 reg;

	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_TIMING);

	if (timing->force_rx_mode)
		reg |= ISPCSI2_TIMING_FORCE_RX_MODE_IO(timing->ionum);
	else
		reg &= ~ISPCSI2_TIMING_FORCE_RX_MODE_IO(timing->ionum);

	if (timing->stop_state_16x)
		reg |= ISPCSI2_TIMING_STOP_STATE_X16_IO(timing->ionum);
	else
		reg &= ~ISPCSI2_TIMING_STOP_STATE_X16_IO(timing->ionum);

	if (timing->stop_state_4x)
		reg |= ISPCSI2_TIMING_STOP_STATE_X4_IO(timing->ionum);
	else
		reg &= ~ISPCSI2_TIMING_STOP_STATE_X4_IO(timing->ionum);

	reg &= ~ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_MASK(timing->ionum);
	reg |= timing->stop_state_counter <<
	       ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(timing->ionum);

	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_TIMING);
}

/*
 * csi2_irq_ctx_set - Enables CSI2 Context IRQs.
 * @enable: Enable/disable CSI2 Context interrupts
 *
 * Pending FE status is acked before (un)masking each context IRQ.
 */
static void csi2_irq_ctx_set(struct isp_device *isp,
			     struct isp_csi2_device *csi2, int enable)
{
	int i;

	for (i = 0; i < 8; i++) {
		isp_reg_writel(isp, ISPCSI2_CTX_IRQSTATUS_FE_IRQ, csi2->regs1,
			       ISPCSI2_CTX_IRQSTATUS(i));
		if (enable)
			isp_reg_set(isp, csi2->regs1, ISPCSI2_CTX_IRQENABLE(i),
				    ISPCSI2_CTX_IRQSTATUS_FE_IRQ);
		else
			isp_reg_clr(isp, csi2->regs1, ISPCSI2_CTX_IRQENABLE(i),
				    ISPCSI2_CTX_IRQSTATUS_FE_IRQ);
	}
}

/*
 * csi2_irq_complexio1_set - Enables CSI2 ComplexIO IRQs.
 * @enable: Enable/disable CSI2 ComplexIO #1 interrupts
 */
static void csi2_irq_complexio1_set(struct isp_device *isp,
				    struct isp_csi2_device *csi2, int enable)
{
	u32 reg;

	reg = ISPCSI2_PHY_IRQENABLE_STATEALLULPMEXIT |
		ISPCSI2_PHY_IRQENABLE_STATEALLULPMENTER |
		ISPCSI2_PHY_IRQENABLE_STATEULPM5 |
		ISPCSI2_PHY_IRQENABLE_ERRCONTROL5 |
		ISPCSI2_PHY_IRQENABLE_ERRESC5 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS5 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTHS5 |
		ISPCSI2_PHY_IRQENABLE_STATEULPM4 |
		ISPCSI2_PHY_IRQENABLE_ERRCONTROL4 |
		ISPCSI2_PHY_IRQENABLE_ERRESC4 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS4 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTHS4 |
		ISPCSI2_PHY_IRQENABLE_STATEULPM3 |
		ISPCSI2_PHY_IRQENABLE_ERRCONTROL3 |
		ISPCSI2_PHY_IRQENABLE_ERRESC3 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS3 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTHS3 |
		ISPCSI2_PHY_IRQENABLE_STATEULPM2 |
		ISPCSI2_PHY_IRQENABLE_ERRCONTROL2 |
		ISPCSI2_PHY_IRQENABLE_ERRESC2 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS2 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTHS2 |
		ISPCSI2_PHY_IRQENABLE_STATEULPM1 |
		ISPCSI2_PHY_IRQENABLE_ERRCONTROL1 |
		ISPCSI2_PHY_IRQENABLE_ERRESC1 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS1 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTHS1;
	/* Ack everything, then either add to or clear the enable mask. */
	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_PHY_IRQSTATUS);

	if (enable)
		reg |= isp_reg_readl(isp, csi2->regs1, ISPCSI2_PHY_IRQENABLE);
	else
		reg = 0;

	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_PHY_IRQENABLE);
}

/*
 * csi2_irq_status_set - Enables CSI2 Status IRQs.
 * @enable: Enable/disable CSI2 Status interrupts
 */
static void csi2_irq_status_set(struct isp_device *isp,
				struct isp_csi2_device *csi2, int enable)
{
	u32 reg;

	reg = ISPCSI2_IRQSTATUS_OCP_ERR_IRQ |
		ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ |
		ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ |
		ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
		ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
		ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ |
		ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ |
		ISPCSI2_IRQSTATUS_CONTEXT(0);
	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_IRQSTATUS);

	if (enable)
		reg |= isp_reg_readl(isp, csi2->regs1, ISPCSI2_IRQENABLE);
	else
		reg = 0;

	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_IRQENABLE);
}

/*
 * omap3isp_csi2_reset - Resets the CSI2 module.
 *
 * Must be called with the phy lock held.
 *
 * Returns 0 if successful, or -EBUSY if power command didn't respond.
 */
int omap3isp_csi2_reset(struct isp_csi2_device *csi2)
{
	struct isp_device *isp = csi2->isp;
	u8 soft_reset_retries = 0;
	u32 reg;
	int i;

	if (!csi2->available)
		return -ENODEV;

	if (csi2->phy->entity)
		return -EBUSY;

	isp_reg_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
		    ISPCSI2_SYSCONFIG_SOFT_RESET);

	/* Poll RESET_DONE, up to 5 tries with 100us between polls. */
	do {
		reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_SYSSTATUS) &
				    ISPCSI2_SYSSTATUS_RESET_DONE;
		if (reg == ISPCSI2_SYSSTATUS_RESET_DONE)
			break;
		soft_reset_retries++;
		if (soft_reset_retries < 5)
			udelay(100);
	} while (soft_reset_retries < 5);

	if (soft_reset_retries == 5) {
		dev_err(isp->dev, "CSI2: Soft reset try count exceeded!\n");
		return -EBUSY;
	}

	if (isp->revision == ISP_REVISION_15_0)
		isp_reg_set(isp, csi2->regs1, ISPCSI2_PHY_CFG,
			    ISPCSI2_PHY_CFG_RESET_CTRL);

	/* Wait for the PHY functional-clock domain reset to complete. */
	i = 100;
	do {
		reg = isp_reg_readl(isp, csi2->phy->phy_regs, ISPCSIPHY_REG1)
		    & ISPCSIPHY_REG1_RESET_DONE_CTRLCLK;
		if (reg == ISPCSIPHY_REG1_RESET_DONE_CTRLCLK)
			break;
		udelay(100);
	} while (--i > 0);

	if (i == 0) {
		dev_err(isp->dev,
			"CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
		return -EBUSY;
	}

	if (isp->autoidle)
		isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
				ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
				ISPCSI2_SYSCONFIG_AUTO_IDLE,
				ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SMART |
				((isp->revision == ISP_REVISION_15_0) ?
				 ISPCSI2_SYSCONFIG_AUTO_IDLE : 0));
	else
		isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
				ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
				ISPCSI2_SYSCONFIG_AUTO_IDLE,
				ISPCSI2_SYSCONFIG_MSTANDBY_MODE_NO);

	return 0;
}

/*
 * csi2_configure - program the whole receiver from the current pipeline
 * state (timings, control, context 0, IRQ masks).
 *
 * Returns 0, -EBUSY if the interface/context is already running, or
 * -EPIPE when no bus configuration is available for the external entity.
 */
static int csi2_configure(struct isp_csi2_device *csi2)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
	const struct isp_bus_cfg *buscfg;
	struct isp_device *isp = csi2->isp;
	struct isp_csi2_timing_cfg *timing = &csi2->timing[0];
	struct v4l2_subdev *sensor;
	struct media_pad *pad;

	/*
	 * CSI2 fields that can be updated while the context has
	 * been enabled or the interface has been enabled are not
	 * updated dynamically currently. So we do not allow to
	 * reconfigure if either has been enabled
	 */
	if (csi2->contexts[0].enabled || csi2->ctrl.if_enable)
		return -EBUSY;

	pad = media_pad_remote_pad_first(&csi2->pads[CSI2_PAD_SINK]);
	sensor = media_entity_to_v4l2_subdev(pad->entity);
	buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
	if (WARN_ON(!buscfg))
		return -EPIPE;

	csi2->frame_skip = 0;
	v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip);

	csi2->ctrl.vp_out_ctrl =
		clamp_t(unsigned int, pipe->l3_ick / pipe->external_rate - 1,
			1, 3);
	dev_dbg(isp->dev, "%s: l3_ick %lu, external_rate %u, vp_out_ctrl %u\n",
		__func__, pipe->l3_ick,  pipe->external_rate,
		csi2->ctrl.vp_out_ctrl);
	csi2->ctrl.frame_mode = ISP_CSI2_FRAME_IMMEDIATE;
	csi2->ctrl.ecc_enable = buscfg->bus.csi2.crc;

	timing->ionum = 1;
	timing->force_rx_mode = 1;
	timing->stop_state_16x = 1;
	timing->stop_state_4x = 1;
	timing->stop_state_counter = 0x1FF;

	/*
	 * The CSI2 receiver can't do any format conversion except DPCM
	 * decompression, so every set_format call configures both pads
	 * and enables DPCM decompression as a special case:
	 */
	if (csi2->formats[CSI2_PAD_SINK].code !=
	    csi2->formats[CSI2_PAD_SOURCE].code)
		csi2->dpcm_decompress = true;
	else
		csi2->dpcm_decompress = false;

	csi2->contexts[0].format_id = csi2_ctx_map_format(csi2);

	if (csi2->video_out.bpl_padding == 0)
		csi2->contexts[0].data_offset = 0;
	else
		csi2->contexts[0].data_offset = csi2->video_out.bpl_value;

	/*
	 * Enable end of frame and end of line signals generation for
	 * context 0. These signals are generated from CSI2 receiver to
	 * qualify the last pixel of a frame and the last pixel of a line.
	 * Without enabling the signals CSI2 receiver writes data to memory
	 * beyond buffer size and/or data line offset is not handled correctly.
	 */
	csi2->contexts[0].eof_enabled = 1;
	csi2->contexts[0].eol_enabled = 1;

	csi2_irq_complexio1_set(isp, csi2, 1);
	csi2_irq_ctx_set(isp, csi2, 1);
	csi2_irq_status_set(isp, csi2, 1);

	/* Set configuration (timings, format and links) */
	csi2_timing_config(isp, csi2, timing);
	csi2_recv_config(isp, csi2, &csi2->ctrl);
	csi2_ctx_config(isp, csi2, &csi2->contexts[0]);

	return 0;
}

/*
 * csi2_print_status - Prints CSI2 debug information.
 */
#define CSI2_PRINT_REGISTER(isp, regs, name)\
	dev_dbg(isp->dev, "###CSI2 " #name "=0x%08x\n", \
		isp_reg_readl(isp, regs, ISPCSI2_##name))

static void csi2_print_status(struct isp_csi2_device *csi2)
{
	struct isp_device *isp = csi2->isp;

	if (!csi2->available)
		return;

	dev_dbg(isp->dev, "-------------CSI2 Register dump-------------\n");

	CSI2_PRINT_REGISTER(isp, csi2->regs1, SYSCONFIG);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, SYSSTATUS);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, IRQENABLE);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, IRQSTATUS);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTRL);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, DBG_H);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, GNQ);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_CFG);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_IRQSTATUS);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, SHORT_PACKET);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_IRQENABLE);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, DBG_P);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, TIMING);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL1(0));
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL2(0));
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_OFST(0));
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_PING_ADDR(0));
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_PONG_ADDR(0));
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_IRQENABLE(0));
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_IRQSTATUS(0));
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL3(0));

	dev_dbg(isp->dev, "--------------------------------------------\n");
}

/* -----------------------------------------------------------------------------
 * Interrupt handling
 */

/*
 * csi2_isr_buffer - Does buffer handling at end-of-frame
 * when writing to memory.
 */
static void csi2_isr_buffer(struct isp_csi2_device *csi2)
{
	struct isp_device *isp = csi2->isp;
	struct isp_buffer *buffer;

	csi2_ctx_enable(isp, csi2, 0, 0);

	buffer = omap3isp_video_buffer_next(&csi2->video_out);

	/*
	 * Let video queue operation restart engine if there is an underrun
	 * condition.
	 */
	if (buffer == NULL)
		return;

	csi2_set_outaddr(csi2, buffer->dma);
	csi2_ctx_enable(isp, csi2, 0, 1);
}

/*
 * csi2_isr_ctx - per-context interrupt handler: ack status, handle
 * frame skipping, and process end-of-frame buffer rotation.
 */
static void csi2_isr_ctx(struct isp_csi2_device *csi2,
			 struct isp_csi2_ctx_cfg *ctx)
{
	struct isp_device *isp = csi2->isp;
	unsigned int n = ctx->ctxnum;
	u32 status;

	status = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n));
	isp_reg_writel(isp, status, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n));

	if (!(status & ISPCSI2_CTX_IRQSTATUS_FE_IRQ))
		return;

	/* Skip interrupts until we reach the frame skip count. The CSI2 will be
	 * automatically disabled, as the frame skip count has been programmed
	 * in the CSI2_CTx_CTRL1::COUNT field, so re-enable it.
	 *
	 * It would have been nice to rely on the FRAME_NUMBER interrupt instead
	 * but it turned out that the interrupt is only generated when the CSI2
	 * writes to memory (the CSI2_CTx_CTRL1::COUNT field is decreased
	 * correctly and reaches 0 when data is forwarded to the video port only
	 * but no interrupt arrives). Maybe a CSI2 hardware bug.
	 */
	if (csi2->frame_skip) {
		csi2->frame_skip--;
		if (csi2->frame_skip == 0) {
			ctx->format_id = csi2_ctx_map_format(csi2);
			csi2_ctx_config(isp, csi2, ctx);
			csi2_ctx_enable(isp, csi2, n, 1);
		}
		return;
	}

	if (csi2->output & CSI2_OUTPUT_MEMORY)
		csi2_isr_buffer(csi2);
}

/*
 * omap3isp_csi2_isr - CSI2 interrupt handling.
 */
void omap3isp_csi2_isr(struct isp_csi2_device *csi2)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
	u32 csi2_irqstatus, cpxio1_irqstatus;
	struct isp_device *isp = csi2->isp;

	if (!csi2->available)
		return;

	csi2_irqstatus = isp_reg_readl(isp, csi2->regs1, ISPCSI2_IRQSTATUS);
	isp_reg_writel(isp, csi2_irqstatus, csi2->regs1, ISPCSI2_IRQSTATUS);

	/* Failure Cases */
	if (csi2_irqstatus & ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ) {
		cpxio1_irqstatus = isp_reg_readl(isp, csi2->regs1,
						 ISPCSI2_PHY_IRQSTATUS);
		isp_reg_writel(isp, cpxio1_irqstatus,
			       csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
		dev_dbg(isp->dev, "CSI2: ComplexIO Error IRQ %x\n",
			cpxio1_irqstatus);
		pipe->error = true;
	}

	if (csi2_irqstatus & (ISPCSI2_IRQSTATUS_OCP_ERR_IRQ |
			      ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ |
			      ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
			      ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
			      ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ)) {
		dev_dbg(isp->dev,
			"CSI2 Err: OCP:%d, Short_pack:%d, ECC:%d, CPXIO2:%d, FIFO_OVF:%d,\n",
			(csi2_irqstatus &
			 ISPCSI2_IRQSTATUS_OCP_ERR_IRQ) ? 1 : 0,
			(csi2_irqstatus &
			 ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ) ? 1 : 0,
			(csi2_irqstatus &
			 ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ) ? 1 : 0,
			(csi2_irqstatus &
			 ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ) ? 1 : 0,
			(csi2_irqstatus &
			 ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ) ? 1 : 0);
		pipe->error = true;
	}

	if (omap3isp_module_sync_is_stopping(&csi2->wait, &csi2->stopping))
		return;

	/* Successful cases */
	if (csi2_irqstatus & ISPCSI2_IRQSTATUS_CONTEXT(0))
		csi2_isr_ctx(csi2, &csi2->contexts[0]);

	if (csi2_irqstatus & ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ)
		dev_dbg(isp->dev, "CSI2: ECC correction done\n");
}

/* -----------------------------------------------------------------------------
 * ISP video operations
 */

/*
 * csi2_queue - Queues the first buffer when using memory output
 * @video: The video node
 * @buffer: buffer to queue
 */
static int csi2_queue(struct isp_video *video, struct isp_buffer *buffer)
{
	struct isp_device *isp = video->isp;
	struct isp_csi2_device *csi2 = &isp->isp_csi2a;

	csi2_set_outaddr(csi2, buffer->dma);

	/*
	 * If streaming was enabled before there was a buffer queued
	 * or underrun happened in the ISR, the hardware was not enabled
	 * and DMA queue flag ISP_VIDEO_DMAQUEUE_UNDERRUN is still set.
	 * Enable it now.
	 */
	if (csi2->video_out.dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
		/* Enable / disable context 0 and IRQs */
		csi2_if_enable(isp, csi2, 1);
		csi2_ctx_enable(isp, csi2, 0, 1);
		isp_video_dmaqueue_flags_clr(&csi2->video_out);
	}

	return 0;
}

static const struct isp_video_operations csi2_ispvideo_ops = {
	.queue = csi2_queue,
};

/* -----------------------------------------------------------------------------
 * V4L2 subdev operations
 */

/*
 * __csi2_get_format - return the TRY or ACTIVE format for a pad,
 * depending on @which.
 */
static struct v4l2_mbus_framefmt *
__csi2_get_format(struct isp_csi2_device *csi2,
		  struct v4l2_subdev_state *sd_state,
		  unsigned int pad, enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_state_get_format(sd_state, pad);
	else
		return &csi2->formats[pad];
}

/*
 * csi2_try_format - clamp/adjust a requested format for a pad.
 * The source pad mirrors the sink, except that requesting the
 * uncompressed equivalent of a DPCM sink code enables decompression.
 */
static void
csi2_try_format(struct isp_csi2_device *csi2,
		struct v4l2_subdev_state *sd_state,
		unsigned int pad,
		struct v4l2_mbus_framefmt *fmt,
		enum v4l2_subdev_format_whence which)
{
	u32 pixelcode;
	struct v4l2_mbus_framefmt *format;
	const struct isp_format_info *info;
	unsigned int i;

	switch (pad) {
	case CSI2_PAD_SINK:
		/* Clamp the width and height to valid range (1-8191). */
		for (i = 0; i < ARRAY_SIZE(csi2_input_fmts); i++) {
			if (fmt->code == csi2_input_fmts[i])
				break;
		}

		/* If not found, use SGRBG10 as default */
		if (i >= ARRAY_SIZE(csi2_input_fmts))
			fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;

		fmt->width = clamp_t(u32, fmt->width, 1, 8191);
		fmt->height = clamp_t(u32, fmt->height, 1, 8191);
		break;

	case CSI2_PAD_SOURCE:
		/* Source format same as sink format, except for DPCM
		 * compression.
		 */
		pixelcode = fmt->code;
		format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SINK,
					   which);
		memcpy(fmt, format, sizeof(*fmt));

		/*
		 * Only Allow DPCM decompression, and check that the
		 * pattern is preserved
		 */
		info = omap3isp_video_format_info(fmt->code);
		if (info->uncompressed == pixelcode)
			fmt->code = pixelcode;
		break;
	}

	/* RGB, non-interlaced */
	fmt->colorspace = V4L2_COLORSPACE_SRGB;
	fmt->field = V4L2_FIELD_NONE;
}

/*
 * csi2_enum_mbus_code - Handle pixel format enumeration
 * @sd     : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @code   : pointer to v4l2_subdev_mbus_code_enum structure
 * return -EINVAL or zero on success
 */
static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;
	const struct isp_format_info *info;

	if (code->pad == CSI2_PAD_SINK) {
		if (code->index >= ARRAY_SIZE(csi2_input_fmts))
			return -EINVAL;

		code->code = csi2_input_fmts[code->index];
	} else {
		format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SINK,
					   code->which);
		switch (code->index) {
		case 0:
			/* Passthrough sink pad code */
			code->code = format->code;
			break;
		case 1:
			/* Uncompressed code */
			info = omap3isp_video_format_info(format->code);
			if (info->uncompressed == format->code)
				return -EINVAL;

			code->code = info->uncompressed;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * csi2_enum_frame_size - enumerate min/max frame size for a given code
 * by probing csi2_try_format at the extremes.
 */
static int csi2_enum_frame_size(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *sd_state,
				struct v4l2_subdev_frame_size_enum *fse)
{
	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt format;

	if (fse->index != 0)
		return -EINVAL;

	format.code = fse->code;
	format.width = 1;
	format.height = 1;
	csi2_try_format(csi2, sd_state, fse->pad, &format, fse->which);
	fse->min_width = format.width;
	fse->min_height = format.height;

	if (format.code != fse->code)
		return -EINVAL;

	format.code = fse->code;
	format.width = -1;
	format.height = -1;
	csi2_try_format(csi2, sd_state, fse->pad, &format, fse->which);
	fse->max_width = format.width;
	fse->max_height = format.height;

	return 0;
}

/*
 * csi2_get_format - Handle get format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int csi2_get_format(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state,
			   struct v4l2_subdev_format *fmt)
{
	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __csi2_get_format(csi2, sd_state, fmt->pad, fmt->which);
	if (format == NULL)
		return -EINVAL;

	fmt->format = *format;
	return 0;
}

/*
 * csi2_set_format - Handle set format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int csi2_set_format(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state,
			   struct v4l2_subdev_format *fmt)
{
	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __csi2_get_format(csi2, sd_state, fmt->pad, fmt->which);
	if (format == NULL)
		return -EINVAL;

	csi2_try_format(csi2, sd_state, fmt->pad, &fmt->format, fmt->which);
	*format = fmt->format;

	/* Propagate the format from sink to source */
	if (fmt->pad == CSI2_PAD_SINK) {
		format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SOURCE,
					   fmt->which);
		*format = fmt->format;
		csi2_try_format(csi2, sd_state, CSI2_PAD_SOURCE, format,
				fmt->which);
	}

	return 0;
}

/*
 * csi2_init_formats - Initialize formats on all pads
 * @sd: ISP CSI2 V4L2 subdevice
 * @fh: V4L2 subdev file handle
 *
 * Initialize all pad formats with default values. If fh is not NULL, try
 * formats are initialized on the file handle. Otherwise active formats are
 * initialized on the device.
 */
static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct v4l2_subdev_format format;

	memset(&format, 0, sizeof(format));
	format.pad = CSI2_PAD_SINK;
	format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
	format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
	format.format.width = 4096;
	format.format.height = 4096;
	csi2_set_format(sd, fh ? fh->state : NULL, &format);

	return 0;
}

/*
 * csi2_set_stream - Enable/Disable streaming on the CSI2 module
 * @sd: ISP CSI2 V4L2 subdevice
 * @enable: ISP pipeline stream state
 *
 * Return 0 on success or a negative error code otherwise.
 */
static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
{
	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct isp_device *isp = csi2->isp;
	struct isp_video *video_out = &csi2->video_out;

	switch (enable) {
	case ISP_PIPELINE_STREAM_CONTINUOUS:
		if (omap3isp_csiphy_acquire(csi2->phy, &sd->entity) < 0)
			return -ENODEV;
		if (csi2->output & CSI2_OUTPUT_MEMORY)
			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CSI2A_WRITE);
		csi2_configure(csi2);
		csi2_print_status(csi2);

		/*
		 * When outputting to memory with no buffer available, let the
		 * buffer queue handler start the hardware. A DMA queue flag
		 * ISP_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
		 * a buffer available.
*/ if (csi2->output & CSI2_OUTPUT_MEMORY && !(video_out->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED)) break; /* Enable context 0 and IRQs */ atomic_set(&csi2->stopping, 0); csi2_ctx_enable(isp, csi2, 0, 1); csi2_if_enable(isp, csi2, 1); isp_video_dmaqueue_flags_clr(video_out); break; case ISP_PIPELINE_STREAM_STOPPED: if (csi2->state == ISP_PIPELINE_STREAM_STOPPED) return 0; if (omap3isp_module_sync_idle(&sd->entity, &csi2->wait, &csi2->stopping)) dev_dbg(isp->dev, "%s: module stop timeout.\n", sd->name); csi2_ctx_enable(isp, csi2, 0, 0); csi2_if_enable(isp, csi2, 0); csi2_irq_ctx_set(isp, csi2, 0); omap3isp_csiphy_release(csi2->phy); isp_video_dmaqueue_flags_clr(video_out); omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_CSI2A_WRITE); break; } csi2->state = enable; return 0; } /* subdev video operations */ static const struct v4l2_subdev_video_ops csi2_video_ops = { .s_stream = csi2_set_stream, }; /* subdev pad operations */ static const struct v4l2_subdev_pad_ops csi2_pad_ops = { .enum_mbus_code = csi2_enum_mbus_code, .enum_frame_size = csi2_enum_frame_size, .get_fmt = csi2_get_format, .set_fmt = csi2_set_format, }; /* subdev operations */ static const struct v4l2_subdev_ops csi2_ops = { .video = &csi2_video_ops, .pad = &csi2_pad_ops, }; /* subdev internal operations */ static const struct v4l2_subdev_internal_ops csi2_internal_ops = { .open = csi2_init_formats, }; /* ----------------------------------------------------------------------------- * Media entity operations */ /* * csi2_link_setup - Setup CSI2 connections. 
* @entity : Pointer to media entity structure * @local : Pointer to local pad array * @remote : Pointer to remote pad array * @flags : Link flags * return -EINVAL or zero on success */ static int csi2_link_setup(struct media_entity *entity, const struct media_pad *local, const struct media_pad *remote, u32 flags) { struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd); struct isp_csi2_ctrl_cfg *ctrl = &csi2->ctrl; unsigned int index = local->index; /* * The ISP core doesn't support pipelines with multiple video outputs. * Revisit this when it will be implemented, and return -EBUSY for now. */ /* FIXME: this is actually a hack! */ if (is_media_entity_v4l2_subdev(remote->entity)) index |= 2 << 16; switch (index) { case CSI2_PAD_SOURCE: if (flags & MEDIA_LNK_FL_ENABLED) { if (csi2->output & ~CSI2_OUTPUT_MEMORY) return -EBUSY; csi2->output |= CSI2_OUTPUT_MEMORY; } else { csi2->output &= ~CSI2_OUTPUT_MEMORY; } break; case CSI2_PAD_SOURCE | 2 << 16: if (flags & MEDIA_LNK_FL_ENABLED) { if (csi2->output & ~CSI2_OUTPUT_CCDC) return -EBUSY; csi2->output |= CSI2_OUTPUT_CCDC; } else { csi2->output &= ~CSI2_OUTPUT_CCDC; } break; default: /* Link from camera to CSI2 is fixed... */ return -EINVAL; } ctrl->vp_only_enable = (csi2->output & CSI2_OUTPUT_MEMORY) ? false : true; ctrl->vp_clk_enable = !!(csi2->output & CSI2_OUTPUT_CCDC); return 0; } /* media operations */ static const struct media_entity_operations csi2_media_ops = { .link_setup = csi2_link_setup, .link_validate = v4l2_subdev_link_validate, }; void omap3isp_csi2_unregister_entities(struct isp_csi2_device *csi2) { v4l2_device_unregister_subdev(&csi2->subdev); omap3isp_video_unregister(&csi2->video_out); } int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2, struct v4l2_device *vdev) { int ret; /* Register the subdev and video nodes. 
*/ csi2->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &csi2->subdev); if (ret < 0) goto error; ret = omap3isp_video_register(&csi2->video_out, vdev); if (ret < 0) goto error; return 0; error: omap3isp_csi2_unregister_entities(csi2); return ret; } /* ----------------------------------------------------------------------------- * ISP CSI2 initialisation and cleanup */ /* * csi2_init_entities - Initialize subdev and media entity. * @csi2: Pointer to csi2 structure. * return -ENOMEM or zero on success */ static int csi2_init_entities(struct isp_csi2_device *csi2) { struct v4l2_subdev *sd = &csi2->subdev; struct media_pad *pads = csi2->pads; struct media_entity *me = &sd->entity; int ret; v4l2_subdev_init(sd, &csi2_ops); sd->internal_ops = &csi2_internal_ops; strscpy(sd->name, "OMAP3 ISP CSI2a", sizeof(sd->name)); sd->grp_id = 1 << 16; /* group ID for isp subdevs */ v4l2_set_subdevdata(sd, csi2); sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; me->ops = &csi2_media_ops; ret = media_entity_pads_init(me, CSI2_PADS_NUM, pads); if (ret < 0) return ret; csi2_init_formats(sd, NULL); /* Video device node */ csi2->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; csi2->video_out.ops = &csi2_ispvideo_ops; csi2->video_out.bpl_alignment = 32; csi2->video_out.bpl_zero_padding = 1; csi2->video_out.bpl_max = 0x1ffe0; csi2->video_out.isp = csi2->isp; csi2->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3; ret = omap3isp_video_init(&csi2->video_out, "CSI2a"); if (ret < 0) goto error_video; return 0; error_video: media_entity_cleanup(&csi2->subdev.entity); return ret; } /* * omap3isp_csi2_init - Routine for module driver init */ int omap3isp_csi2_init(struct isp_device *isp) { struct isp_csi2_device *csi2a = &isp->isp_csi2a; struct isp_csi2_device *csi2c = &isp->isp_csi2c; int ret; csi2a->isp = isp; csi2a->available = 1; csi2a->regs1 = 
OMAP3_ISP_IOMEM_CSI2A_REGS1; csi2a->regs2 = OMAP3_ISP_IOMEM_CSI2A_REGS2; csi2a->phy = &isp->isp_csiphy2; csi2a->state = ISP_PIPELINE_STREAM_STOPPED; init_waitqueue_head(&csi2a->wait); ret = csi2_init_entities(csi2a); if (ret < 0) return ret; if (isp->revision == ISP_REVISION_15_0) { csi2c->isp = isp; csi2c->available = 1; csi2c->regs1 = OMAP3_ISP_IOMEM_CSI2C_REGS1; csi2c->regs2 = OMAP3_ISP_IOMEM_CSI2C_REGS2; csi2c->phy = &isp->isp_csiphy1; csi2c->state = ISP_PIPELINE_STREAM_STOPPED; init_waitqueue_head(&csi2c->wait); } return 0; } /* * omap3isp_csi2_cleanup - Routine for module driver cleanup */ void omap3isp_csi2_cleanup(struct isp_device *isp) { struct isp_csi2_device *csi2a = &isp->isp_csi2a; omap3isp_video_cleanup(&csi2a->video_out); media_entity_cleanup(&csi2a->subdev.entity); }
// SPDX-License-Identifier: GPL-2.0-only /* * dice_pcm.c - a part of driver for DICE based devices * * Copyright (c) Clemens Ladisch <[email protected]> * Copyright (c) 2014 Takashi Sakamoto <[email protected]> */ #include "dice.h" static int dice_rate_constraint(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_pcm_substream *substream = rule->private; struct snd_dice *dice = substream->private_data; unsigned int index = substream->pcm->device; const struct snd_interval *c = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS); struct snd_interval *r = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval rates = { .min = UINT_MAX, .max = 0, .integer = 1 }; unsigned int *pcm_channels; enum snd_dice_rate_mode mode; unsigned int i, rate; if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) pcm_channels = dice->tx_pcm_chs[index]; else pcm_channels = dice->rx_pcm_chs[index]; for (i = 0; i < ARRAY_SIZE(snd_dice_rates); ++i) { rate = snd_dice_rates[i]; if (snd_dice_stream_get_rate_mode(dice, rate, &mode) < 0) continue; if (!snd_interval_test(c, pcm_channels[mode])) continue; rates.min = min(rates.min, rate); rates.max = max(rates.max, rate); } return snd_interval_refine(r, &rates); } static int dice_channels_constraint(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_pcm_substream *substream = rule->private; struct snd_dice *dice = substream->private_data; unsigned int index = substream->pcm->device; const struct snd_interval *r = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); struct snd_interval channels = { .min = UINT_MAX, .max = 0, .integer = 1 }; unsigned int *pcm_channels; enum snd_dice_rate_mode mode; unsigned int i, rate; if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) pcm_channels = dice->tx_pcm_chs[index]; else pcm_channels = dice->rx_pcm_chs[index]; for (i = 0; i < ARRAY_SIZE(snd_dice_rates); 
++i) { rate = snd_dice_rates[i]; if (snd_dice_stream_get_rate_mode(dice, rate, &mode) < 0) continue; if (!snd_interval_test(r, rate)) continue; channels.min = min(channels.min, pcm_channels[mode]); channels.max = max(channels.max, pcm_channels[mode]); } return snd_interval_refine(c, &channels); } static int limit_channels_and_rates(struct snd_dice *dice, struct snd_pcm_runtime *runtime, enum amdtp_stream_direction dir, unsigned int index) { struct snd_pcm_hardware *hw = &runtime->hw; unsigned int *pcm_channels; unsigned int i; if (dir == AMDTP_IN_STREAM) pcm_channels = dice->tx_pcm_chs[index]; else pcm_channels = dice->rx_pcm_chs[index]; hw->channels_min = UINT_MAX; hw->channels_max = 0; for (i = 0; i < ARRAY_SIZE(snd_dice_rates); ++i) { enum snd_dice_rate_mode mode; unsigned int rate, channels; rate = snd_dice_rates[i]; if (snd_dice_stream_get_rate_mode(dice, rate, &mode) < 0) continue; hw->rates |= snd_pcm_rate_to_rate_bit(rate); channels = pcm_channels[mode]; if (channels == 0) continue; hw->channels_min = min(hw->channels_min, channels); hw->channels_max = max(hw->channels_max, channels); } snd_pcm_limit_hw_rates(runtime); return 0; } static int init_hw_info(struct snd_dice *dice, struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_hardware *hw = &runtime->hw; unsigned int index = substream->pcm->device; enum amdtp_stream_direction dir; struct amdtp_stream *stream; int err; if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { hw->formats = AM824_IN_PCM_FORMAT_BITS; dir = AMDTP_IN_STREAM; stream = &dice->tx_stream[index]; } else { hw->formats = AM824_OUT_PCM_FORMAT_BITS; dir = AMDTP_OUT_STREAM; stream = &dice->rx_stream[index]; } err = limit_channels_and_rates(dice, substream->runtime, dir, index); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, dice_rate_constraint, substream, SNDRV_PCM_HW_PARAM_CHANNELS, -1); if (err < 0) return err; err = 
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, dice_channels_constraint, substream, SNDRV_PCM_HW_PARAM_RATE, -1); if (err < 0) return err; return amdtp_am824_add_pcm_hw_constraints(stream, runtime); } static int pcm_open(struct snd_pcm_substream *substream) { struct snd_dice *dice = substream->private_data; struct amdtp_domain *d = &dice->domain; unsigned int source; bool internal; int err; err = snd_dice_stream_lock_try(dice); if (err < 0) return err; err = init_hw_info(dice, substream); if (err < 0) goto err_locked; err = snd_dice_transaction_get_clock_source(dice, &source); if (err < 0) goto err_locked; switch (source) { case CLOCK_SOURCE_AES1: case CLOCK_SOURCE_AES2: case CLOCK_SOURCE_AES3: case CLOCK_SOURCE_AES4: case CLOCK_SOURCE_AES_ANY: case CLOCK_SOURCE_ADAT: case CLOCK_SOURCE_TDIF: case CLOCK_SOURCE_WC: internal = false; break; default: internal = true; break; } mutex_lock(&dice->mutex); // When source of clock is not internal or any stream is reserved for // transmission of PCM frames, the available sampling rate is limited // at current one. if (!internal || (dice->substreams_counter > 0 && d->events_per_period > 0)) { unsigned int frames_per_period = d->events_per_period; unsigned int frames_per_buffer = d->events_per_buffer; unsigned int rate; err = snd_dice_transaction_get_rate(dice, &rate); if (err < 0) { mutex_unlock(&dice->mutex); goto err_locked; } substream->runtime->hw.rate_min = rate; substream->runtime->hw.rate_max = rate; if (frames_per_period > 0) { // For double_pcm_frame quirk. 
if (rate > 96000 && !dice->disable_double_pcm_frames) { frames_per_period *= 2; frames_per_buffer *= 2; } err = snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, frames_per_period, frames_per_period); if (err < 0) { mutex_unlock(&dice->mutex); goto err_locked; } err = snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, frames_per_buffer, frames_per_buffer); if (err < 0) { mutex_unlock(&dice->mutex); goto err_locked; } } } mutex_unlock(&dice->mutex); snd_pcm_set_sync(substream); return 0; err_locked: snd_dice_stream_lock_release(dice); return err; } static int pcm_close(struct snd_pcm_substream *substream) { struct snd_dice *dice = substream->private_data; snd_dice_stream_lock_release(dice); return 0; } static int pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_dice *dice = substream->private_data; int err = 0; if (substream->runtime->state == SNDRV_PCM_STATE_OPEN) { unsigned int rate = params_rate(hw_params); unsigned int events_per_period = params_period_size(hw_params); unsigned int events_per_buffer = params_buffer_size(hw_params); mutex_lock(&dice->mutex); // For double_pcm_frame quirk. 
if (rate > 96000 && !dice->disable_double_pcm_frames) { events_per_period /= 2; events_per_buffer /= 2; } err = snd_dice_stream_reserve_duplex(dice, rate, events_per_period, events_per_buffer); if (err >= 0) ++dice->substreams_counter; mutex_unlock(&dice->mutex); } return err; } static int pcm_hw_free(struct snd_pcm_substream *substream) { struct snd_dice *dice = substream->private_data; mutex_lock(&dice->mutex); if (substream->runtime->state != SNDRV_PCM_STATE_OPEN) --dice->substreams_counter; snd_dice_stream_stop_duplex(dice); mutex_unlock(&dice->mutex); return 0; } static int capture_prepare(struct snd_pcm_substream *substream) { struct snd_dice *dice = substream->private_data; struct amdtp_stream *stream = &dice->tx_stream[substream->pcm->device]; int err; mutex_lock(&dice->mutex); err = snd_dice_stream_start_duplex(dice); mutex_unlock(&dice->mutex); if (err >= 0) amdtp_stream_pcm_prepare(stream); return 0; } static int playback_prepare(struct snd_pcm_substream *substream) { struct snd_dice *dice = substream->private_data; struct amdtp_stream *stream = &dice->rx_stream[substream->pcm->device]; int err; mutex_lock(&dice->mutex); err = snd_dice_stream_start_duplex(dice); mutex_unlock(&dice->mutex); if (err >= 0) amdtp_stream_pcm_prepare(stream); return err; } static int capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_dice *dice = substream->private_data; struct amdtp_stream *stream = &dice->tx_stream[substream->pcm->device]; switch (cmd) { case SNDRV_PCM_TRIGGER_START: amdtp_stream_pcm_trigger(stream, substream); break; case SNDRV_PCM_TRIGGER_STOP: amdtp_stream_pcm_trigger(stream, NULL); break; default: return -EINVAL; } return 0; } static int playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_dice *dice = substream->private_data; struct amdtp_stream *stream = &dice->rx_stream[substream->pcm->device]; switch (cmd) { case SNDRV_PCM_TRIGGER_START: amdtp_stream_pcm_trigger(stream, substream); break; case 
SNDRV_PCM_TRIGGER_STOP: amdtp_stream_pcm_trigger(stream, NULL); break; default: return -EINVAL; } return 0; } static snd_pcm_uframes_t capture_pointer(struct snd_pcm_substream *substream) { struct snd_dice *dice = substream->private_data; struct amdtp_stream *stream = &dice->tx_stream[substream->pcm->device]; return amdtp_domain_stream_pcm_pointer(&dice->domain, stream); } static snd_pcm_uframes_t playback_pointer(struct snd_pcm_substream *substream) { struct snd_dice *dice = substream->private_data; struct amdtp_stream *stream = &dice->rx_stream[substream->pcm->device]; return amdtp_domain_stream_pcm_pointer(&dice->domain, stream); } static int capture_ack(struct snd_pcm_substream *substream) { struct snd_dice *dice = substream->private_data; struct amdtp_stream *stream = &dice->tx_stream[substream->pcm->device]; return amdtp_domain_stream_pcm_ack(&dice->domain, stream); } static int playback_ack(struct snd_pcm_substream *substream) { struct snd_dice *dice = substream->private_data; struct amdtp_stream *stream = &dice->rx_stream[substream->pcm->device]; return amdtp_domain_stream_pcm_ack(&dice->domain, stream); } int snd_dice_create_pcm(struct snd_dice *dice) { static const struct snd_pcm_ops capture_ops = { .open = pcm_open, .close = pcm_close, .hw_params = pcm_hw_params, .hw_free = pcm_hw_free, .prepare = capture_prepare, .trigger = capture_trigger, .pointer = capture_pointer, .ack = capture_ack, }; static const struct snd_pcm_ops playback_ops = { .open = pcm_open, .close = pcm_close, .hw_params = pcm_hw_params, .hw_free = pcm_hw_free, .prepare = playback_prepare, .trigger = playback_trigger, .pointer = playback_pointer, .ack = playback_ack, }; struct snd_pcm *pcm; unsigned int capture, playback; int i, j; int err; for (i = 0; i < MAX_STREAMS; i++) { capture = playback = 0; for (j = 0; j < SND_DICE_RATE_MODE_COUNT; ++j) { if (dice->tx_pcm_chs[i][j] > 0) capture = 1; if (dice->rx_pcm_chs[i][j] > 0) playback = 1; } err = snd_pcm_new(dice->card, "DICE", i, 
playback, capture, &pcm); if (err < 0) return err; pcm->private_data = dice; pcm->nonatomic = true; strcpy(pcm->name, dice->card->shortname); if (capture > 0) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops); if (playback > 0) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops); snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0); } return 0; }
// SPDX-License-Identifier: GPL-2.0 #include <linux/nmi.h> #include <linux/cpufreq.h> #include <linux/perf/arm_pmu.h> /* * Safe maximum CPU frequency in case a particular platform doesn't implement * cpufreq driver. Although, architecture doesn't put any restrictions on * maximum frequency but 5 GHz seems to be safe maximum given the available * Arm CPUs in the market which are clocked much less than 5 GHz. On the other * hand, we can't make it much higher as it would lead to a large hard-lockup * detection timeout on parts which are running slower (eg. 1GHz on * Developerbox) and doesn't possess a cpufreq driver. */ #define SAFE_MAX_CPU_FREQ 5000000000UL // 5 GHz u64 hw_nmi_get_sample_period(int watchdog_thresh) { unsigned int cpu = smp_processor_id(); unsigned long max_cpu_freq; max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL; if (!max_cpu_freq) max_cpu_freq = SAFE_MAX_CPU_FREQ; return (u64)max_cpu_freq * watchdog_thresh; } bool __init arch_perf_nmi_is_available(void) { /* * hardlockup_detector_perf_init() will success even if Pseudo-NMI turns off, * however, the pmu interrupts will act like a normal interrupt instead of * NMI and the hardlockup detector would be broken. */ return arm_pmu_irq_is_nmi(); }
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2021 Cadence Design Systems Inc. */

#ifndef _XTENSA_PLATFORM_ISS_SIMCALL_GDBIO_H
#define _XTENSA_PLATFORM_ISS_SIMCALL_GDBIO_H

/*
 * System call like services offered by the GDBIO host.
 */

/* GDBIO service numbers (negative, distinguishing them from real syscalls). */
#define SYS_open	-2
#define SYS_close	-3
#define SYS_read	-4
#define SYS_write	-5
#define SYS_lseek	-6

/*
 * Per-translation-unit errno-style slot updated by __simc() below.
 * NOTE(review): this intentionally shadows any libc-style errno; each file
 * including this header gets its own private copy — confirm that callers
 * only read it immediately after a __simc() call.
 */
static int errno;

/*
 * __simc - issue a GDBIO semihosting call via the debug break instruction.
 * @a: service number (one of the SYS_* values above); also receives the result
 * @b, @c, @d: service arguments, bound to fixed registers expected by the host
 *
 * Returns the host's result value; the secondary return register is stored
 * into the file-local errno above.
 */
static inline int __simc(int a, int b, int c, int d)
{
	/* Register bindings are part of the GDBIO ABI: a2/a6/a3/a4. */
	register int a1 asm("a2") = a;
	register int b1 asm("a6") = b;
	register int c1 asm("a3") = c;
	register int d1 asm("a4") = d;
	__asm__ __volatile__ (
			"break 1, 14\n"
			: "+r"(a1), "+r"(c1)
			: "r"(b1), "r"(d1)
			: "memory");
	errno = c1;
	return a1;
}

#endif /* _XTENSA_PLATFORM_ISS_SIMCALL_GDBIO_H */
// SPDX-License-Identifier: GPL-2.0-only /* * Guest agent for virtio-trace * * Copyright (C) 2012 Hitachi, Ltd. * Created by Yoshihiro Yunomae <[email protected]> * Masami Hiramatsu <[email protected]> */ #define _GNU_SOURCE #include <limits.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include "trace-agent.h" #define PAGE_SIZE (sysconf(_SC_PAGE_SIZE)) #define PIPE_DEF_BUFS 16 #define PIPE_MIN_SIZE (PAGE_SIZE*PIPE_DEF_BUFS) #define PIPE_MAX_SIZE (1024*1024) #define TRACEFS "/sys/kernel/tracing" #define DEBUGFS "/sys/kernel/debug/tracing" #define READ_PATH_FMT "%s/per_cpu/cpu%d/trace_pipe_raw" #define WRITE_PATH_FMT "/dev/virtio-ports/trace-path-cpu%d" #define CTL_PATH "/dev/virtio-ports/agent-ctl-path" pthread_mutex_t mutex_notify = PTHREAD_MUTEX_INITIALIZER; pthread_cond_t cond_wakeup = PTHREAD_COND_INITIALIZER; static int get_total_cpus(void) { int nr_cpus = (int)sysconf(_SC_NPROCESSORS_CONF); if (nr_cpus <= 0) { pr_err("Could not read cpus\n"); goto error; } else if (nr_cpus > MAX_CPUS) { pr_err("Exceed max cpus(%d)\n", (int)MAX_CPUS); goto error; } return nr_cpus; error: exit(EXIT_FAILURE); } static void *agent_info_new(void) { struct agent_info *s; int i; s = zalloc(sizeof(struct agent_info)); if (s == NULL) { pr_err("agent_info zalloc error\n"); exit(EXIT_FAILURE); } s->pipe_size = PIPE_INIT; s->use_stdout = false; s->cpus = get_total_cpus(); s->ctl_fd = -1; /* read/write threads init */ for (i = 0; i < s->cpus; i++) s->rw_ti[i] = rw_thread_info_new(); return s; } static unsigned long parse_size(const char *arg) { unsigned long value, round; char *ptr; value = strtoul(arg, &ptr, 10); switch (*ptr) { case 'K': case 'k': value <<= 10; break; case 'M': case 'm': value <<= 20; break; default: break; } if (value > PIPE_MAX_SIZE) { pr_err("Pipe size must be less than 1MB\n"); goto error; } else if (value < PIPE_MIN_SIZE) { pr_err("Pipe size must be over 64KB\n"); goto error; } /* Align buffer size with page unit */ round = value & (PAGE_SIZE - 1); 
value = value - round; return value; error: return 0; } static void usage(char const *prg) { pr_err("usage: %s [-h] [-o] [-s <size of pipe>]\n", prg); } static const char *make_path(int cpu_num, bool this_is_write_path) { int ret; char *buf; buf = zalloc(PATH_MAX); if (buf == NULL) { pr_err("Could not allocate buffer\n"); goto error; } if (this_is_write_path) /* write(output) path */ ret = snprintf(buf, PATH_MAX, WRITE_PATH_FMT, cpu_num); else { /* read(input) path */ ret = snprintf(buf, PATH_MAX, READ_PATH_FMT, TRACEFS, cpu_num); if (ret > 0 && access(buf, F_OK) != 0) ret = snprintf(buf, PATH_MAX, READ_PATH_FMT, DEBUGFS, cpu_num); } if (ret <= 0) { pr_err("Failed to generate %s path(CPU#%d):%d\n", this_is_write_path ? "read" : "write", cpu_num, ret); goto error; } return buf; error: free(buf); return NULL; } static const char *make_input_path(int cpu_num) { return make_path(cpu_num, false); } static const char *make_output_path(int cpu_num) { return make_path(cpu_num, true); } static void *agent_info_init(struct agent_info *s) { int cpu; const char *in_path = NULL; const char *out_path = NULL; /* init read/write threads */ for (cpu = 0; cpu < s->cpus; cpu++) { /* set read(input) path per read/write thread */ in_path = make_input_path(cpu); if (in_path == NULL) goto error; /* set write(output) path per read/write thread*/ if (!s->use_stdout) { out_path = make_output_path(cpu); if (out_path == NULL) goto error; } else /* stdout mode */ pr_debug("stdout mode\n"); rw_thread_init(cpu, in_path, out_path, s->use_stdout, s->pipe_size, s->rw_ti[cpu]); } /* init controller of read/write threads */ s->ctl_fd = rw_ctl_init((const char *)CTL_PATH); return NULL; error: exit(EXIT_FAILURE); } static void *parse_args(int argc, char *argv[], struct agent_info *s) { int cmd; unsigned long size; while ((cmd = getopt(argc, argv, "hos:")) != -1) { switch (cmd) { /* stdout mode */ case 'o': s->use_stdout = true; break; /* size of pipe */ case 's': size = parse_size(optarg); if (size == 
0) goto error; s->pipe_size = size; break; case 'h': default: usage(argv[0]); goto error; } } agent_info_init(s); return NULL; error: exit(EXIT_FAILURE); } static void agent_main_loop(struct agent_info *s) { int cpu; pthread_t rw_thread_per_cpu[MAX_CPUS]; /* Start all read/write threads */ for (cpu = 0; cpu < s->cpus; cpu++) rw_thread_per_cpu[cpu] = rw_thread_run(s->rw_ti[cpu]); rw_ctl_loop(s->ctl_fd); /* Finish all read/write threads */ for (cpu = 0; cpu < s->cpus; cpu++) { int ret; ret = pthread_join(rw_thread_per_cpu[cpu], NULL); if (ret != 0) { pr_err("pthread_join() error:%d (cpu %d)\n", ret, cpu); exit(EXIT_FAILURE); } } } static void agent_info_free(struct agent_info *s) { int i; close(s->ctl_fd); for (i = 0; i < s->cpus; i++) { close(s->rw_ti[i]->in_fd); close(s->rw_ti[i]->out_fd); close(s->rw_ti[i]->read_pipe); close(s->rw_ti[i]->write_pipe); free(s->rw_ti[i]); } free(s); } int main(int argc, char *argv[]) { struct agent_info *s = NULL; s = agent_info_new(); parse_args(argc, argv, s); agent_main_loop(s); agent_info_free(s); return 0; }
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Machine specific IO port address definition for generic.
 * Written by Osamu Tomita <[email protected]>
 */
#ifndef __LINUX_I8253_H
#define __LINUX_I8253_H

#include <linux/param.h>
#include <linux/spinlock.h>
#include <linux/timex.h>

/* i8253A PIT registers */
#define PIT_MODE	0x43	/* mode/command register */
#define PIT_CH0		0x40	/* channel 0 data port (system timer) */
#define PIT_CH2		0x42	/* channel 2 data port (speaker) */

/* Counter reload value for one tick at HZ, rounded to nearest. */
#define PIT_LATCH	((PIT_TICK_RATE + HZ/2) / HZ)

/* Serializes all access to the PIT ports; defined in drivers/clocksource. */
extern raw_spinlock_t i8253_lock;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
extern void clockevent_i8253_disable(void);

extern void setup_pit_timer(void);

#endif /* __LINUX_I8253_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_SYNPROXY_H
#define _NF_CONNTRACK_SYNPROXY_H

#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netns/generic.h>

/* Per-conntrack SYN proxy state. */
struct nf_conn_synproxy {
	u32	isn;	/* initial sequence number */
	u32	its;	/* initial timestamp */
	u32	tsoff;	/* timestamp offset */
};

/*
 * Look up the synproxy extension of a conntrack entry; NULL when the
 * extension is absent or SYNPROXY support is compiled out.
 */
static inline struct nf_conn_synproxy *nfct_synproxy(const struct nf_conn *ct)
{
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
	return nf_ct_ext_find(ct, NF_CT_EXT_SYNPROXY);
#else
	return NULL;
#endif
}

/*
 * Attach a synproxy extension to a conntrack entry (GFP_ATOMIC — callable
 * from packet context); NULL on failure or when support is compiled out.
 */
static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct)
{
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
	return nf_ct_ext_add(ct, NF_CT_EXT_SYNPROXY, GFP_ATOMIC);
#else
	return NULL;
#endif
}

/*
 * If the template conntrack carries synproxy state, add both the seqadj and
 * synproxy extensions to the new entry. Returns false only when an
 * extension allocation fails; true otherwise (including when synproxy is
 * not involved or compiled out).
 */
static inline bool nf_ct_add_synproxy(struct nf_conn *ct,
				      const struct nf_conn *tmpl)
{
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
	if (tmpl && nfct_synproxy(tmpl)) {
		if (!nfct_seqadj_ext_add(ct))
			return false;

		if (!nfct_synproxy_ext_add(ct))
			return false;
	}
#endif

	return true;
}

#endif /* _NF_CONNTRACK_SYNPROXY_H */
// SPDX-License-Identifier: GPL-2.0-only /* $Id: sunlance.c,v 1.112 2002/01/15 06:48:55 davem Exp $ * lance.c: Linux/Sparc/Lance driver * * Written 1995, 1996 by Miguel de Icaza * Sources: * The Linux depca driver * The Linux lance driver. * The Linux skeleton driver. * The NetBSD Sparc/Lance driver. * Theo de Raadt ([email protected]) * NCR92C990 Lan Controller manual * * 1.4: * Added support to run with a ledma on the Sun4m * * 1.5: * Added multiple card detection. * * 4/17/96: Burst sizes and tpe selection on sun4m by Eddie C. Dost * ([email protected]) * * 5/15/96: auto carrier detection on sun4m by Eddie C. Dost * ([email protected]) * * 5/17/96: lebuffer on scsi/ether cards now work David S. Miller * ([email protected]) * * 5/29/96: override option 'tpe-link-test?', if it is 'false', as * this disables auto carrier detection on sun4m. Eddie C. Dost * ([email protected]) * * 1.7: * 6/26/96: Bug fix for multiple ledmas, miguel. * * 1.8: * Stole multicast code from depca.c, fixed lance_tx. * * 1.9: * 8/21/96: Fixed the multicast code (Pedro Roque) * * 8/28/96: Send fake packet in lance_open() if auto_select is true, * so we can detect the carrier loss condition in time. * Eddie C. Dost ([email protected]) * * 9/15/96: Align rx_buf so that eth_copy_and_sum() won't cause an * MNA trap during chksum_partial_copy(). ([email protected]) * * 11/17/96: Handle LE_C0_MERR in lance_interrupt(). ([email protected]) * * 12/22/96: Don't loop forever in lance_rx() on incomplete packets. * This was the sun4c killer. Shit, stupid bug. * ([email protected]) * * 1.10: * 1/26/97: Modularize driver. ([email protected]) * * 1.11: * 12/27/97: Added sun4d support. ([email protected]) * * 1.12: * 11/3/99: Fixed SMP race in lance_start_xmit found by davem. * Anton Blanchard ([email protected]) * 2.00: 11/9/99: Massive overhaul and port to new SBUS driver interfaces. * David S. 
Miller ([email protected]) * 2.01: * 11/08/01: Use library crc32 functions ([email protected]) * */ #undef DEBUG_DRIVER static char lancestr[] = "LANCE"; #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/crc32.h> #include <linux/errno.h> #include <linux/socket.h> /* Used for the temporal inet entries and routing */ #include <linux/route.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/gfp.h> #include <linux/pgtable.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> /* Used by the checksum routines */ #include <asm/idprom.h> #include <asm/prom.h> #include <asm/auxio.h> /* For tpe-link-test? setting */ #include <asm/irq.h> #define DRV_NAME "sunlance" #define DRV_RELDATE "8/24/03" #define DRV_AUTHOR "Miguel de Icaza ([email protected])" MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION("Sun Lance ethernet driver"); MODULE_LICENSE("GPL"); /* Define: 2^4 Tx buffers and 2^4 Rx buffers */ #ifndef LANCE_LOG_TX_BUFFERS #define LANCE_LOG_TX_BUFFERS 4 #define LANCE_LOG_RX_BUFFERS 4 #endif #define LE_CSR0 0 #define LE_CSR1 1 #define LE_CSR2 2 #define LE_CSR3 3 #define LE_MO_PROM 0x8000 /* Enable promiscuous mode */ #define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */ #define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */ #define LE_C0_CERR 0x2000 /* SQE: Signal quality error */ #define LE_C0_MISS 0x1000 /* MISS: Missed a packet */ #define LE_C0_MERR 0x0800 /* ME: Memory error */ #define LE_C0_RINT 0x0400 /* Received interrupt */ #define LE_C0_TINT 0x0200 /* Transmitter Interrupt */ #define LE_C0_IDON 0x0100 /* IFIN: Init finished. 
*/ #define LE_C0_INTR 0x0080 /* Interrupt or error */ #define LE_C0_INEA 0x0040 /* Interrupt enable */ #define LE_C0_RXON 0x0020 /* Receiver on */ #define LE_C0_TXON 0x0010 /* Transmitter on */ #define LE_C0_TDMD 0x0008 /* Transmitter demand */ #define LE_C0_STOP 0x0004 /* Stop the card */ #define LE_C0_STRT 0x0002 /* Start the card */ #define LE_C0_INIT 0x0001 /* Init the card */ #define LE_C3_BSWP 0x4 /* SWAP */ #define LE_C3_ACON 0x2 /* ALE Control */ #define LE_C3_BCON 0x1 /* Byte control */ /* Receive message descriptor 1 */ #define LE_R1_OWN 0x80 /* Who owns the entry */ #define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */ #define LE_R1_FRA 0x20 /* FRA: Frame error */ #define LE_R1_OFL 0x10 /* OFL: Frame overflow */ #define LE_R1_CRC 0x08 /* CRC error */ #define LE_R1_BUF 0x04 /* BUF: Buffer error */ #define LE_R1_SOP 0x02 /* Start of packet */ #define LE_R1_EOP 0x01 /* End of packet */ #define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */ #define LE_T1_OWN 0x80 /* Lance owns the packet */ #define LE_T1_ERR 0x40 /* Error summary */ #define LE_T1_EMORE 0x10 /* Error: more than one retry needed */ #define LE_T1_EONE 0x08 /* Error: one retry needed */ #define LE_T1_EDEF 0x04 /* Error: deferred */ #define LE_T1_SOP 0x02 /* Start of packet */ #define LE_T1_EOP 0x01 /* End of packet */ #define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */ #define LE_T3_BUF 0x8000 /* Buffer error */ #define LE_T3_UFL 0x4000 /* Error underflow */ #define LE_T3_LCOL 0x1000 /* Error late collision */ #define LE_T3_CLOS 0x0800 /* Error carrier loss */ #define LE_T3_RTY 0x0400 /* Error retry */ #define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */ #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) #define TX_RING_MOD_MASK (TX_RING_SIZE - 1) #define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29) #define TX_NEXT(__x) (((__x)+1) & TX_RING_MOD_MASK) #define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) #define RX_RING_MOD_MASK (RX_RING_SIZE - 1) #define 
RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29) #define RX_NEXT(__x) (((__x)+1) & RX_RING_MOD_MASK) #define PKT_BUF_SZ 1544 #define RX_BUFF_SIZE PKT_BUF_SZ #define TX_BUFF_SIZE PKT_BUF_SZ struct lance_rx_desc { u16 rmd0; /* low address of packet */ u8 rmd1_bits; /* descriptor bits */ u8 rmd1_hadr; /* high address of packet */ s16 length; /* This length is 2s complement (negative)! * Buffer length */ u16 mblength; /* This is the actual number of bytes received */ }; struct lance_tx_desc { u16 tmd0; /* low address of packet */ u8 tmd1_bits; /* descriptor bits */ u8 tmd1_hadr; /* high address of packet */ s16 length; /* Length is 2s complement (negative)! */ u16 misc; }; /* The LANCE initialization block, described in databook. */ /* On the Sparc, this block should be on a DMA region */ struct lance_init_block { u16 mode; /* Pre-set mode (reg. 15) */ u8 phys_addr[6]; /* Physical ethernet address */ u32 filter[2]; /* Multicast filter. */ /* Receive and transmit ring base, along with extra bits. */ u16 rx_ptr; /* receive descriptor addr */ u16 rx_len; /* receive len and high addr */ u16 tx_ptr; /* transmit descriptor addr */ u16 tx_len; /* transmit len and high addr */ /* The Tx and Rx ring entries must aligned on 8-byte boundaries. */ struct lance_rx_desc brx_ring[RX_RING_SIZE]; struct lance_tx_desc btx_ring[TX_RING_SIZE]; u8 tx_buf [TX_RING_SIZE][TX_BUFF_SIZE]; u8 pad[2]; /* align rx_buf for copy_and_sum(). */ u8 rx_buf [RX_RING_SIZE][RX_BUFF_SIZE]; }; #define libdesc_offset(rt, elem) \ ((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem]))))) #define libbuff_offset(rt, elem) \ ((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0]))))) struct lance_private { void __iomem *lregs; /* Lance RAP/RDP regs. */ void __iomem *dregs; /* DMA controller regs. 
*/ struct lance_init_block __iomem *init_block_iomem; struct lance_init_block *init_block_mem; spinlock_t lock; int rx_new, tx_new; int rx_old, tx_old; struct platform_device *ledma; /* If set this points to ledma */ char tpe; /* cable-selection is TPE */ char auto_select; /* cable-selection by carrier */ char burst_sizes; /* ledma SBus burst sizes */ char pio_buffer; /* init block in PIO space? */ unsigned short busmaster_regval; void (*init_ring)(struct net_device *); void (*rx)(struct net_device *); void (*tx)(struct net_device *); char *name; dma_addr_t init_block_dvma; struct net_device *dev; /* Backpointer */ struct platform_device *op; struct platform_device *lebuffer; struct timer_list multicast_timer; }; #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\ lp->tx_old - lp->tx_new-1) /* Lance registers. */ #define RDP 0x00UL /* register data port */ #define RAP 0x02UL /* register address port */ #define LANCE_REG_SIZE 0x04UL #define STOP_LANCE(__lp) \ do { void __iomem *__base = (__lp)->lregs; \ sbus_writew(LE_CSR0, __base + RAP); \ sbus_writew(LE_C0_STOP, __base + RDP); \ } while (0) int sparc_lance_debug = 2; /* The Lance uses 24 bit addresses */ /* On the Sun4c the DVMA will provide the remaining bytes for us */ /* On the Sun4m we have to instruct the ledma to provide them */ /* Even worse, on scsi/ether SBUS cards, the init block and the * transmit/receive buffers are addresses as offsets from absolute * zero on the lebuffer PIO area. 
-DaveM */ #define LANCE_ADDR(x) ((long)(x) & ~0xff000000) /* Load the CSR registers */ static void load_csrs(struct lance_private *lp) { u32 leptr; if (lp->pio_buffer) leptr = 0; else leptr = LANCE_ADDR(lp->init_block_dvma); sbus_writew(LE_CSR1, lp->lregs + RAP); sbus_writew(leptr & 0xffff, lp->lregs + RDP); sbus_writew(LE_CSR2, lp->lregs + RAP); sbus_writew(leptr >> 16, lp->lregs + RDP); sbus_writew(LE_CSR3, lp->lregs + RAP); sbus_writew(lp->busmaster_regval, lp->lregs + RDP); /* Point back to csr0 */ sbus_writew(LE_CSR0, lp->lregs + RAP); } /* Setup the Lance Rx and Tx rings */ static void lance_init_ring_dvma(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block *ib = lp->init_block_mem; dma_addr_t aib = lp->init_block_dvma; __u32 leptr; int i; /* Lock out other processes while setting up hardware */ netif_stop_queue(dev); lp->rx_new = lp->tx_new = 0; lp->rx_old = lp->tx_old = 0; /* Copy the ethernet address to the lance init block * Note that on the sparc you need to swap the ethernet address. 
*/ ib->phys_addr [0] = dev->dev_addr [1]; ib->phys_addr [1] = dev->dev_addr [0]; ib->phys_addr [2] = dev->dev_addr [3]; ib->phys_addr [3] = dev->dev_addr [2]; ib->phys_addr [4] = dev->dev_addr [5]; ib->phys_addr [5] = dev->dev_addr [4]; /* Setup the Tx ring entries */ for (i = 0; i < TX_RING_SIZE; i++) { leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i)); ib->btx_ring [i].tmd0 = leptr; ib->btx_ring [i].tmd1_hadr = leptr >> 16; ib->btx_ring [i].tmd1_bits = 0; ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */ ib->btx_ring [i].misc = 0; } /* Setup the Rx ring entries */ for (i = 0; i < RX_RING_SIZE; i++) { leptr = LANCE_ADDR(aib + libbuff_offset(rx_buf, i)); ib->brx_ring [i].rmd0 = leptr; ib->brx_ring [i].rmd1_hadr = leptr >> 16; ib->brx_ring [i].rmd1_bits = LE_R1_OWN; ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000; ib->brx_ring [i].mblength = 0; } /* Setup the initialization block */ /* Setup rx descriptor pointer */ leptr = LANCE_ADDR(aib + libdesc_offset(brx_ring, 0)); ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16); ib->rx_ptr = leptr; /* Setup tx descriptor pointer */ leptr = LANCE_ADDR(aib + libdesc_offset(btx_ring, 0)); ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16); ib->tx_ptr = leptr; } static void lance_init_ring_pio(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block __iomem *ib = lp->init_block_iomem; u32 leptr; int i; /* Lock out other processes while setting up hardware */ netif_stop_queue(dev); lp->rx_new = lp->tx_new = 0; lp->rx_old = lp->tx_old = 0; /* Copy the ethernet address to the lance init block * Note that on the sparc you need to swap the ethernet address. 
*/ sbus_writeb(dev->dev_addr[1], &ib->phys_addr[0]); sbus_writeb(dev->dev_addr[0], &ib->phys_addr[1]); sbus_writeb(dev->dev_addr[3], &ib->phys_addr[2]); sbus_writeb(dev->dev_addr[2], &ib->phys_addr[3]); sbus_writeb(dev->dev_addr[5], &ib->phys_addr[4]); sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]); /* Setup the Tx ring entries */ for (i = 0; i < TX_RING_SIZE; i++) { leptr = libbuff_offset(tx_buf, i); sbus_writew(leptr, &ib->btx_ring [i].tmd0); sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr); sbus_writeb(0, &ib->btx_ring [i].tmd1_bits); /* The ones required by tmd2 */ sbus_writew(0xf000, &ib->btx_ring [i].length); sbus_writew(0, &ib->btx_ring [i].misc); } /* Setup the Rx ring entries */ for (i = 0; i < RX_RING_SIZE; i++) { leptr = libbuff_offset(rx_buf, i); sbus_writew(leptr, &ib->brx_ring [i].rmd0); sbus_writeb(leptr >> 16,&ib->brx_ring [i].rmd1_hadr); sbus_writeb(LE_R1_OWN, &ib->brx_ring [i].rmd1_bits); sbus_writew(-RX_BUFF_SIZE|0xf000, &ib->brx_ring [i].length); sbus_writew(0, &ib->brx_ring [i].mblength); } /* Setup the initialization block */ /* Setup rx descriptor pointer */ leptr = libdesc_offset(brx_ring, 0); sbus_writew((LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16), &ib->rx_len); sbus_writew(leptr, &ib->rx_ptr); /* Setup tx descriptor pointer */ leptr = libdesc_offset(btx_ring, 0); sbus_writew((LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16), &ib->tx_len); sbus_writew(leptr, &ib->tx_ptr); } static void init_restart_ledma(struct lance_private *lp) { u32 csr = sbus_readl(lp->dregs + DMA_CSR); if (!(csr & DMA_HNDL_ERROR)) { /* E-Cache draining */ while (sbus_readl(lp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN) barrier(); } csr = sbus_readl(lp->dregs + DMA_CSR); csr &= ~DMA_E_BURSTS; if (lp->burst_sizes & DMA_BURST32) csr |= DMA_E_BURST32; else csr |= DMA_E_BURST16; csr |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV); if (lp->tpe) csr |= DMA_EN_ENETAUI; else csr &= ~DMA_EN_ENETAUI; udelay(20); sbus_writel(csr, lp->dregs + DMA_CSR); udelay(200); } static int 
init_restart_lance(struct lance_private *lp) { u16 regval = 0; int i; if (lp->dregs) init_restart_ledma(lp); sbus_writew(LE_CSR0, lp->lregs + RAP); sbus_writew(LE_C0_INIT, lp->lregs + RDP); /* Wait for the lance to complete initialization */ for (i = 0; i < 100; i++) { regval = sbus_readw(lp->lregs + RDP); if (regval & (LE_C0_ERR | LE_C0_IDON)) break; barrier(); } if (i == 100 || (regval & LE_C0_ERR)) { printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n", i, regval); if (lp->dregs) printk("dcsr=%8.8x\n", sbus_readl(lp->dregs + DMA_CSR)); return -1; } /* Clear IDON by writing a "1", enable interrupts and start lance */ sbus_writew(LE_C0_IDON, lp->lregs + RDP); sbus_writew(LE_C0_INEA | LE_C0_STRT, lp->lregs + RDP); if (lp->dregs) { u32 csr = sbus_readl(lp->dregs + DMA_CSR); csr |= DMA_INT_ENAB; sbus_writel(csr, lp->dregs + DMA_CSR); } return 0; } static void lance_rx_dvma(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block *ib = lp->init_block_mem; struct lance_rx_desc *rd; u8 bits; int len, entry = lp->rx_new; struct sk_buff *skb; for (rd = &ib->brx_ring [entry]; !((bits = rd->rmd1_bits) & LE_R1_OWN); rd = &ib->brx_ring [entry]) { /* We got an incomplete frame? 
*/ if ((bits & LE_R1_POK) != LE_R1_POK) { dev->stats.rx_over_errors++; dev->stats.rx_errors++; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { len = (rd->mblength & 0xfff) - 4; skb = netdev_alloc_skb(dev, len + 2); if (!skb) { dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = RX_NEXT(entry); return; } dev->stats.rx_bytes += len; skb_reserve(skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ skb_copy_to_linear_data(skb, (unsigned char *)&(ib->rx_buf [entry][0]), len); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; } /* Return the packet to the pool */ rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; entry = RX_NEXT(entry); } lp->rx_new = entry; } static void lance_tx_dvma(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block *ib = lp->init_block_mem; int i, j; spin_lock(&lp->lock); j = lp->tx_old; for (i = j; i != lp->tx_new; i = j) { struct lance_tx_desc *td = &ib->btx_ring [i]; u8 bits = td->tmd1_bits; /* If we hit a packet not owned by us, stop */ if (bits & LE_T1_OWN) break; if (bits & LE_T1_ERR) { u16 status = td->misc; dev->stats.tx_errors++; if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", dev->name, lp->tpe?"TPE":"AUI"); STOP_LANCE(lp); lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); goto out; } } /* Buffer errors and underflows turn off the * transmitter, restart the adapter. 
*/ if (status & (LE_T3_BUF|LE_T3_UFL)) { dev->stats.tx_fifo_errors++; printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); STOP_LANCE(lp); lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); goto out; } } else if ((bits & LE_T1_POK) == LE_T1_POK) { /* * So we don't count the packet more than once. */ td->tmd1_bits = bits & ~(LE_T1_POK); /* One collision before packet was sent. */ if (bits & LE_T1_EONE) dev->stats.collisions++; /* More than one collision, be optimistic. */ if (bits & LE_T1_EMORE) dev->stats.collisions += 2; dev->stats.tx_packets++; } j = TX_NEXT(j); } lp->tx_old = j; out: if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL > 0) netif_wake_queue(dev); spin_unlock(&lp->lock); } static void lance_piocopy_to_skb(struct sk_buff *skb, void __iomem *piobuf, int len) { u16 *p16 = (u16 *) skb->data; u32 *p32; u8 *p8; void __iomem *pbuf = piobuf; /* We know here that both src and dest are on a 16bit boundary. */ *p16++ = sbus_readw(pbuf); p32 = (u32 *) p16; pbuf += 2; len -= 2; while (len >= 4) { *p32++ = sbus_readl(pbuf); pbuf += 4; len -= 4; } p8 = (u8 *) p32; if (len >= 2) { p16 = (u16 *) p32; *p16++ = sbus_readw(pbuf); pbuf += 2; len -= 2; p8 = (u8 *) p16; } if (len >= 1) *p8 = sbus_readb(pbuf); } static void lance_rx_pio(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block __iomem *ib = lp->init_block_iomem; struct lance_rx_desc __iomem *rd; unsigned char bits; int len, entry; struct sk_buff *skb; entry = lp->rx_new; for (rd = &ib->brx_ring [entry]; !((bits = sbus_readb(&rd->rmd1_bits)) & LE_R1_OWN); rd = &ib->brx_ring [entry]) { /* We got an incomplete frame? 
*/ if ((bits & LE_R1_POK) != LE_R1_POK) { dev->stats.rx_over_errors++; dev->stats.rx_errors++; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { len = (sbus_readw(&rd->mblength) & 0xfff) - 4; skb = netdev_alloc_skb(dev, len + 2); if (!skb) { dev->stats.rx_dropped++; sbus_writew(0, &rd->mblength); sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); lp->rx_new = RX_NEXT(entry); return; } dev->stats.rx_bytes += len; skb_reserve (skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; } /* Return the packet to the pool */ sbus_writew(0, &rd->mblength); sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); entry = RX_NEXT(entry); } lp->rx_new = entry; } static void lance_tx_pio(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block __iomem *ib = lp->init_block_iomem; int i, j; spin_lock(&lp->lock); j = lp->tx_old; for (i = j; i != lp->tx_new; i = j) { struct lance_tx_desc __iomem *td = &ib->btx_ring [i]; u8 bits = sbus_readb(&td->tmd1_bits); /* If we hit a packet not owned by us, stop */ if (bits & LE_T1_OWN) break; if (bits & LE_T1_ERR) { u16 status = sbus_readw(&td->misc); dev->stats.tx_errors++; if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", dev->name, lp->tpe?"TPE":"AUI"); STOP_LANCE(lp); lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); goto out; } } /* Buffer errors and 
underflows turn off the * transmitter, restart the adapter. */ if (status & (LE_T3_BUF|LE_T3_UFL)) { dev->stats.tx_fifo_errors++; printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); STOP_LANCE(lp); lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); goto out; } } else if ((bits & LE_T1_POK) == LE_T1_POK) { /* * So we don't count the packet more than once. */ sbus_writeb(bits & ~(LE_T1_POK), &td->tmd1_bits); /* One collision before packet was sent. */ if (bits & LE_T1_EONE) dev->stats.collisions++; /* More than one collision, be optimistic. */ if (bits & LE_T1_EMORE) dev->stats.collisions += 2; dev->stats.tx_packets++; } j = TX_NEXT(j); } lp->tx_old = j; if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL > 0) netif_wake_queue(dev); out: spin_unlock(&lp->lock); } static irqreturn_t lance_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct lance_private *lp = netdev_priv(dev); int csr0; sbus_writew(LE_CSR0, lp->lregs + RAP); csr0 = sbus_readw(lp->lregs + RDP); /* Acknowledge all the interrupt sources ASAP */ sbus_writew(csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT), lp->lregs + RDP); if ((csr0 & LE_C0_ERR) != 0) { /* Clear the error condition */ sbus_writew((LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_CERR | LE_C0_MERR), lp->lregs + RDP); } if (csr0 & LE_C0_RINT) lp->rx(dev); if (csr0 & LE_C0_TINT) lp->tx(dev); if (csr0 & LE_C0_BABL) dev->stats.tx_errors++; if (csr0 & LE_C0_MISS) dev->stats.rx_errors++; if (csr0 & LE_C0_MERR) { if (lp->dregs) { u32 addr = sbus_readl(lp->dregs + DMA_ADDR); printk(KERN_ERR "%s: Memory error, status %04x, addr %06x\n", dev->name, csr0, addr & 0xffffff); } else { printk(KERN_ERR "%s: Memory error, status %04x\n", dev->name, csr0); } sbus_writew(LE_C0_STOP, lp->lregs + RDP); if (lp->dregs) { u32 dma_csr = sbus_readl(lp->dregs + DMA_CSR); dma_csr |= DMA_FIFO_INV; sbus_writel(dma_csr, lp->dregs + DMA_CSR); } lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); netif_wake_queue(dev); } 
sbus_writew(LE_C0_INEA, lp->lregs + RDP); return IRQ_HANDLED; } /* Build a fake network packet and send it to ourselves. */ static void build_fake_packet(struct lance_private *lp) { struct net_device *dev = lp->dev; int i, entry; entry = lp->tx_new & TX_RING_MOD_MASK; if (lp->pio_buffer) { struct lance_init_block __iomem *ib = lp->init_block_iomem; u16 __iomem *packet = (u16 __iomem *) &(ib->tx_buf[entry][0]); struct ethhdr __iomem *eth = (struct ethhdr __iomem *) packet; for (i = 0; i < (ETH_ZLEN / sizeof(u16)); i++) sbus_writew(0, &packet[i]); for (i = 0; i < 6; i++) { sbus_writeb(dev->dev_addr[i], &eth->h_dest[i]); sbus_writeb(dev->dev_addr[i], &eth->h_source[i]); } sbus_writew((-ETH_ZLEN) | 0xf000, &ib->btx_ring[entry].length); sbus_writew(0, &ib->btx_ring[entry].misc); sbus_writeb(LE_T1_POK|LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits); } else { struct lance_init_block *ib = lp->init_block_mem; u16 *packet = (u16 *) &(ib->tx_buf[entry][0]); struct ethhdr *eth = (struct ethhdr *) packet; memset(packet, 0, ETH_ZLEN); for (i = 0; i < 6; i++) { eth->h_dest[i] = dev->dev_addr[i]; eth->h_source[i] = dev->dev_addr[i]; } ib->btx_ring[entry].length = (-ETH_ZLEN) | 0xf000; ib->btx_ring[entry].misc = 0; ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); } lp->tx_new = TX_NEXT(entry); } static int lance_open(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int status = 0; STOP_LANCE(lp); if (request_irq(dev->irq, lance_interrupt, IRQF_SHARED, lancestr, (void *) dev)) { printk(KERN_ERR "Lance: Can't get irq %d\n", dev->irq); return -EAGAIN; } /* On the 4m, setup the ledma to provide the upper bits for buffers */ if (lp->dregs) { u32 regval = lp->init_block_dvma & 0xff000000; sbus_writel(regval, lp->dregs + DMA_TEST); } /* Set mode and clear multicast filter only at device open, * so that lance_init_ring() called at any error will not * forget multicast filters. * * BTW it is common bug in all lance drivers! 
--ANK */ if (lp->pio_buffer) { struct lance_init_block __iomem *ib = lp->init_block_iomem; sbus_writew(0, &ib->mode); sbus_writel(0, &ib->filter[0]); sbus_writel(0, &ib->filter[1]); } else { struct lance_init_block *ib = lp->init_block_mem; ib->mode = 0; ib->filter [0] = 0; ib->filter [1] = 0; } lp->init_ring(dev); load_csrs(lp); netif_start_queue(dev); status = init_restart_lance(lp); if (!status && lp->auto_select) { build_fake_packet(lp); sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP); } return status; } static int lance_close(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); netif_stop_queue(dev); del_timer_sync(&lp->multicast_timer); STOP_LANCE(lp); free_irq(dev->irq, (void *) dev); return 0; } static int lance_reset(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int status; STOP_LANCE(lp); /* On the 4m, reset the dma too */ if (lp->dregs) { u32 csr, addr; printk(KERN_ERR "resetting ledma\n"); csr = sbus_readl(lp->dregs + DMA_CSR); sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR); udelay(200); sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR); addr = lp->init_block_dvma & 0xff000000; sbus_writel(addr, lp->dregs + DMA_TEST); } lp->init_ring(dev); load_csrs(lp); netif_trans_update(dev); /* prevent tx timeout */ status = init_restart_lance(lp); return status; } static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int len) { void __iomem *piobuf = dest; u32 *p32; u16 *p16; u8 *p8; switch ((unsigned long)src & 0x3) { case 0: p32 = (u32 *) src; while (len >= 4) { sbus_writel(*p32, piobuf); p32++; piobuf += 4; len -= 4; } src = (char *) p32; break; case 1: case 3: p8 = (u8 *) src; while (len >= 4) { u32 val; val = p8[0] << 24; val |= p8[1] << 16; val |= p8[2] << 8; val |= p8[3]; sbus_writel(val, piobuf); p8 += 4; piobuf += 4; len -= 4; } src = (char *) p8; break; case 2: p16 = (u16 *) src; while (len >= 4) { u32 val = p16[0]<<16 | p16[1]; sbus_writel(val, piobuf); p16 += 2; piobuf += 4; 
len -= 4; } src = (char *) p16; break; } if (len >= 2) { u16 val = src[0] << 8 | src[1]; sbus_writew(val, piobuf); src += 2; piobuf += 2; len -= 2; } if (len >= 1) sbus_writeb(src[0], piobuf); } static void lance_piozero(void __iomem *dest, int len) { void __iomem *piobuf = dest; if ((unsigned long)piobuf & 1) { sbus_writeb(0, piobuf); piobuf += 1; len -= 1; if (len == 0) return; } if (len == 1) { sbus_writeb(0, piobuf); return; } if ((unsigned long)piobuf & 2) { sbus_writew(0, piobuf); piobuf += 2; len -= 2; if (len == 0) return; } while (len >= 4) { sbus_writel(0, piobuf); piobuf += 4; len -= 4; } if (len >= 2) { sbus_writew(0, piobuf); piobuf += 2; len -= 2; } if (len >= 1) sbus_writeb(0, piobuf); } static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct lance_private *lp = netdev_priv(dev); printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n", dev->name, sbus_readw(lp->lregs + RDP)); lance_reset(dev); netif_wake_queue(dev); } static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int entry, skblen, len; skblen = skb->len; len = (skblen <= ETH_ZLEN) ? 
ETH_ZLEN : skblen; spin_lock_irq(&lp->lock); dev->stats.tx_bytes += len; entry = lp->tx_new & TX_RING_MOD_MASK; if (lp->pio_buffer) { struct lance_init_block __iomem *ib = lp->init_block_iomem; sbus_writew((-len) | 0xf000, &ib->btx_ring[entry].length); sbus_writew(0, &ib->btx_ring[entry].misc); lance_piocopy_from_skb(&ib->tx_buf[entry][0], skb->data, skblen); if (len != skblen) lance_piozero(&ib->tx_buf[entry][skblen], len - skblen); sbus_writeb(LE_T1_POK | LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits); } else { struct lance_init_block *ib = lp->init_block_mem; ib->btx_ring [entry].length = (-len) | 0xf000; ib->btx_ring [entry].misc = 0; skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen); if (len != skblen) memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen); ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); } lp->tx_new = TX_NEXT(entry); if (TX_BUFFS_AVAIL <= 0) netif_stop_queue(dev); /* Kick the lance: transmit now */ sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP); /* Read back CSR to invalidate the E-Cache. * This is needed, because DMA_DSBL_WR_INV is set. 
 */
	if (lp->dregs)
		sbus_readw(lp->lregs + RDP);

	spin_unlock_irq(&lp->lock);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/* taken from the depca driver
 *
 * Program the chip's 64-bit logical-address filter from the device's
 * multicast list.  Each address is hashed with the little-endian
 * Ethernet CRC; the top 6 bits of the CRC select one bit in the filter.
 * Works on either the PIO (iomem) or the DVMA (memory) init block.
 */
static void lance_load_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u32 crc;
	u32 val;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI)
		val = ~0;
	else
		val = 0;

	if (lp->pio_buffer) {
		struct lance_init_block __iomem *ib = lp->init_block_iomem;

		sbus_writel(val, &ib->filter[0]);
		sbus_writel(val, &ib->filter[1]);
	} else {
		struct lance_init_block *ib = lp->init_block_mem;

		ib->filter[0] = val;
		ib->filter[1] = val;
	}

	/* ALLMULTI: filter already set to all-ones above, nothing to add */
	if (dev->flags & IFF_ALLMULTI)
		return;

	/* Add addresses */
	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(6, ha->addr);
		/* top 6 CRC bits index one of 64 filter bits */
		crc = crc >> 26;
		if (lp->pio_buffer) {
			struct lance_init_block __iomem *ib = lp->init_block_iomem;
			u16 __iomem *mcast_table = (u16 __iomem *) &ib->filter;
			u16 tmp = sbus_readw(&mcast_table[crc>>4]);
			tmp |= 1 << (crc & 0xf);
			sbus_writew(tmp, &mcast_table[crc>>4]);
		} else {
			struct lance_init_block *ib = lp->init_block_mem;
			u16 *mcast_table = (u16 *) &ib->filter;
			mcast_table[crc >> 4] |= 1 << (crc & 0xf);
		}
	}
}

/* .ndo_set_rx_mode handler: reprogram promiscuous/multicast filtering.
 * The LANCE must be stopped and its init block reloaded to change the
 * filter, so if TX is still draining we defer via multicast_timer
 * instead of dropping packets.
 */
static void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	struct lance_init_block *ib_mem = lp->init_block_mem;
	struct lance_init_block __iomem *ib_iomem = lp->init_block_iomem;
	u16 mode;

	if (!netif_running(dev))
		return;

	/* TX ring not empty yet: retry shortly (4 jiffies) via timer */
	if (lp->tx_old != lp->tx_new) {
		mod_timer(&lp->multicast_timer, jiffies + 4);
		netif_wake_queue(dev);
		return;
	}

	netif_stop_queue(dev);

	STOP_LANCE(lp);
	lp->init_ring(dev);

	if (lp->pio_buffer)
		mode = sbus_readw(&ib_iomem->mode);
	else
		mode = ib_mem->mode;
	if (dev->flags & IFF_PROMISC) {
		mode |= LE_MO_PROM;
		if (lp->pio_buffer)
			sbus_writew(mode, &ib_iomem->mode);
		else
			ib_mem->mode = mode;
	} else {
		mode &= ~LE_MO_PROM;
		if (lp->pio_buffer)
			sbus_writew(mode, &ib_iomem->mode);
		else
			ib_mem->mode = mode;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);
	netif_wake_queue(dev);
}

/* Timer callback used to retry a filter update that was deferred
 * because the TX ring was still busy (see lance_set_multicast).
 */
static void lance_set_multicast_retry(struct timer_list *t)
{
	struct lance_private *lp = from_timer(lp, t, multicast_timer);
	struct net_device *dev = lp->dev;

	lance_set_multicast(dev);
}

/* Undo every mapping/allocation made by sparc_lance_probe_one.
 * Safe to call on a partially initialized lance_private: each
 * resource is released only if its pointer is non-NULL.
 */
static void lance_free_hwresources(struct lance_private *lp)
{
	if (lp->lregs)
		of_iounmap(&lp->op->resource[0], lp->lregs, LANCE_REG_SIZE);
	if (lp->dregs) {
		struct platform_device *ledma = lp->ledma;

		of_iounmap(&ledma->resource[0], lp->dregs,
			   resource_size(&ledma->resource[0]));
	}
	if (lp->init_block_iomem) {
		of_iounmap(&lp->lebuffer->resource[0], lp->init_block_iomem,
			   sizeof(struct lance_init_block));
	} else if (lp->init_block_mem) {
		dma_free_coherent(&lp->op->dev,
				  sizeof(struct lance_init_block),
				  lp->init_block_mem,
				  lp->init_block_dvma);
	}
}

/* Ethtool support... */
static void sparc_lance_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "sunlance", sizeof(info->driver));
}

static const struct ethtool_ops sparc_lance_ethtool_ops = {
	.get_drvinfo		= sparc_lance_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};

static const struct net_device_ops sparc_lance_ops = {
	.ndo_open		= lance_open,
	.ndo_stop		= lance_close,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_set_rx_mode	= lance_set_multicast,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/* Probe one LANCE instance.
 *
 * @op:       the "le" SBus device itself.
 * @ledma:    optional parent "ledma" DMA controller (NULL if absent).
 * @lebuffer: optional parent "lebuffer" PIO descriptor memory
 *            (NULL if the rings live in DVMA-coherent host memory).
 *
 * Exactly one of @ledma/@lebuffer may be set; both NULL means a plain
 * DVMA configuration.  Returns 0 on success, -ENOMEM/-ENODEV on error;
 * all partial resources are released on the failure path.
 */
static int sparc_lance_probe_one(struct platform_device *op,
				 struct platform_device *ledma,
				 struct platform_device *lebuffer)
{
	struct device_node *dp = op->dev.of_node;
	struct lance_private *lp;
	struct net_device *dev;

	dev = alloc_etherdev(sizeof(struct lance_private) + 8);
	if (!dev)
		return -ENOMEM;

	lp = netdev_priv(dev);

	spin_lock_init(&lp->lock);

	/* Copy the IDPROM ethernet address to the device structure, later we
	 * will copy the address in the device structure to the lance
	 * initialization block.
	 */
	eth_hw_addr_set(dev, idprom->id_ethaddr);

	/* Get the IO region */
	lp->lregs = of_ioremap(&op->resource[0], 0,
			       LANCE_REG_SIZE, lancestr);
	if (!lp->lregs) {
		printk(KERN_ERR "SunLance: Cannot map registers.\n");
		goto fail;
	}

	lp->ledma = ledma;
	if (lp->ledma) {
		lp->dregs = of_ioremap(&ledma->resource[0], 0,
				       resource_size(&ledma->resource[0]),
				       "ledma");
		if (!lp->dregs) {
			printk(KERN_ERR "SunLance: Cannot map "
			       "ledma registers.\n");
			goto fail;
		}
	}

	lp->op = op;
	lp->lebuffer = lebuffer;
	if (lebuffer) {
		/* sanity check: ring memory must be 8-byte aligned */
		if (lebuffer->resource[0].start & 7) {
			printk(KERN_ERR "SunLance: ERROR: Rx and Tx rings not on even boundary.\n");
			goto fail;
		}
		lp->init_block_iomem =
			of_ioremap(&lebuffer->resource[0], 0,
				   sizeof(struct lance_init_block), "lebuffer");
		if (!lp->init_block_iomem) {
			printk(KERN_ERR "SunLance: Cannot map PIO buffer.\n");
			goto fail;
		}
		lp->init_block_dvma = 0;
		lp->pio_buffer = 1;
		lp->init_ring = lance_init_ring_pio;
		lp->rx = lance_rx_pio;
		lp->tx = lance_tx_pio;
	} else {
		/* No lebuffer: rings live in DMA-coherent host memory */
		lp->init_block_mem =
			dma_alloc_coherent(&op->dev,
					   sizeof(struct lance_init_block),
					   &lp->init_block_dvma, GFP_ATOMIC);
		if (!lp->init_block_mem)
			goto fail;

		lp->pio_buffer = 0;
		lp->init_ring = lance_init_ring_dvma;
		lp->rx = lance_rx_dvma;
		lp->tx = lance_tx_dvma;
	}
	lp->busmaster_regval = of_getintprop_default(dp, "busmaster-regval",
						     (LE_C3_BSWP |
						      LE_C3_ACON |
						      LE_C3_BCON));

	lp->name = lancestr;

	lp->burst_sizes = 0;
	if (lp->ledma) {
		struct device_node *ledma_dp = ledma->dev.of_node;
		struct device_node *sbus_dp;
		unsigned int sbmask;
		const char *prop;
		u32 csr;

		/* Find burst-size property for ledma */
		lp->burst_sizes = of_getintprop_default(ledma_dp,
							"burst-sizes", 0);

		/* ledma may be capable of fast bursts, but sbus may not.
		 * Intersect with the parent SBus node's burst capabilities.
		 */
		sbus_dp = ledma_dp->parent;
		sbmask = of_getintprop_default(sbus_dp, "burst-sizes",
					       DMA_BURSTBITS);
		lp->burst_sizes &= sbmask;

		/* Get the cable-selection property */
		prop = of_get_property(ledma_dp, "cable-selection", NULL);
		if (!prop || prop[0] == '\0') {
			struct device_node *nd;

			printk(KERN_INFO "SunLance: using "
			       "auto-carrier-detection.\n");

			nd = of_find_node_by_path("/options");
			if (!nd)
				goto no_link_test;

			prop = of_get_property(nd, "tpe-link-test?", NULL);
			if (!prop)
				goto node_put;

			/* OBP has link test disabled: force it on so
			 * auto-carrier-detection can work, and warn.
			 */
			if (strcmp(prop, "true")) {
				printk(KERN_NOTICE "SunLance: warning: overriding option "
				       "'tpe-link-test?'\n");
				printk(KERN_NOTICE "SunLance: warning: mail any problems "
				       "to [email protected]\n");
				auxio_set_lte(AUXIO_LTE_ON);
			}
node_put:
			of_node_put(nd);
no_link_test:
			lp->auto_select = 1;
			lp->tpe = 0;
		} else if (!strcmp(prop, "aui")) {
			lp->auto_select = 0;
			lp->tpe = 0;
		} else {
			lp->auto_select = 0;
			lp->tpe = 1;
		}

		/* Reset ledma: pulse DMA_RST_ENET with a 200us settle delay */
		csr = sbus_readl(lp->dregs + DMA_CSR);
		sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
		udelay(200);
		sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);
	} else
		lp->dregs = NULL;

	lp->dev = dev;
	SET_NETDEV_DEV(dev, &op->dev);
	dev->watchdog_timeo = 5*HZ;
	dev->ethtool_ops = &sparc_lance_ethtool_ops;
	dev->netdev_ops = &sparc_lance_ops;

	dev->irq = op->archdata.irqs[0];

	/* We cannot sleep if the chip is busy during a
	 * multicast list update event, because such events
	 * can occur from interrupts (ex. IPv6). So we
	 * use a timer to try again later when necessary.
	 * -DaveM
	 */
	timer_setup(&lp->multicast_timer, lance_set_multicast_retry, 0);

	if (register_netdev(dev)) {
		printk(KERN_ERR "SunLance: Cannot register device.\n");
		goto fail;
	}

	platform_set_drvdata(op, lp);

	printk(KERN_INFO "%s: LANCE %pM\n",
	       dev->name, dev->dev_addr);

	return 0;

fail:
	lance_free_hwresources(lp);
	free_netdev(dev);
	return -ENODEV;
}

/* Platform probe entry: decide the hardware flavor by looking at the
 * parent node name ("ledma" = DMA engine present, "lebuffer" = PIO
 * descriptor memory present, anything else = plain DVMA).
 */
static int sunlance_sbus_probe(struct platform_device *op)
{
	struct platform_device *parent = to_platform_device(op->dev.parent);
	struct device_node *parent_dp = parent->dev.of_node;
	int err;

	if (of_node_name_eq(parent_dp, "ledma")) {
		err = sparc_lance_probe_one(op, parent, NULL);
	} else if (of_node_name_eq(parent_dp, "lebuffer")) {
		err = sparc_lance_probe_one(op, NULL, parent);
	} else
		err = sparc_lance_probe_one(op, NULL, NULL);

	return err;
}

/* Platform remove: unregister first so no traffic is in flight, then
 * release mappings/DMA memory, then free the netdev itself.
 */
static void sunlance_sbus_remove(struct platform_device *op)
{
	struct lance_private *lp = platform_get_drvdata(op);
	struct net_device *net_dev = lp->dev;

	unregister_netdev(net_dev);

	lance_free_hwresources(lp);

	free_netdev(net_dev);
}

static const struct of_device_id sunlance_sbus_match[] = {
	{
		.name = "le",
	},
	{},
};

MODULE_DEVICE_TABLE(of, sunlance_sbus_match);

static struct platform_driver sunlance_sbus_driver = {
	.driver = {
		.name = "sunlance",
		.of_match_table = sunlance_sbus_match,
	},
	.probe		= sunlance_sbus_probe,
	.remove		= sunlance_sbus_remove,
};

module_platform_driver(sunlance_sbus_driver);
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#ifndef _MLXSW_SPECTRUM_MCROUTER_TCAM_H
#define _MLXSW_SPECTRUM_MCROUTER_TCAM_H

#include "spectrum.h"
#include "spectrum_mr.h"

/* TCAM-backed implementation of the multicast router operations
 * consumed by the common spectrum_mr core (see struct mlxsw_sp_mr_ops
 * in spectrum_mr.h); the definition lives in the matching .c file.
 */
extern const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops;

#endif
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#ifndef HINIC_RX_H
#define HINIC_RX_H

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/interrupt.h>

#include "hinic_hw_qp.h"

/* RX checksum offload control/status bits */
#define HINIC_RX_CSUM_OFFLOAD_EN	0xFFF
#define HINIC_RX_CSUM_HW_CHECK_NONE	BIT(7)
#define HINIC_RX_CSUM_IPSU_OTHER_ERR	BIT(8)

/* Per-RX-queue software counters; readers use syncp for a consistent
 * 64-bit snapshot on 32-bit platforms.
 */
struct hinic_rxq_stats {
	u64 pkts;		/* packets received */
	u64 bytes;		/* bytes received */
	u64 errors;		/* total RX errors */
	u64 csum_errors;	/* HW checksum validation failures */
	u64 other_errors;	/* non-checksum RX errors */
	u64 alloc_skb_err;	/* skb allocation failures on refill */
	struct u64_stats_sync syncp;
};

/* Software state for one receive queue: binds a HW RQ to a netdev and
 * its NAPI context.
 */
struct hinic_rxq {
	struct net_device *netdev;
	struct hinic_rq *rq;		/* underlying hardware receive queue */

	struct hinic_rxq_stats rxq_stats;

	char *irq_name;			/* name registered for the RQ interrupt */
	u16 buf_len;			/* RX buffer size in bytes */
	u32 rx_buff_shift;		/* log2 of buffer size — TODO confirm against users */

	struct napi_struct napi;
};

/* Snapshot rxq counters into *stats. */
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);

/* Set up @rxq on top of hardware queue @rq; returns 0 or -errno. */
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
		   struct net_device *netdev);

/* Tear down a queue previously set up with hinic_init_rxq(). */
void hinic_clean_rxq(struct hinic_rxq *rxq);

#endif
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2016, Avago Technologies */ #ifndef _NVME_FC_TRANSPORT_H #define _NVME_FC_TRANSPORT_H 1 /* * Common definitions between the nvme_fc (host) transport and * nvmet_fc (target) transport implementation. */ /* * ****************** FC-NVME LS HANDLING ****************** */ union nvmefc_ls_requests { struct fcnvme_ls_rqst_w0 w0; struct fcnvme_ls_cr_assoc_rqst rq_cr_assoc; struct fcnvme_ls_cr_conn_rqst rq_cr_conn; struct fcnvme_ls_disconnect_assoc_rqst rq_dis_assoc; struct fcnvme_ls_disconnect_conn_rqst rq_dis_conn; } __aligned(128); /* alignment for other things alloc'd with */ union nvmefc_ls_responses { struct fcnvme_ls_rjt rsp_rjt; struct fcnvme_ls_cr_assoc_acc rsp_cr_assoc; struct fcnvme_ls_cr_conn_acc rsp_cr_conn; struct fcnvme_ls_disconnect_assoc_acc rsp_dis_assoc; struct fcnvme_ls_disconnect_conn_acc rsp_dis_conn; } __aligned(128); /* alignment for other things alloc'd with */ static inline void nvme_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd) { struct fcnvme_ls_acc_hdr *acc = buf; acc->w0.ls_cmd = ls_cmd; acc->desc_list_len = desc_len; acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST); acc->rqst.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)); acc->rqst.w0.ls_cmd = rqst_ls_cmd; } static inline int nvme_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd, u8 reason, u8 explanation, u8 vendor) { struct fcnvme_ls_rjt *rjt = buf; nvme_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST, fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)), ls_cmd); rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT); rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt)); rjt->rjt.reason_code = reason; rjt->rjt.reason_explanation = explanation; rjt->rjt.vendor = vendor; return sizeof(struct fcnvme_ls_rjt); } /* Validation Error indexes into the string table below */ enum { VERR_NO_ERROR = 0, VERR_CR_ASSOC_LEN = 1, VERR_CR_ASSOC_RQST_LEN = 2, VERR_CR_ASSOC_CMD = 3, 
VERR_CR_ASSOC_CMD_LEN = 4, VERR_ERSP_RATIO = 5, VERR_ASSOC_ALLOC_FAIL = 6, VERR_QUEUE_ALLOC_FAIL = 7, VERR_CR_CONN_LEN = 8, VERR_CR_CONN_RQST_LEN = 9, VERR_ASSOC_ID = 10, VERR_ASSOC_ID_LEN = 11, VERR_NO_ASSOC = 12, VERR_CONN_ID = 13, VERR_CONN_ID_LEN = 14, VERR_INVAL_CONN = 15, VERR_CR_CONN_CMD = 16, VERR_CR_CONN_CMD_LEN = 17, VERR_DISCONN_LEN = 18, VERR_DISCONN_RQST_LEN = 19, VERR_DISCONN_CMD = 20, VERR_DISCONN_CMD_LEN = 21, VERR_DISCONN_SCOPE = 22, VERR_RS_LEN = 23, VERR_RS_RQST_LEN = 24, VERR_RS_CMD = 25, VERR_RS_CMD_LEN = 26, VERR_RS_RCTL = 27, VERR_RS_RO = 28, VERR_LSACC = 29, VERR_LSDESC_RQST = 30, VERR_LSDESC_RQST_LEN = 31, VERR_CR_ASSOC = 32, VERR_CR_ASSOC_ACC_LEN = 33, VERR_CR_CONN = 34, VERR_CR_CONN_ACC_LEN = 35, VERR_DISCONN = 36, VERR_DISCONN_ACC_LEN = 37, }; static char *validation_errors[] = { "OK", "Bad CR_ASSOC Length", "Bad CR_ASSOC Rqst Length", "Not CR_ASSOC Cmd", "Bad CR_ASSOC Cmd Length", "Bad Ersp Ratio", "Association Allocation Failed", "Queue Allocation Failed", "Bad CR_CONN Length", "Bad CR_CONN Rqst Length", "Not Association ID", "Bad Association ID Length", "No Association", "Not Connection ID", "Bad Connection ID Length", "Invalid Connection ID", "Not CR_CONN Cmd", "Bad CR_CONN Cmd Length", "Bad DISCONN Length", "Bad DISCONN Rqst Length", "Not DISCONN Cmd", "Bad DISCONN Cmd Length", "Bad Disconnect Scope", "Bad RS Length", "Bad RS Rqst Length", "Not RS Cmd", "Bad RS Cmd Length", "Bad RS R_CTL", "Bad RS Relative Offset", "Not LS_ACC", "Not LSDESC_RQST", "Bad LSDESC_RQST Length", "Not CR_ASSOC Rqst", "Bad CR_ASSOC ACC Length", "Not CR_CONN Rqst", "Bad CR_CONN ACC Length", "Not Disconnect Rqst", "Bad Disconnect ACC Length", }; #define NVME_FC_LAST_LS_CMD_VALUE FCNVME_LS_DISCONNECT_CONN static char *nvmefc_ls_names[] = { "Reserved (0)", "RJT (1)", "ACC (2)", "Create Association", "Create Connection", "Disconnect Association", "Disconnect Connection", }; static inline void nvmefc_fmt_lsreq_discon_assoc(struct nvmefc_ls_req *lsreq, struct 
fcnvme_ls_disconnect_assoc_rqst *discon_rqst, struct fcnvme_ls_disconnect_assoc_acc *discon_acc, u64 association_id) { lsreq->rqstaddr = discon_rqst; lsreq->rqstlen = sizeof(*discon_rqst); lsreq->rspaddr = discon_acc; lsreq->rsplen = sizeof(*discon_acc); lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT_ASSOC; discon_rqst->desc_list_len = cpu_to_be32( sizeof(struct fcnvme_lsdesc_assoc_id) + sizeof(struct fcnvme_lsdesc_disconn_cmd)); discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); discon_rqst->associd.desc_len = fcnvme_lsdesc_len( sizeof(struct fcnvme_lsdesc_assoc_id)); discon_rqst->associd.association_id = cpu_to_be64(association_id); discon_rqst->discon_cmd.desc_tag = cpu_to_be32( FCNVME_LSDESC_DISCONN_CMD); discon_rqst->discon_cmd.desc_len = fcnvme_lsdesc_len( sizeof(struct fcnvme_lsdesc_disconn_cmd)); } static inline int nvmefc_vldt_lsreq_discon_assoc(u32 rqstlen, struct fcnvme_ls_disconnect_assoc_rqst *rqst) { int ret = 0; if (rqstlen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst)) ret = VERR_DISCONN_LEN; else if (rqst->desc_list_len != fcnvme_lsdesc_len( sizeof(struct fcnvme_ls_disconnect_assoc_rqst))) ret = VERR_DISCONN_RQST_LEN; else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) ret = VERR_ASSOC_ID; else if (rqst->associd.desc_len != fcnvme_lsdesc_len( sizeof(struct fcnvme_lsdesc_assoc_id))) ret = VERR_ASSOC_ID_LEN; else if (rqst->discon_cmd.desc_tag != cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD)) ret = VERR_DISCONN_CMD; else if (rqst->discon_cmd.desc_len != fcnvme_lsdesc_len( sizeof(struct fcnvme_lsdesc_disconn_cmd))) ret = VERR_DISCONN_CMD_LEN; /* * As the standard changed on the LS, check if old format and scope * something other than Association (e.g. 0). */ else if (rqst->discon_cmd.rsvd8[0]) ret = VERR_DISCONN_SCOPE; return ret; } #endif /* _NVME_FC_TRANSPORT_H */
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef _OCTEP_VF_TX_H_
#define _OCTEP_VF_TX_H_

/* Return codes for the transmit path */
#define IQ_SEND_OK          0
#define IQ_SEND_STOP        1
#define IQ_SEND_FAILED     -1

/* Classification of buffers tracked per TX descriptor */
#define TX_BUFTYPE_NONE          0
#define TX_BUFTYPE_NET           1
#define TX_BUFTYPE_NET_SG        2
#define NUM_TX_BUFTYPES          3

/* Hardware format for Scatter/Gather list
 *
 * 63      48|47     32|31     16|15       0
 * -----------------------------------------
 * |  Len 0  |  Len 1  |  Len 2  |  Len 3  |
 * -----------------------------------------
 * |                Ptr 0                  |
 * -----------------------------------------
 * |                Ptr 1                  |
 * -----------------------------------------
 * |                Ptr 2                  |
 * -----------------------------------------
 * |                Ptr 3                  |
 * -----------------------------------------
 */
struct octep_vf_tx_sglist_desc {
	u16 len[4];
	dma_addr_t dma_ptr[4];
};

/* Layout is consumed directly by hardware; must stay exactly 40 bytes. */
static_assert(sizeof(struct octep_vf_tx_sglist_desc) == 40);

/* Each Scatter/Gather entry sent to hardware holds four pointers.
 * So, number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1'
 * is for main skb which also goes as a gather buffer to Octeon hardware.
 * To allocate sufficient SGLIST entries for a packet with max fragments,
 * align by adding 3 before calculating max SGLIST entries per packet.
 */
#define OCTEP_VF_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
#define OCTEP_VF_SGLIST_SIZE_PER_PKT \
	(OCTEP_VF_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_vf_tx_sglist_desc))

/* Driver-side bookkeeping for one in-flight TX descriptor. */
struct octep_vf_tx_buffer {
	/* skb pending completion for this descriptor */
	struct sk_buff *skb;

	/* DMA address of the (linear) packet data */
	dma_addr_t dma;

	/* Scatter/Gather list used when the skb is fragmented */
	struct octep_vf_tx_sglist_desc *sglist;

	/* DMA address of the S/G list above */
	dma_addr_t sglist_dma;

	/* non-zero when the S/G (gather) path was used for this buffer */
	u8 gather;
};

#define OCTEP_VF_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_vf_tx_buffer))

/* VF Hardware interface Tx statistics */
struct octep_vf_iface_tx_stats {
	/* Total frames sent on the interface */
	u64 pkts;

	/* Total octets sent on the interface */
	u64 octs;

	/* Packets sent to a broadcast DMAC */
	u64 bcst;

	/* Packets sent to the multicast DMAC */
	u64 mcst;

	/* Packets dropped */
	u64 dropped;

	/* Reserved */
	u64 reserved[13];
};

/* VF Input Queue statistics */
struct octep_vf_iq_stats {
	/* Instructions posted to this queue. */
	u64 instr_posted;

	/* Instructions copied by hardware for processing. */
	u64 instr_completed;

	/* Instructions that could not be processed. */
	u64 instr_dropped;

	/* Bytes sent through this queue. */
	u64 bytes_sent;

	/* Gather entries sent through this queue. */
	u64 sgentry_sent;

	/* Number of transmit failures due to TX_BUSY */
	u64 tx_busy;

	/* Number of times the queue is restarted */
	u64 restart_cnt;
};

/* The instruction (input) queue.
 * The input queue is used to post raw (instruction) mode data or packet
 * data to Octeon device from the host. Each input queue (up to 4) for
 * an Octeon device has one such structure to represent it.
 */
struct octep_vf_iq {
	/* Queue number (index within the device's input queues) */
	u32 q_no;

	struct octep_vf_device *octep_vf_dev;
	struct net_device *netdev;
	struct device *dev;
	struct netdev_queue *netdev_q;

	/* Index in input ring where driver should write the next packet */
	u16 host_write_index;

	/* Index in input ring where Octeon is expected to read next packet */
	u16 octep_vf_read_index;

	/* This index aids in finding the window in the queue where Octeon
	 * has read the commands.
	 */
	u16 flush_index;

	/* Statistics for this input queue. */
	struct octep_vf_iq_stats stats;

	/* Pointer to the Virtual Base addr of the input ring. */
	struct octep_vf_tx_desc_hw *desc_ring;

	/* DMA mapped base address of the input descriptor ring. */
	dma_addr_t desc_ring_dma;

	/* Info of Tx buffers pending completion. */
	struct octep_vf_tx_buffer *buff_info;

	/* Base pointer to Scatter/Gather lists for all ring descriptors. */
	struct octep_vf_tx_sglist_desc *sglist;

	/* DMA mapped addr of Scatter Gather Lists */
	dma_addr_t sglist_dma;

	/* Octeon doorbell register for the ring. */
	u8 __iomem *doorbell_reg;

	/* Octeon instruction count register for this ring. */
	u8 __iomem *inst_cnt_reg;

	/* interrupt level register for this ring */
	u8 __iomem *intr_lvl_reg;

	/* Maximum no. of instructions in this queue. */
	u32 max_count;

	/* max_count - 1; used for cheap ring-index wrap-around */
	u32 ring_size_mask;

	u32 pkt_in_done;
	u32 pkts_processed;

	u32 status;

	/* Number of instructions pending to be posted to Octeon. */
	u32 fill_cnt;

	/* The max. number of instructions that can be held pending by the
	 * driver before ringing doorbell.
	 */
	u32 fill_threshold;
};

/* Hardware Tx Instruction Header */
struct octep_vf_instr_hdr {
	/* Data Len */
	u64 tlen:16;

	/* Reserved */
	u64 rsvd:20;

	/* PKIND for SDP */
	u64 pkind:6;

	/* Front Data size */
	u64 fsz:6;

	/* No. of entries in gather list */
	u64 gsz:14;

	/* Gather indicator 1=gather */
	u64 gather:1;

	/* Reserved3 */
	u64 reserved3:1;
};

/* Bit-field layout is consumed by hardware; must pack into one u64. */
static_assert(sizeof(struct octep_vf_instr_hdr) == 8);

/* Tx offload flags */
#define OCTEP_VF_TX_OFFLOAD_VLAN_INSERT	BIT(0)
#define OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM	BIT(1)
#define OCTEP_VF_TX_OFFLOAD_UDP_CKSUM	BIT(2)
#define OCTEP_VF_TX_OFFLOAD_TCP_CKSUM	BIT(3)
#define OCTEP_VF_TX_OFFLOAD_SCTP_CKSUM	BIT(4)
#define OCTEP_VF_TX_OFFLOAD_TCP_TSO	BIT(5)
#define OCTEP_VF_TX_OFFLOAD_UDP_TSO	BIT(6)

#define OCTEP_VF_TX_OFFLOAD_CKSUM	(OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
					 OCTEP_VF_TX_OFFLOAD_UDP_CKSUM | \
					 OCTEP_VF_TX_OFFLOAD_TCP_CKSUM)

#define OCTEP_VF_TX_OFFLOAD_TSO		(OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
					 OCTEP_VF_TX_OFFLOAD_UDP_TSO)

#define OCTEP_VF_TX_IP_CSUM(flags)	((flags) & \
					 (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
					  OCTEP_VF_TX_OFFLOAD_TCP_CKSUM | \
					  OCTEP_VF_TX_OFFLOAD_UDP_CKSUM))

#define OCTEP_VF_TX_TSO(flags)		((flags) & \
					 (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
					  OCTEP_VF_TX_OFFLOAD_UDP_TSO))

/* Per-packet TX metadata passed to firmware as part of Front Data. */
struct tx_mdata {
	/* offload flags */
	u16 ol_flags;

	/* gso size */
	u16 gso_size;

	/* gso flags */
	u16 gso_segs;

	/* reserved */
	u16 rsvd1;

	/* reserved */
	u64 rsvd2;
};

/* Layout is consumed by hardware/firmware; must stay exactly 16 bytes. */
static_assert(sizeof(struct tx_mdata) == 16);

/* 64-byte Tx instruction format.
 * Format of instruction for a 64-byte mode input queue.
 *
 * only first 16-bytes (dptr and ih) are mandatory; rest are optional
 * and filled by the driver based on firmware/hardware capabilities.
 * These optional headers together called Front Data and its size is
 * described by ih->fsz.
 */
struct octep_vf_tx_desc_hw {
	/* Pointer where the input data is available. */
	u64 dptr;

	/* Instruction Header. */
	union {
		struct octep_vf_instr_hdr ih;
		u64 ih64;
	};

	union {
		u64 txm64[2];
		struct tx_mdata txm;
	};

	/* Additional headers available in a 64-byte instruction. */
	u64 exhdr[4];
};

/* One full descriptor slot; hardware expects exactly 64 bytes. */
static_assert(sizeof(struct octep_vf_tx_desc_hw) == 64);

#define OCTEP_VF_IQ_DESC_SIZE (sizeof(struct octep_vf_tx_desc_hw))

#endif /* _OCTEP_VF_TX_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DT_BINDINGS_POWER_RK3399_POWER_H__
#define __DT_BINDINGS_POWER_RK3399_POWER_H__

/* Power-domain identifiers for the Rockchip RK3399 SoC, used by device
 * tree sources and the power-domain driver.  Grouped below by the
 * voltage domain (VD_*) each power domain belongs to.
 */

/* VD_CORE_L */
#define RK3399_PD_A53_L0	0
#define RK3399_PD_A53_L1	1
#define RK3399_PD_A53_L2	2
#define RK3399_PD_A53_L3	3
#define RK3399_PD_SCU_L		4

/* VD_CORE_B */
#define RK3399_PD_A72_B0	5
#define RK3399_PD_A72_B1	6
#define RK3399_PD_SCU_B		7

/* VD_LOGIC */
#define RK3399_PD_TCPD0		8
#define RK3399_PD_TCPD1		9
#define RK3399_PD_CCI		10
#define RK3399_PD_CCI0		11
#define RK3399_PD_CCI1		12
#define RK3399_PD_PERILP	13
#define RK3399_PD_PERIHP	14
#define RK3399_PD_VIO		15
#define RK3399_PD_VO		16
#define RK3399_PD_VOPB		17
#define RK3399_PD_VOPL		18
#define RK3399_PD_ISP0		19
#define RK3399_PD_ISP1		20
#define RK3399_PD_HDCP		21
#define RK3399_PD_GMAC		22
#define RK3399_PD_EMMC		23
#define RK3399_PD_USB3		24
#define RK3399_PD_EDP		25
#define RK3399_PD_GIC		26
#define RK3399_PD_SD		27
#define RK3399_PD_SDIOAUDIO	28
#define RK3399_PD_ALIVE		29

/* VD_CENTER */
#define RK3399_PD_CENTER	30
#define RK3399_PD_VCODEC	31
#define RK3399_PD_VDU		32
#define RK3399_PD_RGA		33
#define RK3399_PD_IEP		34

/* VD_GPU */
#define RK3399_PD_GPU		35

/* VD_PMU */
#define RK3399_PD_PMU		36

#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright (c) 2021, Microsoft Corporation. */ #include <linux/inetdevice.h> #include <linux/etherdevice.h> #include <linux/mm.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <net/xdp.h> #include <net/mana/mana.h> void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev) { u16 txq_idx = skb_get_queue_mapping(skb); struct netdev_queue *ndevtxq; int rc; __skb_push(skb, ETH_HLEN); ndevtxq = netdev_get_tx_queue(ndev, txq_idx); __netif_tx_lock(ndevtxq, smp_processor_id()); rc = mana_start_xmit(skb, ndev); __netif_tx_unlock(ndevtxq); if (dev_xmit_complete(rc)) return; dev_kfree_skb_any(skb); ndev->stats.tx_dropped++; } static int mana_xdp_xmit_fm(struct net_device *ndev, struct xdp_frame *frame, u16 q_idx) { struct sk_buff *skb; skb = xdp_build_skb_from_frame(frame, ndev); if (unlikely(!skb)) return -ENOMEM; skb_set_queue_mapping(skb, q_idx); mana_xdp_tx(skb, ndev); return 0; } int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames, u32 flags) { struct mana_port_context *apc = netdev_priv(ndev); struct mana_stats_tx *tx_stats; int i, count = 0; u16 q_idx; if (unlikely(!apc->port_is_up)) return 0; q_idx = smp_processor_id() % ndev->real_num_tx_queues; for (i = 0; i < n; i++) { if (mana_xdp_xmit_fm(ndev, frames[i], q_idx)) break; count++; } tx_stats = &apc->tx_qp[q_idx].txq.stats; u64_stats_update_begin(&tx_stats->syncp); tx_stats->xdp_xmit += count; u64_stats_update_end(&tx_stats->syncp); return count; } u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq, struct xdp_buff *xdp, void *buf_va, uint pkt_len) { struct mana_stats_rx *rx_stats; struct bpf_prog *prog; u32 act = XDP_PASS; rcu_read_lock(); prog = rcu_dereference(rxq->bpf_prog); if (!prog) goto out; xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq); xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false); act = bpf_prog_run_xdp(prog, xdp); rx_stats = &rxq->stats; switch (act) { case XDP_PASS: case 
XDP_TX: case XDP_DROP: break; case XDP_REDIRECT: rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog); if (!rxq->xdp_rc) { rxq->xdp_flush = true; u64_stats_update_begin(&rx_stats->syncp); rx_stats->packets++; rx_stats->bytes += pkt_len; rx_stats->xdp_redirect++; u64_stats_update_end(&rx_stats->syncp); break; } fallthrough; case XDP_ABORTED: trace_xdp_exception(ndev, prog, act); break; default: bpf_warn_invalid_xdp_action(ndev, prog, act); } out: rcu_read_unlock(); return act; } struct bpf_prog *mana_xdp_get(struct mana_port_context *apc) { ASSERT_RTNL(); return apc->bpf_prog; } static struct bpf_prog *mana_chn_xdp_get(struct mana_port_context *apc) { return rtnl_dereference(apc->rxqs[0]->bpf_prog); } /* Set xdp program on channels */ void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog) { struct bpf_prog *old_prog = mana_chn_xdp_get(apc); unsigned int num_queues = apc->num_queues; int i; ASSERT_RTNL(); if (old_prog == prog) return; if (prog) bpf_prog_add(prog, num_queues); for (i = 0; i < num_queues; i++) rcu_assign_pointer(apc->rxqs[i]->bpf_prog, prog); if (old_prog) for (i = 0; i < num_queues; i++) bpf_prog_put(old_prog); } static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { struct mana_port_context *apc = netdev_priv(ndev); struct bpf_prog *old_prog; struct gdma_context *gc; gc = apc->ac->gdma_dev->gdma_context; old_prog = mana_xdp_get(apc); if (!old_prog && !prog) return 0; if (prog && ndev->mtu > MANA_XDP_MTU_MAX) { netdev_err(ndev, "XDP: mtu:%u too large, mtu_max:%lu\n", ndev->mtu, MANA_XDP_MTU_MAX); NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large"); return -EOPNOTSUPP; } /* One refcnt of the prog is hold by the caller already, so * don't increase refcnt for this one. 
*/ apc->bpf_prog = prog; if (old_prog) bpf_prog_put(old_prog); if (apc->port_is_up) mana_chn_setxdp(apc, prog); if (prog) ndev->max_mtu = MANA_XDP_MTU_MAX; else ndev->max_mtu = gc->adapter_mtu - ETH_HLEN; return 0; } int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf) { struct netlink_ext_ack *extack = bpf->extack; int ret; switch (bpf->command) { case XDP_SETUP_PROG: return mana_xdp_set(ndev, bpf->prog, extack); default: return -EOPNOTSUPP; } return ret; }
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2023 Loongson Technology Corporation Limited */ #include <drm/drm_debugfs.h> #include "lsdc_benchmark.h" #include "lsdc_drv.h" #include "lsdc_gem.h" #include "lsdc_ttm.h" typedef void (*lsdc_copy_proc_t)(struct lsdc_bo *src_bo, struct lsdc_bo *dst_bo, unsigned int size, int n); static void lsdc_copy_gtt_to_vram_cpu(struct lsdc_bo *src_bo, struct lsdc_bo *dst_bo, unsigned int size, int n) { lsdc_bo_kmap(src_bo); lsdc_bo_kmap(dst_bo); while (n--) memcpy_toio(dst_bo->kptr, src_bo->kptr, size); lsdc_bo_kunmap(src_bo); lsdc_bo_kunmap(dst_bo); } static void lsdc_copy_vram_to_gtt_cpu(struct lsdc_bo *src_bo, struct lsdc_bo *dst_bo, unsigned int size, int n) { lsdc_bo_kmap(src_bo); lsdc_bo_kmap(dst_bo); while (n--) memcpy_fromio(dst_bo->kptr, src_bo->kptr, size); lsdc_bo_kunmap(src_bo); lsdc_bo_kunmap(dst_bo); } static void lsdc_copy_gtt_to_gtt_cpu(struct lsdc_bo *src_bo, struct lsdc_bo *dst_bo, unsigned int size, int n) { lsdc_bo_kmap(src_bo); lsdc_bo_kmap(dst_bo); while (n--) memcpy(dst_bo->kptr, src_bo->kptr, size); lsdc_bo_kunmap(src_bo); lsdc_bo_kunmap(dst_bo); } static void lsdc_benchmark_copy(struct lsdc_device *ldev, unsigned int size, unsigned int n, u32 src_domain, u32 dst_domain, lsdc_copy_proc_t copy_proc, struct drm_printer *p) { struct drm_device *ddev = &ldev->base; struct lsdc_bo *src_bo; struct lsdc_bo *dst_bo; unsigned long start_jiffies; unsigned long end_jiffies; unsigned int throughput; unsigned int time; src_bo = lsdc_bo_create_kernel_pinned(ddev, src_domain, size); dst_bo = lsdc_bo_create_kernel_pinned(ddev, dst_domain, size); start_jiffies = jiffies; copy_proc(src_bo, dst_bo, size, n); end_jiffies = jiffies; lsdc_bo_free_kernel_pinned(src_bo); lsdc_bo_free_kernel_pinned(dst_bo); time = jiffies_to_msecs(end_jiffies - start_jiffies); throughput = (n * (size >> 10)) / time; drm_printf(p, "Copy bo of %uKiB %u times from %s to %s in %ums: %uMB/s\n", size >> 10, n, lsdc_domain_to_str(src_domain), 
lsdc_domain_to_str(dst_domain), time, throughput); } int lsdc_show_benchmark_copy(struct lsdc_device *ldev, struct drm_printer *p) { unsigned int buffer_size = 1920 * 1080 * 4; unsigned int iteration = 60; lsdc_benchmark_copy(ldev, buffer_size, iteration, LSDC_GEM_DOMAIN_GTT, LSDC_GEM_DOMAIN_GTT, lsdc_copy_gtt_to_gtt_cpu, p); lsdc_benchmark_copy(ldev, buffer_size, iteration, LSDC_GEM_DOMAIN_GTT, LSDC_GEM_DOMAIN_VRAM, lsdc_copy_gtt_to_vram_cpu, p); lsdc_benchmark_copy(ldev, buffer_size, iteration, LSDC_GEM_DOMAIN_VRAM, LSDC_GEM_DOMAIN_GTT, lsdc_copy_vram_to_gtt_cpu, p); return 0; }
// SPDX-License-Identifier: GPL-2.0
/*
 * Device tree for the LG Nexus 5 (hammerhead), based on the Qualcomm
 * MSM8974 SoC with PM8841 + PM8941 PMICs.
 */
#include "qcom-msm8974.dtsi"
#include "pm8841.dtsi"
#include "pm8941.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/leds/common.h>
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>

/ {
	model = "LGE MSM 8974 HAMMERHEAD";
	compatible = "lge,hammerhead", "qcom,msm8974";
	chassis-type = "handset";

	aliases {
		serial0 = &blsp1_uart1;
		serial1 = &blsp2_uart4;
	};

	chosen {
		stdout-path = "serial0:115200n8";
	};

	/* Volume keys are wired to PMIC GPIOs, active low. */
	gpio-keys {
		compatible = "gpio-keys";

		pinctrl-names = "default";
		pinctrl-0 = <&gpio_keys_pin_a>;

		key-volume-up {
			label = "volume_up";
			gpios = <&pm8941_gpios 2 GPIO_ACTIVE_LOW>;
			linux,input-type = <1>;
			linux,code = <KEY_VOLUMEUP>;
		};

		key-volume-down {
			label = "volume_down";
			gpios = <&pm8941_gpios 3 GPIO_ACTIVE_LOW>;
			linux,input-type = <1>;
			linux,code = <KEY_VOLUMEDOWN>;
		};
	};

	/* CAMSS GP1 clock used as a PWM source for the vibrator. */
	clk_pwm: pwm {
		compatible = "clk-pwm";
		clocks = <&mmcc CAMSS_GP1_CLK>;

		pinctrl-0 = <&vibrator_pin>;
		pinctrl-names = "default";

		#pwm-cells = <2>;
	};

	vibrator {
		compatible = "pwm-vibrator";

		pwms = <&clk_pwm 0 100000>;
		pwm-names = "enable";

		vcc-supply = <&pm8941_l19>;
		enable-gpios = <&tlmm 60 GPIO_ACTIVE_HIGH>;
	};

	vreg_boost: vreg-boost {
		compatible = "regulator-fixed";

		regulator-name = "vreg-boost";
		regulator-min-microvolt = <3150000>;
		regulator-max-microvolt = <3150000>;

		regulator-always-on;
		regulator-boot-on;

		gpio = <&pm8941_gpios 21 GPIO_ACTIVE_HIGH>;
		enable-active-high;

		pinctrl-names = "default";
		pinctrl-0 = <&boost_bypass_n_pin>;
	};

	vreg_vph_pwr: vreg-vph-pwr {
		compatible = "regulator-fixed";
		regulator-name = "vph-pwr";

		regulator-min-microvolt = <3600000>;
		regulator-max-microvolt = <3600000>;

		regulator-always-on;
	};

	/* WLAN power, gated by a TLMM GPIO. */
	vreg_wlan: wlan-regulator {
		compatible = "regulator-fixed";

		regulator-name = "wl-reg";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;

		gpio = <&tlmm 26 GPIO_ACTIVE_HIGH>;
		enable-active-high;

		pinctrl-names = "default";
		pinctrl-0 = <&wlan_regulator_pin>;
	};
};

/* I2C1: charger and fuel gauge. */
&blsp1_i2c1 {
	status = "okay";
	clock-frequency = <100000>;

	charger: bq24192@6b {
		compatible = "ti,bq24192";
		reg = <0x6b>;
		interrupts-extended = <&spmi_bus 0 0xd5 0 IRQ_TYPE_EDGE_FALLING>;

		omit-battery-class;

		usb_otg_vbus: usb-otg-vbus { };
	};

	fuelgauge: max17048@36 {
		compatible = "maxim,max17048";
		reg = <0x36>;

		maxim,double-soc;
		maxim,rcomp = /bits/ 8 <0x4d>;

		interrupt-parent = <&tlmm>;
		interrupts = <9 IRQ_TYPE_LEVEL_LOW>;

		pinctrl-names = "default";
		pinctrl-0 = <&fuelgauge_pin>;

		maxim,alert-low-soc-level = <2>;
	};
};

/* I2C2: Synaptics RMI4 touchscreen. */
&blsp1_i2c2 {
	status = "okay";
	clock-frequency = <355000>;

	synaptics@70 {
		compatible = "syna,rmi4-i2c";
		reg = <0x70>;

		interrupts-extended = <&tlmm 5 IRQ_TYPE_EDGE_FALLING>;
		vdd-supply = <&pm8941_l22>;
		vio-supply = <&pm8941_lvs3>;

		pinctrl-names = "default";
		pinctrl-0 = <&touch_pin>;

		#address-cells = <1>;
		#size-cells = <0>;

		rmi4-f01@1 {
			reg = <0x1>;
			syna,nosleep-mode = <1>;
		};

		rmi4-f12@12 {
			reg = <0x12>;
			syna,sensor-type = <1>;
		};
	};
};

/* I2C3: ambient light / proximity sensor. */
&blsp1_i2c3 {
	status = "okay";
	clock-frequency = <100000>;

	sensor@39 {
		compatible = "avago,apds9930";
		reg = <0x39>;
		interrupts-extended = <&tlmm 61 IRQ_TYPE_EDGE_FALLING>;
		vdd-supply = <&pm8941_l17>;
		vddio-supply = <&pm8941_lvs1>;
		led-max-microamp = <100000>;
		amstaos,proximity-diodes = <0>;
	};
};

/* I2C5: LCD backlight controller. */
&blsp2_i2c5 {
	status = "okay";
	clock-frequency = <355000>;

	backlight: led-controller@38 {
		compatible = "ti,lm3630a";
		status = "okay";
		reg = <0x38>;

		#address-cells = <1>;
		#size-cells = <0>;

		led@0 {
			reg = <0>;
			led-sources = <0 1>;
			label = "lcd-backlight";
			default-brightness = <200>;
		};
	};
};

/* I2C6: IMU with magnetometer and pressure sensor behind its I2C gate. */
&blsp2_i2c6 {
	status = "okay";
	clock-frequency = <100000>;

	mpu6515@68 {
		compatible = "invensense,mpu6515";
		reg = <0x68>;
		interrupts-extended = <&tlmm 73 IRQ_TYPE_EDGE_FALLING>;
		vddio-supply = <&pm8941_lvs1>;

		pinctrl-names = "default";
		pinctrl-0 = <&mpu6515_pin>;

		mount-matrix = "0", "-1", "0",
			       "-1", "0", "0",
			       "0", "0", "1";

		i2c-gate {
			#address-cells = <1>;
			#size-cells = <0>;

			ak8963@f {
				compatible = "asahi-kasei,ak8963";
				reg = <0x0f>;
				gpios = <&tlmm 67 GPIO_ACTIVE_HIGH>;
				vid-supply = <&pm8941_lvs1>;
				vdd-supply = <&pm8941_l17>;
			};

			bmp280@76 {
				compatible = "bosch,bmp280";
				reg = <0x76>;
				vdda-supply = <&pm8941_lvs1>;
				vddd-supply = <&pm8941_l17>;
			};
		};
	};
};

&blsp1_uart1 {
	status = "okay";
};

/* UART4 carries the Broadcom Bluetooth HCI. */
&blsp2_uart4 {
	status = "okay";

	bluetooth {
		compatible = "brcm,bcm43438-bt";
		max-speed = <3000000>;

		pinctrl-names = "default";
		pinctrl-0 = <&bt_pin>;

		host-wakeup-gpios = <&tlmm 42 GPIO_ACTIVE_HIGH>;
		device-wakeup-gpios = <&tlmm 62 GPIO_ACTIVE_HIGH>;
		shutdown-gpios = <&tlmm 41 GPIO_ACTIVE_HIGH>;
	};
};

&mdss {
	status = "okay";
};

/* DSI panel. */
&mdss_dsi0 {
	status = "okay";

	vdda-supply = <&pm8941_l2>;
	vdd-supply = <&pm8941_lvs3>;
	vddio-supply = <&pm8941_l12>;

	panel: panel@0 {
		reg = <0>;
		compatible = "lg,acx467akm-7";

		backlight = <&backlight>;

		pinctrl-names = "default";
		pinctrl-0 = <&panel_pin>;

		port {
			panel_in: endpoint {
				remote-endpoint = <&mdss_dsi0_out>;
			};
		};
	};
};

&mdss_dsi0_out {
	remote-endpoint = <&panel_in>;
	data-lanes = <0 1 2 3>;
};

&mdss_dsi0_phy {
	status = "okay";
	vddio-supply = <&pm8941_l12>;
};

&pm8941_gpios {
	gpio_keys_pin_a: gpio-keys-active-state {
		pins = "gpio2", "gpio3";
		function = "normal";

		bias-pull-up;
		power-source = <PM8941_GPIO_S3>;
	};

	fuelgauge_pin: fuelgauge-int-state {
		pins = "gpio9";
		function = "normal";

		bias-disable;
		input-enable;
		power-source = <PM8941_GPIO_S3>;
	};

	wlan_sleep_clk_pin: wl-sleep-clk-state {
		pins = "gpio16";
		function = "func2";

		output-high;
		power-source = <PM8941_GPIO_S3>;
	};

	wlan_regulator_pin: wl-reg-active-state {
		pins = "gpio17";
		function = "normal";

		bias-disable;
		power-source = <PM8941_GPIO_S3>;
	};

	otg-hog {
		gpio-hog;
		gpios = <35 GPIO_ACTIVE_HIGH>;
		output-high;
		line-name = "otg-gpio";
	};
};

/* RGB notification LED driven by the PMIC LPG. */
&pm8941_lpg {
	status = "okay";

	qcom,power-source = <1>;

	multi-led {
		color = <LED_COLOR_ID_RGB>;
		function = LED_FUNCTION_STATUS;

		#address-cells = <1>;
		#size-cells = <0>;

		led@7 {
			reg = <7>;
			color = <LED_COLOR_ID_RED>;
		};

		led@6 {
			reg = <6>;
			color = <LED_COLOR_ID_GREEN>;
		};

		led@5 {
			reg = <5>;
			color = <LED_COLOR_ID_BLUE>;
		};
	};
};

&remoteproc_adsp {
	cx-supply = <&pm8841_s2>;
	status = "okay";
};

&remoteproc_mss {
	cx-supply = <&pm8841_s2>;
	mss-supply = <&pm8841_s3>;
	mx-supply = <&pm8841_s1>;
	pll-supply = <&pm8941_l12>;
	status = "okay";
};

/* RPM-managed regulators of both PMICs. */
&rpm_requests {
	regulators-0 {
		compatible = "qcom,rpm-pm8841-regulators";

		pm8841_s1: s1 {
			regulator-min-microvolt = <675000>;
			regulator-max-microvolt = <1050000>;
		};

		pm8841_s2: s2 {
			regulator-min-microvolt = <500000>;
			regulator-max-microvolt = <1050000>;
		};

		pm8841_s3: s3 {
			regulator-min-microvolt = <1050000>;
			regulator-max-microvolt = <1050000>;
		};

		pm8841_s4: s4 {
			regulator-min-microvolt = <815000>;
			regulator-max-microvolt = <900000>;
		};
	};

	regulators-1 {
		compatible = "qcom,rpm-pm8941-regulators";

		vdd_l1_l3-supply = <&pm8941_s1>;
		vdd_l2_lvs1_2_3-supply = <&pm8941_s3>;
		vdd_l4_l11-supply = <&pm8941_s1>;
		vdd_l5_l7-supply = <&pm8941_s2>;
		vdd_l6_l12_l14_l15-supply = <&pm8941_s2>;
		vdd_l8_l16_l18_l19-supply = <&vreg_vph_pwr>;
		vdd_l9_l10_l17_l22-supply = <&vreg_boost>;
		vdd_l13_l20_l23_l24-supply = <&vreg_boost>;
		vdd_l21-supply = <&vreg_boost>;

		pm8941_s1: s1 {
			regulator-min-microvolt = <1300000>;
			regulator-max-microvolt = <1300000>;
			regulator-always-on;
			regulator-boot-on;
		};

		pm8941_s2: s2 {
			regulator-min-microvolt = <2150000>;
			regulator-max-microvolt = <2150000>;
			regulator-boot-on;
		};

		pm8941_s3: s3 {
			regulator-min-microvolt = <1800000>;
			regulator-max-microvolt = <1800000>;
			regulator-always-on;
			regulator-boot-on;
		};

		pm8941_l1: l1 {
			regulator-min-microvolt = <1225000>;
			regulator-max-microvolt = <1225000>;
			regulator-always-on;
			regulator-boot-on;
		};

		pm8941_l2: l2 {
			regulator-min-microvolt = <1200000>;
			regulator-max-microvolt = <1200000>;
		};

		pm8941_l3: l3 {
			regulator-min-microvolt = <1225000>;
			regulator-max-microvolt = <1225000>;
		};

		pm8941_l4: l4 {
			regulator-min-microvolt = <1225000>;
			regulator-max-microvolt = <1225000>;
		};

		pm8941_l5: l5 {
			regulator-min-microvolt = <1800000>;
			regulator-max-microvolt = <1800000>;
		};

		pm8941_l6: l6 {
			regulator-min-microvolt = <1800000>;
			regulator-max-microvolt = <1800000>;
			regulator-boot-on;
		};

		pm8941_l7: l7 {
			regulator-min-microvolt = <1800000>;
			regulator-max-microvolt = <1800000>;
			regulator-boot-on;
		};

		pm8941_l8: l8 {
			regulator-min-microvolt = <1800000>;
			regulator-max-microvolt = <1800000>;
		};

		pm8941_l9: l9 {
			regulator-min-microvolt = <1800000>;
			regulator-max-microvolt = <2950000>;
		};

		pm8941_l10: l10 {
			regulator-min-microvolt = <1800000>;
			regulator-max-microvolt = <2950000>;
		};

		pm8941_l11: l11 {
			regulator-min-microvolt = <1300000>;
			regulator-max-microvolt = <1300000>;
		};

		pm8941_l12: l12 {
			regulator-min-microvolt = <1800000>;
			regulator-max-microvolt = <1800000>;
			regulator-always-on;
			regulator-boot-on;
		};

		pm8941_l13: l13 {
			regulator-min-microvolt = <1800000>;
			regulator-max-microvolt = <2950000>;
			regulator-boot-on;
		};

		pm8941_l14: l14 {
			regulator-min-microvolt = <1800000>;
			regulator-max-microvolt = <1800000>;
		};

		pm8941_l15: l15 {
			regulator-min-microvolt = <2050000>;
			regulator-max-microvolt = <2050000>;
		};

		pm8941_l16: l16 {
			regulator-min-microvolt = <2700000>;
			regulator-max-microvolt = <2700000>;
		};

		pm8941_l17: l17 {
			regulator-min-microvolt = <2850000>;
			regulator-max-microvolt = <2850000>;
		};

		pm8941_l18: l18 {
			regulator-min-microvolt = <2850000>;
			regulator-max-microvolt = <2850000>;
		};

		pm8941_l19: l19 {
			regulator-min-microvolt = <3000000>;
			regulator-max-microvolt = <3300000>;
		};

		pm8941_l20: l20 {
			regulator-min-microvolt = <2950000>;
			regulator-max-microvolt = <2950000>;
			regulator-system-load = <200000>;
			regulator-allow-set-load;
			regulator-boot-on;
		};

		pm8941_l21: l21 {
			regulator-min-microvolt = <2950000>;
			regulator-max-microvolt = <2950000>;
			regulator-boot-on;
		};

		pm8941_l22: l22 {
			regulator-min-microvolt = <3000000>;
			regulator-max-microvolt = <3300000>;
		};

		pm8941_l23: l23 {
			regulator-min-microvolt = <3000000>;
			regulator-max-microvolt = <3000000>;
		};

		pm8941_l24: l24 {
			regulator-min-microvolt = <3075000>;
			regulator-max-microvolt = <3075000>;
			regulator-boot-on;
		};

		pm8941_lvs1: lvs1 {};
		pm8941_lvs3: lvs3 {};
	};
};

/* eMMC */
&sdhc_1 {
	status = "okay";

	vmmc-supply = <&pm8941_l20>;
	vqmmc-supply = <&pm8941_s3>;

	pinctrl-names = "default", "sleep";
	pinctrl-0 = <&sdc1_on>;
	pinctrl-1 = <&sdc1_off>;
};

/* SDIO WLAN (Broadcom FullMAC). */
&sdhc_2 {
	status = "okay";

	max-frequency = <100000000>;
	vmmc-supply = <&vreg_wlan>;
	vqmmc-supply = <&pm8941_s3>;
	non-removable;

	pinctrl-names = "default", "sleep";
	pinctrl-0 = <&sdc2_on>;
	pinctrl-1 = <&sdc2_off>;

	bcrmf@1 {
		compatible = "brcm,bcm4339-fmac", "brcm,bcm4329-fmac";
		reg = <1>;

		brcm,drive-strength = <10>;

		pinctrl-names = "default";
		pinctrl-0 = <&wlan_sleep_clk_pin>;
	};
};

&tlmm {
	sdc1_on: sdc1-on-state {
		clk-pins {
			pins = "sdc1_clk";
			drive-strength = <16>;
			bias-disable;
		};

		cmd-data-pins {
			pins = "sdc1_cmd", "sdc1_data";
			drive-strength = <10>;
			bias-pull-up;
		};
	};

	sdc2_on: sdc2-on-state {
		clk-pins {
			pins = "sdc2_clk";
			drive-strength = <6>;
			bias-disable;
		};

		cmd-data-pins {
			pins = "sdc2_cmd", "sdc2_data";
			drive-strength = <6>;
			bias-pull-up;
		};
	};

	mpu6515_pin: mpu6515-state {
		pins = "gpio73";
		function = "gpio";
		bias-disable;
	};

	touch_pin: touch-state {
		int-pins {
			pins = "gpio5";
			function = "gpio";

			drive-strength = <2>;
			bias-disable;
		};

		reset-pins {
			pins = "gpio8";
			function = "gpio";

			drive-strength = <2>;
			bias-pull-up;
		};
	};

	panel_pin: panel-state {
		pins = "gpio12";
		function = "mdp_vsync";
		drive-strength = <2>;
		bias-disable;
	};

	bt_pin: bt-state {
		hostwake-pins {
			pins = "gpio42";
			function = "gpio";
		};

		devwake-pins {
			pins = "gpio62";
			function = "gpio";
		};

		shutdown-pins {
			pins = "gpio41";
			function = "gpio";
		};
	};

	vibrator_pin: vibrator-state {
		core-pins {
			pins = "gpio27";
			function = "gp1_clk";
			drive-strength = <6>;
			bias-disable;
		};

		enable-pins {
			pins = "gpio60";
			function = "gpio";
			drive-strength = <2>;
			bias-disable;
		};
	};
};

&usb {
	status = "okay";

	phys = <&usb_hs1_phy>;
	phy-select = <&tcsr 0xb000 0>;

	extcon = <&charger>, <&usb_id>;
	vbus-supply = <&usb_otg_vbus>;

	hnp-disable;
	srp-disable;
	adp-disable;
};

&usb_hs1_phy {
	status = "okay";

	v1p8-supply = <&pm8941_l6>;
	v3p3-supply = <&pm8941_l24>;

	qcom,init-seq = /bits/ 8 <0x1 0x64>;
};
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../base.h" #include "../core.h" #include "reg.h" #include "def.h" #include "phy_common.h" #include "dm_common.h" static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = { 0x7f8001fe, /* 0, +6.0dB */ 0x788001e2, /* 1, +5.5dB */ 0x71c001c7, /* 2, +5.0dB */ 0x6b8001ae, /* 3, +4.5dB */ 0x65400195, /* 4, +4.0dB */ 0x5fc0017f, /* 5, +3.5dB */ 0x5a400169, /* 6, +3.0dB */ 0x55400155, /* 7, +2.5dB */ 0x50800142, /* 8, +2.0dB */ 0x4c000130, /* 9, +1.5dB */ 0x47c0011f, /* 10, +1.0dB */ 0x43c0010f, /* 11, +0.5dB */ 0x40000100, /* 12, +0dB */ 0x3c8000f2, /* 13, -0.5dB */ 0x390000e4, /* 14, -1.0dB */ 0x35c000d7, /* 15, -1.5dB */ 0x32c000cb, /* 16, -2.0dB */ 0x300000c0, /* 17, -2.5dB */ 0x2d4000b5, /* 18, -3.0dB */ 0x2ac000ab, /* 19, -3.5dB */ 0x288000a2, /* 20, -4.0dB */ 0x26000098, /* 21, -4.5dB */ 0x24000090, /* 22, -5.0dB */ 0x22000088, /* 23, -5.5dB */ 0x20000080, /* 24, -6.0dB */ 0x1e400079, /* 25, -6.5dB */ 0x1c800072, /* 26, -7.0dB */ 0x1b00006c, /* 27. 
-7.5dB */ 0x19800066, /* 28, -8.0dB */ 0x18000060, /* 29, -8.5dB */ 0x16c0005b, /* 30, -9.0dB */ 0x15800056, /* 31, -9.5dB */ 0x14400051, /* 32, -10.0dB */ 0x1300004c, /* 33, -10.5dB */ 0x12000048, /* 34, -11.0dB */ 0x11000044, /* 35, -11.5dB */ 0x10000040, /* 36, -12.0dB */ 0x0f00003c, /* 37, -12.5dB */ 0x0e400039, /* 38, -13.0dB */ 0x0d800036, /* 39, -13.5dB */ 0x0cc00033, /* 40, -14.0dB */ 0x0c000030, /* 41, -14.5dB */ 0x0b40002d, /* 42, -15.0dB */ }; static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = { {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */ {0x0f, 0x0f, 
0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */ }; static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = { {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */ {0x13, 
0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */ }; static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw) { static const u8 index_mapping[RX_INDEX_MAPPING_NUM] = { 0x0f, 0x0f, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x04, 0x03, 0x02 }; struct rtl_priv *rtlpriv = rtl_priv(hw); int i, idx; u32 u4tmp; idx = rtlpriv->efuse.eeprom_thermalmeter - rtlpriv->dm.thermalvalue_rxgain; u4tmp = index_mapping[idx] << 12; rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "===> Rx Gain %x\n", u4tmp); for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++) rtl_set_rfreg(hw, i, 0x3C, RFREG_OFFSET_MASK, (rtlpriv->phy.reg_rf3c[i] & ~0xF000) | u4tmp); } static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg, u8 *cck_index_old) { struct rtl_priv *rtlpriv = rtl_priv(hw); unsigned long flag = 0; const u8 *cckswing; long temp_cck; int i; /* Query CCK default setting From 0xa24 */ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & 
MASKCCK; rtl92d_release_cckandrw_pagea_ctl(hw, &flag); for (i = 0; i < CCK_TABLE_LENGTH; i++) { if (rtlpriv->dm.cck_inch14) cckswing = &cckswing_table_ch14[i][2]; else cckswing = &cckswing_table_ch1ch13[i][2]; if (temp_cck == le32_to_cpu(*((__le32 *)cckswing))) { *cck_index_old = (u8)i; rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n", RCCK0_TXFILTER2, temp_cck, *cck_index_old, rtlpriv->dm.cck_inch14); break; } } *temp_cckg = temp_cck; } static void rtl92d_bandtype_5G(struct rtl_hal *rtlhal, u8 *ofdm_index, bool *internal_pa, u8 thermalvalue, u8 delta, u8 rf, struct rtl_efuse *rtlefuse, struct rtl_priv *rtlpriv, struct rtl_phy *rtlphy, const u8 index_mapping[5][INDEX_MAPPING_NUM], const u8 index_mapping_pa[8][INDEX_MAPPING_NUM]) { u8 offset = 0; u8 index; int i; for (i = 0; i < rf; i++) { if (rtlhal->macphymode == DUALMAC_DUALPHY && rtlhal->interfaceindex == 1) /* MAC 1 5G */ *internal_pa = rtlefuse->internal_pa_5g[1]; else *internal_pa = rtlefuse->internal_pa_5g[i]; if (*internal_pa) { if (rtlhal->interfaceindex == 1 || i == rf) offset = 4; else offset = 0; if (rtlphy->current_channel >= 100 && rtlphy->current_channel <= 165) offset += 2; } else { if (rtlhal->interfaceindex == 1 || i == rf) offset = 2; else offset = 0; } if (thermalvalue > rtlefuse->eeprom_thermalmeter) offset++; if (*internal_pa) { if (delta > INDEX_MAPPING_NUM - 1) index = index_mapping_pa[offset] [INDEX_MAPPING_NUM - 1]; else index = index_mapping_pa[offset][delta]; } else { if (delta > INDEX_MAPPING_NUM - 1) index = index_mapping[offset][INDEX_MAPPING_NUM - 1]; else index = index_mapping[offset][delta]; } if (thermalvalue > rtlefuse->eeprom_thermalmeter) { if (*internal_pa && thermalvalue > 0x12) { ofdm_index[i] = rtlpriv->dm.ofdm_index[i] - ((delta / 2) * 3 + (delta % 2)); } else { ofdm_index[i] -= index; } } else { ofdm_index[i] += index; } } } static void rtl92d_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw) { static 
const u8 index_mapping[5][INDEX_MAPPING_NUM] = { /* 5G, path A/MAC 0, decrease power */ {0, 1, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18}, /* 5G, path A/MAC 0, increase power */ {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18}, /* 5G, path B/MAC 1, decrease power */ {0, 2, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18}, /* 5G, path B/MAC 1, increase power */ {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18}, /* 2.4G, for decreas power */ {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10}, }; static const u8 index_mapping_internal_pa[8][INDEX_MAPPING_NUM] = { /* 5G, path A/MAC 0, ch36-64, decrease power */ {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16}, /* 5G, path A/MAC 0, ch36-64, increase power */ {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18}, /* 5G, path A/MAC 0, ch100-165, decrease power */ {0, 1, 2, 3, 5, 6, 8, 10, 11, 13, 14, 15, 15}, /* 5G, path A/MAC 0, ch100-165, increase power */ {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18}, /* 5G, path B/MAC 1, ch36-64, decrease power */ {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16}, /* 5G, path B/MAC 1, ch36-64, increase power */ {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18}, /* 5G, path B/MAC 1, ch100-165, decrease power */ {0, 1, 2, 3, 5, 6, 8, 9, 10, 12, 13, 14, 14}, /* 5G, path B/MAC 1, ch100-165, increase power */ {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18}, }; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_dm *dm = &rtlpriv->dm; u8 thermalvalue, delta, delta_lck, delta_iqk, delta_rxgain; u8 ofdm_min_index = 6, ofdm_min_index_internal_pa = 3, rf; long ele_a = 0, ele_d, temp_cck, val_x, value32; bool is2t = IS_92D_SINGLEPHY(rtlhal->version); u8 offset, thermalvalue_avg_count = 0; u8 ofdm_index_old[2] = {0, 0}; u32 thermalvalue_avg = 0; bool internal_pa = false; long val_y, ele_c = 0; s8 cck_index_old = 0; u8 indexforchannel; u8 ofdm_index[2]; s8 cck_index = 0; u8 index, swing; int i; 
indexforchannel = rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel); dm->txpower_trackinginit = true; rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n"); thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xf800); rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n", thermalvalue, dm->thermalvalue, rtlefuse->eeprom_thermalmeter); if (!thermalvalue) goto exit; if (is2t) rf = 2; else rf = 1; if (dm->thermalvalue && !rtlhal->reloadtxpowerindex) goto old_index_done; ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) & MASKOFDM_D; for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { ofdm_index_old[0] = (u8)i; rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n", ROFDM0_XATXIQIMBALANCE, ele_d, ofdm_index_old[0]); break; } } if (is2t) { ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD); ele_d &= MASKOFDM_D; for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { ofdm_index_old[1] = (u8)i; rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n", ROFDM0_XBTXIQIMBALANCE, ele_d, ofdm_index_old[1]); break; } } } if (rtlhal->current_bandtype == BAND_ON_2_4G) { rtl92d_bandtype_2_4G(hw, &temp_cck, &cck_index_old); } else { temp_cck = 0x090e1317; cck_index_old = 12; } if (!dm->thermalvalue) { dm->thermalvalue = rtlefuse->eeprom_thermalmeter; dm->thermalvalue_lck = thermalvalue; dm->thermalvalue_iqk = thermalvalue; dm->thermalvalue_rxgain = rtlefuse->eeprom_thermalmeter; for (i = 0; i < rf; i++) dm->ofdm_index[i] = ofdm_index_old[i]; dm->cck_index = cck_index_old; } if (rtlhal->reloadtxpowerindex) { for (i = 0; i < rf; i++) dm->ofdm_index[i] = ofdm_index_old[i]; dm->cck_index = cck_index_old; rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "reload ofdm index for band switch\n"); } 
old_index_done: for (i = 0; i < rf; i++) ofdm_index[i] = dm->ofdm_index[i]; dm->thermalvalue_avg[dm->thermalvalue_avg_index] = thermalvalue; dm->thermalvalue_avg_index++; if (dm->thermalvalue_avg_index == AVG_THERMAL_NUM) dm->thermalvalue_avg_index = 0; for (i = 0; i < AVG_THERMAL_NUM; i++) { if (dm->thermalvalue_avg[i]) { thermalvalue_avg += dm->thermalvalue_avg[i]; thermalvalue_avg_count++; } } if (thermalvalue_avg_count) thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count); if (rtlhal->reloadtxpowerindex) { delta = abs_diff(thermalvalue, rtlefuse->eeprom_thermalmeter); rtlhal->reloadtxpowerindex = false; dm->done_txpower = false; } else if (dm->done_txpower) { delta = abs_diff(thermalvalue, dm->thermalvalue); } else { delta = abs_diff(thermalvalue, rtlefuse->eeprom_thermalmeter); } delta_lck = abs_diff(thermalvalue, dm->thermalvalue_lck); delta_iqk = abs_diff(thermalvalue, dm->thermalvalue_iqk); delta_rxgain = abs_diff(thermalvalue, dm->thermalvalue_rxgain); rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n", thermalvalue, dm->thermalvalue, rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk); if (delta_lck > rtlefuse->delta_lck && rtlefuse->delta_lck != 0) { dm->thermalvalue_lck = thermalvalue; rtlpriv->cfg->ops->phy_lc_calibrate(hw, is2t); } if (delta == 0 || !dm->txpower_track_control) goto check_delta; dm->done_txpower = true; delta = abs_diff(thermalvalue, rtlefuse->eeprom_thermalmeter); if (rtlhal->current_bandtype == BAND_ON_2_4G) { offset = 4; if (delta > INDEX_MAPPING_NUM - 1) index = index_mapping[offset][INDEX_MAPPING_NUM - 1]; else index = index_mapping[offset][delta]; if (thermalvalue > dm->thermalvalue) { for (i = 0; i < rf; i++) ofdm_index[i] -= delta; cck_index -= delta; } else { for (i = 0; i < rf; i++) ofdm_index[i] += index; cck_index += index; } } else if (rtlhal->current_bandtype == BAND_ON_5G) { 
rtl92d_bandtype_5G(rtlhal, ofdm_index, &internal_pa, thermalvalue, delta, rf, rtlefuse, rtlpriv, rtlphy, index_mapping, index_mapping_internal_pa); } if (is2t) { rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n", dm->ofdm_index[0], dm->ofdm_index[1], dm->cck_index); } else { rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "temp OFDM_A_index=0x%x, cck_index = 0x%x\n", dm->ofdm_index[0], dm->cck_index); } for (i = 0; i < rf; i++) { if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1) { ofdm_index[i] = OFDM_TABLE_SIZE_92D - 1; } else if (internal_pa || rtlhal->current_bandtype == BAND_ON_2_4G) { if (ofdm_index[i] < ofdm_min_index_internal_pa) ofdm_index[i] = ofdm_min_index_internal_pa; } else if (ofdm_index[i] < ofdm_min_index) { ofdm_index[i] = ofdm_min_index; } } if (rtlhal->current_bandtype == BAND_ON_2_4G) { if (cck_index > CCK_TABLE_SIZE - 1) cck_index = CCK_TABLE_SIZE - 1; else if (cck_index < 0) cck_index = 0; } if (is2t) { rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n", ofdm_index[0], ofdm_index[1], cck_index); } else { rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "new OFDM_A_index=0x%x, cck_index = 0x%x\n", ofdm_index[0], cck_index); } ele_d = (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22; val_x = rtlphy->iqk_matrix[indexforchannel].value[0][0]; val_y = rtlphy->iqk_matrix[indexforchannel].value[0][1]; if (val_x != 0) { if ((val_x & 0x00000200) != 0) val_x = val_x | 0xFFFFFC00; ele_a = ((val_x * ele_d) >> 8) & 0x000003FF; /* new element C = element D x Y */ if ((val_y & 0x00000200) != 0) val_y = val_y | 0xFFFFFC00; ele_c = ((val_y * ele_d) >> 8) & 0x000003FF; /* write new elements A, C, D to regC80 and * regC94, element B is always 0 */ value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a; rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, value32); value32 = (ele_c & 0x000003C0) >> 6; rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 
MASKH4BITS, value32); value32 = ((val_x * ele_d) >> 7) & 0x01; rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), value32); } else { rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, ofdmswing_table[ofdm_index[0]]); rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00); rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), 0x00); } rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n", rtlhal->interfaceindex, val_x, val_y, ele_a, ele_c, ele_d, val_x, val_y); if (rtlhal->current_bandtype == BAND_ON_2_4G) { /* Adjust CCK according to IQK result */ for (i = 0; i < 8; i++) { if (dm->cck_inch14) swing = cckswing_table_ch14[cck_index][i]; else swing = cckswing_table_ch1ch13[cck_index][i]; rtl_write_byte(rtlpriv, 0xa22 + i, swing); } } if (is2t) { ele_d = (ofdmswing_table[ofdm_index[1]] & 0xFFC00000) >> 22; val_x = rtlphy->iqk_matrix[indexforchannel].value[0][4]; val_y = rtlphy->iqk_matrix[indexforchannel].value[0][5]; if (val_x != 0) { if ((val_x & 0x00000200) != 0) /* consider minus */ val_x = val_x | 0xFFFFFC00; ele_a = ((val_x * ele_d) >> 8) & 0x000003FF; /* new element C = element D x Y */ if ((val_y & 0x00000200) != 0) val_y = val_y | 0xFFFFFC00; ele_c = ((val_y * ele_d) >> 8) & 0x00003FF; /* write new elements A, C, D to regC88 * and regC9C, element B is always 0 */ value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a; rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD, value32); value32 = (ele_c & 0x000003C0) >> 6; rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, value32); value32 = ((val_x * ele_d) >> 7) & 0x01; rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), value32); } else { rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD, ofdmswing_table[ofdm_index[1]]); rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, 0x00); rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), 0x00); } rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "TxPwrTracking path B: 
X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n", val_x, val_y, ele_a, ele_c, ele_d, val_x, val_y); } rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n", rtl_get_bbreg(hw, 0xc80, MASKDWORD), rtl_get_bbreg(hw, 0xc94, MASKDWORD), rtl_get_rfreg(hw, RF90_PATH_A, 0x24, RFREG_OFFSET_MASK)); check_delta: if (delta_iqk > rtlefuse->delta_iqk && rtlefuse->delta_iqk != 0) { rtl92d_phy_reset_iqk_result(hw); dm->thermalvalue_iqk = thermalvalue; rtlpriv->cfg->ops->phy_iq_calibrate(hw); } if (delta_rxgain > 0 && rtlhal->current_bandtype == BAND_ON_5G && thermalvalue <= rtlefuse->eeprom_thermalmeter) { dm->thermalvalue_rxgain = thermalvalue; rtl92d_dm_rxgain_tracking_thermalmeter(hw); } if (dm->txpower_track_control) dm->thermalvalue = thermalvalue; exit: rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n"); } void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.txpower_tracking = true; rtlpriv->dm.txpower_trackinginit = false; rtlpriv->dm.txpower_track_control = true; rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "pMgntInfo->txpower_tracking = %d\n", rtlpriv->dm.txpower_tracking); } EXPORT_SYMBOL_GPL(rtl92d_dm_initialize_txpower_tracking); void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); if (!rtlpriv->dm.txpower_tracking) return; if (!rtlpriv->dm.tm_trigger) { rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) | BIT(16), 0x03); rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Trigger 92S Thermal Meter!!\n"); rtlpriv->dm.tm_trigger = 1; } else { rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Schedule TxPowerTracking direct call!!\n"); rtl92d_dm_txpower_tracking_callback_thermalmeter(hw); rtlpriv->dm.tm_trigger = 0; } } EXPORT_SYMBOL_GPL(rtl92d_dm_check_txpower_tracking_thermal_meter); void 
rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct false_alarm_statistics *falsealm_cnt = &rtlpriv->falsealm_cnt; unsigned long flag = 0; u32 ret_value; /* hold ofdm counter */ rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */ rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /* hold page D counter */ ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD); falsealm_cnt->cnt_fast_fsync_fail = ret_value & 0xffff; falsealm_cnt->cnt_sb_search_fail = (ret_value & 0xffff0000) >> 16; ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD); falsealm_cnt->cnt_parity_fail = (ret_value & 0xffff0000) >> 16; ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD); falsealm_cnt->cnt_rate_illegal = ret_value & 0xffff; falsealm_cnt->cnt_crc8_fail = (ret_value & 0xffff0000) >> 16; ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD); falsealm_cnt->cnt_mcs_fail = ret_value & 0xffff; falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail + falsealm_cnt->cnt_rate_illegal + falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail + falsealm_cnt->cnt_fast_fsync_fail + falsealm_cnt->cnt_sb_search_fail; if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) { rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0); falsealm_cnt->cnt_cck_fail = ret_value; ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3); falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8; rtl92d_release_cckandrw_pagea_ctl(hw, &flag); } else { falsealm_cnt->cnt_cck_fail = 0; } falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail + falsealm_cnt->cnt_cck_fail; /* reset false alarm counter registers */ rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1); rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0); /* update ofdm counter */ rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 0); /* update page C counter */ rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 0); /* update page D counter */ 
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) { /* reset cck counter */ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0); /* enable cck counter */ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2); rtl92d_release_cckandrw_pagea_ctl(hw, &flag); } rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n", falsealm_cnt->cnt_fast_fsync_fail, falsealm_cnt->cnt_sb_search_fail); rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n", falsealm_cnt->cnt_parity_fail, falsealm_cnt->cnt_rate_illegal, falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail); rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n", falsealm_cnt->cnt_ofdm_fail, falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all); } EXPORT_SYMBOL_GPL(rtl92d_dm_false_alarm_counter_statistics); void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *de_digtable = &rtlpriv->dm_digtable; struct rtl_mac *mac = rtl_mac(rtlpriv); /* Determine the minimum RSSI */ if (mac->link_state < MAC80211_LINKED && rtlpriv->dm.entry_min_undec_sm_pwdb == 0) { de_digtable->min_undec_pwdb_for_dm = 0; rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, "Not connected to any\n"); } if (mac->link_state >= MAC80211_LINKED) { if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { de_digtable->min_undec_pwdb_for_dm = rtlpriv->dm.entry_min_undec_sm_pwdb; rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, "AP Client PWDB = 0x%lx\n", rtlpriv->dm.entry_min_undec_sm_pwdb); } else { de_digtable->min_undec_pwdb_for_dm = rtlpriv->dm.undec_sm_pwdb; rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, "STA Default Port PWDB = 0x%x\n", de_digtable->min_undec_pwdb_for_dm); } } else { de_digtable->min_undec_pwdb_for_dm = rtlpriv->dm.entry_min_undec_sm_pwdb; rtl_dbg(rtlpriv, 
COMP_BB_POWERSAVING, DBG_LOUD, "AP Ext Port or disconnect PWDB = 0x%x\n", de_digtable->min_undec_pwdb_for_dm); } rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n", de_digtable->min_undec_pwdb_for_dm); } EXPORT_SYMBOL_GPL(rtl92d_dm_find_minimum_rssi); static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *de_digtable = &rtlpriv->dm_digtable; unsigned long flag = 0; if (de_digtable->cursta_cstate == DIG_STA_CONNECT) { if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { if (de_digtable->min_undec_pwdb_for_dm <= 25) de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI; else de_digtable->cur_cck_pd_state = CCK_PD_STAGE_HIGHRSSI; } else { if (de_digtable->min_undec_pwdb_for_dm <= 20) de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI; else de_digtable->cur_cck_pd_state = CCK_PD_STAGE_HIGHRSSI; } } else { de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI; } if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) { if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83); rtl92d_release_cckandrw_pagea_ctl(hw, &flag); } else { rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd); rtl92d_release_cckandrw_pagea_ctl(hw, &flag); } de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state; } rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n", de_digtable->cursta_cstate == DIG_STA_CONNECT ? "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT"); rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n", de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ? 
"Low RSSI " : "High RSSI "); rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n", IS_92D_SINGLEPHY(rtlpriv->rtlhal.version)); } void rtl92d_dm_write_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *de_digtable = &rtlpriv->dm_digtable; rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n", de_digtable->cur_igvalue, de_digtable->pre_igvalue, de_digtable->back_val); if (!de_digtable->dig_enable_flag) { rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n"); de_digtable->pre_igvalue = 0x17; return; } if (de_digtable->pre_igvalue != de_digtable->cur_igvalue) { rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, de_digtable->cur_igvalue); rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, de_digtable->cur_igvalue); de_digtable->pre_igvalue = de_digtable->cur_igvalue; } } EXPORT_SYMBOL_GPL(rtl92d_dm_write_dig); static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv) { struct dig_t *de_digtable = &rtlpriv->dm_digtable; if (rtlpriv->mac80211.link_state >= MAC80211_LINKED && rtlpriv->mac80211.vendor == PEER_CISCO) { rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n"); if (de_digtable->last_min_undec_pwdb_for_dm >= 50 && de_digtable->min_undec_pwdb_for_dm < 50) { rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00); rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode Off\n"); } else if (de_digtable->last_min_undec_pwdb_for_dm <= 55 && de_digtable->min_undec_pwdb_for_dm > 55) { rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f); rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n"); } } else if (!(rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL) & 0xf)) { rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f); rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n"); } } void rtl92d_dm_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *de_digtable = &rtlpriv->dm_digtable; u8 value_igi = de_digtable->cur_igvalue; struct false_alarm_statistics *falsealm_cnt 
= &rtlpriv->falsealm_cnt; rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n"); if (rtlpriv->rtlhal.earlymode_enable) { rtl92d_early_mode_enabled(rtlpriv); de_digtable->last_min_undec_pwdb_for_dm = de_digtable->min_undec_pwdb_for_dm; } if (!rtlpriv->dm.dm_initialgain_enable) return; /* because we will send data pkt when scanning * this will cause some ap like gear-3700 wep TP * lower if we return here, this is the diff of * mac80211 driver vs ieee80211 driver */ /* if (rtlpriv->mac80211.act_scanning) * return; */ /* Not STA mode return tmp */ if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION) return; rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n"); /* Decide the current status and if modify initial gain or not */ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) de_digtable->cursta_cstate = DIG_STA_CONNECT; else de_digtable->cursta_cstate = DIG_STA_DISCONNECT; /* adjust initial gain according to false alarm counter */ if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0) value_igi--; else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH1) value_igi += 0; else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH2) value_igi++; else if (falsealm_cnt->cnt_all >= DM_DIG_FA_TH2) value_igi += 2; rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n", de_digtable->large_fa_hit, de_digtable->forbidden_igi); rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n", de_digtable->recover_cnt, de_digtable->rx_gain_min); /* deal with abnormally large false alarm */ if (falsealm_cnt->cnt_all > 10000) { rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "dm_DIG(): Abnormally false alarm case\n"); de_digtable->large_fa_hit++; if (de_digtable->forbidden_igi < de_digtable->cur_igvalue) { de_digtable->forbidden_igi = de_digtable->cur_igvalue; de_digtable->large_fa_hit = 1; } if (de_digtable->large_fa_hit >= 3) { if ((de_digtable->forbidden_igi + 1) > DM_DIG_MAX) de_digtable->rx_gain_min = DM_DIG_MAX; else de_digtable->rx_gain_min = 
(de_digtable->forbidden_igi + 1); de_digtable->recover_cnt = 3600; /* 3600=2hr */ } } else { /* Recovery mechanism for IGI lower bound */ if (de_digtable->recover_cnt != 0) { de_digtable->recover_cnt--; } else { if (de_digtable->large_fa_hit == 0) { if ((de_digtable->forbidden_igi - 1) < DM_DIG_FA_LOWER) { de_digtable->forbidden_igi = DM_DIG_FA_LOWER; de_digtable->rx_gain_min = DM_DIG_FA_LOWER; } else { de_digtable->forbidden_igi--; de_digtable->rx_gain_min = (de_digtable->forbidden_igi + 1); } } else if (de_digtable->large_fa_hit == 3) { de_digtable->large_fa_hit = 0; } } } rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n", de_digtable->large_fa_hit, de_digtable->forbidden_igi); rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "dm_DIG() After: recover_cnt=%d, rx_gain_min=%x\n", de_digtable->recover_cnt, de_digtable->rx_gain_min); if (value_igi > DM_DIG_MAX) value_igi = DM_DIG_MAX; else if (value_igi < de_digtable->rx_gain_min) value_igi = de_digtable->rx_gain_min; de_digtable->cur_igvalue = value_igi; rtl92d_dm_write_dig(hw); if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) rtl92d_dm_cck_packet_detection_thresh(hw); rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n"); } EXPORT_SYMBOL_GPL(rtl92d_dm_dig); void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.current_turbo_edca = false; rtlpriv->dm.is_any_nonbepkts = false; rtlpriv->dm.is_cur_rdlstate = false; } EXPORT_SYMBOL_GPL(rtl92d_dm_init_edca_turbo); void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); const u32 edca_be_ul = 0x5ea42b; const u32 edca_be_dl = 0x5ea42b; static u64 last_txok_cnt; static u64 last_rxok_cnt; u64 cur_txok_cnt; u64 cur_rxok_cnt; if (mac->link_state != MAC80211_LINKED) { rtlpriv->dm.current_turbo_edca = false; goto exit; } if (!rtlpriv->dm.is_any_nonbepkts && !rtlpriv->dm.disable_framebursting) { cur_txok_cnt = 
rtlpriv->stats.txbytesunicast - last_txok_cnt; cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; if (cur_rxok_cnt > 4 * cur_txok_cnt) { if (!rtlpriv->dm.is_cur_rdlstate || !rtlpriv->dm.current_turbo_edca) { rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be_dl); rtlpriv->dm.is_cur_rdlstate = true; } } else { if (rtlpriv->dm.is_cur_rdlstate || !rtlpriv->dm.current_turbo_edca) { rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be_ul); rtlpriv->dm.is_cur_rdlstate = false; } } rtlpriv->dm.current_turbo_edca = true; } else { if (rtlpriv->dm.current_turbo_edca) { u8 tmp = AC0_BE; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, &tmp); rtlpriv->dm.current_turbo_edca = false; } } exit: rtlpriv->dm.is_any_nonbepkts = false; last_txok_cnt = rtlpriv->stats.txbytesunicast; last_rxok_cnt = rtlpriv->stats.rxbytesunicast; } EXPORT_SYMBOL_GPL(rtl92d_dm_check_edca_turbo); void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rate_adaptive *ra = &rtlpriv->ra; ra->ratr_state = DM_RATR_STA_INIT; ra->pre_ratr_state = DM_RATR_STA_INIT; if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) rtlpriv->dm.useramask = true; else rtlpriv->dm.useramask = false; } EXPORT_SYMBOL_GPL(rtl92d_dm_init_rate_adaptive_mask);
/* Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* Adaptive Backlight Management (ABM) interface: an instance struct plus a
 * backend-specific function table that the display core calls through.
 */

#ifndef __DC_ABM_H__
#define __DC_ABM_H__

#include "dm_services_types.h"

/* Opaque save/restore payload; full definition lives elsewhere. */
struct abm_save_restore;

/* One ABM instance, bound to a display-core context and an ops table. */
struct abm {
	struct dc_context *ctx;
	const struct abm_funcs *funcs;
	/* NOTE(review): presumably tracks whether the DMCU firmware that
	 * services ABM is active — confirm against the implementations. */
	bool dmcu_is_running;
};

/* Backend operations; bool-returning ops report success/failure. */
struct abm_funcs {
	void (*abm_init)(struct abm *abm, uint32_t back_light, uint32_t user_level);
	bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
	bool (*set_abm_immediate_disable)(struct abm *abm, unsigned int panel_inst);
	bool (*set_pipe)(struct abm *abm, unsigned int controller_id, unsigned int panel_inst);

	/* backlight_pwm_u16_16 is unsigned 32 bit,
	 * 16 bit integer + 16 fractional, where 1.0 is max backlight value.
	 */
	bool (*set_backlight_level_pwm)(struct abm *abm,
			unsigned int backlight_pwm_u16_16,
			unsigned int frame_ramp,
			unsigned int controller_id,
			unsigned int panel_inst);

	unsigned int (*get_current_backlight)(struct abm *abm);
	unsigned int (*get_target_backlight)(struct abm *abm);
	/* Load an ABM configuration blob of 'bytes' length for instance 'inst'. */
	bool (*init_abm_config)(struct abm *abm,
			const char *src,
			unsigned int bytes,
			unsigned int inst);
	bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst);
	bool (*save_restore)(
			struct abm *abm,
			unsigned int panel_inst,
			struct abm_save_restore *pData);
	/* Extended set_pipe variant that also selects a power sequencer. */
	bool (*set_pipe_ex)(struct abm *abm,
			unsigned int otg_inst,
			unsigned int option,
			unsigned int panel_inst,
			unsigned int pwrseq_inst);
};

#endif
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023-2024 Intel Corporation
 */

/* Type definitions for the PF-side of the VF/PF ABI service:
 * negotiated ABI versions and the runtime register set shared with VFs.
 */

#ifndef _XE_GT_SRIOV_PF_SERVICE_TYPES_H_
#define _XE_GT_SRIOV_PF_SERVICE_TYPES_H_

#include <linux/types.h>

/* Forward declaration only; a pointer to an array of these is stored below. */
struct xe_reg;

/**
 * struct xe_gt_sriov_pf_service_version - VF/PF ABI Version.
 * @major: the major version of the VF/PF ABI
 * @minor: the minor version of the VF/PF ABI
 *
 * See `GuC Relay Communication`_.
 */
struct xe_gt_sriov_pf_service_version {
	u16 major;
	u16 minor;
};

/**
 * struct xe_gt_sriov_pf_service_runtime_regs - Runtime data shared with VFs.
 * @regs: pointer to static array with register offsets.
 * @values: pointer to array with captured register values.
 * @size: size of the regs and value arrays.
 */
struct xe_gt_sriov_pf_service_runtime_regs {
	const struct xe_reg *regs;
	u32 *values;
	u32 size;
};

/**
 * struct xe_gt_sriov_pf_service - Data used by the PF service.
 * @version: information about VF/PF ABI versions for current platform.
 * @version.base: lowest VF/PF ABI version that could be negotiated with VF.
 * @version.latest: latest VF/PF ABI version supported by the PF driver.
 * @runtime: runtime data shared with VFs.
 */
struct xe_gt_sriov_pf_service {
	struct {
		struct xe_gt_sriov_pf_service_version base;
		struct xe_gt_sriov_pf_service_version latest;
	} version;
	struct xe_gt_sriov_pf_service_runtime_regs runtime;
};

#endif
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2018 Linaro Ltd.
 */

/* Board DTS for the 96boards Rock960 (RK3399); board-specific additions
 * on top of the shared rock960 dtsi: LEDs, thermal tuning, PCIe/USB
 * power-enable GPIOs and expansion-header SPI buses.
 */

/dts-v1/;
#include "rk3399-rock960.dtsi"

/ {
	model = "96boards Rock960";
	compatible = "vamrs,rock960", "rockchip,rk3399";

	chosen {
		stdout-path = "serial2:1500000n8";
	};

	/* Four user LEDs plus WLAN/BT activity LEDs, all on GPIO4. */
	leds {
		compatible = "gpio-leds";
		pinctrl-names = "default";
		pinctrl-0 = <&user_led1_pin>, <&user_led2_pin>,
			    <&user_led3_pin>, <&user_led4_pin>,
			    <&wlan_led_pin>, <&bt_led_pin>;

		user_led1: led-1 {
			label = "green:user1";
			gpios = <&gpio4 RK_PC2 0>;
			linux,default-trigger = "heartbeat";
		};

		user_led2: led-2 {
			label = "green:user2";
			gpios = <&gpio4 RK_PC6 0>;
			linux,default-trigger = "mmc0";
		};

		user_led3: led-3 {
			label = "green:user3";
			gpios = <&gpio4 RK_PD0 0>;
			linux,default-trigger = "mmc1";
		};

		user_led4: led-4 {
			label = "green:user4";
			gpios = <&gpio4 RK_PD4 0>;
			panic-indicator;
			linux,default-trigger = "none";
		};

		wlan_active_led: led-5 {
			label = "yellow:wlan";
			gpios = <&gpio4 RK_PD5 0>;
			linux,default-trigger = "phy0tx";
			default-state = "off";
		};

		bt_active_led: led-6 {
			label = "blue:bt";
			gpios = <&gpio4 RK_PD6 0>;
			linux,default-trigger = "hci0-power";
			default-state = "off";
		};
	};
};

/* Board-specific thermal tuning: lower first CPU trip point. */
&cpu_alert0 {
	temperature = <65000>;
};

&cpu_thermal {
	sustainable-power = <1550>;

	cooling-maps {
		map0 {
			trip = <&cpu_alert1>;
		};
	};
};

/* PCIe endpoint reset GPIO for this board. */
&pcie0 {
	ep-gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_HIGH>;
};

&pinctrl {
	leds {
		user_led1_pin: user-led1-pin {
			rockchip,pins =
				<4 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
		};

		user_led2_pin: user-led2-pin {
			rockchip,pins =
				<4 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
		};

		user_led3_pin: user-led3-pin {
			rockchip,pins =
				<4 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
		};

		user_led4_pin: user-led4-pin {
			rockchip,pins =
				<4 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
		};

		wlan_led_pin: wlan-led-pin {
			rockchip,pins =
				<4 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>;
		};

		bt_led_pin: bt-led-pin {
			rockchip,pins =
				<4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_none>;
		};
	};

	pcie {
		pcie_drv: pcie-drv {
			rockchip,pins =
				<2 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
		};
	};

	usb2 {
		host_vbus_drv: host-vbus-drv {
			rockchip,pins =
				<4 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
		};
	};
};

&spi0 {
	/* On Low speed expansion (LS-SPI0) */
	status = "okay";
};

&spi4 {
	/* On High speed expansion (HS-SPI1) */
	status = "okay";
};

&usbdrd_dwc3_0 {
	dr_mode = "otg";
};

&usbdrd_dwc3_1 {
	dr_mode = "host";
};

/* Power-enable GPIOs for PCIe 3.3V rail and USB host VBUS. */
&vcc3v3_pcie {
	gpio = <&gpio2 5 GPIO_ACTIVE_HIGH>;
};

&vcc5v0_host {
	gpio = <&gpio4 25 GPIO_ACTIVE_HIGH>;
};
/* SPDX-License-Identifier: GPL-2.0 */
/* sparc64 TLB-gather glue: maps the generic mmu_gather hooks onto the
 * sparc64 deferred TLB-flush machinery (flush_tlb_pending).
 */
#ifndef _SPARC64_TLB_H
#define _SPARC64_TLB_H

#include <linux/swap.h>
#include <linux/pagemap.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_SMP
/* Cross-call helper that flushes a batch of pending TLB entries. */
void smp_flush_tlb_pending(struct mm_struct *, unsigned long, unsigned long *);
#endif

/* Whole-mm flush: cross-call on SMP, direct context flush on UP. */
#ifdef CONFIG_SMP
void smp_flush_tlb_mm(struct mm_struct *mm);
#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
#else
#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
#endif

void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
void flush_tlb_pending(void);

/* mmu_gather flush hook: drain whatever flushes have been batched. */
#define tlb_flush(tlb)	flush_tlb_pending()

/*
 * SPARC64's hardware TLB fill does not use the Linux page-tables
 * and therefore we don't need a TLBI when freeing page-table pages.
 */
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
#define tlb_needs_table_invalidate()	(false)
#endif

#include <asm-generic/tlb.h>

#endif /* _SPARC64_TLB_H */
/*

   fp_trig.c: floating-point math routines for the Linux-m68k
   floating point emulator.

   Copyright (c) 1998-1999 David Huggins-Daines / Roman Zippel.

   I hereby give permission, free of charge, to copy, modify, and
   redistribute this software, in source or binary form, provided that
   the above copyright notice and the following disclaimer are included
   in all such copies.

   THIS SOFTWARE IS PROVIDED "AS IS", WITH ABSOLUTELY NO WARRANTY, REAL
   OR IMPLIED.

*/

/* NOTE: every routine in this file is an unimplemented stub.  Each one
 * logs the opcode name via uprint() and returns dest untouched; the
 * fp_f<op> stubs additionally run fp_monadic_check() so that NaN/operand
 * propagation matches the other monadic operations, while the
 * fp_fsincosN stubs skip even that check.
 */

#include "fp_emu.h"
#include "fp_trig.h"

struct fp_ext *fp_fsin(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsin\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *fp_fcos(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fcos\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *fp_ftan(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("ftan\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *fp_fasin(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fasin\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *fp_facos(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("facos\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *fp_fatan(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fatan\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *fp_fsinh(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsinh\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *fp_fcosh(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fcosh\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *fp_ftanh(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("ftanh\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *fp_fatanh(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fatanh\n");

	fp_monadic_check(dest, src);

	return dest;
}

/* FSINCOS with destination register N (0-7); no operand check is done. */
struct fp_ext *fp_fsincos0(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos0\n");

	return dest;
}

struct fp_ext *fp_fsincos1(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos1\n");

	return dest;
}

struct fp_ext *fp_fsincos2(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos2\n");

	return dest;
}

struct fp_ext *fp_fsincos3(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos3\n");

	return dest;
}

struct fp_ext *fp_fsincos4(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos4\n");

	return dest;
}

struct fp_ext *fp_fsincos5(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos5\n");

	return dest;
}

struct fp_ext *fp_fsincos6(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos6\n");

	return dest;
}

struct fp_ext *fp_fsincos7(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos7\n");

	return dest;
}
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * stv0900.h
 *
 * Driver for ST STV0900 satellite demodulator IC.
 *
 * Copyright (C) ST Microelectronics.
 * Copyright (C) 2009 NetUP Inc.
 * Copyright (C) 2009 Igor M. Liplianin <[email protected]>
 */

#ifndef STV0900_H
#define STV0900_H

#include <linux/dvb/frontend.h>
#include <media/dvb_frontend.h>

/* One register address/value pair; used for board-specific TS setup tables. */
struct stv0900_reg {
	u16 addr;
	u8 val;
};

/* Board configuration handed to stv0900_attach() by the bridge driver. */
struct stv0900_config {
	u8 demod_address;	/* demodulator I2C address */
	u8 demod_mode;
	u32 xtal;		/* reference clock frequency, Hz */
	u8 clkmode;/* 0 for CLKI, 2 for XTALI */

	u8 diseqc_mode;

	u8 path1_mode;
	u8 path2_mode;
	/* NULL-terminated table of extra register writes for TS config,
	 * or NULL when not needed. */
	struct stv0900_reg *ts_config_regs;

	u8 tun1_maddress;/* 0, 1, 2, 3 for 0xc0, 0xc2, 0xc4, 0xc6 */
	u8 tun2_maddress;
	u8 tun1_adc;/* 1 for stv6110, 2 for stb6100 */
	u8 tun2_adc;
	u8 tun1_type;/* for now 3 for stb6100 auto, else - software */
	u8 tun2_type;
	/* Set device param to start dma */
	int (*set_ts_params)(struct dvb_frontend *fe, int is_punctured);
	/* Hook for Lock LED */
	void (*set_lock_led)(struct dvb_frontend *fe, int offon);
};

/* Attach one demodulator path ('demod' selects path) of a (possibly dual)
 * STV0900; returns NULL on failure or when the driver is disabled.
 */
#if IS_REACHABLE(CONFIG_DVB_STV0900)
extern struct dvb_frontend *stv0900_attach(const struct stv0900_config *config,
					struct i2c_adapter *i2c, int demod);
#else
static inline struct dvb_frontend *stv0900_attach(const struct stv0900_config *config,
					struct i2c_adapter *i2c, int demod)
{
	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
	return NULL;
}
#endif

#endif
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* Hardware constants and on-chip layout definitions for T4-family
 * adapters: channel/queue limits, SGE parameters, response-descriptor
 * field macros and the serial-flash partition map.
 */

#ifndef __T4_HW_H
#define __T4_HW_H

#include <linux/types.h>

enum {
	NCHAN = 4,     /* # of HW channels */
	MAX_MTU = 9600,   /* max MAC MTU, excluding header + FCS */
	EEPROMSIZE = 17408,/* Serial EEPROM physical size */
	EEPROMVSIZE = 32768,/* Serial EEPROM virtual address space size */
	EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
	RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
	T6_RSS_NENTRIES = 4096, /* # of entries in RSS mapping table */
	TCB_SIZE = 128,   /* TCB size */
	NMTUS = 16,    /* size of MTU table */
	NCCTRL_WIN = 32,    /* # of congestion control windows */
	NTX_SCHED = 8,     /* # of HW Tx scheduling queues */
	PM_NSTATS = 5,     /* # of PM stats */
	T6_PM_NSTATS = 7,  /* # of PM stats in T6 */
	MBOX_LEN = 64,    /* mailbox size in bytes */
	TRACE_LEN = 112,   /* length of trace data and mask */
	FILTER_OPT_LEN = 36,    /* filter tuple width for optional components */
};

/* CIM (CPL Interface Module) queue and logic-analyzer sizes. */
enum {
	CIM_NUM_IBQ = 6,     /* # of CIM IBQs */
	CIM_NUM_OBQ = 6,     /* # of CIM OBQs */
	CIM_NUM_OBQ_T5 = 8,     /* # of CIM OBQs for T5 adapter */
	CIMLA_SIZE = 2048,  /* # of 32-bit words in CIM LA */
	CIM_PIFLA_SIZE = 64,    /* # of 192-bit words in CIM PIF LA */
	CIM_MALA_SIZE = 64,    /* # of 160-bit words in CIM MA LA */
	CIM_IBQ_SIZE = 128,   /* # of 128-bit words in a CIM IBQ */
	CIM_OBQ_SIZE = 128,   /* # of 128-bit words in a CIM OBQ */
	TPLA_SIZE = 128,   /* # of 64-bit words in TP LA */
	ULPRX_LA_SIZE = 512,   /* # of 256-bit words in ULP_RX LA */
};

/* SGE context types */
enum ctxt_type {
	CTXT_EGRESS,
	CTXT_INGRESS,
	CTXT_FLM,
	CTXT_CNM,
};

enum {
	SF_PAGE_SIZE = 256,           /* serial flash page size */
	SF_SEC_SIZE = 64 * 1024,      /* serial flash sector size */
};

enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */

enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV };    /* mailbox owners */

enum {
	SGE_MAX_WR_LEN = 512,     /* max WR size in bytes */
	SGE_CTXT_SIZE = 24,       /* size of SGE context */
	SGE_NTIMERS = 6,          /* # of interrupt holdoff timer values */
	SGE_NCOUNTERS = 4,        /* # of interrupt packet counter values */
	SGE_NDBQTIMERS = 8,       /* # of Doorbell Queue Timer values */
	SGE_MAX_IQ_SIZE = 65520,

	SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
	SGE_TIMER_UPD_CIDX = 7,   /* update cidx only */

	SGE_EQ_IDXSIZE = 64,      /* egress queue pidx/cidx unit size */

	SGE_INTRDST_PCI = 0,      /* interrupt destination is PCI-E */
	SGE_INTRDST_IQ = 1,       /*   destination is an ingress queue */

	SGE_UPDATEDEL_NONE = 0,   /* ingress queue pidx update delivery */
	SGE_UPDATEDEL_INTR = 1,   /*   interrupt */
	SGE_UPDATEDEL_STPG = 2,   /*   status page */
	SGE_UPDATEDEL_BOTH = 3,   /*   interrupt and status page */

	SGE_HOSTFCMODE_NONE = 0,  /* egress queue cidx updates */
	SGE_HOSTFCMODE_IQ = 1,    /*   sent to ingress queue */
	SGE_HOSTFCMODE_STPG = 2,  /*   sent to status page */
	SGE_HOSTFCMODE_BOTH = 3,  /*   ingress queue and status page */

	SGE_FETCHBURSTMIN_16B = 0,/* egress queue descriptor fetch minimum */
	SGE_FETCHBURSTMIN_32B = 1,
	SGE_FETCHBURSTMIN_64B = 2,
	SGE_FETCHBURSTMIN_128B = 3,

	SGE_FETCHBURSTMAX_64B = 0,/* egress queue descriptor fetch maximum */
	SGE_FETCHBURSTMAX_128B = 1,
	SGE_FETCHBURSTMAX_256B = 2,
	SGE_FETCHBURSTMAX_512B = 3,

	SGE_CIDXFLUSHTHRESH_1 = 0,/* egress queue cidx flush threshold */
	SGE_CIDXFLUSHTHRESH_2 = 1,
	SGE_CIDXFLUSHTHRESH_4 = 2,
	SGE_CIDXFLUSHTHRESH_8 = 3,
	SGE_CIDXFLUSHTHRESH_16 = 4,
	SGE_CIDXFLUSHTHRESH_32 = 5,
	SGE_CIDXFLUSHTHRESH_64 = 6,
	SGE_CIDXFLUSHTHRESH_128 = 7,

	SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
};

/* PCI-e memory window access */
enum pcie_memwin {
	MEMWIN_NIC      = 0,
	MEMWIN_RSVD1    = 1,
	MEMWIN_RSVD2    = 2,
	MEMWIN_RDMA     = 3,
	MEMWIN_RSVD4    = 4,
	MEMWIN_FOISCSI  = 5,
	MEMWIN_CSIOSTOR = 6,
	MEMWIN_RSVD7    = 7,
};

struct sge_qstat {                /* data written to SGE queue status entries */
	__be32 qid;
	__be16 cidx;
	__be16 pidx;
};

/*
 * Structure for last 128 bits of response descriptors
 */
struct rsp_ctrl {
	__be32 hdrbuflen_pidx;
	__be32 pldbuflen_qid;
	union {
		u8 type_gen;
		__be64 last_flit;
	};
};

/* Response-descriptor field accessors (shift/mask/extract macros). */
#define RSPD_NEWBUF_S    31
#define RSPD_NEWBUF_V(x) ((x) << RSPD_NEWBUF_S)
#define RSPD_NEWBUF_F    RSPD_NEWBUF_V(1U)

#define RSPD_LEN_S    0
#define RSPD_LEN_M    0x7fffffff
#define RSPD_LEN_G(x) (((x) >> RSPD_LEN_S) & RSPD_LEN_M)

#define RSPD_QID_S    RSPD_LEN_S
#define RSPD_QID_M    RSPD_LEN_M
#define RSPD_QID_G(x) RSPD_LEN_G(x)

#define RSPD_GEN_S    7

#define RSPD_TYPE_S    4
#define RSPD_TYPE_M    0x3
#define RSPD_TYPE_G(x) (((x) >> RSPD_TYPE_S) & RSPD_TYPE_M)

/* Rx queue interrupt deferral fields: counter enable and timer index */
#define QINTR_CNT_EN_S    0
#define QINTR_CNT_EN_V(x) ((x) << QINTR_CNT_EN_S)
#define QINTR_CNT_EN_F    QINTR_CNT_EN_V(1U)

#define QINTR_TIMER_IDX_S    1
#define QINTR_TIMER_IDX_M    0x7
#define QINTR_TIMER_IDX_V(x) ((x) << QINTR_TIMER_IDX_S)
#define QINTR_TIMER_IDX_G(x) (((x) >> QINTR_TIMER_IDX_S) & QINTR_TIMER_IDX_M)

/*
 * Flash layout.
 */
#define FLASH_START(start)	((start) * SF_SEC_SIZE)
#define FLASH_MAX_SIZE(nsecs)	((nsecs) * SF_SEC_SIZE)

enum {
	/*
	 * Various Expansion-ROM boot images, etc.
	 */
	FLASH_EXP_ROM_START_SEC = 0,
	FLASH_EXP_ROM_NSECS = 6,
	FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
	FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),

	/*
	 * iSCSI Boot Firmware Table (iBFT) and other driver-related
	 * parameters ...
	 */
	FLASH_IBFT_START_SEC = 6,
	FLASH_IBFT_NSECS = 1,
	FLASH_IBFT_START = FLASH_START(FLASH_IBFT_START_SEC),
	FLASH_IBFT_MAX_SIZE = FLASH_MAX_SIZE(FLASH_IBFT_NSECS),

	/*
	 * Boot configuration data.
	 */
	FLASH_BOOTCFG_START_SEC = 7,
	FLASH_BOOTCFG_NSECS = 1,
	FLASH_BOOTCFG_START = FLASH_START(FLASH_BOOTCFG_START_SEC),
	FLASH_BOOTCFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_BOOTCFG_NSECS),

	/*
	 * Location of firmware image in FLASH.
	 */
	FLASH_FW_START_SEC = 8,
	FLASH_FW_NSECS = 16,
	FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
	FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),

	/* Location of bootstrap firmware image in FLASH.
	 */
	FLASH_FWBOOTSTRAP_START_SEC = 27,
	FLASH_FWBOOTSTRAP_NSECS = 1,
	FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
	FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),

	/*
	 * iSCSI persistent/crash information.
	 */
	FLASH_ISCSI_CRASH_START_SEC = 29,
	FLASH_ISCSI_CRASH_NSECS = 1,
	FLASH_ISCSI_CRASH_START = FLASH_START(FLASH_ISCSI_CRASH_START_SEC),
	FLASH_ISCSI_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_ISCSI_CRASH_NSECS),

	/*
	 * FCoE persistent/crash information.
	 */
	FLASH_FCOE_CRASH_START_SEC = 30,
	FLASH_FCOE_CRASH_NSECS = 1,
	FLASH_FCOE_CRASH_START = FLASH_START(FLASH_FCOE_CRASH_START_SEC),
	FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS),

	/*
	 * Location of Firmware Configuration File in FLASH.  Since the FPGA
	 * "FLASH" is smaller we need to store the Configuration File in a
	 * different location -- which will overlap the end of the firmware
	 * image if firmware ever gets that large ...
	 */
	FLASH_CFG_START_SEC = 31,
	FLASH_CFG_NSECS = 1,
	FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
	FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),

	/* We don't support FLASH devices which can't support the full
	 * standard set of sections which we need for normal
	 * operations.
	 */
	FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE,

	FLASH_FPGA_CFG_START_SEC = 15,
	FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC),

	/*
	 * Sectors 32-63 are reserved for FLASH failover.
	 */
};

#undef FLASH_START
#undef FLASH_MAX_SIZE

/* SGE ingress timestamp field (60-bit). */
#define SGE_TIMESTAMP_S 0
#define SGE_TIMESTAMP_M 0xfffffffffffffffULL
#define SGE_TIMESTAMP_V(x) ((__u64)(x) << SGE_TIMESTAMP_S)
#define SGE_TIMESTAMP_G(x) (((__u64)(x) >> SGE_TIMESTAMP_S) & SGE_TIMESTAMP_M)

/* SFP/SFF module EEPROM access constants (see SFF-8472). */
#define I2C_DEV_ADDR_A0		0xa0
#define I2C_DEV_ADDR_A2		0xa2
#define I2C_PAGE_SIZE		0x100
#define SFP_DIAG_TYPE_ADDR	0x5c
#define SFP_DIAG_TYPE_LEN	0x1
#define SFP_DIAG_ADDRMODE	BIT(2)
#define SFP_DIAG_IMPLEMENTED	BIT(6)
#define SFF_8472_COMP_ADDR	0x5e
#define SFF_8472_COMP_LEN	0x1
#define SFF_REV_ADDR		0x1
#define SFF_REV_LEN		0x1

#endif /* __T4_HW_H */
// SPDX-License-Identifier: GPL-2.0-or-later /* Mantis PCI bridge driver Copyright (C) Manu Abraham ([email protected]) */ #include <linux/kernel.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <asm/io.h> #include <media/dmxdev.h> #include <media/dvbdev.h> #include <media/dvb_demux.h> #include <media/dvb_frontend.h> #include <media/dvb_net.h> #include "mantis_common.h" #include "mantis_hif.h" #include "mantis_link.h" /* temporary due to physical layer stuff */ #include "mantis_reg.h" static int mantis_hif_sbuf_opdone_wait(struct mantis_ca *ca) { struct mantis_pci *mantis = ca->ca_priv; int rc = 0; if (wait_event_timeout(ca->hif_opdone_wq, ca->hif_event & MANTIS_SBUF_OPDONE, msecs_to_jiffies(500)) == -ERESTARTSYS) { dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): Smart buffer operation timeout !", mantis->num); rc = -EREMOTEIO; } dprintk(MANTIS_DEBUG, 1, "Smart Buffer Operation complete"); ca->hif_event &= ~MANTIS_SBUF_OPDONE; return rc; } static int mantis_hif_write_wait(struct mantis_ca *ca) { struct mantis_pci *mantis = ca->ca_priv; u32 opdone = 0, timeout = 0; int rc = 0; if (wait_event_timeout(ca->hif_write_wq, mantis->gpif_status & MANTIS_GPIF_WRACK, msecs_to_jiffies(500)) == -ERESTARTSYS) { dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): Write ACK timed out !", mantis->num); rc = -EREMOTEIO; } dprintk(MANTIS_DEBUG, 1, "Write Acknowledged"); mantis->gpif_status &= ~MANTIS_GPIF_WRACK; while (!opdone) { opdone = (mmread(MANTIS_GPIF_STATUS) & MANTIS_SBUF_OPDONE); udelay(500); timeout++; if (timeout > 100) { dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): Write operation timed out!", mantis->num); rc = -ETIMEDOUT; break; } } dprintk(MANTIS_DEBUG, 1, "HIF Write success"); return rc; } int mantis_hif_read_mem(struct mantis_ca *ca, u32 addr) { struct mantis_pci *mantis = ca->ca_priv; u32 hif_addr = 0, data, count = 4; dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF Mem Read", mantis->num); mutex_lock(&ca->ca_lock); 
hif_addr &= ~MANTIS_GPIF_PCMCIAREG; hif_addr &= ~MANTIS_GPIF_PCMCIAIOM; hif_addr |= MANTIS_HIF_STATUS; hif_addr |= addr; mmwrite(hif_addr, MANTIS_GPIF_BRADDR); mmwrite(count, MANTIS_GPIF_BRBYTES); udelay(20); mmwrite(hif_addr | MANTIS_GPIF_HIFRDWRN, MANTIS_GPIF_ADDR); if (mantis_hif_sbuf_opdone_wait(ca) != 0) { dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): GPIF Smart Buffer operation failed", mantis->num); mutex_unlock(&ca->ca_lock); return -EREMOTEIO; } data = mmread(MANTIS_GPIF_DIN); mutex_unlock(&ca->ca_lock); dprintk(MANTIS_DEBUG, 1, "Mem Read: 0x%02x", data); return (data >> 24) & 0xff; } int mantis_hif_write_mem(struct mantis_ca *ca, u32 addr, u8 data) { struct mantis_slot *slot = ca->slot; struct mantis_pci *mantis = ca->ca_priv; u32 hif_addr = 0; dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF Mem Write", mantis->num); mutex_lock(&ca->ca_lock); hif_addr &= ~MANTIS_GPIF_HIFRDWRN; hif_addr &= ~MANTIS_GPIF_PCMCIAREG; hif_addr &= ~MANTIS_GPIF_PCMCIAIOM; hif_addr |= MANTIS_HIF_STATUS; hif_addr |= addr; mmwrite(slot->slave_cfg, MANTIS_GPIF_CFGSLA); /* Slot0 alone for now */ mmwrite(hif_addr, MANTIS_GPIF_ADDR); mmwrite(data, MANTIS_GPIF_DOUT); if (mantis_hif_write_wait(ca) != 0) { dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): HIF Smart Buffer operation failed", mantis->num); mutex_unlock(&ca->ca_lock); return -EREMOTEIO; } dprintk(MANTIS_DEBUG, 1, "Mem Write: (0x%02x to 0x%02x)", data, addr); mutex_unlock(&ca->ca_lock); return 0; } int mantis_hif_read_iom(struct mantis_ca *ca, u32 addr) { struct mantis_pci *mantis = ca->ca_priv; u32 data, hif_addr = 0; dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF I/O Read", mantis->num); mutex_lock(&ca->ca_lock); hif_addr &= ~MANTIS_GPIF_PCMCIAREG; hif_addr |= MANTIS_GPIF_PCMCIAIOM; hif_addr |= MANTIS_HIF_STATUS; hif_addr |= addr; mmwrite(hif_addr, MANTIS_GPIF_BRADDR); mmwrite(1, MANTIS_GPIF_BRBYTES); udelay(20); mmwrite(hif_addr | MANTIS_GPIF_HIFRDWRN, MANTIS_GPIF_ADDR); if 
(mantis_hif_sbuf_opdone_wait(ca) != 0) { dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): HIF Smart Buffer operation failed", mantis->num); mutex_unlock(&ca->ca_lock); return -EREMOTEIO; } data = mmread(MANTIS_GPIF_DIN); dprintk(MANTIS_DEBUG, 1, "I/O Read: 0x%02x", data); udelay(50); mutex_unlock(&ca->ca_lock); return (u8) data; } int mantis_hif_write_iom(struct mantis_ca *ca, u32 addr, u8 data) { struct mantis_pci *mantis = ca->ca_priv; u32 hif_addr = 0; dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF I/O Write", mantis->num); mutex_lock(&ca->ca_lock); hif_addr &= ~MANTIS_GPIF_PCMCIAREG; hif_addr &= ~MANTIS_GPIF_HIFRDWRN; hif_addr |= MANTIS_GPIF_PCMCIAIOM; hif_addr |= MANTIS_HIF_STATUS; hif_addr |= addr; mmwrite(hif_addr, MANTIS_GPIF_ADDR); mmwrite(data, MANTIS_GPIF_DOUT); if (mantis_hif_write_wait(ca) != 0) { dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): HIF Smart Buffer operation failed", mantis->num); mutex_unlock(&ca->ca_lock); return -EREMOTEIO; } dprintk(MANTIS_DEBUG, 1, "I/O Write: (0x%02x to 0x%02x)", data, addr); mutex_unlock(&ca->ca_lock); udelay(50); return 0; } int mantis_hif_init(struct mantis_ca *ca) { struct mantis_slot *slot = ca->slot; struct mantis_pci *mantis = ca->ca_priv; u32 irqcfg; slot[0].slave_cfg = 0x70773028; dprintk(MANTIS_ERROR, 1, "Adapter(%d) Initializing Mantis Host Interface", mantis->num); mutex_lock(&ca->ca_lock); irqcfg = mmread(MANTIS_GPIF_IRQCFG); irqcfg = MANTIS_MASK_BRRDY | MANTIS_MASK_WRACK | MANTIS_MASK_EXTIRQ | MANTIS_MASK_WSTO | MANTIS_MASK_OTHERR | MANTIS_MASK_OVFLW; mmwrite(irqcfg, MANTIS_GPIF_IRQCFG); mutex_unlock(&ca->ca_lock); return 0; } void mantis_hif_exit(struct mantis_ca *ca) { struct mantis_pci *mantis = ca->ca_priv; u32 irqcfg; dprintk(MANTIS_ERROR, 1, "Adapter(%d) Exiting Mantis Host Interface", mantis->num); mutex_lock(&ca->ca_lock); irqcfg = mmread(MANTIS_GPIF_IRQCFG); irqcfg &= ~MANTIS_MASK_BRRDY; mmwrite(irqcfg, MANTIS_GPIF_IRQCFG); mutex_unlock(&ca->ca_lock); }
/*
 * Copyright (C) 2023  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _smuio_10_0_2_SH_MASK_HEADER
/* FIX: the guard macro was tested but never defined, so repeated
 * inclusion of this header was not actually prevented.
 */
#define _smuio_10_0_2_SH_MASK_HEADER

/* SMUIO register shift/mask definitions (register fields only, no code). */

// addressBlock: smuio_smuio_misc_SmuSmuioDec
//SMUIO_MCM_CONFIG
#define SMUIO_MCM_CONFIG__DIE_ID__SHIFT 0x0
#define SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT 0x2
#define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT 0x5
#define SMUIO_MCM_CONFIG__PKG_SUBTYPE__SHIFT 0x6
#define SMUIO_MCM_CONFIG__CONSOLE_K__SHIFT 0x10
#define SMUIO_MCM_CONFIG__CONSOLE_A__SHIFT 0x11
#define SMUIO_MCM_CONFIG__DIE_ID_MASK 0x00000003L
#define SMUIO_MCM_CONFIG__PKG_TYPE_MASK 0x0000001CL
#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK 0x00000020L
#define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK 0x000000C0L
#define SMUIO_MCM_CONFIG__CONSOLE_K_MASK 0x00010000L
#define SMUIO_MCM_CONFIG__CONSOLE_A_MASK 0x00020000L
//IP_DISCOVERY_VERSION
#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION__SHIFT 0x0
#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION_MASK 0xFFFFFFFFL
//IO_SMUIO_PINSTRAP
#define IO_SMUIO_PINSTRAP__AUD_PORT_CONN__SHIFT 0x0
#define IO_SMUIO_PINSTRAP__AUD__SHIFT 0x3
#define IO_SMUIO_PINSTRAP__AUD_PORT_CONN_MASK 0x00000007L
#define IO_SMUIO_PINSTRAP__AUD_MASK 0x00000018L
//SCRATCH_REGISTER0
#define SCRATCH_REGISTER0__ScratchPad0__SHIFT 0x0
#define SCRATCH_REGISTER0__ScratchPad0_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER1
#define SCRATCH_REGISTER1__ScratchPad1__SHIFT 0x0
#define SCRATCH_REGISTER1__ScratchPad1_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER2
#define SCRATCH_REGISTER2__ScratchPad2__SHIFT 0x0
#define SCRATCH_REGISTER2__ScratchPad2_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER3
#define SCRATCH_REGISTER3__ScratchPad3__SHIFT 0x0
#define SCRATCH_REGISTER3__ScratchPad3_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER4
#define SCRATCH_REGISTER4__ScratchPad4__SHIFT 0x0
#define SCRATCH_REGISTER4__ScratchPad4_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER5
#define SCRATCH_REGISTER5__ScratchPad5__SHIFT 0x0
#define SCRATCH_REGISTER5__ScratchPad5_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER6
#define SCRATCH_REGISTER6__ScratchPad6__SHIFT 0x0
#define SCRATCH_REGISTER6__ScratchPad6_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER7
#define SCRATCH_REGISTER7__ScratchPad7__SHIFT 0x0
#define SCRATCH_REGISTER7__ScratchPad7_MASK 0xFFFFFFFFL

// addressBlock: smuio_smuio_reset_SmuSmuioDec
//SMUIO_MP_RESET_INTR
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L
//SMUIO_SOC_HALT
#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN__SHIFT 0x2
#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN__SHIFT 0x3
#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN_MASK 0x00000004L
#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN_MASK 0x00000008L
//SMUIO_GFX_MISC_CNTL
#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
#define SMUIO_GFX_MISC_CNTL__PWR_GFX_DLDO_CLK_SWITCH__SHIFT 0x3
#define SMUIO_GFX_MISC_CNTL__PWR_GFX_RLC_CGPG_EN__SHIFT 0x4
#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
#define SMUIO_GFX_MISC_CNTL__PWR_GFX_DLDO_CLK_SWITCH_MASK 0x00000008L
#define SMUIO_GFX_MISC_CNTL__PWR_GFX_RLC_CGPG_EN_MASK 0x00000010L

// addressBlock: smuio_smuio_ccxctrl_SmuSmuioDec
//PWROK_REFCLK_GAP_CYCLES
#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles__SHIFT 0x0
#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles__SHIFT 0x8
#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles_MASK 0x000000FFL
#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles_MASK 0x0000FF00L
//GOLDEN_TSC_INCREMENT_UPPER
#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper__SHIFT 0x0
#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper_MASK 0x00FFFFFFL
//GOLDEN_TSC_INCREMENT_LOWER
#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower__SHIFT 0x0
#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower_MASK 0xFFFFFFFFL
//GOLDEN_TSC_COUNT_UPPER
#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper__SHIFT 0x0
#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK 0x00FFFFFFL
//GOLDEN_TSC_COUNT_LOWER
#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower__SHIFT 0x0
#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower_MASK 0xFFFFFFFFL
//GFX_GOLDEN_TSC_SHADOW_UPPER
#define GFX_GOLDEN_TSC_SHADOW_UPPER__GfxGoldenTscShadowUpper__SHIFT 0x0
#define GFX_GOLDEN_TSC_SHADOW_UPPER__GfxGoldenTscShadowUpper_MASK 0x00FFFFFFL
//GFX_GOLDEN_TSC_SHADOW_LOWER
#define GFX_GOLDEN_TSC_SHADOW_LOWER__GfxGoldenTscShadowLower__SHIFT 0x0
#define GFX_GOLDEN_TSC_SHADOW_LOWER__GfxGoldenTscShadowLower_MASK 0xFFFFFFFFL
//SOC_GOLDEN_TSC_SHADOW_UPPER
#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper__SHIFT 0x0
#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper_MASK 0x00FFFFFFL
//SOC_GOLDEN_TSC_SHADOW_LOWER
#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower__SHIFT 0x0
#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower_MASK 0xFFFFFFFFL
//SOC_GAP_PWROK
#define SOC_GAP_PWROK__soc_gap_pwrok__SHIFT 0x0
#define SOC_GAP_PWROK__soc_gap_pwrok_MASK 0x00000001L

// addressBlock: smuio_smuio_swtimer_SmuSmuioDec
//PWR_VIRT_RESET_REQ
#define PWR_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0
#define PWR_VIRT_RESET_REQ__PF_FLR__SHIFT 0x1f
#define PWR_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL
#define PWR_VIRT_RESET_REQ__PF_FLR_MASK 0x80000000L
//PWR_DISP_TIMER_CONTROL
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L
//PWR_DISP_TIMER2_CONTROL
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L
//PWR_DISP_TIMER_GLOBAL_CONTROL
#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0
#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN__SHIFT 0xa
#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH_MASK 0x000003FFL
#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN_MASK 0x00000400L
//PWR_IH_CONTROL
#define PWR_IH_CONTROL__MAX_CREDIT__SHIFT 0x0
#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK__SHIFT 0x5
#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK__SHIFT 0x6
#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN__SHIFT 0x1f
#define PWR_IH_CONTROL__MAX_CREDIT_MASK 0x0000001FL
#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK_MASK 0x00000020L
#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK_MASK 0x00000040L
#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN_MASK 0x80000000L

// addressBlock: smuio_smuio_svi0_SmuSmuioDec
//SMUSVI0_TEL_PLANE0
#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_IDDCOR__SHIFT 0x0
#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT 0x10
#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_IDDCOR_MASK 0x000000FFL
#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK 0x01FF0000L
//SMUSVI0_PLANE0_CURRENTVID
#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT 0x18
#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK 0xFF000000L
#endif
// SPDX-License-Identifier: GPL-2.0 /****************************************************************************** * * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved. * ******************************************************************************/ #include <drv_types.h> #include <hal_btcoex.h> #include <linux/jiffies.h> #ifndef dev_to_sdio_func #define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev) #endif static const struct sdio_device_id sdio_ids[] = { { SDIO_DEVICE(0x024c, 0x0523), }, { SDIO_DEVICE(0x024c, 0x0525), }, { SDIO_DEVICE(0x024c, 0x0623), }, { SDIO_DEVICE(0x024c, 0x0626), }, { SDIO_DEVICE(0x024c, 0x0627), }, { SDIO_DEVICE(0x024c, 0xb723), }, { /* end: all zeroes */ }, }; MODULE_DEVICE_TABLE(sdio, sdio_ids); static int rtw_drv_init(struct sdio_func *func, const struct sdio_device_id *id); static void rtw_dev_remove(struct sdio_func *func); static int rtw_sdio_resume(struct device *dev); static int rtw_sdio_suspend(struct device *dev); static const struct dev_pm_ops rtw_sdio_pm_ops = { .suspend = rtw_sdio_suspend, .resume = rtw_sdio_resume, }; static struct sdio_driver rtl8723bs_sdio_driver = { .probe = rtw_drv_init, .remove = rtw_dev_remove, .name = "rtl8723bs", .id_table = sdio_ids, .drv = { .pm = &rtw_sdio_pm_ops, } }; static void sd_sync_int_hdl(struct sdio_func *func) { struct dvobj_priv *psdpriv; psdpriv = sdio_get_drvdata(func); if (!psdpriv->if1) return; rtw_sdio_set_irq_thd(psdpriv, current); sd_int_hdl(psdpriv->if1); rtw_sdio_set_irq_thd(psdpriv, NULL); } static int sdio_alloc_irq(struct dvobj_priv *dvobj) { struct sdio_data *psdio_data; struct sdio_func *func; int err; psdio_data = &dvobj->intf_data; func = psdio_data->func; sdio_claim_host(func); err = sdio_claim_irq(func, &sd_sync_int_hdl); if (err) { dvobj->drv_dbg.dbg_sdio_alloc_irq_error_cnt++; netdev_crit(dvobj->if1->pnetdev, "%s: sdio_claim_irq FAIL(%d)!\n", __func__, err); } else { dvobj->drv_dbg.dbg_sdio_alloc_irq_cnt++; dvobj->irq_alloc = 1; } 
sdio_release_host(func); return err?_FAIL:_SUCCESS; } static void sdio_free_irq(struct dvobj_priv *dvobj) { struct sdio_data *psdio_data; struct sdio_func *func; int err; if (dvobj->irq_alloc) { psdio_data = &dvobj->intf_data; func = psdio_data->func; if (func) { sdio_claim_host(func); err = sdio_release_irq(func); if (err) { dvobj->drv_dbg.dbg_sdio_free_irq_error_cnt++; netdev_err(dvobj->if1->pnetdev, "%s: sdio_release_irq FAIL(%d)!\n", __func__, err); } else dvobj->drv_dbg.dbg_sdio_free_irq_cnt++; sdio_release_host(func); } dvobj->irq_alloc = 0; } } static u32 sdio_init(struct dvobj_priv *dvobj) { struct sdio_data *psdio_data; struct sdio_func *func; int err; psdio_data = &dvobj->intf_data; func = psdio_data->func; /* 3 1. init SDIO bus */ sdio_claim_host(func); err = sdio_enable_func(func); if (err) { dvobj->drv_dbg.dbg_sdio_init_error_cnt++; goto release; } err = sdio_set_block_size(func, 512); if (err) { dvobj->drv_dbg.dbg_sdio_init_error_cnt++; goto release; } psdio_data->block_transfer_len = 512; psdio_data->tx_block_mode = 1; psdio_data->rx_block_mode = 1; release: sdio_release_host(func); if (err) return _FAIL; return _SUCCESS; } static void sdio_deinit(struct dvobj_priv *dvobj) { struct sdio_func *func; int err; func = dvobj->intf_data.func; if (func) { sdio_claim_host(func); err = sdio_disable_func(func); if (err) dvobj->drv_dbg.dbg_sdio_deinit_error_cnt++; if (dvobj->irq_alloc) { err = sdio_release_irq(func); if (err) dvobj->drv_dbg.dbg_sdio_free_irq_error_cnt++; else dvobj->drv_dbg.dbg_sdio_free_irq_cnt++; } sdio_release_host(func); } } static struct dvobj_priv *sdio_dvobj_init(struct sdio_func *func) { int status = _FAIL; struct dvobj_priv *dvobj = NULL; struct sdio_data *psdio; dvobj = devobj_init(); if (!dvobj) goto exit; sdio_set_drvdata(func, dvobj); psdio = &dvobj->intf_data; psdio->func = func; if (sdio_init(dvobj) != _SUCCESS) goto free_dvobj; rtw_reset_continual_io_error(dvobj); status = _SUCCESS; free_dvobj: if (status != _SUCCESS && dvobj) { 
sdio_set_drvdata(func, NULL); devobj_deinit(dvobj); dvobj = NULL; } exit: return dvobj; } static void sdio_dvobj_deinit(struct sdio_func *func) { struct dvobj_priv *dvobj = sdio_get_drvdata(func); sdio_set_drvdata(func, NULL); if (dvobj) { sdio_deinit(dvobj); devobj_deinit(dvobj); } } void rtw_set_hal_ops(struct adapter *padapter) { /* alloc memory for HAL DATA */ rtw_hal_data_init(padapter); rtl8723bs_set_hal_ops(padapter); } static void sd_intf_start(struct adapter *padapter) { if (!padapter) return; /* hal dep */ rtw_hal_enable_interrupt(padapter); } static void sd_intf_stop(struct adapter *padapter) { if (!padapter) return; /* hal dep */ rtw_hal_disable_interrupt(padapter); } static struct adapter *rtw_sdio_if1_init(struct dvobj_priv *dvobj, const struct sdio_device_id *pdid) { int status = _FAIL; struct net_device *pnetdev; struct adapter *padapter = NULL; struct sdio_data *psdio = &dvobj->intf_data; padapter = vzalloc(sizeof(*padapter)); if (!padapter) goto exit; padapter->dvobj = dvobj; dvobj->if1 = padapter; padapter->bDriverStopped = true; dvobj->padapters = padapter; padapter->iface_id = 0; /* 3 1. init network device data */ pnetdev = rtw_init_netdev(padapter); if (!pnetdev) goto free_adapter; SET_NETDEV_DEV(pnetdev, dvobj_to_dev(dvobj)); padapter = rtw_netdev_priv(pnetdev); /* 3 3. init driver special setting, interface, OS and hardware relative */ /* 4 3.1 set hardware operation functions */ rtw_set_hal_ops(padapter); /* 3 5. initialize Chip version */ padapter->intf_start = &sd_intf_start; padapter->intf_stop = &sd_intf_stop; padapter->intf_init = &sdio_init; padapter->intf_deinit = &sdio_deinit; padapter->intf_alloc_irq = &sdio_alloc_irq; padapter->intf_free_irq = &sdio_free_irq; if (rtw_init_io_priv(padapter, sdio_set_intf_ops) == _FAIL) goto free_hal_data; rtw_hal_read_chip_version(padapter); rtw_hal_chip_configure(padapter); hal_btcoex_Initialize((void *) padapter); /* 3 6. read efuse/eeprom data */ rtw_hal_read_chip_info(padapter); /* 3 7. 
init driver common data */ if (rtw_init_drv_sw(padapter) == _FAIL) goto free_hal_data; rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj)); /* 3 8. get WLan MAC address */ /* set mac addr */ rtw_macaddr_cfg(&psdio->func->dev, padapter->eeprompriv.mac_addr); rtw_hal_disable_interrupt(padapter); status = _SUCCESS; free_hal_data: if (status != _SUCCESS && padapter->HalData) kfree(padapter->HalData); if (status != _SUCCESS) { rtw_wdev_unregister(padapter->rtw_wdev); rtw_wdev_free(padapter->rtw_wdev); } free_adapter: if (status != _SUCCESS) { if (pnetdev) rtw_free_netdev(pnetdev); else vfree((u8 *)padapter); padapter = NULL; } exit: return padapter; } static void rtw_sdio_if1_deinit(struct adapter *if1) { struct net_device *pnetdev = if1->pnetdev; struct mlme_priv *pmlmepriv = &if1->mlmepriv; if (check_fwstate(pmlmepriv, _FW_LINKED)) rtw_disassoc_cmd(if1, 0, false); free_mlme_ap_info(if1); rtw_cancel_all_timer(if1); rtw_dev_unload(if1); if (if1->rtw_wdev) rtw_wdev_free(if1->rtw_wdev); rtw_free_drv_sw(if1); if (pnetdev) rtw_free_netdev(pnetdev); } /* * drv_init() - a device potentially for us * * notes: drv_init() is called when the bus driver has located a card for us to support. * We accept the new device by returning 0. */ static int rtw_drv_init( struct sdio_func *func, const struct sdio_device_id *id) { int status = _FAIL; struct adapter *if1 = NULL; struct dvobj_priv *dvobj; dvobj = sdio_dvobj_init(func); if (!dvobj) goto exit; if1 = rtw_sdio_if1_init(dvobj, id); if (!if1) goto free_dvobj; /* dev_alloc_name && register_netdev */ status = rtw_drv_register_netdev(if1); if (status != _SUCCESS) goto free_if1; if (sdio_alloc_irq(dvobj) != _SUCCESS) goto free_if1; status = _SUCCESS; free_if1: if (status != _SUCCESS && if1) rtw_sdio_if1_deinit(if1); free_dvobj: if (status != _SUCCESS) sdio_dvobj_deinit(func); exit: return status == _SUCCESS ? 
0 : -ENODEV; } static void rtw_dev_remove(struct sdio_func *func) { struct dvobj_priv *dvobj = sdio_get_drvdata(func); struct adapter *padapter = dvobj->if1; dvobj->processing_dev_remove = true; rtw_unregister_netdevs(dvobj); if (!padapter->bSurpriseRemoved) { int err; /* test surprise remove */ sdio_claim_host(func); sdio_readb(func, 0, &err); sdio_release_host(func); if (err == -ENOMEDIUM) padapter->bSurpriseRemoved = true; } rtw_ps_deny(padapter, PS_DENY_DRV_REMOVE); rtw_pm_set_ips(padapter, IPS_NONE); rtw_pm_set_lps(padapter, PS_MODE_ACTIVE); LeaveAllPowerSaveMode(padapter); rtw_btcoex_HaltNotify(padapter); rtw_sdio_if1_deinit(padapter); sdio_dvobj_deinit(func); } static int rtw_sdio_suspend(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); struct dvobj_priv *psdpriv = sdio_get_drvdata(func); struct pwrctrl_priv *pwrpriv = dvobj_to_pwrctl(psdpriv); struct adapter *padapter = psdpriv->if1; struct debug_priv *pdbgpriv = &psdpriv->drv_dbg; if (padapter->bDriverStopped) return 0; if (pwrpriv->bInSuspend) { pdbgpriv->dbg_suspend_error_cnt++; return 0; } rtw_suspend_common(padapter); return 0; } static int rtw_resume_process(struct adapter *padapter) { struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter); struct dvobj_priv *psdpriv = padapter->dvobj; struct debug_priv *pdbgpriv = &psdpriv->drv_dbg; if (!pwrpriv->bInSuspend) { pdbgpriv->dbg_resume_error_cnt++; return -1; } return rtw_resume_common(padapter); } static int rtw_sdio_resume(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); struct dvobj_priv *psdpriv = sdio_get_drvdata(func); struct adapter *padapter = psdpriv->if1; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; int ret = 0; struct debug_priv *pdbgpriv = &psdpriv->drv_dbg; pdbgpriv->dbg_resume_cnt++; ret = rtw_resume_process(padapter); pmlmeext->last_scan_time = jiffies; return ret; } static int __init rtw_drv_entry(void) { return sdio_register_driver(&rtl8723bs_sdio_driver); } 
module_init(rtw_drv_entry); static void __exit rtw_drv_halt(void) { sdio_unregister_driver(&rtl8723bs_sdio_driver); } module_exit(rtw_drv_halt);
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __DML32_DISPLAY_RQ_DLG_CALC_H__
#define __DML32_DISPLAY_RQ_DLG_CALC_H__

#include "../display_rq_dlg_helpers.h"

struct display_mode_lib;

/*
 * Function: dml_rq_dlg_get_rq_reg
 *  Main entry point for test to get the register values out of this DML class.
 *  This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
 *  and then populate the rq_regs struct
 * Input:
 *  pipe_param - pipe source configuration (e.g. vp, pitch, scaling, dest, etc.)
 * Output:
 *  rq_regs - struct that holds all the RQ registers field value.
 *            See also: <display_rq_regs_st>
 */
void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
		struct display_mode_lib *mode_lib,
		const display_e2e_pipe_params_st *e2e_pipe_param,
		const unsigned int num_pipes,
		const unsigned int pipe_idx);

/*
 * Function: dml_rq_dlg_get_dlg_reg
 *  Calculate and return DLG and TTU register struct given the system setting
 * Output:
 *  dlg_regs - output DLG register struct
 *  ttu_regs - output DLG TTU register struct
 * Input:
 *  e2e_pipe_param - "compacted" array of e2e pipe param struct
 *  num_pipes - num of active "pipe" or "route"
 *  pipe_idx - index that identifies the e2e_pipe_param that corresponds to this dlg
 *  cstate - 0: when calculate min_ttu_vblank it is assumed cstate is not required.
 *           1: Normal mode, cstate is considered.
 *           Added for legacy or unrealistic timing tests.
 */
void dml32_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
		display_dlg_regs_st *dlg_regs,
		display_ttu_regs_st *ttu_regs,
		display_e2e_pipe_params_st *e2e_pipe_param,
		const unsigned int num_pipes,
		const unsigned int pipe_idx);

#endif
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 * */
#include "amdgpu.h"
#include "mmhub_v2_0.h"

#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_default.h"
#include "navi10_enum.h"

#include "gc/gc_10_1_0_offset.h"
#include "soc15_common.h"

/*
 * DAGB0_CNTL_MISC2 lives at a different offset on Sienna Cichlid
 * (MMHUB 2.1.x), so define it locally rather than taking it from the
 * 2.0.0 offset headers included above.
 */
#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid 0x0070
#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid_BASE_IDX 0

/*
 * MMHUB client-ID -> name tables, indexed as [client id][rw], where both
 * indices come from the CID and RW fields of MMVM_L2_PROTECTION_FAULT_STATUS.
 * Used only to produce a human-readable name when decoding a VM fault.
 * Unlisted slots are implicitly NULL and are reported as "unknown".
 */
static const char *mmhub_client_ids_navi1x[][2] = {
	[3][0] = "DCEDMC",
	[4][0] = "DCEVGA",
	[5][0] = "MP0",
	[6][0] = "MP1",
	[13][0] = "VMC",
	[14][0] = "HDP",
	[15][0] = "OSS",
	[16][0] = "VCNU",
	[17][0] = "JPEG",
	[18][0] = "VCN",
	[3][1] = "DCEDMC",
	[4][1] = "DCEXFC",
	[5][1] = "DCEVGA",
	[6][1] = "DCEDWB",
	[7][1] = "MP0",
	[8][1] = "MP1",
	[9][1] = "DBGU1",
	[10][1] = "DBGU0",
	[11][1] = "XDP",
	[14][1] = "HDP",
	[15][1] = "OSS",
	[16][1] = "VCNU",
	[17][1] = "JPEG",
	[18][1] = "VCN",
};

static const char *mmhub_client_ids_sienna_cichlid[][2] = {
	[3][0] = "DCEDMC",
	[4][0] = "DCEVGA",
	[5][0] = "MP0",
	[6][0] = "MP1",
	[8][0] = "VMC",
	[9][0] = "VCNU0",
	[10][0] = "JPEG",
	[12][0] = "VCNU1",
	[13][0] = "VCN1",
	[14][0] = "HDP",
	[15][0] = "OSS",
	[32+11][0] = "VCN0",
	[0][1] = "DBGU0",
	[1][1] = "DBGU1",
	[2][1] = "DCEDWB",
	[3][1] = "DCEDMC",
	[4][1] = "DCEVGA",
	[5][1] = "MP0",
	[6][1] = "MP1",
	[7][1] = "XDP",
	[9][1] = "VCNU0",
	[10][1] = "JPEG",
	[11][1] = "VCN0",
	[12][1] = "VCNU1",
	[13][1] = "VCN1",
	[14][1] = "HDP",
	[15][1] = "OSS",
};

static const char *mmhub_client_ids_beige_goby[][2] = {
	[3][0] = "DCEDMC",
	[4][0] = "DCEVGA",
	[5][0] = "MP0",
	[6][0] = "MP1",
	[8][0] = "VMC",
	[9][0] = "VCNU0",
	[11][0] = "VCN0",
	[14][0] = "HDP",
	[15][0] = "OSS",
	[0][1] = "DBGU0",
	[1][1] = "DBGU1",
	[2][1] = "DCEDWB",
	[3][1] = "DCEDMC",
	[4][1] = "DCEVGA",
	[5][1] = "MP0",
	[6][1] = "MP1",
	[7][1] = "XDP",
	[9][1] = "VCNU0",
	[11][1] = "VCN0",
	[14][1] = "HDP",
	[15][1] = "OSS",
};

/*
 * Build the MMVM_INVALIDATE_ENG0_REQ value for a legacy-mode TLB flush
 * of @vmid: invalidate all L2 PTE/PDE levels and the L1 PTEs, without
 * clearing the protection fault status address.
 */
static uint32_t mmhub_v2_0_get_invalidate_req(unsigned int vmid,
					      uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid*/
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/*
 * Decode an MMVM_L2_PROTECTION_FAULT_STATUS value into dev_err output:
 * client name (per-ASIC tables above), fault flags and the R/W direction.
 *
 * NOTE(review): @cid is used to index the client-ID tables without a
 * bounds check; presumably the CID field cannot exceed the table sizes
 * on these ASICs — verify against the register spec.
 */
static void
mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
					    uint32_t status)
{
	uint32_t cid, rw;
	const char *mmhub_cid = NULL;

	cid = REG_GET_FIELD(status,
			    MMVM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status,
			   MMVM_L2_PROTECTION_FAULT_STATUS, RW);

	dev_err(adev->dev,
		"MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	/* Pick the decode table matching the MMHUB IP revision. */
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(2, 0, 0):
	case IP_VERSION(2, 0, 2):
		mmhub_cid = mmhub_client_ids_navi1x[cid][rw];
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
		mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw];
		break;
	case IP_VERSION(2, 1, 2):
		mmhub_cid = mmhub_client_ids_beige_goby[cid][rw];
		break;
	default:
		mmhub_cid = NULL;
		break;
	}
	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
		mmhub_cid ? mmhub_cid : "unknown", cid);
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
}

/* Program the per-VMID page-table base address (lo/hi halves). */
static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
					uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

	WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    hub->ctx_addr_distance * vmid,
			    lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    hub->ctx_addr_distance * vmid,
			    upper_32_bits(page_table_base));
}

/*
 * Point context 0 (the kernel GART context) at the GART page table and
 * program its start/end addresses in 4KB pages (addresses are split into
 * bits [43:12] lo and [?:44] hi).
 */
static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));
	WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}

/*
 * Program the AGP and system apertures, the default page used for
 * faulting accesses, and enable PTE-read retry on active page migration.
 * The aperture registers are PF-owned, so they are skipped under SR-IOV.
 */
static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	if (!amdgpu_sriov_vf(adev)) {
		/* Program the AGP BAR */
		WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
		WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
		WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

		/* Program the system aperture low logical page number. */
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
	}

	/* Set default page address. */
	value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault". */
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}

/* Enable the L1 TLB with uncached MTYPE and advanced driver model. */
static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC); /* UC, uncached */
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);
}

/*
 * Program the L2 cache control registers (CNTL..CNTL5); fragment sizes
 * depend on whether "translate further" (extra page-walk level) is used.
 */
static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
			    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	/* XXX for emulation, Refer to closed source code.*/
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);

	tmp = mmMMVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);

	tmp = mmMMVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL4, tmp);

	tmp = mmMMVM_L2_CNTL5_DEFAULT;
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL5, tmp);
}

/* Enable VM context 0 (system/GART context, single-level, no retry). */
static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
}

/*
 * Disable the context-1 identity aperture by making it an empty range
 * (low aperture = max address, high aperture = 0). PF-owned registers,
 * skipped under SR-IOV.
 */
static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
{
	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
}

/*
 * Configure VM contexts 1..14 (user contexts): enable them, set page
 * table depth/block size, enable all protection-fault defaults, and
 * set their address range to [0, max_pfn). Context 15 is left alone.
 * hub->vm_cntx_cntl caches the CNTL value (same for every context,
 * taken from the last loop iteration).
 */
static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	int i;
	uint32_t tmp;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i * hub->ctx_distance);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    adev->vm_manager.block_size - 9);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !adev->gmc.noretry);
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}

	hub->vm_cntx_cntl = tmp;
}

/* Open the address range of all 18 invalidation engines to the full space. */
static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    i * hub->eng_addr_distance, 0xffffffff);
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    i * hub->eng_addr_distance, 0x1f);
	}
}

/*
 * Full MMHUB GART bring-up: apertures, TLB, L2 cache, contexts and
 * invalidation engines, in the required order. Always returns 0.
 */
static int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
	/* GART Enable. */
	mmhub_v2_0_init_gart_aperture_regs(adev);
	mmhub_v2_0_init_system_aperture_regs(adev);
	mmhub_v2_0_init_tlb_regs(adev);
	mmhub_v2_0_init_cache_regs(adev);

	mmhub_v2_0_enable_system_domain(adev);
	mmhub_v2_0_disable_identity_aperture(adev);
	mmhub_v2_0_setup_vmid_config(adev);
	mmhub_v2_0_program_invalidation(adev);

	return 0;
}

/* Disable all VM contexts, the L1 TLB and the L2 cache. */
static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < AMDGPU_NUM_VMID; i++)
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL,
				    i * hub->ctx_distance, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, 0);
}

/**
 * mmhub_v2_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 *
 * When @value is false the hub is set to crash on both retry and
 * no-retry faults instead. PF-owned registers, skipped under SR-IOV.
 */
static void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

static const struct amdgpu_vmhub_funcs mmhub_v2_0_vmhub_funcs = {
	.print_l2_protection_fault_status = mmhub_v2_0_print_l2_protection_fault_status,
	.get_invalidate_req = mmhub_v2_0_get_invalidate_req,
};

/*
 * Fill in the MMHUB vmhub descriptor: register offsets, per-context and
 * per-engine register strides, and the context-1 fault interrupt mask.
 */
static void mmhub_v2_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);

	hub->ctx_distance = mmMMVM_CONTEXT1_CNTL - mmMMVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmMMVM_INVALIDATE_ENG1_REQ -
		mmMMVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;

	hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	hub->vmhub_funcs = &mmhub_v2_0_vmhub_funcs;
}

/*
 * Toggle medium-grain clock gating. On MMHUB 2.1.x there is no MM_ATC_L2
 * and only the (relocated) DAGB0_CNTL_MISC2 register is touched; on other
 * revisions both MM_ATC_L2_MISC_CG and DAGB0_CNTL_MISC2 are updated.
 *
 * NOTE(review): on the 2.1.x path `def`/`data` are never initialized yet
 * `data` is modified below; the value is never written back on that path,
 * but the uninitialized use is visible to compilers — confirm upstream.
 */
static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data, def1, data1;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
		return;

	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 1, 2):
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
		break;
	default:
		def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
		break;
	}

	if (enable) {
		data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

	} else {
		data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 1, 2):
		if (def1 != data1)
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid, data1);
		break;
	default:
		if (def != data)
			WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);

		if (def1 != data1)
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
		break;
	}
}

/*
 * Toggle memory light sleep in MM_ATC_L2. A no-op on MMHUB 2.1.x,
 * which has no ATCL2 block.
 */
static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
		return;

	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 1, 2):
		/* There is no ATCL2 in MMHUB for 2.1.x */
		return;
	default:
		def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
		break;
	}

	if (enable)
		data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
}

/*
 * amd_ip clockgating entry point: apply MGCG and LS for supported MMHUB
 * revisions. No-op (returns 0) under SR-IOV or on unknown revisions.
 */
static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(2, 0, 0):
	case IP_VERSION(2, 0, 2):
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 1, 2):
		mmhub_v2_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		mmhub_v2_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

/*
 * Report the current MGCG/LS state into @flags. On 2.1.x (no ATCL2)
 * MGCG state is derived from DAGB alone, with the ATC enable bit faked.
 */
static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
	u32 data, data1;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 1, 2):
		/* There is no ATCL2 in MMHUB for 2.1.x. Keep the status
		 * based on DAGB
		 */
		data = MM_ATC_L2_MISC_CG__ENABLE_MASK;
		data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
		break;
	default:
		data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
		data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
		break;
	}

	/* AMD_CG_SUPPORT_MC_MGCG */
	if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) &&
	    !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

/* Public MMHUB 2.0 function table consumed by the GMC code. */
const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs = {
	.init = mmhub_v2_0_init,
	.gart_enable = mmhub_v2_0_gart_enable,
	.set_fault_enable_default = mmhub_v2_0_set_fault_enable_default,
	.gart_disable = mmhub_v2_0_gart_disable,
	.set_clockgating = mmhub_v2_0_set_clockgating,
	.get_clockgating = mmhub_v2_0_get_clockgating,
	.setup_vm_pt_regs = mmhub_v2_0_setup_vm_pt_regs,
};
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Driver for the NXP ISP1761 device controller
 *
 * Copyright 2021 Linaro, Rui Miguel Silva
 * Copyright 2014 Ideas on Board Oy
 *
 * Contacts:
 *	Laurent Pinchart <[email protected]>
 *	Rui Miguel Silva <[email protected]>
 */

#ifndef _ISP1760_UDC_H_
#define _ISP1760_UDC_H_

#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/usb/gadget.h>

#include "isp1760-regs.h"

struct isp1760_device;
struct isp1760_udc;

/* State machine for the control (endpoint 0) transfer stages. */
enum isp1760_ctrl_state {
	ISP1760_CTRL_SETUP,	/* Waiting for a SETUP transaction */
	ISP1760_CTRL_DATA_IN,	/* Setup received, data IN stage */
	ISP1760_CTRL_DATA_OUT,	/* Setup received, data OUT stage */
	ISP1760_CTRL_STATUS,	/* 0-length request in status stage */
};

/*
 * Per-endpoint state: the gadget-facing usb_ep, the queue of pending
 * requests, and the endpoint's address/packet-size configuration.
 */
struct isp1760_ep {
	struct isp1760_udc *udc;	/* parent UDC */
	struct usb_ep ep;		/* gadget framework endpoint */

	struct list_head queue;		/* pending requests on this endpoint */

	unsigned int addr;		/* endpoint address (number | direction) */
	unsigned int maxpacket;
	char name[7];

	const struct usb_endpoint_descriptor *desc;

	bool rx_pending;		/* OUT data received, not yet consumed */
	bool halted;
	bool wedged;
};

/**
 * struct isp1760_udc - UDC state information
 * @isp: parent ISP1760 device
 * @irq: IRQ number
 * @irqname: IRQ name (as passed to request_irq)
 * @regs: regmap for UDC registers
 * @fields: regmap fields for the UDC registers (DC_FIELD_MAX entries)
 * @driver: Gadget driver
 * @gadget: Gadget device
 * @lock: Protects driver, vbus_timer, ep, ep0_*, DC_EPINDEX register
 * @vbus_timer: timer used for VBUS handling
 * @ep: Array of endpoints
 * @ep0_state: Control request state for endpoint 0
 * @ep0_dir: Direction of the current control request
 * @ep0_length: Length of the current control request
 * @connected: Tracks gadget driver bus connection state
 * @is_isp1763: true when driving the ISP1763 variant of the controller
 * @devstatus: cached USB device status bits
 */
struct isp1760_udc {
	struct isp1760_device *isp;

	int irq;
	char *irqname;

	struct regmap *regs;
	struct regmap_field *fields[DC_FIELD_MAX];

	struct usb_gadget_driver *driver;
	struct usb_gadget gadget;

	spinlock_t lock;
	struct timer_list vbus_timer;

	struct isp1760_ep ep[15];

	enum isp1760_ctrl_state ep0_state;
	u8 ep0_dir;
	u16 ep0_length;

	bool connected;
	bool is_isp1763;

	unsigned int devstatus;
};

#ifdef CONFIG_USB_ISP1761_UDC
int isp1760_udc_register(struct isp1760_device *isp, int irq,
			 unsigned long irqflags);
void isp1760_udc_unregister(struct isp1760_device *isp);
#else
static inline int isp1760_udc_register(struct isp1760_device *isp, int irq,
				       unsigned long irqflags)
{
	return 0;
}

static inline void isp1760_udc_unregister(struct isp1760_device *isp)
{
}
#endif

#endif
// SPDX-License-Identifier: GPL-2.0-only /* * wm8997.c -- WM8997 ALSA SoC Audio driver * * Copyright 2012 Wolfson Microelectronics plc * * Author: Charles Keepax <[email protected]> */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/jack.h> #include <sound/initval.h> #include <sound/tlv.h> #include <linux/mfd/arizona/core.h> #include <linux/mfd/arizona/registers.h> #include "arizona.h" #include "wm8997.h" struct wm8997_priv { struct arizona_priv core; struct arizona_fll fll[2]; }; static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0); static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0); static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0); static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0); static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0); static const struct reg_default wm8997_sysclk_reva_patch[] = { { 0x301D, 0x7B15 }, { 0x301B, 0x0050 }, { 0x305D, 0x7B17 }, { 0x305B, 0x0050 }, { 0x3001, 0x08FE }, { 0x3003, 0x00F4 }, { 0x3041, 0x08FF }, { 0x3043, 0x0005 }, { 0x3020, 0x0225 }, { 0x3021, 0x0A00 }, { 0x3022, 0xE24D }, { 0x3023, 0x0800 }, { 0x3024, 0xE24D }, { 0x3025, 0xF000 }, { 0x3060, 0x0226 }, { 0x3061, 0x0A00 }, { 0x3062, 0xE252 }, { 0x3063, 0x0800 }, { 0x3064, 0xE252 }, { 0x3065, 0xF000 }, { 0x3116, 0x022B }, { 0x3117, 0xFA00 }, { 0x3110, 0x246C }, { 0x3111, 0x0A03 }, { 0x3112, 0x246E }, { 0x3113, 0x0A03 }, { 0x3114, 0x2470 }, { 0x3115, 0x0A03 }, { 0x3126, 0x246C }, { 0x3127, 0x0A02 }, { 0x3128, 0x246E }, { 0x3129, 0x0A02 }, { 0x312A, 0x2470 }, { 0x312B, 0xFA02 }, { 0x3125, 0x0800 }, }; static int wm8997_sysclk_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct arizona *arizona = 
dev_get_drvdata(component->dev->parent); struct regmap *regmap = arizona->regmap; const struct reg_default *patch = NULL; int i, patch_size; switch (arizona->rev) { case 0: patch = wm8997_sysclk_reva_patch; patch_size = ARRAY_SIZE(wm8997_sysclk_reva_patch); break; default: break; } switch (event) { case SND_SOC_DAPM_POST_PMU: if (patch) for (i = 0; i < patch_size; i++) regmap_write_async(regmap, patch[i].reg, patch[i].def); break; case SND_SOC_DAPM_PRE_PMD: break; case SND_SOC_DAPM_PRE_PMU: case SND_SOC_DAPM_POST_PMD: return arizona_clk_ev(w, kcontrol, event); default: return 0; } return arizona_dvfs_sysclk_ev(w, kcontrol, event); } static const char * const wm8997_osr_text[] = { "Low power", "Normal", "High performance", }; static const unsigned int wm8997_osr_val[] = { 0x0, 0x3, 0x5, }; static const struct soc_enum wm8997_hpout_osr[] = { SOC_VALUE_ENUM_SINGLE(ARIZONA_OUTPUT_PATH_CONFIG_1L, ARIZONA_OUT1_OSR_SHIFT, 0x7, ARRAY_SIZE(wm8997_osr_text), wm8997_osr_text, wm8997_osr_val), SOC_VALUE_ENUM_SINGLE(ARIZONA_OUTPUT_PATH_CONFIG_3L, ARIZONA_OUT3_OSR_SHIFT, 0x7, ARRAY_SIZE(wm8997_osr_text), wm8997_osr_text, wm8997_osr_val), }; #define WM8997_NG_SRC(name, base) \ SOC_SINGLE(name " NG HPOUT1L Switch", base, 0, 1, 0), \ SOC_SINGLE(name " NG HPOUT1R Switch", base, 1, 1, 0), \ SOC_SINGLE(name " NG EPOUT Switch", base, 4, 1, 0), \ SOC_SINGLE(name " NG SPKOUT Switch", base, 6, 1, 0), \ SOC_SINGLE(name " NG SPKDAT1L Switch", base, 8, 1, 0), \ SOC_SINGLE(name " NG SPKDAT1R Switch", base, 9, 1, 0) static const struct snd_kcontrol_new wm8997_snd_controls[] = { SOC_SINGLE("IN1 High Performance Switch", ARIZONA_IN1L_CONTROL, ARIZONA_IN1_OSR_SHIFT, 1, 0), SOC_SINGLE("IN2 High Performance Switch", ARIZONA_IN2L_CONTROL, ARIZONA_IN2_OSR_SHIFT, 1, 0), SOC_SINGLE_RANGE_TLV("IN1L Volume", ARIZONA_IN1L_CONTROL, ARIZONA_IN1L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), SOC_SINGLE_RANGE_TLV("IN1R Volume", ARIZONA_IN1R_CONTROL, ARIZONA_IN1R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), 
SOC_SINGLE_RANGE_TLV("IN2L Volume", ARIZONA_IN2L_CONTROL, ARIZONA_IN2L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), SOC_SINGLE_RANGE_TLV("IN2R Volume", ARIZONA_IN2R_CONTROL, ARIZONA_IN2R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), SOC_SINGLE_TLV("IN1L Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_1L, ARIZONA_IN1L_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_SINGLE_TLV("IN1R Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_1R, ARIZONA_IN1R_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_SINGLE_TLV("IN2L Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_2L, ARIZONA_IN2L_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_SINGLE_TLV("IN2R Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_2R, ARIZONA_IN2R_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_ENUM("Input Ramp Up", arizona_in_vi_ramp), SOC_ENUM("Input Ramp Down", arizona_in_vd_ramp), ARIZONA_MIXER_CONTROLS("EQ1", ARIZONA_EQ1MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("EQ2", ARIZONA_EQ2MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE), ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2), SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ1 B3 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B3_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ1 B4 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B4_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT, 24, 0, eq_tlv), ARIZONA_EQ_CONTROL("EQ2 Coefficients", ARIZONA_EQ2_2), SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ2 B3 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B3_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ2 B4 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B4_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ2 B5 Volume", 
ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT, 24, 0, eq_tlv), ARIZONA_EQ_CONTROL("EQ3 Coefficients", ARIZONA_EQ3_2), SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ3 B3 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B3_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ3 B4 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B4_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT, 24, 0, eq_tlv), ARIZONA_EQ_CONTROL("EQ4 Coefficients", ARIZONA_EQ4_2), SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ4 B3 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B3_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ4 B4 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B4_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ4 B5 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B5_GAIN_SHIFT, 24, 0, eq_tlv), ARIZONA_MIXER_CONTROLS("DRC1L", ARIZONA_DRC1LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DRC1R", ARIZONA_DRC1RMIX_INPUT_1_SOURCE), SND_SOC_BYTES_MASK("DRC1", ARIZONA_DRC1_CTRL1, 5, ARIZONA_DRC1R_ENA | ARIZONA_DRC1L_ENA), ARIZONA_MIXER_CONTROLS("LHPF1", ARIZONA_HPLP1MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE), SOC_ENUM("LHPF1 Mode", arizona_lhpf1_mode), SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode), SOC_ENUM("LHPF3 Mode", arizona_lhpf3_mode), SOC_ENUM("LHPF4 Mode", arizona_lhpf4_mode), ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2), ARIZONA_LHPF_CONTROL("LHPF2 Coefficients", ARIZONA_HPLPF2_2), ARIZONA_LHPF_CONTROL("LHPF3 Coefficients", ARIZONA_HPLPF3_2), ARIZONA_LHPF_CONTROL("LHPF4 Coefficients", ARIZONA_HPLPF4_2), SOC_ENUM("ISRC1 FSL", 
arizona_isrc_fsl[0]), SOC_ENUM("ISRC2 FSL", arizona_isrc_fsl[1]), ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE), SOC_SINGLE_TLV("Noise Generator Volume", ARIZONA_COMFORT_NOISE_GENERATOR, ARIZONA_NOISE_GEN_GAIN_SHIFT, 0x16, 0, noise_tlv), ARIZONA_MIXER_CONTROLS("HPOUT1L", ARIZONA_OUT1LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("HPOUT1R", ARIZONA_OUT1RMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("EPOUT", ARIZONA_OUT3LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SPKOUT", ARIZONA_OUT4LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SPKDAT1L", ARIZONA_OUT5LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SPKDAT1R", ARIZONA_OUT5RMIX_INPUT_1_SOURCE), SOC_SINGLE("Speaker High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_4L, ARIZONA_OUT4_OSR_SHIFT, 1, 0), SOC_SINGLE("SPKDAT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_5L, ARIZONA_OUT5_OSR_SHIFT, 1, 0), SOC_DOUBLE_R("HPOUT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_1L, ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_MUTE_SHIFT, 1, 1), SOC_SINGLE("EPOUT Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_3L, ARIZONA_OUT3L_MUTE_SHIFT, 1, 1), SOC_SINGLE("Speaker Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_4L, ARIZONA_OUT4L_MUTE_SHIFT, 1, 1), SOC_DOUBLE_R("SPKDAT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_5L, ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_MUTE_SHIFT, 1, 1), SOC_DOUBLE_R_TLV("HPOUT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_1L, ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_SINGLE_TLV("EPOUT Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_3L, ARIZONA_OUT3L_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_SINGLE_TLV("Speaker Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_4L, ARIZONA_OUT4L_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_DOUBLE_R_TLV("SPKDAT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_5L, ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_ENUM("HPOUT1 OSR", wm8997_hpout_osr[0]), 
SOC_ENUM("EPOUT OSR", wm8997_hpout_osr[1]), SOC_ENUM("Output Ramp Up", arizona_out_vi_ramp), SOC_ENUM("Output Ramp Down", arizona_out_vd_ramp), SOC_DOUBLE("SPKDAT1 Switch", ARIZONA_PDM_SPK1_CTRL_1, ARIZONA_SPK1L_MUTE_SHIFT, ARIZONA_SPK1R_MUTE_SHIFT, 1, 1), SOC_SINGLE("Noise Gate Switch", ARIZONA_NOISE_GATE_CONTROL, ARIZONA_NGATE_ENA_SHIFT, 1, 0), SOC_SINGLE_TLV("Noise Gate Threshold Volume", ARIZONA_NOISE_GATE_CONTROL, ARIZONA_NGATE_THR_SHIFT, 7, 1, ng_tlv), SOC_ENUM("Noise Gate Hold", arizona_ng_hold), WM8997_NG_SRC("HPOUT1L", ARIZONA_NOISE_GATE_SELECT_1L), WM8997_NG_SRC("HPOUT1R", ARIZONA_NOISE_GATE_SELECT_1R), WM8997_NG_SRC("EPOUT", ARIZONA_NOISE_GATE_SELECT_3L), WM8997_NG_SRC("SPKOUT", ARIZONA_NOISE_GATE_SELECT_4L), WM8997_NG_SRC("SPKDAT1L", ARIZONA_NOISE_GATE_SELECT_5L), WM8997_NG_SRC("SPKDAT1R", ARIZONA_NOISE_GATE_SELECT_5R), ARIZONA_MIXER_CONTROLS("AIF1TX1", ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF1TX2", ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF1TX3", ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF1TX4", ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF1TX5", ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF1TX6", ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF1TX7", ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF1TX8", ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF2TX1", ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF2TX2", ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX1", ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX2", ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX3", ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX4", ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX5", ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX6", ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX7", 
ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX8", ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE), }; ARIZONA_MIXER_ENUMS(EQ1, ARIZONA_EQ1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(EQ2, ARIZONA_EQ2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(EQ3, ARIZONA_EQ3MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(EQ4, ARIZONA_EQ4MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DRC1L, ARIZONA_DRC1LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DRC1R, ARIZONA_DRC1RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(LHPF1, ARIZONA_HPLP1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(LHPF2, ARIZONA_HPLP2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(LHPF3, ARIZONA_HPLP3MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(LHPF4, ARIZONA_HPLP4MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(Mic, ARIZONA_MICMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(Noise, ARIZONA_NOISEMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(PWM1, ARIZONA_PWM1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(PWM2, ARIZONA_PWM2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(OUT1L, ARIZONA_OUT1LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(OUT1R, ARIZONA_OUT1RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(OUT3, ARIZONA_OUT3LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SPKOUT, ARIZONA_OUT4LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SPKDAT1L, ARIZONA_OUT5LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SPKDAT1R, ARIZONA_OUT5RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX1, ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX2, ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX3, ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX4, ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX5, ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX6, ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX7, ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX8, ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF2TX1, ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF2TX2, ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SLIMTX1, 
ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(SLIMTX2, ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(SLIMTX3, ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(SLIMTX4, ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(SLIMTX5, ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(SLIMTX6, ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(SLIMTX7, ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(SLIMTX8, ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE);

/*
 * ISRC source selectors: single-input muxes (ARIZONA_MUX_ENUMS) rather
 * than full mixers, one per INT/DEC path of the two ISRC blocks.
 */
ARIZONA_MUX_ENUMS(ISRC1INT1, ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE);
ARIZONA_MUX_ENUMS(ISRC1INT2, ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE);

ARIZONA_MUX_ENUMS(ISRC1DEC1, ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE);
ARIZONA_MUX_ENUMS(ISRC1DEC2, ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE);

ARIZONA_MUX_ENUMS(ISRC2INT1, ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE);
ARIZONA_MUX_ENUMS(ISRC2INT2, ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE);

ARIZONA_MUX_ENUMS(ISRC2DEC1, ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE);
ARIZONA_MUX_ENUMS(ISRC2DEC2, ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE);

/*
 * AEC loopback source selection: the user-visible names and the matching
 * register field values (the value list is sparse, hence the explicit
 * values array paired with SOC_VALUE_ENUM_SINGLE below).
 */
static const char *wm8997_aec_loopback_texts[] = {
	"HPOUT1L", "HPOUT1R", "EPOUT", "SPKOUT", "SPKDAT1L", "SPKDAT1R",
};

static const unsigned int wm8997_aec_loopback_values[] = {
	0, 1, 4, 6, 8, 9,
};

static const struct soc_enum wm8997_aec_loopback =
	SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1,
			      ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf,
			      ARRAY_SIZE(wm8997_aec_loopback_texts),
			      wm8997_aec_loopback_texts,
			      wm8997_aec_loopback_values);

static const struct snd_kcontrol_new wm8997_aec_loopback_mux =
	SOC_DAPM_ENUM("AEC Loopback", wm8997_aec_loopback);

/* DAPM widget graph: clocks, supplies, signal generators, PGAs, AIFs. */
static const struct snd_soc_dapm_widget wm8997_dapm_widgets[] = {
/* SYSCLK uses a local event callback (wm8997_sysclk_ev) on all four
 * power-sequencing edges.
 */
SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
		    0, wm8997_sysclk_ev,
		    SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
		    ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, arizona_clk_ev,
		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
		    ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK,
		    ARIZONA_OPCLK_ASYNC_ENA_SHIFT, 0, NULL, 0),

/* External regulator supplies; MICVDD may be bypassed when idle. */
SND_SOC_DAPM_REGULATOR_SUPPLY("DBVDD2", 0, 0),
SND_SOC_DAPM_REGULATOR_SUPPLY("CPVDD", 20, 0),
SND_SOC_DAPM_REGULATOR_SUPPLY("MICVDD", 0, SND_SOC_DAPM_REGULATOR_BYPASS),
SND_SOC_DAPM_REGULATOR_SUPPLY("SPKVDD", 0, 0),

SND_SOC_DAPM_SIGGEN("TONE"),
SND_SOC_DAPM_SIGGEN("NOISE"),
SND_SOC_DAPM_SIGGEN("HAPTICS"),

SND_SOC_DAPM_INPUT("IN1L"),
SND_SOC_DAPM_INPUT("IN1R"),
SND_SOC_DAPM_INPUT("IN2L"),
SND_SOC_DAPM_INPUT("IN2R"),

/* Input PGAs run arizona_in_ev on all four power edges. */
SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT,
		   0, NULL, 0, arizona_in_ev,
		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
		   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("IN1R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1R_ENA_SHIFT,
		   0, NULL, 0, arizona_in_ev,
		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
		   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("IN2L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2L_ENA_SHIFT,
		   0, NULL, 0, arizona_in_ev,
		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
		   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("IN2R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2R_ENA_SHIFT,
		   0, NULL, 0, arizona_in_ev,
		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
		   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),

SND_SOC_DAPM_SUPPLY("MICBIAS1", ARIZONA_MIC_BIAS_CTRL_1,
		    ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS2", ARIZONA_MIC_BIAS_CTRL_2,
		    ARIZONA_MICB2_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS3", ARIZONA_MIC_BIAS_CTRL_3,
		    ARIZONA_MICB3_ENA_SHIFT, 0, NULL, 0),

SND_SOC_DAPM_PGA("Noise Generator", ARIZONA_COMFORT_NOISE_GENERATOR,
		 ARIZONA_NOISE_GEN_ENA_SHIFT, 0, NULL, 0),

SND_SOC_DAPM_PGA("Tone Generator 1", ARIZONA_TONE_GENERATOR_1,
		 ARIZONA_TONE1_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("Tone Generator 2", ARIZONA_TONE_GENERATOR_1,
		 ARIZONA_TONE2_ENA_SHIFT, 0, NULL, 0),

SND_SOC_DAPM_PGA("Mic Mute Mixer", ARIZONA_MIC_NOISE_MIX_CONTROL_1,
		 ARIZONA_MICMUTE_MIX_ENA_SHIFT, 0, NULL, 0),

/* Signal-processing blocks: EQ, DRC, low/high-pass filters. */
SND_SOC_DAPM_PGA("EQ1", ARIZONA_EQ1_1, ARIZONA_EQ1_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("EQ2", ARIZONA_EQ2_1, ARIZONA_EQ2_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("EQ3", ARIZONA_EQ3_1, ARIZONA_EQ3_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("EQ4", ARIZONA_EQ4_1, ARIZONA_EQ4_ENA_SHIFT, 0, NULL, 0),

SND_SOC_DAPM_PGA("DRC1L", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1L_ENA_SHIFT,
		 0, NULL, 0),
SND_SOC_DAPM_PGA("DRC1R", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1R_ENA_SHIFT,
		 0, NULL, 0),

SND_SOC_DAPM_PGA("LHPF1", ARIZONA_HPLPF1_1, ARIZONA_LHPF1_ENA_SHIFT,
		 0, NULL, 0),
SND_SOC_DAPM_PGA("LHPF2", ARIZONA_HPLPF2_1, ARIZONA_LHPF2_ENA_SHIFT,
		 0, NULL, 0),
SND_SOC_DAPM_PGA("LHPF3", ARIZONA_HPLPF3_1, ARIZONA_LHPF3_ENA_SHIFT,
		 0, NULL, 0),
SND_SOC_DAPM_PGA("LHPF4", ARIZONA_HPLPF4_1, ARIZONA_LHPF4_ENA_SHIFT,
		 0, NULL, 0),

SND_SOC_DAPM_PGA("PWM1 Driver", ARIZONA_PWM_DRIVE_1, ARIZONA_PWM1_ENA_SHIFT,
		 0, NULL, 0),
SND_SOC_DAPM_PGA("PWM2 Driver", ARIZONA_PWM_DRIVE_1, ARIZONA_PWM2_ENA_SHIFT,
		 0, NULL, 0),

/* ISRC paths: note the widget names are 1-based while the register
 * shifts are 0-based (ISRC1INT1 <-> ISRC1_INT0).
 */
SND_SOC_DAPM_PGA("ISRC1INT1", ARIZONA_ISRC_1_CTRL_3,
		 ARIZONA_ISRC1_INT0_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("ISRC1INT2", ARIZONA_ISRC_1_CTRL_3,
		 ARIZONA_ISRC1_INT1_ENA_SHIFT, 0, NULL, 0),

SND_SOC_DAPM_PGA("ISRC1DEC1", ARIZONA_ISRC_1_CTRL_3,
		 ARIZONA_ISRC1_DEC0_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("ISRC1DEC2", ARIZONA_ISRC_1_CTRL_3,
		 ARIZONA_ISRC1_DEC1_ENA_SHIFT, 0, NULL, 0),

SND_SOC_DAPM_PGA("ISRC2INT1", ARIZONA_ISRC_2_CTRL_3,
		 ARIZONA_ISRC2_INT0_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("ISRC2INT2", ARIZONA_ISRC_2_CTRL_3,
		 ARIZONA_ISRC2_INT1_ENA_SHIFT, 0, NULL, 0),

SND_SOC_DAPM_PGA("ISRC2DEC1", ARIZONA_ISRC_2_CTRL_3,
		 ARIZONA_ISRC2_DEC0_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("ISRC2DEC2", ARIZONA_ISRC_2_CTRL_3,
		 ARIZONA_ISRC2_DEC1_ENA_SHIFT, 0, NULL, 0),

/* Audio interface channels; the third argument is the TDM slot index. */
SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0,
		     ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 1,
		     ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 2,
		     ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX3_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 3,
		     ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX4_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 4,
		     ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX5_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 5,
		     ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 6,
		     ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX7_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 7,
		     ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX8_ENA_SHIFT, 0),

SND_SOC_DAPM_AIF_IN("AIF1RX1", NULL, 0,
		    ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 1,
		    ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 2,
		    ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX3_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 3,
		    ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX4_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 4,
		    ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX5_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 5,
		    ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 6,
		    ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX7_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 7,
		    ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX8_ENA_SHIFT, 0),

SND_SOC_DAPM_AIF_OUT("AIF2TX1", NULL, 0,
		     ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 1,
		     ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX2_ENA_SHIFT, 0),

SND_SOC_DAPM_AIF_IN("AIF2RX1", NULL, 0,
		    ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 1,
		    ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX2_ENA_SHIFT, 0),

SND_SOC_DAPM_AIF_OUT("SLIMTX1", NULL, 0,
		     ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
		     ARIZONA_SLIMTX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("SLIMTX2", NULL, 1,
		     ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
		     ARIZONA_SLIMTX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("SLIMTX3", NULL, 2,
		     ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
		     ARIZONA_SLIMTX3_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("SLIMTX4", NULL, 3,
		     ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
		     ARIZONA_SLIMTX4_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("SLIMTX5", NULL, 4,
		     ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
		     ARIZONA_SLIMTX5_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("SLIMTX6", NULL, 5,
		     ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
		     ARIZONA_SLIMTX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("SLIMTX7", NULL, 6,
		     ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
		     ARIZONA_SLIMTX7_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("SLIMTX8", NULL, 7,
		     ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
		     ARIZONA_SLIMTX8_ENA_SHIFT, 0),

SND_SOC_DAPM_AIF_IN("SLIMRX1", NULL, 0,
		    ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
		    ARIZONA_SLIMRX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("SLIMRX2", NULL, 1,
		    ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
		    ARIZONA_SLIMRX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("SLIMRX3", NULL, 2,
		    ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
		    ARIZONA_SLIMRX3_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("SLIMRX4", NULL, 3,
		    ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
		    ARIZONA_SLIMRX4_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("SLIMRX5", NULL, 4,
		    ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
		    ARIZONA_SLIMRX5_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("SLIMRX6", NULL, 5,
		    ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
		    ARIZONA_SLIMRX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("SLIMRX7", NULL, 6,
		    ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
		    ARIZONA_SLIMRX7_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("SLIMRX8", NULL, 7,
		    ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
		    ARIZONA_SLIMRX8_ENA_SHIFT, 0),

SND_SOC_DAPM_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1,
		 ARIZONA_AEC_LOOPBACK_ENA_SHIFT, 0, &wm8997_aec_loopback_mux),

/*
 * Headphone outputs are sequenced by arizona_hp_ev (SND_SOC_NOPM: the
 * enable register is written from the event callback, not by DAPM).
 */
SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
		   ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
		   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT1R", SND_SOC_NOPM,
		   ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
		   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT3L", ARIZONA_OUTPUT_ENABLES_1,
		   ARIZONA_OUT3L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
		   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
/* Digital speaker outputs only hook the PRE_PMD/POST_PMU edges. */
SND_SOC_DAPM_PGA_E("OUT5L", ARIZONA_OUTPUT_ENABLES_1,
		   ARIZONA_OUT5L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT5R", ARIZONA_OUTPUT_ENABLES_1,
		   ARIZONA_OUT5R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),

/* Expand the per-destination mixer widget sets (defined in arizona.h). */
ARIZONA_MIXER_WIDGETS(EQ1, "EQ1"),
ARIZONA_MIXER_WIDGETS(EQ2, "EQ2"),
ARIZONA_MIXER_WIDGETS(EQ3, "EQ3"),
ARIZONA_MIXER_WIDGETS(EQ4, "EQ4"),

ARIZONA_MIXER_WIDGETS(DRC1L, "DRC1L"),
ARIZONA_MIXER_WIDGETS(DRC1R, "DRC1R"),

ARIZONA_MIXER_WIDGETS(LHPF1, "LHPF1"),
ARIZONA_MIXER_WIDGETS(LHPF2, "LHPF2"),
ARIZONA_MIXER_WIDGETS(LHPF3, "LHPF3"),
ARIZONA_MIXER_WIDGETS(LHPF4, "LHPF4"),

ARIZONA_MIXER_WIDGETS(Mic, "Mic"),
ARIZONA_MIXER_WIDGETS(Noise, "Noise"),

ARIZONA_MIXER_WIDGETS(PWM1, "PWM1"),
ARIZONA_MIXER_WIDGETS(PWM2, "PWM2"),

ARIZONA_MIXER_WIDGETS(OUT1L, "HPOUT1L"),
ARIZONA_MIXER_WIDGETS(OUT1R, "HPOUT1R"),
ARIZONA_MIXER_WIDGETS(OUT3, "EPOUT"),
ARIZONA_MIXER_WIDGETS(SPKOUT, "SPKOUT"),
ARIZONA_MIXER_WIDGETS(SPKDAT1L, "SPKDAT1L"),
ARIZONA_MIXER_WIDGETS(SPKDAT1R, "SPKDAT1R"),

ARIZONA_MIXER_WIDGETS(AIF1TX1, "AIF1TX1"),
ARIZONA_MIXER_WIDGETS(AIF1TX2, "AIF1TX2"),
ARIZONA_MIXER_WIDGETS(AIF1TX3, "AIF1TX3"),
ARIZONA_MIXER_WIDGETS(AIF1TX4, "AIF1TX4"),
ARIZONA_MIXER_WIDGETS(AIF1TX5, "AIF1TX5"),
ARIZONA_MIXER_WIDGETS(AIF1TX6, "AIF1TX6"),
ARIZONA_MIXER_WIDGETS(AIF1TX7, "AIF1TX7"),
ARIZONA_MIXER_WIDGETS(AIF1TX8, "AIF1TX8"),

ARIZONA_MIXER_WIDGETS(AIF2TX1, "AIF2TX1"),
ARIZONA_MIXER_WIDGETS(AIF2TX2, "AIF2TX2"),

ARIZONA_MIXER_WIDGETS(SLIMTX1, "SLIMTX1"),
ARIZONA_MIXER_WIDGETS(SLIMTX2, "SLIMTX2"),
ARIZONA_MIXER_WIDGETS(SLIMTX3, "SLIMTX3"),
ARIZONA_MIXER_WIDGETS(SLIMTX4, "SLIMTX4"),
ARIZONA_MIXER_WIDGETS(SLIMTX5, "SLIMTX5"),
ARIZONA_MIXER_WIDGETS(SLIMTX6, "SLIMTX6"),
ARIZONA_MIXER_WIDGETS(SLIMTX7, "SLIMTX7"),
ARIZONA_MIXER_WIDGETS(SLIMTX8, "SLIMTX8"),

ARIZONA_MUX_WIDGETS(ISRC1DEC1, "ISRC1DEC1"),
ARIZONA_MUX_WIDGETS(ISRC1DEC2, "ISRC1DEC2"),

ARIZONA_MUX_WIDGETS(ISRC1INT1, "ISRC1INT1"),
ARIZONA_MUX_WIDGETS(ISRC1INT2, "ISRC1INT2"),

ARIZONA_MUX_WIDGETS(ISRC2DEC1, "ISRC2DEC1"),
ARIZONA_MUX_WIDGETS(ISRC2DEC2, "ISRC2DEC2"),

ARIZONA_MUX_WIDGETS(ISRC2INT1, "ISRC2INT1"),
ARIZONA_MUX_WIDGETS(ISRC2INT2, "ISRC2INT2"),

/* Physical output pins of the codec. */
SND_SOC_DAPM_OUTPUT("HPOUT1L"),
SND_SOC_DAPM_OUTPUT("HPOUT1R"),
SND_SOC_DAPM_OUTPUT("EPOUTN"),
SND_SOC_DAPM_OUTPUT("EPOUTP"),
SND_SOC_DAPM_OUTPUT("SPKOUTN"),
SND_SOC_DAPM_OUTPUT("SPKOUTP"),
SND_SOC_DAPM_OUTPUT("SPKDAT1L"),
SND_SOC_DAPM_OUTPUT("SPKDAT1R"),

SND_SOC_DAPM_OUTPUT("MICSUPP"),
};

/*
 * Every route source a mixer input mux can select on this part; expanded
 * once per mixer destination in the route table.
 */
#define ARIZONA_MIXER_INPUT_ROUTES(name) \
	{ name, "Noise Generator", "Noise Generator" }, \
	{ name, "Tone Generator 1", "Tone Generator 1" }, \
	{ name, "Tone Generator 2", "Tone Generator 2" }, \
	{ name, "Haptics", "HAPTICS" }, \
	{ name, "AEC", "AEC Loopback" }, \
	{ name, "IN1L", "IN1L PGA" }, \
	{ name, "IN1R", "IN1R PGA" }, \
	{ name, "IN2L", "IN2L PGA" }, \
	{ name, "IN2R", "IN2R PGA" }, \
	{ name, "Mic Mute Mixer", "Mic Mute Mixer" }, \
	{ name, "AIF1RX1", "AIF1RX1" }, \
	{ name, "AIF1RX2", "AIF1RX2" }, \
	{ name, "AIF1RX3", "AIF1RX3" }, \
	{ name, "AIF1RX4", "AIF1RX4" }, \
	{ name, "AIF1RX5", "AIF1RX5" }, \
	{ name, "AIF1RX6", "AIF1RX6" }, \
	{ name, "AIF1RX7", "AIF1RX7" }, \
	{ name, "AIF1RX8", "AIF1RX8" }, \
	{ name, "AIF2RX1", "AIF2RX1" }, \
	{ name, "AIF2RX2", "AIF2RX2" }, \
	{ name, "SLIMRX1", "SLIMRX1" }, \
	{ name, "SLIMRX2", "SLIMRX2" }, \
	{ name, "SLIMRX3", "SLIMRX3" }, \
	{ name, "SLIMRX4", "SLIMRX4" }, \
	{ name, "SLIMRX5", "SLIMRX5" }, \
	{ name, "SLIMRX6", "SLIMRX6" }, \
	{ name, "SLIMRX7", "SLIMRX7" }, \
	{ name, "SLIMRX8", "SLIMRX8" }, \
	{ name, "EQ1", "EQ1" }, \
	{ name, "EQ2", "EQ2" }, \
	{ name, "EQ3", "EQ3" }, \
	{ name, "EQ4", "EQ4" }, \
	{ name, "DRC1L", "DRC1L" }, \
	{ name, "DRC1R", "DRC1R" }, \
	{ name, "LHPF1", "LHPF1" }, \
	{ name, "LHPF2", "LHPF2" }, \
	{ name, "LHPF3", "LHPF3" }, \
	{ name, "LHPF4", "LHPF4" }, \
	{ name, "ISRC1DEC1", "ISRC1DEC1" }, \
	{ name, "ISRC1DEC2", "ISRC1DEC2" }, \
	{ name, "ISRC1INT1", "ISRC1INT1" }, \
	{ name, "ISRC1INT2", "ISRC1INT2" }, \
	{ name, "ISRC2DEC1", "ISRC2DEC1" }, \
	{ name, "ISRC2DEC2", "ISRC2DEC2" }, \
	{ name, "ISRC2INT1", "ISRC2INT1" }, \
	{ name, "ISRC2INT2", "ISRC2INT2" }

static const struct snd_soc_dapm_route wm8997_dapm_routes[] = {
	/* Supply dependencies for outputs, inputs and bias generators. */
	{ "AIF2 Capture", NULL, "DBVDD2" },
	{ "AIF2 Playback", NULL, "DBVDD2" },

	{ "OUT1L", NULL, "CPVDD" },
	{ "OUT1R", NULL, "CPVDD" },
	{ "OUT3L", NULL, "CPVDD" },

	{ "OUT4L", NULL, "SPKVDD" },

	{ "OUT1L", NULL, "SYSCLK" },
	{ "OUT1R", NULL, "SYSCLK" },
	{ "OUT3L", NULL, "SYSCLK" },
	{ "OUT4L", NULL, "SYSCLK" },

	{ "IN1L", NULL, "SYSCLK" },
	{ "IN1R", NULL, "SYSCLK" },
	{ "IN2L", NULL, "SYSCLK" },
	{ "IN2R", NULL, "SYSCLK" },

	{ "MICBIAS1", NULL, "MICVDD" },
	{ "MICBIAS2", NULL, "MICVDD" },
	{ "MICBIAS3", NULL, "MICVDD" },

	{ "Noise Generator", NULL, "SYSCLK" },
	{ "Tone Generator 1", NULL, "SYSCLK" },
	{ "Tone Generator 2", NULL, "SYSCLK" },

	{ "Noise Generator", NULL, "NOISE" },
	{ "Tone Generator 1", NULL, "TONE" },
	{ "Tone Generator 2", NULL, "TONE" },

	/* Tie each AIF/SLIMbus channel widget to its DAI stream. */
	{ "AIF1 Capture", NULL, "AIF1TX1" },
	{ "AIF1 Capture", NULL, "AIF1TX2" },
	{ "AIF1 Capture", NULL, "AIF1TX3" },
	{ "AIF1 Capture", NULL, "AIF1TX4" },
	{ "AIF1 Capture", NULL, "AIF1TX5" },
	{ "AIF1 Capture", NULL, "AIF1TX6" },
	{ "AIF1 Capture", NULL, "AIF1TX7" },
	{ "AIF1 Capture", NULL, "AIF1TX8" },

	{ "AIF1RX1", NULL, "AIF1 Playback" },
	{ "AIF1RX2", NULL, "AIF1 Playback" },
	{ "AIF1RX3", NULL, "AIF1 Playback" },
	{ "AIF1RX4", NULL, "AIF1 Playback" },
	{ "AIF1RX5", NULL, "AIF1 Playback" },
	{ "AIF1RX6", NULL, "AIF1 Playback" },
	{ "AIF1RX7", NULL, "AIF1 Playback" },
	{ "AIF1RX8", NULL, "AIF1 Playback" },

	{ "AIF2 Capture", NULL, "AIF2TX1" },
	{ "AIF2 Capture", NULL, "AIF2TX2" },

	{ "AIF2RX1", NULL, "AIF2 Playback" },
	{ "AIF2RX2", NULL, "AIF2 Playback" },

	{ "Slim1 Capture", NULL, "SLIMTX1" },
	{ "Slim1 Capture", NULL, "SLIMTX2" },
	{ "Slim1 Capture", NULL, "SLIMTX3" },
	{ "Slim1 Capture", NULL, "SLIMTX4" },

	{ "SLIMRX1", NULL, "Slim1 Playback" },
	{ "SLIMRX2", NULL, "Slim1 Playback" },
	{ "SLIMRX3", NULL, "Slim1 Playback" },
	{ "SLIMRX4", NULL, "Slim1 Playback" },

	{ "Slim2 Capture", NULL, "SLIMTX5" },
	{ "Slim2 Capture", NULL, "SLIMTX6" },

	{ "SLIMRX5", NULL, "Slim2 Playback" },
	{ "SLIMRX6", NULL, "Slim2 Playback" },

	{ "Slim3 Capture", NULL, "SLIMTX7" },
	{ "Slim3 Capture", NULL, "SLIMTX8" },

	{ "SLIMRX7", NULL, "Slim3 Playback" },
	{ "SLIMRX8", NULL, "Slim3 Playback" },

	{ "AIF1 Playback", NULL, "SYSCLK" },
	{ "AIF2 Playback", NULL, "SYSCLK" },
	{ "Slim1 Playback", NULL, "SYSCLK" },
	{ "Slim2 Playback", NULL, "SYSCLK" },
	{ "Slim3 Playback", NULL, "SYSCLK" },

	{ "AIF1 Capture", NULL, "SYSCLK" },
	{ "AIF2 Capture", NULL, "SYSCLK" },
	{ "Slim1 Capture", NULL, "SYSCLK" },
	{ "Slim2 Capture", NULL, "SYSCLK" },
	{ "Slim3 Capture", NULL, "SYSCLK" },

	{ "IN1L PGA", NULL, "IN1L" },
	{ "IN1R PGA", NULL, "IN1R" },
	{ "IN2L PGA", NULL, "IN2L" },
	{ "IN2R PGA", NULL, "IN2R" },

	ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
	ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
	ARIZONA_MIXER_ROUTES("OUT3L", "EPOUT"),

	ARIZONA_MIXER_ROUTES("OUT4L", "SPKOUT"),

	ARIZONA_MIXER_ROUTES("OUT5L", "SPKDAT1L"),
	ARIZONA_MIXER_ROUTES("OUT5R", "SPKDAT1R"),

	ARIZONA_MIXER_ROUTES("PWM1 Driver", "PWM1"),
	ARIZONA_MIXER_ROUTES("PWM2 Driver", "PWM2"),

	ARIZONA_MIXER_ROUTES("AIF1TX1", "AIF1TX1"),
	ARIZONA_MIXER_ROUTES("AIF1TX2", "AIF1TX2"),
	ARIZONA_MIXER_ROUTES("AIF1TX3", "AIF1TX3"),
	ARIZONA_MIXER_ROUTES("AIF1TX4", "AIF1TX4"),
	ARIZONA_MIXER_ROUTES("AIF1TX5", "AIF1TX5"),
	ARIZONA_MIXER_ROUTES("AIF1TX6", "AIF1TX6"),
	ARIZONA_MIXER_ROUTES("AIF1TX7", "AIF1TX7"),
	ARIZONA_MIXER_ROUTES("AIF1TX8", "AIF1TX8"),

	ARIZONA_MIXER_ROUTES("AIF2TX1", "AIF2TX1"),
	ARIZONA_MIXER_ROUTES("AIF2TX2", "AIF2TX2"),

	ARIZONA_MIXER_ROUTES("SLIMTX1", "SLIMTX1"),
	ARIZONA_MIXER_ROUTES("SLIMTX2", "SLIMTX2"),
	ARIZONA_MIXER_ROUTES("SLIMTX3",
			     "SLIMTX3"),
	ARIZONA_MIXER_ROUTES("SLIMTX4", "SLIMTX4"),
	ARIZONA_MIXER_ROUTES("SLIMTX5", "SLIMTX5"),
	ARIZONA_MIXER_ROUTES("SLIMTX6", "SLIMTX6"),
	ARIZONA_MIXER_ROUTES("SLIMTX7", "SLIMTX7"),
	ARIZONA_MIXER_ROUTES("SLIMTX8", "SLIMTX8"),

	ARIZONA_MIXER_ROUTES("EQ1", "EQ1"),
	ARIZONA_MIXER_ROUTES("EQ2", "EQ2"),
	ARIZONA_MIXER_ROUTES("EQ3", "EQ3"),
	ARIZONA_MIXER_ROUTES("EQ4", "EQ4"),

	ARIZONA_MIXER_ROUTES("DRC1L", "DRC1L"),
	ARIZONA_MIXER_ROUTES("DRC1R", "DRC1R"),

	ARIZONA_MIXER_ROUTES("LHPF1", "LHPF1"),
	ARIZONA_MIXER_ROUTES("LHPF2", "LHPF2"),
	ARIZONA_MIXER_ROUTES("LHPF3", "LHPF3"),
	ARIZONA_MIXER_ROUTES("LHPF4", "LHPF4"),

	ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Noise"),
	ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Mic"),

	ARIZONA_MUX_ROUTES("ISRC1INT1", "ISRC1INT1"),
	ARIZONA_MUX_ROUTES("ISRC1INT2", "ISRC1INT2"),

	ARIZONA_MUX_ROUTES("ISRC1DEC1", "ISRC1DEC1"),
	ARIZONA_MUX_ROUTES("ISRC1DEC2", "ISRC1DEC2"),

	ARIZONA_MUX_ROUTES("ISRC2INT1", "ISRC2INT1"),
	ARIZONA_MUX_ROUTES("ISRC2INT2", "ISRC2INT2"),

	ARIZONA_MUX_ROUTES("ISRC2DEC1", "ISRC2DEC1"),
	ARIZONA_MUX_ROUTES("ISRC2DEC2", "ISRC2DEC2"),

	/* AEC loopback taps and the final output-pin connections. */
	{ "AEC Loopback", "HPOUT1L", "OUT1L" },
	{ "AEC Loopback", "HPOUT1R", "OUT1R" },
	{ "HPOUT1L", NULL, "OUT1L" },
	{ "HPOUT1R", NULL, "OUT1R" },

	{ "AEC Loopback", "EPOUT", "OUT3L" },
	{ "EPOUTN", NULL, "OUT3L" },
	{ "EPOUTP", NULL, "OUT3L" },

	{ "AEC Loopback", "SPKOUT", "OUT4L" },
	{ "SPKOUTN", NULL, "OUT4L" },
	{ "SPKOUTP", NULL, "OUT4L" },

	{ "AEC Loopback", "SPKDAT1L", "OUT5L" },
	{ "AEC Loopback", "SPKDAT1R", "OUT5R" },
	{ "SPKDAT1L", NULL, "OUT5L" },
	{ "SPKDAT1R", NULL, "OUT5R" },

	{ "MICSUPP", NULL, "SYSCLK" },
};

/*
 * wm8997_set_fll() - route a set_pll() request to the right FLL instance.
 *
 * Dispatches on the caller-supplied fll_id: plain IDs retune the whole
 * FLL, the *_REFCLK IDs only change its reference clock.  Returns the
 * result of the arizona core helper, or -EINVAL for an unknown id.
 */
static int wm8997_set_fll(struct snd_soc_component *component, int fll_id,
			  int source, unsigned int Fref, unsigned int Fout)
{
	struct wm8997_priv *wm8997 = snd_soc_component_get_drvdata(component);

	switch (fll_id) {
	case WM8997_FLL1:
		return arizona_set_fll(&wm8997->fll[0], source, Fref, Fout);
	case WM8997_FLL2:
		return arizona_set_fll(&wm8997->fll[1], source, Fref, Fout);
	case WM8997_FLL1_REFCLK:
		return arizona_set_fll_refclk(&wm8997->fll[0], source, Fref,
					      Fout);
	case WM8997_FLL2_REFCLK:
		return arizona_set_fll_refclk(&wm8997->fll[1], source, Fref,
					      Fout);
	default:
		return -EINVAL;
	}
}

/* Rates are constrained at runtime (SNDRV_PCM_RATE_KNOT). */
#define WM8997_RATES SNDRV_PCM_RATE_KNOT

#define WM8997_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)

/* Two full AIFs plus three SLIMbus logical DAIs. */
static struct snd_soc_dai_driver wm8997_dai[] = {
	{
		.name = "wm8997-aif1",
		.id = 1,
		.base = ARIZONA_AIF1_BCLK_CTRL,
		.playback = {
			.stream_name = "AIF1 Playback",
			.channels_min = 1,
			.channels_max = 8,
			.rates = WM8997_RATES,
			.formats = WM8997_FORMATS,
		},
		.capture = {
			.stream_name = "AIF1 Capture",
			.channels_min = 1,
			.channels_max = 8,
			.rates = WM8997_RATES,
			.formats = WM8997_FORMATS,
		},
		.ops = &arizona_dai_ops,
		.symmetric_rate = 1,
		.symmetric_sample_bits = 1,
	},
	{
		.name = "wm8997-aif2",
		.id = 2,
		.base = ARIZONA_AIF2_BCLK_CTRL,
		.playback = {
			.stream_name = "AIF2 Playback",
			.channels_min = 1,
			.channels_max = 2,
			.rates = WM8997_RATES,
			.formats = WM8997_FORMATS,
		},
		.capture = {
			.stream_name = "AIF2 Capture",
			.channels_min = 1,
			.channels_max = 2,
			.rates = WM8997_RATES,
			.formats = WM8997_FORMATS,
		},
		.ops = &arizona_dai_ops,
		.symmetric_rate = 1,
		.symmetric_sample_bits = 1,
	},
	{
		/* SLIMbus DAIs have no register base and use the simple ops. */
		.name = "wm8997-slim1",
		.id = 3,
		.playback = {
			.stream_name = "Slim1 Playback",
			.channels_min = 1,
			.channels_max = 4,
			.rates = WM8997_RATES,
			.formats = WM8997_FORMATS,
		},
		.capture = {
			.stream_name = "Slim1 Capture",
			.channels_min = 1,
			.channels_max = 4,
			.rates = WM8997_RATES,
			.formats = WM8997_FORMATS,
		},
		.ops = &arizona_simple_dai_ops,
	},
	{
		.name = "wm8997-slim2",
		.id = 4,
		.playback = {
			.stream_name = "Slim2 Playback",
			.channels_min = 1,
			.channels_max = 2,
			.rates = WM8997_RATES,
			.formats = WM8997_FORMATS,
		},
		.capture = {
			.stream_name = "Slim2 Capture",
			.channels_min = 1,
			.channels_max = 2,
			.rates = WM8997_RATES,
			.formats = WM8997_FORMATS,
		},
		.ops = &arizona_simple_dai_ops,
	},
	{
		.name = "wm8997-slim3",
		.id = 5,
		.playback = {
			.stream_name = "Slim3 Playback",
			.channels_min = 1,
			.channels_max = 2,
			.rates = WM8997_RATES,
			.formats = WM8997_FORMATS,
		},
		.capture = {
			.stream_name = "Slim3 Capture",
			.channels_min = 1,
			.channels_max = 2,
			.rates = WM8997_RATES,
			.formats = WM8997_FORMATS,
		},
		.ops = &arizona_simple_dai_ops,
	},
};

/*
 * Component probe: attach the shared arizona regmap, set up the speaker
 * outputs and publish the DAPM context to the MFD core.  "HAPTICS" is
 * disabled by default.
 */
static int wm8997_component_probe(struct snd_soc_component *component)
{
	struct snd_soc_dapm_context *dapm =
		snd_soc_component_get_dapm(component);
	struct wm8997_priv *priv = snd_soc_component_get_drvdata(component);
	struct arizona *arizona = priv->core.arizona;
	int ret;

	snd_soc_component_init_regmap(component, arizona->regmap);

	ret = arizona_init_spk(component);
	if (ret < 0)
		return ret;

	snd_soc_component_disable_pin(component, "HAPTICS");

	priv->core.arizona->dapm = dapm;

	return 0;
}

/* Component remove: drop the DAPM context published in probe. */
static void wm8997_component_remove(struct snd_soc_component *component)
{
	struct wm8997_priv *priv = snd_soc_component_get_drvdata(component);

	priv->core.arizona->dapm = NULL;
}

/* Volume-update (VU) latch bit in the DAC digital volume registers. */
#define WM8997_DIG_VU 0x0200

/* Registers that need WM8997_DIG_VU set so volume writes take effect. */
static unsigned int wm8997_digital_vu[] = {
	ARIZONA_DAC_DIGITAL_VOLUME_1L,
	ARIZONA_DAC_DIGITAL_VOLUME_1R,
	ARIZONA_DAC_DIGITAL_VOLUME_3L,
	ARIZONA_DAC_DIGITAL_VOLUME_4L,
	ARIZONA_DAC_DIGITAL_VOLUME_5L,
	ARIZONA_DAC_DIGITAL_VOLUME_5R,
};

static const struct snd_soc_component_driver soc_component_dev_wm8997 = {
	.probe			= wm8997_component_probe,
	.remove			= wm8997_component_remove,
	.set_sysclk		= arizona_set_sysclk,
	.set_pll		= wm8997_set_fll,
	.set_jack		= arizona_jack_set_jack,
	.controls		= wm8997_snd_controls,
	.num_controls		= ARRAY_SIZE(wm8997_snd_controls),
	.dapm_widgets		= wm8997_dapm_widgets,
	.num_dapm_widgets	= ARRAY_SIZE(wm8997_dapm_widgets),
	.dapm_routes		= wm8997_dapm_routes,
	.num_dapm_routes	= ARRAY_SIZE(wm8997_dapm_routes),
	.use_pmdown_time	= 1,
	.endianness		= 1,
};

/*
 * Platform probe: allocate driver state, initialise the FLLs, DAIs and
 * fixed sample-rate domains, then register the ASoC component.
 */
static int wm8997_probe(struct platform_device *pdev)
{
	struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
	struct wm8997_priv *wm8997;
	int i, ret;

	wm8997 = devm_kzalloc(&pdev->dev, sizeof(struct wm8997_priv),
			      GFP_KERNEL);
	if (wm8997 == NULL)
		return -ENOMEM;
	platform_set_drvdata(pdev, wm8997);

	if (IS_ENABLED(CONFIG_OF)) {
		if (!dev_get_platdata(arizona->dev)) {
			ret = arizona_of_get_audio_pdata(arizona);
			if (ret < 0)
				return ret;
		}
	}

	wm8997->core.arizona = arizona;
	wm8997->core.num_inputs = 4;

	arizona_init_dvfs(&wm8997->core);

	/* This may return -EPROBE_DEFER, so do this early on */
	ret = arizona_jack_codec_dev_probe(&wm8997->core, &pdev->dev);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(wm8997->fll); i++)
		wm8997->fll[i].vco_mult = 1;

	arizona_init_fll(arizona, 1, ARIZONA_FLL1_CONTROL_1 - 1,
			 ARIZONA_IRQ_FLL1_LOCK, ARIZONA_IRQ_FLL1_CLOCK_OK,
			 &wm8997->fll[0]);
	arizona_init_fll(arizona, 2, ARIZONA_FLL2_CONTROL_1 - 1,
			 ARIZONA_IRQ_FLL2_LOCK, ARIZONA_IRQ_FLL2_CLOCK_OK,
			 &wm8997->fll[1]);

	/* SR2 fixed at 8kHz, SR3 fixed at 16kHz */
	regmap_update_bits(arizona->regmap, ARIZONA_SAMPLE_RATE_2,
			   ARIZONA_SAMPLE_RATE_2_MASK, 0x11);
	regmap_update_bits(arizona->regmap, ARIZONA_SAMPLE_RATE_3,
			   ARIZONA_SAMPLE_RATE_3_MASK, 0x12);

	for (i = 0; i < ARRAY_SIZE(wm8997_dai); i++)
		arizona_init_dai(&wm8997->core, i);

	/* Latch volume update bits */
	for (i = 0; i < ARRAY_SIZE(wm8997_digital_vu); i++)
		regmap_update_bits(arizona->regmap, wm8997_digital_vu[i],
				   WM8997_DIG_VU, WM8997_DIG_VU);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_idle(&pdev->dev);

	arizona_init_common(arizona);

	ret = arizona_init_vol_limit(arizona);
	if (ret < 0)
		goto err_jack_codec_dev;
	ret = arizona_init_spk_irqs(arizona);
	if (ret < 0)
		goto err_jack_codec_dev;

	ret = devm_snd_soc_register_component(&pdev->dev,
					      &soc_component_dev_wm8997,
					      wm8997_dai,
					      ARRAY_SIZE(wm8997_dai));
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register component: %d\n", ret);
		goto err_spk_irqs;
	}

	return ret;

/* Unwind in reverse acquisition order (goto-based cleanup). */
err_spk_irqs:
	arizona_free_spk_irqs(arizona);
err_jack_codec_dev:
	pm_runtime_disable(&pdev->dev);
	arizona_jack_codec_dev_remove(&wm8997->core);

	return ret;
}

/* Platform remove: mirror of the probe error path for a bound device. */
static void wm8997_remove(struct platform_device *pdev)
{
	struct wm8997_priv *wm8997 = platform_get_drvdata(pdev);
	struct arizona *arizona = wm8997->core.arizona;

	pm_runtime_disable(&pdev->dev);

	arizona_free_spk_irqs(arizona);

	arizona_jack_codec_dev_remove(&wm8997->core);
}

static struct platform_driver wm8997_codec_driver = {
	.driver = {
		.name = "wm8997-codec",
	},
	.probe = wm8997_probe,
	.remove = wm8997_remove,
};

module_platform_driver(wm8997_codec_driver);

MODULE_DESCRIPTION("ASoC WM8997 driver");
MODULE_AUTHOR("Charles Keepax <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm8997-codec");
/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
 * Copyright (c) Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*
 * This file provides common libc dependencies that zstd requires.
 * The purpose is to allow replacing this file with a custom implementation
 * to compile zstd without libc support.
 *
 * Each feature group below is gated by its own ZSTD_DEPS_NEED_* macro so
 * that including files only pull in what they ask for; the inner
 * ZSTD_DEPS_* guard makes each group safe against repeated inclusion.
 */

/* Need:
 * NULL
 * INT_MAX
 * UINT_MAX
 * ZSTD_memcpy()
 * ZSTD_memset()
 * ZSTD_memmove()
 */
#ifndef ZSTD_DEPS_COMMON
#define ZSTD_DEPS_COMMON

#include <linux/limits.h>
#include <linux/stddef.h>

/* Compiler builtins avoid a dependency on the kernel's string routines. */
#define ZSTD_memcpy(d,s,n) __builtin_memcpy((d),(s),(n))
#define ZSTD_memmove(d,s,n) __builtin_memmove((d),(s),(n))
#define ZSTD_memset(d,s,n) __builtin_memset((d),(s),(n))

#endif /* ZSTD_DEPS_COMMON */

/*
 * Define malloc as always failing. That means the user must
 * either use ZSTD_customMem or statically allocate memory.
 * Need:
 * ZSTD_malloc()
 * ZSTD_free()
 * ZSTD_calloc()
 */
#ifdef ZSTD_DEPS_NEED_MALLOC
#ifndef ZSTD_DEPS_MALLOC
#define ZSTD_DEPS_MALLOC

/* Statement expressions evaluate the size args (for side effects) then
 * yield NULL, i.e. every allocation "fails".
 */
#define ZSTD_malloc(s) ({ (void)(s); NULL; })
#define ZSTD_free(p) ((void)(p))
#define ZSTD_calloc(n,s) ({ (void)(n); (void)(s); NULL; })

#endif /* ZSTD_DEPS_MALLOC */
#endif /* ZSTD_DEPS_NEED_MALLOC */

/*
 * Provides 64-bit math support.
 * Need:
 * U64 ZSTD_div64(U64 dividend, U32 divisor)
 */
#ifdef ZSTD_DEPS_NEED_MATH64
#ifndef ZSTD_DEPS_MATH64
#define ZSTD_DEPS_MATH64

#include <linux/math64.h>

/* div_u64 works on 32-bit kernels where a plain 64/32 divide may not. */
static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) {
  return div_u64(dividend, divisor);
}

#endif /* ZSTD_DEPS_MATH64 */
#endif /* ZSTD_DEPS_NEED_MATH64 */

/*
 * This is only requested when DEBUGLEVEL >= 1, meaning
 * it is disabled in production.
 * Need:
 * assert()
 */
#ifdef ZSTD_DEPS_NEED_ASSERT
#ifndef ZSTD_DEPS_ASSERT
#define ZSTD_DEPS_ASSERT

#include <linux/kernel.h>

/* Non-fatal in-kernel assert: warn and continue rather than abort. */
#define assert(x) WARN_ON(!(x))

#endif /* ZSTD_DEPS_ASSERT */
#endif /* ZSTD_DEPS_NEED_ASSERT */

/*
 * This is only requested when DEBUGLEVEL >= 2, meaning
 * it is disabled in production.
 * Need:
 * ZSTD_DEBUG_PRINT()
 */
#ifdef ZSTD_DEPS_NEED_IO
#ifndef ZSTD_DEPS_IO
#define ZSTD_DEPS_IO

#include <linux/printk.h>

#define ZSTD_DEBUG_PRINT(...) pr_debug(__VA_ARGS__)

#endif /* ZSTD_DEPS_IO */
#endif /* ZSTD_DEPS_NEED_IO */
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014 Free Electrons * * Author: Boris BREZILLON <[email protected]> * * Allwinner A31 AR100 clock driver */ #include <linux/bitops.h> #include <linux/clk-provider.h> #include <linux/init.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include "clk-factors.h" /* * sun6i_get_ar100_factors - Calculates factors p, m for AR100 * * AR100 rate is calculated as follows * rate = (parent_rate >> p) / (m + 1); */ static void sun6i_get_ar100_factors(struct factors_request *req) { unsigned long div; int shift; /* clock only divides */ if (req->rate > req->parent_rate) req->rate = req->parent_rate; div = DIV_ROUND_UP(req->parent_rate, req->rate); if (div < 32) shift = 0; else if (div >> 1 < 32) shift = 1; else if (div >> 2 < 32) shift = 2; else shift = 3; div >>= shift; if (div > 32) div = 32; req->rate = (req->parent_rate >> shift) / div; req->m = div - 1; req->p = shift; } static const struct clk_factors_config sun6i_ar100_config = { .mwidth = 5, .mshift = 8, .pwidth = 2, .pshift = 4, }; static const struct factors_data sun6i_ar100_data = { .mux = 16, .muxmask = GENMASK(1, 0), .table = &sun6i_ar100_config, .getter = sun6i_get_ar100_factors, }; static DEFINE_SPINLOCK(sun6i_ar100_lock); static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; void __iomem *reg; struct clk *clk; reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(reg)) return PTR_ERR(reg); clk = sunxi_factors_register(np, &sun6i_ar100_data, &sun6i_ar100_lock, reg); if (!clk) return -ENOMEM; platform_set_drvdata(pdev, clk); return 0; } static const struct of_device_id sun6i_a31_ar100_clk_dt_ids[] = { { .compatible = "allwinner,sun6i-a31-ar100-clk" }, { /* sentinel */ } }; static struct platform_driver sun6i_a31_ar100_clk_driver = { .driver = { .name = "sun6i-a31-ar100-clk", .of_match_table = sun6i_a31_ar100_clk_dt_ids, .suppress_bind_attrs = true, }, 
.probe = sun6i_a31_ar100_clk_probe, }; builtin_platform_driver(sun6i_a31_ar100_clk_driver);
/*
 * font.h -- `Soft' font definitions
 *
 * Created 1995 by Geert Uytterhoeven
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#ifndef _VIDEO_FONT_H
#define _VIDEO_FONT_H

#include <linux/types.h>

/* Describes one built-in console font. */
struct font_desc {
	int idx;			/* one of the *_IDX constants below */
	const char *name;		/* name matched by find_font() */
	unsigned int width, height;	/* glyph cell size in pixels */
	unsigned int charcount;		/* number of glyphs in @data */
	const void *data;		/* raw glyph bitmap data */
	int pref;			/* selection weight for get_default_font() */
};

#define VGA8x8_IDX	0
#define VGA8x16_IDX	1
#define PEARL8x8_IDX	2
#define VGA6x11_IDX	3
#define FONT7x14_IDX	4
#define FONT10x18_IDX	5
#define SUN8x16_IDX	6
#define SUN12x22_IDX	7
#define ACORN8x8_IDX	8
#define MINI4x6_IDX	9
#define FONT6x10_IDX	10
#define TER16x32_IDX	11
#define FONT6x8_IDX	12

extern const struct font_desc	font_vga_8x8,
			font_vga_8x16,
			font_pearl_8x8,
			font_vga_6x11,
			font_7x14,
			font_10x18,
			font_sun_8x16,
			font_sun_12x22,
			font_acorn_8x8,
			font_mini_4x6,
			font_6x10,
			font_ter_16x32,
			font_6x8;

/* Find a font with a specific name */
extern const struct font_desc *find_font(const char *name);

/* Get the default font for a specific screen size */
extern const struct font_desc *get_default_font(int xres, int yres,
						unsigned long *font_w,
						unsigned long *font_h);

/* Max. length for the name of a predefined font */
#define MAX_FONT_NAME	32

/*
 * Extra word getters.
 *
 * Font payloads are carried in a struct font_data (below), which places
 * FONT_EXTRA_WORDS ints immediately before the glyph data; these macros
 * read them by indexing backwards from the data pointer.
 */
#define REFCOUNT(fd)	(((int *)(fd))[-1])
#define FNTSIZE(fd)	(((int *)(fd))[-2])
#define FNTCHARCNT(fd)	(((int *)(fd))[-3])
#define FNTSUM(fd)	(((int *)(fd))[-4])

#define FONT_EXTRA_WORDS 4

/* Extra metadata words followed by the glyph bitmaps (flexible array). */
struct font_data {
	unsigned int extra[FONT_EXTRA_WORDS];
	const unsigned char data[];
} __packed;

#endif /* _VIDEO_FONT_H */
/* SPDX-License-Identifier: GPL-2.0 */
/* This handles the memory map.. */

#if defined(CONFIG_RAMBASE)
/* Board-configured RAM base (noMMU builds). */
#define	PAGE_OFFSET_RAW		CONFIG_RAMBASE
#elif defined(CONFIG_SUN3)
/* Sun-3: kernel RAM is mapped at 0x0E000000. */
#define	PAGE_OFFSET_RAW		0x0E000000
#else
/* Default: RAM starts at physical address zero. */
#define	PAGE_OFFSET_RAW		0x00000000
#endif
/* * B4420 Silicon/SoC Device Tree Source (post include) * * Copyright 2012 - 2015 Freescale Semiconductor, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * This software is provided by Freescale Semiconductor "as is" and any * express or implied warranties, including, but not limited to, the implied * warranties of merchantability and fitness for a particular purpose are * disclaimed. In no event shall Freescale Semiconductor be liable for any * direct, indirect, incidental, special, exemplary, or consequential damages * (including, but not limited to, procurement of substitute goods or services; * loss of use, data, or profits; or business interruption) however caused and * on any theory of liability, whether in contract, strict liability, or tort * (including negligence or otherwise) arising in any way out of the use of * this software, even if advised of the possibility of such damage. 
*/

/* Reserved-memory pools for the Q/BMan datapath hardware. */
&bman_fbpr {
	compatible = "fsl,bman-fbpr";
	alloc-ranges = <0 0 0x10000 0>;
};
&qman_fqd {
	compatible = "fsl,qman-fqd";
	alloc-ranges = <0 0 0x10000 0>;
};
&qman_pfdr {
	compatible = "fsl,qman-pfdr";
	alloc-ranges = <0 0 0x10000 0>;
};

&ifc {
	#address-cells = <2>;
	#size-cells = <1>;
	compatible = "fsl,ifc";
	interrupts = <25 2 0 0>;
};

/* controller at 0x200000 */
&pci0 {
	compatible = "fsl,b4-pcie", "fsl,qoriq-pcie-v2.4";
	device_type = "pci";
	#size-cells = <2>;
	#address-cells = <3>;
	bus-range = <0x0 0xff>;
	interrupts = <20 2 0 0>;
	fsl,iommu-parent = <&pamu0>;
	pcie@0 {
		#interrupt-cells = <1>;
		#size-cells = <2>;
		#address-cells = <3>;
		device_type = "pci";
		reg = <0 0 0 0 0>;
		interrupts = <20 2 0 0>;
		interrupt-map-mask = <0xf800 0 0 7>;
		interrupt-map = <
			/* IDSEL 0x0 */
			0000 0 0 1 &mpic 40 1 0 0
			0000 0 0 2 &mpic 1 1 0 0
			0000 0 0 3 &mpic 2 1 0 0
			0000 0 0 4 &mpic 3 1 0 0
			>;
	};
};

/* Debug Control and Status Register space. */
&dcsr {
	#address-cells = <1>;
	#size-cells = <1>;
	compatible = "fsl,dcsr", "simple-bus";

	dcsr-epu@0 {
		compatible = "fsl,b4-dcsr-epu", "fsl,dcsr-epu";
		interrupts = <52 2 0 0
			      84 2 0 0
			      85 2 0 0
			      94 2 0 0
			      95 2 0 0>;
		reg = <0x0 0x1000>;
	};
	dcsr-npc {
		compatible = "fsl,b4-dcsr-cnpc", "fsl,dcsr-cnpc";
		reg = <0x1000 0x1000 0x1002000 0x10000>;
	};
	dcsr-nxc@2000 {
		compatible = "fsl,dcsr-nxc";
		reg = <0x2000 0x1000>;
	};
	dcsr-corenet {
		compatible = "fsl,dcsr-corenet";
		reg = <0x8000 0x1000 0x1A000 0x1000>;
	};
	dcsr-dpaa@9000 {
		compatible = "fsl,b4-dcsr-dpaa", "fsl,dcsr-dpaa";
		reg = <0x9000 0x1000>;
	};
	dcsr-ocn@11000 {
		compatible = "fsl,b4-dcsr-ocn", "fsl,dcsr-ocn";
		reg = <0x11000 0x1000>;
	};
	dcsr-ddr@12000 {
		compatible = "fsl,dcsr-ddr";
		dev-handle = <&ddr1>;
		reg = <0x12000 0x1000>;
	};
	dcsr-nal@18000 {
		compatible = "fsl,b4-dcsr-nal", "fsl,dcsr-nal";
		reg = <0x18000 0x1000>;
	};
	dcsr-rcpm@22000 {
		compatible = "fsl,b4-dcsr-rcpm", "fsl,dcsr-rcpm";
		reg = <0x22000 0x1000>;
	};
	dcsr-snpc@30000 {
		compatible = "fsl,b4-dcsr-snpc", "fsl,dcsr-snpc";
		reg = <0x30000 0x1000 0x1022000 0x10000>;
	};
	dcsr-snpc@31000 {
		compatible = "fsl,b4-dcsr-snpc", "fsl,dcsr-snpc";
		reg = <0x31000 0x1000 0x1042000 0x10000>;
	};
	dcsr-cpu-sb-proxy@100000 {
		compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
		cpu-handle = <&cpu0>;
		reg = <0x100000 0x1000 0x101000 0x1000>;
	};
};

/* BMan software portals. */
&bportals {
	#address-cells = <0x1>;
	#size-cells = <0x1>;
	compatible = "simple-bus";

	bman-portal@0 {
		compatible = "fsl,bman-portal";
		reg = <0x0 0x4000>, <0x1000000 0x1000>;
		interrupts = <105 2 0 0>;
	};
	bman-portal@4000 {
		compatible = "fsl,bman-portal";
		reg = <0x4000 0x4000>, <0x1001000 0x1000>;
		interrupts = <107 2 0 0>;
	};
	bman-portal@8000 {
		compatible = "fsl,bman-portal";
		reg = <0x8000 0x4000>, <0x1002000 0x1000>;
		interrupts = <109 2 0 0>;
	};
	bman-portal@c000 {
		compatible = "fsl,bman-portal";
		reg = <0xc000 0x4000>, <0x1003000 0x1000>;
		interrupts = <111 2 0 0>;
	};
	bman-portal@10000 {
		compatible = "fsl,bman-portal";
		reg = <0x10000 0x4000>, <0x1004000 0x1000>;
		interrupts = <113 2 0 0>;
	};
	bman-portal@14000 {
		compatible = "fsl,bman-portal";
		reg = <0x14000 0x4000>, <0x1005000 0x1000>;
		interrupts = <115 2 0 0>;
	};
	bman-portal@18000 {
		compatible = "fsl,bman-portal";
		reg = <0x18000 0x4000>, <0x1006000 0x1000>;
		interrupts = <117 2 0 0>;
	};
	bman-portal@1c000 {
		compatible = "fsl,bman-portal";
		reg = <0x1c000 0x4000>, <0x1007000 0x1000>;
		interrupts = <119 2 0 0>;
	};
	bman-portal@20000 {
		compatible = "fsl,bman-portal";
		reg = <0x20000 0x4000>, <0x1008000 0x1000>;
		interrupts = <121 2 0 0>;
	};
	bman-portal@24000 {
		compatible = "fsl,bman-portal";
		reg = <0x24000 0x4000>, <0x1009000 0x1000>;
		interrupts = <123 2 0 0>;
	};
	bman-portal@28000 {
		compatible = "fsl,bman-portal";
		reg = <0x28000 0x4000>, <0x100a000 0x1000>;
		interrupts = <125 2 0 0>;
	};
	bman-portal@2c000 {
		compatible = "fsl,bman-portal";
		reg = <0x2c000 0x4000>, <0x100b000 0x1000>;
		interrupts = <127 2 0 0>;
	};
	bman-portal@30000 {
		compatible = "fsl,bman-portal";
		reg = <0x30000 0x4000>, <0x100c000 0x1000>;
		interrupts = <129 2 0 0>;
	};
	bman-portal@34000 {
		compatible = "fsl,bman-portal";
		reg = <0x34000 0x4000>, <0x100d000 0x1000>;
		interrupts = <131 2 0 0>;
	};
};

/* QMan software portals. */
&qportals {
	#address-cells = <0x1>;
	#size-cells = <0x1>;
	compatible = "simple-bus";

	qportal0: qman-portal@0 {
		compatible = "fsl,qman-portal";
		reg = <0x0 0x4000>, <0x1000000 0x1000>;
		interrupts = <104 0x2 0 0>;
		cell-index = <0x0>;
	};
	qportal1: qman-portal@4000 {
		compatible = "fsl,qman-portal";
		reg = <0x4000 0x4000>, <0x1001000 0x1000>;
		interrupts = <106 0x2 0 0>;
		cell-index = <0x1>;
	};
	qportal2: qman-portal@8000 {
		compatible = "fsl,qman-portal";
		reg = <0x8000 0x4000>, <0x1002000 0x1000>;
		interrupts = <108 0x2 0 0>;
		cell-index = <0x2>;
	};
	qportal3: qman-portal@c000 {
		compatible = "fsl,qman-portal";
		reg = <0xc000 0x4000>, <0x1003000 0x1000>;
		interrupts = <110 0x2 0 0>;
		cell-index = <0x3>;
	};
	qportal4: qman-portal@10000 {
		compatible = "fsl,qman-portal";
		reg = <0x10000 0x4000>, <0x1004000 0x1000>;
		interrupts = <112 0x2 0 0>;
		cell-index = <0x4>;
	};
	qportal5: qman-portal@14000 {
		compatible = "fsl,qman-portal";
		reg = <0x14000 0x4000>, <0x1005000 0x1000>;
		interrupts = <114 0x2 0 0>;
		cell-index = <0x5>;
	};
	qportal6: qman-portal@18000 {
		compatible = "fsl,qman-portal";
		reg = <0x18000 0x4000>, <0x1006000 0x1000>;
		interrupts = <116 0x2 0 0>;
		cell-index = <0x6>;
	};
	qportal7: qman-portal@1c000 {
		compatible = "fsl,qman-portal";
		reg = <0x1c000 0x4000>, <0x1007000 0x1000>;
		interrupts = <118 0x2 0 0>;
		cell-index = <0x7>;
	};
	qportal8: qman-portal@20000 {
		compatible = "fsl,qman-portal";
		reg = <0x20000 0x4000>, <0x1008000 0x1000>;
		interrupts = <120 0x2 0 0>;
		cell-index = <0x8>;
	};
	qportal9: qman-portal@24000 {
		compatible = "fsl,qman-portal";
		reg = <0x24000 0x4000>, <0x1009000 0x1000>;
		interrupts = <122 0x2 0 0>;
		cell-index = <0x9>;
	};
	qportal10: qman-portal@28000 {
		compatible = "fsl,qman-portal";
		reg = <0x28000 0x4000>, <0x100a000 0x1000>;
		interrupts = <124 0x2 0 0>;
		cell-index = <0xa>;
	};
	qportal11: qman-portal@2c000 {
		compatible = "fsl,qman-portal";
		reg = <0x2c000 0x4000>, <0x100b000 0x1000>;
		interrupts = <126 0x2 0 0>;
		cell-index = <0xb>;
	};
	qportal12: qman-portal@30000 {
		compatible = "fsl,qman-portal";
		reg = <0x30000 0x4000>, <0x100c000 0x1000>;
		interrupts = <128 0x2 0 0>;
		cell-index = <0xc>;
	};
	qportal13: qman-portal@34000 {
		compatible = "fsl,qman-portal";
		reg = <0x34000 0x4000>, <0x100d000 0x1000>;
		interrupts = <130 0x2 0 0>;
		cell-index = <0xd>;
	};
};

&soc {
	#address-cells = <1>;
	#size-cells = <1>;
	device_type = "soc";
	compatible = "simple-bus";

	soc-sram-error {
		compatible = "fsl,soc-sram-error";
		interrupts = <16 2 1 2>;
	};

	corenet-law@0 {
		compatible = "fsl,corenet-law";
		reg = <0x0 0x1000>;
		fsl,num-laws = <32>;
	};

	ddr1: memory-controller@8000 {
		compatible = "fsl,qoriq-memory-controller-v4.5",
				"fsl,qoriq-memory-controller";
		reg = <0x8000 0x1000>;
		interrupts = <16 2 1 8>;
	};

	cpc: l3-cache-controller@10000 {
		compatible = "fsl,b4-l3-cache-controller", "cache";
		reg = <0x10000 0x1000>;
		interrupts = <16 2 1 4>;
	};

	corenet-cf@18000 {
		compatible = "fsl,corenet2-cf", "fsl,corenet-cf";
		reg = <0x18000 0x1000>;
		interrupts = <16 2 1 0>;
		fsl,ccf-num-csdids = <32>;
		fsl,ccf-num-snoopids = <32>;
	};

	iommu@20000 {
		compatible = "fsl,pamu-v1.0", "fsl,pamu";
		reg = <0x20000 0x4000>;
		fsl,portid-mapping = <0x8000>;
		#address-cells = <1>;
		#size-cells = <1>;
		interrupts = <
			24 2 0 0
			16 2 1 1>;

		/* PCIe, DMA, SRIO */
		pamu0: pamu@0 {
			reg = <0 0x1000>;
			fsl,primary-cache-geometry = <8 1>;
			fsl,secondary-cache-geometry = <32 2>;
		};

		/* AXI2, Maple */
		pamu1: pamu@1000 {
			reg = <0x1000 0x1000>;
			fsl,primary-cache-geometry = <32 1>;
			fsl,secondary-cache-geometry = <32 2>;
		};

		/* Q/BMan */
		pamu2: pamu@2000 {
			reg = <0x2000 0x1000>;
			fsl,primary-cache-geometry = <32 1>;
			fsl,secondary-cache-geometry = <32 2>;
		};

		/* AXI1, FMAN */
		pamu3: pamu@3000 {
			reg = <0x3000 0x1000>;
			fsl,primary-cache-geometry = <32 1>;
			fsl,secondary-cache-geometry = <32 2>;
		};
	};

/include/ "qoriq-mpic4.3.dtsi"

	guts: global-utilities@e0000 {
		compatible = "fsl,b4-device-config";
		reg = <0xe0000 0xe00>;
		fsl,has-rstcr;
		fsl,liodn-bits = <12>;
	};

/include/ "qoriq-clockgen2.dtsi"

	rcpm: global-utilities@e2000 {
		compatible = "fsl,b4-rcpm", "fsl,qoriq-rcpm-2.0";
		reg = <0xe2000 0x1000>;
	};

/include/ "elo3-dma-0.dtsi"
	dma@100300 {
		fsl,iommu-parent = <&pamu0>;
		fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */
	};
/include/ "elo3-dma-1.dtsi"
	dma@101300 {
		fsl,iommu-parent = <&pamu0>;
		fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */
	};

/include/ "qonverge-usb2-dr-0.dtsi"
	usb0: usb@210000 {
		compatible = "fsl-usb2-dr-v2.4", "fsl-usb2-dr";
		fsl,iommu-parent = <&pamu1>;
		fsl,liodn-reg = <&guts 0x520>; /* USB1LIODNR */
	};

/include/ "qoriq-espi-0.dtsi"
	spi@110000 {
		fsl,espi-num-chipselects = <4>;
	};

/include/ "qoriq-esdhc-0.dtsi"
	sdhc@114000 {
		sdhci,auto-cmd12;
		fsl,iommu-parent = <&pamu1>;
		fsl,liodn-reg = <&guts 0x530>; /* eSDHCLIODNR */
	};

/include/ "qoriq-i2c-0.dtsi"
/include/ "qoriq-i2c-1.dtsi"
/include/ "qoriq-duart-0.dtsi"
/include/ "qoriq-duart-1.dtsi"
/include/ "qoriq-sec5.3-0.dtsi"

/include/ "qoriq-qman3.dtsi"
	qman: qman@318000 {
		interrupts = <16 2 1 28>;
	};

/include/ "qoriq-bman1.dtsi"
	bman: bman@31a000 {
		interrupts = <16 2 1 29>;
	};

/include/ "qoriq-fman3-0.dtsi"
/include/ "qoriq-fman3-0-1g-0.dtsi"
/include/ "qoriq-fman3-0-1g-1.dtsi"
/include/ "qoriq-fman3-0-1g-2.dtsi"
/include/ "qoriq-fman3-0-1g-3.dtsi"
	fman@400000 {
		interrupts = <96 2 0 0>, <16 2 1 30>;

		muram@0 {
			compatible = "fsl,fman-muram";
			reg = <0x0 0x80000>;
		};

		enet0: ethernet@e0000 {
		};

		enet1: ethernet@e2000 {
		};

		enet2: ethernet@e4000 {
		};

		enet3: ethernet@e6000 {
		};

		mdio@fc000 {
			interrupts = <100 1 0 0>;
		};

		mdio@fd000 {
			interrupts = <101 1 0 0>;
		};
	};
};
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Coda multi-standard codec IP
 *
 * Copyright (C) 2014 Philipp Zabel, Pengutronix
 */

#include <linux/bitops.h>

#include "coda.h"

/* Field encodings for one XY2CA/XY2RA/XY2BA address-swizzle entry. */
#define XY2_INVERT	BIT(7)
#define XY2_ZERO	BIT(6)
#define XY2_TB_XOR	BIT(5)
#define XY2_XYSEL	BIT(4)
#define XY2_Y		(1 << 4)
#define XY2_X		(0 << 4)

/* Packs a luma selector/bit pair (high byte) and a chroma pair (low byte). */
#define XY2(luma_sel, luma_bit, chroma_sel, chroma_bit) \
	(((XY2_##luma_sel) | (luma_bit)) << 8 | \
	 (XY2_##chroma_sel) | (chroma_bit))

/* Linear frame map: every CA bit forced to zero. */
static const u16 xy2ca_zero_map[16] = {
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
};

/* Tiled (macroblock raster) frame map: CA built from X/Y coordinate bits. */
static const u16 xy2ca_tiled_map[16] = {
	XY2(Y, 0, Y, 0),
	XY2(Y, 1, Y, 1),
	XY2(Y, 2, Y, 2),
	XY2(Y, 3, X, 3),
	XY2(X, 3, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
	XY2(ZERO, 0, ZERO, 0),
};

/*
 * RA[15:0], CA[15:8] are hardwired to contain the 24-bit macroblock
 * start offset (macroblock size is 16x16 for luma, 16x8 for chroma).
 * Bits CA[4:0] are set using XY2CA above. BA[3:0] seems to be unused.
 */

/* Source-register selectors for one RBC2AXI swizzle entry. */
#define RBC_CA		(0 << 4)
#define RBC_BA		(1 << 4)
#define RBC_RA		(2 << 4)
#define RBC_ZERO	(3 << 4)

/* Packs a luma selector/bit pair (bits 11:6) and a chroma pair (bits 5:0). */
#define RBC(luma_sel, luma_bit, chroma_sel, chroma_bit) \
	(((RBC_##luma_sel) | (luma_bit)) << 6 | \
	 (RBC_##chroma_sel) | (chroma_bit))

/* AXI address construction for the tiled map: CA bits, then RA bits. */
static const u16 rbc2axi_tiled_map[32] = {
	RBC(ZERO, 0, ZERO, 0),
	RBC(ZERO, 0, ZERO, 0),
	RBC(ZERO, 0, ZERO, 0),
	RBC(CA, 0, CA, 0),
	RBC(CA, 1, CA, 1),
	RBC(CA, 2, CA, 2),
	RBC(CA, 3, CA, 3),
	RBC(CA, 4, CA, 8),
	RBC(CA, 8, CA, 9),
	RBC(CA, 9, CA, 10),
	RBC(CA, 10, CA, 11),
	RBC(CA, 11, CA, 12),
	RBC(CA, 12, CA, 13),
	RBC(CA, 13, CA, 14),
	RBC(CA, 14, CA, 15),
	RBC(CA, 15, RA, 0),
	RBC(RA, 0, RA, 1),
	RBC(RA, 1, RA, 2),
	RBC(RA, 2, RA, 3),
	RBC(RA, 3, RA, 4),
	RBC(RA, 4, RA, 5),
	RBC(RA, 5, RA, 6),
	RBC(RA, 6, RA, 7),
	RBC(RA, 7, RA, 8),
	RBC(RA, 8, RA, 9),
	RBC(RA, 9, RA, 10),
	RBC(RA, 10, RA, 11),
	RBC(RA, 11, RA, 12),
	RBC(RA, 12, RA, 13),
	RBC(RA, 13, RA, 14),
	RBC(RA, 14, RA, 15),
	RBC(RA, 15, ZERO, 0),
};

/*
 * Program the CODA9 GDI address-swizzle registers according to the
 * context's tiled map type (linear or macroblock-raster tiled).
 */
void coda_set_gdi_regs(struct coda_ctx *ctx)
{
	struct coda_dev *dev = ctx->dev;
	const u16 *xy2ca_map;
	u32 xy2rbc_config;
	int i;

	switch (ctx->tiled_map_type) {
	case GDI_LINEAR_FRAME_MAP:
	default:
		xy2ca_map = xy2ca_zero_map;
		xy2rbc_config = 0;
		break;
	case GDI_TILED_FRAME_MB_RASTER_MAP:
		xy2ca_map = xy2ca_tiled_map;
		/* 16x16 luma / 16x8 chroma macroblock geometry encoded here. */
		xy2rbc_config = CODA9_XY2RBC_TILED_MAP |
				CODA9_XY2RBC_CA_INC_HOR |
				(16 - 1) << 12 | (8 - 1) << 4;
		break;
	}

	for (i = 0; i < 16; i++)
		coda_write(dev, xy2ca_map[i],
			   CODA9_GDI_XY2_CAS_0 + 4 * i);
	/* BA and RA swizzles are always zeroed; only CA is map-dependent. */
	for (i = 0; i < 4; i++)
		coda_write(dev, XY2(ZERO, 0, ZERO, 0),
			   CODA9_GDI_XY2_BA_0 + 4 * i);
	for (i = 0; i < 16; i++)
		coda_write(dev, XY2(ZERO, 0, ZERO, 0),
			   CODA9_GDI_XY2_RAS_0 + 4 * i);
	coda_write(dev, xy2rbc_config, CODA9_GDI_XY2_RBC_CONFIG);
	if (xy2rbc_config) {
		for (i = 0; i < 32; i++)
			coda_write(dev, rbc2axi_tiled_map[i],
				   CODA9_GDI_RBC2_AXI_0 + 4 * i);
	}
}
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI Offload Driver * Copyright (c) 2016 Cavium Inc. */ #include <linux/blkdev.h> #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <scsi/scsi_tcq.h> #include "qedi.h" #include "qedi_iscsi.h" #include "qedi_gbl.h" int qedi_recover_all_conns(struct qedi_ctx *qedi) { struct qedi_conn *qedi_conn; int i; for (i = 0; i < qedi->max_active_conns; i++) { qedi_conn = qedi_get_conn_from_id(qedi, i); if (!qedi_conn) continue; qedi_start_conn_recovery(qedi, qedi_conn); } return SUCCESS; } static int qedi_eh_host_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *shost = cmd->device->host; struct qedi_ctx *qedi; qedi = iscsi_host_priv(shost); return qedi_recover_all_conns(qedi); } const struct scsi_host_template qedi_host_template = { .module = THIS_MODULE, .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver", .proc_name = QEDI_MODULE_NAME, .queuecommand = iscsi_queuecommand, .eh_timed_out = iscsi_eh_cmd_timed_out, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .eh_host_reset_handler = qedi_eh_host_reset, .target_alloc = iscsi_target_alloc, .change_queue_depth = scsi_change_queue_depth, .can_queue = QEDI_MAX_ISCSI_TASK, .this_id = -1, .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD, .max_sectors = 0xffff, .dma_boundary = QEDI_HW_DMA_BOUNDARY, .cmd_per_lun = 128, .shost_groups = qedi_shost_groups, .cmd_size = sizeof(struct iscsi_cmd), }; static void qedi_conn_free_login_resources(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn) { if (qedi_conn->gen_pdu.resp_bd_tbl) { dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, qedi_conn->gen_pdu.resp_bd_tbl, qedi_conn->gen_pdu.resp_bd_dma); qedi_conn->gen_pdu.resp_bd_tbl = NULL; } if (qedi_conn->gen_pdu.req_bd_tbl) { dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, qedi_conn->gen_pdu.req_bd_tbl, qedi_conn->gen_pdu.req_bd_dma); 
qedi_conn->gen_pdu.req_bd_tbl = NULL; } if (qedi_conn->gen_pdu.resp_buf) { dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, qedi_conn->gen_pdu.resp_buf, qedi_conn->gen_pdu.resp_dma_addr); qedi_conn->gen_pdu.resp_buf = NULL; } if (qedi_conn->gen_pdu.req_buf) { dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, qedi_conn->gen_pdu.req_buf, qedi_conn->gen_pdu.req_dma_addr); qedi_conn->gen_pdu.req_buf = NULL; } } static int qedi_conn_alloc_login_resources(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn) { qedi_conn->gen_pdu.req_buf = dma_alloc_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, &qedi_conn->gen_pdu.req_dma_addr, GFP_KERNEL); if (!qedi_conn->gen_pdu.req_buf) goto login_req_buf_failure; qedi_conn->gen_pdu.req_buf_size = 0; qedi_conn->gen_pdu.req_wr_ptr = qedi_conn->gen_pdu.req_buf; qedi_conn->gen_pdu.resp_buf = dma_alloc_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, &qedi_conn->gen_pdu.resp_dma_addr, GFP_KERNEL); if (!qedi_conn->gen_pdu.resp_buf) goto login_resp_buf_failure; qedi_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN; qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf; qedi_conn->gen_pdu.req_bd_tbl = dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, &qedi_conn->gen_pdu.req_bd_dma, GFP_KERNEL); if (!qedi_conn->gen_pdu.req_bd_tbl) goto login_req_bd_tbl_failure; qedi_conn->gen_pdu.resp_bd_tbl = dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, &qedi_conn->gen_pdu.resp_bd_dma, GFP_KERNEL); if (!qedi_conn->gen_pdu.resp_bd_tbl) goto login_resp_bd_tbl_failure; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SESS, "Allocation successful, cid=0x%x\n", qedi_conn->iscsi_conn_id); return 0; login_resp_bd_tbl_failure: dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, qedi_conn->gen_pdu.req_bd_tbl, qedi_conn->gen_pdu.req_bd_dma); qedi_conn->gen_pdu.req_bd_tbl = NULL; login_req_bd_tbl_failure: dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, qedi_conn->gen_pdu.resp_buf, 
qedi_conn->gen_pdu.resp_dma_addr); qedi_conn->gen_pdu.resp_buf = NULL; login_resp_buf_failure: dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, qedi_conn->gen_pdu.req_buf, qedi_conn->gen_pdu.req_dma_addr); qedi_conn->gen_pdu.req_buf = NULL; login_req_buf_failure: iscsi_conn_printk(KERN_ERR, qedi_conn->cls_conn->dd_data, "login resource alloc failed!!\n"); return -ENOMEM; } static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi, struct iscsi_session *session) { int i; for (i = 0; i < session->cmds_max; i++) { struct iscsi_task *task = session->cmds[i]; struct qedi_cmd *cmd = task->dd_data; if (cmd->io_tbl.sge_tbl) dma_free_coherent(&qedi->pdev->dev, QEDI_ISCSI_MAX_BDS_PER_CMD * sizeof(struct scsi_sge), cmd->io_tbl.sge_tbl, cmd->io_tbl.sge_tbl_dma); if (cmd->sense_buffer) dma_free_coherent(&qedi->pdev->dev, SCSI_SENSE_BUFFERSIZE, cmd->sense_buffer, cmd->sense_buffer_dma); } } static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session, struct qedi_cmd *cmd) { struct qedi_io_bdt *io = &cmd->io_tbl; struct scsi_sge *sge; io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev, QEDI_ISCSI_MAX_BDS_PER_CMD * sizeof(*sge), &io->sge_tbl_dma, GFP_KERNEL); if (!io->sge_tbl) { iscsi_session_printk(KERN_ERR, session, "Could not allocate BD table.\n"); return -ENOMEM; } io->sge_valid = 0; return 0; } static int qedi_setup_cmd_pool(struct qedi_ctx *qedi, struct iscsi_session *session) { int i; for (i = 0; i < session->cmds_max; i++) { struct iscsi_task *task = session->cmds[i]; struct qedi_cmd *cmd = task->dd_data; task->hdr = &cmd->hdr; task->hdr_max = sizeof(struct iscsi_hdr); if (qedi_alloc_sget(qedi, session, cmd)) goto free_sgets; cmd->sense_buffer = dma_alloc_coherent(&qedi->pdev->dev, SCSI_SENSE_BUFFERSIZE, &cmd->sense_buffer_dma, GFP_KERNEL); if (!cmd->sense_buffer) goto free_sgets; } return 0; free_sgets: qedi_destroy_cmd_pool(qedi, session); return -ENOMEM; } static struct iscsi_cls_session * qedi_session_create(struct iscsi_endpoint *ep, 
u16 cmds_max, u16 qdepth, uint32_t initial_cmdsn) { struct Scsi_Host *shost; struct iscsi_cls_session *cls_session; struct qedi_ctx *qedi; struct qedi_endpoint *qedi_ep; if (!ep) return NULL; qedi_ep = ep->dd_data; shost = qedi_ep->qedi->shost; qedi = iscsi_host_priv(shost); if (cmds_max > qedi->max_sqes) cmds_max = qedi->max_sqes; else if (cmds_max < QEDI_SQ_WQES_MIN) cmds_max = QEDI_SQ_WQES_MIN; cls_session = iscsi_session_setup(&qedi_iscsi_transport, shost, cmds_max, 0, sizeof(struct qedi_cmd), initial_cmdsn, ISCSI_MAX_TARGET); if (!cls_session) { QEDI_ERR(&qedi->dbg_ctx, "Failed to setup session for ep=%p\n", qedi_ep); return NULL; } if (qedi_setup_cmd_pool(qedi, cls_session->dd_data)) { QEDI_ERR(&qedi->dbg_ctx, "Failed to setup cmd pool for ep=%p\n", qedi_ep); goto session_teardown; } return cls_session; session_teardown: iscsi_session_teardown(cls_session); return NULL; } static void qedi_session_destroy(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct qedi_ctx *qedi = iscsi_host_priv(shost); qedi_destroy_cmd_pool(qedi, session); iscsi_session_teardown(cls_session); } static struct iscsi_cls_conn * qedi_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct qedi_ctx *qedi = iscsi_host_priv(shost); struct iscsi_cls_conn *cls_conn; struct qedi_conn *qedi_conn; struct iscsi_conn *conn; cls_conn = iscsi_conn_setup(cls_session, sizeof(*qedi_conn), cid); if (!cls_conn) { QEDI_ERR(&qedi->dbg_ctx, "conn_new: iscsi conn setup failed, cid=0x%x, cls_sess=%p!\n", cid, cls_session); return NULL; } conn = cls_conn->dd_data; qedi_conn = conn->dd_data; qedi_conn->cls_conn = cls_conn; qedi_conn->qedi = qedi; qedi_conn->ep = NULL; qedi_conn->active_cmd_count = 0; INIT_LIST_HEAD(&qedi_conn->active_cmd_list); spin_lock_init(&qedi_conn->list_lock); if 
(qedi_conn_alloc_login_resources(qedi, qedi_conn)) { iscsi_conn_printk(KERN_ALERT, conn, "conn_new: login resc alloc failed, cid=0x%x, cls_sess=%p!!\n", cid, cls_session); goto free_conn; } return cls_conn; free_conn: iscsi_conn_teardown(cls_conn); return NULL; } void qedi_mark_device_missing(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct qedi_conn *qedi_conn = session->leadconn->dd_data; spin_lock_bh(&session->frwd_lock); set_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags); spin_unlock_bh(&session->frwd_lock); } void qedi_mark_device_available(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct qedi_conn *qedi_conn = session->leadconn->dd_data; spin_lock_bh(&session->frwd_lock); clear_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags); spin_unlock_bh(&session->frwd_lock); } static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn) { u32 iscsi_cid = qedi_conn->iscsi_conn_id; if (qedi->cid_que.conn_cid_tbl[iscsi_cid]) { iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data, "conn bind - entry #%d not free\n", iscsi_cid); return -EBUSY; } qedi->cid_que.conn_cid_tbl[iscsi_cid] = qedi_conn; return 0; } struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid) { if (!qedi->cid_que.conn_cid_tbl) { QEDI_ERR(&qedi->dbg_ctx, "missing conn<->cid table\n"); return NULL; } else if (iscsi_cid >= qedi->max_active_conns) { QEDI_ERR(&qedi->dbg_ctx, "wrong cid #%d\n", iscsi_cid); return NULL; } return qedi->cid_que.conn_cid_tbl[iscsi_cid]; } static int qedi_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, u64 transport_fd, int is_leading) { struct iscsi_conn *conn = cls_conn->dd_data; struct qedi_conn *qedi_conn = conn->dd_data; struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct qedi_ctx *qedi = iscsi_host_priv(shost); struct qedi_endpoint *qedi_ep; struct iscsi_endpoint *ep; int 
rc = 0; ep = iscsi_lookup_endpoint(transport_fd); if (!ep) return -EINVAL; qedi_ep = ep->dd_data; if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) || (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) { rc = -EINVAL; goto put_ep; } if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { rc = -EINVAL; goto put_ep; } qedi_ep->conn = qedi_conn; qedi_conn->ep = qedi_ep; qedi_conn->iscsi_ep = ep; qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid; qedi_conn->fw_cid = qedi_ep->fw_cid; qedi_conn->cmd_cleanup_req = 0; atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0); if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) { rc = -EINVAL; goto put_ep; } spin_lock_init(&qedi_conn->tmf_work_lock); INIT_LIST_HEAD(&qedi_conn->tmf_work_list); init_waitqueue_head(&qedi_conn->wait_queue); put_ep: iscsi_put_endpoint(ep); return rc; } static int qedi_iscsi_update_conn(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn) { struct qed_iscsi_params_update *conn_info; struct iscsi_cls_conn *cls_conn = qedi_conn->cls_conn; struct iscsi_conn *conn = cls_conn->dd_data; struct qedi_endpoint *qedi_ep; int rval; qedi_ep = qedi_conn->ep; conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL); if (!conn_info) { QEDI_ERR(&qedi->dbg_ctx, "memory alloc failed\n"); return -ENOMEM; } conn_info->update_flag = 0; if (conn->hdrdgst_en) SET_FIELD(conn_info->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, true); if (conn->datadgst_en) SET_FIELD(conn_info->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, true); if (conn->session->initial_r2t_en) SET_FIELD(conn_info->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T, true); if (conn->session->imm_data_en) SET_FIELD(conn_info->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA, true); conn_info->max_seq_size = conn->session->max_burst; conn_info->max_recv_pdu_length = conn->max_recv_dlength; conn_info->max_send_pdu_length = conn->max_xmit_dlength; conn_info->first_seq_length = conn->session->first_burst; conn_info->exp_stat_sn = conn->exp_statsn; rval = 
qedi_ops->update_conn(qedi->cdev, qedi_ep->handle, conn_info); if (rval) { rval = -ENXIO; QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n"); } kfree(conn_info); return rval; } static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en) { u16 mss = 0; u16 hdrs = TCP_HDR_LEN; if (is_ipv6) hdrs += IPV6_HDR_LEN; else hdrs += IPV4_HDR_LEN; mss = pmtu - hdrs; if (!mss) mss = DEF_MSS; return mss; } static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep) { struct qed_iscsi_params_offload *conn_info; struct qedi_ctx *qedi = qedi_ep->qedi; int rval; int i; conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL); if (!conn_info) { QEDI_ERR(&qedi->dbg_ctx, "Failed to allocate memory ep=%p\n", qedi_ep); return -ENOMEM; } ether_addr_copy(conn_info->src.mac, qedi_ep->src_mac); ether_addr_copy(conn_info->dst.mac, qedi_ep->dst_mac); conn_info->src.ip[0] = ntohl(qedi_ep->src_addr[0]); conn_info->dst.ip[0] = ntohl(qedi_ep->dst_addr[0]); if (qedi_ep->ip_type == TCP_IPV4) { conn_info->ip_version = 0; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "After ntohl: src_addr=%pI4, dst_addr=%pI4\n", qedi_ep->src_addr, qedi_ep->dst_addr); } else { for (i = 1; i < 4; i++) { conn_info->src.ip[i] = ntohl(qedi_ep->src_addr[i]); conn_info->dst.ip[i] = ntohl(qedi_ep->dst_addr[i]); } conn_info->ip_version = 1; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "After ntohl: src_addr=%pI6, dst_addr=%pI6\n", qedi_ep->src_addr, qedi_ep->dst_addr); } conn_info->src.port = qedi_ep->src_port; conn_info->dst.port = qedi_ep->dst_port; conn_info->layer_code = ISCSI_SLOW_PATH_LAYER_CODE; conn_info->sq_pbl_addr = qedi_ep->sq_pbl_dma; conn_info->vlan_id = qedi_ep->vlan_id; SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_TS_EN, 1); SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_EN, 1); SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1); SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1); conn_info->default_cq = (qedi_ep->fw_cid % qedi->num_queues); 
/* Continuation of qedi_iscsi_offload_conn(): fixed TCP tuning parameters
 * for the offloaded connection, then doorbell setup and the offload ramrod.
 * NOTE(review): dup_ack_theshold is the (misspelled) field name from the
 * qed_iscsi_params_offload struct - do not "fix" it here.
 */
conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
conn_info->dup_ack_theshold = 3;
conn_info->rcv_wnd = 65535;
conn_info->ss_thresh = 65535;
conn_info->srtt = 300;
conn_info->rtt_var = 150;
conn_info->flow_label = 0;
conn_info->ka_timeout = DEF_KA_TIMEOUT;
conn_info->ka_interval = DEF_KA_INTERVAL;
conn_info->max_rt_time = DEF_MAX_RT_TIME;
conn_info->ttl = DEF_TTL;
conn_info->tos_or_tc = DEF_TOS;
conn_info->remote_port = qedi_ep->dst_port;
conn_info->local_port = qedi_ep->src_port;

conn_info->mss = qedi_calc_mss(qedi_ep->pmtu,
			       (qedi_ep->ip_type == TCP_IPV6),
			       1, (qedi_ep->vlan_id != 0));

conn_info->cwnd = DEF_MAX_CWND * conn_info->mss;
conn_info->rcv_wnd_scale = 4;
conn_info->da_timeout_value = 200;
conn_info->ack_frequency = 2;

QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
	  "Default cq index [%d], mss [%d]\n",
	  conn_info->default_cq, conn_info->mss);

/* Prepare the doorbell parameters */
qedi_ep->db_data.agg_flags = 0;
qedi_ep->db_data.params = 0;
SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_DEST, DB_DEST_XCM);
SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_AGG_VAL_SEL,
	  DQ_XCM_ISCSI_SQ_PROD_CMD);
SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_BYPASS_EN, 1);

/* Register doorbell with doorbell recovery mechanism */
rval = qedi_ops->common->db_recovery_add(qedi->cdev, qedi_ep->p_doorbell,
					 &qedi_ep->db_data, DB_REC_WIDTH_32B,
					 DB_REC_KERNEL);
if (rval) {
	kfree(conn_info);
	return rval;
}

rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info);
if (rval) {
	/* delete doorbell from doorbell recovery mechanism */
	rval = qedi_ops->common->db_recovery_del(qedi->cdev,
						 qedi_ep->p_doorbell,
						 &qedi_ep->db_data);
	QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n",
		 rval, qedi_ep);
}

kfree(conn_info);
return rval;
}

/*
 * qedi_conn_start - iscsi_transport .start_conn callback
 *
 * Pushes the negotiated parameters to firmware, resets the TMF cleanup
 * bookkeeping under tmf_work_lock, then hands over to libiscsi's
 * iscsi_conn_start().  Returns 0 on success or -EINVAL on failure.
 */
static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct qedi_conn *qedi_conn = conn->dd_data;
	struct qedi_ctx *qedi;
	int rval;

	qedi = qedi_conn->qedi;

	rval = qedi_iscsi_update_conn(qedi, qedi_conn);
	if (rval) {
		iscsi_conn_printk(KERN_ALERT, conn,
				  "conn_start: FW offload conn failed.\n");
		rval = -EINVAL;
		goto start_err;
	}

	spin_lock(&qedi_conn->tmf_work_lock);
	qedi_conn->fw_cleanup_works = 0;
	qedi_conn->ep_disconnect_starting = false;
	spin_unlock(&qedi_conn->tmf_work_lock);

	qedi_conn->abrt_conn = 0;

	rval = iscsi_conn_start(cls_conn);
	if (rval) {
		iscsi_conn_printk(KERN_ALERT, conn,
				  "iscsi_conn_start: FW offload conn failed!!\n");
	}

start_err:
	return rval;
}

/*
 * qedi_conn_destroy - iscsi_transport .destroy_conn callback
 *
 * Frees the login DMA buffers for this connection and tears down the
 * libiscsi connection object.
 */
static void qedi_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct qedi_conn *qedi_conn = conn->dd_data;
	struct Scsi_Host *shost;
	struct qedi_ctx *qedi;

	shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
	qedi = iscsi_host_priv(shost);
	qedi_conn_free_login_resources(qedi, qedi_conn);
	iscsi_conn_teardown(cls_conn);
}

/*
 * qedi_ep_get_param - iscsi_transport .get_ep_param callback
 *
 * Formats the endpoint's destination port/address into @buf.
 * Returns the number of bytes written or -ENOTCONN for unknown params
 * or a missing endpoint.
 */
static int qedi_ep_get_param(struct iscsi_endpoint *ep,
			     enum iscsi_param param, char *buf)
{
	struct qedi_endpoint *qedi_ep = ep->dd_data;
	int len;

	if (!qedi_ep)
		return -ENOTCONN;

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
		len = sprintf(buf, "%hu\n", qedi_ep->dst_port);
		break;
	case ISCSI_PARAM_CONN_ADDRESS:
		if (qedi_ep->ip_type == TCP_IPV4)
			len = sprintf(buf, "%pI4\n", qedi_ep->dst_addr);
		else
			len = sprintf(buf, "%pI6\n", qedi_ep->dst_addr);
		break;
	default:
		return -ENOTCONN;
	}

	return len;
}

/*
 * qedi_host_get_param - iscsi_transport .get_host_param callback
 *
 * Formats host-level attributes (MAC, netdev name, source IP) into @buf;
 * anything else is delegated to libiscsi's iscsi_host_get_param().
 */
static int qedi_host_get_param(struct Scsi_Host *shost,
			       enum iscsi_host_param param, char *buf)
{
	struct qedi_ctx *qedi;
	int len;

	qedi = iscsi_host_priv(shost);

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, qedi->mac, 6);
		break;
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		len = sprintf(buf, "host%d\n", shost->host_no);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS:
		if (qedi->ip_type == TCP_IPV4)
			len = sprintf(buf, "%pI4\n", qedi->src_ip);
		else
			len = sprintf(buf, "%pI6\n", qedi->src_ip);
		break;
	default:
		return iscsi_host_get_param(shost, param,
buf);
	}

	return len;
}

/*
 * qedi_conn_get_stats - iscsi_transport .get_stats callback
 *
 * Pulls the hardware counters from qed, folds them into the libiscsi
 * connection counters and fills in the iscsi_stats structure, including
 * one custom counter (eh_abort_cnt).
 */
static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
				struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct qed_iscsi_stats iscsi_stats;
	struct Scsi_Host *shost;
	struct qedi_ctx *qedi;

	shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
	qedi = iscsi_host_priv(shost);
	qedi_ops->get_stats(qedi->cdev, &iscsi_stats);

	conn->txdata_octets = iscsi_stats.iscsi_tx_bytes_cnt;
	conn->rxdata_octets = iscsi_stats.iscsi_rx_bytes_cnt;
	conn->dataout_pdus_cnt = (uint32_t)iscsi_stats.iscsi_tx_data_pdu_cnt;
	conn->datain_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_data_pdu_cnt;
	conn->r2t_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_r2t_pdu_cnt;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->digest_err = 0;
	stats->timeout_err = 0;
	strcpy(stats->custom[0].desc, "eh_abort_cnt");
	stats->custom[0].value = conn->eh_abort_cnt;
	stats->custom_length = 1;
}

/*
 * qedi_iscsi_prep_generic_pdu_bd - fill the request/response buffer
 * descriptors for a generic (non-SCSI) PDU.
 *
 * The request SGE length is the amount of data actually written into the
 * request buffer (write pointer minus buffer base); the response SGE
 * always exposes the full receive segment.
 */
static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
{
	struct scsi_sge *bd_tbl;

	bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;

	bd_tbl->sge_addr.hi = (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
	bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
	bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
			  qedi_conn->gen_pdu.req_buf;

	bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
	bd_tbl->sge_addr.hi = (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
	bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
	bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
}

/*
 * qedi_iscsi_send_generic_request - dispatch a non-SCSI-command PDU
 * (login, nop-out, logout, TMF, text) to the matching send helper.
 *
 * NOTE(review): the ISCSI_OP_LOGIN branch discards the helper's return
 * value - looks intentional in the original, but verify against the
 * qedi_send_iscsi_login() contract.
 */
static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
{
	struct qedi_cmd *cmd = task->dd_data;
	struct qedi_conn *qedi_conn = cmd->conn;
	char *buf;
	int data_len;
	int rc = 0;

	qedi_iscsi_prep_generic_pdu_bd(qedi_conn);
	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		qedi_send_iscsi_login(qedi_conn, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		data_len = qedi_conn->gen_pdu.req_buf_size;
		buf = qedi_conn->gen_pdu.req_buf;
		if (data_len)
			rc = qedi_send_iscsi_nopout(qedi_conn, task,
						    buf, data_len, 1);
		else
			rc = qedi_send_iscsi_nopout(qedi_conn, task,
						    NULL, 0, 1);
		break;
	case ISCSI_OP_LOGOUT:
		rc = qedi_send_iscsi_logout(qedi_conn, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		rc = qedi_send_iscsi_tmf(qedi_conn, task);
		break;
	case ISCSI_OP_TEXT:
		rc = qedi_send_iscsi_text(qedi_conn, task);
		break;
	default:
		iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
				  "unsupported op 0x%x\n", task->hdr->opcode);
	}

	return rc;
}

/*
 * qedi_mtask_xmit - stage a management task's payload into the generic
 * PDU request buffer and send it.
 */
static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
	struct qedi_conn *qedi_conn = conn->dd_data;
	struct qedi_cmd *cmd = task->dd_data;

	memset(qedi_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
	qedi_conn->gen_pdu.req_buf_size = task->data_count;

	if (task->data_count) {
		memcpy(qedi_conn->gen_pdu.req_buf, task->data,
		       task->data_count);
		qedi_conn->gen_pdu.req_wr_ptr =
			qedi_conn->gen_pdu.req_buf + task->data_count;
	}

	cmd->conn = conn->dd_data;
	return qedi_iscsi_send_generic_request(task);
}

/*
 * qedi_task_xmit - iscsi_transport .xmit_task callback
 *
 * Initializes the per-task qedi_cmd state and routes management tasks to
 * qedi_mtask_xmit() and SCSI commands to qedi_iscsi_send_ioreq().
 * Refuses new work while the adapter is shutting down (-ENODEV) or I/O
 * is blocked (-EACCES).
 */
static int qedi_task_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct qedi_conn *qedi_conn = conn->dd_data;
	struct qedi_cmd *cmd = task->dd_data;
	struct scsi_cmnd *sc = task->sc;

	/* Clear now so in cleanup_task we know it didn't make it */
	cmd->scsi_cmd = NULL;
	cmd->task_id = U16_MAX;

	if (test_bit(QEDI_IN_SHUTDOWN, &qedi_conn->qedi->flags))
		return -ENODEV;

	if (test_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags))
		return -EACCES;

	cmd->state = 0;
	cmd->task = NULL;
	cmd->use_slowpath = false;
	cmd->conn = qedi_conn;
	cmd->task = task;
	cmd->io_cmd_in_list = false;
	INIT_LIST_HEAD(&cmd->io_cmd);

	if (!sc)
		return
qedi_mtask_xmit(conn, task);

	cmd->scsi_cmd = sc;
	return qedi_iscsi_send_ioreq(task);
}

/*
 * qedi_offload_work - workqueue handler that offloads one endpoint's TCP
 * connection to firmware and waits (up to 5s) for the offload completion
 * event to move the endpoint to EP_STATE_OFLDCONN_COMPL.  Any failure or
 * timeout leaves the endpoint in EP_STATE_OFLDCONN_FAILED.
 */
static void qedi_offload_work(struct work_struct *work)
{
	struct qedi_endpoint *qedi_ep =
		container_of(work, struct qedi_endpoint, offload_work);
	struct qedi_ctx *qedi;
	int wait_delay = 5 * HZ;
	int ret;

	qedi = qedi_ep->qedi;

	ret = qedi_iscsi_offload_conn(qedi_ep);
	if (ret) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
			 qedi_ep->iscsi_cid, qedi_ep, ret);
		qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
		return;
	}

	ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
					       (qedi_ep->state ==
					       EP_STATE_OFLDCONN_COMPL),
					       wait_delay);
	if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
		qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
		QEDI_ERR(&qedi->dbg_ctx,
			 "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
			 qedi_ep->iscsi_cid, qedi_ep);
	}
}

/*
 * qedi_ep_connect - iscsi_transport .ep_connect callback
 *
 * Allocates an iscsi_endpoint + qedi_endpoint for @dst_addr, acquires a
 * firmware connection, then sends an ISCSI_KEVENT_PATH_REQ netlink
 * message so userspace can resolve the path (MAC/MTU); the actual TCP
 * offload happens later from qedi_set_path() via the offload work item.
 * Returns the endpoint or an ERR_PTR.
 */
static struct iscsi_endpoint *
qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
		int non_blocking)
{
	struct qedi_ctx *qedi;
	struct iscsi_endpoint *ep;
	struct qedi_endpoint *qedi_ep;
	struct sockaddr_in *addr;
	struct sockaddr_in6 *addr6;
	struct iscsi_path path_req;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	u32 iscsi_cid = QEDI_CID_RESERVED;
	u16 len = 0;
	char *buf = NULL;
	int ret, tmp;

	if (!shost) {
		ret = -ENXIO;
		QEDI_ERR(NULL, "shost is NULL\n");
		return ERR_PTR(ret);
	}

	if (qedi_do_not_recover) {
		ret = -ENOMEM;
		return ERR_PTR(ret);
	}

	qedi = iscsi_host_priv(shost);
	/* No new endpoints while offline or in recovery. */
	if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) ||
	    test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
		ret = -ENOMEM;
		return ERR_PTR(ret);
	}

	if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
		QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
		return ERR_PTR(-ENXIO);
	}

	ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint));
	if (!ep) {
		QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n");
		ret = -ENOMEM;
		return ERR_PTR(ret);
	}
	qedi_ep = ep->dd_data;
	memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
	INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
	qedi_ep->state = EP_STATE_IDLE;
	qedi_ep->iscsi_cid = (u32)-1;
	qedi_ep->qedi = qedi;

	/* Record the destination address/port and address family. */
	if (dst_addr->sa_family == AF_INET) {
		addr = (struct sockaddr_in *)dst_addr;
		memcpy(qedi_ep->dst_addr, &addr->sin_addr.s_addr,
		       sizeof(struct in_addr));
		qedi_ep->dst_port = ntohs(addr->sin_port);
		qedi_ep->ip_type = TCP_IPV4;
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
			  "dst_addr=%pI4, dst_port=%u\n",
			  qedi_ep->dst_addr, qedi_ep->dst_port);
	} else if (dst_addr->sa_family == AF_INET6) {
		addr6 = (struct sockaddr_in6 *)dst_addr;
		memcpy(qedi_ep->dst_addr, &addr6->sin6_addr,
		       sizeof(struct in6_addr));
		qedi_ep->dst_port = ntohs(addr6->sin6_port);
		qedi_ep->ip_type = TCP_IPV6;
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
			  "dst_addr=%pI6, dst_port=%u\n",
			  qedi_ep->dst_addr, qedi_ep->dst_port);
	} else {
		QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n");
	}

	ret = qedi_alloc_sq(qedi, qedi_ep);
	if (ret)
		goto ep_conn_exit;

	ret = qedi_ops->acquire_conn(qedi->cdev, &qedi_ep->handle,
				     &qedi_ep->fw_cid, &qedi_ep->p_doorbell);
	if (ret) {
		QEDI_ERR(&qedi->dbg_ctx, "Could not acquire connection\n");
		ret = -ENXIO;
		goto ep_free_sq;
	}

	iscsi_cid = qedi_ep->handle;
	qedi_ep->iscsi_cid = iscsi_cid;

	init_waitqueue_head(&qedi_ep->ofld_wait);
	init_waitqueue_head(&qedi_ep->tcp_ofld_wait);
	qedi_ep->state = EP_STATE_OFLDCONN_START;
	qedi->ep_tbl[iscsi_cid] = qedi_ep;

	/* Build the PATH_REQ payload for userspace path resolution. */
	buf = (char *)&path_req;
	len = sizeof(path_req);
	memset(&path_req, 0, len);
	msg_type = ISCSI_KEVENT_PATH_REQ;

	path_req.handle = (u64)qedi_ep->iscsi_cid;
	path_req.pmtu = qedi->ll2_mtu;
	qedi_ep->pmtu = qedi->ll2_mtu;
	if (qedi_ep->ip_type == TCP_IPV4) {
		memcpy(&path_req.dst.v4_addr, &qedi_ep->dst_addr,
		       sizeof(struct in_addr));
		path_req.ip_addr_len = 4;
	} else {
		memcpy(&path_req.dst.v6_addr, &qedi_ep->dst_addr,
		       sizeof(struct in6_addr));
		path_req.ip_addr_len = 16;
	}

	ret = iscsi_offload_mesg(shost, &qedi_iscsi_transport, msg_type, buf,
				 len);
	if (ret) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "iscsi_offload_mesg() failed for cid=0x%x ret=%d\n",
			 iscsi_cid, ret);
		goto ep_rel_conn;
	}
atomic_inc(&qedi->num_offloads);
	return ep;

	/* Unwind in reverse order of acquisition. */
ep_rel_conn:
	qedi->ep_tbl[iscsi_cid] = NULL;
	tmp = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
	if (tmp)
		QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n",
			  tmp);
ep_free_sq:
	qedi_free_sq(qedi, qedi_ep);
ep_conn_exit:
	iscsi_destroy_endpoint(ep);
	return ERR_PTR(ret);
}

/*
 * qedi_ep_poll - iscsi_transport .ep_poll callback
 *
 * Waits up to @timeout_ms for the endpoint offload to reach a terminal
 * state.  Returns 1 on success, 0 on timeout, -1 on failure states, or
 * a negative value if the wait was interrupted.
 * NOTE(review): the "ret = 1" for EP_STATE_OFLDCONN_COMPL is immediately
 * overwritten by the wait below - dead store kept as in the original.
 */
static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct qedi_endpoint *qedi_ep;
	int ret = 0;

	if (qedi_do_not_recover)
		return 1;

	qedi_ep = ep->dd_data;
	if (qedi_ep->state == EP_STATE_IDLE ||
	    qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
	    qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
		return -1;

	if (qedi_ep->state == EP_STATE_OFLDCONN_COMPL)
		ret = 1;

	ret = wait_event_interruptible_timeout(qedi_ep->ofld_wait,
					       QEDI_OFLD_WAIT_STATE(qedi_ep),
					       msecs_to_jiffies(timeout_ms));

	if (qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
		ret = -1;

	if (ret > 0)
		return 1;
	else if (!ret)
		return 0;
	else
		return ret;
}

/*
 * qedi_cleanup_active_cmd_list - drop every command still on the
 * connection's active list (used on reset recovery); the list_lock
 * guards active_cmd_list/active_cmd_count.
 */
static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
{
	struct qedi_cmd *cmd, *cmd_tmp;

	spin_lock(&qedi_conn->list_lock);
	list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
				 io_cmd) {
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	}
	spin_unlock(&qedi_conn->list_lock);
}

/*
 * qedi_ep_disconnect - iscsi_transport .ep_disconnect callback
 *
 * Tears down an offloaded endpoint: waits for in-flight TMF cleanup
 * work, decides whether the firmware connection must be aborted or can
 * be closed gracefully, destroys/releases the firmware connection and
 * finally frees all driver-side endpoint resources.
 */
static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct qedi_endpoint *qedi_ep;
	struct qedi_conn *qedi_conn = NULL;
	struct qedi_ctx *qedi;
	int ret = 0;
	int wait_delay;
	int abrt_conn = 0;

	wait_delay = 60 * HZ + DEF_MAX_RT_TIME;
	qedi_ep = ep->dd_data;
	qedi = qedi_ep->qedi;

	/* Make sure the offload work item is not still running. */
	flush_work(&qedi_ep->offload_work);
	if (qedi_ep->state == EP_STATE_OFLDCONN_START)
		goto ep_exit_recover;

	if (qedi_ep->conn) {
		qedi_conn = qedi_ep->conn;
		abrt_conn = qedi_conn->abrt_conn;

		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "cid=0x%x qedi_ep=%p waiting for %d tmfs\n",
			  qedi_ep->iscsi_cid, qedi_ep,
			  qedi_conn->fw_cleanup_works);

		/*
		 * Block new TMF cleanup work and wait for any in-flight
		 * cleanup to finish, dropping the lock while sleeping.
		 */
		spin_lock(&qedi_conn->tmf_work_lock);
		qedi_conn->ep_disconnect_starting = true;
		while (qedi_conn->fw_cleanup_works > 0) {
			spin_unlock(&qedi_conn->tmf_work_lock);
			msleep(1000);
			spin_lock(&qedi_conn->tmf_work_lock);
		}
		spin_unlock(&qedi_conn->tmf_work_lock);

		if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
			if (qedi_do_not_recover) {
				QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
					  "Do not recover cid=0x%x\n",
					  qedi_ep->iscsi_cid);
				goto ep_exit_recover;
			}
			QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
				  "Reset recovery cid=0x%x, qedi_ep=%p, state=0x%x\n",
				  qedi_ep->iscsi_cid, qedi_ep, qedi_ep->state);
			qedi_cleanup_active_cmd_list(qedi_conn);
			goto ep_release_conn;
		}
	}

	if (qedi_do_not_recover)
		goto ep_exit_recover;

	/* Choose graceful close vs. abortive close per endpoint state. */
	switch (qedi_ep->state) {
	case EP_STATE_OFLDCONN_START:
	case EP_STATE_OFLDCONN_NONE:
		goto ep_release_conn;
	case EP_STATE_OFLDCONN_FAILED:
		break;
	case EP_STATE_OFLDCONN_COMPL:
		if (unlikely(!qedi_conn))
			break;

		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "Active cmd count=%d, abrt_conn=%d, ep state=0x%x, cid=0x%x, qedi_conn=%p\n",
			  qedi_conn->active_cmd_count, abrt_conn,
			  qedi_ep->state,
			  qedi_ep->iscsi_cid,
			  qedi_ep->conn
			  );

		/* Outstanding commands force an abortive teardown. */
		if (!qedi_conn->active_cmd_count)
			abrt_conn = 0;
		else
			abrt_conn = 1;

		if (abrt_conn)
			qedi_clearsq(qedi, qedi_conn, NULL);
		break;
	default:
		break;
	}

	/* Graceful close must also wait out the 2MSL timer. */
	if (!abrt_conn)
		wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;

	qedi_ep->state = EP_STATE_DISCONN_START;

	if (test_bit(QEDI_IN_SHUTDOWN, &qedi->flags) ||
	    test_bit(QEDI_IN_RECOVERY, &qedi->flags))
		goto ep_release_conn;

	/* Delete doorbell from doorbell recovery mechanism */
	ret = qedi_ops->common->db_recovery_del(qedi->cdev,
						qedi_ep->p_doorbell,
						&qedi_ep->db_data);

	ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
	if (ret) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "destroy_conn failed returned %d\n", ret);
	} else {
		ret = wait_event_interruptible_timeout(
					qedi_ep->tcp_ofld_wait,
					(qedi_ep->state !=
					 EP_STATE_DISCONN_START),
					wait_delay);
		if ((ret <= 0) || (qedi_ep->state == EP_STATE_DISCONN_START)) {
			QEDI_WARN(&qedi->dbg_ctx,
				  "Destroy conn timedout or interrupted, ret=%d, delay=%d, cid=0x%x\n",
				  ret, wait_delay, qedi_ep->iscsi_cid);
		}
	}

ep_release_conn:
	ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
	if (ret)
		QEDI_WARN(&qedi->dbg_ctx,
			  "release_conn returned %d, cid=0x%x\n",
			  ret, qedi_ep->iscsi_cid);
ep_exit_recover:
	/* Final endpoint teardown: drop lookup-table entries, local port
	 * and SQ, then destroy the libiscsi endpoint object itself.
	 */
	qedi_ep->state = EP_STATE_IDLE;
	qedi->ep_tbl[qedi_ep->iscsi_cid] = NULL;
	qedi->cid_que.conn_cid_tbl[qedi_ep->iscsi_cid] = NULL;
	qedi_free_id(&qedi->lcl_port_tbl, qedi_ep->src_port);
	qedi_free_sq(qedi, qedi_ep);

	if (qedi_conn)
		qedi_conn->ep = NULL;

	qedi_ep->conn = NULL;
	qedi_ep->qedi = NULL;
	atomic_dec(&qedi->num_offloads);

	iscsi_destroy_endpoint(ep);
}

/*
 * qedi_data_avail - transmit one packet staged by the uio userspace
 * helper (iscsiuio) over the LL2 path.
 *
 * Copies the packet from the shared uio buffer into a fresh skb, tags it
 * with @vlanid if set, and hands it to the qed LL2 transmit op.  The
 * shared-buffer producer state is reset afterwards.  Returns 0 or a
 * negative errno.
 */
static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
{
	struct qed_dev *cdev = qedi->cdev;
	struct qedi_uio_dev *udev;
	struct qedi_uio_ctrl *uctrl;
	struct sk_buff *skb;
	u32 len;
	int rc = 0;

	udev = qedi->udev;
	if (!udev) {
		QEDI_ERR(&qedi->dbg_ctx, "udev is NULL.\n");
		return -EINVAL;
	}

	uctrl = (struct qedi_uio_ctrl *)udev->uctrl;
	if (!uctrl) {
		QEDI_ERR(&qedi->dbg_ctx, "uctlr is NULL.\n");
		return -EINVAL;
	}

	len = uctrl->host_tx_pkt_len;
	if (!len) {
		QEDI_ERR(&qedi->dbg_ctx, "Invalid len %u\n", len);
		return -EINVAL;
	}

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		QEDI_ERR(&qedi->dbg_ctx, "alloc_skb failed\n");
		return -EINVAL;
	}

	skb_put(skb, len);
	memcpy(skb->data, udev->tx_pkt, len);
	skb->ip_summed = CHECKSUM_NONE;

	if (vlanid)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);

	rc = qedi_ops->ll2->start_xmit(cdev, skb, 0);
	if (rc) {
		QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n",
			 rc);
		kfree_skb(skb);
	}

	uctrl->host_tx_pkt_len = 0;
	uctrl->hw_tx_cons++;

	return rc;
}

/*
 * qedi_set_path - iscsi_transport .set_path callback
 *
 * Userspace's answer to the PATH_REQ sent from qedi_ep_connect().  The
 * special QEDI_PATH_HANDLE means "a uio packet is ready to transmit";
 * otherwise the handle is an iscsi_cid whose endpoint now gets its MACs,
 * VLAN, MTU, local port and source address, after which the TCP offload
 * work item is queued.
 */
static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
{
	struct qedi_ctx *qedi;
	struct qedi_endpoint *qedi_ep;
	int ret = 0;
	u32 iscsi_cid;
	u16 port_id = 0;

	if (!shost) {
		ret = -ENXIO;
		QEDI_ERR(NULL, "shost is NULL\n");
		return ret;
	}

	if (strcmp(shost->hostt->proc_name, "qedi")) {
		ret = -ENXIO;
		QEDI_ERR(NULL, "shost %s is invalid\n",
			 shost->hostt->proc_name);
		return ret;
	}

	qedi = iscsi_host_priv(shost);
	if (path_data->handle == QEDI_PATH_HANDLE) {
		ret = qedi_data_avail(qedi, path_data->vlan_id);
		goto set_path_exit;
	}

	iscsi_cid = (u32)path_data->handle;
	if (iscsi_cid >= qedi->max_active_conns) {
		ret = -EINVAL;
		goto set_path_exit;
	}
	qedi_ep = qedi->ep_tbl[iscsi_cid];
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
	if (!qedi_ep) {
		ret = -EINVAL;
		goto set_path_exit;
	}

	if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
		QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
		qedi_ep->state = EP_STATE_OFLDCONN_NONE;
		ret = -EIO;
		goto set_path_exit;
	}

	ether_addr_copy(&qedi_ep->src_mac[0], &qedi->mac[0]);
	ether_addr_copy(&qedi_ep->dst_mac[0], &path_data->mac_addr[0]);

	qedi_ep->vlan_id = path_data->vlan_id;
	/* Reject undersized MTUs; fall back to the current LL2 MTU. */
	if (path_data->pmtu < DEF_PATH_MTU) {
		qedi_ep->pmtu = qedi->ll2_mtu;
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "MTU cannot be %u, using default MTU %u\n",
			   path_data->pmtu, qedi_ep->pmtu);
	}

	/* A changed path MTU (bounded by JUMBO_MTU) re-tunes the host. */
	if (path_data->pmtu != qedi->ll2_mtu) {
		if (path_data->pmtu > JUMBO_MTU) {
			ret = -EINVAL;
			QEDI_ERR(NULL, "Invalid MTU %u\n", path_data->pmtu);
			goto set_path_exit;
		}

		qedi_reset_host_mtu(qedi, path_data->pmtu);
		qedi_ep->pmtu = qedi->ll2_mtu;
	}

	/* Reuse the endpoint's previous local port if still available,
	 * otherwise allocate a fresh one from the local port table.
	 */
	port_id = qedi_ep->src_port;
	if (port_id >= QEDI_LOCAL_PORT_MIN &&
	    port_id < QEDI_LOCAL_PORT_MAX) {
		if (qedi_alloc_id(&qedi->lcl_port_tbl, port_id))
			port_id = 0;
	} else {
		port_id = 0;
	}

	if (!port_id) {
		port_id = qedi_alloc_new_id(&qedi->lcl_port_tbl);
		if (port_id == QEDI_LOCAL_PORT_INVALID) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Failed to allocate port id for iscsi_cid=0x%x\n",
				 iscsi_cid);
			ret = -ENOMEM;
			goto set_path_exit;
		}
	}

	qedi_ep->src_port = port_id;

	if (qedi_ep->ip_type == TCP_IPV4) {
		memcpy(&qedi_ep->src_addr[0], &path_data->src.v4_addr,
		       sizeof(struct in_addr));
		memcpy(&qedi->src_ip[0], &path_data->src.v4_addr,
		       sizeof(struct in_addr));
		qedi->ip_type = TCP_IPV4;
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
			  "src addr:port=%pI4:%u, dst addr:port=%pI4:%u\n",
			  qedi_ep->src_addr, qedi_ep->src_port,
			  qedi_ep->dst_addr, qedi_ep->dst_port);
	}
else {
		memcpy(&qedi_ep->src_addr[0], &path_data->src.v6_addr,
		       sizeof(struct in6_addr));
		memcpy(&qedi->src_ip[0], &path_data->src.v6_addr,
		       sizeof(struct in6_addr));
		qedi->ip_type = TCP_IPV6;
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
			  "src addr:port=%pI6:%u, dst addr:port=%pI6:%u\n",
			  qedi_ep->src_addr, qedi_ep->src_port,
			  qedi_ep->dst_addr, qedi_ep->dst_port);
	}

	/* Path is resolved - kick off the actual TCP offload. */
	queue_work(qedi->offload_thread, &qedi_ep->offload_work);

	ret = 0;

set_path_exit:
	return ret;
}

/*
 * qedi_attr_is_visible - iscsi_transport .attr_is_visible callback
 *
 * Declares which host and connection/session sysfs attributes this
 * transport exposes (all read-only, 0444); everything else is hidden.
 */
static umode_t qedi_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
			return 0444;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
		case ISCSI_PARAM_BOOT_ROOT:
		case ISCSI_PARAM_BOOT_NIC:
		case ISCSI_PARAM_BOOT_TARGET:
			return 0444;
		default:
			return 0;
		}
	}

	return 0;
}

/*
 * qedi_cleanup_task - iscsi_transport .cleanup_task callback
 *
 * Unmaps the SG list of a SCSI task and releases the firmware task-index
 * slot if one was assigned (task_id != U16_MAX means the task made it
 * past qedi_task_xmit()).  Tasks still pending need no cleanup.
 */
static void qedi_cleanup_task(struct iscsi_task *task)
{
	struct qedi_cmd *cmd;

	if (task->state == ISCSI_TASK_PENDING) {
		QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n",
			  refcount_read(&task->refcount));
		return;
	}

	if (task->sc)
		qedi_iscsi_unmap_sg_list(task->dd_data);

	cmd = task->dd_data;
	if (cmd->task_id != U16_MAX)
		qedi_clear_task_idx(iscsi_host_priv(task->conn->session->host),
				    cmd->task_id);

	cmd->task_id = U16_MAX;
	cmd->scsi_cmd = NULL;
}

/* The iscsi_transport ops table registered with the open-iscsi layer;
 * generic operations are delegated to libiscsi helpers.
 */
struct iscsi_transport qedi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = QEDI_MODULE_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST |
		CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO,
	.create_session = qedi_session_create,
	.destroy_session = qedi_session_destroy,
	.create_conn = qedi_conn_create,
	.bind_conn = qedi_conn_bind,
	.unbind_conn = iscsi_conn_unbind,
	.start_conn = qedi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.destroy_conn = qedi_conn_destroy,
	.set_param = iscsi_set_param,
	.get_ep_param = qedi_ep_get_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = qedi_host_get_param,
	.send_pdu = iscsi_conn_send_pdu,
	.get_stats = qedi_conn_get_stats,
	.xmit_task = qedi_task_xmit,
	.cleanup_task = qedi_cleanup_task,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.ep_connect = qedi_ep_connect,
	.ep_poll = qedi_ep_poll,
	.ep_disconnect = qedi_ep_disconnect,
	.set_path = qedi_set_path,
	.attr_is_visible = qedi_attr_is_visible,
};

/*
 * qedi_start_conn_recovery - fail an online session so that libiscsi
 * initiates connection recovery; marks the connection for abortive
 * teardown first.
 */
void qedi_start_conn_recovery(struct qedi_ctx *qedi,
			      struct qedi_conn *qedi_conn)
{
	struct iscsi_cls_session *cls_sess;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;

	cls_conn = qedi_conn->cls_conn;
	conn = cls_conn->dd_data;
	cls_sess = iscsi_conn_to_session(cls_conn);

	if (iscsi_is_session_online(cls_sess)) {
		qedi_conn->abrt_conn = 1;
		QEDI_ERR(&qedi->dbg_ctx,
			 "Failing connection, state=0x%x, cid=0x%x\n",
			 conn->session->state, qedi_conn->iscsi_conn_id);
		iscsi_conn_failure(qedi_conn->cls_conn->dd_data,
				   ISCSI_ERR_CONN_FAILED);
	}
}

/* Firmware error-code to human-readable string mapping (continued on the
 * following lines).
 */
static const struct {
	enum iscsi_error_types error_code;
	char *err_string;
} qedi_iscsi_error[] = {
	{ ISCSI_STATUS_NONE, "tcp_error none" },
	{
ISCSI_CONN_ERROR_TASK_CID_MISMATCH, "task cid mismatch" },
	{ ISCSI_CONN_ERROR_TASK_NOT_VALID, "invalid task" },
	{ ISCSI_CONN_ERROR_RQ_RING_IS_FULL, "rq ring full" },
	{ ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL, "cmdq ring full" },
	{ ISCSI_CONN_ERROR_HQE_CACHING_FAILED, "sge caching failed" },
	{ ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR, "hdr digest error" },
	{ ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR, "local cmpl error" },
	{ ISCSI_CONN_ERROR_DATA_OVERRUN, "invalid task" },
	{ ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR, "out of sge error" },
	{ ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR, "tcp ip fragment error" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN, "AHS len protocol error" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
	  "itt out of range error" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
	  "data seg more than pdu size" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE, "invalid opcode" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
	  "invalid opcode before update" },
	{ ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL, "unexpected opcode" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
	  "r2t carries no data" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN, "data sn error" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT, "data TTT error" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT, "r2t TTT error" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
	  "buffer offset error" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
	  "buffer offset ooo" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN, "data seg len 0" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
	  "data xer len error" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
	  "data xer len1 error" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
	  "data xer len2 error" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN, "protocol lun error" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO, "f bit zero error" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN, "exp stat sn error" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO, "dsl not zero error" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL, "invalid dsl" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
	  "data seg len too big" },
	{ ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
	  "outstanding r2t count error" },
	{ ISCSI_CONN_ERROR_SENSE_DATA_LENGTH, "sense datalen error" },
};

/*
 * qedi_get_iscsi_error - linear lookup of the error string for a
 * firmware error code; returns NULL when the code is unknown.
 */
static char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
{
	int i;
	char *msg = NULL;

	for (i = 0; i < ARRAY_SIZE(qedi_iscsi_error); i++) {
		if (qedi_iscsi_error[i].error_code == err_code) {
			msg = qedi_iscsi_error[i].err_string;
			break;
		}
	}
	return msg;
}

/*
 * qedi_process_iscsi_error - async-event handler for firmware iSCSI
 * errors; logs the decoded error and triggers connection recovery for
 * known error codes.
 *
 * NOTE(review): err_mask is always 0 here, so the "iscsi_warning"
 * branch is currently unreachable and every known error is treated as
 * needing recovery - kept as in the original.
 */
void qedi_process_iscsi_error(struct qedi_endpoint *ep,
			      struct iscsi_eqe_data *data)
{
	struct qedi_conn *qedi_conn;
	struct qedi_ctx *qedi;
	char warn_notice[] = "iscsi_warning";
	char error_notice[] = "iscsi_error";
	char unknown_msg[] = "Unknown error";
	char *message;
	int need_recovery = 0;
	u32 err_mask = 0;
	char *msg;

	if (!ep)
		return;

	qedi_conn = ep->conn;
	if (!qedi_conn)
		return;

	qedi = ep->qedi;

	QEDI_ERR(&qedi->dbg_ctx, "async event iscsi error:0x%x\n",
		 data->error_code);

	if (err_mask) {
		need_recovery = 0;
		message = warn_notice;
	} else {
		need_recovery = 1;
		message = error_notice;
	}

	msg = qedi_get_iscsi_error(data->error_code);
	if (!msg) {
		/* Unknown code: log it but do not trigger recovery. */
		need_recovery = 0;
		msg = unknown_msg;
	}

	iscsi_conn_printk(KERN_ALERT,
			  qedi_conn->cls_conn->dd_data,
			  "qedi: %s - %s\n", message, msg);

	if (need_recovery)
		qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
}

/*
 * qedi_process_tcp_error - async-event handler for firmware TCP errors;
 * unconditionally starts connection recovery.
 */
void qedi_process_tcp_error(struct qedi_endpoint *ep,
			    struct iscsi_eqe_data *data)
{
	struct qedi_conn *qedi_conn;

	if (!ep)
		return;

	qedi_conn = ep->conn;
	if (!qedi_conn)
		return;

	QEDI_ERR(&ep->qedi->dbg_ctx, "async event TCP error:0x%x\n",
		 data->error_code);

	qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
}
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2019 NXP. */ #include <linux/device.h> #include <linux/of.h> #include <linux/slab.h> #include "dcss-dev.h" #define DCSS_BLKCTL_RESET_CTRL 0x00 #define B_CLK_RESETN BIT(0) #define APB_CLK_RESETN BIT(1) #define P_CLK_RESETN BIT(2) #define RTR_CLK_RESETN BIT(4) #define DCSS_BLKCTL_CONTROL0 0x10 #define HDMI_MIPI_CLK_SEL BIT(0) #define DISPMIX_REFCLK_SEL_POS 4 #define DISPMIX_REFCLK_SEL_MASK GENMASK(5, 4) #define DISPMIX_PIXCLK_SEL BIT(8) #define HDMI_SRC_SECURE_EN BIT(16) struct dcss_blkctl { struct dcss_dev *dcss; void __iomem *base_reg; }; void dcss_blkctl_cfg(struct dcss_blkctl *blkctl) { if (blkctl->dcss->hdmi_output) dcss_writel(0, blkctl->base_reg + DCSS_BLKCTL_CONTROL0); else dcss_writel(DISPMIX_PIXCLK_SEL, blkctl->base_reg + DCSS_BLKCTL_CONTROL0); dcss_set(B_CLK_RESETN | APB_CLK_RESETN | P_CLK_RESETN | RTR_CLK_RESETN, blkctl->base_reg + DCSS_BLKCTL_RESET_CTRL); } int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base) { struct dcss_blkctl *blkctl; blkctl = devm_kzalloc(dcss->dev, sizeof(*blkctl), GFP_KERNEL); if (!blkctl) return -ENOMEM; blkctl->base_reg = devm_ioremap(dcss->dev, blkctl_base, SZ_4K); if (!blkctl->base_reg) { dev_err(dcss->dev, "unable to remap BLK CTRL base\n"); return -ENOMEM; } dcss->blkctl = blkctl; blkctl->dcss = dcss; dcss_blkctl_cfg(blkctl); return 0; }
// SPDX-License-Identifier: GPL-2.0 #define DISABLE_BRANCH_PROFILING #define pr_fmt(fmt) "kasan: " fmt /* cpu_feature_enabled() cannot be used this early */ #define USE_EARLY_PGTABLE_L5 #include <linux/memblock.h> #include <linux/kasan.h> #include <linux/kdebug.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/vmalloc.h> #include <asm/e820/types.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/sections.h> #include <asm/cpu_entry_area.h> extern struct range pfn_mapped[E820_MAX_ENTRIES]; static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); static __init void *early_alloc(size_t size, int nid, bool should_panic) { void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid); if (!ptr && should_panic) panic("%pS: Failed to allocate page, nid=%d from=%lx\n", (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS)); return ptr; } static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, int nid) { pte_t *pte; if (pmd_none(*pmd)) { void *p; if (boot_cpu_has(X86_FEATURE_PSE) && ((end - addr) == PMD_SIZE) && IS_ALIGNED(addr, PMD_SIZE)) { p = early_alloc(PMD_SIZE, nid, false); if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) return; memblock_free(p, PMD_SIZE); } p = early_alloc(PAGE_SIZE, nid, true); pmd_populate_kernel(&init_mm, pmd, p); } pte = pte_offset_kernel(pmd, addr); do { pte_t entry; void *p; if (!pte_none(*pte)) continue; p = early_alloc(PAGE_SIZE, nid, true); entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL); set_pte_at(&init_mm, addr, pte, entry); } while (pte++, addr += PAGE_SIZE, addr != end); } static void __init kasan_populate_pud(pud_t *pud, unsigned long addr, unsigned long end, int nid) { pmd_t *pmd; unsigned long next; if (pud_none(*pud)) { void *p; if (boot_cpu_has(X86_FEATURE_GBPAGES) && ((end - addr) == PUD_SIZE) && IS_ALIGNED(addr, PUD_SIZE)) { p = early_alloc(PUD_SIZE, nid, false); if (p && 
pud_set_huge(pud, __pa(p), PAGE_KERNEL)) return; memblock_free(p, PUD_SIZE); } p = early_alloc(PAGE_SIZE, nid, true); pud_populate(&init_mm, pud, p); } pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (!pmd_leaf(*pmd)) kasan_populate_pmd(pmd, addr, next, nid); } while (pmd++, addr = next, addr != end); } static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, int nid) { pud_t *pud; unsigned long next; if (p4d_none(*p4d)) { void *p = early_alloc(PAGE_SIZE, nid, true); p4d_populate(&init_mm, p4d, p); } pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); if (!pud_leaf(*pud)) kasan_populate_pud(pud, addr, next, nid); } while (pud++, addr = next, addr != end); } static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr, unsigned long end, int nid) { void *p; p4d_t *p4d; unsigned long next; if (pgd_none(*pgd)) { p = early_alloc(PAGE_SIZE, nid, true); pgd_populate(&init_mm, pgd, p); } p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); kasan_populate_p4d(p4d, addr, next, nid); } while (p4d++, addr = next, addr != end); } static void __init kasan_populate_shadow(unsigned long addr, unsigned long end, int nid) { pgd_t *pgd; unsigned long next; addr = addr & PAGE_MASK; end = round_up(end, PAGE_SIZE); pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); kasan_populate_pgd(pgd, addr, next, nid); } while (pgd++, addr = next, addr != end); } static void __init map_range(struct range *range) { unsigned long start; unsigned long end; start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start)); end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end)); kasan_populate_shadow(start, end, early_pfn_to_nid(range->start)); } static void __init clear_pgds(unsigned long start, unsigned long end) { pgd_t *pgd; /* See comment in kasan_init() */ unsigned long pgd_end = end & PGDIR_MASK; for (; start < pgd_end; start += PGDIR_SIZE) { pgd = pgd_offset_k(start); /* * With 
folded p4d, pgd_clear() is nop, use p4d_clear() * instead. */ if (pgtable_l5_enabled()) pgd_clear(pgd); else p4d_clear(p4d_offset(pgd, start)); } pgd = pgd_offset_k(start); for (; start < end; start += P4D_SIZE) p4d_clear(p4d_offset(pgd, start)); } static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr) { unsigned long p4d; if (!pgtable_l5_enabled()) return (p4d_t *)pgd; p4d = pgd_val(*pgd) & PTE_PFN_MASK; p4d += __START_KERNEL_map - phys_base; return (p4d_t *)p4d + p4d_index(addr); } static void __init kasan_early_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end) { pgd_t pgd_entry; p4d_t *p4d, p4d_entry; unsigned long next; if (pgd_none(*pgd)) { pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_early_shadow_p4d)); set_pgd(pgd, pgd_entry); } p4d = early_p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); if (!p4d_none(*p4d)) continue; p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_early_shadow_pud)); set_p4d(p4d, p4d_entry); } while (p4d++, addr = next, addr != end && p4d_none(*p4d)); } static void __init kasan_map_early_shadow(pgd_t *pgd) { /* See comment in kasan_init() */ unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK; unsigned long end = KASAN_SHADOW_END; unsigned long next; pgd += pgd_index(addr); do { next = pgd_addr_end(addr, end); kasan_early_p4d_populate(pgd, addr, next); } while (pgd++, addr = next, addr != end); } static void __init kasan_shallow_populate_p4ds(pgd_t *pgd, unsigned long addr, unsigned long end) { p4d_t *p4d; unsigned long next; void *p; p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); if (p4d_none(*p4d)) { p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true); p4d_populate(&init_mm, p4d, p); } } while (p4d++, addr = next, addr != end); } static void __init kasan_shallow_populate_pgds(void *start, void *end) { unsigned long addr, next; pgd_t *pgd; void *p; addr = (unsigned long)start; pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, (unsigned long)end); if 
(pgd_none(*pgd)) { p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true); pgd_populate(&init_mm, pgd, p); } /* * we need to populate p4ds to be synced when running in * four level mode - see sync_global_pgds_l4() */ kasan_shallow_populate_p4ds(pgd, addr, next); } while (pgd++, addr = next, addr != (unsigned long)end); } void __init kasan_early_init(void) { int i; pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) | __PAGE_KERNEL | _PAGE_ENC; pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE; pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE; p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE; /* Mask out unsupported __PAGE_KERNEL bits: */ pte_val &= __default_kernel_pte_mask; pmd_val &= __default_kernel_pte_mask; pud_val &= __default_kernel_pte_mask; p4d_val &= __default_kernel_pte_mask; for (i = 0; i < PTRS_PER_PTE; i++) kasan_early_shadow_pte[i] = __pte(pte_val); for (i = 0; i < PTRS_PER_PMD; i++) kasan_early_shadow_pmd[i] = __pmd(pmd_val); for (i = 0; i < PTRS_PER_PUD; i++) kasan_early_shadow_pud[i] = __pud(pud_val); for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++) kasan_early_shadow_p4d[i] = __p4d(p4d_val); kasan_map_early_shadow(early_top_pgt); kasan_map_early_shadow(init_top_pgt); } static unsigned long kasan_mem_to_shadow_align_down(unsigned long va) { unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va); return round_down(shadow, PAGE_SIZE); } static unsigned long kasan_mem_to_shadow_align_up(unsigned long va) { unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va); return round_up(shadow, PAGE_SIZE); } void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid) { unsigned long shadow_start, shadow_end; shadow_start = kasan_mem_to_shadow_align_down((unsigned long)va); shadow_end = kasan_mem_to_shadow_align_up((unsigned long)va + size); kasan_populate_shadow(shadow_start, shadow_end, nid); } void __init kasan_init(void) { unsigned long 
shadow_cea_begin, shadow_cea_per_cpu_begin, shadow_cea_end; int i; memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt)); /* * We use the same shadow offset for 4- and 5-level paging to * facilitate boot-time switching between paging modes. * As result in 5-level paging mode KASAN_SHADOW_START and * KASAN_SHADOW_END are not aligned to PGD boundary. * * KASAN_SHADOW_START doesn't share PGD with anything else. * We claim whole PGD entry to make things easier. * * KASAN_SHADOW_END lands in the last PGD entry and it collides with * bunch of things like kernel code, modules, EFI mapping, etc. * We need to take extra steps to not overwrite them. */ if (pgtable_l5_enabled()) { void *ptr; ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END)); memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table)); set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)], __pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE)); } load_cr3(early_top_pgt); __flush_tlb_all(); clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END); kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK), kasan_mem_to_shadow((void *)PAGE_OFFSET)); for (i = 0; i < E820_MAX_ENTRIES; i++) { if (pfn_mapped[i].end == 0) break; map_range(&pfn_mapped[i]); } shadow_cea_begin = kasan_mem_to_shadow_align_down(CPU_ENTRY_AREA_BASE); shadow_cea_per_cpu_begin = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_PER_CPU); shadow_cea_end = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE); kasan_populate_early_shadow( kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), kasan_mem_to_shadow((void *)VMALLOC_START)); /* * If we're in full vmalloc mode, don't back vmalloc space with early * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to * the global table and we can populate the lower levels on demand. 
*/ if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) kasan_shallow_populate_pgds( kasan_mem_to_shadow((void *)VMALLOC_START), kasan_mem_to_shadow((void *)VMALLOC_END)); else kasan_populate_early_shadow( kasan_mem_to_shadow((void *)VMALLOC_START), kasan_mem_to_shadow((void *)VMALLOC_END)); kasan_populate_early_shadow( kasan_mem_to_shadow((void *)VMALLOC_END + 1), (void *)shadow_cea_begin); /* * Populate the shadow for the shared portion of the CPU entry area. * Shadows for the per-CPU areas are mapped on-demand, as each CPU's * area is randomly placed somewhere in the 512GiB range and mapping * the entire 512GiB range is prohibitively expensive. */ kasan_populate_shadow(shadow_cea_begin, shadow_cea_per_cpu_begin, 0); kasan_populate_early_shadow((void *)shadow_cea_end, kasan_mem_to_shadow((void *)__START_KERNEL_map)); kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), (unsigned long)kasan_mem_to_shadow(_end), early_pfn_to_nid(__pa(_stext))); kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END), (void *)KASAN_SHADOW_END); load_cr3(init_top_pgt); __flush_tlb_all(); /* * kasan_early_shadow_page has been used as early shadow memory, thus * it may contain some garbage. Now we can clear and write protect it, * since after the TLB flush no one should write to it. */ memset(kasan_early_shadow_page, 0, PAGE_SIZE); for (i = 0; i < PTRS_PER_PTE; i++) { pte_t pte; pgprot_t prot; prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC); pgprot_val(prot) &= __default_kernel_pte_mask; pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot)); set_pte(&kasan_early_shadow_pte[i], pte); } /* Flush TLBs again to be sure that write protection applied. */ __flush_tlb_all(); init_task.kasan_depth = 0; pr_info("KernelAddressSanitizer initialized\n"); }
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ASID allocator.
 *
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/slab.h>
#include <linux/mm_types.h>

#include <asm/asid.h>

/* Per-CPU slot holding the ASID preserved across a rollover. */
#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)

/* High (generation) bits above the (info)->bits-wide ASID field. */
#define ASID_MASK(info)			(~GENMASK((info)->bits - 1, 0))
#define ASID_FIRST_VERSION(info)	(1UL << ((info)->bits))

/* Translate between a full ASID value and its bitmap index. */
#define asid2idx(info, asid)		(((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
#define idx2asid(info, idx)		(((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))

/*
 * Called on generation rollover: rebuild the allocation bitmap from the
 * ASIDs still live on each CPU and request a TLB flush everywhere.
 * Caller must hold info->lock (taken in asid_new_context()).
 */
static void flush_context(struct asid_info *info)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_zero(info->map, NUM_CTXT_ASIDS(info));
	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = reserved_asid(info, i);
		__set_bit(asid2idx(info, asid), info->map);
		reserved_asid(info, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&info->flush_pending);
}

/*
 * If @asid is currently reserved on any CPU, rewrite every reserved
 * copy to @newasid (same ASID, new generation) and return true.
 */
static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
				       u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (reserved_asid(info, cpu) == asid) {
			hit = true;
			reserved_asid(info, cpu) = newasid;
		}
	}

	return hit;
}

/*
 * Slow path of asid_new_context(): pick a fresh ASID for @mm, either by
 * reviving the old one under the current generation or by allocating a
 * free bitmap slot, bumping the generation on exhaustion.
 * Caller must hold info->lock.
 */
static u64 new_context(struct asid_info *info, atomic64_t *pasid,
		       struct mm_struct *mm)
{
	static u32 cur_idx = 1;	/* rotor: resume search after last grant */
	u64 asid = atomic64_read(pasid);
	u64 generation = atomic64_read(&info->generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK(info));

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(info, asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(info, asid), info->map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
	if (asid != NUM_CTXT_ASIDS(info))
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
						 &info->generation);
	flush_context(info);

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);

set_asid:
	__set_bit(asid, info->map);
	cur_idx = asid;
	cpumask_clear(mm_cpumask(mm));
	return idx2asid(info, asid) | generation;
}

/*
 * Generate a new ASID for the context.
 *
 * @pasid: Pointer to the current ASID batch allocated. It will be updated
 * with the new ASID batch.
 * @cpu: current CPU ID. Must have been acquired through get_cpu()
 */
void asid_new_context(struct asid_info *info, atomic64_t *pasid,
		      unsigned int cpu, struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid;

	raw_spin_lock_irqsave(&info->lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(pasid);
	if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
		/* Stale generation: allocate a fresh ASID under the lock. */
		asid = new_context(info, pasid, mm);
		atomic64_set(pasid, asid);
	}

	/* Honour any TLB flush queued for this CPU by a rollover. */
	if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
		info->flush_cpu_ctxt_cb();

	atomic64_set(&active_asid(info, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&info->lock, flags);
}

/*
 * Initialize the ASID allocator
 *
 * @info: Pointer to the asid allocator structure
 * @bits: Number of ASIDs available
 * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are
 * allocated contiguously for a given context. This value should be a power of
 * 2.
 *
 * Returns 0 on success, -ENOMEM if the allocation bitmap cannot be
 * allocated.
 */
int asid_allocator_init(struct asid_info *info,
			u32 bits, unsigned int asid_per_ctxt,
			void (*flush_cpu_ctxt_cb)(void))
{
	info->bits = bits;
	info->ctxt_shift = ilog2(asid_per_ctxt);
	info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is always reserved.
	 */
	WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
	atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
	info->map = bitmap_zalloc(NUM_CTXT_ASIDS(info), GFP_KERNEL);
	if (!info->map)
		return -ENOMEM;

	raw_spin_lock_init(&info->lock);

	return 0;
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
 * Copyright (C) 2017 Linaro Ltd.
 */
#ifndef __VENUS_VENC_H__
#define __VENUS_VENC_H__

struct venus_inst;

/* Initialize the controls for encoder instance @inst; returns 0 or a
 * negative errno. */
int venc_ctrl_init(struct venus_inst *inst);

#endif
// SPDX-License-Identifier: GPL-2.0-or-later
/* Audio/video-routing-related ivtv functions.
   Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
   Copyright (C) 2005-2007 Hans Verkuil <[email protected]>
 */

#include "ivtv-driver.h"
#include "ivtv-i2c.h"
#include "ivtv-cards.h"
#include "ivtv-gpio.h"
#include "ivtv-routing.h"

#include <media/drv-intf/msp3400.h>
#include <media/i2c/m52790.h>
#include <media/i2c/upd64031a.h>
#include <media/i2c/upd64083.h>

/* Selects the audio input and output according to the current settings. */
void ivtv_audio_set_io(struct ivtv *itv)
{
	const struct ivtv_card_audio_input *in;
	u32 input, output = 0;

	/* Determine which input to use: radio override or the card's
	   currently selected audio input. */
	if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags))
		in = &itv->card->radio_input;
	else
		in = &itv->card->audio_inputs[itv->audio_input];

	/* handle muxer chips */
	input = in->muxer_input;
	if (itv->card->hw_muxer & IVTV_HW_M52790)
		output = M52790_OUT_STEREO;
	v4l2_subdev_call(itv->sd_muxer, audio, s_routing,
			input, output, 0);

	/* Route the audio chip itself (output only used for MSP34xx). */
	input = in->audio_input;
	output = 0;
	if (itv->card->hw_audio & IVTV_HW_MSP34XX)
		output = MSP_OUTPUT(MSP_SC_IN_DSP_SCART1);
	ivtv_call_hw(itv, itv->card->hw_audio, audio, s_routing,
			input, output, 0);
}

/* Selects the video input and output according to the current settings. */
void ivtv_video_set_io(struct ivtv *itv)
{
	int inp = itv->active_input;
	u32 input;
	u32 type;

	v4l2_subdev_call(itv->sd_video, video, s_routing,
		itv->card->video_inputs[inp].video_input, 0, 0);

	type = itv->card->video_inputs[inp].video_type;

	/* Collapse the card-specific input type into the generic routing
	   value used by the GPIO mux below. */
	if (type == IVTV_CARD_INPUT_VID_TUNER) {
		input = 0;  /* Tuner */
	} else if (type < IVTV_CARD_INPUT_COMPOSITE1) {
		input = 2;  /* S-Video */
	} else {
		input = 1;  /* Composite */
	}

	if (itv->card->hw_video & IVTV_HW_GPIO)
		ivtv_call_hw(itv, IVTV_HW_GPIO, video, s_routing,
				input, 0, 0);

	/* Ghost-reduction chip: enable GR for tuner/composite, bypass
	   it for S-Video. */
	if (itv->card->hw_video & IVTV_HW_UPD64031A) {
		if (type == IVTV_CARD_INPUT_VID_TUNER ||
		    type >= IVTV_CARD_INPUT_COMPOSITE1) {
			/* Composite: GR on, connect to 3DYCS */
			input = UPD64031A_GR_ON | UPD64031A_3DYCS_COMPOSITE;
		} else {
			/* S-Video: GR bypassed, turn it off */
			input = UPD64031A_GR_OFF | UPD64031A_3DYCS_DISABLE;
		}
		input |= itv->card->gr_config;
		ivtv_call_hw(itv, IVTV_HW_UPD64031A, video, s_routing,
				input, 0, 0);
	}

	/* Y/C separation chip routing. */
	if (itv->card->hw_video & IVTV_HW_UPD6408X) {
		input = UPD64083_YCS_MODE;
		if (type > IVTV_CARD_INPUT_VID_TUNER &&
		    type < IVTV_CARD_INPUT_COMPOSITE1) {
			/* S-Video uses YCNR mode and internal Y-ADC, the
			   upd64031a is not used. */
			input |= UPD64083_YCNR_MODE;
		} else if (itv->card->hw_video & IVTV_HW_UPD64031A) {
			/* Use upd64031a output for tuner and
			   composite(CX23416GYC only) inputs */
			if (type == IVTV_CARD_INPUT_VID_TUNER ||
			    itv->card->type == IVTV_CARD_CX23416GYC) {
				input |= UPD64083_EXT_Y_ADC;
			}
		}
		ivtv_call_hw(itv, IVTV_HW_UPD6408X, video, s_routing,
				input, 0, 0);
	}
}
// SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <net/netlink.h> #include <linux/drbd_genl_api.h> #include "drbd_nla.h" static int drbd_nla_check_mandatory(int maxtype, struct nlattr *nla) { struct nlattr *head = nla_data(nla); int len = nla_len(nla); int rem; /* * validate_nla (called from nla_parse_nested) ignores attributes * beyond maxtype, and does not understand the DRBD_GENLA_F_MANDATORY flag. * In order to have it validate attributes with the DRBD_GENLA_F_MANDATORY * flag set also, check and remove that flag before calling * nla_parse_nested. */ nla_for_each_attr(nla, head, len, rem) { if (nla->nla_type & DRBD_GENLA_F_MANDATORY) { nla->nla_type &= ~DRBD_GENLA_F_MANDATORY; if (nla_type(nla) > maxtype) return -EOPNOTSUPP; } } return 0; } int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla, const struct nla_policy *policy) { int err; err = drbd_nla_check_mandatory(maxtype, nla); if (!err) err = nla_parse_nested_deprecated(tb, maxtype, nla, policy, NULL); return err; } struct nlattr *drbd_nla_find_nested(int maxtype, struct nlattr *nla, int attrtype) { int err; /* * If any nested attribute has the DRBD_GENLA_F_MANDATORY flag set and * we don't know about that attribute, reject all the nested * attributes. */ err = drbd_nla_check_mandatory(maxtype, nla); if (err) return ERR_PTR(err); return nla_find_nested(nla, attrtype); }
// SPDX-License-Identifier: GPL-2.0-or-later /* rc-su3000.h - Keytable for Geniatech HDStar Remote Controller * * Copyright (c) 2013 by Evgeny Plehov <Evgeny [email protected]> */ #include <media/rc-map.h> #include <linux/module.h> static struct rc_map_table su3000[] = { { 0x25, KEY_POWER }, /* right-bottom Red */ { 0x0a, KEY_MUTE }, /* -/-- */ { 0x01, KEY_NUMERIC_1 }, { 0x02, KEY_NUMERIC_2 }, { 0x03, KEY_NUMERIC_3 }, { 0x04, KEY_NUMERIC_4 }, { 0x05, KEY_NUMERIC_5 }, { 0x06, KEY_NUMERIC_6 }, { 0x07, KEY_NUMERIC_7 }, { 0x08, KEY_NUMERIC_8 }, { 0x09, KEY_NUMERIC_9 }, { 0x00, KEY_NUMERIC_0 }, { 0x20, KEY_UP }, /* CH+ */ { 0x21, KEY_DOWN }, /* CH+ */ { 0x12, KEY_VOLUMEUP }, /* Brightness Up */ { 0x13, KEY_VOLUMEDOWN },/* Brightness Down */ { 0x1f, KEY_RECORD }, { 0x17, KEY_PLAY }, { 0x16, KEY_PAUSE }, { 0x0b, KEY_STOP }, { 0x27, KEY_FASTFORWARD },/* >> */ { 0x26, KEY_REWIND }, /* << */ { 0x0d, KEY_OK }, /* Mute */ { 0x11, KEY_LEFT }, /* VOL- */ { 0x10, KEY_RIGHT }, /* VOL+ */ { 0x29, KEY_BACK }, /* button under 9 */ { 0x2c, KEY_MENU }, /* TTX */ { 0x2b, KEY_EPG }, /* EPG */ { 0x1e, KEY_RED }, /* OSD */ { 0x0e, KEY_GREEN }, /* Window */ { 0x2d, KEY_YELLOW }, /* button under << */ { 0x0f, KEY_BLUE }, /* bottom yellow button */ { 0x14, KEY_AUDIO }, /* Snapshot */ { 0x38, KEY_TV }, /* TV/Radio */ { 0x0c, KEY_ESC } /* upper Red button */ }; static struct rc_map_list su3000_map = { .map = { .scan = su3000, .size = ARRAY_SIZE(su3000), .rc_proto = RC_PROTO_RC5, .name = RC_MAP_SU3000, } }; static int __init init_rc_map_su3000(void) { return rc_map_register(&su3000_map); } static void __exit exit_rc_map_su3000(void) { rc_map_unregister(&su3000_map); } module_init(init_rc_map_su3000) module_exit(exit_rc_map_su3000) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeny Plehov <Evgeny [email protected]>"); MODULE_DESCRIPTION("Geniatech HDStar remote controller keytable");
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;

#include "kirkwood.dtsi"
#include "kirkwood-98dx4122.dtsi"
#include "kirkwood-km_common.dtsi"

/ {
	model = "Keymile Kirkwood Fixed Eth";
	compatible = "keymile,km_fixedeth", "marvell,kirkwood-98DX4122",
		"marvell,kirkwood";

	memory {
		device_type = "memory";
		/* 256 MiB of RAM at address 0 */
		reg = <0x00000000 0x10000000>;
	};
};

/* Fixed-link ethernet: no PHY, link forced to gigabit full duplex. */
&eth0 {
	status = "okay";
	ethernet0-port@0 {
		speed = <1000>; /* <SPEED_1000> */
		duplex = <1>; /* <DUPLEX_FULL> */
	};
};
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Linux network driver for QLogic BR-series Converged Network Adapter. */ /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved * www.qlogic.com */ /* File for interrupt macros and functions */ #ifndef __BNA_HW_DEFS_H__ #define __BNA_HW_DEFS_H__ #include "bfi_reg.h" /* SW imposed limits */ #define BFI_ENET_DEF_TXQ 1 #define BFI_ENET_DEF_RXP 1 #define BFI_ENET_DEF_UCAM 1 #define BFI_ENET_DEF_RITSZ 1 #define BFI_ENET_MAX_MCAM 256 #define BFI_INVALID_RID -1 #define BFI_IBIDX_SIZE 4 #define BFI_VLAN_WORD_SHIFT 5 /* 32 bits */ #define BFI_VLAN_WORD_MASK 0x1F #define BFI_VLAN_BLOCK_SHIFT 9 /* 512 bits */ #define BFI_VLAN_BMASK_ALL 0xFF #define BFI_COALESCING_TIMER_UNIT 5 /* 5us */ #define BFI_MAX_COALESCING_TIMEO 0xFF /* in 5us units */ #define BFI_MAX_INTERPKT_COUNT 0xFF #define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */ #define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */ #define BFI_TX_INTERPKT_COUNT 12 /* Pkt Cnt = 12 */ #define BFI_TX_INTERPKT_TIMEO 15 /* 15 * 0.5 = 7.5us */ #define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */ #define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */ #define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */ #define BFI_TXQ_WI_SIZE 64 /* bytes */ #define BFI_RXQ_WI_SIZE 8 /* bytes */ #define BFI_CQ_WI_SIZE 16 /* bytes */ #define BFI_TX_MAX_WRR_QUOTA 0xFFF #define BFI_TX_MAX_VECTORS_PER_WI 4 #define BFI_TX_MAX_VECTORS_PER_PKT 0xFF #define BFI_TX_MAX_DATA_PER_VECTOR 0xFFFF #define BFI_TX_MAX_DATA_PER_PKT 0xFFFFFF /* Small Q buffer size */ #define BFI_SMALL_RXBUF_SIZE 128 #define BFI_TX_MAX_PRIO 8 #define BFI_TX_PRIO_MAP_ALL 0xFF /* * * Register definitions and macros * */ #define BNA_PCI_REG_CT_ADDRSZ (0x40000) #define ct_reg_addr_init(_bna, _pcidev) \ { \ struct bna_reg_offset reg_offset[] = \ {{HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK}, \ {HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK}, \ {HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK}, 
\ {HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK} }; \ \ (_bna)->regs.fn_int_status = (_pcidev)->pci_bar_kva + \ reg_offset[(_pcidev)->pci_func].fn_int_status;\ (_bna)->regs.fn_int_mask = (_pcidev)->pci_bar_kva + \ reg_offset[(_pcidev)->pci_func].fn_int_mask;\ } #define ct_bit_defn_init(_bna, _pcidev) \ { \ (_bna)->bits.mbox_status_bits = (__HFN_INT_MBOX_LPU0 | \ __HFN_INT_MBOX_LPU1); \ (_bna)->bits.mbox_mask_bits = (__HFN_INT_MBOX_LPU0 | \ __HFN_INT_MBOX_LPU1); \ (_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK); \ (_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK); \ (_bna)->bits.halt_status_bits = __HFN_INT_LL_HALT; \ (_bna)->bits.halt_mask_bits = __HFN_INT_LL_HALT; \ } #define ct2_reg_addr_init(_bna, _pcidev) \ { \ (_bna)->regs.fn_int_status = (_pcidev)->pci_bar_kva + \ CT2_HOSTFN_INT_STATUS; \ (_bna)->regs.fn_int_mask = (_pcidev)->pci_bar_kva + \ CT2_HOSTFN_INTR_MASK; \ } #define ct2_bit_defn_init(_bna, _pcidev) \ { \ (_bna)->bits.mbox_status_bits = (__HFN_INT_MBOX_LPU0_CT2 | \ __HFN_INT_MBOX_LPU1_CT2); \ (_bna)->bits.mbox_mask_bits = (__HFN_INT_MBOX_LPU0_CT2 | \ __HFN_INT_MBOX_LPU1_CT2); \ (_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK_CT2); \ (_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK_CT2); \ (_bna)->bits.halt_status_bits = __HFN_INT_CPQ_HALT_CT2; \ (_bna)->bits.halt_mask_bits = __HFN_INT_CPQ_HALT_CT2; \ } #define bna_reg_addr_init(_bna, _pcidev) \ { \ switch ((_pcidev)->device_id) { \ case PCI_DEVICE_ID_BROCADE_CT: \ ct_reg_addr_init((_bna), (_pcidev)); \ ct_bit_defn_init((_bna), (_pcidev)); \ break; \ case BFA_PCI_DEVICE_ID_CT2: \ ct2_reg_addr_init((_bna), (_pcidev)); \ ct2_bit_defn_init((_bna), (_pcidev)); \ break; \ } \ } #define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id) /* Interrupt related bits, flags and macros */ #define IB_STATUS_BITS 0x0000ffff #define BNA_IS_MBOX_INTR(_bna, _intr_status) \ ((_intr_status) & (_bna)->bits.mbox_status_bits) #define BNA_IS_HALT_INTR(_bna, _intr_status) \ ((_intr_status) & 
(_bna)->bits.halt_status_bits) #define BNA_IS_ERR_INTR(_bna, _intr_status) \ ((_intr_status) & (_bna)->bits.error_status_bits) #define BNA_IS_MBOX_ERR_INTR(_bna, _intr_status) \ (BNA_IS_MBOX_INTR(_bna, _intr_status) | \ BNA_IS_ERR_INTR(_bna, _intr_status)) #define BNA_IS_INTX_DATA_INTR(_intr_status) \ ((_intr_status) & IB_STATUS_BITS) #define bna_halt_clear(_bna) \ do { \ u32 init_halt; \ init_halt = readl((_bna)->ioceth.ioc.ioc_regs.ll_halt); \ init_halt &= ~__FW_INIT_HALT_P; \ writel(init_halt, (_bna)->ioceth.ioc.ioc_regs.ll_halt); \ init_halt = readl((_bna)->ioceth.ioc.ioc_regs.ll_halt); \ } while (0) #define bna_intx_disable(_bna, _cur_mask) \ { \ (_cur_mask) = readl((_bna)->regs.fn_int_mask); \ writel(0xffffffff, (_bna)->regs.fn_int_mask); \ } #define bna_intx_enable(bna, new_mask) \ writel((new_mask), (bna)->regs.fn_int_mask) #define bna_mbox_intr_disable(bna) \ do { \ u32 mask; \ mask = readl((bna)->regs.fn_int_mask); \ writel((mask | (bna)->bits.mbox_mask_bits | \ (bna)->bits.error_mask_bits), (bna)->regs.fn_int_mask); \ mask = readl((bna)->regs.fn_int_mask); \ } while (0) #define bna_mbox_intr_enable(bna) \ do { \ u32 mask; \ mask = readl((bna)->regs.fn_int_mask); \ writel((mask & ~((bna)->bits.mbox_mask_bits | \ (bna)->bits.error_mask_bits)), (bna)->regs.fn_int_mask);\ mask = readl((bna)->regs.fn_int_mask); \ } while (0) #define bna_intr_status_get(_bna, _status) \ { \ (_status) = readl((_bna)->regs.fn_int_status); \ if (_status) { \ writel(((_status) & ~(_bna)->bits.mbox_status_bits), \ (_bna)->regs.fn_int_status); \ } \ } /* * MAX ACK EVENTS : No. of acks that can be accumulated in driver, * before acking to h/w. The no. of bits is 16 in the doorbell register, * however we keep this limited to 15 bits. * This is because around the edge of 64K boundary (16 bits), one * single poll can make the accumulated ACK counter cross the 64K boundary, * causing problems, when we try to ack with a value greater than 64K. 
* 15 bits (32K) should be large enough to accumulate, anyways, and the max. * acked events to h/w can be (32K + max poll weight) (currently 64). */ #define BNA_IB_MAX_ACK_EVENTS BIT(15) /* These macros build the data portion of the TxQ/RxQ doorbell */ #define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi)) #define BNA_DOORBELL_Q_STOP (0x40000000) /* These macros build the data portion of the IB doorbell */ #define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \ (0x80000000 | ((_timeout) << 16) | (_events)) #define BNA_DOORBELL_IB_INT_DISABLE (0x40000000) /* Set the coalescing timer for the given ib */ #define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \ ((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0)) /* Acks 'events' # of events for a given ib while disabling interrupts */ #define bna_ib_ack_disable_irq(_i_dbell, _events) \ (writel(BNA_DOORBELL_IB_INT_ACK(0, (_events)), \ (_i_dbell)->doorbell_addr)) /* Acks 'events' # of events for a given ib */ #define bna_ib_ack(_i_dbell, _events) \ (writel(((_i_dbell)->doorbell_ack | (_events)), \ (_i_dbell)->doorbell_addr)) #define bna_ib_start(_bna, _ib, _is_regular) \ { \ u32 intx_mask; \ struct bna_ib *ib = _ib; \ if ((ib->intr_type == BNA_INTR_T_INTX)) { \ bna_intx_disable((_bna), intx_mask); \ intx_mask &= ~(ib->intr_vector); \ bna_intx_enable((_bna), intx_mask); \ } \ bna_ib_coalescing_timer_set(&ib->door_bell, \ ib->coalescing_timeo); \ if (_is_regular) \ bna_ib_ack(&ib->door_bell, 0); \ } #define bna_ib_stop(_bna, _ib) \ { \ u32 intx_mask; \ struct bna_ib *ib = _ib; \ writel(BNA_DOORBELL_IB_INT_DISABLE, \ ib->door_bell.doorbell_addr); \ if (ib->intr_type == BNA_INTR_T_INTX) { \ bna_intx_disable((_bna), intx_mask); \ intx_mask |= ib->intr_vector; \ bna_intx_enable((_bna), intx_mask); \ } \ } #define bna_txq_prod_indx_doorbell(_tcb) \ (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \ (_tcb)->q_dbell)) #define bna_rxq_prod_indx_doorbell(_rcb) \ 
(writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \ (_rcb)->q_dbell)) /* TxQ, RxQ, CQ related bits, offsets, macros */ /* TxQ Entry Opcodes */ #define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ #define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */ #define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */ /* TxQ Entry Control Flags */ #define BNA_TXQ_WI_CF_FCOE_CRC BIT(8) #define BNA_TXQ_WI_CF_IPID_MODE BIT(5) #define BNA_TXQ_WI_CF_INS_PRIO BIT(4) #define BNA_TXQ_WI_CF_INS_VLAN BIT(3) #define BNA_TXQ_WI_CF_UDP_CKSUM BIT(2) #define BNA_TXQ_WI_CF_TCP_CKSUM BIT(1) #define BNA_TXQ_WI_CF_IP_CKSUM BIT(0) #define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \ (((_hdr_size) << 10) | ((_offset) & 0x3FF)) /* * Completion Q defines */ /* CQ Entry Flags */ #define BNA_CQ_EF_MAC_ERROR BIT(0) #define BNA_CQ_EF_FCS_ERROR BIT(1) #define BNA_CQ_EF_TOO_LONG BIT(2) #define BNA_CQ_EF_FC_CRC_OK BIT(3) #define BNA_CQ_EF_RSVD1 BIT(4) #define BNA_CQ_EF_L4_CKSUM_OK BIT(5) #define BNA_CQ_EF_L3_CKSUM_OK BIT(6) #define BNA_CQ_EF_HDS_HEADER BIT(7) #define BNA_CQ_EF_UDP BIT(8) #define BNA_CQ_EF_TCP BIT(9) #define BNA_CQ_EF_IP_OPTIONS BIT(10) #define BNA_CQ_EF_IPV6 BIT(11) #define BNA_CQ_EF_IPV4 BIT(12) #define BNA_CQ_EF_VLAN BIT(13) #define BNA_CQ_EF_RSS BIT(14) #define BNA_CQ_EF_RSVD2 BIT(15) #define BNA_CQ_EF_MCAST_MATCH BIT(16) #define BNA_CQ_EF_MCAST BIT(17) #define BNA_CQ_EF_BCAST BIT(18) #define BNA_CQ_EF_REMOTE BIT(19) #define BNA_CQ_EF_LOCAL BIT(20) /* CAT2 ASIC does not use bit 21 as per the SPEC. * Bit 31 is set in every end of frame completion */ #define BNA_CQ_EF_EOP BIT(31) /* Data structures */ struct bna_reg_offset { u32 fn_int_status; u32 fn_int_mask; }; struct bna_bit_defn { u32 mbox_status_bits; u32 mbox_mask_bits; u32 error_status_bits; u32 error_mask_bits; u32 halt_status_bits; u32 halt_mask_bits; }; struct bna_reg { void __iomem *fn_int_status; void __iomem *fn_int_mask; }; /* TxQ Vector (a.k.a. 
Tx-Buffer Descriptor) */ struct bna_dma_addr { u32 msb; u32 lsb; }; struct bna_txq_wi_vector { u16 reserved; u16 length; /* Only 14 LSB are valid */ struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */ }; /* TxQ Entry Structure * * BEWARE: Load values into this structure with correct endianness. */ struct bna_txq_entry { union { struct { u8 reserved; u8 num_vectors; /* number of vectors present */ u16 opcode; /* Either */ /* BNA_TXQ_WI_SEND or */ /* BNA_TXQ_WI_SEND_LSO */ u16 flags; /* OR of all the flags */ u16 l4_hdr_size_n_offset; u16 vlan_tag; u16 lso_mss; /* Only 14 LSB are valid */ u32 frame_length; /* Only 24 LSB are valid */ } wi; struct { u16 reserved; u16 opcode; /* Must be */ /* BNA_TXQ_WI_EXTENSION */ u32 reserved2[3]; /* Place holder for */ /* removed vector (12 bytes) */ } wi_ext; } hdr; struct bna_txq_wi_vector vector[4]; }; /* RxQ Entry Structure */ struct bna_rxq_entry { /* Rx-Buffer */ struct bna_dma_addr host_addr; /* Rx-Buffer DMA address */ }; /* CQ Entry Structure */ struct bna_cq_entry { u32 flags; u16 vlan_tag; u16 length; u32 rss_hash; u8 valid; u8 reserved1; u8 reserved2; u8 rxq_id; }; #endif /* __BNA_HW_DEFS_H__ */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * DRA7 Clock init
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * Tero Kristo ([email protected])
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk/ti.h>
#include <dt-bindings/clock/dra7.h>

#include "clock.h"

/* Default rates (Hz) programmed into the GMAC and USB DPLLs at init */
#define DRA7_DPLL_GMAC_DEFFREQ		1000000000
#define DRA7_DPLL_USB_DEFFREQ		960000000

static const struct omap_clkctrl_reg_data dra7_mpu_clkctrl_regs[] __initconst = {
	{ DRA7_MPU_MPU_CLKCTRL, NULL, 0, "dpll_mpu_m2_ck" },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_dsp1_clkctrl_regs[] __initconst = {
	{ DRA7_DSP1_MMU0_DSP1_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_dsp_m2_ck" },
	{ 0 },
};

static const char * const dra7_ipu1_gfclk_mux_parents[] __initconst = {
	"dpll_abe_m2x2_ck",
	"dpll_core_h22x2_ck",
	NULL,
};

static const struct omap_clkctrl_bit_data dra7_mmu_ipu1_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_ipu1_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_ipu1_clkctrl_regs[] __initconst = {
	{ DRA7_IPU1_MMU_IPU1_CLKCTRL, dra7_mmu_ipu1_bit_data, CLKF_HW_SUP | CLKF_NO_IDLEST, "ipu1-clkctrl:0000:24" },
	{ 0 },
};

static const char * const dra7_mcasp1_aux_gfclk_mux_parents[] __initconst = {
	"per_abe_x1_gfclk2_div",
	"video1_clk2_div",
	"video2_clk2_div",
	"hdmi_clk2_div",
	NULL,
};

static const char * const dra7_mcasp1_ahclkx_mux_parents[] __initconst = {
	"abe_24m_fclk",
	"abe_sys_clk_div",
	"func_24m_clk",
	"atl_clkin3_ck",
	"atl_clkin2_ck",
	"atl_clkin1_ck",
	"atl_clkin0_ck",
	"sys_clkin2",
	"ref_clkin0_ck",
	"ref_clkin1_ck",
	"ref_clkin2_ck",
	"ref_clkin3_ck",
	"mlb_clk",
	"mlbp_clk",
	NULL,
};

static const struct omap_clkctrl_bit_data dra7_mcasp1_bit_data[] __initconst = {
	{ 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
	{ 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
	{ 28, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
	{ 0 },
};

static const char * const dra7_timer5_gfclk_mux_parents[] __initconst = {
	"timer_sys_clk_div",
	"sys_32k_ck",
	"sys_clkin2",
	"ref_clkin0_ck",
	"ref_clkin1_ck",
	"ref_clkin2_ck",
	"ref_clkin3_ck",
	"abe_giclk_div",
	"video1_div_clk",
	"video2_div_clk",
	"hdmi_div_clk",
	"clkoutmux0_clk_mux",
	NULL,
};

static const struct omap_clkctrl_bit_data dra7_timer5_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_timer6_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_timer7_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_timer8_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
	{ 0 },
};

static const char * const dra7_uart6_gfclk_mux_parents[] __initconst = {
	"func_48m_fclk",
	"dpll_per_m2x2_ck",
	NULL,
};

static const struct omap_clkctrl_bit_data dra7_uart6_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_ipu_clkctrl_regs[] __initconst = {
	{ DRA7_IPU_MCASP1_CLKCTRL, dra7_mcasp1_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0000:22" },
	{ DRA7_IPU_TIMER5_CLKCTRL, dra7_timer5_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0008:24" },
	{ DRA7_IPU_TIMER6_CLKCTRL, dra7_timer6_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0010:24" },
	{ DRA7_IPU_TIMER7_CLKCTRL, dra7_timer7_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0018:24" },
	{ DRA7_IPU_TIMER8_CLKCTRL, dra7_timer8_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0020:24" },
	{ DRA7_IPU_I2C5_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
	{ DRA7_IPU_UART6_CLKCTRL, dra7_uart6_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0030:24" },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_dsp2_clkctrl_regs[] __initconst = {
	{ DRA7_DSP2_MMU0_DSP2_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_dsp_m2_ck" },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_rtc_clkctrl_regs[] __initconst = {
	{ DRA7_RTC_RTCSS_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
	{ 0 },
};

static const char * const dra7_cam_gfclk_mux_parents[] __initconst = {
	"l3_iclk_div",
	"core_iss_main_clk",
	NULL,
};

static const struct omap_clkctrl_bit_data dra7_cam_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_cam_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_cam_clkctrl_regs[] __initconst = {
	{ DRA7_CAM_VIP1_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_CAM_VIP2_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_CAM_VIP3_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_vpe_clkctrl_regs[] __initconst = {
	{ DRA7_VPE_VPE_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h23x2_ck" },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_coreaon_clkctrl_regs[] __initconst = {
	{ DRA7_COREAON_SMARTREFLEX_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
	{ DRA7_COREAON_SMARTREFLEX_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_l3main1_clkctrl_regs[] __initconst = {
	{ DRA7_L3MAIN1_L3_MAIN_1_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L3MAIN1_GPMC_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L3MAIN1_TPCC_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L3MAIN1_TPTC0_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L3MAIN1_TPTC1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L3MAIN1_VCP1_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L3MAIN1_VCP2_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_ipu2_clkctrl_regs[] __initconst = {
	{ DRA7_IPU2_MMU_IPU2_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_core_h22x2_ck" },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_dma_clkctrl_regs[] __initconst = {
	{ DRA7_DMA_DMA_SYSTEM_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_emif_clkctrl_regs[] __initconst = {
	{ DRA7_EMIF_DMM_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ 0 },
};

static const char * const dra7_atl_dpll_clk_mux_parents[] __initconst = {
	"sys_32k_ck",
	"video1_clkin_ck",
	"video2_clkin_ck",
	"hdmi_clkin_ck",
	NULL,
};

static const char * const dra7_atl_gfclk_mux_parents[] __initconst = {
	"l3_iclk_div",
	"dpll_abe_m2_ck",
	"atl-clkctrl:0000:24",
	NULL,
};

static const struct omap_clkctrl_bit_data dra7_atl_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_atl_dpll_clk_mux_parents, NULL },
	{ 26, TI_CLK_MUX, dra7_atl_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_atl_clkctrl_regs[] __initconst = {
	{ DRA7_ATL_ATL_CLKCTRL, dra7_atl_bit_data, CLKF_SW_SUP, "atl-clkctrl:0000:26" },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_l4cfg_clkctrl_regs[] __initconst = {
	{ DRA7_L4CFG_L4_CFG_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4CFG_SPINLOCK_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4CFG_MAILBOX1_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4CFG_MAILBOX2_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4CFG_MAILBOX3_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4CFG_MAILBOX4_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4CFG_MAILBOX5_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4CFG_MAILBOX6_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4CFG_MAILBOX7_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4CFG_MAILBOX8_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4CFG_MAILBOX9_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4CFG_MAILBOX10_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4CFG_MAILBOX11_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4CFG_MAILBOX12_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4CFG_MAILBOX13_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_l3instr_clkctrl_regs[] __initconst = {
	{ DRA7_L3INSTR_L3_MAIN_2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L3INSTR_L3_INSTR_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_iva_clkctrl_regs[] __initconst = {
	{ DRA7_IVA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_iva_h12x2_ck" },
	{ DRA7_SL2IF_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" },
	{ 0 },
};

static const char * const dra7_dss_dss_clk_parents[] __initconst = {
	"dpll_per_h12x2_ck",
	NULL,
};

static const char * const dra7_dss_48mhz_clk_parents[] __initconst = {
	"func_48m_fclk",
	NULL,
};

static const char * const dra7_dss_hdmi_clk_parents[] __initconst = {
	"hdmi_dpll_clk_mux",
	NULL,
};

static const char * const dra7_dss_32khz_clk_parents[] __initconst = {
	"sys_32k_ck",
	NULL,
};

static const char * const dra7_dss_video1_clk_parents[] __initconst = {
	"video1_dpll_clk_mux",
	NULL,
};

static const char * const dra7_dss_video2_clk_parents[] __initconst = {
	"video2_dpll_clk_mux",
	NULL,
};

static const struct omap_clkctrl_bit_data dra7_dss_core_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_dss_clk_parents, NULL },
	{ 9, TI_CLK_GATE, dra7_dss_48mhz_clk_parents, NULL },
	{ 10, TI_CLK_GATE, dra7_dss_hdmi_clk_parents, NULL },
	{ 11, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 12, TI_CLK_GATE, dra7_dss_video1_clk_parents, NULL },
	{ 13, TI_CLK_GATE, dra7_dss_video2_clk_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_dss_clkctrl_regs[] __initconst = {
	{ DRA7_DSS_DSS_CORE_CLKCTRL, dra7_dss_core_bit_data, CLKF_SW_SUP, "dss-clkctrl:0000:8" },
	{ DRA7_DSS_BB2D_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_h24x2_ck" },
	{ 0 },
};

static const char * const dra7_gpu_core_mux_parents[] __initconst = {
	"dpll_core_h14x2_ck",
	"dpll_per_h14x2_ck",
	"dpll_gpu_m2_ck",
	NULL,
};

static const char * const dra7_gpu_hyd_mux_parents[] __initconst = {
	"dpll_core_h14x2_ck",
	"dpll_per_h14x2_ck",
	"dpll_gpu_m2_ck",
	NULL,
};

static const struct omap_clkctrl_bit_data dra7_gpu_core_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_gpu_core_mux_parents, NULL, },
	{ 26, TI_CLK_MUX, dra7_gpu_hyd_mux_parents, NULL, },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_gpu_clkctrl_regs[] __initconst = {
	{ DRA7_GPU_CLKCTRL, dra7_gpu_core_bit_data, CLKF_SW_SUP, "gpu-clkctrl:0000:24", },
	{ 0 },
};

static const char * const dra7_mmc1_fclk_mux_parents[] __initconst = {
	"func_128m_clk",
	"dpll_per_m2x2_ck",
	NULL,
};

static const char * const dra7_mmc1_fclk_div_parents[] __initconst = {
	"l3init-clkctrl:0008:24",
	NULL,
};

static const struct omap_clkctrl_div_data dra7_mmc1_fclk_div_data __initconst = {
	.max_div = 4,
	.flags = CLK_DIVIDER_POWER_OF_TWO,
};

static const struct omap_clkctrl_bit_data dra7_mmc1_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 24, TI_CLK_MUX, dra7_mmc1_fclk_mux_parents, NULL },
	{ 25, TI_CLK_DIVIDER, dra7_mmc1_fclk_div_parents, &dra7_mmc1_fclk_div_data },
	{ 0 },
};

static const char * const dra7_mmc2_fclk_div_parents[] __initconst = {
	"l3init-clkctrl:0010:24",
	NULL,
};

static const struct omap_clkctrl_div_data dra7_mmc2_fclk_div_data __initconst = {
	.max_div = 4,
	.flags = CLK_DIVIDER_POWER_OF_TWO,
};

static const struct omap_clkctrl_bit_data dra7_mmc2_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 24, TI_CLK_MUX, dra7_mmc1_fclk_mux_parents, NULL },
	{ 25, TI_CLK_DIVIDER, dra7_mmc2_fclk_div_parents, &dra7_mmc2_fclk_div_data },
	{ 0 },
};

static const char * const dra7_usb_otg_ss2_refclk960m_parents[] __initconst = {
	"l3init_960m_gfclk",
	NULL,
};

static const struct omap_clkctrl_bit_data dra7_usb_otg_ss2_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_usb_otg_ss2_refclk960m_parents, NULL },
	{ 0 },
};

static const char * const dra7_sata_ref_clk_parents[] __initconst = {
	"sys_clkin1",
	NULL,
};

static const struct omap_clkctrl_bit_data dra7_sata_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_sata_ref_clk_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_usb_otg_ss1_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_usb_otg_ss2_refclk960m_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_l3init_clkctrl_regs[] __initconst = {
	{ DRA7_L3INIT_MMC1_CLKCTRL, dra7_mmc1_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0008:25" },
	{ DRA7_L3INIT_MMC2_CLKCTRL, dra7_mmc2_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0010:25" },
	{ DRA7_L3INIT_USB_OTG_SS2_CLKCTRL, dra7_usb_otg_ss2_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
	{ DRA7_L3INIT_USB_OTG_SS3_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
	{ DRA7_L3INIT_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_DRA74 | CLKF_SOC_DRA76, "dpll_core_h13x2_ck" },
	{ DRA7_L3INIT_SATA_CLKCTRL, dra7_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
	{ DRA7_L3INIT_OCP2SCP1_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
	{ DRA7_L3INIT_OCP2SCP3_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
	{ DRA7_L3INIT_USB_OTG_SS1_CLKCTRL, dra7_usb_otg_ss1_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
	{ 0 },
};

static const char * const dra7_optfclk_pciephy1_clk_parents[] __initconst = {
	"apll_pcie_ck",
	NULL,
};

static const char * const dra7_optfclk_pciephy1_div_clk_parents[] __initconst = {
	"optfclk_pciephy_div",
	NULL,
};

static const struct omap_clkctrl_bit_data dra7_pcie1_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 9, TI_CLK_GATE, dra7_optfclk_pciephy1_clk_parents, NULL },
	{ 10, TI_CLK_GATE, dra7_optfclk_pciephy1_div_clk_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_pcie2_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 9, TI_CLK_GATE, dra7_optfclk_pciephy1_clk_parents, NULL },
	{ 10, TI_CLK_GATE, dra7_optfclk_pciephy1_div_clk_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_pcie_clkctrl_regs[] __initconst = {
	{ DRA7_PCIE_PCIE1_CLKCTRL, dra7_pcie1_bit_data, CLKF_SW_SUP, "l4_root_clk_div" },
	{ DRA7_PCIE_PCIE2_CLKCTRL, dra7_pcie2_bit_data, CLKF_SW_SUP, "l4_root_clk_div" },
	{ 0 },
};

static const char * const dra7_rmii_50mhz_clk_mux_parents[] __initconst = {
	"dpll_gmac_h11x2_ck",
	"rmii_clk_ck",
	NULL,
};

static const char * const dra7_gmac_rft_clk_mux_parents[] __initconst = {
	"video1_clkin_ck",
	"video2_clkin_ck",
	"dpll_abe_m2_ck",
	"hdmi_clkin_ck",
	"l3_iclk_div",
	NULL,
};

static const struct omap_clkctrl_bit_data dra7_gmac_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_rmii_50mhz_clk_mux_parents, NULL },
	{ 25, TI_CLK_MUX, dra7_gmac_rft_clk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_gmac_clkctrl_regs[] __initconst = {
	{ DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "gmac_main_clk" },
	{ 0 },
};

static const char * const dra7_timer10_gfclk_mux_parents[] __initconst = {
	"timer_sys_clk_div",
	"sys_32k_ck",
	"sys_clkin2",
	"ref_clkin0_ck",
	"ref_clkin1_ck",
	"ref_clkin2_ck",
	"ref_clkin3_ck",
	"abe_giclk_div",
	"video1_div_clk",
	"video2_div_clk",
	"hdmi_div_clk",
	NULL,
};

static const struct omap_clkctrl_bit_data dra7_timer10_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_timer11_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_timer2_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_timer3_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_timer4_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_timer9_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_gpio2_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_gpio3_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_gpio4_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_gpio5_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_gpio6_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_gpio7_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_gpio8_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 0 },
};

static const char * const dra7_mmc3_gfclk_div_parents[] __initconst = {
	"l4per-clkctrl:00f8:24",
	NULL,
};

static const struct omap_clkctrl_div_data dra7_mmc3_gfclk_div_data __initconst = {
	.max_div = 4,
	.flags = CLK_DIVIDER_POWER_OF_TWO,
};

static const struct omap_clkctrl_bit_data dra7_mmc3_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
	{ 25, TI_CLK_DIVIDER, dra7_mmc3_gfclk_div_parents, &dra7_mmc3_gfclk_div_data },
	{ 0 },
};

static const char * const dra7_mmc4_gfclk_div_parents[] __initconst = {
	"l4per-clkctrl:0100:24",
	NULL,
};

static const struct omap_clkctrl_div_data dra7_mmc4_gfclk_div_data __initconst = {
	.max_div = 4,
	.flags = CLK_DIVIDER_POWER_OF_TWO,
};

static const struct omap_clkctrl_bit_data dra7_mmc4_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
	{ 25, TI_CLK_DIVIDER, dra7_mmc4_gfclk_div_parents, &dra7_mmc4_gfclk_div_data },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_uart1_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_uart2_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_uart3_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_uart4_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_uart5_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_l4per_clkctrl_regs[] __initconst = {
	{ DRA7_L4PER_TIMER10_CLKCTRL, dra7_timer10_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0000:24" },
	{ DRA7_L4PER_TIMER11_CLKCTRL, dra7_timer11_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0008:24" },
	{ DRA7_L4PER_TIMER2_CLKCTRL, dra7_timer2_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0010:24" },
	{ DRA7_L4PER_TIMER3_CLKCTRL, dra7_timer3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0018:24" },
	{ DRA7_L4PER_TIMER4_CLKCTRL, dra7_timer4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0020:24" },
	{ DRA7_L4PER_TIMER9_CLKCTRL, dra7_timer9_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0028:24" },
	{ DRA7_L4PER_ELM_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4PER_GPIO2_CLKCTRL, dra7_gpio2_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L4PER_GPIO3_CLKCTRL, dra7_gpio3_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L4PER_GPIO4_CLKCTRL, dra7_gpio4_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L4PER_GPIO5_CLKCTRL, dra7_gpio5_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L4PER_GPIO6_CLKCTRL, dra7_gpio6_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L4PER_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_fclk" },
	{ DRA7_L4PER_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
	{ DRA7_L4PER_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
	{ DRA7_L4PER_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
	{ DRA7_L4PER_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
	{ DRA7_L4PER_L4_PER1_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4PER_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
	{ DRA7_L4PER_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
	{ DRA7_L4PER_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
	{ DRA7_L4PER_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
	{ DRA7_L4PER_GPIO7_CLKCTRL, dra7_gpio7_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L4PER_GPIO8_CLKCTRL, dra7_gpio8_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L4PER_MMC3_CLKCTRL, dra7_mmc3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:00f8:25" },
	{ DRA7_L4PER_MMC4_CLKCTRL, dra7_mmc4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0100:25" },
	{ DRA7_L4PER_UART1_CLKCTRL, dra7_uart1_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0118:24" },
	{ DRA7_L4PER_UART2_CLKCTRL, dra7_uart2_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0120:24" },
	{ DRA7_L4PER_UART3_CLKCTRL, dra7_uart3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0128:24" },
	{ DRA7_L4PER_UART4_CLKCTRL, dra7_uart4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0130:24" },
	{ DRA7_L4PER_UART5_CLKCTRL, dra7_uart5_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0148:24" },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_l4sec_clkctrl_regs[] __initconst = {
	{ DRA7_L4SEC_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L4SEC_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L4SEC_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l4_root_clk_div" },
	{ DRA7_L4SEC_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
	{ DRA7_L4SEC_SHAM2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
	{ 0 },
};

static const char * const dra7_qspi_gfclk_mux_parents[] __initconst = {
	"func_128m_clk",
	"dpll_per_h13x2_ck",
	NULL,
};

static const char * const dra7_qspi_gfclk_div_parents[] __initconst = {
	"l4per2-clkctrl:012c:24",
	NULL,
};

static const struct omap_clkctrl_div_data dra7_qspi_gfclk_div_data __initconst = {
	.max_div = 4,
	.flags = CLK_DIVIDER_POWER_OF_TWO,
};

static const struct omap_clkctrl_bit_data dra7_qspi_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_qspi_gfclk_mux_parents, NULL },
	{ 25, TI_CLK_DIVIDER, dra7_qspi_gfclk_div_parents, &dra7_qspi_gfclk_div_data },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_mcasp2_bit_data[] __initconst = {
	{ 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
	{ 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
	{ 28, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_mcasp3_bit_data[] __initconst = {
	{ 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
	{ 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_mcasp5_bit_data[] __initconst = {
	{ 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
	{ 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_mcasp8_bit_data[] __initconst = {
	{ 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
	{ 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_mcasp4_bit_data[] __initconst = {
	{ 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
	{ 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_uart7_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_uart8_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_uart9_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_mcasp6_bit_data[] __initconst = {
	{ 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
	{ 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_mcasp7_bit_data[] __initconst = {
	{ 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
	{ 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_l4per2_clkctrl_regs[] __initconst = {
	{ DRA7_L4PER2_L4_PER2_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4PER2_PRUSS1_CLKCTRL, NULL, CLKF_SW_SUP, "" },
	{ DRA7_L4PER2_PRUSS2_CLKCTRL, NULL, CLKF_SW_SUP, "" },
	{ DRA7_L4PER2_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" },
	{ DRA7_L4PER2_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" },
	{ DRA7_L4PER2_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" },
	{ DRA7_L4PER2_QSPI_CLKCTRL, dra7_qspi_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:012c:25" },
	{ DRA7_L4PER2_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0154:22" },
	{ DRA7_L4PER2_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:015c:22" },
	{ DRA7_L4PER2_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:016c:22" },
	{ DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:22" },
	{ DRA7_L4PER2_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:018c:22" },
	{ DRA7_L4PER2_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01c4:24" },
	{ DRA7_L4PER2_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01d4:24" },
	{ DRA7_L4PER2_UART9_CLKCTRL, dra7_uart9_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01dc:24" },
	{ DRA7_L4PER2_DCAN2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin1" },
	{ DRA7_L4PER2_MCASP6_CLKCTRL, dra7_mcasp6_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01f8:22" },
	{ DRA7_L4PER2_MCASP7_CLKCTRL, dra7_mcasp7_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01fc:22" },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_timer13_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_timer14_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_timer15_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_timer16_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_l4per3_clkctrl_regs[] __initconst = {
	{ DRA7_L4PER3_L4_PER3_CLKCTRL, NULL, 0, "l3_iclk_div" },
	{ DRA7_L4PER3_TIMER13_CLKCTRL, dra7_timer13_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:00b4:24" },
	{ DRA7_L4PER3_TIMER14_CLKCTRL, dra7_timer14_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:00bc:24" },
	{ DRA7_L4PER3_TIMER15_CLKCTRL, dra7_timer15_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:00c4:24" },
	{ DRA7_L4PER3_TIMER16_CLKCTRL, dra7_timer16_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:011c:24" },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_gpio1_bit_data[] __initconst = {
	{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_timer1_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_bit_data dra7_uart10_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
	{ 0 },
};

static const char * const dra7_dcan1_sys_clk_mux_parents[] __initconst = {
	"sys_clkin1",
	"sys_clkin2",
	NULL,
};

static const struct omap_clkctrl_bit_data dra7_dcan1_bit_data[] __initconst = {
	{ 24, TI_CLK_MUX, dra7_dcan1_sys_clk_mux_parents, NULL },
	{ 0 },
};

static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initconst = {
	{ DRA7_WKUPAON_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
	{ DRA7_WKUPAON_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
	{ DRA7_WKUPAON_GPIO1_CLKCTRL, dra7_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
	{ DRA7_WKUPAON_TIMER1_CLKCTRL, dra7_timer1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0020:24" },
	{ DRA7_WKUPAON_TIMER12_CLKCTRL, NULL, CLKF_SOC_NONSEC, "secure_32k_clk_src_ck" },
	{ DRA7_WKUPAON_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
	{ DRA7_WKUPAON_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0060:24" },
	{ DRA7_WKUPAON_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0068:24" },
	{ DRA7_WKUPAON_ADC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SOC_DRA76, "mcan_clk" },
	{ 0 },
};

/* Top-level table: clkctrl register base address -> per-domain register data */
const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = {
	{ 0x4a005320, dra7_mpu_clkctrl_regs },
	{ 0x4a005420, dra7_dsp1_clkctrl_regs },
	{ 0x4a005520, dra7_ipu1_clkctrl_regs },
	{ 0x4a005550, dra7_ipu_clkctrl_regs },
	{ 0x4a005620, dra7_dsp2_clkctrl_regs },
	{ 0x4a005720, dra7_rtc_clkctrl_regs },
	{ 0x4a005760, dra7_vpe_clkctrl_regs },
	{ 0x4a008620, dra7_coreaon_clkctrl_regs },
	{ 0x4a008720, dra7_l3main1_clkctrl_regs },
	{ 0x4a008920, dra7_ipu2_clkctrl_regs },
	{ 0x4a008a20, dra7_dma_clkctrl_regs },
	{ 0x4a008b20, dra7_emif_clkctrl_regs },
	{ 0x4a008c00, dra7_atl_clkctrl_regs },
	{ 0x4a008d20, dra7_l4cfg_clkctrl_regs },
	{ 0x4a008e20, dra7_l3instr_clkctrl_regs },
	{ 0x4a008f20, dra7_iva_clkctrl_regs },
	{ 0x4a009020, dra7_cam_clkctrl_regs },
	{ 0x4a009120, dra7_dss_clkctrl_regs },
	{ 0x4a009220, dra7_gpu_clkctrl_regs },
	{ 0x4a009320, dra7_l3init_clkctrl_regs },
	{ 0x4a0093b0, dra7_pcie_clkctrl_regs },
	{ 0x4a0093d0, dra7_gmac_clkctrl_regs },
	{ 0x4a009728, dra7_l4per_clkctrl_regs },
	{ 0x4a0098a0, dra7_l4sec_clkctrl_regs },
	{ 0x4a00970c, dra7_l4per2_clkctrl_regs },
	{ 0x4a009714, dra7_l4per3_clkctrl_regs },
	{ 0x4ae07820, dra7_wkupaon_clkctrl_regs },
	{ 0 },
};

/* Legacy con_id -> clkctrl clock name aliases */
static struct ti_dt_clk dra7xx_clks[] = {
	DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
	DT_CLK(NULL, "sys_clkin_ck", "timer_sys_clk_div"),
	DT_CLK(NULL, "sys_clkin", "sys_clkin1"),
	DT_CLK(NULL, "atl_dpll_clk_mux", "atl-clkctrl:0000:24"),
	DT_CLK(NULL, "atl_gfclk_mux", "atl-clkctrl:0000:26"),
	DT_CLK(NULL, "dcan1_sys_clk_mux", "wkupaon-clkctrl:0068:24"),
	DT_CLK(NULL, "dss_32khz_clk", "dss-clkctrl:0000:11"),
	DT_CLK(NULL, "dss_48mhz_clk", "dss-clkctrl:0000:9"),
	DT_CLK(NULL, "dss_dss_clk", "dss-clkctrl:0000:8"),
	DT_CLK(NULL, "dss_hdmi_clk", "dss-clkctrl:0000:10"),
	DT_CLK(NULL, "dss_video1_clk", "dss-clkctrl:0000:12"),
	DT_CLK(NULL, "dss_video2_clk", "dss-clkctrl:0000:13"),
	DT_CLK(NULL, "gmac_rft_clk_mux", "gmac-clkctrl:0000:25"),
	DT_CLK(NULL, "gpio1_dbclk", "wkupaon-clkctrl:0018:8"),
	DT_CLK(NULL, "gpio2_dbclk", "l4per-clkctrl:0038:8"),
	DT_CLK(NULL, "gpio3_dbclk", "l4per-clkctrl:0040:8"),
	DT_CLK(NULL, "gpio4_dbclk", "l4per-clkctrl:0048:8"),
	DT_CLK(NULL, "gpio5_dbclk", "l4per-clkctrl:0050:8"),
	DT_CLK(NULL, "gpio6_dbclk", "l4per-clkctrl:0058:8"),
	DT_CLK(NULL, "gpio7_dbclk", "l4per-clkctrl:00e8:8"),
	DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f0:8"),
	DT_CLK(NULL, "ipu1_gfclk_mux", "ipu1-clkctrl:0000:24"),
	DT_CLK(NULL, "mcasp1_ahclkr_mux", "ipu-clkctrl:0000:28"),
	DT_CLK(NULL, "mcasp1_ahclkx_mux", "ipu-clkctrl:0000:24"),
	DT_CLK(NULL, "mcasp1_aux_gfclk_mux", "ipu-clkctrl:0000:22"),
	DT_CLK(NULL, "mcasp2_ahclkr_mux", "l4per2-clkctrl:0154:28"),
	DT_CLK(NULL, "mcasp2_ahclkx_mux", "l4per2-clkctrl:0154:24"),
	DT_CLK(NULL, "mcasp2_aux_gfclk_mux", "l4per2-clkctrl:0154:22"),
	DT_CLK(NULL, "mcasp3_ahclkx_mux", "l4per2-clkctrl:015c:24"),
	DT_CLK(NULL, "mcasp3_aux_gfclk_mux", "l4per2-clkctrl:015c:22"),
	DT_CLK(NULL, "mcasp4_ahclkx_mux", "l4per2-clkctrl:018c:24"),
	DT_CLK(NULL, "mcasp4_aux_gfclk_mux", "l4per2-clkctrl:018c:22"),
	DT_CLK(NULL, "mcasp5_ahclkx_mux", "l4per2-clkctrl:016c:24"),
	DT_CLK(NULL, "mcasp5_aux_gfclk_mux", "l4per2-clkctrl:016c:22"),
	DT_CLK(NULL, "mcasp6_ahclkx_mux", "l4per2-clkctrl:01f8:24"),
	DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per2-clkctrl:01f8:22"),
	DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per2-clkctrl:01fc:24"),
	DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per2-clkctrl:01fc:22"),
	DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:24"),
	DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:22"),
	DT_CLK(NULL, "mmc1_clk32k", "l3init-clkctrl:0008:8"),
	DT_CLK(NULL, "mmc1_fclk_div", "l3init-clkctrl:0008:25"),
	DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
	DT_CLK(NULL, "mmc2_clk32k", "l3init-clkctrl:0010:8"),
	DT_CLK(NULL, "mmc2_fclk_div", "l3init-clkctrl:0010:25"),
	DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"),
	DT_CLK(NULL, "mmc3_clk32k", "l4per-clkctrl:00f8:8"),
	DT_CLK(NULL, "mmc3_gfclk_div", "l4per-clkctrl:00f8:25"),
	DT_CLK(NULL, "mmc3_gfclk_mux", "l4per-clkctrl:00f8:24"),
	DT_CLK(NULL, "mmc4_clk32k", "l4per-clkctrl:0100:8"),
	DT_CLK(NULL, "mmc4_gfclk_div", "l4per-clkctrl:0100:25"),
	DT_CLK(NULL, "mmc4_gfclk_mux", "l4per-clkctrl:0100:24"),
	DT_CLK(NULL, "optfclk_pciephy1_32khz", "pcie-clkctrl:0000:8"),
	DT_CLK(NULL, "optfclk_pciephy1_clk", "pcie-clkctrl:0000:9"),
	DT_CLK(NULL, "optfclk_pciephy1_div_clk", "pcie-clkctrl:0000:10"),
	DT_CLK(NULL, "optfclk_pciephy2_32khz", "pcie-clkctrl:0008:8"),
	DT_CLK(NULL, "optfclk_pciephy2_clk", "pcie-clkctrl:0008:9"),
	DT_CLK(NULL, "optfclk_pciephy2_div_clk", "pcie-clkctrl:0008:10"),
	DT_CLK(NULL, "qspi_gfclk_div", "l4per2-clkctrl:012c:25"),
	DT_CLK(NULL, "qspi_gfclk_mux", "l4per2-clkctrl:012c:24"),
	DT_CLK(NULL, "rmii_50mhz_clk_mux", "gmac-clkctrl:0000:24"),
	DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"),
	DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0000:24"),
	DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0008:24"),
	DT_CLK(NULL, "timer13_gfclk_mux", "l4per3-clkctrl:00b4:24"),
	DT_CLK(NULL, "timer14_gfclk_mux", "l4per3-clkctrl:00bc:24"),
	DT_CLK(NULL, "timer15_gfclk_mux", "l4per3-clkctrl:00c4:24"),
	DT_CLK(NULL, "timer16_gfclk_mux", "l4per3-clkctrl:011c:24"),
	DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon-clkctrl:0020:24"),
	DT_CLK(NULL, "timer2_gfclk_mux", "l4per-clkctrl:0010:24"),
	DT_CLK(NULL, "timer3_gfclk_mux", "l4per-clkctrl:0018:24"),
	DT_CLK(NULL, "timer4_gfclk_mux", "l4per-clkctrl:0020:24"),
	DT_CLK(NULL, "timer5_gfclk_mux", "ipu-clkctrl:0008:24"),
	DT_CLK(NULL, "timer6_gfclk_mux", "ipu-clkctrl:0010:24"),
	DT_CLK(NULL, "timer7_gfclk_mux", "ipu-clkctrl:0018:24"),
	DT_CLK(NULL, "timer8_gfclk_mux", "ipu-clkctrl:0020:24"),
	DT_CLK(NULL, "timer9_gfclk_mux", "l4per-clkctrl:0028:24"),
	DT_CLK(NULL,
"uart10_gfclk_mux", "wkupaon-clkctrl:0060:24"), DT_CLK(NULL, "uart1_gfclk_mux", "l4per-clkctrl:0118:24"), DT_CLK(NULL, "uart2_gfclk_mux", "l4per-clkctrl:0120:24"), DT_CLK(NULL, "uart3_gfclk_mux", "l4per-clkctrl:0128:24"), DT_CLK(NULL, "uart4_gfclk_mux", "l4per-clkctrl:0130:24"), DT_CLK(NULL, "uart5_gfclk_mux", "l4per-clkctrl:0148:24"), DT_CLK(NULL, "uart6_gfclk_mux", "ipu-clkctrl:0030:24"), DT_CLK(NULL, "uart7_gfclk_mux", "l4per2-clkctrl:01c4:24"), DT_CLK(NULL, "uart8_gfclk_mux", "l4per2-clkctrl:01d4:24"), DT_CLK(NULL, "uart9_gfclk_mux", "l4per2-clkctrl:01dc:24"), DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l3init-clkctrl:00d0:8"), DT_CLK(NULL, "usb_otg_ss2_refclk960m", "l3init-clkctrl:0020:8"), { .node_name = NULL }, }; int __init dra7xx_dt_clk_init(void) { int rc; struct clk *dpll_ck, *hdcp_ck; ti_dt_clocks_register(dra7xx_clks); omap2_clk_disable_autoidle_all(); ti_clk_add_aliases(); dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck"); rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ); if (rc) pr_err("%s: failed to configure GMAC DPLL!\n", __func__); dpll_ck = clk_get_sys(NULL, "dpll_usb_ck"); rc = clk_set_rate(dpll_ck, DRA7_DPLL_USB_DEFFREQ); if (rc) pr_err("%s: failed to configure USB DPLL!\n", __func__); dpll_ck = clk_get_sys(NULL, "dpll_usb_m2_ck"); rc = clk_set_rate(dpll_ck, DRA7_DPLL_USB_DEFFREQ/2); if (rc) pr_err("%s: failed to set USB_DPLL M2 OUT\n", __func__); hdcp_ck = clk_get_sys(NULL, "dss_deshdcp_clk"); rc = clk_prepare_enable(hdcp_ck); if (rc) pr_err("%s: failed to set dss_deshdcp_clk\n", __func__); return rc; }
// SPDX-License-Identifier: GPL-2.0
/*
 * An API to allow a function, that may fail, to be executed, and recover in a
 * controlled manner.
 *
 * Copyright (C) 2019, Google LLC.
 * Author: Brendan Higgins <[email protected]>
 */

#include <kunit/test.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>

#include "try-catch-impl.h"

/*
 * Abort the currently-running "try" body. Sets the sentinel -EFAULT in
 * try_result (recognised later by kunit_try_catch_run() as "thrown") and
 * terminates the try thread; never returns to the caller.
 */
void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch)
{
	try_catch->try_result = -EFAULT;
	kthread_exit(0);
}
EXPORT_SYMBOL_GPL(kunit_try_catch_throw);

/*
 * Thread body that runs the user-supplied try() callback.
 *
 * -EINTR is pre-loaded into try_result so that if the thread dies inside
 * try() (oops, BUG, etc.) without reaching the lines below, the parent can
 * distinguish "faulted mid-try" from a clean completion. If try() returns
 * normally and nothing overwrote the sentinel, it is cleared back to 0.
 */
static int kunit_generic_run_threadfn_adapter(void *data)
{
	struct kunit_try_catch *try_catch = data;

	try_catch->try_result = -EINTR;
	try_catch->try(try_catch->context);
	if (try_catch->try_result == -EINTR)
		try_catch->try_result = 0;

	return 0;
}

/* Upper bound, in jiffies, that kunit_try_catch_run() waits for the try
 * thread before declaring a timeout. Currently a fixed 5 minutes. */
static unsigned long kunit_test_timeout(void)
{
	/*
	 * TODO([email protected]): We should probably have some type of
	 * variable timeout here. The only question is what that timeout value
	 * should be.
	 *
	 * The intention has always been, at some point, to be able to label
	 * tests with some type of size bucket (unit/small, integration/medium,
	 * large/system/end-to-end, etc), where each size bucket would get a
	 * default timeout value kind of like what Bazel does:
	 * https://docs.bazel.build/versions/master/be/common-definitions.html#test.size
	 * There is still some debate to be had on exactly how we do this. (For
	 * one, we probably want to have some sort of test runner level
	 * timeout.)
	 *
	 * For more background on this topic, see:
	 * https://mike-bland.com/2011/11/01/small-medium-large.html
	 *
	 * If tests timeout due to exceeding sysctl_hung_task_timeout_secs,
	 * the task will be killed and an oops generated.
	 */
	return 300 * msecs_to_jiffies(MSEC_PER_SEC); /* 5 min */
}

/*
 * Run try_catch->try(context) in a fresh kernel thread, then invoke
 * try_catch->catch(context) if the try body failed, faulted, threw
 * (kunit_try_catch_throw()), or timed out. On clean success, catch() is
 * not called.
 */
void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
{
	struct kunit *test = try_catch->test;
	struct task_struct *task_struct;
	struct completion *task_done;
	int exit_code, time_remaining;

	try_catch->context = context;
	try_catch->try_result = 0;
	task_struct = kthread_create(kunit_generic_run_threadfn_adapter,
				     try_catch, "kunit_try_catch_thread");
	if (IS_ERR(task_struct)) {
		/* Could not even create the thread: report the error
		 * straight to the catch handler. */
		try_catch->try_result = PTR_ERR(task_struct);
		try_catch->catch(try_catch->context);
		return;
	}
	get_task_struct(task_struct);
	/*
	 * As for a vfork(2), task_struct->vfork_done (pointing to the
	 * underlying kthread->exited) can be used to wait for the end of a
	 * kernel thread. It is set to NULL when the thread exits, so we
	 * keep a copy here.
	 */
	task_done = task_struct->vfork_done;
	wake_up_process(task_struct);

	time_remaining = wait_for_completion_timeout(task_done,
						     kunit_test_timeout());
	if (time_remaining == 0) {
		try_catch->try_result = -ETIMEDOUT;
		kthread_stop(task_struct);
	}

	put_task_struct(task_struct);
	exit_code = try_catch->try_result;

	if (!exit_code)
		return;

	/*
	 * Decode the sentinel protocol: -EFAULT means the try body threw
	 * deliberately (expected, result cleared); -EINTR means it died
	 * before finishing (see the threadfn adapter above).
	 */
	if (exit_code == -EFAULT)
		try_catch->try_result = 0;
	else if (exit_code == -EINTR) {
		if (test->last_seen.file)
			kunit_err(test, "try faulted: last line seen %s:%d\n",
				  test->last_seen.file, test->last_seen.line);
		else
			kunit_err(test, "try faulted\n");
	} else if (exit_code == -ETIMEDOUT)
		kunit_err(test, "try timed out\n");
	else if (exit_code)
		kunit_err(test, "Unknown error: %d\n", exit_code);

	try_catch->catch(try_catch->context);
}
EXPORT_SYMBOL_GPL(kunit_try_catch_run);
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/ife.c Inter-FE action based on ForCES WG InterFE LFB * * Refer to: * draft-ietf-forces-interfelfb-03 * and * netdev01 paper: * "Distributing Linux Traffic Control Classifier-Action * Subsystem" * Authors: Jamal Hadi Salim and Damascene M. Joachimpillai * * copyright Jamal Hadi Salim (2015) */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/module.h> #include <linux/init.h> #include <net/net_namespace.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> #include <uapi/linux/tc_act/tc_ife.h> #include <net/tc_act/tc_ife.h> #include <linux/etherdevice.h> #include <net/ife.h> #include <net/tc_wrapper.h> static int max_metacnt = IFE_META_MAX + 1; static struct tc_action_ops act_ife_ops; static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = { [TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)}, [TCA_IFE_DMAC] = { .len = ETH_ALEN}, [TCA_IFE_SMAC] = { .len = ETH_ALEN}, [TCA_IFE_TYPE] = { .type = NLA_U16}, }; int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi) { u16 edata = 0; if (mi->metaval) edata = *(u16 *)mi->metaval; else if (metaval) edata = metaval; if (!edata) /* will not encode */ return 0; edata = htons(edata); return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata); } EXPORT_SYMBOL_GPL(ife_encode_meta_u16); int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi) { if (mi->metaval) return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval); else return nla_put(skb, mi->metaid, 0, NULL); } EXPORT_SYMBOL_GPL(ife_get_meta_u32); int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi) { if (metaval || mi->metaval) return 8; /* T+L+V == 2+2+4 */ return 0; } EXPORT_SYMBOL_GPL(ife_check_meta_u32); int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi) { if (metaval || mi->metaval) return 8; /* T+L+(V) == 
2+2+(2+2bytepad) */ return 0; } EXPORT_SYMBOL_GPL(ife_check_meta_u16); int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi) { u32 edata = metaval; if (mi->metaval) edata = *(u32 *)mi->metaval; else if (metaval) edata = metaval; if (!edata) /* will not encode */ return 0; edata = htonl(edata); return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata); } EXPORT_SYMBOL_GPL(ife_encode_meta_u32); int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi) { if (mi->metaval) return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval); else return nla_put(skb, mi->metaid, 0, NULL); } EXPORT_SYMBOL_GPL(ife_get_meta_u16); int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp) { mi->metaval = kmemdup(metaval, sizeof(u32), gfp); if (!mi->metaval) return -ENOMEM; return 0; } EXPORT_SYMBOL_GPL(ife_alloc_meta_u32); int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp) { mi->metaval = kmemdup(metaval, sizeof(u16), gfp); if (!mi->metaval) return -ENOMEM; return 0; } EXPORT_SYMBOL_GPL(ife_alloc_meta_u16); void ife_release_meta_gen(struct tcf_meta_info *mi) { kfree(mi->metaval); } EXPORT_SYMBOL_GPL(ife_release_meta_gen); int ife_validate_meta_u32(void *val, int len) { if (len == sizeof(u32)) return 0; return -EINVAL; } EXPORT_SYMBOL_GPL(ife_validate_meta_u32); int ife_validate_meta_u16(void *val, int len) { /* length will not include padding */ if (len == sizeof(u16)) return 0; return -EINVAL; } EXPORT_SYMBOL_GPL(ife_validate_meta_u16); static LIST_HEAD(ifeoplist); static DEFINE_RWLOCK(ife_mod_lock); static struct tcf_meta_ops *find_ife_oplist(u16 metaid) { struct tcf_meta_ops *o; read_lock(&ife_mod_lock); list_for_each_entry(o, &ifeoplist, list) { if (o->metaid == metaid) { if (!try_module_get(o->owner)) o = NULL; read_unlock(&ife_mod_lock); return o; } } read_unlock(&ife_mod_lock); return NULL; } int register_ife_op(struct tcf_meta_ops *mops) { struct tcf_meta_ops *m; if (!mops->metaid || !mops->metatype || 
!mops->name || !mops->check_presence || !mops->encode || !mops->decode || !mops->get || !mops->alloc) return -EINVAL; write_lock(&ife_mod_lock); list_for_each_entry(m, &ifeoplist, list) { if (m->metaid == mops->metaid || (strcmp(mops->name, m->name) == 0)) { write_unlock(&ife_mod_lock); return -EEXIST; } } if (!mops->release) mops->release = ife_release_meta_gen; list_add_tail(&mops->list, &ifeoplist); write_unlock(&ife_mod_lock); return 0; } EXPORT_SYMBOL_GPL(unregister_ife_op); int unregister_ife_op(struct tcf_meta_ops *mops) { struct tcf_meta_ops *m; int err = -ENOENT; write_lock(&ife_mod_lock); list_for_each_entry(m, &ifeoplist, list) { if (m->metaid == mops->metaid) { list_del(&mops->list); err = 0; break; } } write_unlock(&ife_mod_lock); return err; } EXPORT_SYMBOL_GPL(register_ife_op); static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len) { int ret = 0; /* XXX: unfortunately cant use nla_policy at this point * because a length of 0 is valid in the case of * "allow". "use" semantics do enforce for proper * length and i couldve use nla_policy but it makes it hard * to use it just for that.. 
*/ if (ops->validate) return ops->validate(val, len); if (ops->metatype == NLA_U32) ret = ife_validate_meta_u32(val, len); else if (ops->metatype == NLA_U16) ret = ife_validate_meta_u16(val, len); return ret; } #ifdef CONFIG_MODULES static const char *ife_meta_id2name(u32 metaid) { switch (metaid) { case IFE_META_SKBMARK: return "skbmark"; case IFE_META_PRIO: return "skbprio"; case IFE_META_TCINDEX: return "tcindex"; default: return "unknown"; } } #endif /* called when adding new meta information */ static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held) { struct tcf_meta_ops *ops = find_ife_oplist(metaid); int ret = 0; if (!ops) { ret = -ENOENT; #ifdef CONFIG_MODULES if (rtnl_held) rtnl_unlock(); request_module("ife-meta-%s", ife_meta_id2name(metaid)); if (rtnl_held) rtnl_lock(); ops = find_ife_oplist(metaid); #endif } if (ops) { ret = 0; if (len) ret = ife_validate_metatype(ops, val, len); module_put(ops->owner); } return ret; } /* called when adding new meta information */ static int __add_metainfo(const struct tcf_meta_ops *ops, struct tcf_ife_info *ife, u32 metaid, void *metaval, int len, bool atomic, bool exists) { struct tcf_meta_info *mi = NULL; int ret = 0; mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL); if (!mi) return -ENOMEM; mi->metaid = metaid; mi->ops = ops; if (len > 0) { ret = ops->alloc(mi, metaval, atomic ? 
GFP_ATOMIC : GFP_KERNEL); if (ret != 0) { kfree(mi); return ret; } } if (exists) spin_lock_bh(&ife->tcf_lock); list_add_tail(&mi->metalist, &ife->metalist); if (exists) spin_unlock_bh(&ife->tcf_lock); return ret; } static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops, struct tcf_ife_info *ife, u32 metaid, bool exists) { int ret; if (!try_module_get(ops->owner)) return -ENOENT; ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists); if (ret) module_put(ops->owner); return ret; } static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, int len, bool exists) { const struct tcf_meta_ops *ops = find_ife_oplist(metaid); int ret; if (!ops) return -ENOENT; ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists); if (ret) /*put back what find_ife_oplist took */ module_put(ops->owner); return ret; } static int use_all_metadata(struct tcf_ife_info *ife, bool exists) { struct tcf_meta_ops *o; int rc = 0; int installed = 0; read_lock(&ife_mod_lock); list_for_each_entry(o, &ifeoplist, list) { rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists); if (rc == 0) installed += 1; } read_unlock(&ife_mod_lock); if (installed) return 0; else return -EINVAL; } static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife) { struct tcf_meta_info *e; struct nlattr *nest; unsigned char *b = skb_tail_pointer(skb); int total_encoded = 0; /*can only happen on decode */ if (list_empty(&ife->metalist)) return 0; nest = nla_nest_start_noflag(skb, TCA_IFE_METALST); if (!nest) goto out_nlmsg_trim; list_for_each_entry(e, &ife->metalist, metalist) { if (!e->ops->get(skb, e)) total_encoded += 1; } if (!total_encoded) goto out_nlmsg_trim; nla_nest_end(skb, nest); return 0; out_nlmsg_trim: nlmsg_trim(skb, b); return -1; } /* under ife->tcf_lock */ static void _tcf_ife_cleanup(struct tc_action *a) { struct tcf_ife_info *ife = to_ife(a); struct tcf_meta_info *e, *n; list_for_each_entry_safe(e, n, &ife->metalist, metalist) { 
list_del(&e->metalist); if (e->metaval) { if (e->ops->release) e->ops->release(e); else kfree(e->metaval); } module_put(e->ops->owner); kfree(e); } } static void tcf_ife_cleanup(struct tc_action *a) { struct tcf_ife_info *ife = to_ife(a); struct tcf_ife_params *p; spin_lock_bh(&ife->tcf_lock); _tcf_ife_cleanup(a); spin_unlock_bh(&ife->tcf_lock); p = rcu_dereference_protected(ife->params, 1); if (p) kfree_rcu(p, rcu); } static int load_metalist(struct nlattr **tb, bool rtnl_held) { int i; for (i = 1; i < max_metacnt; i++) { if (tb[i]) { void *val = nla_data(tb[i]); int len = nla_len(tb[i]); int rc; rc = load_metaops_and_vet(i, val, len, rtnl_held); if (rc != 0) return rc; } } return 0; } static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, bool exists, bool rtnl_held) { int len = 0; int rc = 0; int i = 0; void *val; for (i = 1; i < max_metacnt; i++) { if (tb[i]) { val = nla_data(tb[i]); len = nla_len(tb[i]); rc = add_metainfo(ife, i, val, len, exists); if (rc) return rc; } } return rc; } static int tcf_ife_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, act_ife_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_IFE_MAX + 1]; struct nlattr *tb2[IFE_META_MAX + 1]; struct tcf_chain *goto_ch = NULL; struct tcf_ife_params *p; struct tcf_ife_info *ife; u16 ife_type = ETH_P_IFE; struct tc_ife *parm; u8 *daddr = NULL; u8 *saddr = NULL; bool exists = false; int ret = 0; u32 index; int err; if (!nla) { NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed"); return -EINVAL; } err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy, NULL); if (err < 0) return err; if (!tb[TCA_IFE_PARMS]) return -EINVAL; parm = nla_data(tb[TCA_IFE_PARMS]); /* IFE_DECODE is 0 and indicates the opposite of IFE_ENCODE because * they cannot run as the same time. 
Check on all other values which * are not supported right now. */ if (parm->flags & ~IFE_ENCODE) return -EINVAL; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; if (tb[TCA_IFE_METALST]) { err = nla_parse_nested_deprecated(tb2, IFE_META_MAX, tb[TCA_IFE_METALST], NULL, NULL); if (err) { kfree(p); return err; } err = load_metalist(tb2, !(flags & TCA_ACT_FLAGS_NO_RTNL)); if (err) { kfree(p); return err; } } index = parm->index; err = tcf_idr_check_alloc(tn, &index, a, bind); if (err < 0) { kfree(p); return err; } exists = err; if (exists && bind) { kfree(p); return ACT_P_BOUND; } if (!exists) { ret = tcf_idr_create(tn, index, est, a, &act_ife_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); kfree(p); return ret; } ret = ACT_P_CREATED; } else if (!(flags & TCA_ACT_FLAGS_REPLACE)) { tcf_idr_release(*a, bind); kfree(p); return -EEXIST; } ife = to_ife(*a); if (ret == ACT_P_CREATED) INIT_LIST_HEAD(&ife->metalist); err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; p->flags = parm->flags; if (parm->flags & IFE_ENCODE) { if (tb[TCA_IFE_TYPE]) ife_type = nla_get_u16(tb[TCA_IFE_TYPE]); if (tb[TCA_IFE_DMAC]) daddr = nla_data(tb[TCA_IFE_DMAC]); if (tb[TCA_IFE_SMAC]) saddr = nla_data(tb[TCA_IFE_SMAC]); } if (parm->flags & IFE_ENCODE) { if (daddr) ether_addr_copy(p->eth_dst, daddr); else eth_zero_addr(p->eth_dst); if (saddr) ether_addr_copy(p->eth_src, saddr); else eth_zero_addr(p->eth_src); p->eth_type = ife_type; } if (tb[TCA_IFE_METALST]) { err = populate_metalist(ife, tb2, exists, !(flags & TCA_ACT_FLAGS_NO_RTNL)); if (err) goto metadata_parse_err; } else { /* if no passed metadata allow list or passed allow-all * then here we process by adding as many supported metadatum * as we can. 
You better have at least one else we are * going to bail out */ err = use_all_metadata(ife, exists); if (err) goto metadata_parse_err; } if (exists) spin_lock_bh(&ife->tcf_lock); /* protected by tcf_lock when modifying existing action */ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); p = rcu_replace_pointer(ife->params, p, 1); if (exists) spin_unlock_bh(&ife->tcf_lock); if (goto_ch) tcf_chain_put_by_act(goto_ch); if (p) kfree_rcu(p, rcu); return ret; metadata_parse_err: if (goto_ch) tcf_chain_put_by_act(goto_ch); release_idr: kfree(p); tcf_idr_release(*a, bind); return err; } static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_ife_info *ife = to_ife(a); struct tcf_ife_params *p; struct tc_ife opt = { .index = ife->tcf_index, .refcnt = refcount_read(&ife->tcf_refcnt) - ref, .bindcnt = atomic_read(&ife->tcf_bindcnt) - bind, }; struct tcf_t t; spin_lock_bh(&ife->tcf_lock); opt.action = ife->tcf_action; p = rcu_dereference_protected(ife->params, lockdep_is_held(&ife->tcf_lock)); opt.flags = p->flags; if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt)) goto nla_put_failure; tcf_tm_dump(&t, &ife->tcf_tm); if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD)) goto nla_put_failure; if (!is_zero_ether_addr(p->eth_dst)) { if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, p->eth_dst)) goto nla_put_failure; } if (!is_zero_ether_addr(p->eth_src)) { if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, p->eth_src)) goto nla_put_failure; } if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type)) goto nla_put_failure; if (dump_metalist(skb, ife)) { /*ignore failure to dump metalist */ pr_info("Failed to dump metalist\n"); } spin_unlock_bh(&ife->tcf_lock); return skb->len; nla_put_failure: spin_unlock_bh(&ife->tcf_lock); nlmsg_trim(skb, b); return -1; } static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife, u16 metaid, u16 mlen, void *mdata) { struct tcf_meta_info *e; /* XXX: use 
hash to speed up */ list_for_each_entry(e, &ife->metalist, metalist) { if (metaid == e->metaid) { if (e->ops) { /* We check for decode presence already */ return e->ops->decode(skb, mdata, mlen); } } } return -ENOENT; } static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_ife_info *ife = to_ife(a); int action = ife->tcf_action; u8 *ifehdr_end; u8 *tlv_data; u16 metalen; bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb); tcf_lastuse_update(&ife->tcf_tm); if (skb_at_tc_ingress(skb)) skb_push(skb, skb->dev->hard_header_len); tlv_data = ife_decode(skb, &metalen); if (unlikely(!tlv_data)) { qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats)); return TC_ACT_SHOT; } ifehdr_end = tlv_data + metalen; for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) { u8 *curr_data; u16 mtype; u16 dlen; curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype, &dlen, NULL); if (!curr_data) { qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats)); return TC_ACT_SHOT; } if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) { /* abuse overlimits to count when we receive metadata * but dont have an ops for it */ pr_info_ratelimited("Unknown metaid %d dlen %d\n", mtype, dlen); qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats)); } } if (WARN_ON(tlv_data != ifehdr_end)) { qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats)); return TC_ACT_SHOT; } skb->protocol = eth_type_trans(skb, skb->dev); skb_reset_network_header(skb); return action; } /*XXX: check if we can do this at install time instead of current * send data path **/ static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife) { struct tcf_meta_info *e, *n; int tot_run_sz = 0, run_sz = 0; list_for_each_entry_safe(e, n, &ife->metalist, metalist) { if (e->ops->check_presence) { run_sz = e->ops->check_presence(skb, e); tot_run_sz += run_sz; } } return tot_run_sz; } static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action 
*a, struct tcf_result *res, struct tcf_ife_params *p) { struct tcf_ife_info *ife = to_ife(a); int action = ife->tcf_action; struct ethhdr *oethh; /* outer ether header */ struct tcf_meta_info *e; /* OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA where ORIGDATA = original ethernet header ... */ u16 metalen = ife_get_sz(skb, ife); int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN; unsigned int skboff = 0; int new_len = skb->len + hdrm; bool exceed_mtu = false; void *ife_meta; int err = 0; if (!skb_at_tc_ingress(skb)) { if (new_len > skb->dev->mtu) exceed_mtu = true; } bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb); tcf_lastuse_update(&ife->tcf_tm); if (!metalen) { /* no metadata to send */ /* abuse overlimits to count when we allow packet * with no metadata */ qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats)); return action; } /* could be stupid policy setup or mtu config * so lets be conservative.. */ if ((action == TC_ACT_SHOT) || exceed_mtu) { qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats)); return TC_ACT_SHOT; } if (skb_at_tc_ingress(skb)) skb_push(skb, skb->dev->hard_header_len); ife_meta = ife_encode(skb, metalen); spin_lock(&ife->tcf_lock); /* XXX: we dont have a clever way of telling encode to * not repeat some of the computations that are done by * ops->presence_check... 
*/ list_for_each_entry(e, &ife->metalist, metalist) { if (e->ops->encode) { err = e->ops->encode(skb, (void *)(ife_meta + skboff), e); } if (err < 0) { /* too corrupt to keep around if overwritten */ spin_unlock(&ife->tcf_lock); qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats)); return TC_ACT_SHOT; } skboff += err; } spin_unlock(&ife->tcf_lock); oethh = (struct ethhdr *)skb->data; if (!is_zero_ether_addr(p->eth_src)) ether_addr_copy(oethh->h_source, p->eth_src); if (!is_zero_ether_addr(p->eth_dst)) ether_addr_copy(oethh->h_dest, p->eth_dst); oethh->h_proto = htons(p->eth_type); if (skb_at_tc_ingress(skb)) skb_pull(skb, skb->dev->hard_header_len); return action; } TC_INDIRECT_SCOPE int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_ife_info *ife = to_ife(a); struct tcf_ife_params *p; int ret; p = rcu_dereference_bh(ife->params); if (p->flags & IFE_ENCODE) { ret = tcf_ife_encode(skb, a, res, p); return ret; } return tcf_ife_decode(skb, a, res); } static struct tc_action_ops act_ife_ops = { .kind = "ife", .id = TCA_ID_IFE, .owner = THIS_MODULE, .act = tcf_ife_act, .dump = tcf_ife_dump, .cleanup = tcf_ife_cleanup, .init = tcf_ife_init, .size = sizeof(struct tcf_ife_info), }; MODULE_ALIAS_NET_ACT("ife"); static __net_init int ife_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, act_ife_ops.net_id); return tc_action_net_init(net, tn, &act_ife_ops); } static void __net_exit ife_exit_net(struct list_head *net_list) { tc_action_net_exit(net_list, act_ife_ops.net_id); } static struct pernet_operations ife_net_ops = { .init = ife_init_net, .exit_batch = ife_exit_net, .id = &act_ife_ops.net_id, .size = sizeof(struct tc_action_net), }; static int __init ife_init_module(void) { return tcf_register_action(&act_ife_ops, &ife_net_ops); } static void __exit ife_cleanup_module(void) { tcf_unregister_action(&act_ife_ops, &ife_net_ops); } module_init(ife_init_module); module_exit(ife_cleanup_module); 
MODULE_AUTHOR("Jamal Hadi Salim(2015)"); MODULE_DESCRIPTION("Inter-FE LFB action"); MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-or-later /* * Zynq pin controller * * Copyright (C) 2014 Xilinx * * Sören Brinkmann <[email protected]> */ #include <linux/io.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/init.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/regmap.h> #include "pinctrl-utils.h" #include "core.h" #define ZYNQ_NUM_MIOS 54 #define ZYNQ_PCTRL_MIO_MST_TRI0 0x10c #define ZYNQ_PCTRL_MIO_MST_TRI1 0x110 #define ZYNQ_PINMUX_MUX_SHIFT 1 #define ZYNQ_PINMUX_MUX_MASK (0x7f << ZYNQ_PINMUX_MUX_SHIFT) /** * struct zynq_pinctrl - driver data * @pctrl: Pinctrl device * @syscon: Syscon regmap * @pctrl_offset: Offset for pinctrl into the @syscon space * @groups: Pingroups * @ngroups: Number of @groups * @funcs: Pinmux functions * @nfuncs: Number of @funcs */ struct zynq_pinctrl { struct pinctrl_dev *pctrl; struct regmap *syscon; u32 pctrl_offset; const struct zynq_pctrl_group *groups; unsigned int ngroups; const struct zynq_pinmux_function *funcs; unsigned int nfuncs; }; struct zynq_pctrl_group { const char *name; const unsigned int *pins; const unsigned int npins; }; /** * struct zynq_pinmux_function - a pinmux function * @name: Name of the pinmux function. * @groups: List of pingroups for this function. * @ngroups: Number of entries in @groups. 
* @mux_val: Selector for this function * @mux: Offset of function specific mux * @mux_mask: Mask for function specific selector * @mux_shift: Shift for function specific selector */ struct zynq_pinmux_function { const char *name; const char * const *groups; unsigned int ngroups; unsigned int mux_val; u32 mux; u32 mux_mask; u8 mux_shift; }; enum zynq_pinmux_functions { ZYNQ_PMUX_can0, ZYNQ_PMUX_can1, ZYNQ_PMUX_ethernet0, ZYNQ_PMUX_ethernet1, ZYNQ_PMUX_gpio0, ZYNQ_PMUX_i2c0, ZYNQ_PMUX_i2c1, ZYNQ_PMUX_mdio0, ZYNQ_PMUX_mdio1, ZYNQ_PMUX_qspi0, ZYNQ_PMUX_qspi1, ZYNQ_PMUX_qspi_fbclk, ZYNQ_PMUX_qspi_cs1, ZYNQ_PMUX_spi0, ZYNQ_PMUX_spi1, ZYNQ_PMUX_spi0_ss, ZYNQ_PMUX_spi1_ss, ZYNQ_PMUX_sdio0, ZYNQ_PMUX_sdio0_pc, ZYNQ_PMUX_sdio0_cd, ZYNQ_PMUX_sdio0_wp, ZYNQ_PMUX_sdio1, ZYNQ_PMUX_sdio1_pc, ZYNQ_PMUX_sdio1_cd, ZYNQ_PMUX_sdio1_wp, ZYNQ_PMUX_smc0_nor, ZYNQ_PMUX_smc0_nor_cs1, ZYNQ_PMUX_smc0_nor_addr25, ZYNQ_PMUX_smc0_nand, ZYNQ_PMUX_ttc0, ZYNQ_PMUX_ttc1, ZYNQ_PMUX_uart0, ZYNQ_PMUX_uart1, ZYNQ_PMUX_usb0, ZYNQ_PMUX_usb1, ZYNQ_PMUX_swdt0, ZYNQ_PMUX_MAX_FUNC }; static const struct pinctrl_pin_desc zynq_pins[] = { PINCTRL_PIN(0, "MIO0"), PINCTRL_PIN(1, "MIO1"), PINCTRL_PIN(2, "MIO2"), PINCTRL_PIN(3, "MIO3"), PINCTRL_PIN(4, "MIO4"), PINCTRL_PIN(5, "MIO5"), PINCTRL_PIN(6, "MIO6"), PINCTRL_PIN(7, "MIO7"), PINCTRL_PIN(8, "MIO8"), PINCTRL_PIN(9, "MIO9"), PINCTRL_PIN(10, "MIO10"), PINCTRL_PIN(11, "MIO11"), PINCTRL_PIN(12, "MIO12"), PINCTRL_PIN(13, "MIO13"), PINCTRL_PIN(14, "MIO14"), PINCTRL_PIN(15, "MIO15"), PINCTRL_PIN(16, "MIO16"), PINCTRL_PIN(17, "MIO17"), PINCTRL_PIN(18, "MIO18"), PINCTRL_PIN(19, "MIO19"), PINCTRL_PIN(20, "MIO20"), PINCTRL_PIN(21, "MIO21"), PINCTRL_PIN(22, "MIO22"), PINCTRL_PIN(23, "MIO23"), PINCTRL_PIN(24, "MIO24"), PINCTRL_PIN(25, "MIO25"), PINCTRL_PIN(26, "MIO26"), PINCTRL_PIN(27, "MIO27"), PINCTRL_PIN(28, "MIO28"), PINCTRL_PIN(29, "MIO29"), PINCTRL_PIN(30, "MIO30"), PINCTRL_PIN(31, "MIO31"), PINCTRL_PIN(32, "MIO32"), PINCTRL_PIN(33, "MIO33"), PINCTRL_PIN(34, "MIO34"), 
PINCTRL_PIN(35, "MIO35"), PINCTRL_PIN(36, "MIO36"), PINCTRL_PIN(37, "MIO37"), PINCTRL_PIN(38, "MIO38"), PINCTRL_PIN(39, "MIO39"), PINCTRL_PIN(40, "MIO40"), PINCTRL_PIN(41, "MIO41"), PINCTRL_PIN(42, "MIO42"), PINCTRL_PIN(43, "MIO43"), PINCTRL_PIN(44, "MIO44"), PINCTRL_PIN(45, "MIO45"), PINCTRL_PIN(46, "MIO46"), PINCTRL_PIN(47, "MIO47"), PINCTRL_PIN(48, "MIO48"), PINCTRL_PIN(49, "MIO49"), PINCTRL_PIN(50, "MIO50"), PINCTRL_PIN(51, "MIO51"), PINCTRL_PIN(52, "MIO52"), PINCTRL_PIN(53, "MIO53"), PINCTRL_PIN(54, "EMIO_SD0_WP"), PINCTRL_PIN(55, "EMIO_SD0_CD"), PINCTRL_PIN(56, "EMIO_SD1_WP"), PINCTRL_PIN(57, "EMIO_SD1_CD"), }; /* pin groups */ static const unsigned int ethernet0_0_pins[] = {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}; static const unsigned int ethernet1_0_pins[] = {28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}; static const unsigned int mdio0_0_pins[] = {52, 53}; static const unsigned int mdio1_0_pins[] = {52, 53}; static const unsigned int qspi0_0_pins[] = {1, 2, 3, 4, 5, 6}; static const unsigned int qspi1_0_pins[] = {9, 10, 11, 12, 13}; static const unsigned int qspi_cs1_pins[] = {0}; static const unsigned int qspi_fbclk_pins[] = {8}; static const unsigned int spi0_0_pins[] = {16, 17, 21}; static const unsigned int spi0_0_ss0_pins[] = {18}; static const unsigned int spi0_0_ss1_pins[] = {19}; static const unsigned int spi0_0_ss2_pins[] = {20,}; static const unsigned int spi0_1_pins[] = {28, 29, 33}; static const unsigned int spi0_1_ss0_pins[] = {30}; static const unsigned int spi0_1_ss1_pins[] = {31}; static const unsigned int spi0_1_ss2_pins[] = {32}; static const unsigned int spi0_2_pins[] = {40, 41, 45}; static const unsigned int spi0_2_ss0_pins[] = {42}; static const unsigned int spi0_2_ss1_pins[] = {43}; static const unsigned int spi0_2_ss2_pins[] = {44}; static const unsigned int spi1_0_pins[] = {10, 11, 12}; static const unsigned int spi1_0_ss0_pins[] = {13}; static const unsigned int spi1_0_ss1_pins[] = {14}; static const unsigned int 
spi1_0_ss2_pins[] = {15}; static const unsigned int spi1_1_pins[] = {22, 23, 24}; static const unsigned int spi1_1_ss0_pins[] = {25}; static const unsigned int spi1_1_ss1_pins[] = {26}; static const unsigned int spi1_1_ss2_pins[] = {27}; static const unsigned int spi1_2_pins[] = {34, 35, 36}; static const unsigned int spi1_2_ss0_pins[] = {37}; static const unsigned int spi1_2_ss1_pins[] = {38}; static const unsigned int spi1_2_ss2_pins[] = {39}; static const unsigned int spi1_3_pins[] = {46, 47, 48, 49}; static const unsigned int spi1_3_ss0_pins[] = {49}; static const unsigned int spi1_3_ss1_pins[] = {50}; static const unsigned int spi1_3_ss2_pins[] = {51}; static const unsigned int sdio0_0_pins[] = {16, 17, 18, 19, 20, 21}; static const unsigned int sdio0_1_pins[] = {28, 29, 30, 31, 32, 33}; static const unsigned int sdio0_2_pins[] = {40, 41, 42, 43, 44, 45}; static const unsigned int sdio1_0_pins[] = {10, 11, 12, 13, 14, 15}; static const unsigned int sdio1_1_pins[] = {22, 23, 24, 25, 26, 27}; static const unsigned int sdio1_2_pins[] = {34, 35, 36, 37, 38, 39}; static const unsigned int sdio1_3_pins[] = {46, 47, 48, 49, 50, 51}; static const unsigned int sdio0_emio_wp_pins[] = {54}; static const unsigned int sdio0_emio_cd_pins[] = {55}; static const unsigned int sdio1_emio_wp_pins[] = {56}; static const unsigned int sdio1_emio_cd_pins[] = {57}; static const unsigned int smc0_nor_pins[] = {0, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}; static const unsigned int smc0_nor_cs1_pins[] = {1}; static const unsigned int smc0_nor_addr25_pins[] = {1}; static const unsigned int smc0_nand_pins[] = {0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 23}; static const unsigned int smc0_nand8_pins[] = {0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; /* Note: CAN MIO clock inputs are modeled in the clock framework */ static const unsigned int can0_0_pins[] = {10, 
11}; static const unsigned int can0_1_pins[] = {14, 15}; static const unsigned int can0_2_pins[] = {18, 19}; static const unsigned int can0_3_pins[] = {22, 23}; static const unsigned int can0_4_pins[] = {26, 27}; static const unsigned int can0_5_pins[] = {30, 31}; static const unsigned int can0_6_pins[] = {34, 35}; static const unsigned int can0_7_pins[] = {38, 39}; static const unsigned int can0_8_pins[] = {42, 43}; static const unsigned int can0_9_pins[] = {46, 47}; static const unsigned int can0_10_pins[] = {50, 51}; static const unsigned int can1_0_pins[] = {8, 9}; static const unsigned int can1_1_pins[] = {12, 13}; static const unsigned int can1_2_pins[] = {16, 17}; static const unsigned int can1_3_pins[] = {20, 21}; static const unsigned int can1_4_pins[] = {24, 25}; static const unsigned int can1_5_pins[] = {28, 29}; static const unsigned int can1_6_pins[] = {32, 33}; static const unsigned int can1_7_pins[] = {36, 37}; static const unsigned int can1_8_pins[] = {40, 41}; static const unsigned int can1_9_pins[] = {44, 45}; static const unsigned int can1_10_pins[] = {48, 49}; static const unsigned int can1_11_pins[] = {52, 53}; static const unsigned int uart0_0_pins[] = {10, 11}; static const unsigned int uart0_1_pins[] = {14, 15}; static const unsigned int uart0_2_pins[] = {18, 19}; static const unsigned int uart0_3_pins[] = {22, 23}; static const unsigned int uart0_4_pins[] = {26, 27}; static const unsigned int uart0_5_pins[] = {30, 31}; static const unsigned int uart0_6_pins[] = {34, 35}; static const unsigned int uart0_7_pins[] = {38, 39}; static const unsigned int uart0_8_pins[] = {42, 43}; static const unsigned int uart0_9_pins[] = {46, 47}; static const unsigned int uart0_10_pins[] = {50, 51}; static const unsigned int uart1_0_pins[] = {8, 9}; static const unsigned int uart1_1_pins[] = {12, 13}; static const unsigned int uart1_2_pins[] = {16, 17}; static const unsigned int uart1_3_pins[] = {20, 21}; static const unsigned int uart1_4_pins[] = {24, 25}; 
static const unsigned int uart1_5_pins[] = {28, 29}; static const unsigned int uart1_6_pins[] = {32, 33}; static const unsigned int uart1_7_pins[] = {36, 37}; static const unsigned int uart1_8_pins[] = {40, 41}; static const unsigned int uart1_9_pins[] = {44, 45}; static const unsigned int uart1_10_pins[] = {48, 49}; static const unsigned int uart1_11_pins[] = {52, 53}; static const unsigned int i2c0_0_pins[] = {10, 11}; static const unsigned int i2c0_1_pins[] = {14, 15}; static const unsigned int i2c0_2_pins[] = {18, 19}; static const unsigned int i2c0_3_pins[] = {22, 23}; static const unsigned int i2c0_4_pins[] = {26, 27}; static const unsigned int i2c0_5_pins[] = {30, 31}; static const unsigned int i2c0_6_pins[] = {34, 35}; static const unsigned int i2c0_7_pins[] = {38, 39}; static const unsigned int i2c0_8_pins[] = {42, 43}; static const unsigned int i2c0_9_pins[] = {46, 47}; static const unsigned int i2c0_10_pins[] = {50, 51}; static const unsigned int i2c1_0_pins[] = {12, 13}; static const unsigned int i2c1_1_pins[] = {16, 17}; static const unsigned int i2c1_2_pins[] = {20, 21}; static const unsigned int i2c1_3_pins[] = {24, 25}; static const unsigned int i2c1_4_pins[] = {28, 29}; static const unsigned int i2c1_5_pins[] = {32, 33}; static const unsigned int i2c1_6_pins[] = {36, 37}; static const unsigned int i2c1_7_pins[] = {40, 41}; static const unsigned int i2c1_8_pins[] = {44, 45}; static const unsigned int i2c1_9_pins[] = {48, 49}; static const unsigned int i2c1_10_pins[] = {52, 53}; static const unsigned int ttc0_0_pins[] = {18, 19}; static const unsigned int ttc0_1_pins[] = {30, 31}; static const unsigned int ttc0_2_pins[] = {42, 43}; static const unsigned int ttc1_0_pins[] = {16, 17}; static const unsigned int ttc1_1_pins[] = {28, 29}; static const unsigned int ttc1_2_pins[] = {40, 41}; static const unsigned int swdt0_0_pins[] = {14, 15}; static const unsigned int swdt0_1_pins[] = {26, 27}; static const unsigned int swdt0_2_pins[] = {38, 39}; static 
const unsigned int swdt0_3_pins[] = {50, 51}; static const unsigned int swdt0_4_pins[] = {52, 53}; static const unsigned int gpio0_0_pins[] = {0}; static const unsigned int gpio0_1_pins[] = {1}; static const unsigned int gpio0_2_pins[] = {2}; static const unsigned int gpio0_3_pins[] = {3}; static const unsigned int gpio0_4_pins[] = {4}; static const unsigned int gpio0_5_pins[] = {5}; static const unsigned int gpio0_6_pins[] = {6}; static const unsigned int gpio0_7_pins[] = {7}; static const unsigned int gpio0_8_pins[] = {8}; static const unsigned int gpio0_9_pins[] = {9}; static const unsigned int gpio0_10_pins[] = {10}; static const unsigned int gpio0_11_pins[] = {11}; static const unsigned int gpio0_12_pins[] = {12}; static const unsigned int gpio0_13_pins[] = {13}; static const unsigned int gpio0_14_pins[] = {14}; static const unsigned int gpio0_15_pins[] = {15}; static const unsigned int gpio0_16_pins[] = {16}; static const unsigned int gpio0_17_pins[] = {17}; static const unsigned int gpio0_18_pins[] = {18}; static const unsigned int gpio0_19_pins[] = {19}; static const unsigned int gpio0_20_pins[] = {20}; static const unsigned int gpio0_21_pins[] = {21}; static const unsigned int gpio0_22_pins[] = {22}; static const unsigned int gpio0_23_pins[] = {23}; static const unsigned int gpio0_24_pins[] = {24}; static const unsigned int gpio0_25_pins[] = {25}; static const unsigned int gpio0_26_pins[] = {26}; static const unsigned int gpio0_27_pins[] = {27}; static const unsigned int gpio0_28_pins[] = {28}; static const unsigned int gpio0_29_pins[] = {29}; static const unsigned int gpio0_30_pins[] = {30}; static const unsigned int gpio0_31_pins[] = {31}; static const unsigned int gpio0_32_pins[] = {32}; static const unsigned int gpio0_33_pins[] = {33}; static const unsigned int gpio0_34_pins[] = {34}; static const unsigned int gpio0_35_pins[] = {35}; static const unsigned int gpio0_36_pins[] = {36}; static const unsigned int gpio0_37_pins[] = {37}; static const 
unsigned int gpio0_38_pins[] = {38}; static const unsigned int gpio0_39_pins[] = {39}; static const unsigned int gpio0_40_pins[] = {40}; static const unsigned int gpio0_41_pins[] = {41}; static const unsigned int gpio0_42_pins[] = {42}; static const unsigned int gpio0_43_pins[] = {43}; static const unsigned int gpio0_44_pins[] = {44}; static const unsigned int gpio0_45_pins[] = {45}; static const unsigned int gpio0_46_pins[] = {46}; static const unsigned int gpio0_47_pins[] = {47}; static const unsigned int gpio0_48_pins[] = {48}; static const unsigned int gpio0_49_pins[] = {49}; static const unsigned int gpio0_50_pins[] = {50}; static const unsigned int gpio0_51_pins[] = {51}; static const unsigned int gpio0_52_pins[] = {52}; static const unsigned int gpio0_53_pins[] = {53}; static const unsigned int usb0_0_pins[] = {28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}; static const unsigned int usb1_0_pins[] = {40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51}; #define DEFINE_ZYNQ_PINCTRL_GRP(nm) \ { \ .name = #nm "_grp", \ .pins = nm ## _pins, \ .npins = ARRAY_SIZE(nm ## _pins), \ } static const struct zynq_pctrl_group zynq_pctrl_groups[] = { DEFINE_ZYNQ_PINCTRL_GRP(ethernet0_0), DEFINE_ZYNQ_PINCTRL_GRP(ethernet1_0), DEFINE_ZYNQ_PINCTRL_GRP(mdio0_0), DEFINE_ZYNQ_PINCTRL_GRP(mdio1_0), DEFINE_ZYNQ_PINCTRL_GRP(qspi0_0), DEFINE_ZYNQ_PINCTRL_GRP(qspi1_0), DEFINE_ZYNQ_PINCTRL_GRP(qspi_fbclk), DEFINE_ZYNQ_PINCTRL_GRP(qspi_cs1), DEFINE_ZYNQ_PINCTRL_GRP(spi0_0), DEFINE_ZYNQ_PINCTRL_GRP(spi0_0_ss0), DEFINE_ZYNQ_PINCTRL_GRP(spi0_0_ss1), DEFINE_ZYNQ_PINCTRL_GRP(spi0_0_ss2), DEFINE_ZYNQ_PINCTRL_GRP(spi0_1), DEFINE_ZYNQ_PINCTRL_GRP(spi0_1_ss0), DEFINE_ZYNQ_PINCTRL_GRP(spi0_1_ss1), DEFINE_ZYNQ_PINCTRL_GRP(spi0_1_ss2), DEFINE_ZYNQ_PINCTRL_GRP(spi0_2), DEFINE_ZYNQ_PINCTRL_GRP(spi0_2_ss0), DEFINE_ZYNQ_PINCTRL_GRP(spi0_2_ss1), DEFINE_ZYNQ_PINCTRL_GRP(spi0_2_ss2), DEFINE_ZYNQ_PINCTRL_GRP(spi1_0), DEFINE_ZYNQ_PINCTRL_GRP(spi1_0_ss0), DEFINE_ZYNQ_PINCTRL_GRP(spi1_0_ss1), 
DEFINE_ZYNQ_PINCTRL_GRP(spi1_0_ss2), DEFINE_ZYNQ_PINCTRL_GRP(spi1_1), DEFINE_ZYNQ_PINCTRL_GRP(spi1_1_ss0), DEFINE_ZYNQ_PINCTRL_GRP(spi1_1_ss1), DEFINE_ZYNQ_PINCTRL_GRP(spi1_1_ss2), DEFINE_ZYNQ_PINCTRL_GRP(spi1_2), DEFINE_ZYNQ_PINCTRL_GRP(spi1_2_ss0), DEFINE_ZYNQ_PINCTRL_GRP(spi1_2_ss1), DEFINE_ZYNQ_PINCTRL_GRP(spi1_2_ss2), DEFINE_ZYNQ_PINCTRL_GRP(spi1_3), DEFINE_ZYNQ_PINCTRL_GRP(spi1_3_ss0), DEFINE_ZYNQ_PINCTRL_GRP(spi1_3_ss1), DEFINE_ZYNQ_PINCTRL_GRP(spi1_3_ss2), DEFINE_ZYNQ_PINCTRL_GRP(sdio0_0), DEFINE_ZYNQ_PINCTRL_GRP(sdio0_1), DEFINE_ZYNQ_PINCTRL_GRP(sdio0_2), DEFINE_ZYNQ_PINCTRL_GRP(sdio1_0), DEFINE_ZYNQ_PINCTRL_GRP(sdio1_1), DEFINE_ZYNQ_PINCTRL_GRP(sdio1_2), DEFINE_ZYNQ_PINCTRL_GRP(sdio1_3), DEFINE_ZYNQ_PINCTRL_GRP(sdio0_emio_wp), DEFINE_ZYNQ_PINCTRL_GRP(sdio0_emio_cd), DEFINE_ZYNQ_PINCTRL_GRP(sdio1_emio_wp), DEFINE_ZYNQ_PINCTRL_GRP(sdio1_emio_cd), DEFINE_ZYNQ_PINCTRL_GRP(smc0_nor), DEFINE_ZYNQ_PINCTRL_GRP(smc0_nor_cs1), DEFINE_ZYNQ_PINCTRL_GRP(smc0_nor_addr25), DEFINE_ZYNQ_PINCTRL_GRP(smc0_nand), DEFINE_ZYNQ_PINCTRL_GRP(smc0_nand8), DEFINE_ZYNQ_PINCTRL_GRP(can0_0), DEFINE_ZYNQ_PINCTRL_GRP(can0_1), DEFINE_ZYNQ_PINCTRL_GRP(can0_2), DEFINE_ZYNQ_PINCTRL_GRP(can0_3), DEFINE_ZYNQ_PINCTRL_GRP(can0_4), DEFINE_ZYNQ_PINCTRL_GRP(can0_5), DEFINE_ZYNQ_PINCTRL_GRP(can0_6), DEFINE_ZYNQ_PINCTRL_GRP(can0_7), DEFINE_ZYNQ_PINCTRL_GRP(can0_8), DEFINE_ZYNQ_PINCTRL_GRP(can0_9), DEFINE_ZYNQ_PINCTRL_GRP(can0_10), DEFINE_ZYNQ_PINCTRL_GRP(can1_0), DEFINE_ZYNQ_PINCTRL_GRP(can1_1), DEFINE_ZYNQ_PINCTRL_GRP(can1_2), DEFINE_ZYNQ_PINCTRL_GRP(can1_3), DEFINE_ZYNQ_PINCTRL_GRP(can1_4), DEFINE_ZYNQ_PINCTRL_GRP(can1_5), DEFINE_ZYNQ_PINCTRL_GRP(can1_6), DEFINE_ZYNQ_PINCTRL_GRP(can1_7), DEFINE_ZYNQ_PINCTRL_GRP(can1_8), DEFINE_ZYNQ_PINCTRL_GRP(can1_9), DEFINE_ZYNQ_PINCTRL_GRP(can1_10), DEFINE_ZYNQ_PINCTRL_GRP(can1_11), DEFINE_ZYNQ_PINCTRL_GRP(uart0_0), DEFINE_ZYNQ_PINCTRL_GRP(uart0_1), DEFINE_ZYNQ_PINCTRL_GRP(uart0_2), DEFINE_ZYNQ_PINCTRL_GRP(uart0_3), DEFINE_ZYNQ_PINCTRL_GRP(uart0_4), 
DEFINE_ZYNQ_PINCTRL_GRP(uart0_5), DEFINE_ZYNQ_PINCTRL_GRP(uart0_6), DEFINE_ZYNQ_PINCTRL_GRP(uart0_7), DEFINE_ZYNQ_PINCTRL_GRP(uart0_8), DEFINE_ZYNQ_PINCTRL_GRP(uart0_9), DEFINE_ZYNQ_PINCTRL_GRP(uart0_10), DEFINE_ZYNQ_PINCTRL_GRP(uart1_0), DEFINE_ZYNQ_PINCTRL_GRP(uart1_1), DEFINE_ZYNQ_PINCTRL_GRP(uart1_2), DEFINE_ZYNQ_PINCTRL_GRP(uart1_3), DEFINE_ZYNQ_PINCTRL_GRP(uart1_4), DEFINE_ZYNQ_PINCTRL_GRP(uart1_5), DEFINE_ZYNQ_PINCTRL_GRP(uart1_6), DEFINE_ZYNQ_PINCTRL_GRP(uart1_7), DEFINE_ZYNQ_PINCTRL_GRP(uart1_8), DEFINE_ZYNQ_PINCTRL_GRP(uart1_9), DEFINE_ZYNQ_PINCTRL_GRP(uart1_10), DEFINE_ZYNQ_PINCTRL_GRP(uart1_11), DEFINE_ZYNQ_PINCTRL_GRP(i2c0_0), DEFINE_ZYNQ_PINCTRL_GRP(i2c0_1), DEFINE_ZYNQ_PINCTRL_GRP(i2c0_2), DEFINE_ZYNQ_PINCTRL_GRP(i2c0_3), DEFINE_ZYNQ_PINCTRL_GRP(i2c0_4), DEFINE_ZYNQ_PINCTRL_GRP(i2c0_5), DEFINE_ZYNQ_PINCTRL_GRP(i2c0_6), DEFINE_ZYNQ_PINCTRL_GRP(i2c0_7), DEFINE_ZYNQ_PINCTRL_GRP(i2c0_8), DEFINE_ZYNQ_PINCTRL_GRP(i2c0_9), DEFINE_ZYNQ_PINCTRL_GRP(i2c0_10), DEFINE_ZYNQ_PINCTRL_GRP(i2c1_0), DEFINE_ZYNQ_PINCTRL_GRP(i2c1_1), DEFINE_ZYNQ_PINCTRL_GRP(i2c1_2), DEFINE_ZYNQ_PINCTRL_GRP(i2c1_3), DEFINE_ZYNQ_PINCTRL_GRP(i2c1_4), DEFINE_ZYNQ_PINCTRL_GRP(i2c1_5), DEFINE_ZYNQ_PINCTRL_GRP(i2c1_6), DEFINE_ZYNQ_PINCTRL_GRP(i2c1_7), DEFINE_ZYNQ_PINCTRL_GRP(i2c1_8), DEFINE_ZYNQ_PINCTRL_GRP(i2c1_9), DEFINE_ZYNQ_PINCTRL_GRP(i2c1_10), DEFINE_ZYNQ_PINCTRL_GRP(ttc0_0), DEFINE_ZYNQ_PINCTRL_GRP(ttc0_1), DEFINE_ZYNQ_PINCTRL_GRP(ttc0_2), DEFINE_ZYNQ_PINCTRL_GRP(ttc1_0), DEFINE_ZYNQ_PINCTRL_GRP(ttc1_1), DEFINE_ZYNQ_PINCTRL_GRP(ttc1_2), DEFINE_ZYNQ_PINCTRL_GRP(swdt0_0), DEFINE_ZYNQ_PINCTRL_GRP(swdt0_1), DEFINE_ZYNQ_PINCTRL_GRP(swdt0_2), DEFINE_ZYNQ_PINCTRL_GRP(swdt0_3), DEFINE_ZYNQ_PINCTRL_GRP(swdt0_4), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_0), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_1), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_2), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_3), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_4), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_5), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_6), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_7), 
DEFINE_ZYNQ_PINCTRL_GRP(gpio0_8), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_9), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_10), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_11), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_12), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_13), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_14), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_15), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_16), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_17), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_18), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_19), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_20), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_21), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_22), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_23), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_24), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_25), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_26), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_27), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_28), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_29), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_30), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_31), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_32), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_33), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_34), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_35), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_36), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_37), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_38), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_39), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_40), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_41), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_42), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_43), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_44), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_45), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_46), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_47), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_48), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_49), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_50), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_51), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_52), DEFINE_ZYNQ_PINCTRL_GRP(gpio0_53), DEFINE_ZYNQ_PINCTRL_GRP(usb0_0), DEFINE_ZYNQ_PINCTRL_GRP(usb1_0), }; /* function groups */ static const char * const ethernet0_groups[] = {"ethernet0_0_grp"}; static const char * const ethernet1_groups[] = {"ethernet1_0_grp"}; static const char * const usb0_groups[] = {"usb0_0_grp"}; static const char * const usb1_groups[] = {"usb1_0_grp"}; static const char * const mdio0_groups[] = 
{"mdio0_0_grp"}; static const char * const mdio1_groups[] = {"mdio1_0_grp"}; static const char * const qspi0_groups[] = {"qspi0_0_grp"}; static const char * const qspi1_groups[] = {"qspi1_0_grp"}; static const char * const qspi_fbclk_groups[] = {"qspi_fbclk_grp"}; static const char * const qspi_cs1_groups[] = {"qspi_cs1_grp"}; static const char * const spi0_groups[] = {"spi0_0_grp", "spi0_1_grp", "spi0_2_grp"}; static const char * const spi1_groups[] = {"spi1_0_grp", "spi1_1_grp", "spi1_2_grp", "spi1_3_grp"}; static const char * const spi0_ss_groups[] = {"spi0_0_ss0_grp", "spi0_0_ss1_grp", "spi0_0_ss2_grp", "spi0_1_ss0_grp", "spi0_1_ss1_grp", "spi0_1_ss2_grp", "spi0_2_ss0_grp", "spi0_2_ss1_grp", "spi0_2_ss2_grp"}; static const char * const spi1_ss_groups[] = {"spi1_0_ss0_grp", "spi1_0_ss1_grp", "spi1_0_ss2_grp", "spi1_1_ss0_grp", "spi1_1_ss1_grp", "spi1_1_ss2_grp", "spi1_2_ss0_grp", "spi1_2_ss1_grp", "spi1_2_ss2_grp", "spi1_3_ss0_grp", "spi1_3_ss1_grp", "spi1_3_ss2_grp"}; static const char * const sdio0_groups[] = {"sdio0_0_grp", "sdio0_1_grp", "sdio0_2_grp"}; static const char * const sdio1_groups[] = {"sdio1_0_grp", "sdio1_1_grp", "sdio1_2_grp", "sdio1_3_grp"}; static const char * const sdio0_pc_groups[] = {"gpio0_0_grp", "gpio0_2_grp", "gpio0_4_grp", "gpio0_6_grp", "gpio0_8_grp", "gpio0_10_grp", "gpio0_12_grp", "gpio0_14_grp", "gpio0_16_grp", "gpio0_18_grp", "gpio0_20_grp", "gpio0_22_grp", "gpio0_24_grp", "gpio0_26_grp", "gpio0_28_grp", "gpio0_30_grp", "gpio0_32_grp", "gpio0_34_grp", "gpio0_36_grp", "gpio0_38_grp", "gpio0_40_grp", "gpio0_42_grp", "gpio0_44_grp", "gpio0_46_grp", "gpio0_48_grp", "gpio0_50_grp", "gpio0_52_grp"}; static const char * const sdio1_pc_groups[] = {"gpio0_1_grp", "gpio0_3_grp", "gpio0_5_grp", "gpio0_7_grp", "gpio0_9_grp", "gpio0_11_grp", "gpio0_13_grp", "gpio0_15_grp", "gpio0_17_grp", "gpio0_19_grp", "gpio0_21_grp", "gpio0_23_grp", "gpio0_25_grp", "gpio0_27_grp", "gpio0_29_grp", "gpio0_31_grp", "gpio0_33_grp", "gpio0_35_grp", 
"gpio0_37_grp", "gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp", "gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp", "gpio0_51_grp", "gpio0_53_grp"}; static const char * const sdio0_cd_groups[] = {"gpio0_0_grp", "gpio0_2_grp", "gpio0_4_grp", "gpio0_6_grp", "gpio0_10_grp", "gpio0_12_grp", "gpio0_14_grp", "gpio0_16_grp", "gpio0_18_grp", "gpio0_20_grp", "gpio0_22_grp", "gpio0_24_grp", "gpio0_26_grp", "gpio0_28_grp", "gpio0_30_grp", "gpio0_32_grp", "gpio0_34_grp", "gpio0_36_grp", "gpio0_38_grp", "gpio0_40_grp", "gpio0_42_grp", "gpio0_44_grp", "gpio0_46_grp", "gpio0_48_grp", "gpio0_50_grp", "gpio0_52_grp", "gpio0_1_grp", "gpio0_3_grp", "gpio0_5_grp", "gpio0_9_grp", "gpio0_11_grp", "gpio0_13_grp", "gpio0_15_grp", "gpio0_17_grp", "gpio0_19_grp", "gpio0_21_grp", "gpio0_23_grp", "gpio0_25_grp", "gpio0_27_grp", "gpio0_29_grp", "gpio0_31_grp", "gpio0_33_grp", "gpio0_35_grp", "gpio0_37_grp", "gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp", "gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp", "gpio0_51_grp", "gpio0_53_grp", "sdio0_emio_cd_grp"}; static const char * const sdio0_wp_groups[] = {"gpio0_0_grp", "gpio0_2_grp", "gpio0_4_grp", "gpio0_6_grp", "gpio0_10_grp", "gpio0_12_grp", "gpio0_14_grp", "gpio0_16_grp", "gpio0_18_grp", "gpio0_20_grp", "gpio0_22_grp", "gpio0_24_grp", "gpio0_26_grp", "gpio0_28_grp", "gpio0_30_grp", "gpio0_32_grp", "gpio0_34_grp", "gpio0_36_grp", "gpio0_38_grp", "gpio0_40_grp", "gpio0_42_grp", "gpio0_44_grp", "gpio0_46_grp", "gpio0_48_grp", "gpio0_50_grp", "gpio0_52_grp", "gpio0_1_grp", "gpio0_3_grp", "gpio0_5_grp", "gpio0_9_grp", "gpio0_11_grp", "gpio0_13_grp", "gpio0_15_grp", "gpio0_17_grp", "gpio0_19_grp", "gpio0_21_grp", "gpio0_23_grp", "gpio0_25_grp", "gpio0_27_grp", "gpio0_29_grp", "gpio0_31_grp", "gpio0_33_grp", "gpio0_35_grp", "gpio0_37_grp", "gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp", "gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp", "gpio0_51_grp", "gpio0_53_grp", "sdio0_emio_wp_grp"}; static const char * const sdio1_cd_groups[] = {"gpio0_0_grp", 
"gpio0_2_grp", "gpio0_4_grp", "gpio0_6_grp", "gpio0_10_grp", "gpio0_12_grp", "gpio0_14_grp", "gpio0_16_grp", "gpio0_18_grp", "gpio0_20_grp", "gpio0_22_grp", "gpio0_24_grp", "gpio0_26_grp", "gpio0_28_grp", "gpio0_30_grp", "gpio0_32_grp", "gpio0_34_grp", "gpio0_36_grp", "gpio0_38_grp", "gpio0_40_grp", "gpio0_42_grp", "gpio0_44_grp", "gpio0_46_grp", "gpio0_48_grp", "gpio0_50_grp", "gpio0_52_grp", "gpio0_1_grp", "gpio0_3_grp", "gpio0_5_grp", "gpio0_9_grp", "gpio0_11_grp", "gpio0_13_grp", "gpio0_15_grp", "gpio0_17_grp", "gpio0_19_grp", "gpio0_21_grp", "gpio0_23_grp", "gpio0_25_grp", "gpio0_27_grp", "gpio0_29_grp", "gpio0_31_grp", "gpio0_33_grp", "gpio0_35_grp", "gpio0_37_grp", "gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp", "gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp", "gpio0_51_grp", "gpio0_53_grp", "sdio1_emio_cd_grp"}; static const char * const sdio1_wp_groups[] = {"gpio0_0_grp", "gpio0_2_grp", "gpio0_4_grp", "gpio0_6_grp", "gpio0_10_grp", "gpio0_12_grp", "gpio0_14_grp", "gpio0_16_grp", "gpio0_18_grp", "gpio0_20_grp", "gpio0_22_grp", "gpio0_24_grp", "gpio0_26_grp", "gpio0_28_grp", "gpio0_30_grp", "gpio0_32_grp", "gpio0_34_grp", "gpio0_36_grp", "gpio0_38_grp", "gpio0_40_grp", "gpio0_42_grp", "gpio0_44_grp", "gpio0_46_grp", "gpio0_48_grp", "gpio0_50_grp", "gpio0_52_grp", "gpio0_1_grp", "gpio0_3_grp", "gpio0_5_grp", "gpio0_9_grp", "gpio0_11_grp", "gpio0_13_grp", "gpio0_15_grp", "gpio0_17_grp", "gpio0_19_grp", "gpio0_21_grp", "gpio0_23_grp", "gpio0_25_grp", "gpio0_27_grp", "gpio0_29_grp", "gpio0_31_grp", "gpio0_33_grp", "gpio0_35_grp", "gpio0_37_grp", "gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp", "gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp", "gpio0_51_grp", "gpio0_53_grp", "sdio1_emio_wp_grp"}; static const char * const smc0_nor_groups[] = {"smc0_nor_grp"}; static const char * const smc0_nor_cs1_groups[] = {"smc0_nor_cs1_grp"}; static const char * const smc0_nor_addr25_groups[] = {"smc0_nor_addr25_grp"}; static const char * const smc0_nand_groups[] = 
{"smc0_nand_grp", "smc0_nand8_grp"}; static const char * const can0_groups[] = {"can0_0_grp", "can0_1_grp", "can0_2_grp", "can0_3_grp", "can0_4_grp", "can0_5_grp", "can0_6_grp", "can0_7_grp", "can0_8_grp", "can0_9_grp", "can0_10_grp"}; static const char * const can1_groups[] = {"can1_0_grp", "can1_1_grp", "can1_2_grp", "can1_3_grp", "can1_4_grp", "can1_5_grp", "can1_6_grp", "can1_7_grp", "can1_8_grp", "can1_9_grp", "can1_10_grp", "can1_11_grp"}; static const char * const uart0_groups[] = {"uart0_0_grp", "uart0_1_grp", "uart0_2_grp", "uart0_3_grp", "uart0_4_grp", "uart0_5_grp", "uart0_6_grp", "uart0_7_grp", "uart0_8_grp", "uart0_9_grp", "uart0_10_grp"}; static const char * const uart1_groups[] = {"uart1_0_grp", "uart1_1_grp", "uart1_2_grp", "uart1_3_grp", "uart1_4_grp", "uart1_5_grp", "uart1_6_grp", "uart1_7_grp", "uart1_8_grp", "uart1_9_grp", "uart1_10_grp", "uart1_11_grp"}; static const char * const i2c0_groups[] = {"i2c0_0_grp", "i2c0_1_grp", "i2c0_2_grp", "i2c0_3_grp", "i2c0_4_grp", "i2c0_5_grp", "i2c0_6_grp", "i2c0_7_grp", "i2c0_8_grp", "i2c0_9_grp", "i2c0_10_grp"}; static const char * const i2c1_groups[] = {"i2c1_0_grp", "i2c1_1_grp", "i2c1_2_grp", "i2c1_3_grp", "i2c1_4_grp", "i2c1_5_grp", "i2c1_6_grp", "i2c1_7_grp", "i2c1_8_grp", "i2c1_9_grp", "i2c1_10_grp"}; static const char * const ttc0_groups[] = {"ttc0_0_grp", "ttc0_1_grp", "ttc0_2_grp"}; static const char * const ttc1_groups[] = {"ttc1_0_grp", "ttc1_1_grp", "ttc1_2_grp"}; static const char * const swdt0_groups[] = {"swdt0_0_grp", "swdt0_1_grp", "swdt0_2_grp", "swdt0_3_grp", "swdt0_4_grp"}; static const char * const gpio0_groups[] = {"gpio0_0_grp", "gpio0_2_grp", "gpio0_4_grp", "gpio0_6_grp", "gpio0_8_grp", "gpio0_10_grp", "gpio0_12_grp", "gpio0_14_grp", "gpio0_16_grp", "gpio0_18_grp", "gpio0_20_grp", "gpio0_22_grp", "gpio0_24_grp", "gpio0_26_grp", "gpio0_28_grp", "gpio0_30_grp", "gpio0_32_grp", "gpio0_34_grp", "gpio0_36_grp", "gpio0_38_grp", "gpio0_40_grp", "gpio0_42_grp", "gpio0_44_grp", 
"gpio0_46_grp", "gpio0_48_grp", "gpio0_50_grp", "gpio0_52_grp", "gpio0_1_grp", "gpio0_3_grp", "gpio0_5_grp", "gpio0_7_grp", "gpio0_9_grp", "gpio0_11_grp", "gpio0_13_grp", "gpio0_15_grp", "gpio0_17_grp", "gpio0_19_grp", "gpio0_21_grp", "gpio0_23_grp", "gpio0_25_grp", "gpio0_27_grp", "gpio0_29_grp", "gpio0_31_grp", "gpio0_33_grp", "gpio0_35_grp", "gpio0_37_grp", "gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp", "gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp", "gpio0_51_grp", "gpio0_53_grp"}; #define DEFINE_ZYNQ_PINMUX_FUNCTION(fname, mval) \ [ZYNQ_PMUX_##fname] = { \ .name = #fname, \ .groups = fname##_groups, \ .ngroups = ARRAY_SIZE(fname##_groups), \ .mux_val = mval, \ } #define DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(fname, mval, offset, mask, shift)\ [ZYNQ_PMUX_##fname] = { \ .name = #fname, \ .groups = fname##_groups, \ .ngroups = ARRAY_SIZE(fname##_groups), \ .mux_val = mval, \ .mux = offset, \ .mux_mask = mask, \ .mux_shift = shift, \ } #define ZYNQ_SDIO_WP_SHIFT 0 #define ZYNQ_SDIO_WP_MASK (0x3f << ZYNQ_SDIO_WP_SHIFT) #define ZYNQ_SDIO_CD_SHIFT 16 #define ZYNQ_SDIO_CD_MASK (0x3f << ZYNQ_SDIO_CD_SHIFT) static const struct zynq_pinmux_function zynq_pmux_functions[] = { DEFINE_ZYNQ_PINMUX_FUNCTION(ethernet0, 1), DEFINE_ZYNQ_PINMUX_FUNCTION(ethernet1, 1), DEFINE_ZYNQ_PINMUX_FUNCTION(usb0, 2), DEFINE_ZYNQ_PINMUX_FUNCTION(usb1, 2), DEFINE_ZYNQ_PINMUX_FUNCTION(mdio0, 0x40), DEFINE_ZYNQ_PINMUX_FUNCTION(mdio1, 0x50), DEFINE_ZYNQ_PINMUX_FUNCTION(qspi0, 1), DEFINE_ZYNQ_PINMUX_FUNCTION(qspi1, 1), DEFINE_ZYNQ_PINMUX_FUNCTION(qspi_fbclk, 1), DEFINE_ZYNQ_PINMUX_FUNCTION(qspi_cs1, 1), DEFINE_ZYNQ_PINMUX_FUNCTION(spi0, 0x50), DEFINE_ZYNQ_PINMUX_FUNCTION(spi1, 0x50), DEFINE_ZYNQ_PINMUX_FUNCTION(spi0_ss, 0x50), DEFINE_ZYNQ_PINMUX_FUNCTION(spi1_ss, 0x50), DEFINE_ZYNQ_PINMUX_FUNCTION(sdio0, 0x40), DEFINE_ZYNQ_PINMUX_FUNCTION(sdio0_pc, 0xc), DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_wp, 0, 0x130, ZYNQ_SDIO_WP_MASK, ZYNQ_SDIO_WP_SHIFT), DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_cd, 0, 0x130, 
ZYNQ_SDIO_CD_MASK, ZYNQ_SDIO_CD_SHIFT), DEFINE_ZYNQ_PINMUX_FUNCTION(sdio1, 0x40), DEFINE_ZYNQ_PINMUX_FUNCTION(sdio1_pc, 0xc), DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_wp, 0, 0x134, ZYNQ_SDIO_WP_MASK, ZYNQ_SDIO_WP_SHIFT), DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_cd, 0, 0x134, ZYNQ_SDIO_CD_MASK, ZYNQ_SDIO_CD_SHIFT), DEFINE_ZYNQ_PINMUX_FUNCTION(smc0_nor, 4), DEFINE_ZYNQ_PINMUX_FUNCTION(smc0_nor_cs1, 8), DEFINE_ZYNQ_PINMUX_FUNCTION(smc0_nor_addr25, 4), DEFINE_ZYNQ_PINMUX_FUNCTION(smc0_nand, 8), DEFINE_ZYNQ_PINMUX_FUNCTION(can0, 0x10), DEFINE_ZYNQ_PINMUX_FUNCTION(can1, 0x10), DEFINE_ZYNQ_PINMUX_FUNCTION(uart0, 0x70), DEFINE_ZYNQ_PINMUX_FUNCTION(uart1, 0x70), DEFINE_ZYNQ_PINMUX_FUNCTION(i2c0, 0x20), DEFINE_ZYNQ_PINMUX_FUNCTION(i2c1, 0x20), DEFINE_ZYNQ_PINMUX_FUNCTION(ttc0, 0x60), DEFINE_ZYNQ_PINMUX_FUNCTION(ttc1, 0x60), DEFINE_ZYNQ_PINMUX_FUNCTION(swdt0, 0x30), DEFINE_ZYNQ_PINMUX_FUNCTION(gpio0, 0), }; /* pinctrl */ static int zynq_pctrl_get_groups_count(struct pinctrl_dev *pctldev) { struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); return pctrl->ngroups; } static const char *zynq_pctrl_get_group_name(struct pinctrl_dev *pctldev, unsigned int selector) { struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); return pctrl->groups[selector].name; } static int zynq_pctrl_get_group_pins(struct pinctrl_dev *pctldev, unsigned int selector, const unsigned int **pins, unsigned int *num_pins) { struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); *pins = pctrl->groups[selector].pins; *num_pins = pctrl->groups[selector].npins; return 0; } static const struct pinctrl_ops zynq_pctrl_ops = { .get_groups_count = zynq_pctrl_get_groups_count, .get_group_name = zynq_pctrl_get_group_name, .get_group_pins = zynq_pctrl_get_group_pins, .dt_node_to_map = pinconf_generic_dt_node_to_map_all, .dt_free_map = pinctrl_utils_free_map, }; /* pinmux */ static int zynq_pmux_get_functions_count(struct pinctrl_dev *pctldev) { struct zynq_pinctrl *pctrl = 
pinctrl_dev_get_drvdata(pctldev); return pctrl->nfuncs; } static const char *zynq_pmux_get_function_name(struct pinctrl_dev *pctldev, unsigned int selector) { struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); return pctrl->funcs[selector].name; } static int zynq_pmux_get_function_groups(struct pinctrl_dev *pctldev, unsigned int selector, const char * const **groups, unsigned * const num_groups) { struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); *groups = pctrl->funcs[selector].groups; *num_groups = pctrl->funcs[selector].ngroups; return 0; } static int zynq_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, unsigned int group) { int i, ret; struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); const struct zynq_pctrl_group *pgrp = &pctrl->groups[group]; const struct zynq_pinmux_function *func = &pctrl->funcs[function]; /* * SD WP & CD are special. They have dedicated registers * to mux them in */ if (function == ZYNQ_PMUX_sdio0_cd || function == ZYNQ_PMUX_sdio0_wp || function == ZYNQ_PMUX_sdio1_cd || function == ZYNQ_PMUX_sdio1_wp) { u32 reg; ret = regmap_read(pctrl->syscon, pctrl->pctrl_offset + func->mux, &reg); if (ret) return ret; reg &= ~func->mux_mask; reg |= pgrp->pins[0] << func->mux_shift; ret = regmap_write(pctrl->syscon, pctrl->pctrl_offset + func->mux, reg); if (ret) return ret; } else { for (i = 0; i < pgrp->npins; i++) { unsigned int pin = pgrp->pins[i]; u32 reg, addr = pctrl->pctrl_offset + (4 * pin); ret = regmap_read(pctrl->syscon, addr, &reg); if (ret) return ret; reg &= ~ZYNQ_PINMUX_MUX_MASK; reg |= func->mux_val << ZYNQ_PINMUX_MUX_SHIFT; ret = regmap_write(pctrl->syscon, addr, reg); if (ret) return ret; } } return 0; } static const struct pinmux_ops zynq_pinmux_ops = { .get_functions_count = zynq_pmux_get_functions_count, .get_function_name = zynq_pmux_get_function_name, .get_function_groups = zynq_pmux_get_function_groups, .set_mux = zynq_pinmux_set_mux, }; /* pinconfig */ #define 
ZYNQ_PINCONF_TRISTATE BIT(0) #define ZYNQ_PINCONF_SPEED BIT(8) #define ZYNQ_PINCONF_PULLUP BIT(12) #define ZYNQ_PINCONF_DISABLE_RECVR BIT(13) #define ZYNQ_PINCONF_IOTYPE_SHIFT 9 #define ZYNQ_PINCONF_IOTYPE_MASK (7 << ZYNQ_PINCONF_IOTYPE_SHIFT) enum zynq_io_standards { zynq_iostd_min, zynq_iostd_lvcmos18, zynq_iostd_lvcmos25, zynq_iostd_lvcmos33, zynq_iostd_hstl, zynq_iostd_max }; /* * PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to * this parameter (on a custom format) tells the driver which alternative * IO standard to use. */ #define PIN_CONFIG_IOSTANDARD (PIN_CONFIG_END + 1) static const struct pinconf_generic_params zynq_dt_params[] = { {"io-standard", PIN_CONFIG_IOSTANDARD, zynq_iostd_lvcmos18}, }; #ifdef CONFIG_DEBUG_FS static const struct pin_config_item zynq_conf_items[ARRAY_SIZE(zynq_dt_params)] = { PCONFDUMP(PIN_CONFIG_IOSTANDARD, "IO-standard", NULL, true), }; #endif static unsigned int zynq_pinconf_iostd_get(u32 reg) { return (reg & ZYNQ_PINCONF_IOTYPE_MASK) >> ZYNQ_PINCONF_IOTYPE_SHIFT; } static int zynq_pinconf_cfg_get(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *config) { u32 reg; int ret; unsigned int arg = 0; unsigned int param = pinconf_to_config_param(*config); struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); if (pin >= ZYNQ_NUM_MIOS) return -ENOTSUPP; ret = regmap_read(pctrl->syscon, pctrl->pctrl_offset + (4 * pin), &reg); if (ret) return -EIO; switch (param) { case PIN_CONFIG_BIAS_PULL_UP: if (!(reg & ZYNQ_PINCONF_PULLUP)) return -EINVAL; arg = 1; break; case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: if (!(reg & ZYNQ_PINCONF_TRISTATE)) return -EINVAL; arg = 1; break; case PIN_CONFIG_BIAS_DISABLE: if (reg & ZYNQ_PINCONF_PULLUP || reg & ZYNQ_PINCONF_TRISTATE) return -EINVAL; break; case PIN_CONFIG_SLEW_RATE: arg = !!(reg & ZYNQ_PINCONF_SPEED); break; case PIN_CONFIG_MODE_LOW_POWER: { enum zynq_io_standards iostd = zynq_pinconf_iostd_get(reg); if (iostd != zynq_iostd_hstl) return -EINVAL; if (!(reg 
& ZYNQ_PINCONF_DISABLE_RECVR)) return -EINVAL; arg = !!(reg & ZYNQ_PINCONF_DISABLE_RECVR); break; } case PIN_CONFIG_IOSTANDARD: case PIN_CONFIG_POWER_SOURCE: arg = zynq_pinconf_iostd_get(reg); break; default: return -ENOTSUPP; } *config = pinconf_to_config_packed(param, arg); return 0; } static int zynq_pinconf_cfg_set(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *configs, unsigned int num_configs) { int i, ret; u32 reg; u32 pullup = 0; u32 tristate = 0; struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); if (pin >= ZYNQ_NUM_MIOS) return -ENOTSUPP; ret = regmap_read(pctrl->syscon, pctrl->pctrl_offset + (4 * pin), &reg); if (ret) return -EIO; for (i = 0; i < num_configs; i++) { unsigned int param = pinconf_to_config_param(configs[i]); unsigned int arg = pinconf_to_config_argument(configs[i]); switch (param) { case PIN_CONFIG_BIAS_PULL_UP: pullup = ZYNQ_PINCONF_PULLUP; break; case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: tristate = ZYNQ_PINCONF_TRISTATE; break; case PIN_CONFIG_BIAS_DISABLE: reg &= ~(ZYNQ_PINCONF_PULLUP | ZYNQ_PINCONF_TRISTATE); break; case PIN_CONFIG_SLEW_RATE: if (arg) reg |= ZYNQ_PINCONF_SPEED; else reg &= ~ZYNQ_PINCONF_SPEED; break; case PIN_CONFIG_IOSTANDARD: case PIN_CONFIG_POWER_SOURCE: if (arg <= zynq_iostd_min || arg >= zynq_iostd_max) { dev_warn(pctldev->dev, "unsupported IO standard '%u'\n", param); break; } reg &= ~ZYNQ_PINCONF_IOTYPE_MASK; reg |= arg << ZYNQ_PINCONF_IOTYPE_SHIFT; break; case PIN_CONFIG_MODE_LOW_POWER: if (arg) reg |= ZYNQ_PINCONF_DISABLE_RECVR; else reg &= ~ZYNQ_PINCONF_DISABLE_RECVR; break; default: dev_warn(pctldev->dev, "unsupported configuration parameter '%u'\n", param); continue; } } if (tristate || pullup) { reg &= ~(ZYNQ_PINCONF_PULLUP | ZYNQ_PINCONF_TRISTATE); reg |= tristate | pullup; } ret = regmap_write(pctrl->syscon, pctrl->pctrl_offset + (4 * pin), reg); if (ret) return -EIO; return 0; } static int zynq_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned int selector, unsigned long 
*configs, unsigned int num_configs) { int i, ret; struct zynq_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); const struct zynq_pctrl_group *pgrp = &pctrl->groups[selector]; for (i = 0; i < pgrp->npins; i++) { ret = zynq_pinconf_cfg_set(pctldev, pgrp->pins[i], configs, num_configs); if (ret) return ret; } return 0; } static const struct pinconf_ops zynq_pinconf_ops = { .is_generic = true, .pin_config_get = zynq_pinconf_cfg_get, .pin_config_set = zynq_pinconf_cfg_set, .pin_config_group_set = zynq_pinconf_group_set, }; static struct pinctrl_desc zynq_desc = { .name = "zynq_pinctrl", .pins = zynq_pins, .npins = ARRAY_SIZE(zynq_pins), .pctlops = &zynq_pctrl_ops, .pmxops = &zynq_pinmux_ops, .confops = &zynq_pinconf_ops, .num_custom_params = ARRAY_SIZE(zynq_dt_params), .custom_params = zynq_dt_params, #ifdef CONFIG_DEBUG_FS .custom_conf_items = zynq_conf_items, #endif .owner = THIS_MODULE, }; static int zynq_pinctrl_probe(struct platform_device *pdev) { struct resource *res; struct zynq_pinctrl *pctrl; pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL); if (!pctrl) return -ENOMEM; pctrl->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "syscon"); if (IS_ERR(pctrl->syscon)) { dev_err(&pdev->dev, "unable to get syscon\n"); return PTR_ERR(pctrl->syscon); } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "missing IO resource\n"); return -ENODEV; } pctrl->pctrl_offset = res->start; pctrl->groups = zynq_pctrl_groups; pctrl->ngroups = ARRAY_SIZE(zynq_pctrl_groups); pctrl->funcs = zynq_pmux_functions; pctrl->nfuncs = ARRAY_SIZE(zynq_pmux_functions); pctrl->pctrl = devm_pinctrl_register(&pdev->dev, &zynq_desc, pctrl); if (IS_ERR(pctrl->pctrl)) return PTR_ERR(pctrl->pctrl); platform_set_drvdata(pdev, pctrl); dev_info(&pdev->dev, "zynq pinctrl initialized\n"); return 0; } static const struct of_device_id zynq_pinctrl_of_match[] = { { .compatible = "xlnx,pinctrl-zynq" }, { } }; MODULE_DEVICE_TABLE(of, 
zynq_pinctrl_of_match); static struct platform_driver zynq_pinctrl_driver = { .driver = { .name = "zynq-pinctrl", .of_match_table = zynq_pinctrl_of_match, }, .probe = zynq_pinctrl_probe, }; module_platform_driver(zynq_pinctrl_driver);
// SPDX-License-Identifier: GPL-2.0 /* * Samsung's Exynos5250 SoC pin-mux and pin-config device tree source * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Samsung's Exynos5250 SoC pin-mux and pin-config options are listed as device * tree nodes in this file. */ #include "exynos-pinctrl.h" &pinctrl_0 { gpa0: gpa0-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpa1: gpa1-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpa2: gpa2-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpb0: gpb0-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpb1: gpb1-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpb2: gpb2-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpb3: gpb3-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpc0: gpc0-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpc1: gpc1-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpc2: gpc2-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpc3: gpc3-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpd0: gpd0-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpd1: gpd1-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpy0: gpy0-gpio-bank { gpio-controller; #gpio-cells = <2>; }; gpy1: gpy1-gpio-bank { gpio-controller; #gpio-cells = <2>; }; gpy2: gpy2-gpio-bank { gpio-controller; #gpio-cells = <2>; }; gpy3: gpy3-gpio-bank { gpio-controller; #gpio-cells = <2>; }; gpy4: 
gpy4-gpio-bank { gpio-controller; #gpio-cells = <2>; }; gpy5: gpy5-gpio-bank { gpio-controller; #gpio-cells = <2>; }; gpy6: gpy6-gpio-bank { gpio-controller; #gpio-cells = <2>; }; gpc4: gpc4-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpx0: gpx0-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; interrupt-parent = <&combiner>; #interrupt-cells = <2>; interrupts = <23 0>, <24 0>, <25 0>, <25 1>, <26 0>, <26 1>, <27 0>, <27 1>; }; gpx1: gpx1-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; interrupt-parent = <&combiner>; #interrupt-cells = <2>; interrupts = <28 0>, <28 1>, <29 0>, <29 1>, <30 0>, <30 1>, <31 0>, <31 1>; }; gpx2: gpx2-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpx3: gpx3-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; uart0_data: uart0-data-pins { samsung,pins = "gpa0-0", "gpa0-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; uart0_fctl: uart0-fctl-pins { samsung,pins = "gpa0-2", "gpa0-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2c2_bus: i2c2-bus-pins { samsung,pins = "gpa0-6", "gpa0-7"; samsung,pin-function = <EXYNOS_PIN_FUNC_3>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2c2_hs_bus: i2c2-hs-bus-pins { samsung,pins = "gpa0-6", "gpa0-7"; samsung,pin-function = <EXYNOS_PIN_FUNC_4>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2c2_gpio_bus: i2c2-gpio-bus-pins { samsung,pins = "gpa0-6", "gpa0-7"; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; uart2_data: uart2-data-pins { samsung,pins = "gpa1-0", "gpa1-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = 
<EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; uart2_fctl: uart2-fctl-pins { samsung,pins = "gpa1-2", "gpa1-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2c3_bus: i2c3-bus-pins { samsung,pins = "gpa1-2", "gpa1-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_3>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2c3_hs_bus: i2c3-hs-bus-pins { samsung,pins = "gpa1-2", "gpa1-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_4>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; uart3_data: uart3-data-pins { samsung,pins = "gpa1-4", "gpa1-5"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; spi0_bus: spi0-bus-pins { samsung,pins = "gpa2-0", "gpa2-2", "gpa2-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2c4_bus: i2c4-bus-pins { samsung,pins = "gpa2-0", "gpa2-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_3>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2c5_bus: i2c5-bus-pins { samsung,pins = "gpa2-2", "gpa2-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_3>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; spi1_bus: spi1-bus-pins { samsung,pins = "gpa2-4", "gpa2-6", "gpa2-7"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2s1_bus: i2s1-bus-pins { samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3", "gpb0-4"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; pcm1_bus: pcm1-bus-pins { samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3", "gpb0-4"; samsung,pin-function = <EXYNOS_PIN_FUNC_3>; samsung,pin-pud = 
<EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; ac97_bus: ac97-bus-pins { samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3", "gpb0-4"; samsung,pin-function = <EXYNOS_PIN_FUNC_4>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2s2_bus: i2s2-bus-pins { samsung,pins = "gpb1-0", "gpb1-1", "gpb1-2", "gpb1-3", "gpb1-4"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; pcm2_bus: pcm2-bus-pins { samsung,pins = "gpb1-0", "gpb1-1", "gpb1-2", "gpb1-3", "gpb1-4"; samsung,pin-function = <EXYNOS_PIN_FUNC_3>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; spdif_bus: spdif-bus-pins { samsung,pins = "gpb1-0", "gpb1-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_4>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; spi2_bus: spi2-bus-pins { samsung,pins = "gpb1-1", "gpb1-3", "gpb1-4"; samsung,pin-function = <EXYNOS_PIN_FUNC_5>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2c6_bus: i2c6-bus-pins { samsung,pins = "gpb1-3", "gpb1-4"; samsung,pin-function = <EXYNOS_PIN_FUNC_4>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; pwm0_out: pwm0-out-pins { samsung,pins = "gpb2-0"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; pwm1_out: pwm1-out-pins { samsung,pins = "gpb2-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; pwm2_out: pwm2-out-pins { samsung,pins = "gpb2-2"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; pwm3_out: pwm3-out-pins { samsung,pins = "gpb2-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; 
samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2c7_bus: i2c7-bus-pins { samsung,pins = "gpb2-2", "gpb2-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_3>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2c0_bus: i2c0-bus-pins { samsung,pins = "gpb3-0", "gpb3-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2c1_bus: i2c1-bus-pins { samsung,pins = "gpb3-2", "gpb3-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2c0_hs_bus: i2c0-hs-bus-pins { samsung,pins = "gpb3-0", "gpb3-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_4>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; i2c1_hs_bus: i2c1-hs-bus-pins { samsung,pins = "gpb3-2", "gpb3-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_4>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; sd0_clk: sd0-clk-pins { samsung,pins = "gpc0-0"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd0_cmd: sd0-cmd-pins { samsung,pins = "gpc0-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd0_cd: sd0-cd-pins { samsung,pins = "gpc0-2"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd0_bus1: sd0-bus-width1-pins { samsung,pins = "gpc0-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd0_bus4: sd0-bus-width4-pins { samsung,pins = "gpc0-3", "gpc0-4", "gpc0-5", "gpc0-6"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd0_bus8: sd0-bus-width8-pins { samsung,pins = "gpc1-0", 
"gpc1-1", "gpc1-2", "gpc1-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd1_clk: sd1-clk-pins { samsung,pins = "gpc2-0"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd1_cmd: sd1-cmd-pins { samsung,pins = "gpc2-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd1_cd: sd1-cd-pins { samsung,pins = "gpc2-2"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd1_bus1: sd1-bus-width1-pins { samsung,pins = "gpc2-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd1_bus4: sd1-bus-width4-pins { samsung,pins = "gpc2-3", "gpc2-4", "gpc2-5", "gpc2-6"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd2_clk: sd2-clk-pins { samsung,pins = "gpc3-0"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd2_cmd: sd2-cmd-pins { samsung,pins = "gpc3-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd2_cd: sd2-cd-pins { samsung,pins = "gpc3-2"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd2_bus1: sd2-bus-width1-pins { samsung,pins = "gpc3-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd2_bus4: sd2-bus-width4-pins { samsung,pins = "gpc3-3", "gpc3-4", "gpc3-5", "gpc3-6"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = 
<EXYNOS4_PIN_DRV_LV4>; }; sd2_bus8: sd2-bus-width8-pins { samsung,pins = "gpc4-3", "gpc4-4", "gpc4-5", "gpc4-6"; samsung,pin-function = <EXYNOS_PIN_FUNC_3>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd3_clk: sd3-clk-pins { samsung,pins = "gpc4-0"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd3_cmd: sd3-cmd-pins { samsung,pins = "gpc4-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd3_cd: sd3-cd-pins { samsung,pins = "gpc4-2"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd3_bus1: sd3-bus-width1-pins { samsung,pins = "gpc4-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd3_bus4: sd3-bus-width4-pins { samsung,pins = "gpc4-3", "gpc4-4", "gpc4-5", "gpc4-6"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; uart1_data: uart1-data-pins { samsung,pins = "gpd0-0", "gpd0-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; uart1_fctl: uart1-fctl-pins { samsung,pins = "gpd0-2", "gpd0-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; dp_hpd: dp-hpd-pins { samsung,pins = "gpx0-7"; samsung,pin-function = <EXYNOS_PIN_FUNC_3>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; hdmi_cec: hdmi-cec-pins { samsung,pins = "gpx3-6"; samsung,pin-function = <EXYNOS_PIN_FUNC_3>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; hdmi_hpd: hdmi-hpd-pins { samsung,pins = "gpx3-7"; samsung,pin-pud = 
<EXYNOS_PIN_PULL_NONE>; }; }; &pinctrl_1 { gpe0: gpe0-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpe1: gpe1-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpf0: gpf0-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpf1: gpf1-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpg0: gpg0-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpg1: gpg1-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpg2: gpg2-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gph0: gph0-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gph1: gph1-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; cam_gpio_a: cam-gpio-a-pins { samsung,pins = "gpe0-0", "gpe0-1", "gpe0-2", "gpe0-3", "gpe0-4", "gpe0-5", "gpe0-6", "gpe0-7", "gpe1-0", "gpe1-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; cam_gpio_b: cam-gpio-b-pins { samsung,pins = "gpf0-0", "gpf0-1", "gpf0-2", "gpf0-3", "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_3>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; cam_i2c2_bus: cam-i2c2-bus-pins { samsung,pins = "gpe0-6", "gpe1-0"; samsung,pin-function = <EXYNOS_PIN_FUNC_4>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; cam_spi1_bus: cam-spi1-bus-pins { samsung,pins = "gpe0-4", "gpe0-5", "gpf0-2", "gpf0-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_4>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; cam_i2c1_bus: cam-i2c1-bus-pins { 
samsung,pins = "gpf0-2", "gpf0-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; cam_i2c0_bus: cam-i2c0-bus-pins { samsung,pins = "gpf0-0", "gpf0-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; cam_spi0_bus: cam-spi0-bus-pins { samsung,pins = "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; cam_bayrgb_bus: cam-bayrgb-bus-pins { samsung,pins = "gpg0-0", "gpg0-1", "gpg0-2", "gpg0-3", "gpg0-4", "gpg0-5", "gpg0-6", "gpg0-7", "gpg1-0", "gpg1-1", "gpg1-2", "gpg1-3", "gpg1-4", "gpg1-5", "gpg1-6", "gpg1-7", "gpg2-0", "gpg2-1"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; cam_port_a: cam-port-a-pins { samsung,pins = "gph0-0", "gph0-1", "gph0-2", "gph0-3", "gph1-0", "gph1-1", "gph1-2", "gph1-3", "gph1-4", "gph1-5", "gph1-6", "gph1-7"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; }; &pinctrl_2 { gpv0: gpv0-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpv1: gpv1-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpv2: gpv2-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpv3: gpv3-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; gpv4: gpv4-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; c2c_rxd: c2c-rxd-pins { samsung,pins = "gpv0-0", "gpv0-1", "gpv0-2", "gpv0-3", "gpv0-4", "gpv0-5", "gpv0-6", "gpv0-7", "gpv1-0", "gpv1-1", "gpv1-2", "gpv1-3", "gpv1-4", "gpv1-5", "gpv1-6", "gpv1-7"; 
samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; c2c_txd: c2c-txd-pins { samsung,pins = "gpv2-0", "gpv2-1", "gpv2-2", "gpv2-3", "gpv2-4", "gpv2-5", "gpv2-6", "gpv2-7", "gpv3-0", "gpv3-1", "gpv3-2", "gpv3-3", "gpv3-4", "gpv3-5", "gpv3-6", "gpv3-7"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; }; &pinctrl_3 { gpz: gpz-gpio-bank { gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; }; i2s0_bus: i2s0-bus-pins { samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3", "gpz-4", "gpz-5", "gpz-6"; samsung,pin-function = <EXYNOS_PIN_FUNC_2>; samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; };
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#ifndef __DSI_PHY_H__
#define __DSI_PHY_H__

#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>

#include "dsi.h"

/*
 * Per-generation PHY callbacks.  Each PHY family (28nm .. 4nm) supplies its
 * own implementation via its msm_dsi_phy_cfg.
 */
struct msm_dsi_phy_ops {
	int (*pll_init)(struct msm_dsi_phy *phy);
	int (*enable)(struct msm_dsi_phy *phy, struct msm_dsi_phy_clk_request *clk_req);
	void (*disable)(struct msm_dsi_phy *phy);
	/* Save/restore PLL registers across a power collapse. */
	void (*save_pll_state)(struct msm_dsi_phy *phy);
	int (*restore_pll_state)(struct msm_dsi_phy *phy);
	bool (*set_continuous_clock)(struct msm_dsi_phy *phy, bool enable);
	int (*parse_dt_properties)(struct msm_dsi_phy *phy);
};

/* Static, per-SoC PHY description: regulators, ops, PLL limits, MMIO bases. */
struct msm_dsi_phy_cfg {
	const struct regulator_bulk_data *regulator_data;
	int num_regulators;
	struct msm_dsi_phy_ops ops;
	unsigned long min_pll_rate;	/* PLL VCO limits */
	unsigned long max_pll_rate;
	const resource_size_t io_start[DSI_MAX];	/* register base per PHY instance */
	const int num_dsi_phy;
	const int quirks;
	bool has_phy_regulator;
	bool has_phy_lane;
};

extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8226_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8937_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_6375_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs;

/* D-PHY timing parameters computed by the msm_dsi_dphy_timing_calc* helpers. */
struct msm_dsi_dphy_timing {
	u32 clk_zero;
	u32 clk_trail;
	u32 clk_prepare;
	u32 hs_exit;
	u32 hs_zero;
	u32 hs_prepare;
	u32 hs_trail;
	u32 hs_rqst;
	u32 ta_go;
	u32 ta_sure;
	u32 ta_get;

	struct msm_dsi_phy_shared_timings shared_timings;

	/* For PHY v2 only */
	u32 hs_rqst_ckln;
	u32 hs_prep_dly;
	u32 hs_prep_dly_ckln;
	u8 hs_halfbyte_en;
	u8 hs_halfbyte_en_ckln;
};

/* Indices into the provided_clocks onecell data. */
#define DSI_BYTE_PLL_CLK	0
#define DSI_PIXEL_PLL_CLK	1
#define NUM_PROVIDED_CLKS	2

#define DSI_LANE_MAX		5

/* Runtime state for one DSI PHY instance. */
struct msm_dsi_phy {
	struct platform_device *pdev;
	void __iomem *base;
	void __iomem *pll_base;
	void __iomem *reg_base;
	void __iomem *lane_base;
	phys_addr_t base_size;
	phys_addr_t pll_size;
	phys_addr_t reg_size;
	phys_addr_t lane_size;
	int id;

	struct clk *ahb_clk;
	struct regulator_bulk_data *supplies;

	struct msm_dsi_dphy_timing timing;
	const struct msm_dsi_phy_cfg *cfg;
	void *tuning_cfg;

	enum msm_dsi_phy_usecase usecase;
	bool regulator_ldo_mode;
	bool cphy_mode;

	struct clk_hw *vco_hw;
	bool pll_on;

	struct clk_hw_onecell_data *provided_clocks;

	bool state_saved;	/* set once save_pll_state has run */
};

/*
 * PHY internal functions
 */
int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
			     struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
				struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
				struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
				struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
				struct msm_dsi_phy_clk_request *clk_req);

#endif /* __DSI_PHY_H__ */
/* * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*
 * Authors: AMD
 *
 */

#include "dccg.h"
#include "rn_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dce100/dce_clk_mgr.h"
#include "rn_clk_mgr_vbios_smu.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
#include "atomfirmware.h"
#include "clk/clk_10_0_2_offset.h"
#include "clk/clk_10_0_2_sh_mask.h"
#include "renoir_ip_offset.h"

/* Constants */

#define SMU_VER_55_51_0 0x373300 /* SMU Version that is able to set DISPCLK below 100MHz */

/* Macros */

#define REG(reg_name) \
	(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)

/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */

/*
 * Count displays whose PHY is currently enabled, with two workarounds:
 * scanning streams for TMDS signals and bumping a zero count to one when a
 * TMDS display is present (avoids a hang on HDMI display off/on cycles).
 */
static int rn_get_active_display_cnt_wa(struct dc *dc, struct dc_state *context)
{
	int i, display_count;
	bool tmds_present = false;

	display_count = 0;
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_stream_state *stream = context->streams[i];

		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
			tmds_present = true;
	}

	for (i = 0; i < dc->link_count; i++) {
		const struct dc_link *link = dc->links[i];

		/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
		if (link->link_enc->funcs->is_dig_enabled &&
				link->link_enc->funcs->is_dig_enabled(link->link_enc))
			display_count++;
	}

	/* WA for hang on HDMI after display turned off then back on */
	if (display_count == 0 && tmds_present)
		display_count = 1;

	return display_count;
}

/*
 * Ask the SMU (via VBIOS) to enter the DCN low-power state when no display
 * is active and we are not already in low power.
 */
static void rn_set_low_power_state(struct clk_mgr *clk_mgr_base)
{
	int display_count;
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc *dc = clk_mgr_base->ctx->dc;
	struct dc_state *context = dc->current_state;

	if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
		display_count = rn_get_active_display_cnt_wa(dc, context);

		/* if we can go lower, go lower */
		if (display_count == 0) {
			rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
			/* update power state */
			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
		}
	}
}

/*
 * Reprogram the per-pipe DPP DTO dividers against @ref_dpp_clk.  When
 * @safe_to_lower is false a DTO is only touched if its clock goes up.
 */
static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
		struct dc_state *context, int ref_dpp_clk, bool safe_to_lower)
{
	int i;

	clk_mgr->dccg->ref_dppclk = ref_dpp_clk;

	for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
		int dpp_inst, dppclk_khz, prev_dppclk_khz;

		/* Loop index may not match dpp->inst if some pipes disabled,
		 * so select correct inst from res_pool
		 */
		dpp_inst = clk_mgr->base.ctx->dc->res_pool->dpps[i]->inst;
		dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;

		prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[dpp_inst];

		if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
			clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz);
	}
}

/*
 * Main clock-update entry point: reconcile the clocks requested by @context
 * with the currently programmed ones, talking to the SMU through VBIOS
 * messages.  @safe_to_lower gates every downward transition; the order of
 * DTO vs. global DPPCLK programming below is deliberate and must not be
 * rearranged (DTO first when lowering, global clock first when raising).
 */
static void rn_update_clocks(struct clk_mgr *clk_mgr_base,
			struct dc_state *context,
			bool safe_to_lower)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	struct dc *dc = clk_mgr_base->ctx->dc;
	int display_count;
	bool update_dppclk = false;
	bool update_dispclk = false;
	bool dpp_clock_lowered = false;
	struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;

	if (dc->work_arounds.skip_clock_update)
		return;

	/*
	 * if it is safe to lower, but we are already in the lower state, we don't have to do anything
	 * also if safe to lower is false, we just go in the higher state
	 */
	if (safe_to_lower && !dc->debug.disable_48mhz_pwrdwn) {
		/* check that we're not already in lower */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
			display_count = rn_get_active_display_cnt_wa(dc, context);

			/* if we can go lower, go lower */
			if (display_count == 0) {
				rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
				/* update power state */
				clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
			}
		}
	} else {
		/* check that we're not already in D0 */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
			rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_MISSION_MODE);
			/* update power state */
			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
		}
	}

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
		rn_vbios_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
		rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
	}

	// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
	// Do not adjust dppclk if dppclk is 0 to avoid unexpected result
	if (new_clocks->dppclk_khz < 100000 && new_clocks->dppclk_khz > 0)
		new_clocks->dppclk_khz = 100000;

	/*
	 * Temporarily ignore the 0 cases for disp and dpp clks.
	 * We may have a new feature that requires 0 clks in the future.
	 */
	if (new_clocks->dppclk_khz == 0 || new_clocks->dispclk_khz == 0) {
		new_clocks->dppclk_khz = clk_mgr_base->clks.dppclk_khz;
		new_clocks->dispclk_khz = clk_mgr_base->clks.dispclk_khz;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
		if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
			dpp_clock_lowered = true;
		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
		update_dppclk = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		clk_mgr_base->clks.actual_dispclk_khz = rn_vbios_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);

		update_dispclk = true;
	}

	if (dpp_clock_lowered) {
		// increase per DPP DTO before lowering global dppclk with requested dppclk
		rn_update_clocks_update_dpp_dto(
				clk_mgr,
				context,
				clk_mgr_base->clks.dppclk_khz,
				safe_to_lower);

		clk_mgr_base->clks.actual_dppclk_khz =
				rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);

		//update dpp dto with actual dpp clk.
		rn_update_clocks_update_dpp_dto(
				clk_mgr,
				context,
				clk_mgr_base->clks.actual_dppclk_khz,
				safe_to_lower);

	} else {
		// increase global DPPCLK before lowering per DPP DTO
		if (update_dppclk || update_dispclk)
			clk_mgr_base->clks.actual_dppclk_khz =
					rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);

		// always update dtos unless clock is lowered and not safe to lower
		rn_update_clocks_update_dpp_dto(
				clk_mgr,
				context,
				clk_mgr_base->clks.actual_dppclk_khz,
				safe_to_lower);
	}

	if (update_dispclk &&
			dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
		/*update dmcu for wait_loop count*/
		dmcu->funcs->set_psr_wait_loop(dmcu,
			clk_mgr_base->clks.dispclk_khz / 1000 / 7);
	}
}

/*
 * Read the PLL feedback multiplier from CLK1_CLK_PLL_REQ and convert it to
 * the VCO frequency in kHz (fbmult * refclk).
 */
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
	/* get FbMult value */
	struct fixed31_32 pll_req;
	unsigned int fbmult_frac_val = 0;
	unsigned int fbmult_int_val = 0;

	/*
	 * Register value of fbmult is in 8.16 format, we are converting to 31.32
	 * to leverage the fix point operations available in driver
	 */

	REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
	REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */

	pll_req = dc_fixpt_from_int(fbmult_int_val);

	/*
	 * since fractional part is only 16 bit in register definition but is 32 bit
	 * in our fix point definiton, need to shift left by 16 to obtain correct value
	 */
	pll_req.value |= fbmult_frac_val << 16;

	/* multiply by REFCLK period */
	pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);

	/* integer part is now VCO frequency in kHz */
	return dc_fixpt_floor(pll_req);
}

/* Snapshot the raw CLK1 counter/bypass registers into @internal. */
static void rn_dump_clk_registers_internal(struct rn_clk_internal *internal, struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
	internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL);

	internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL);	//dcf deep sleep divider
internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS); internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT); internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL); internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT); internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL); internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT); internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL); } /* This function collect raw clk register values */ static void rn_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) { struct rn_clk_internal internal = {0}; char *bypass_clks[5] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"}; unsigned int chars_printed = 0; unsigned int remaining_buffer = log_info->bufSize; rn_dump_clk_registers_internal(&internal, clk_mgr_base); regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10; regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10; regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS; regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10; regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10; regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10; regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007; if (regs_and_bypass->dppclk_bypass > 4) regs_and_bypass->dppclk_bypass = 0; regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007; if (regs_and_bypass->dcfclk_bypass > 4) regs_and_bypass->dcfclk_bypass = 0; regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007; if (regs_and_bypass->dispclk_bypass > 4) regs_and_bypass->dispclk_bypass = 0; regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007; if (regs_and_bypass->dprefclk_bypass > 4) regs_and_bypass->dprefclk_bypass = 0; if (log_info->enabled) { chars_printed = 
snprintf_count(log_info->pBuf, remaining_buffer, "clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n");
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		/* One CSV row per decoded clock value. */
		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dcfclk,%d,%d,%d,%s\n",
			regs_and_bypass->dcfclk,
			regs_and_bypass->dcf_deep_sleep_divider,
			regs_and_bypass->dcf_deep_sleep_allow,
			bypass_clks[(int) regs_and_bypass->dcfclk_bypass]);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dprefclk,%d,N/A,N/A,%s\n",
			regs_and_bypass->dprefclk,
			bypass_clks[(int) regs_and_bypass->dprefclk_bypass]);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dispclk,%d,N/A,N/A,%s\n",
			regs_and_bypass->dispclk,
			bypass_clks[(int) regs_and_bypass->dispclk_bypass]);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		//split
		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "SPLIT\n");
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		// REGISTER VALUES
		/* Second section: the raw register values behind the decode. */
		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "reg_name,value,clk_type\n");
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_CURRENT_CNT,%d,dcfclk\n",
			internal.CLK1_CLK3_CURRENT_CNT);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_DS_CNTL,%d,dcf_deep_sleep_divider\n",
			internal.CLK1_CLK3_DS_CNTL);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_ALLOW_DS,%d,dcf_deep_sleep_allow\n",
			internal.CLK1_CLK3_ALLOW_DS);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK2_CURRENT_CNT,%d,dprefclk\n",
			internal.CLK1_CLK2_CURRENT_CNT);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK0_CURRENT_CNT,%d,dispclk\n",
			internal.CLK1_CLK0_CURRENT_CNT);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK1_CURRENT_CNT,%d,dppclk\n",
			internal.CLK1_CLK1_CURRENT_CNT);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass\n",
			internal.CLK1_CLK3_BYPASS_CNTL);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK2_BYPASS_CNTL,%d,dprefclk_bypass\n",
			internal.CLK1_CLK2_BYPASS_CNTL);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK0_BYPASS_CNTL,%d,dispclk_bypass\n",
			internal.CLK1_CLK0_BYPASS_CNTL);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer,
"CLK1_CLK1_BYPASS_CNTL,%d,dppclk_bypass\n", internal.CLK1_CLK1_BYPASS_CNTL); remaining_buffer -= chars_printed; *log_info->sum_chars_printed += chars_printed; log_info->pBuf += chars_printed; } } static void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); rn_vbios_smu_enable_pme_wa(clk_mgr); } static void rn_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); // Assumption is that boot state always supports pstate clk_mgr->clks.p_state_change_support = true; clk_mgr->clks.prev_p_state_change_support = true; clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; } static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges) { int i, num_valid_sets; num_valid_sets = 0; for (i = 0; i < WM_SET_COUNT; i++) { /* skip empty entries, the smu array has no holes*/ if (!bw_params->wm_table.entries[i].valid) continue; ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst; ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type; /* We will not select WM based on fclk, so leave it as unconstrained */ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; /* dcfclk wil be used to select WM*/ if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) { if (i == 0) ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = 0; else { /* add 1 to make it non-overlapping with next lvl */ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1; } ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; } else { /* unconstrained for memory retraining */ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
			ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz =
				PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;

			/* Modify previous watermark range to cover up to max */
			if (num_valid_sets > 0)
				ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz =
					PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
		}
		num_valid_sets++;
	}

	ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
	ranges->num_reader_wm_sets = num_valid_sets;

	/* modify the min and max to make sure we cover the whole range*/
	ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
	ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
	ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
	ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;

	/* This is for writeback only, does not matter currently as no writeback support*/
	ranges->num_writer_wm_sets = 1;
	ranges->writer_wm_sets[0].wm_inst = WM_A;
	ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
	ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
	ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
	ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
}

/*
 * Build the watermark range sets from bw_params and push them to PP Lib/SMU
 * (skipped entirely when the disable_pplib_wm_range debug option is set).
 */
static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
	struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug;
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu;

	if (!debug->disable_pplib_wm_range) {
		build_watermark_ranges(clk_mgr_base->bw_params, &clk_mgr_base->ranges);

		/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
		if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
			pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu,
&clk_mgr_base->ranges);
	}
}

/*
 * Compare only the clock fields this clk_mgr programs (dispclk, dppclk,
 * dcfclk, dcfclk deep-sleep); all other dc_clocks fields are ignored.
 */
static bool rn_are_clock_states_equal(struct dc_clocks *a,
		struct dc_clocks *b)
{
	if (a->dispclk_khz != b->dispclk_khz)
		return false;
	else if (a->dppclk_khz != b->dppclk_khz)
		return false;
	else if (a->dcfclk_khz != b->dcfclk_khz)
		return false;
	else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
		return false;

	return true;
}


/* Notify clk_mgr of a change in link rate, update phyclk frequency if necessary */
static void rn_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc_link *link)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	unsigned int i, max_phyclk_req = 0;

	/* Record this link's PHYCLK requirement, then take the max over all
	 * links and reprogram PHYCLK only when that max actually changes. */
	clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;

	for (i = 0; i < MAX_LINKS; i++) {
		if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
			max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
	}

	if (max_phyclk_req != clk_mgr_base->clks.phyclk_khz) {
		clk_mgr_base->clks.phyclk_khz = max_phyclk_req;
		rn_vbios_smu_set_phyclk(clk_mgr, clk_mgr_base->clks.phyclk_khz);
	}
}

/* clk_mgr vtable installed by rn_clk_mgr_construct(). */
static struct clk_mgr_funcs dcn21_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = rn_update_clocks,
	.init_clocks = rn_init_clocks,
	.enable_pme_wa = rn_enable_pme_wa,
	.are_clock_states_equal = rn_are_clock_states_equal,
	.set_low_power_state = rn_set_low_power_state,
	.notify_wm_ranges = rn_notify_wm_ranges,
	.notify_link_rate_change = rn_notify_link_rate_change,
};

/*
 * Hardcoded fallback bandwidth parameters; overwritten from the SMU DPM
 * clock table when one is available (see rn_clk_mgr_helper_populate_bw_params
 * and rn_clk_mgr_construct).
 */
static struct clk_bw_params rn_bw_params = {
	.vram_type = Ddr4MemType,
	.num_channels = 1,
	.clk_table = {
		.entries = {
			{
				.voltage = 0,
				.dcfclk_mhz = 400,
				.fclk_mhz = 400,
				.memclk_mhz = 800,
				.socclk_mhz = 0,
			},
			{
				.voltage = 0,
				.dcfclk_mhz = 483,
				.fclk_mhz = 800,
				.memclk_mhz = 1600,
				.socclk_mhz = 0,
			},
			{
				.voltage = 0,
				.dcfclk_mhz = 602,
				.fclk_mhz = 1067,
				.memclk_mhz = 1067,
				.socclk_mhz = 0,
			},
			{
				.voltage = 0,
				.dcfclk_mhz = 738,
				.fclk_mhz = 1333,
				.memclk_mhz = 1600,
				.socclk_mhz = 0,
			},
		},

		.num_entries = 4,
	},

};

static
/* Look up the SOCCLK DPM frequency for @voltage; asserts and returns 0 if
 * the voltage level is not present in the SMU table. */
unsigned int find_socclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
	int i;

	for (i = 0; i < PP_SMU_NUM_SOCCLK_DPM_LEVELS; i++) {
		if (clock_table->SocClocks[i].Vol == voltage)
			return clock_table->SocClocks[i].Freq;
	}

	ASSERT(0);
	return 0;
}

/* Look up the DCFCLK DPM frequency for @voltage; asserts and returns 0 if
 * the voltage level is not present in the SMU table. */
static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
	int i;

	for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) {
		if (clock_table->DcfClocks[i].Vol == voltage)
			return clock_table->DcfClocks[i].Freq;
	}

	ASSERT(0);
	return 0;
}

/*
 * Rebuild bw_params->clk_table (lowest to highest DPM level) and wm_table
 * from the SMU-provided DPM clock table and VBIOS integrated info.  Leaves
 * bw_params untouched (keeping the hardcoded defaults) when the SMU table
 * is all zeros.
 */
static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct integrated_info *bios_info)
{
	int i, j = 0;
	j = -1;

	static_assert(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL,
		"number of reported FCLK DPM levels exceed maximum");

	/* Find lowest DPM, FCLK is filled in reverse order*/
	for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
		if (clock_table->FClocks[i].Freq != 0 && clock_table->FClocks[i].Vol != 0) {
			j = i;
			break;
		}
	}

	if (j == -1) {
		/* clock table is all 0s, just use our own hardcode */
		ASSERT(0);
		return;
	}

	bw_params->clk_table.num_entries = j + 1;

	/* j walks the SMU table backwards so entries[] ends up low-to-high. */
	for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
		bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[j].Freq;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq;
		bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol;
		bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
		bw_params->clk_table.entries[i].socclk_mhz = find_socclk_for_voltage(clock_table,
			bw_params->clk_table.entries[i].voltage);
	}

	bw_params->vram_type = bios_info->memory_type;
	bw_params->num_channels = bios_info->ma_channel_number;

	/* One watermark set per clock-table entry; mark the rest invalid. */
	for (i = 0; i < WM_SET_COUNT; i++) {
		bw_params->wm_table.entries[i].wm_inst = i;

		if (i >= bw_params->clk_table.num_entries) {
			bw_params->wm_table.entries[i].valid = false;
			continue;
		}
bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
		bw_params->wm_table.entries[i].valid = true;
	}

	if (bw_params->vram_type == LpDdr4MemType) {
		/*
		 * WM set D will be re-purposed for memory retraining
		 */
		DC_FP_START();
		dcn21_clk_mgr_set_bw_params_wm_table(bw_params);
		DC_FP_END();
	}
}

/*
 * One-time construction of the Renoir/DCN21 clk_mgr: wires up the vtable,
 * reads SMU version and VCO frequency, selects the watermark table variant
 * by memory type / ASIC revision, snapshots boot clocks, and (when the SMU
 * exposes a DPM clock table) replaces the hardcoded bandwidth parameters.
 */
void rn_clk_mgr_construct(
		struct dc_context *ctx,
		struct clk_mgr_internal *clk_mgr,
		struct pp_smu_funcs *pp_smu,
		struct dccg *dccg)
{
	struct dc_debug_options *debug = &ctx->dc->debug;
	struct dpm_clocks clock_table = { 0 };
	enum pp_smu_status status = 0;
	int is_green_sardine = 0;
	struct clk_log_info log_info = {0};

	is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);

	clk_mgr->base.ctx = ctx;
	clk_mgr->base.funcs = &dcn21_funcs;

	clk_mgr->pp_smu = pp_smu;

	clk_mgr->dccg = dccg;
	clk_mgr->dfs_bypass_disp_clk = 0;

	/* Spread-spectrum defaults; refined by dce_clock_read_ss_info() below. */
	clk_mgr->dprefclk_ss_percentage = 0;
	clk_mgr->dprefclk_ss_divider = 1000;
	clk_mgr->ss_on_dprefclk = false;
	clk_mgr->dfs_ref_freq_khz = 48000;

	clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr);

	clk_mgr->periodic_retraining_disabled = rn_vbios_smu_is_periodic_retraining_disabled(clk_mgr);

	/* SMU Version 55.51.0 and up no longer have an issue
	 * that needs to limit minimum dispclk */
	if (clk_mgr->smu_ver >= SMU_VER_55_51_0)
		debug->min_disp_clk_khz = 0;

	/* TODO: Check we get what we expect during bringup */
	clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);

	/* in case we don't get a value from the register, use default */
	if (clk_mgr->base.dentist_vco_freq_khz == 0)
		clk_mgr->base.dentist_vco_freq_khz = 3600000;

	/* Pick the watermark table variant: LPDDR4 vs DDR4, Green Sardine vs
	 * Renoir, periodic-retraining-disabled and single-rank DIMM cases. */
	if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) {
		if (clk_mgr->periodic_retraining_disabled) {
			rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt;
		} else {
			if (is_green_sardine)
				rn_bw_params.wm_table = lpddr4_wm_table_gs;
			else
				rn_bw_params.wm_table = lpddr4_wm_table_rn;
		}
	} else {
		if (is_green_sardine)
			rn_bw_params.wm_table = ddr4_wm_table_gs;
		else {
			if (ctx->dc->config.is_single_rank_dimm)
				rn_bw_params.wm_table = ddr4_1R_wm_table_rn;
			else
				rn_bw_params.wm_table = ddr4_wm_table_rn;
		}
	}
	/* Saved clocks configured at boot for debug purposes */
	rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);

	clk_mgr->base.dprefclk_khz = 600000;
	dce_clock_read_ss_info(clk_mgr);

	clk_mgr->base.bw_params = &rn_bw_params;

	if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
		status = pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);

		if (status == PP_SMU_RESULT_OK &&
		    ctx->dc_bios->integrated_info) {
			rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
			/* treat memory config as single channel if memory is asymmetrics. */
			if (ctx->dc->config.is_asymmetric_memory)
				clk_mgr->base.bw_params->num_channels = 1;
		}
	}

	/* enable powerfeatures when displaycount goes to 0 */
	if (clk_mgr->smu_ver >= 0x00371500)
		rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn);
}