file_name
int64
0
72.3k
vulnerable_line_numbers
stringlengths
1
1.06k
dataset_type
stringclasses
1 value
commit_hash
stringlengths
40
44
unique_id
int64
0
271k
project
stringclasses
10 values
target
int64
0
1
repo_url
stringclasses
10 values
date
stringlengths
25
25
code
stringlengths
0
20.4M
CVE
stringlengths
13
43
CWE
stringclasses
50 values
commit_link
stringlengths
73
97
severity
stringclasses
4 values
__index_level_0__
int64
0
124k
32,176
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
32,176
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "third_party/blink/renderer/core/css/properties/longhands/outline_style.h" #include "third_party/blink/renderer/core/css/css_identifier_value.h" #include "third_party/blink/renderer/core/style/computed_style.h" namespace blink { namespace CSSLonghand { const CSSValue* OutlineStyle::CSSValueFromComputedStyleInternal( const ComputedStyle& style, const SVGComputedStyle&, const LayoutObject*, Node* styled_node, bool allow_visited_style) const { if (style.OutlineStyleIsAuto()) return CSSIdentifierValue::Create(CSSValueAuto); return CSSIdentifierValue::Create(style.OutlineStyle()); } } // namespace CSSLonghand } // namespace blink
null
null
null
null
29,039
27,481
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
192,476
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef _R819XU_PHYREG_H #define _R819XU_PHYREG_H #define RF_DATA 0x1d4 /* FW will write RF data in the register.*/ /* Register duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF * page 1 */ #define rPMAC_Reset 0x100 #define rPMAC_TxStart 0x104 #define rPMAC_TxLegacySIG 0x108 #define rPMAC_TxHTSIG1 0x10c #define rPMAC_TxHTSIG2 0x110 #define rPMAC_PHYDebug 0x114 #define rPMAC_TxPacketNum 0x118 #define rPMAC_TxIdle 0x11c #define rPMAC_TxMACHeader0 0x120 #define rPMAC_TxMACHeader1 0x124 #define rPMAC_TxMACHeader2 0x128 #define rPMAC_TxMACHeader3 0x12c #define rPMAC_TxMACHeader4 0x130 #define rPMAC_TxMACHeader5 0x134 #define rPMAC_TxDataType 0x138 #define rPMAC_TxRandomSeed 0x13c #define rPMAC_CCKPLCPPreamble 0x140 #define rPMAC_CCKPLCPHeader 0x144 #define rPMAC_CCKCRC16 0x148 #define rPMAC_OFDMRxCRC32OK 0x170 #define rPMAC_OFDMRxCRC32Er 0x174 #define rPMAC_OFDMRxParityEr 0x178 #define rPMAC_OFDMRxCRC8Er 0x17c #define rPMAC_CCKCRxRC16Er 0x180 #define rPMAC_CCKCRxRC32Er 0x184 #define rPMAC_CCKCRxRC32OK 0x188 #define rPMAC_TxStatus 0x18c /* page8 */ #define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC */ #define rFPGA0_TxInfo 0x804 #define rFPGA0_PSDFunction 0x808 #define rFPGA0_TxGainStage 0x80c #define rFPGA0_RFTiming1 0x810 #define rFPGA0_RFTiming2 0x814 /* #define rFPGA0_XC_RFTiming 0x818 * #define rFPGA0_XD_RFTiming 0x81c */ #define rFPGA0_XA_HSSIParameter1 0x820 #define rFPGA0_XA_HSSIParameter2 0x824 #define rFPGA0_XB_HSSIParameter1 0x828 #define rFPGA0_XB_HSSIParameter2 0x82c #define rFPGA0_XC_HSSIParameter1 0x830 #define rFPGA0_XC_HSSIParameter2 0x834 #define rFPGA0_XD_HSSIParameter1 0x838 #define rFPGA0_XD_HSSIParameter2 0x83c #define rFPGA0_XA_LSSIParameter 0x840 #define rFPGA0_XB_LSSIParameter 0x844 #define rFPGA0_XC_LSSIParameter 0x848 #define rFPGA0_XD_LSSIParameter 0x84c #define rFPGA0_RFWakeUpParameter 0x850 #define rFPGA0_RFSleepUpParameter 0x854 #define rFPGA0_XAB_SwitchControl 0x858 #define rFPGA0_XCD_SwitchControl 0x85c #define 
rFPGA0_XA_RFInterfaceOE 0x860 #define rFPGA0_XB_RFInterfaceOE 0x864 #define rFPGA0_XC_RFInterfaceOE 0x868 #define rFPGA0_XD_RFInterfaceOE 0x86c #define rFPGA0_XAB_RFInterfaceSW 0x870 #define rFPGA0_XCD_RFInterfaceSW 0x874 #define rFPGA0_XAB_RFParameter 0x878 #define rFPGA0_XCD_RFParameter 0x87c #define rFPGA0_AnalogParameter1 0x880 #define rFPGA0_AnalogParameter2 0x884 #define rFPGA0_AnalogParameter3 0x888 #define rFPGA0_AnalogParameter4 0x88c #define rFPGA0_XA_LSSIReadBack 0x8a0 #define rFPGA0_XB_LSSIReadBack 0x8a4 #define rFPGA0_XC_LSSIReadBack 0x8a8 #define rFPGA0_XD_LSSIReadBack 0x8ac #define rFPGA0_PSDReport 0x8b4 #define rFPGA0_XAB_RFInterfaceRB 0x8e0 #define rFPGA0_XCD_RFInterfaceRB 0x8e4 /* page 9 */ #define rFPGA1_RFMOD 0x900 /* RF mode & OFDM TxSC */ #define rFPGA1_TxBlock 0x904 #define rFPGA1_DebugSelect 0x908 #define rFPGA1_TxInfo 0x90c /* page a */ #define rCCK0_System 0xa00 #define rCCK0_AFESetting 0xa04 #define rCCK0_CCA 0xa08 #define rCCK0_RxAGC1 0xa0c /* AGC default value, saturation level */ #define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */ #define rCCK0_RxHP 0xa14 #define rCCK0_DSPParameter1 0xa18 /* Timing recovery & Channel estimation threshold */ #define rCCK0_DSPParameter2 0xa1c /* SQ threshold */ #define rCCK0_TxFilter1 0xa20 #define rCCK0_TxFilter2 0xa24 #define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */ #define rCCK0_FalseAlarmReport 0xa2c /* 0xa2d */ #define rCCK0_TRSSIReport 0xa50 #define rCCK0_RxReport 0xa54 /* 0xa57 */ #define rCCK0_FACounterLower 0xa5c /* 0xa5b */ #define rCCK0_FACounterUpper 0xa58 /* 0xa5c */ /* page c */ #define rOFDM0_LSTF 0xc00 #define rOFDM0_TRxPathEnable 0xc04 #define rOFDM0_TRMuxPar 0xc08 #define rOFDM0_TRSWIsolation 0xc0c #define rOFDM0_XARxAFE 0xc10 /* RxIQ DC offset, Rx digital filter, DC notch filter */ #define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imblance matrix */ #define rOFDM0_XBRxAFE 0xc18 #define rOFDM0_XBRxIQImbalance 0xc1c #define rOFDM0_XCRxAFE 0xc20 #define rOFDM0_XCRxIQImbalance 0xc24 
#define rOFDM0_XDRxAFE 0xc28 #define rOFDM0_XDRxIQImbalance 0xc2c #define rOFDM0_RxDetector1 0xc30 /* PD,BW & SBD */ #define rOFDM0_RxDetector2 0xc34 /* SBD & Fame Sync.*/ #define rOFDM0_RxDetector3 0xc38 /* Frame Sync.*/ #define rOFDM0_RxDetector4 0xc3c /* PD, SBD, Frame Sync & Short-GI */ #define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */ #define rOFDM0_CFOandDAGC 0xc44 /* CFO & DAGC */ #define rOFDM0_CCADropThreshold 0xc48 /* CCA Drop threshold */ #define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */ #define rOFDM0_XAAGCCore1 0xc50 #define rOFDM0_XAAGCCore2 0xc54 #define rOFDM0_XBAGCCore1 0xc58 #define rOFDM0_XBAGCCore2 0xc5c #define rOFDM0_XCAGCCore1 0xc60 #define rOFDM0_XCAGCCore2 0xc64 #define rOFDM0_XDAGCCore1 0xc68 #define rOFDM0_XDAGCCore2 0xc6c #define rOFDM0_AGCParameter1 0xc70 #define rOFDM0_AGCParameter2 0xc74 #define rOFDM0_AGCRSSITable 0xc78 #define rOFDM0_HTSTFAGC 0xc7c #define rOFDM0_XATxIQImbalance 0xc80 #define rOFDM0_XATxAFE 0xc84 #define rOFDM0_XBTxIQImbalance 0xc88 #define rOFDM0_XBTxAFE 0xc8c #define rOFDM0_XCTxIQImbalance 0xc90 #define rOFDM0_XCTxAFE 0xc94 #define rOFDM0_XDTxIQImbalance 0xc98 #define rOFDM0_XDTxAFE 0xc9c #define rOFDM0_RxHPParameter 0xce0 #define rOFDM0_TxPseudoNoiseWgt 0xce4 #define rOFDM0_FrameSync 0xcf0 #define rOFDM0_DFSReport 0xcf4 #define rOFDM0_TxCoeff1 0xca4 #define rOFDM0_TxCoeff2 0xca8 #define rOFDM0_TxCoeff3 0xcac #define rOFDM0_TxCoeff4 0xcb0 #define rOFDM0_TxCoeff5 0xcb4 #define rOFDM0_TxCoeff6 0xcb8 /* page d */ #define rOFDM1_LSTF 0xd00 #define rOFDM1_TRxPathEnable 0xd04 #define rOFDM1_CFO 0xd08 #define rOFDM1_CSI1 0xd10 #define rOFDM1_SBD 0xd14 #define rOFDM1_CSI2 0xd18 #define rOFDM1_CFOTracking 0xd2c #define rOFDM1_TRxMesaure1 0xd34 #define rOFDM1_IntfDet 0xd3c #define rOFDM1_PseudoNoiseStateAB 0xd50 #define rOFDM1_PseudoNoiseStateCD 0xd54 #define rOFDM1_RxPseudoNoiseWgt 0xd58 #define rOFDM_PHYCounter1 0xda0 /* cca, parity fail */ #define rOFDM_PHYCounter2 0xda4 /* rate illegal, crc8 fail */ #define 
rOFDM_PHYCounter3 0xda8 /* MCS not support */ #define rOFDM_ShortCFOAB 0xdac #define rOFDM_ShortCFOCD 0xdb0 #define rOFDM_LongCFOAB 0xdb4 #define rOFDM_LongCFOCD 0xdb8 #define rOFDM_TailCFOAB 0xdbc #define rOFDM_TailCFOCD 0xdc0 #define rOFDM_PWMeasure1 0xdc4 #define rOFDM_PWMeasure2 0xdc8 #define rOFDM_BWReport 0xdcc #define rOFDM_AGCReport 0xdd0 #define rOFDM_RxSNR 0xdd4 #define rOFDM_RxEVMCSI 0xdd8 #define rOFDM_SIGReport 0xddc /* page e */ #define rTxAGC_Rate18_06 0xe00 #define rTxAGC_Rate54_24 0xe04 #define rTxAGC_CCK_Mcs32 0xe08 #define rTxAGC_Mcs03_Mcs00 0xe10 #define rTxAGC_Mcs07_Mcs04 0xe14 #define rTxAGC_Mcs11_Mcs08 0xe18 #define rTxAGC_Mcs15_Mcs12 0xe1c /* RF * Zebra1 */ #define rZebra1_HSSIEnable 0x0 #define rZebra1_TRxEnable1 0x1 #define rZebra1_TRxEnable2 0x2 #define rZebra1_AGC 0x4 #define rZebra1_ChargePump 0x5 #define rZebra1_Channel 0x7 #define rZebra1_TxGain 0x8 #define rZebra1_TxLPF 0x9 #define rZebra1_RxLPF 0xb #define rZebra1_RxHPFCorner 0xc /* Zebra4 */ #define rGlobalCtrl 0 #define rRTL8256_TxLPF 19 #define rRTL8256_RxLPF 11 /* RTL8258 */ #define rRTL8258_TxLPF 0x11 #define rRTL8258_RxLPF 0x13 #define rRTL8258_RSSILPF 0xa /* Bit Mask * page-1 */ #define bBBResetB 0x100 #define bGlobalResetB 0x200 #define bOFDMTxStart 0x4 #define bCCKTxStart 0x8 #define bCRC32Debug 0x100 #define bPMACLoopback 0x10 #define bTxLSIG 0xffffff #define bOFDMTxRate 0xf #define bOFDMTxReserved 0x10 #define bOFDMTxLength 0x1ffe0 #define bOFDMTxParity 0x20000 #define bTxHTSIG1 0xffffff #define bTxHTMCSRate 0x7f #define bTxHTBW 0x80 #define bTxHTLength 0xffff00 #define bTxHTSIG2 0xffffff #define bTxHTSmoothing 0x1 #define bTxHTSounding 0x2 #define bTxHTReserved 0x4 #define bTxHTAggreation 0x8 #define bTxHTSTBC 0x30 #define bTxHTAdvanceCoding 0x40 #define bTxHTShortGI 0x80 #define bTxHTNumberHT_LTF 0x300 #define bTxHTCRC8 0x3fc00 #define bCounterReset 0x10000 #define bNumOfOFDMTx 0xffff #define bNumOfCCKTx 0xffff0000 #define bTxIdleInterval 0xffff #define bOFDMService 
0xffff0000 #define bTxMACHeader 0xffffffff #define bTxDataInit 0xff #define bTxHTMode 0x100 #define bTxDataType 0x30000 #define bTxRandomSeed 0xffffffff #define bCCKTxPreamble 0x1 #define bCCKTxSFD 0xffff0000 #define bCCKTxSIG 0xff #define bCCKTxService 0xff00 #define bCCKLengthExt 0x8000 #define bCCKTxLength 0xffff0000 #define bCCKTxCRC16 0xffff #define bCCKTxStatus 0x1 #define bOFDMTxStatus 0x2 /* page-8 */ #define bRFMOD 0x1 #define bJapanMode 0x2 #define bCCKTxSC 0x30 #define bCCKEn 0x1000000 #define bOFDMEn 0x2000000 #define bOFDMRxADCPhase 0x10000 #define bOFDMTxDACPhase 0x40000 #define bXATxAGC 0x3f #define bXBTxAGC 0xf00 #define bXCTxAGC 0xf000 #define bXDTxAGC 0xf0000 #define bPAStart 0xf0000000 #define bTRStart 0x00f00000 #define bRFStart 0x0000f000 #define bBBStart 0x000000f0 #define bBBCCKStart 0x0000000f #define bPAEnd 0xf /* Reg0x814 */ #define bTREnd 0x0f000000 #define bRFEnd 0x000f0000 #define bCCAMask 0x000000f0 /* T2R */ #define bR2RCCAMask 0x00000f00 #define bHSSI_R2TDelay 0xf8000000 #define bHSSI_T2RDelay 0xf80000 #define bContTxHSSI 0x400 /* chane gain at continue Tx */ #define bIGFromCCK 0x200 #define bAGCAddress 0x3f #define bRxHPTx 0x7000 #define bRxHPT2R 0x38000 #define bRxHPCCKIni 0xc0000 #define bAGCTxCode 0xc00000 #define bAGCRxCode 0x300000 #define b3WireDataLength 0x800 #define b3WireAddressLength 0x400 #define b3WireRFPowerDown 0x1 /* #define bHWSISelect 0x8 */ #define b5GPAPEPolarity 0x40000000 #define b2GPAPEPolarity 0x80000000 #define bRFSW_TxDefaultAnt 0x3 #define bRFSW_TxOptionAnt 0x30 #define bRFSW_RxDefaultAnt 0x300 #define bRFSW_RxOptionAnt 0x3000 #define bRFSI_3WireData 0x1 #define bRFSI_3WireClock 0x2 #define bRFSI_3WireLoad 0x4 #define bRFSI_3WireRW 0x8 #define bRFSI_3Wire 0xf /* 3-wire total control */ #define bRFSI_RFENV 0x10 #define bRFSI_TRSW 0x20 #define bRFSI_TRSWB 0x40 #define bRFSI_ANTSW 0x100 #define bRFSI_ANTSWB 0x200 #define bRFSI_PAPE 0x400 #define bRFSI_PAPE5G 0x800 #define bBandSelect 0x1 #define bHTSIG2_GI 
0x80 #define bHTSIG2_Smoothing 0x01 #define bHTSIG2_Sounding 0x02 #define bHTSIG2_Aggreaton 0x08 #define bHTSIG2_STBC 0x30 #define bHTSIG2_AdvCoding 0x40 #define bHTSIG2_NumOfHTLTF 0x300 #define bHTSIG2_CRC8 0x3fc #define bHTSIG1_MCS 0x7f #define bHTSIG1_BandWidth 0x80 #define bHTSIG1_HTLength 0xffff #define bLSIG_Rate 0xf #define bLSIG_Reserved 0x10 #define bLSIG_Length 0x1fffe #define bLSIG_Parity 0x20 #define bCCKRxPhase 0x4 #define bLSSIReadAddress 0x3f000000 /* LSSI "Read" Address */ #define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */ #define bLSSIReadBackData 0xfff #define bLSSIReadOKFlag 0x1000 #define bCCKSampleRate 0x8 /* 0: 44MHz, 1:88MHz */ #define bRegulator0Standby 0x1 #define bRegulatorPLLStandby 0x2 #define bRegulator1Standby 0x4 #define bPLLPowerUp 0x8 #define bDPLLPowerUp 0x10 #define bDA10PowerUp 0x20 #define bAD7PowerUp 0x200 #define bDA6PowerUp 0x2000 #define bXtalPowerUp 0x4000 #define b40MDClkPowerUP 0x8000 #define bDA6DebugMode 0x20000 #define bDA6Swing 0x380000 #define bADClkPhase 0x4000000 #define b80MClkDelay 0x18000000 #define bAFEWatchDogEnable 0x20000000 #define bXtalCap 0x0f000000 #define bIntDifClkEnable 0x400 #define bExtSigClkEnable 0x800 #define bBandgapMbiasPowerUp 0x10000 #define bAD11SHGain 0xc0000 #define bAD11InputRange 0x700000 #define bAD11OPCurrent 0x3800000 #define bIPathLoopback 0x4000000 #define bQPathLoopback 0x8000000 #define bAFELoopback 0x10000000 #define bDA10Swing 0x7e0 #define bDA10Reverse 0x800 #define bDAClkSource 0x1000 #define bAD7InputRange 0x6000 #define bAD7Gain 0x38000 #define bAD7OutputCMMode 0x40000 #define bAD7InputCMMode 0x380000 #define bAD7Current 0xc00000 #define bRegulatorAdjust 0x7000000 #define bAD11PowerUpAtTx 0x1 #define bDA10PSAtTx 0x10 #define bAD11PowerUpAtRx 0x100 #define bDA10PSAtRx 0x1000 #define bCCKRxAGCFormat 0x200 #define bPSDFFTSamplepPoint 0xc000 #define bPSDAverageNum 0x3000 #define bIQPathControl 0xc00 #define bPSDFreq 0x3ff #define bPSDAntennaPath 0x30 #define 
bPSDIQSwitch 0x40 #define bPSDRxTrigger 0x400000 #define bPSDTxTrigger 0x80000000 #define bPSDSineToneScale 0x7f000000 #define bPSDReport 0xffff /* page-9 */ #define bOFDMTxSC 0x30000000 #define bCCKTxOn 0x1 #define bOFDMTxOn 0x2 #define bDebugPage 0xfff /* reset debug page and also HWord, LWord */ #define bDebugItem 0xff /* reset debug page and LWord */ #define bAntL 0x10 #define bAntNonHT 0x100 #define bAntHT1 0x1000 #define bAntHT2 0x10000 #define bAntHT1S1 0x100000 #define bAntNonHTS1 0x1000000 /* page-a */ #define bCCKBBMode 0x3 #define bCCKTxPowerSaving 0x80 #define bCCKRxPowerSaving 0x40 #define bCCKSideBand 0x10 #define bCCKScramble 0x8 #define bCCKAntDiversity 0x8000 #define bCCKCarrierRecovery 0x4000 #define bCCKTxRate 0x3000 #define bCCKDCCancel 0x0800 #define bCCKISICancel 0x0400 #define bCCKMatchFilter 0x0200 #define bCCKEqualizer 0x0100 #define bCCKPreambleDetect 0x800000 #define bCCKFastFalseCCA 0x400000 #define bCCKChEstStart 0x300000 #define bCCKCCACount 0x080000 #define bCCKcs_lim 0x070000 #define bCCKBistMode 0x80000000 #define bCCKCCAMask 0x40000000 #define bCCKTxDACPhase 0x4 #define bCCKRxADCPhase 0x20000000 /* r_rx_clk */ #define bCCKr_cp_mode0 0x0100 #define bCCKTxDCOffset 0xf0 #define bCCKRxDCOffset 0xf #define bCCKCCAMode 0xc000 #define bCCKFalseCS_lim 0x3f00 #define bCCKCS_ratio 0xc00000 #define bCCKCorgBit_sel 0x300000 #define bCCKPD_lim 0x0f0000 #define bCCKNewCCA 0x80000000 #define bCCKRxHPofIG 0x8000 #define bCCKRxIG 0x7f00 #define bCCKLNAPolarity 0x800000 #define bCCKRx1stGain 0x7f0000 #define bCCKRFExtend 0x20000000 /* CCK Rx initial gain polarity */ #define bCCKRxAGCSatLevel 0x1f000000 #define bCCKRxAGCSatCount 0xe0 #define bCCKRxRFSettle 0x1f /* AGCsamp_dly */ #define bCCKFixedRxAGC 0x8000 /* #define bCCKRxAGCFormat 0x4000 */ /* remove to HSSI register 0x824 */ #define bCCKAntennaPolarity 0x2000 #define bCCKTxFilterType 0x0c00 #define bCCKRxAGCReportType 0x0300 #define bCCKRxDAGCEn 0x80000000 #define bCCKRxDAGCPeriod 0x20000000 
#define bCCKRxDAGCSatLevel 0x1f000000 #define bCCKTimingRecovery 0x800000 #define bCCKTxC0 0x3f0000 #define bCCKTxC1 0x3f000000 #define bCCKTxC2 0x3f #define bCCKTxC3 0x3f00 #define bCCKTxC4 0x3f0000 #define bCCKTxC5 0x3f000000 #define bCCKTxC6 0x3f #define bCCKTxC7 0x3f00 #define bCCKDebugPort 0xff0000 #define bCCKDACDebug 0x0f000000 #define bCCKFalseAlarmEnable 0x8000 #define bCCKFalseAlarmRead 0x4000 #define bCCKTRSSI 0x7f #define bCCKRxAGCReport 0xfe #define bCCKRxReport_AntSel 0x80000000 #define bCCKRxReport_MFOff 0x40000000 #define bCCKRxRxReport_SQLoss 0x20000000 #define bCCKRxReport_Pktloss 0x10000000 #define bCCKRxReport_Lockedbit 0x08000000 #define bCCKRxReport_RateError 0x04000000 #define bCCKRxReport_RxRate 0x03000000 #define bCCKRxFACounterLower 0xff #define bCCKRxFACounterUpper 0xff000000 #define bCCKRxHPAGCStart 0xe000 #define bCCKRxHPAGCFinal 0x1c00 #define bCCKRxFalseAlarmEnable 0x8000 #define bCCKFACounterFreeze 0x4000 #define bCCKTxPathSel 0x10000000 #define bCCKDefaultRxPath 0xc000000 #define bCCKOptionRxPath 0x3000000 /* page c */ #define bNumOfSTF 0x3 #define bShift_L 0xc0 #define bGI_TH 0xc #define bRxPathA 0x1 #define bRxPathB 0x2 #define bRxPathC 0x4 #define bRxPathD 0x8 #define bTxPathA 0x1 #define bTxPathB 0x2 #define bTxPathC 0x4 #define bTxPathD 0x8 #define bTRSSIFreq 0x200 #define bADCBackoff 0x3000 #define bDFIRBackoff 0xc000 #define bTRSSILatchPhase 0x10000 #define bRxIDCOffset 0xff #define bRxQDCOffset 0xff00 #define bRxDFIRMode 0x1800000 #define bRxDCNFType 0xe000000 #define bRXIQImb_A 0x3ff #define bRXIQImb_B 0xfc00 #define bRXIQImb_C 0x3f0000 #define bRXIQImb_D 0xffc00000 #define bDC_dc_Notch 0x60000 #define bRxNBINotch 0x1f000000 #define bPD_TH 0xf #define bPD_TH_Opt2 0xc000 #define bPWED_TH 0x700 #define bIfMF_Win_L 0x800 #define bPD_Option 0x1000 #define bMF_Win_L 0xe000 #define bBW_Search_L 0x30000 #define bwin_enh_L 0xc0000 #define bBW_TH 0x700000 #define bED_TH2 0x3800000 #define bBW_option 0x4000000 #define bRatio_TH 
0x18000000 #define bWindow_L 0xe0000000 #define bSBD_Option 0x1 #define bFrame_TH 0x1c #define bFS_Option 0x60 #define bDC_Slope_check 0x80 #define bFGuard_Counter_DC_L 0xe00 #define bFrame_Weight_Short 0x7000 #define bSub_Tune 0xe00000 #define bFrame_DC_Length 0xe000000 #define bSBD_start_offset 0x30000000 #define bFrame_TH_2 0x7 #define bFrame_GI2_TH 0x38 #define bGI2_Sync_en 0x40 #define bSarch_Short_Early 0x300 #define bSarch_Short_Late 0xc00 #define bSarch_GI2_Late 0x70000 #define bCFOAntSum 0x1 #define bCFOAcc 0x2 #define bCFOStartOffset 0xc #define bCFOLookBack 0x70 #define bCFOSumWeight 0x80 #define bDAGCEnable 0x10000 #define bTXIQImb_A 0x3ff #define bTXIQImb_B 0xfc00 #define bTXIQImb_C 0x3f0000 #define bTXIQImb_D 0xffc00000 #define bTxIDCOffset 0xff #define bTxQDCOffset 0xff00 #define bTxDFIRMode 0x10000 #define bTxPesudoNoiseOn 0x4000000 #define bTxPesudoNoise_A 0xff #define bTxPesudoNoise_B 0xff00 #define bTxPesudoNoise_C 0xff0000 #define bTxPesudoNoise_D 0xff000000 #define bCCADropOption 0x20000 #define bCCADropThres 0xfff00000 #define bEDCCA_H 0xf #define bEDCCA_L 0xf0 #define bLambda_ED 0x300 #define bRxInitialGain 0x7f #define bRxAntDivEn 0x80 #define bRxAGCAddressForLNA 0x7f00 #define bRxHighPowerFlow 0x8000 #define bRxAGCFreezeThres 0xc0000 #define bRxFreezeStep_AGC1 0x300000 #define bRxFreezeStep_AGC2 0xc00000 #define bRxFreezeStep_AGC3 0x3000000 #define bRxFreezeStep_AGC0 0xc000000 #define bRxRssi_Cmp_En 0x10000000 #define bRxQuickAGCEn 0x20000000 #define bRxAGCFreezeThresMode 0x40000000 #define bRxOverFlowCheckType 0x80000000 #define bRxAGCShift 0x7f #define bTRSW_Tri_Only 0x80 #define bPowerThres 0x300 #define bRxAGCEn 0x1 #define bRxAGCTogetherEn 0x2 #define bRxAGCMin 0x4 #define bRxHP_Ini 0x7 #define bRxHP_TRLNA 0x70 #define bRxHP_RSSI 0x700 #define bRxHP_BBP1 0x7000 #define bRxHP_BBP2 0x70000 #define bRxHP_BBP3 0x700000 #define bRSSI_H 0x7f0000 /* the threshold for high power */ #define bRSSI_Gen 0x7f000000 /* the threshold for ant 
diversity */ #define bRxSettle_TRSW 0x7 #define bRxSettle_LNA 0x38 #define bRxSettle_RSSI 0x1c0 #define bRxSettle_BBP 0xe00 #define bRxSettle_RxHP 0x7000 #define bRxSettle_AntSW_RSSI 0x38000 #define bRxSettle_AntSW 0xc0000 #define bRxProcessTime_DAGC 0x300000 #define bRxSettle_HSSI 0x400000 #define bRxProcessTime_BBPPW 0x800000 #define bRxAntennaPowerShift 0x3000000 #define bRSSITableSelect 0xc000000 #define bRxHP_Final 0x7000000 #define bRxHTSettle_BBP 0x7 #define bRxHTSettle_HSSI 0x8 #define bRxHTSettle_RxHP 0x70 #define bRxHTSettle_BBPPW 0x80 #define bRxHTSettle_Idle 0x300 #define bRxHTSettle_Reserved 0x1c00 #define bRxHTRxHPEn 0x8000 #define bRxHTAGCFreezeThres 0x30000 #define bRxHTAGCTogetherEn 0x40000 #define bRxHTAGCMin 0x80000 #define bRxHTAGCEn 0x100000 #define bRxHTDAGCEn 0x200000 #define bRxHTRxHP_BBP 0x1c00000 #define bRxHTRxHP_Final 0xe0000000 #define bRxPWRatioTH 0x3 #define bRxPWRatioEn 0x4 #define bRxMFHold 0x3800 #define bRxPD_Delay_TH1 0x38 #define bRxPD_Delay_TH2 0x1c0 #define bRxPD_DC_COUNT_MAX 0x600 /* #define bRxMF_Hold 0x3800 */ #define bRxPD_Delay_TH 0x8000 #define bRxProcess_Delay 0xf0000 #define bRxSearchrange_GI2_Early 0x700000 #define bRxFrame_Guard_Counter_L 0x3800000 #define bRxSGI_Guard_L 0xc000000 #define bRxSGI_Search_L 0x30000000 #define bRxSGI_TH 0xc0000000 #define bDFSCnt0 0xff #define bDFSCnt1 0xff00 #define bDFSFlag 0xf0000 #define bMFWeightSum 0x300000 #define bMinIdxTH 0x7f000000 #define bDAFormat 0x40000 #define bTxChEmuEnable 0x01000000 #define bTRSWIsolation_A 0x7f #define bTRSWIsolation_B 0x7f00 #define bTRSWIsolation_C 0x7f0000 #define bTRSWIsolation_D 0x7f000000 #define bExtLNAGain 0x7c00 /* page d */ #define bSTBCEn 0x4 #define bAntennaMapping 0x10 #define bNss 0x20 #define bCFOAntSumD 0x200 #define bPHYCounterReset 0x8000000 #define bCFOReportGet 0x4000000 #define bOFDMContinueTx 0x10000000 #define bOFDMSingleCarrier 0x20000000 #define bOFDMSingleTone 0x40000000 /* #define bRxPath1 0x01 * #define bRxPath2 0x02 * 
#define bRxPath3 0x04 * #define bRxPath4 0x08 * #define bTxPath1 0x10 * #define bTxPath2 0x20 */ #define bHTDetect 0x100 #define bCFOEn 0x10000 #define bCFOValue 0xfff00000 #define bSigTone_Re 0x3f #define bSigTone_Im 0x7f00 #define bCounter_CCA 0xffff #define bCounter_ParityFail 0xffff0000 #define bCounter_RateIllegal 0xffff #define bCounter_CRC8Fail 0xffff0000 #define bCounter_MCSNoSupport 0xffff #define bCounter_FastSync 0xffff #define bShortCFO 0xfff #define bShortCFOTLength 12 /* total */ #define bShortCFOFLength 11 /* fraction */ #define bLongCFO 0x7ff #define bLongCFOTLength 11 #define bLongCFOFLength 11 #define bTailCFO 0x1fff #define bTailCFOTLength 13 #define bTailCFOFLength 12 #define bmax_en_pwdB 0xffff #define bCC_power_dB 0xffff0000 #define bnoise_pwdB 0xffff #define bPowerMeasTLength 10 #define bPowerMeasFLength 3 #define bRx_HT_BW 0x1 #define bRxSC 0x6 #define bRx_HT 0x8 #define bNB_intf_det_on 0x1 #define bIntf_win_len_cfg 0x30 #define bNB_Intf_TH_cfg 0x1c0 #define bRFGain 0x3f #define bTableSel 0x40 #define bTRSW 0x80 #define bRxSNR_A 0xff #define bRxSNR_B 0xff00 #define bRxSNR_C 0xff0000 #define bRxSNR_D 0xff000000 #define bSNREVMTLength 8 #define bSNREVMFLength 1 #define bCSI1st 0xff #define bCSI2nd 0xff00 #define bRxEVM1st 0xff0000 #define bRxEVM2nd 0xff000000 #define bSIGEVM 0xff #define bPWDB 0xff00 #define bSGIEN 0x10000 #define bSFactorQAM1 0xf #define bSFactorQAM2 0xf0 #define bSFactorQAM3 0xf00 #define bSFactorQAM4 0xf000 #define bSFactorQAM5 0xf0000 #define bSFactorQAM6 0xf0000 #define bSFactorQAM7 0xf00000 #define bSFactorQAM8 0xf000000 #define bSFactorQAM9 0xf0000000 #define bCSIScheme 0x100000 #define bNoiseLvlTopSet 0x3 #define bChSmooth 0x4 #define bChSmoothCfg1 0x38 #define bChSmoothCfg2 0x1c0 #define bChSmoothCfg3 0xe00 #define bChSmoothCfg4 0x7000 #define bMRCMode 0x800000 #define bTHEVMCfg 0x7000000 #define bLoopFitType 0x1 #define bUpdCFO 0x40 #define bUpdCFOOffData 0x80 #define bAdvUpdCFO 0x100 #define bAdvTimeCtrl 0x800 
#define bUpdClko 0x1000 #define bFC 0x6000 #define bTrackingMode 0x8000 #define bPhCmpEnable 0x10000 #define bUpdClkoLTF 0x20000 #define bComChCFO 0x40000 #define bCSIEstiMode 0x80000 #define bAdvUpdEqz 0x100000 #define bUChCfg 0x7000000 #define bUpdEqz 0x8000000 /* page e */ #define bTxAGCRate18_06 0x7f7f7f7f #define bTxAGCRate54_24 0x7f7f7f7f #define bTxAGCRateMCS32 0x7f #define bTxAGCRateCCK 0x7f00 #define bTxAGCRateMCS3_MCS0 0x7f7f7f7f #define bTxAGCRateMCS7_MCS4 0x7f7f7f7f #define bTxAGCRateMCS11_MCS8 0x7f7f7f7f #define bTxAGCRateMCS15_MCS12 0x7f7f7f7f /* Rx Pseduo noise */ #define bRxPesudoNoiseOn 0x20000000 #define bRxPesudoNoise_A 0xff #define bRxPesudoNoise_B 0xff00 #define bRxPesudoNoise_C 0xff0000 #define bRxPesudoNoise_D 0xff000000 #define bPesudoNoiseState_A 0xffff #define bPesudoNoiseState_B 0xffff0000 #define bPesudoNoiseState_C 0xffff #define bPesudoNoiseState_D 0xffff0000 /* RF * Zebra1 */ #define bZebra1_HSSIEnable 0x8 #define bZebra1_TRxControl 0xc00 #define bZebra1_TRxGainSetting 0x07f #define bZebra1_RxCorner 0xc00 #define bZebra1_TxChargePump 0x38 #define bZebra1_RxChargePump 0x7 #define bZebra1_ChannelNum 0xf80 #define bZebra1_TxLPFBW 0x400 #define bZebra1_RxLPFBW 0x600 /* Zebra4 */ #define bRTL8256RegModeCtrl1 0x100 #define bRTL8256RegModeCtrl0 0x40 #define bRTL8256_TxLPFBW 0x18 #define bRTL8256_RxLPFBW 0x600 /* RTL8258 */ #define bRTL8258_TxLPFBW 0xc #define bRTL8258_RxLPFBW 0xc00 #define bRTL8258_RSSILPFBW 0xc0 /* byte endable for sb_write */ #define bByte0 0x1 #define bByte1 0x2 #define bByte2 0x4 #define bByte3 0x8 #define bWord0 0x3 #define bWord1 0xc #define bDWord 0xf /* for PutRegsetting & GetRegSetting BitMask */ #define bMaskByte0 0xff #define bMaskByte1 0xff00 #define bMaskByte2 0xff0000 #define bMaskByte3 0xff000000 #define bMaskHWord 0xffff0000 #define bMaskLWord 0x0000ffff #define bMaskDWord 0xffffffff /* for PutRFRegsetting & GetRFRegSetting BitMask */ #define bMask12Bits 0xfff #define bEnable 0x1 #define bDisable 0x0 #define 
LeftAntenna 0x0 #define RightAntenna 0x1 #define tCheckTxStatus 500 /* 500ms */ #define tUpdateRxCounter 100 /* 100ms */ #define rateCCK 0 #define rateOFDM 1 #define rateHT 2 /* define Register-End */ #define bPMAC_End 0x1ff #define bFPGAPHY0_End 0x8ff #define bFPGAPHY1_End 0x9ff #define bCCKPHY0_End 0xaff #define bOFDMPHY0_End 0xcff #define bOFDMPHY1_End 0xdff /* define max debug item in each debug page * #define bMaxItem_FPGA_PHY0 0x9 * #define bMaxItem_FPGA_PHY1 0x3 * #define bMaxItem_PHY_11B 0x16 * #define bMaxItem_OFDM_PHY0 0x29 * #define bMaxItem_OFDM_PHY1 0x0 */ #define bPMACControl 0x0 #define bWMACControl 0x1 #define bWNICControl 0x2 #define PathA 0x0 #define PathB 0x1 #define PathC 0x2 #define PathD 0x3 #define rRTL8256RxMixerPole 0xb #define bZebraRxMixerPole 0x6 #define rRTL8256TxBBOPBias 0x9 #define bRTL8256TxBBOPBias 0x400 #define rRTL8256TxBBBW 19 #define bRTL8256TxBBBW 0x18 #endif /* __INC_HAL8190PCIPHYREG_H */
null
null
null
null
100,823
13,575
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
178,570
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef __ARCH_M68K_CMPXCHG__ #define __ARCH_M68K_CMPXCHG__ #include <linux/irqflags.h> struct __xchg_dummy { unsigned long a[100]; }; #define __xg(x) ((volatile struct __xchg_dummy *)(x)) extern unsigned long __invalid_xchg_size(unsigned long, volatile void *, int); #ifndef CONFIG_RMW_INSNS static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) { unsigned long flags, tmp; local_irq_save(flags); switch (size) { case 1: tmp = *(u8 *)ptr; *(u8 *)ptr = x; x = tmp; break; case 2: tmp = *(u16 *)ptr; *(u16 *)ptr = x; x = tmp; break; case 4: tmp = *(u32 *)ptr; *(u32 *)ptr = x; x = tmp; break; default: tmp = __invalid_xchg_size(x, ptr, size); break; } local_irq_restore(flags); return x; } #else static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) { switch (size) { case 1: __asm__ __volatile__ ("moveb %2,%0\n\t" "1:\n\t" "casb %0,%1,%2\n\t" "jne 1b" : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory"); break; case 2: __asm__ __volatile__ ("movew %2,%0\n\t" "1:\n\t" "casw %0,%1,%2\n\t" "jne 1b" : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory"); break; case 4: __asm__ __volatile__ ("movel %2,%0\n\t" "1:\n\t" "casl %0,%1,%2\n\t" "jne 1b" : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory"); break; default: x = __invalid_xchg_size(x, ptr, size); break; } return x; } #endif #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) #include <asm-generic/cmpxchg-local.h> #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) extern unsigned long __invalid_cmpxchg_size(volatile void *, unsigned long, unsigned long, int); /* * Atomic compare and exchange. Compare OLD with MEM, if identical, * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. 
*/ #ifdef CONFIG_RMW_INSNS static inline unsigned long __cmpxchg(volatile void *p, unsigned long old, unsigned long new, int size) { switch (size) { case 1: __asm__ __volatile__ ("casb %0,%2,%1" : "=d" (old), "=m" (*(char *)p) : "d" (new), "0" (old), "m" (*(char *)p)); break; case 2: __asm__ __volatile__ ("casw %0,%2,%1" : "=d" (old), "=m" (*(short *)p) : "d" (new), "0" (old), "m" (*(short *)p)); break; case 4: __asm__ __volatile__ ("casl %0,%2,%1" : "=d" (old), "=m" (*(int *)p) : "d" (new), "0" (old), "m" (*(int *)p)); break; default: old = __invalid_cmpxchg_size(p, old, new, size); break; } return old; } #define cmpxchg(ptr, o, n) \ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ (unsigned long)(n), sizeof(*(ptr)))) #define cmpxchg_local(ptr, o, n) \ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ (unsigned long)(n), sizeof(*(ptr)))) #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) #else /* * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make * them available. */ #define cmpxchg_local(ptr, o, n) \ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ (unsigned long)(n), sizeof(*(ptr)))) #include <asm-generic/cmpxchg.h> #endif #endif /* __ARCH_M68K_CMPXCHG__ */
null
null
null
null
86,917
36,070
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
201,065
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Driver for Epson RTC-9701JE * * Copyright (C) 2008 Magnus Damm * * Based on rtc-max6902.c * * Copyright (C) 2006 8D Technologies inc. * Copyright (C) 2004 Compulab Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/device.h> #include <linux/init.h> #include <linux/rtc.h> #include <linux/spi/spi.h> #include <linux/bcd.h> #include <linux/delay.h> #include <linux/bitops.h> #define RSECCNT 0x00 /* Second Counter */ #define RMINCNT 0x01 /* Minute Counter */ #define RHRCNT 0x02 /* Hour Counter */ #define RWKCNT 0x03 /* Week Counter */ #define RDAYCNT 0x04 /* Day Counter */ #define RMONCNT 0x05 /* Month Counter */ #define RYRCNT 0x06 /* Year Counter */ #define R100CNT 0x07 /* Y100 Counter */ #define RMINAR 0x08 /* Minute Alarm */ #define RHRAR 0x09 /* Hour Alarm */ #define RWKAR 0x0a /* Week/Day Alarm */ #define RTIMCNT 0x0c /* Interval Timer */ #define REXT 0x0d /* Extension Register */ #define RFLAG 0x0e /* RTC Flag Register */ #define RCR 0x0f /* RTC Control Register */ static int write_reg(struct device *dev, int address, unsigned char data) { struct spi_device *spi = to_spi_device(dev); unsigned char buf[2]; buf[0] = address & 0x7f; buf[1] = data; return spi_write(spi, buf, ARRAY_SIZE(buf)); } static int read_regs(struct device *dev, unsigned char *regs, int no_regs) { struct spi_device *spi = to_spi_device(dev); u8 txbuf[1], rxbuf[1]; int k, ret; ret = 0; for (k = 0; ret == 0 && k < no_regs; k++) { txbuf[0] = 0x80 | regs[k]; ret = spi_write_then_read(spi, txbuf, 1, rxbuf, 1); regs[k] = rxbuf[0]; } return ret; } static int r9701_get_datetime(struct device *dev, struct rtc_time *dt) { int ret; unsigned char buf[] = { RSECCNT, RMINCNT, RHRCNT, RDAYCNT, RMONCNT, RYRCNT }; ret = read_regs(dev, buf, ARRAY_SIZE(buf)); if (ret) 
return ret; memset(dt, 0, sizeof(*dt)); dt->tm_sec = bcd2bin(buf[0]); /* RSECCNT */ dt->tm_min = bcd2bin(buf[1]); /* RMINCNT */ dt->tm_hour = bcd2bin(buf[2]); /* RHRCNT */ dt->tm_mday = bcd2bin(buf[3]); /* RDAYCNT */ dt->tm_mon = bcd2bin(buf[4]) - 1; /* RMONCNT */ dt->tm_year = bcd2bin(buf[5]) + 100; /* RYRCNT */ /* the rtc device may contain illegal values on power up * according to the data sheet. make sure they are valid. */ return rtc_valid_tm(dt); } static int r9701_set_datetime(struct device *dev, struct rtc_time *dt) { int ret, year; year = dt->tm_year + 1900; if (year >= 2100 || year < 2000) return -EINVAL; ret = write_reg(dev, RHRCNT, bin2bcd(dt->tm_hour)); ret = ret ? ret : write_reg(dev, RMINCNT, bin2bcd(dt->tm_min)); ret = ret ? ret : write_reg(dev, RSECCNT, bin2bcd(dt->tm_sec)); ret = ret ? ret : write_reg(dev, RDAYCNT, bin2bcd(dt->tm_mday)); ret = ret ? ret : write_reg(dev, RMONCNT, bin2bcd(dt->tm_mon + 1)); ret = ret ? ret : write_reg(dev, RYRCNT, bin2bcd(dt->tm_year - 100)); ret = ret ? ret : write_reg(dev, RWKCNT, 1 << dt->tm_wday); return ret; } static const struct rtc_class_ops r9701_rtc_ops = { .read_time = r9701_get_datetime, .set_time = r9701_set_datetime, }; static int r9701_probe(struct spi_device *spi) { struct rtc_device *rtc; struct rtc_time dt; unsigned char tmp; int res; tmp = R100CNT; res = read_regs(&spi->dev, &tmp, 1); if (res || tmp != 0x20) { dev_err(&spi->dev, "cannot read RTC register\n"); return -ENODEV; } /* * The device seems to be present. Now check if the registers * contain invalid values. 
If so, try to write a default date: * 2000/1/1 00:00:00 */ if (r9701_get_datetime(&spi->dev, &dt)) { dev_info(&spi->dev, "trying to repair invalid date/time\n"); dt.tm_sec = 0; dt.tm_min = 0; dt.tm_hour = 0; dt.tm_mday = 1; dt.tm_mon = 0; dt.tm_year = 100; if (r9701_set_datetime(&spi->dev, &dt) || r9701_get_datetime(&spi->dev, &dt)) { dev_err(&spi->dev, "cannot repair RTC register\n"); return -ENODEV; } } rtc = devm_rtc_device_register(&spi->dev, "r9701", &r9701_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) return PTR_ERR(rtc); spi_set_drvdata(spi, rtc); return 0; } static int r9701_remove(struct spi_device *spi) { return 0; } static struct spi_driver r9701_driver = { .driver = { .name = "rtc-r9701", }, .probe = r9701_probe, .remove = r9701_remove, }; module_spi_driver(r9701_driver); MODULE_DESCRIPTION("r9701 spi RTC driver"); MODULE_AUTHOR("Magnus Damm <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:rtc-r9701");
null
null
null
null
109,412
3,509
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
168,504
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Adrian Hunter * Artem Bityutskiy (Битюцкий Артём) */ /* * This file implements the scan which is a general-purpose function for * determining what nodes are in an eraseblock. The scan is used to replay the * journal, to do garbage collection. for the TNC in-the-gaps method, and by * debugging functions. */ #include "ubifs.h" /** * scan_padding_bytes - scan for padding bytes. * @buf: buffer to scan * @len: length of buffer * * This function returns the number of padding bytes on success and * %SCANNED_GARBAGE on failure. */ static int scan_padding_bytes(void *buf, int len) { int pad_len = 0, max_pad_len = min_t(int, UBIFS_PAD_NODE_SZ, len); uint8_t *p = buf; dbg_scan("not a node"); while (pad_len < max_pad_len && *p++ == UBIFS_PADDING_BYTE) pad_len += 1; if (!pad_len || (pad_len & 7)) return SCANNED_GARBAGE; dbg_scan("%d padding bytes", pad_len); return pad_len; } /** * ubifs_scan_a_node - scan for a node or padding. * @c: UBIFS file-system description object * @buf: buffer to scan * @len: length of buffer * @lnum: logical eraseblock number * @offs: offset within the logical eraseblock * @quiet: print no messages * * This function returns a scanning code to indicate what was scanned. 
*/ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum, int offs, int quiet) { struct ubifs_ch *ch = buf; uint32_t magic; magic = le32_to_cpu(ch->magic); if (magic == 0xFFFFFFFF) { dbg_scan("hit empty space at LEB %d:%d", lnum, offs); return SCANNED_EMPTY_SPACE; } if (magic != UBIFS_NODE_MAGIC) return scan_padding_bytes(buf, len); if (len < UBIFS_CH_SZ) return SCANNED_GARBAGE; dbg_scan("scanning %s at LEB %d:%d", dbg_ntype(ch->node_type), lnum, offs); if (ubifs_check_node(c, buf, lnum, offs, quiet, 1)) return SCANNED_A_CORRUPT_NODE; if (ch->node_type == UBIFS_PAD_NODE) { struct ubifs_pad_node *pad = buf; int pad_len = le32_to_cpu(pad->pad_len); int node_len = le32_to_cpu(ch->len); /* Validate the padding node */ if (pad_len < 0 || offs + node_len + pad_len > c->leb_size) { if (!quiet) { ubifs_err(c, "bad pad node at LEB %d:%d", lnum, offs); ubifs_dump_node(c, pad); } return SCANNED_A_BAD_PAD_NODE; } /* Make the node pads to 8-byte boundary */ if ((node_len + pad_len) & 7) { if (!quiet) ubifs_err(c, "bad padding length %d - %d", offs, offs + node_len + pad_len); return SCANNED_A_BAD_PAD_NODE; } dbg_scan("%d bytes padded at LEB %d:%d, offset now %d", pad_len, lnum, offs, ALIGN(offs + node_len + pad_len, 8)); return node_len + pad_len; } return SCANNED_A_NODE; } /** * ubifs_start_scan - create LEB scanning information at start of scan. * @c: UBIFS file-system description object * @lnum: logical eraseblock number * @offs: offset to start at (usually zero) * @sbuf: scan buffer (must be c->leb_size) * * This function returns the scanned information on success and a negative error * code on failure. 
*/ struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum, int offs, void *sbuf) { struct ubifs_scan_leb *sleb; int err; dbg_scan("scan LEB %d:%d", lnum, offs); sleb = kzalloc(sizeof(struct ubifs_scan_leb), GFP_NOFS); if (!sleb) return ERR_PTR(-ENOMEM); sleb->lnum = lnum; INIT_LIST_HEAD(&sleb->nodes); sleb->buf = sbuf; err = ubifs_leb_read(c, lnum, sbuf + offs, offs, c->leb_size - offs, 0); if (err && err != -EBADMSG) { ubifs_err(c, "cannot read %d bytes from LEB %d:%d, error %d", c->leb_size - offs, lnum, offs, err); kfree(sleb); return ERR_PTR(err); } /* * Note, we ignore integrity errors (EBASMSG) because all the nodes are * protected by CRC checksums. */ return sleb; } /** * ubifs_end_scan - update LEB scanning information at end of scan. * @c: UBIFS file-system description object * @sleb: scanning information * @lnum: logical eraseblock number * @offs: offset to start at (usually zero) */ void ubifs_end_scan(const struct ubifs_info *c, struct ubifs_scan_leb *sleb, int lnum, int offs) { lnum = lnum; dbg_scan("stop scanning LEB %d at offset %d", lnum, offs); ubifs_assert(offs % c->min_io_size == 0); sleb->endpt = ALIGN(offs, c->min_io_size); } /** * ubifs_add_snod - add a scanned node to LEB scanning information. * @c: UBIFS file-system description object * @sleb: scanning information * @buf: buffer containing node * @offs: offset of node on flash * * This function returns %0 on success and a negative error code on failure. 
*/ int ubifs_add_snod(const struct ubifs_info *c, struct ubifs_scan_leb *sleb, void *buf, int offs) { struct ubifs_ch *ch = buf; struct ubifs_ino_node *ino = buf; struct ubifs_scan_node *snod; snod = kmalloc(sizeof(struct ubifs_scan_node), GFP_NOFS); if (!snod) return -ENOMEM; snod->sqnum = le64_to_cpu(ch->sqnum); snod->type = ch->node_type; snod->offs = offs; snod->len = le32_to_cpu(ch->len); snod->node = buf; switch (ch->node_type) { case UBIFS_INO_NODE: case UBIFS_DENT_NODE: case UBIFS_XENT_NODE: case UBIFS_DATA_NODE: /* * The key is in the same place in all keyed * nodes. */ key_read(c, &ino->key, &snod->key); break; default: invalid_key_init(c, &snod->key); break; } list_add_tail(&snod->list, &sleb->nodes); sleb->nodes_cnt += 1; return 0; } /** * ubifs_scanned_corruption - print information after UBIFS scanned corruption. * @c: UBIFS file-system description object * @lnum: LEB number of corruption * @offs: offset of corruption * @buf: buffer containing corruption */ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs, void *buf) { int len; ubifs_err(c, "corruption at LEB %d:%d", lnum, offs); len = c->leb_size - offs; if (len > 8192) len = 8192; ubifs_err(c, "first %d bytes from LEB %d:%d", len, lnum, offs); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 4, buf, len, 1); } /** * ubifs_scan - scan a logical eraseblock. * @c: UBIFS file-system description object * @lnum: logical eraseblock number * @offs: offset to start at (usually zero) * @sbuf: scan buffer (must be of @c->leb_size bytes in size) * @quiet: print no messages * * This function scans LEB number @lnum and returns complete information about * its contents. Returns the scanned information in case of success and, * %-EUCLEAN if the LEB neads recovery, and other negative error codes in case * of failure. * * If @quiet is non-zero, this function does not print large and scary * error messages and flash dumps in case of errors. 
*/ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, int offs, void *sbuf, int quiet) { void *buf = sbuf + offs; int err, len = c->leb_size - offs; struct ubifs_scan_leb *sleb; sleb = ubifs_start_scan(c, lnum, offs, sbuf); if (IS_ERR(sleb)) return sleb; while (len >= 8) { struct ubifs_ch *ch = buf; int node_len, ret; dbg_scan("look at LEB %d:%d (%d bytes left)", lnum, offs, len); cond_resched(); ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet); if (ret > 0) { /* Padding bytes or a valid padding node */ offs += ret; buf += ret; len -= ret; continue; } if (ret == SCANNED_EMPTY_SPACE) /* Empty space is checked later */ break; switch (ret) { case SCANNED_GARBAGE: ubifs_err(c, "garbage"); goto corrupted; case SCANNED_A_NODE: break; case SCANNED_A_CORRUPT_NODE: case SCANNED_A_BAD_PAD_NODE: ubifs_err(c, "bad node"); goto corrupted; default: ubifs_err(c, "unknown"); err = -EINVAL; goto error; } err = ubifs_add_snod(c, sleb, buf, offs); if (err) goto error; node_len = ALIGN(le32_to_cpu(ch->len), 8); offs += node_len; buf += node_len; len -= node_len; } if (offs % c->min_io_size) { if (!quiet) ubifs_err(c, "empty space starts at non-aligned offset %d", offs); goto corrupted; } ubifs_end_scan(c, sleb, lnum, offs); for (; len > 4; offs += 4, buf = buf + 4, len -= 4) if (*(uint32_t *)buf != 0xffffffff) break; for (; len; offs++, buf++, len--) if (*(uint8_t *)buf != 0xff) { if (!quiet) ubifs_err(c, "corrupt empty space at LEB %d:%d", lnum, offs); goto corrupted; } return sleb; corrupted: if (!quiet) { ubifs_scanned_corruption(c, lnum, offs, buf); ubifs_err(c, "LEB %d scanning failed", lnum); } err = -EUCLEAN; ubifs_scan_destroy(sleb); return ERR_PTR(err); error: ubifs_err(c, "LEB %d scanning failed, error %d", lnum, err); ubifs_scan_destroy(sleb); return ERR_PTR(err); } /** * ubifs_scan_destroy - destroy LEB scanning information. 
* @sleb: scanning information to free */ void ubifs_scan_destroy(struct ubifs_scan_leb *sleb) { struct ubifs_scan_node *node; struct list_head *head; head = &sleb->nodes; while (!list_empty(head)) { node = list_entry(head->next, struct ubifs_scan_node, list); list_del(&node->list); kfree(node); } kfree(sleb); }
null
null
null
null
76,851
22,666
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
187,661
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (C) 2005, 2012 IBM Corporation * * Authors: * Kent Yoder <[email protected]> * Seiji Munetoh <[email protected]> * Stefan Berger <[email protected]> * Reiner Sailer <[email protected]> * Kylene Hall <[email protected]> * Nayna Jain <[email protected]> * * Maintained by: <[email protected]> * * Access to the event log created by a system's firmware / BIOS * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/seq_file.h> #include <linux/fs.h> #include <linux/security.h> #include <linux/module.h> #include <linux/slab.h> #include "tpm.h" #include "tpm_eventlog.h" static const char* tcpa_event_type_strings[] = { "PREBOOT", "POST CODE", "", "NO ACTION", "SEPARATOR", "ACTION", "EVENT TAG", "S-CRTM Contents", "S-CRTM Version", "CPU Microcode", "Platform Config Flags", "Table of Devices", "Compact Hash", "IPL", "IPL Partition Data", "Non-Host Code", "Non-Host Config", "Non-Host Info" }; static const char* tcpa_pc_event_id_strings[] = { "", "SMBIOS", "BIS Certificate", "POST BIOS ", "ESCD ", "CMOS", "NVRAM", "Option ROM", "Option ROM config", "", "Option ROM microcode ", "S-CRTM Version", "S-CRTM Contents ", "POST Contents ", "Table of Devices", }; /* returns pointer to start of pos. 
entry of tcg log */ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos) { loff_t i; struct tpm_chip *chip = m->private; struct tpm_bios_log *log = &chip->log; void *addr = log->bios_event_log; void *limit = log->bios_event_log_end; struct tcpa_event *event; u32 converted_event_size; u32 converted_event_type; /* read over *pos measurements */ for (i = 0; i < *pos; i++) { event = addr; converted_event_size = do_endian_conversion(event->event_size); converted_event_type = do_endian_conversion(event->event_type); if ((addr + sizeof(struct tcpa_event)) < limit) { if ((converted_event_type == 0) && (converted_event_size == 0)) return NULL; addr += (sizeof(struct tcpa_event) + converted_event_size); } } /* now check if current entry is valid */ if ((addr + sizeof(struct tcpa_event)) >= limit) return NULL; event = addr; converted_event_size = do_endian_conversion(event->event_size); converted_event_type = do_endian_conversion(event->event_type); if (((converted_event_type == 0) && (converted_event_size == 0)) || ((addr + sizeof(struct tcpa_event) + converted_event_size) >= limit)) return NULL; return addr; } static void *tpm_bios_measurements_next(struct seq_file *m, void *v, loff_t *pos) { struct tcpa_event *event = v; struct tpm_chip *chip = m->private; struct tpm_bios_log *log = &chip->log; void *limit = log->bios_event_log_end; u32 converted_event_size; u32 converted_event_type; converted_event_size = do_endian_conversion(event->event_size); v += sizeof(struct tcpa_event) + converted_event_size; /* now check if current entry is valid */ if ((v + sizeof(struct tcpa_event)) >= limit) return NULL; event = v; converted_event_size = do_endian_conversion(event->event_size); converted_event_type = do_endian_conversion(event->event_type); if (((converted_event_type == 0) && (converted_event_size == 0)) || ((v + sizeof(struct tcpa_event) + converted_event_size) >= limit)) return NULL; (*pos)++; return v; } static void tpm_bios_measurements_stop(struct 
seq_file *m, void *v) { } static int get_event_name(char *dest, struct tcpa_event *event, unsigned char * event_entry) { const char *name = ""; /* 41 so there is room for 40 data and 1 nul */ char data[41] = ""; int i, n_len = 0, d_len = 0; struct tcpa_pc_event *pc_event; switch (do_endian_conversion(event->event_type)) { case PREBOOT: case POST_CODE: case UNUSED: case NO_ACTION: case SCRTM_CONTENTS: case SCRTM_VERSION: case CPU_MICROCODE: case PLATFORM_CONFIG_FLAGS: case TABLE_OF_DEVICES: case COMPACT_HASH: case IPL: case IPL_PARTITION_DATA: case NONHOST_CODE: case NONHOST_CONFIG: case NONHOST_INFO: name = tcpa_event_type_strings[do_endian_conversion (event->event_type)]; n_len = strlen(name); break; case SEPARATOR: case ACTION: if (MAX_TEXT_EVENT > do_endian_conversion(event->event_size)) { name = event_entry; n_len = do_endian_conversion(event->event_size); } break; case EVENT_TAG: pc_event = (struct tcpa_pc_event *)event_entry; /* ToDo Row data -> Base64 */ switch (do_endian_conversion(pc_event->event_id)) { case SMBIOS: case BIS_CERT: case CMOS: case NVRAM: case OPTION_ROM_EXEC: case OPTION_ROM_CONFIG: case S_CRTM_VERSION: name = tcpa_pc_event_id_strings[do_endian_conversion (pc_event->event_id)]; n_len = strlen(name); break; /* hash data */ case POST_BIOS_ROM: case ESCD: case OPTION_ROM_MICROCODE: case S_CRTM_CONTENTS: case POST_CONTENTS: name = tcpa_pc_event_id_strings[do_endian_conversion (pc_event->event_id)]; n_len = strlen(name); for (i = 0; i < 20; i++) d_len += sprintf(&data[2*i], "%02x", pc_event->event_data[i]); break; default: break; } default: break; } return snprintf(dest, MAX_TEXT_EVENT, "[%.*s%.*s]", n_len, name, d_len, data); } static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v) { struct tcpa_event *event = v; struct tcpa_event temp_event; char *temp_ptr; int i; memcpy(&temp_event, event, sizeof(struct tcpa_event)); /* convert raw integers for endianness */ temp_event.pcr_index = do_endian_conversion(event->pcr_index); 
temp_event.event_type = do_endian_conversion(event->event_type); temp_event.event_size = do_endian_conversion(event->event_size); temp_ptr = (char *) &temp_event; for (i = 0; i < (sizeof(struct tcpa_event) - 1) ; i++) seq_putc(m, temp_ptr[i]); temp_ptr = (char *) v; for (i = (sizeof(struct tcpa_event) - 1); i < (sizeof(struct tcpa_event) + temp_event.event_size); i++) seq_putc(m, temp_ptr[i]); return 0; } static int tpm_bios_measurements_release(struct inode *inode, struct file *file) { struct seq_file *seq = (struct seq_file *)file->private_data; struct tpm_chip *chip = (struct tpm_chip *)seq->private; put_device(&chip->dev); return seq_release(inode, file); } static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v) { int len = 0; char *eventname; struct tcpa_event *event = v; unsigned char *event_entry = (unsigned char *)(v + sizeof(struct tcpa_event)); eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL); if (!eventname) { printk(KERN_ERR "%s: ERROR - No Memory for event name\n ", __func__); return -EFAULT; } /* 1st: PCR */ seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index)); /* 2nd: SHA1 */ seq_printf(m, "%20phN", event->pcr_value); /* 3rd: event type identifier */ seq_printf(m, " %02x", do_endian_conversion(event->event_type)); len += get_event_name(eventname, event, event_entry); /* 4th: eventname <= max + \'0' delimiter */ seq_printf(m, " %s\n", eventname); kfree(eventname); return 0; } static const struct seq_operations tpm_ascii_b_measurements_seqops = { .start = tpm_bios_measurements_start, .next = tpm_bios_measurements_next, .stop = tpm_bios_measurements_stop, .show = tpm_ascii_bios_measurements_show, }; static const struct seq_operations tpm_binary_b_measurements_seqops = { .start = tpm_bios_measurements_start, .next = tpm_bios_measurements_next, .stop = tpm_bios_measurements_stop, .show = tpm_binary_bios_measurements_show, }; static int tpm_bios_measurements_open(struct inode *inode, struct file *file) { int err; struct seq_file 
*seq; struct tpm_chip_seqops *chip_seqops; const struct seq_operations *seqops; struct tpm_chip *chip; inode_lock(inode); if (!inode->i_private) { inode_unlock(inode); return -ENODEV; } chip_seqops = (struct tpm_chip_seqops *)inode->i_private; seqops = chip_seqops->seqops; chip = chip_seqops->chip; get_device(&chip->dev); inode_unlock(inode); /* now register seq file */ err = seq_open(file, seqops); if (!err) { seq = file->private_data; seq->private = chip; } return err; } static const struct file_operations tpm_bios_measurements_ops = { .owner = THIS_MODULE, .open = tpm_bios_measurements_open, .read = seq_read, .llseek = seq_lseek, .release = tpm_bios_measurements_release, }; static int tpm_read_log(struct tpm_chip *chip) { int rc; if (chip->log.bios_event_log != NULL) { dev_dbg(&chip->dev, "%s: ERROR - event log already initialized\n", __func__); return -EFAULT; } rc = tpm_read_log_acpi(chip); if (rc != -ENODEV) return rc; return tpm_read_log_of(chip); } /* * tpm_bios_log_setup() - Read the event log from the firmware * @chip: TPM chip to use. * * If an event log is found then the securityfs files are setup to * export it to userspace, otherwise nothing is done. * * Returns -ENODEV if the firmware has no event log or securityfs is not * supported. */ int tpm_bios_log_setup(struct tpm_chip *chip) { const char *name = dev_name(&chip->dev); unsigned int cnt; int rc = 0; rc = tpm_read_log(chip); if (rc) return rc; cnt = 0; chip->bios_dir[cnt] = securityfs_create_dir(name, NULL); /* NOTE: securityfs_create_dir can return ENODEV if securityfs is * compiled out. The caller should ignore the ENODEV return code. 
*/ if (IS_ERR(chip->bios_dir[cnt])) goto err; cnt++; chip->bin_log_seqops.chip = chip; if (chip->flags & TPM_CHIP_FLAG_TPM2) chip->bin_log_seqops.seqops = &tpm2_binary_b_measurements_seqops; else chip->bin_log_seqops.seqops = &tpm_binary_b_measurements_seqops; chip->bios_dir[cnt] = securityfs_create_file("binary_bios_measurements", 0440, chip->bios_dir[0], (void *)&chip->bin_log_seqops, &tpm_bios_measurements_ops); if (IS_ERR(chip->bios_dir[cnt])) goto err; cnt++; if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { chip->ascii_log_seqops.chip = chip; chip->ascii_log_seqops.seqops = &tpm_ascii_b_measurements_seqops; chip->bios_dir[cnt] = securityfs_create_file("ascii_bios_measurements", 0440, chip->bios_dir[0], (void *)&chip->ascii_log_seqops, &tpm_bios_measurements_ops); if (IS_ERR(chip->bios_dir[cnt])) goto err; cnt++; } return 0; err: rc = PTR_ERR(chip->bios_dir[cnt]); chip->bios_dir[cnt] = NULL; tpm_bios_log_teardown(chip); return rc; } void tpm_bios_log_teardown(struct tpm_chip *chip) { int i; struct inode *inode; /* securityfs_remove currently doesn't take care of handling sync * between removal and opening of pseudo files. To handle this, a * workaround is added by making i_private = NULL here during removal * and to check it during open(), both within inode_lock()/unlock(). * This design ensures that open() either safely gets kref or fails. */ for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) { if (chip->bios_dir[i]) { inode = d_inode(chip->bios_dir[i]); inode_lock(inode); inode->i_private = NULL; inode_unlock(inode); securityfs_remove(chip->bios_dir[i]); } } }
null
null
null
null
96,008
23,921
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
23,921
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CONTENT_BROWSER_MEMORY_SWAP_METRICS_DRIVER_IMPL_H_ #define CONTENT_BROWSER_MEMORY_SWAP_METRICS_DRIVER_IMPL_H_ #include <memory> #include "base/gtest_prod_util.h" #include "base/sequence_checker.h" #include "base/time/time.h" #include "base/timer/timer.h" #include "content/common/content_export.h" #include "content/public/browser/swap_metrics_driver.h" namespace content { // SwapMetricsDriverImpl is the platform independent portion of the // SwapMetricsDriver implementation. class CONTENT_EXPORT SwapMetricsDriverImpl : public SwapMetricsDriver { public: ~SwapMetricsDriverImpl() override; // SwapMetricsDriver SwapMetricsDriver::SwapMetricsUpdateResult InitializeMetrics() override; bool IsRunning() const override; SwapMetricsDriver::SwapMetricsUpdateResult Start() override; void Stop() override; SwapMetricsDriver::SwapMetricsUpdateResult UpdateMetrics() override; protected: SwapMetricsDriverImpl(std::unique_ptr<Delegate> delegate, const base::TimeDelta update_interval); // Periodically called to update swap metrics. void PeriodicUpdateMetrics(); // Common swap metrics update method for both periodic and manual updates. SwapMetricsDriver::SwapMetricsUpdateResult UpdateMetricsImpl(); // Platform-dependent parts of UpdateMetricsImpl(). |interval| is the elapsed // time since the last UpdateMetricsImpl() call. |interval| will be zero when // this function is called for the first time. virtual SwapMetricsDriver::SwapMetricsUpdateResult UpdateMetricsInternal( base::TimeDelta interval) = 0; // The Delegate observing the metrics updates. std::unique_ptr<Delegate> delegate_; private: FRIEND_TEST_ALL_PREFIXES(TestSwapMetricsDriver, ExpectedMetricCounts); FRIEND_TEST_ALL_PREFIXES(TestSwapMetricsDriver, UpdateMetricsFail); // The interval between metrics updates. 
base::TimeDelta update_interval_; // A periodic timer to update swap metrics. base::RepeatingTimer timer_; // Holds the last TimeTicks when swap metrics are updated. base::TimeTicks last_ticks_; // True if and only if InitalizeMetrics() was called, and used to enforce // InitializeMetrics() is called before UpdateMetrics(). This helps // code readability. bool is_initialized_; // Updating metrics is not thread safe, and this checks that // UpdateMetricsImpl() is always called on the same sequence. SEQUENCE_CHECKER(sequence_checker_); DISALLOW_COPY_AND_ASSIGN(SwapMetricsDriverImpl); }; } // namespace content #endif // CONTENT_BROWSER_MEMORY_SWAP_METRICS_DRIVER_IMPL_H_
null
null
null
null
20,784
36,933
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
201,928
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * clk-si5351.c: Silicon Laboratories Si5351A/B/C I2C Clock Generator * * Sebastian Hesselbarth <[email protected]> * Rabeeh Khoury <[email protected]> * * References: * [1] "Si5351A/B/C Data Sheet" * http://www.silabs.com/Support%20Documents/TechnicalDocs/Si5351.pdf * [2] "Manually Generating an Si5351 Register Map" * http://www.silabs.com/Support%20Documents/TechnicalDocs/AN619.pdf * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/rational.h> #include <linux/i2c.h> #include <linux/of_platform.h> #include <linux/platform_data/si5351.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/string.h> #include <asm/div64.h> #include "clk-si5351.h" struct si5351_driver_data; struct si5351_parameters { unsigned long p1; unsigned long p2; unsigned long p3; int valid; }; struct si5351_hw_data { struct clk_hw hw; struct si5351_driver_data *drvdata; struct si5351_parameters params; unsigned char num; }; struct si5351_driver_data { enum si5351_variant variant; struct i2c_client *client; struct regmap *regmap; struct clk *pxtal; const char *pxtal_name; struct clk_hw xtal; struct clk *pclkin; const char *pclkin_name; struct clk_hw clkin; struct si5351_hw_data pll[2]; struct si5351_hw_data *msynth; struct si5351_hw_data *clkout; size_t num_clkout; }; static const char * const si5351_input_names[] = { "xtal", "clkin" }; static const char * const si5351_pll_names[] = { "plla", "pllb", "vxco" }; static const char * const si5351_msynth_names[] = { "ms0", "ms1", "ms2", "ms3", "ms4", "ms5", "ms6", "ms7" }; static const char * const si5351_clkout_names[] = { "clk0", "clk1", 
"clk2", "clk3", "clk4", "clk5", "clk6", "clk7" }; /* * Si5351 i2c regmap */ static inline u8 si5351_reg_read(struct si5351_driver_data *drvdata, u8 reg) { u32 val; int ret; ret = regmap_read(drvdata->regmap, reg, &val); if (ret) { dev_err(&drvdata->client->dev, "unable to read from reg%02x\n", reg); return 0; } return (u8)val; } static inline int si5351_bulk_read(struct si5351_driver_data *drvdata, u8 reg, u8 count, u8 *buf) { return regmap_bulk_read(drvdata->regmap, reg, buf, count); } static inline int si5351_reg_write(struct si5351_driver_data *drvdata, u8 reg, u8 val) { return regmap_write(drvdata->regmap, reg, val); } static inline int si5351_bulk_write(struct si5351_driver_data *drvdata, u8 reg, u8 count, const u8 *buf) { return regmap_raw_write(drvdata->regmap, reg, buf, count); } static inline int si5351_set_bits(struct si5351_driver_data *drvdata, u8 reg, u8 mask, u8 val) { return regmap_update_bits(drvdata->regmap, reg, mask, val); } static inline u8 si5351_msynth_params_address(int num) { if (num > 5) return SI5351_CLK6_PARAMETERS + (num - 6); return SI5351_CLK0_PARAMETERS + (SI5351_PARAMETERS_LENGTH * num); } static void si5351_read_parameters(struct si5351_driver_data *drvdata, u8 reg, struct si5351_parameters *params) { u8 buf[SI5351_PARAMETERS_LENGTH]; switch (reg) { case SI5351_CLK6_PARAMETERS: case SI5351_CLK7_PARAMETERS: buf[0] = si5351_reg_read(drvdata, reg); params->p1 = buf[0]; params->p2 = 0; params->p3 = 1; break; default: si5351_bulk_read(drvdata, reg, SI5351_PARAMETERS_LENGTH, buf); params->p1 = ((buf[2] & 0x03) << 16) | (buf[3] << 8) | buf[4]; params->p2 = ((buf[5] & 0x0f) << 16) | (buf[6] << 8) | buf[7]; params->p3 = ((buf[5] & 0xf0) << 12) | (buf[0] << 8) | buf[1]; } params->valid = 1; } static void si5351_write_parameters(struct si5351_driver_data *drvdata, u8 reg, struct si5351_parameters *params) { u8 buf[SI5351_PARAMETERS_LENGTH]; switch (reg) { case SI5351_CLK6_PARAMETERS: case SI5351_CLK7_PARAMETERS: buf[0] = params->p1 & 0xff; 
si5351_reg_write(drvdata, reg, buf[0]); break; default: buf[0] = ((params->p3 & 0x0ff00) >> 8) & 0xff; buf[1] = params->p3 & 0xff; /* save rdiv and divby4 */ buf[2] = si5351_reg_read(drvdata, reg + 2) & ~0x03; buf[2] |= ((params->p1 & 0x30000) >> 16) & 0x03; buf[3] = ((params->p1 & 0x0ff00) >> 8) & 0xff; buf[4] = params->p1 & 0xff; buf[5] = ((params->p3 & 0xf0000) >> 12) | ((params->p2 & 0xf0000) >> 16); buf[6] = ((params->p2 & 0x0ff00) >> 8) & 0xff; buf[7] = params->p2 & 0xff; si5351_bulk_write(drvdata, reg, SI5351_PARAMETERS_LENGTH, buf); } } static bool si5351_regmap_is_volatile(struct device *dev, unsigned int reg) { switch (reg) { case SI5351_DEVICE_STATUS: case SI5351_INTERRUPT_STATUS: case SI5351_PLL_RESET: return true; } return false; } static bool si5351_regmap_is_writeable(struct device *dev, unsigned int reg) { /* reserved registers */ if (reg >= 4 && reg <= 8) return false; if (reg >= 10 && reg <= 14) return false; if (reg >= 173 && reg <= 176) return false; if (reg >= 178 && reg <= 182) return false; /* read-only */ if (reg == SI5351_DEVICE_STATUS) return false; return true; } static const struct regmap_config si5351_regmap_config = { .reg_bits = 8, .val_bits = 8, .cache_type = REGCACHE_RBTREE, .max_register = 187, .writeable_reg = si5351_regmap_is_writeable, .volatile_reg = si5351_regmap_is_volatile, }; /* * Si5351 xtal clock input */ static int si5351_xtal_prepare(struct clk_hw *hw) { struct si5351_driver_data *drvdata = container_of(hw, struct si5351_driver_data, xtal); si5351_set_bits(drvdata, SI5351_FANOUT_ENABLE, SI5351_XTAL_ENABLE, SI5351_XTAL_ENABLE); return 0; } static void si5351_xtal_unprepare(struct clk_hw *hw) { struct si5351_driver_data *drvdata = container_of(hw, struct si5351_driver_data, xtal); si5351_set_bits(drvdata, SI5351_FANOUT_ENABLE, SI5351_XTAL_ENABLE, 0); } static const struct clk_ops si5351_xtal_ops = { .prepare = si5351_xtal_prepare, .unprepare = si5351_xtal_unprepare, }; /* * Si5351 clkin clock input (Si5351C only) */ 
static int si5351_clkin_prepare(struct clk_hw *hw) { struct si5351_driver_data *drvdata = container_of(hw, struct si5351_driver_data, clkin); si5351_set_bits(drvdata, SI5351_FANOUT_ENABLE, SI5351_CLKIN_ENABLE, SI5351_CLKIN_ENABLE); return 0; } static void si5351_clkin_unprepare(struct clk_hw *hw) { struct si5351_driver_data *drvdata = container_of(hw, struct si5351_driver_data, clkin); si5351_set_bits(drvdata, SI5351_FANOUT_ENABLE, SI5351_CLKIN_ENABLE, 0); } /* * CMOS clock source constraints: * The input frequency range of the PLL is 10Mhz to 40MHz. * If CLKIN is >40MHz, the input divider must be used. */ static unsigned long si5351_clkin_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct si5351_driver_data *drvdata = container_of(hw, struct si5351_driver_data, clkin); unsigned long rate; unsigned char idiv; rate = parent_rate; if (parent_rate > 160000000) { idiv = SI5351_CLKIN_DIV_8; rate /= 8; } else if (parent_rate > 80000000) { idiv = SI5351_CLKIN_DIV_4; rate /= 4; } else if (parent_rate > 40000000) { idiv = SI5351_CLKIN_DIV_2; rate /= 2; } else { idiv = SI5351_CLKIN_DIV_1; } si5351_set_bits(drvdata, SI5351_PLL_INPUT_SOURCE, SI5351_CLKIN_DIV_MASK, idiv); dev_dbg(&drvdata->client->dev, "%s - clkin div = %d, rate = %lu\n", __func__, (1 << (idiv >> 6)), rate); return rate; } static const struct clk_ops si5351_clkin_ops = { .prepare = si5351_clkin_prepare, .unprepare = si5351_clkin_unprepare, .recalc_rate = si5351_clkin_recalc_rate, }; /* * Si5351 vxco clock input (Si5351B only) */ static int si5351_vxco_prepare(struct clk_hw *hw) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); dev_warn(&hwdata->drvdata->client->dev, "VXCO currently unsupported\n"); return 0; } static void si5351_vxco_unprepare(struct clk_hw *hw) { } static unsigned long si5351_vxco_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { return 0; } static int si5351_vxco_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent) { 
return 0; } static const struct clk_ops si5351_vxco_ops = { .prepare = si5351_vxco_prepare, .unprepare = si5351_vxco_unprepare, .recalc_rate = si5351_vxco_recalc_rate, .set_rate = si5351_vxco_set_rate, }; /* * Si5351 pll a/b * * Feedback Multisynth Divider Equations [2] * * fVCO = fIN * (a + b/c) * * with 15 + 0/1048575 <= (a + b/c) <= 90 + 0/1048575 and * fIN = fXTAL or fIN = fCLKIN/CLKIN_DIV * * Feedback Multisynth Register Equations * * (1) MSNx_P1[17:0] = 128 * a + floor(128 * b/c) - 512 * (2) MSNx_P2[19:0] = 128 * b - c * floor(128 * b/c) = (128*b) mod c * (3) MSNx_P3[19:0] = c * * Transposing (2) yields: (4) floor(128 * b/c) = (128 * b / MSNx_P2)/c * * Using (4) on (1) yields: * MSNx_P1 = 128 * a + (128 * b/MSNx_P2)/c - 512 * MSNx_P1 + 512 + MSNx_P2/c = 128 * a + 128 * b/c * * a + b/c = (MSNx_P1 + MSNx_P2/MSNx_P3 + 512)/128 * = (MSNx_P1*MSNx_P3 + MSNx_P2 + 512*MSNx_P3)/(128*MSNx_P3) * */ static int _si5351_pll_reparent(struct si5351_driver_data *drvdata, int num, enum si5351_pll_src parent) { u8 mask = (num == 0) ? SI5351_PLLA_SOURCE : SI5351_PLLB_SOURCE; if (parent == SI5351_PLL_SRC_DEFAULT) return 0; if (num > 2) return -EINVAL; if (drvdata->variant != SI5351_VARIANT_C && parent != SI5351_PLL_SRC_XTAL) return -EINVAL; si5351_set_bits(drvdata, SI5351_PLL_INPUT_SOURCE, mask, (parent == SI5351_PLL_SRC_XTAL) ? 0 : mask); return 0; } static unsigned char si5351_pll_get_parent(struct clk_hw *hw) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); u8 mask = (hwdata->num == 0) ? SI5351_PLLA_SOURCE : SI5351_PLLB_SOURCE; u8 val; val = si5351_reg_read(hwdata->drvdata, SI5351_PLL_INPUT_SOURCE); return (val & mask) ? 1 : 0; } static int si5351_pll_set_parent(struct clk_hw *hw, u8 index) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); if (hwdata->drvdata->variant != SI5351_VARIANT_C && index > 0) return -EPERM; if (index > 1) return -EINVAL; return _si5351_pll_reparent(hwdata->drvdata, hwdata->num, (index == 0) ? 
SI5351_PLL_SRC_XTAL : SI5351_PLL_SRC_CLKIN); } static unsigned long si5351_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); u8 reg = (hwdata->num == 0) ? SI5351_PLLA_PARAMETERS : SI5351_PLLB_PARAMETERS; unsigned long long rate; if (!hwdata->params.valid) si5351_read_parameters(hwdata->drvdata, reg, &hwdata->params); if (hwdata->params.p3 == 0) return parent_rate; /* fVCO = fIN * (P1*P3 + 512*P3 + P2)/(128*P3) */ rate = hwdata->params.p1 * hwdata->params.p3; rate += 512 * hwdata->params.p3; rate += hwdata->params.p2; rate *= parent_rate; do_div(rate, 128 * hwdata->params.p3); dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n", __func__, clk_hw_get_name(hw), hwdata->params.p1, hwdata->params.p2, hwdata->params.p3, parent_rate, (unsigned long)rate); return (unsigned long)rate; } static long si5351_pll_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); unsigned long rfrac, denom, a, b, c; unsigned long long lltmp; if (rate < SI5351_PLL_VCO_MIN) rate = SI5351_PLL_VCO_MIN; if (rate > SI5351_PLL_VCO_MAX) rate = SI5351_PLL_VCO_MAX; /* determine integer part of feedback equation */ a = rate / *parent_rate; if (a < SI5351_PLL_A_MIN) rate = *parent_rate * SI5351_PLL_A_MIN; if (a > SI5351_PLL_A_MAX) rate = *parent_rate * SI5351_PLL_A_MAX; /* find best approximation for b/c = fVCO mod fIN */ denom = 1000 * 1000; lltmp = rate % (*parent_rate); lltmp *= denom; do_div(lltmp, *parent_rate); rfrac = (unsigned long)lltmp; b = 0; c = 1; if (rfrac) rational_best_approximation(rfrac, denom, SI5351_PLL_B_MAX, SI5351_PLL_C_MAX, &b, &c); /* calculate parameters */ hwdata->params.p3 = c; hwdata->params.p2 = (128 * b) % c; hwdata->params.p1 = 128 * a; hwdata->params.p1 += (128 * b / c); hwdata->params.p1 -= 512; /* recalculate rate by 
fIN * (a + b/c) */ lltmp = *parent_rate; lltmp *= b; do_div(lltmp, c); rate = (unsigned long)lltmp; rate += *parent_rate * a; dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: a = %lu, b = %lu, c = %lu, parent_rate = %lu, rate = %lu\n", __func__, clk_hw_get_name(hw), a, b, c, *parent_rate, rate); return rate; } static int si5351_pll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); u8 reg = (hwdata->num == 0) ? SI5351_PLLA_PARAMETERS : SI5351_PLLB_PARAMETERS; /* write multisynth parameters */ si5351_write_parameters(hwdata->drvdata, reg, &hwdata->params); /* plla/pllb ctrl is in clk6/clk7 ctrl registers */ si5351_set_bits(hwdata->drvdata, SI5351_CLK6_CTRL + hwdata->num, SI5351_CLK_INTEGER_MODE, (hwdata->params.p2 == 0) ? SI5351_CLK_INTEGER_MODE : 0); dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n", __func__, clk_hw_get_name(hw), hwdata->params.p1, hwdata->params.p2, hwdata->params.p3, parent_rate, rate); return 0; } static const struct clk_ops si5351_pll_ops = { .set_parent = si5351_pll_set_parent, .get_parent = si5351_pll_get_parent, .recalc_rate = si5351_pll_recalc_rate, .round_rate = si5351_pll_round_rate, .set_rate = si5351_pll_set_rate, }; /* * Si5351 multisync divider * * for fOUT <= 150 MHz: * * fOUT = (fIN * (a + b/c)) / CLKOUTDIV * * with 6 + 0/1048575 <= (a + b/c) <= 1800 + 0/1048575 and * fIN = fVCO0, fVCO1 * * Output Clock Multisynth Register Equations * * MSx_P1[17:0] = 128 * a + floor(128 * b/c) - 512 * MSx_P2[19:0] = 128 * b - c * floor(128 * b/c) = (128*b) mod c * MSx_P3[19:0] = c * * MS[6,7] are integer (P1) divide only, P1 = divide value, * P2 and P3 are not applicable * * for 150MHz < fOUT <= 160MHz: * * MSx_P1 = 0, MSx_P2 = 0, MSx_P3 = 1, MSx_INT = 1, MSx_DIVBY4 = 11b */ static int _si5351_msynth_reparent(struct si5351_driver_data *drvdata, int num, enum si5351_multisynth_src 
parent) { if (parent == SI5351_MULTISYNTH_SRC_DEFAULT) return 0; if (num > 8) return -EINVAL; si5351_set_bits(drvdata, SI5351_CLK0_CTRL + num, SI5351_CLK_PLL_SELECT, (parent == SI5351_MULTISYNTH_SRC_VCO0) ? 0 : SI5351_CLK_PLL_SELECT); return 0; } static unsigned char si5351_msynth_get_parent(struct clk_hw *hw) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); u8 val; val = si5351_reg_read(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num); return (val & SI5351_CLK_PLL_SELECT) ? 1 : 0; } static int si5351_msynth_set_parent(struct clk_hw *hw, u8 index) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); return _si5351_msynth_reparent(hwdata->drvdata, hwdata->num, (index == 0) ? SI5351_MULTISYNTH_SRC_VCO0 : SI5351_MULTISYNTH_SRC_VCO1); } static unsigned long si5351_msynth_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); u8 reg = si5351_msynth_params_address(hwdata->num); unsigned long long rate; unsigned long m; if (!hwdata->params.valid) si5351_read_parameters(hwdata->drvdata, reg, &hwdata->params); /* * multisync0-5: fOUT = (128 * P3 * fIN) / (P1*P3 + P2 + 512*P3) * multisync6-7: fOUT = fIN / P1 */ rate = parent_rate; if (hwdata->num > 5) { m = hwdata->params.p1; } else if (hwdata->params.p3 == 0) { return parent_rate; } else if ((si5351_reg_read(hwdata->drvdata, reg + 2) & SI5351_OUTPUT_CLK_DIVBY4) == SI5351_OUTPUT_CLK_DIVBY4) { m = 4; } else { rate *= 128 * hwdata->params.p3; m = hwdata->params.p1 * hwdata->params.p3; m += hwdata->params.p2; m += 512 * hwdata->params.p3; } if (m == 0) return 0; do_div(rate, m); dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, m = %lu, parent_rate = %lu, rate = %lu\n", __func__, clk_hw_get_name(hw), hwdata->params.p1, hwdata->params.p2, hwdata->params.p3, m, parent_rate, (unsigned long)rate); return (unsigned long)rate; } static long 
si5351_msynth_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); unsigned long long lltmp; unsigned long a, b, c; int divby4; /* multisync6-7 can only handle freqencies < 150MHz */ if (hwdata->num >= 6 && rate > SI5351_MULTISYNTH67_MAX_FREQ) rate = SI5351_MULTISYNTH67_MAX_FREQ; /* multisync frequency is 1MHz .. 160MHz */ if (rate > SI5351_MULTISYNTH_MAX_FREQ) rate = SI5351_MULTISYNTH_MAX_FREQ; if (rate < SI5351_MULTISYNTH_MIN_FREQ) rate = SI5351_MULTISYNTH_MIN_FREQ; divby4 = 0; if (rate > SI5351_MULTISYNTH_DIVBY4_FREQ) divby4 = 1; /* multisync can set pll */ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) { /* * find largest integer divider for max * vco frequency and given target rate */ if (divby4 == 0) { lltmp = SI5351_PLL_VCO_MAX; do_div(lltmp, rate); a = (unsigned long)lltmp; } else a = 4; b = 0; c = 1; *parent_rate = a * rate; } else if (hwdata->num >= 6) { /* determine the closest integer divider */ a = DIV_ROUND_CLOSEST(*parent_rate, rate); if (a < SI5351_MULTISYNTH_A_MIN) a = SI5351_MULTISYNTH_A_MIN; if (a > SI5351_MULTISYNTH67_A_MAX) a = SI5351_MULTISYNTH67_A_MAX; b = 0; c = 1; } else { unsigned long rfrac, denom; /* disable divby4 */ if (divby4) { rate = SI5351_MULTISYNTH_DIVBY4_FREQ; divby4 = 0; } /* determine integer part of divider equation */ a = *parent_rate / rate; if (a < SI5351_MULTISYNTH_A_MIN) a = SI5351_MULTISYNTH_A_MIN; if (a > SI5351_MULTISYNTH_A_MAX) a = SI5351_MULTISYNTH_A_MAX; /* find best approximation for b/c = fVCO mod fOUT */ denom = 1000 * 1000; lltmp = (*parent_rate) % rate; lltmp *= denom; do_div(lltmp, rate); rfrac = (unsigned long)lltmp; b = 0; c = 1; if (rfrac) rational_best_approximation(rfrac, denom, SI5351_MULTISYNTH_B_MAX, SI5351_MULTISYNTH_C_MAX, &b, &c); } /* recalculate rate by fOUT = fIN / (a + b/c) */ lltmp = *parent_rate; lltmp *= c; do_div(lltmp, a * c + b); rate = (unsigned long)lltmp; /* calculate parameters */ 
if (divby4) { hwdata->params.p3 = 1; hwdata->params.p2 = 0; hwdata->params.p1 = 0; } else if (hwdata->num >= 6) { hwdata->params.p3 = 0; hwdata->params.p2 = 0; hwdata->params.p1 = a; } else { hwdata->params.p3 = c; hwdata->params.p2 = (128 * b) % c; hwdata->params.p1 = 128 * a; hwdata->params.p1 += (128 * b / c); hwdata->params.p1 -= 512; } dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: a = %lu, b = %lu, c = %lu, divby4 = %d, parent_rate = %lu, rate = %lu\n", __func__, clk_hw_get_name(hw), a, b, c, divby4, *parent_rate, rate); return rate; } static int si5351_msynth_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); u8 reg = si5351_msynth_params_address(hwdata->num); int divby4 = 0; /* write multisynth parameters */ si5351_write_parameters(hwdata->drvdata, reg, &hwdata->params); if (rate > SI5351_MULTISYNTH_DIVBY4_FREQ) divby4 = 1; /* enable/disable integer mode and divby4 on multisynth0-5 */ if (hwdata->num < 6) { si5351_set_bits(hwdata->drvdata, reg + 2, SI5351_OUTPUT_CLK_DIVBY4, (divby4) ? SI5351_OUTPUT_CLK_DIVBY4 : 0); si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num, SI5351_CLK_INTEGER_MODE, (hwdata->params.p2 == 0) ? 
SI5351_CLK_INTEGER_MODE : 0); } dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, divby4 = %d, parent_rate = %lu, rate = %lu\n", __func__, clk_hw_get_name(hw), hwdata->params.p1, hwdata->params.p2, hwdata->params.p3, divby4, parent_rate, rate); return 0; } static const struct clk_ops si5351_msynth_ops = { .set_parent = si5351_msynth_set_parent, .get_parent = si5351_msynth_get_parent, .recalc_rate = si5351_msynth_recalc_rate, .round_rate = si5351_msynth_round_rate, .set_rate = si5351_msynth_set_rate, }; /* * Si5351 clkout divider */ static int _si5351_clkout_reparent(struct si5351_driver_data *drvdata, int num, enum si5351_clkout_src parent) { u8 val; if (num > 8) return -EINVAL; switch (parent) { case SI5351_CLKOUT_SRC_MSYNTH_N: val = SI5351_CLK_INPUT_MULTISYNTH_N; break; case SI5351_CLKOUT_SRC_MSYNTH_0_4: /* clk0/clk4 can only connect to its own multisync */ if (num == 0 || num == 4) val = SI5351_CLK_INPUT_MULTISYNTH_N; else val = SI5351_CLK_INPUT_MULTISYNTH_0_4; break; case SI5351_CLKOUT_SRC_XTAL: val = SI5351_CLK_INPUT_XTAL; break; case SI5351_CLKOUT_SRC_CLKIN: if (drvdata->variant != SI5351_VARIANT_C) return -EINVAL; val = SI5351_CLK_INPUT_CLKIN; break; default: return 0; } si5351_set_bits(drvdata, SI5351_CLK0_CTRL + num, SI5351_CLK_INPUT_MASK, val); return 0; } static int _si5351_clkout_set_drive_strength( struct si5351_driver_data *drvdata, int num, enum si5351_drive_strength drive) { u8 mask; if (num > 8) return -EINVAL; switch (drive) { case SI5351_DRIVE_2MA: mask = SI5351_CLK_DRIVE_STRENGTH_2MA; break; case SI5351_DRIVE_4MA: mask = SI5351_CLK_DRIVE_STRENGTH_4MA; break; case SI5351_DRIVE_6MA: mask = SI5351_CLK_DRIVE_STRENGTH_6MA; break; case SI5351_DRIVE_8MA: mask = SI5351_CLK_DRIVE_STRENGTH_8MA; break; default: return 0; } si5351_set_bits(drvdata, SI5351_CLK0_CTRL + num, SI5351_CLK_DRIVE_STRENGTH_MASK, mask); return 0; } static int _si5351_clkout_set_disable_state( struct si5351_driver_data *drvdata, int num, enum 
si5351_disable_state state) { u8 reg = (num < 4) ? SI5351_CLK3_0_DISABLE_STATE : SI5351_CLK7_4_DISABLE_STATE; u8 shift = (num < 4) ? (2 * num) : (2 * (num-4)); u8 mask = SI5351_CLK_DISABLE_STATE_MASK << shift; u8 val; if (num > 8) return -EINVAL; switch (state) { case SI5351_DISABLE_LOW: val = SI5351_CLK_DISABLE_STATE_LOW; break; case SI5351_DISABLE_HIGH: val = SI5351_CLK_DISABLE_STATE_HIGH; break; case SI5351_DISABLE_FLOATING: val = SI5351_CLK_DISABLE_STATE_FLOAT; break; case SI5351_DISABLE_NEVER: val = SI5351_CLK_DISABLE_STATE_NEVER; break; default: return 0; } si5351_set_bits(drvdata, reg, mask, val << shift); return 0; } static int si5351_clkout_prepare(struct clk_hw *hw) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num, SI5351_CLK_POWERDOWN, 0); si5351_set_bits(hwdata->drvdata, SI5351_OUTPUT_ENABLE_CTRL, (1 << hwdata->num), 0); return 0; } static void si5351_clkout_unprepare(struct clk_hw *hw) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num, SI5351_CLK_POWERDOWN, SI5351_CLK_POWERDOWN); si5351_set_bits(hwdata->drvdata, SI5351_OUTPUT_ENABLE_CTRL, (1 << hwdata->num), (1 << hwdata->num)); } static u8 si5351_clkout_get_parent(struct clk_hw *hw) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); int index = 0; unsigned char val; val = si5351_reg_read(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num); switch (val & SI5351_CLK_INPUT_MASK) { case SI5351_CLK_INPUT_MULTISYNTH_N: index = 0; break; case SI5351_CLK_INPUT_MULTISYNTH_0_4: index = 1; break; case SI5351_CLK_INPUT_XTAL: index = 2; break; case SI5351_CLK_INPUT_CLKIN: index = 3; break; } return index; } static int si5351_clkout_set_parent(struct clk_hw *hw, u8 index) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); enum si5351_clkout_src parent = SI5351_CLKOUT_SRC_DEFAULT; 
switch (index) { case 0: parent = SI5351_CLKOUT_SRC_MSYNTH_N; break; case 1: parent = SI5351_CLKOUT_SRC_MSYNTH_0_4; break; case 2: parent = SI5351_CLKOUT_SRC_XTAL; break; case 3: parent = SI5351_CLKOUT_SRC_CLKIN; break; } return _si5351_clkout_reparent(hwdata->drvdata, hwdata->num, parent); } static unsigned long si5351_clkout_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); unsigned char reg; unsigned char rdiv; if (hwdata->num <= 5) reg = si5351_msynth_params_address(hwdata->num) + 2; else reg = SI5351_CLK6_7_OUTPUT_DIVIDER; rdiv = si5351_reg_read(hwdata->drvdata, reg); if (hwdata->num == 6) { rdiv &= SI5351_OUTPUT_CLK6_DIV_MASK; } else { rdiv &= SI5351_OUTPUT_CLK_DIV_MASK; rdiv >>= SI5351_OUTPUT_CLK_DIV_SHIFT; } return parent_rate >> rdiv; } static long si5351_clkout_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); unsigned char rdiv; /* clkout6/7 can only handle output freqencies < 150MHz */ if (hwdata->num >= 6 && rate > SI5351_CLKOUT67_MAX_FREQ) rate = SI5351_CLKOUT67_MAX_FREQ; /* clkout freqency is 8kHz - 160MHz */ if (rate > SI5351_CLKOUT_MAX_FREQ) rate = SI5351_CLKOUT_MAX_FREQ; if (rate < SI5351_CLKOUT_MIN_FREQ) rate = SI5351_CLKOUT_MIN_FREQ; /* request frequency if multisync master */ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) { /* use r divider for frequencies below 1MHz */ rdiv = SI5351_OUTPUT_CLK_DIV_1; while (rate < SI5351_MULTISYNTH_MIN_FREQ && rdiv < SI5351_OUTPUT_CLK_DIV_128) { rdiv += 1; rate *= 2; } *parent_rate = rate; } else { unsigned long new_rate, new_err, err; /* round to closed rdiv */ rdiv = SI5351_OUTPUT_CLK_DIV_1; new_rate = *parent_rate; err = abs(new_rate - rate); do { new_rate >>= 1; new_err = abs(new_rate - rate); if (new_err > err || rdiv == SI5351_OUTPUT_CLK_DIV_128) break; rdiv++; err = new_err; } while (1); } rate = *parent_rate >> 
rdiv; dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: rdiv = %u, parent_rate = %lu, rate = %lu\n", __func__, clk_hw_get_name(hw), (1 << rdiv), *parent_rate, rate); return rate; } static int si5351_clkout_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct si5351_hw_data *hwdata = container_of(hw, struct si5351_hw_data, hw); unsigned long new_rate, new_err, err; unsigned char rdiv; /* round to closed rdiv */ rdiv = SI5351_OUTPUT_CLK_DIV_1; new_rate = parent_rate; err = abs(new_rate - rate); do { new_rate >>= 1; new_err = abs(new_rate - rate); if (new_err > err || rdiv == SI5351_OUTPUT_CLK_DIV_128) break; rdiv++; err = new_err; } while (1); /* write output divider */ switch (hwdata->num) { case 6: si5351_set_bits(hwdata->drvdata, SI5351_CLK6_7_OUTPUT_DIVIDER, SI5351_OUTPUT_CLK6_DIV_MASK, rdiv); break; case 7: si5351_set_bits(hwdata->drvdata, SI5351_CLK6_7_OUTPUT_DIVIDER, SI5351_OUTPUT_CLK_DIV_MASK, rdiv << SI5351_OUTPUT_CLK_DIV_SHIFT); break; default: si5351_set_bits(hwdata->drvdata, si5351_msynth_params_address(hwdata->num) + 2, SI5351_OUTPUT_CLK_DIV_MASK, rdiv << SI5351_OUTPUT_CLK_DIV_SHIFT); } /* powerup clkout */ si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num, SI5351_CLK_POWERDOWN, 0); /* * Do a pll soft reset on both plls, needed in some cases to get * all outputs running. 
*/ si5351_reg_write(hwdata->drvdata, SI5351_PLL_RESET, SI5351_PLL_RESET_A | SI5351_PLL_RESET_B); dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: rdiv = %u, parent_rate = %lu, rate = %lu\n", __func__, clk_hw_get_name(hw), (1 << rdiv), parent_rate, rate); return 0; } static const struct clk_ops si5351_clkout_ops = { .prepare = si5351_clkout_prepare, .unprepare = si5351_clkout_unprepare, .set_parent = si5351_clkout_set_parent, .get_parent = si5351_clkout_get_parent, .recalc_rate = si5351_clkout_recalc_rate, .round_rate = si5351_clkout_round_rate, .set_rate = si5351_clkout_set_rate, }; /* * Si5351 i2c probe and DT */ #ifdef CONFIG_OF static const struct of_device_id si5351_dt_ids[] = { { .compatible = "silabs,si5351a", .data = (void *)SI5351_VARIANT_A, }, { .compatible = "silabs,si5351a-msop", .data = (void *)SI5351_VARIANT_A3, }, { .compatible = "silabs,si5351b", .data = (void *)SI5351_VARIANT_B, }, { .compatible = "silabs,si5351c", .data = (void *)SI5351_VARIANT_C, }, { } }; MODULE_DEVICE_TABLE(of, si5351_dt_ids); static int si5351_dt_parse(struct i2c_client *client, enum si5351_variant variant) { struct device_node *child, *np = client->dev.of_node; struct si5351_platform_data *pdata; struct property *prop; const __be32 *p; int num = 0; u32 val; if (np == NULL) return 0; pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; /* * property silabs,pll-source : <num src>, [<..>] * allow to selectively set pll source */ of_property_for_each_u32(np, "silabs,pll-source", prop, p, num) { if (num >= 2) { dev_err(&client->dev, "invalid pll %d on pll-source prop\n", num); return -EINVAL; } p = of_prop_next_u32(prop, p, &val); if (!p) { dev_err(&client->dev, "missing pll-source for pll %d\n", num); return -EINVAL; } switch (val) { case 0: pdata->pll_src[num] = SI5351_PLL_SRC_XTAL; break; case 1: if (variant != SI5351_VARIANT_C) { dev_err(&client->dev, "invalid parent %d for pll %d\n", val, num); return -EINVAL; } pdata->pll_src[num] = 
SI5351_PLL_SRC_CLKIN; break; default: dev_err(&client->dev, "invalid parent %d for pll %d\n", val, num); return -EINVAL; } } /* per clkout properties */ for_each_child_of_node(np, child) { if (of_property_read_u32(child, "reg", &num)) { dev_err(&client->dev, "missing reg property of %s\n", child->name); goto put_child; } if (num >= 8 || (variant == SI5351_VARIANT_A3 && num >= 3)) { dev_err(&client->dev, "invalid clkout %d\n", num); goto put_child; } if (!of_property_read_u32(child, "silabs,multisynth-source", &val)) { switch (val) { case 0: pdata->clkout[num].multisynth_src = SI5351_MULTISYNTH_SRC_VCO0; break; case 1: pdata->clkout[num].multisynth_src = SI5351_MULTISYNTH_SRC_VCO1; break; default: dev_err(&client->dev, "invalid parent %d for multisynth %d\n", val, num); goto put_child; } } if (!of_property_read_u32(child, "silabs,clock-source", &val)) { switch (val) { case 0: pdata->clkout[num].clkout_src = SI5351_CLKOUT_SRC_MSYNTH_N; break; case 1: pdata->clkout[num].clkout_src = SI5351_CLKOUT_SRC_MSYNTH_0_4; break; case 2: pdata->clkout[num].clkout_src = SI5351_CLKOUT_SRC_XTAL; break; case 3: if (variant != SI5351_VARIANT_C) { dev_err(&client->dev, "invalid parent %d for clkout %d\n", val, num); goto put_child; } pdata->clkout[num].clkout_src = SI5351_CLKOUT_SRC_CLKIN; break; default: dev_err(&client->dev, "invalid parent %d for clkout %d\n", val, num); goto put_child; } } if (!of_property_read_u32(child, "silabs,drive-strength", &val)) { switch (val) { case SI5351_DRIVE_2MA: case SI5351_DRIVE_4MA: case SI5351_DRIVE_6MA: case SI5351_DRIVE_8MA: pdata->clkout[num].drive = val; break; default: dev_err(&client->dev, "invalid drive strength %d for clkout %d\n", val, num); goto put_child; } } if (!of_property_read_u32(child, "silabs,disable-state", &val)) { switch (val) { case 0: pdata->clkout[num].disable_state = SI5351_DISABLE_LOW; break; case 1: pdata->clkout[num].disable_state = SI5351_DISABLE_HIGH; break; case 2: pdata->clkout[num].disable_state = 
SI5351_DISABLE_FLOATING; break; case 3: pdata->clkout[num].disable_state = SI5351_DISABLE_NEVER; break; default: dev_err(&client->dev, "invalid disable state %d for clkout %d\n", val, num); goto put_child; } } if (!of_property_read_u32(child, "clock-frequency", &val)) pdata->clkout[num].rate = val; pdata->clkout[num].pll_master = of_property_read_bool(child, "silabs,pll-master"); } client->dev.platform_data = pdata; return 0; put_child: of_node_put(child); return -EINVAL; } static struct clk_hw * si53351_of_clk_get(struct of_phandle_args *clkspec, void *data) { struct si5351_driver_data *drvdata = data; unsigned int idx = clkspec->args[0]; if (idx >= drvdata->num_clkout) { pr_err("%s: invalid index %u\n", __func__, idx); return ERR_PTR(-EINVAL); } return &drvdata->clkout[idx].hw; } #else static int si5351_dt_parse(struct i2c_client *client, enum si5351_variant variant) { return 0; } static struct clk_hw * si53351_of_clk_get(struct of_phandle_args *clkspec, void *data) { return NULL; } #endif /* CONFIG_OF */ static int si5351_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { enum si5351_variant variant = (enum si5351_variant)id->driver_data; struct si5351_platform_data *pdata; struct si5351_driver_data *drvdata; struct clk_init_data init; const char *parent_names[4]; u8 num_parents, num_clocks; int ret, n; ret = si5351_dt_parse(client, variant); if (ret) return ret; pdata = client->dev.platform_data; if (!pdata) return -EINVAL; drvdata = devm_kzalloc(&client->dev, sizeof(*drvdata), GFP_KERNEL); if (drvdata == NULL) { dev_err(&client->dev, "unable to allocate driver data\n"); return -ENOMEM; } i2c_set_clientdata(client, drvdata); drvdata->client = client; drvdata->variant = variant; drvdata->pxtal = devm_clk_get(&client->dev, "xtal"); drvdata->pclkin = devm_clk_get(&client->dev, "clkin"); if (PTR_ERR(drvdata->pxtal) == -EPROBE_DEFER || PTR_ERR(drvdata->pclkin) == -EPROBE_DEFER) return -EPROBE_DEFER; /* * Check for valid parent clock: VARIANT_A 
and VARIANT_B need XTAL, * VARIANT_C can have CLKIN instead. */ if (IS_ERR(drvdata->pxtal) && (drvdata->variant != SI5351_VARIANT_C || IS_ERR(drvdata->pclkin))) { dev_err(&client->dev, "missing parent clock\n"); return -EINVAL; } drvdata->regmap = devm_regmap_init_i2c(client, &si5351_regmap_config); if (IS_ERR(drvdata->regmap)) { dev_err(&client->dev, "failed to allocate register map\n"); return PTR_ERR(drvdata->regmap); } /* Disable interrupts */ si5351_reg_write(drvdata, SI5351_INTERRUPT_MASK, 0xf0); /* Ensure pll select is on XTAL for Si5351A/B */ if (drvdata->variant != SI5351_VARIANT_C) si5351_set_bits(drvdata, SI5351_PLL_INPUT_SOURCE, SI5351_PLLA_SOURCE | SI5351_PLLB_SOURCE, 0); /* setup clock configuration */ for (n = 0; n < 2; n++) { ret = _si5351_pll_reparent(drvdata, n, pdata->pll_src[n]); if (ret) { dev_err(&client->dev, "failed to reparent pll %d to %d\n", n, pdata->pll_src[n]); return ret; } } for (n = 0; n < 8; n++) { ret = _si5351_msynth_reparent(drvdata, n, pdata->clkout[n].multisynth_src); if (ret) { dev_err(&client->dev, "failed to reparent multisynth %d to %d\n", n, pdata->clkout[n].multisynth_src); return ret; } ret = _si5351_clkout_reparent(drvdata, n, pdata->clkout[n].clkout_src); if (ret) { dev_err(&client->dev, "failed to reparent clkout %d to %d\n", n, pdata->clkout[n].clkout_src); return ret; } ret = _si5351_clkout_set_drive_strength(drvdata, n, pdata->clkout[n].drive); if (ret) { dev_err(&client->dev, "failed set drive strength of clkout%d to %d\n", n, pdata->clkout[n].drive); return ret; } ret = _si5351_clkout_set_disable_state(drvdata, n, pdata->clkout[n].disable_state); if (ret) { dev_err(&client->dev, "failed set disable state of clkout%d to %d\n", n, pdata->clkout[n].disable_state); return ret; } } if (!IS_ERR(drvdata->pxtal)) clk_prepare_enable(drvdata->pxtal); if (!IS_ERR(drvdata->pclkin)) clk_prepare_enable(drvdata->pclkin); /* register xtal input clock gate */ memset(&init, 0, sizeof(init)); init.name = si5351_input_names[0]; 
init.ops = &si5351_xtal_ops; init.flags = 0; if (!IS_ERR(drvdata->pxtal)) { drvdata->pxtal_name = __clk_get_name(drvdata->pxtal); init.parent_names = &drvdata->pxtal_name; init.num_parents = 1; } drvdata->xtal.init = &init; ret = devm_clk_hw_register(&client->dev, &drvdata->xtal); if (ret) { dev_err(&client->dev, "unable to register %s\n", init.name); goto err_clk; } /* register clkin input clock gate */ if (drvdata->variant == SI5351_VARIANT_C) { memset(&init, 0, sizeof(init)); init.name = si5351_input_names[1]; init.ops = &si5351_clkin_ops; if (!IS_ERR(drvdata->pclkin)) { drvdata->pclkin_name = __clk_get_name(drvdata->pclkin); init.parent_names = &drvdata->pclkin_name; init.num_parents = 1; } drvdata->clkin.init = &init; ret = devm_clk_hw_register(&client->dev, &drvdata->clkin); if (ret) { dev_err(&client->dev, "unable to register %s\n", init.name); goto err_clk; } } /* Si5351C allows to mux either xtal or clkin to PLL input */ num_parents = (drvdata->variant == SI5351_VARIANT_C) ? 2 : 1; parent_names[0] = si5351_input_names[0]; parent_names[1] = si5351_input_names[1]; /* register PLLA */ drvdata->pll[0].num = 0; drvdata->pll[0].drvdata = drvdata; drvdata->pll[0].hw.init = &init; memset(&init, 0, sizeof(init)); init.name = si5351_pll_names[0]; init.ops = &si5351_pll_ops; init.flags = 0; init.parent_names = parent_names; init.num_parents = num_parents; ret = devm_clk_hw_register(&client->dev, &drvdata->pll[0].hw); if (ret) { dev_err(&client->dev, "unable to register %s\n", init.name); goto err_clk; } /* register PLLB or VXCO (Si5351B) */ drvdata->pll[1].num = 1; drvdata->pll[1].drvdata = drvdata; drvdata->pll[1].hw.init = &init; memset(&init, 0, sizeof(init)); if (drvdata->variant == SI5351_VARIANT_B) { init.name = si5351_pll_names[2]; init.ops = &si5351_vxco_ops; init.flags = 0; init.parent_names = NULL; init.num_parents = 0; } else { init.name = si5351_pll_names[1]; init.ops = &si5351_pll_ops; init.flags = 0; init.parent_names = parent_names; init.num_parents = 
num_parents; } ret = devm_clk_hw_register(&client->dev, &drvdata->pll[1].hw); if (ret) { dev_err(&client->dev, "unable to register %s\n", init.name); goto err_clk; } /* register clk multisync and clk out divider */ num_clocks = (drvdata->variant == SI5351_VARIANT_A3) ? 3 : 8; parent_names[0] = si5351_pll_names[0]; if (drvdata->variant == SI5351_VARIANT_B) parent_names[1] = si5351_pll_names[2]; else parent_names[1] = si5351_pll_names[1]; drvdata->msynth = devm_kzalloc(&client->dev, num_clocks * sizeof(*drvdata->msynth), GFP_KERNEL); drvdata->clkout = devm_kzalloc(&client->dev, num_clocks * sizeof(*drvdata->clkout), GFP_KERNEL); drvdata->num_clkout = num_clocks; if (WARN_ON(!drvdata->msynth || !drvdata->clkout)) { ret = -ENOMEM; goto err_clk; } for (n = 0; n < num_clocks; n++) { drvdata->msynth[n].num = n; drvdata->msynth[n].drvdata = drvdata; drvdata->msynth[n].hw.init = &init; memset(&init, 0, sizeof(init)); init.name = si5351_msynth_names[n]; init.ops = &si5351_msynth_ops; init.flags = 0; if (pdata->clkout[n].pll_master) init.flags |= CLK_SET_RATE_PARENT; init.parent_names = parent_names; init.num_parents = 2; ret = devm_clk_hw_register(&client->dev, &drvdata->msynth[n].hw); if (ret) { dev_err(&client->dev, "unable to register %s\n", init.name); goto err_clk; } } num_parents = (drvdata->variant == SI5351_VARIANT_C) ? 4 : 3; parent_names[2] = si5351_input_names[0]; parent_names[3] = si5351_input_names[1]; for (n = 0; n < num_clocks; n++) { parent_names[0] = si5351_msynth_names[n]; parent_names[1] = (n < 4) ? 
si5351_msynth_names[0] : si5351_msynth_names[4]; drvdata->clkout[n].num = n; drvdata->clkout[n].drvdata = drvdata; drvdata->clkout[n].hw.init = &init; memset(&init, 0, sizeof(init)); init.name = si5351_clkout_names[n]; init.ops = &si5351_clkout_ops; init.flags = 0; if (pdata->clkout[n].clkout_src == SI5351_CLKOUT_SRC_MSYNTH_N) init.flags |= CLK_SET_RATE_PARENT; init.parent_names = parent_names; init.num_parents = num_parents; ret = devm_clk_hw_register(&client->dev, &drvdata->clkout[n].hw); if (ret) { dev_err(&client->dev, "unable to register %s\n", init.name); goto err_clk; } /* set initial clkout rate */ if (pdata->clkout[n].rate != 0) { int ret; ret = clk_set_rate(drvdata->clkout[n].hw.clk, pdata->clkout[n].rate); if (ret != 0) { dev_err(&client->dev, "Cannot set rate : %d\n", ret); } } } ret = of_clk_add_hw_provider(client->dev.of_node, si53351_of_clk_get, drvdata); if (ret) { dev_err(&client->dev, "unable to add clk provider\n"); goto err_clk; } return 0; err_clk: if (!IS_ERR(drvdata->pxtal)) clk_disable_unprepare(drvdata->pxtal); if (!IS_ERR(drvdata->pclkin)) clk_disable_unprepare(drvdata->pclkin); return ret; } static const struct i2c_device_id si5351_i2c_ids[] = { { "si5351a", SI5351_VARIANT_A }, { "si5351a-msop", SI5351_VARIANT_A3 }, { "si5351b", SI5351_VARIANT_B }, { "si5351c", SI5351_VARIANT_C }, { } }; MODULE_DEVICE_TABLE(i2c, si5351_i2c_ids); static struct i2c_driver si5351_driver = { .driver = { .name = "si5351", .of_match_table = of_match_ptr(si5351_dt_ids), }, .probe = si5351_i2c_probe, .id_table = si5351_i2c_ids, }; module_i2c_driver(si5351_driver); MODULE_AUTHOR("Sebastian Hesselbarth <[email protected]"); MODULE_DESCRIPTION("Silicon Labs Si5351A/B/C clock generator driver"); MODULE_LICENSE("GPL");
null
null
null
null
110,275
18,249
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
18,249
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_DOWNLOAD_PUBLIC_BACKGROUND_SERVICE_CLIENTS_H_ #define COMPONENTS_DOWNLOAD_PUBLIC_BACKGROUND_SERVICE_CLIENTS_H_ #include <map> #include <memory> #include "components/download/public/background_service/client.h" namespace download { // A list of all clients that are able to make download requests through the // DownloadService. // To add a new client, update the metric DownloadService.DownloadClients in // histograms.xml and make sure to keep this list in sync. Additions should be // treated as APPEND ONLY to make sure to keep both UMA metric semantics correct // but also to make sure the underlying database properly associates each // download with the right client. enum class DownloadClient { // Test client values. Meant to be used by the testing framework and not // production code. Callers will be unable to access the DownloadService with // these test APIs. TEST = -1, TEST_2 = -2, TEST_3 = -3, // Represents an uninitialized DownloadClient variable. INVALID = 0, OFFLINE_PAGE_PREFETCH = 1, BACKGROUND_FETCH = 2, // Used by debug surfaces in the app (the WebUI, for example). DEBUGGING = 3, MOUNTAIN_INTERNAL = 4, BOUNDARY = 5, }; using DownloadClientMap = std::map<DownloadClient, std::unique_ptr<Client>>; } // namespace download #endif // COMPONENTS_DOWNLOAD_PUBLIC_BACKGROUND_SERVICE_CLIENTS_H_
null
null
null
null
15,112
3,614
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
168,609
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * fs/cifs/connect.c * * Copyright (C) International Business Machines Corp., 2002,2011 * Author(s): Steve French ([email protected]) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/net.h> #include <linux/string.h> #include <linux/sched/signal.h> #include <linux/list.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/ctype.h> #include <linux/utsname.h> #include <linux/mempool.h> #include <linux/delay.h> #include <linux/completion.h> #include <linux/kthread.h> #include <linux/pagevec.h> #include <linux/freezer.h> #include <linux/namei.h> #include <linux/uaccess.h> #include <asm/processor.h> #include <linux/inet.h> #include <linux/module.h> #include <keys/user-type.h> #include <net/ipv6.h> #include <linux/parser.h> #include <linux/bvec.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "ntlmssp.h" #include "nterr.h" #include "rfc1002pdu.h" #include "fscache.h" #ifdef CONFIG_CIFS_SMB2 #include "smb2proto.h" #endif #define CIFS_PORT 445 #define RFC1001_PORT 139 extern mempool_t *cifs_req_poolp; /* FIXME: should these be tunable? 
*/ #define TLINK_ERROR_EXPIRE (1 * HZ) #define TLINK_IDLE_EXPIRE (600 * HZ) enum { /* Mount options that take no arguments */ Opt_user_xattr, Opt_nouser_xattr, Opt_forceuid, Opt_noforceuid, Opt_forcegid, Opt_noforcegid, Opt_noblocksend, Opt_noautotune, Opt_hard, Opt_soft, Opt_perm, Opt_noperm, Opt_mapposix, Opt_nomapposix, Opt_mapchars, Opt_nomapchars, Opt_sfu, Opt_nosfu, Opt_nodfs, Opt_posixpaths, Opt_noposixpaths, Opt_nounix, Opt_nocase, Opt_brl, Opt_nobrl, Opt_forcemandatorylock, Opt_setuidfromacl, Opt_setuids, Opt_nosetuids, Opt_dynperm, Opt_nodynperm, Opt_nohard, Opt_nosoft, Opt_nointr, Opt_intr, Opt_nostrictsync, Opt_strictsync, Opt_serverino, Opt_noserverino, Opt_rwpidforward, Opt_cifsacl, Opt_nocifsacl, Opt_acl, Opt_noacl, Opt_locallease, Opt_sign, Opt_seal, Opt_noac, Opt_fsc, Opt_mfsymlinks, Opt_multiuser, Opt_sloppy, Opt_nosharesock, Opt_persistent, Opt_nopersistent, Opt_resilient, Opt_noresilient, Opt_domainauto, /* Mount options which take numeric value */ Opt_backupuid, Opt_backupgid, Opt_uid, Opt_cruid, Opt_gid, Opt_file_mode, Opt_dirmode, Opt_port, Opt_rsize, Opt_wsize, Opt_actimeo, Opt_echo_interval, Opt_max_credits, Opt_snapshot, /* Mount options which take string value */ Opt_user, Opt_pass, Opt_ip, Opt_domain, Opt_srcaddr, Opt_iocharset, Opt_netbiosname, Opt_servern, Opt_ver, Opt_vers, Opt_sec, Opt_cache, /* Mount options to be ignored */ Opt_ignore, /* Options which could be blank */ Opt_blank_pass, Opt_blank_user, Opt_blank_ip, Opt_err }; static const match_table_t cifs_mount_option_tokens = { { Opt_user_xattr, "user_xattr" }, { Opt_nouser_xattr, "nouser_xattr" }, { Opt_forceuid, "forceuid" }, { Opt_noforceuid, "noforceuid" }, { Opt_forcegid, "forcegid" }, { Opt_noforcegid, "noforcegid" }, { Opt_noblocksend, "noblocksend" }, { Opt_noautotune, "noautotune" }, { Opt_hard, "hard" }, { Opt_soft, "soft" }, { Opt_perm, "perm" }, { Opt_noperm, "noperm" }, { Opt_mapchars, "mapchars" }, /* SFU style */ { Opt_nomapchars, "nomapchars" }, { Opt_mapposix, 
"mapposix" }, /* SFM style */ { Opt_nomapposix, "nomapposix" }, { Opt_sfu, "sfu" }, { Opt_nosfu, "nosfu" }, { Opt_nodfs, "nodfs" }, { Opt_posixpaths, "posixpaths" }, { Opt_noposixpaths, "noposixpaths" }, { Opt_nounix, "nounix" }, { Opt_nounix, "nolinux" }, { Opt_nocase, "nocase" }, { Opt_nocase, "ignorecase" }, { Opt_brl, "brl" }, { Opt_nobrl, "nobrl" }, { Opt_nobrl, "nolock" }, { Opt_forcemandatorylock, "forcemandatorylock" }, { Opt_forcemandatorylock, "forcemand" }, { Opt_setuids, "setuids" }, { Opt_nosetuids, "nosetuids" }, { Opt_setuidfromacl, "idsfromsid" }, { Opt_dynperm, "dynperm" }, { Opt_nodynperm, "nodynperm" }, { Opt_nohard, "nohard" }, { Opt_nosoft, "nosoft" }, { Opt_nointr, "nointr" }, { Opt_intr, "intr" }, { Opt_nostrictsync, "nostrictsync" }, { Opt_strictsync, "strictsync" }, { Opt_serverino, "serverino" }, { Opt_noserverino, "noserverino" }, { Opt_rwpidforward, "rwpidforward" }, { Opt_cifsacl, "cifsacl" }, { Opt_nocifsacl, "nocifsacl" }, { Opt_acl, "acl" }, { Opt_noacl, "noacl" }, { Opt_locallease, "locallease" }, { Opt_sign, "sign" }, { Opt_seal, "seal" }, { Opt_noac, "noac" }, { Opt_fsc, "fsc" }, { Opt_mfsymlinks, "mfsymlinks" }, { Opt_multiuser, "multiuser" }, { Opt_sloppy, "sloppy" }, { Opt_nosharesock, "nosharesock" }, { Opt_persistent, "persistenthandles"}, { Opt_nopersistent, "nopersistenthandles"}, { Opt_resilient, "resilienthandles"}, { Opt_noresilient, "noresilienthandles"}, { Opt_domainauto, "domainauto"}, { Opt_backupuid, "backupuid=%s" }, { Opt_backupgid, "backupgid=%s" }, { Opt_uid, "uid=%s" }, { Opt_cruid, "cruid=%s" }, { Opt_gid, "gid=%s" }, { Opt_file_mode, "file_mode=%s" }, { Opt_dirmode, "dirmode=%s" }, { Opt_dirmode, "dir_mode=%s" }, { Opt_port, "port=%s" }, { Opt_rsize, "rsize=%s" }, { Opt_wsize, "wsize=%s" }, { Opt_actimeo, "actimeo=%s" }, { Opt_echo_interval, "echo_interval=%s" }, { Opt_max_credits, "max_credits=%s" }, { Opt_snapshot, "snapshot=%s" }, { Opt_blank_user, "user=" }, { Opt_blank_user, "username=" }, { Opt_user, 
"user=%s" }, { Opt_user, "username=%s" }, { Opt_blank_pass, "pass=" }, { Opt_blank_pass, "password=" }, { Opt_pass, "pass=%s" }, { Opt_pass, "password=%s" }, { Opt_blank_ip, "ip=" }, { Opt_blank_ip, "addr=" }, { Opt_ip, "ip=%s" }, { Opt_ip, "addr=%s" }, { Opt_ignore, "unc=%s" }, { Opt_ignore, "target=%s" }, { Opt_ignore, "path=%s" }, { Opt_domain, "dom=%s" }, { Opt_domain, "domain=%s" }, { Opt_domain, "workgroup=%s" }, { Opt_srcaddr, "srcaddr=%s" }, { Opt_ignore, "prefixpath=%s" }, { Opt_iocharset, "iocharset=%s" }, { Opt_netbiosname, "netbiosname=%s" }, { Opt_servern, "servern=%s" }, { Opt_ver, "ver=%s" }, { Opt_vers, "vers=%s" }, { Opt_sec, "sec=%s" }, { Opt_cache, "cache=%s" }, { Opt_ignore, "cred" }, { Opt_ignore, "credentials" }, { Opt_ignore, "cred=%s" }, { Opt_ignore, "credentials=%s" }, { Opt_ignore, "guest" }, { Opt_ignore, "rw" }, { Opt_ignore, "ro" }, { Opt_ignore, "suid" }, { Opt_ignore, "nosuid" }, { Opt_ignore, "exec" }, { Opt_ignore, "noexec" }, { Opt_ignore, "nodev" }, { Opt_ignore, "noauto" }, { Opt_ignore, "dev" }, { Opt_ignore, "mand" }, { Opt_ignore, "nomand" }, { Opt_ignore, "_netdev" }, { Opt_err, NULL } }; enum { Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p, Opt_sec_ntlmsspi, Opt_sec_ntlmssp, Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2, Opt_sec_ntlmv2i, Opt_sec_lanman, Opt_sec_none, Opt_sec_err }; static const match_table_t cifs_secflavor_tokens = { { Opt_sec_krb5, "krb5" }, { Opt_sec_krb5i, "krb5i" }, { Opt_sec_krb5p, "krb5p" }, { Opt_sec_ntlmsspi, "ntlmsspi" }, { Opt_sec_ntlmssp, "ntlmssp" }, { Opt_ntlm, "ntlm" }, { Opt_sec_ntlmi, "ntlmi" }, { Opt_sec_ntlmv2, "nontlm" }, { Opt_sec_ntlmv2, "ntlmv2" }, { Opt_sec_ntlmv2i, "ntlmv2i" }, { Opt_sec_lanman, "lanman" }, { Opt_sec_none, "none" }, { Opt_sec_err, NULL } }; /* cache flavors */ enum { Opt_cache_loose, Opt_cache_strict, Opt_cache_none, Opt_cache_err }; static const match_table_t cifs_cacheflavor_tokens = { { Opt_cache_loose, "loose" }, { Opt_cache_strict, "strict" }, { Opt_cache_none, "none" }, 
{ Opt_cache_err, NULL } }; static const match_table_t cifs_smb_version_tokens = { { Smb_1, SMB1_VERSION_STRING }, { Smb_20, SMB20_VERSION_STRING}, { Smb_21, SMB21_VERSION_STRING }, { Smb_30, SMB30_VERSION_STRING }, { Smb_302, SMB302_VERSION_STRING }, #ifdef CONFIG_CIFS_SMB311 { Smb_311, SMB311_VERSION_STRING }, { Smb_311, ALT_SMB311_VERSION_STRING }, #endif /* SMB311 */ { Smb_version_err, NULL } }; static int ip_connect(struct TCP_Server_Info *server); static int generic_ip_connect(struct TCP_Server_Info *server); static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink); static void cifs_prune_tlinks(struct work_struct *work); static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data, const char *devname); /* * cifs tcp session reconnection * * mark tcp session as reconnecting so temporarily locked * mark all smb sessions as reconnecting for tcp session * reconnect tcp session * wake up waiters on reconnection? - (not needed currently) */ int cifs_reconnect(struct TCP_Server_Info *server) { int rc = 0; struct list_head *tmp, *tmp2; struct cifs_ses *ses; struct cifs_tcon *tcon; struct mid_q_entry *mid_entry; struct list_head retry_list; spin_lock(&GlobalMid_Lock); if (server->tcpStatus == CifsExiting) { /* the demux thread will exit normally next time through the loop */ spin_unlock(&GlobalMid_Lock); return rc; } else server->tcpStatus = CifsNeedReconnect; spin_unlock(&GlobalMid_Lock); server->maxBuf = 0; #ifdef CONFIG_CIFS_SMB2 server->max_read = 0; #endif cifs_dbg(FYI, "Reconnecting tcp session\n"); /* before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they are not used until reconnected */ cifs_dbg(FYI, "%s: marking sessions and tcons for reconnect\n", __func__); spin_lock(&cifs_tcp_ses_lock); list_for_each(tmp, &server->smb_ses_list) { ses = list_entry(tmp, struct cifs_ses, smb_ses_list); ses->need_reconnect = true; ses->ipc_tid = 0; list_for_each(tmp2, &ses->tcon_list) { tcon = 
list_entry(tmp2, struct cifs_tcon, tcon_list); tcon->need_reconnect = true; } } spin_unlock(&cifs_tcp_ses_lock); /* do not want to be sending data on a socket we are freeing */ cifs_dbg(FYI, "%s: tearing down socket\n", __func__); mutex_lock(&server->srv_mutex); if (server->ssocket) { cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state, server->ssocket->flags); kernel_sock_shutdown(server->ssocket, SHUT_WR); cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state, server->ssocket->flags); sock_release(server->ssocket); server->ssocket = NULL; } server->sequence_number = 0; server->session_estab = false; kfree(server->session_key.response); server->session_key.response = NULL; server->session_key.len = 0; server->lstrp = jiffies; /* mark submitted MIDs for retry and issue callback */ INIT_LIST_HEAD(&retry_list); cifs_dbg(FYI, "%s: moving mids to private list\n", __func__); spin_lock(&GlobalMid_Lock); list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); if (mid_entry->mid_state == MID_REQUEST_SUBMITTED) mid_entry->mid_state = MID_RETRY_NEEDED; list_move(&mid_entry->qhead, &retry_list); } spin_unlock(&GlobalMid_Lock); mutex_unlock(&server->srv_mutex); cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__); list_for_each_safe(tmp, tmp2, &retry_list) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); list_del_init(&mid_entry->qhead); mid_entry->callback(mid_entry); } do { try_to_freeze(); /* we should try only the port we connected to before */ mutex_lock(&server->srv_mutex); rc = generic_ip_connect(server); if (rc) { cifs_dbg(FYI, "reconnect error %d\n", rc); mutex_unlock(&server->srv_mutex); msleep(3000); } else { atomic_inc(&tcpSesReconnectCount); spin_lock(&GlobalMid_Lock); if (server->tcpStatus != CifsExiting) server->tcpStatus = CifsNeedNegotiate; spin_unlock(&GlobalMid_Lock); mutex_unlock(&server->srv_mutex); } } while (server->tcpStatus == 
CifsNeedReconnect); if (server->tcpStatus == CifsNeedNegotiate) mod_delayed_work(cifsiod_wq, &server->echo, 0); return rc; } static void cifs_echo_request(struct work_struct *work) { int rc; struct TCP_Server_Info *server = container_of(work, struct TCP_Server_Info, echo.work); unsigned long echo_interval; /* * If we need to renegotiate, set echo interval to zero to * immediately call echo service where we can renegotiate. */ if (server->tcpStatus == CifsNeedNegotiate) echo_interval = 0; else echo_interval = server->echo_interval; /* * We cannot send an echo if it is disabled. * Also, no need to ping if we got a response recently. */ if (server->tcpStatus == CifsNeedReconnect || server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew || (server->ops->can_echo && !server->ops->can_echo(server)) || time_before(jiffies, server->lstrp + echo_interval - HZ)) goto requeue_echo; rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS; if (rc) cifs_dbg(FYI, "Unable to send echo request to server: %s\n", server->hostname); requeue_echo: queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval); } static bool allocate_buffers(struct TCP_Server_Info *server) { if (!server->bigbuf) { server->bigbuf = (char *)cifs_buf_get(); if (!server->bigbuf) { cifs_dbg(VFS, "No memory for large SMB response\n"); msleep(3000); /* retry will check if exiting */ return false; } } else if (server->large_buf) { /* we are reusing a dirty large buf, clear its start */ memset(server->bigbuf, 0, HEADER_SIZE(server)); } if (!server->smallbuf) { server->smallbuf = (char *)cifs_small_buf_get(); if (!server->smallbuf) { cifs_dbg(VFS, "No memory for SMB response\n"); msleep(1000); /* retry will check if exiting */ return false; } /* beginning of smb buffer is cleared in our buf_get */ } else { /* if existing small buf clear beginning */ memset(server->smallbuf, 0, HEADER_SIZE(server)); } return true; } static bool server_unresponsive(struct TCP_Server_Info *server) { /* * We need 
to wait 2 echo intervals to make sure we handle such * situations right: * 1s client sends a normal SMB request * 2s client gets a response * 30s echo workqueue job pops, and decides we got a response recently * and don't need to send another * ... * 65s kernel_recvmsg times out, and we see that we haven't gotten * a response in >60s. */ if (server->tcpStatus == CifsGood && time_after(jiffies, server->lstrp + 2 * server->echo_interval)) { cifs_dbg(VFS, "Server %s has not responded in %lu seconds. Reconnecting...\n", server->hostname, (2 * server->echo_interval) / HZ); cifs_reconnect(server); wake_up(&server->response_q); return true; } return false; } static int cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) { int length = 0; int total_read; smb_msg->msg_control = NULL; smb_msg->msg_controllen = 0; for (total_read = 0; msg_data_left(smb_msg); total_read += length) { try_to_freeze(); if (server_unresponsive(server)) return -ECONNABORTED; length = sock_recvmsg(server->ssocket, smb_msg, 0); if (server->tcpStatus == CifsExiting) return -ESHUTDOWN; if (server->tcpStatus == CifsNeedReconnect) { cifs_reconnect(server); return -ECONNABORTED; } if (length == -ERESTARTSYS || length == -EAGAIN || length == -EINTR) { /* * Minimum sleep to prevent looping, allowing socket * to clear and app threads to set tcpStatus * CifsNeedReconnect if server hung. 
*/ usleep_range(1000, 2000); length = 0; continue; } if (length <= 0) { cifs_dbg(FYI, "Received no data or error: %d\n", length); cifs_reconnect(server); return -ECONNABORTED; } } return total_read; } int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, unsigned int to_read) { struct msghdr smb_msg; struct kvec iov = {.iov_base = buf, .iov_len = to_read}; iov_iter_kvec(&smb_msg.msg_iter, READ | ITER_KVEC, &iov, 1, to_read); return cifs_readv_from_socket(server, &smb_msg); } int cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page, unsigned int to_read) { struct msghdr smb_msg; struct bio_vec bv = {.bv_page = page, .bv_len = to_read}; iov_iter_bvec(&smb_msg.msg_iter, READ | ITER_BVEC, &bv, 1, to_read); return cifs_readv_from_socket(server, &smb_msg); } static bool is_smb_response(struct TCP_Server_Info *server, unsigned char type) { /* * The first byte big endian of the length field, * is actually not part of the length but the type * with the most common, zero, as regular data. */ switch (type) { case RFC1002_SESSION_MESSAGE: /* Regular SMB response */ return true; case RFC1002_SESSION_KEEP_ALIVE: cifs_dbg(FYI, "RFC 1002 session keep alive\n"); break; case RFC1002_POSITIVE_SESSION_RESPONSE: cifs_dbg(FYI, "RFC 1002 positive session response\n"); break; case RFC1002_NEGATIVE_SESSION_RESPONSE: /* * We get this from Windows 98 instead of an error on * SMB negprot response. */ cifs_dbg(FYI, "RFC 1002 negative session response\n"); /* give server a second to clean up */ msleep(1000); /* * Always try 445 first on reconnect since we get NACK * on some if we ever connected to port 139 (the NACK * is since we do not begin with RFC1001 session * initialize frame). 
*/ cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT); cifs_reconnect(server); wake_up(&server->response_q); break; default: cifs_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type); cifs_reconnect(server); } return false; } void dequeue_mid(struct mid_q_entry *mid, bool malformed) { #ifdef CONFIG_CIFS_STATS2 mid->when_received = jiffies; #endif spin_lock(&GlobalMid_Lock); if (!malformed) mid->mid_state = MID_RESPONSE_RECEIVED; else mid->mid_state = MID_RESPONSE_MALFORMED; list_del_init(&mid->qhead); spin_unlock(&GlobalMid_Lock); } static void handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server, char *buf, int malformed) { if (server->ops->check_trans2 && server->ops->check_trans2(mid, server, buf, malformed)) return; mid->resp_buf = buf; mid->large_buf = server->large_buf; /* Was previous buf put in mpx struct for multi-rsp? */ if (!mid->multiRsp) { /* smb buffer will be freed by user thread */ if (server->large_buf) server->bigbuf = NULL; else server->smallbuf = NULL; } dequeue_mid(mid, malformed); } static void clean_demultiplex_info(struct TCP_Server_Info *server) { int length; /* take it off the list, if it's not already */ spin_lock(&cifs_tcp_ses_lock); list_del_init(&server->tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); spin_lock(&GlobalMid_Lock); server->tcpStatus = CifsExiting; spin_unlock(&GlobalMid_Lock); wake_up_all(&server->response_q); /* check if we have blocked requests that need to free */ spin_lock(&server->req_lock); if (server->credits <= 0) server->credits = 1; spin_unlock(&server->req_lock); /* * Although there should not be any requests blocked on this queue it * can not hurt to be paranoid and try to wake up requests that may * haven been blocked when more than 50 at time were on the wire to the * same server - they now will see the session is in exit state and get * out of SendReceive. 
*/ wake_up_all(&server->request_q); /* give those requests time to exit */ msleep(125); if (server->ssocket) { sock_release(server->ssocket); server->ssocket = NULL; } if (!list_empty(&server->pending_mid_q)) { struct list_head dispose_list; struct mid_q_entry *mid_entry; struct list_head *tmp, *tmp2; INIT_LIST_HEAD(&dispose_list); spin_lock(&GlobalMid_Lock); list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); cifs_dbg(FYI, "Clearing mid 0x%llx\n", mid_entry->mid); mid_entry->mid_state = MID_SHUTDOWN; list_move(&mid_entry->qhead, &dispose_list); } spin_unlock(&GlobalMid_Lock); /* now walk dispose list and issue callbacks */ list_for_each_safe(tmp, tmp2, &dispose_list) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); cifs_dbg(FYI, "Callback mid 0x%llx\n", mid_entry->mid); list_del_init(&mid_entry->qhead); mid_entry->callback(mid_entry); } /* 1/8th of sec is more than enough time for them to exit */ msleep(125); } if (!list_empty(&server->pending_mid_q)) { /* * mpx threads have not exited yet give them at least the smb * send timeout time for long ops. * * Due to delays on oplock break requests, we need to wait at * least 45 seconds before giving up on a request getting a * response and going ahead and killing cifsd. */ cifs_dbg(FYI, "Wait for exit from demultiplex thread\n"); msleep(46000); /* * If threads still have not exited they are probably never * coming home not much else we can do but free the memory. 
*/ } kfree(server->hostname); kfree(server); length = atomic_dec_return(&tcpSesAllocCount); if (length > 0) mempool_resize(cifs_req_poolp, length + cifs_min_rcv); } static int standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid) { int length; char *buf = server->smallbuf; unsigned int pdu_length = get_rfc1002_length(buf); /* make sure this will fit in a large buffer */ if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) - 4) { cifs_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length); cifs_reconnect(server); wake_up(&server->response_q); return -ECONNABORTED; } /* switch to large buffer if too big for a small one */ if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) { server->large_buf = true; memcpy(server->bigbuf, buf, server->total_read); buf = server->bigbuf; } /* now read the rest */ length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, pdu_length - HEADER_SIZE(server) + 1 + 4); if (length < 0) return length; server->total_read += length; dump_smb(buf, server->total_read); return cifs_handle_standard(server, mid); } int cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid) { char *buf = server->large_buf ? server->bigbuf : server->smallbuf; int length; /* * We know that we received enough to get to the MID as we * checked the pdu_length earlier. Now check to see * if the rest of the header is OK. We borrow the length * var for the rest of the loop to avoid a new stack var. * * 48 bytes is enough to display the header and a little bit * into the payload for debugging purposes. 
*/ length = server->ops->check_message(buf, server->total_read, server); if (length != 0) cifs_dump_mem("Bad SMB: ", buf, min_t(unsigned int, server->total_read, 48)); if (server->ops->is_status_pending && server->ops->is_status_pending(buf, server, length)) return -1; if (!mid) return length; handle_mid(mid, server, buf, length); return 0; } static int cifs_demultiplex_thread(void *p) { int length; struct TCP_Server_Info *server = p; unsigned int pdu_length; char *buf = NULL; struct task_struct *task_to_wake = NULL; struct mid_q_entry *mid_entry; current->flags |= PF_MEMALLOC; cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current)); length = atomic_inc_return(&tcpSesAllocCount); if (length > 1) mempool_resize(cifs_req_poolp, length + cifs_min_rcv); set_freezable(); while (server->tcpStatus != CifsExiting) { if (try_to_freeze()) continue; if (!allocate_buffers(server)) continue; server->large_buf = false; buf = server->smallbuf; pdu_length = 4; /* enough to get RFC1001 header */ length = cifs_read_from_socket(server, buf, pdu_length); if (length < 0) continue; server->total_read = length; /* * The right amount was read from socket - 4 bytes, * so we can now interpret the length field. 
*/ pdu_length = get_rfc1002_length(buf); cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length); if (!is_smb_response(server, buf[0])) continue; /* make sure we have enough to get to the MID */ if (pdu_length < HEADER_SIZE(server) - 1 - 4) { cifs_dbg(VFS, "SMB response too short (%u bytes)\n", pdu_length); cifs_reconnect(server); wake_up(&server->response_q); continue; } /* read down to the MID */ length = cifs_read_from_socket(server, buf + 4, HEADER_SIZE(server) - 1 - 4); if (length < 0) continue; server->total_read += length; if (server->ops->is_transform_hdr && server->ops->receive_transform && server->ops->is_transform_hdr(buf)) { length = server->ops->receive_transform(server, &mid_entry); } else { mid_entry = server->ops->find_mid(server, buf); if (!mid_entry || !mid_entry->receive) length = standard_receive3(server, mid_entry); else length = mid_entry->receive(server, mid_entry); } if (length < 0) continue; if (server->large_buf) buf = server->bigbuf; server->lstrp = jiffies; if (mid_entry != NULL) { if (!mid_entry->multiRsp || mid_entry->multiEnd) mid_entry->callback(mid_entry); } else if (!server->ops->is_oplock_break || !server->ops->is_oplock_break(buf, server)) { cifs_dbg(VFS, "No task to wake, unknown frame received! 
NumMids %d\n", atomic_read(&midCount)); cifs_dump_mem("Received Data is: ", buf, HEADER_SIZE(server)); #ifdef CONFIG_CIFS_DEBUG2 if (server->ops->dump_detail) server->ops->dump_detail(buf); cifs_dump_mids(server); #endif /* CIFS_DEBUG2 */ } } /* end while !EXITING */ /* buffer usually freed in free_mid - need to free it here on exit */ cifs_buf_release(server->bigbuf); if (server->smallbuf) /* no sense logging a debug message if NULL */ cifs_small_buf_release(server->smallbuf); task_to_wake = xchg(&server->tsk, NULL); clean_demultiplex_info(server); /* if server->tsk was NULL then wait for a signal before exiting */ if (!task_to_wake) { set_current_state(TASK_INTERRUPTIBLE); while (!signal_pending(current)) { schedule(); set_current_state(TASK_INTERRUPTIBLE); } set_current_state(TASK_RUNNING); } module_put_and_exit(0); } /* extract the host portion of the UNC string */ static char * extract_hostname(const char *unc) { const char *src; char *dst, *delim; unsigned int len; /* skip double chars at beginning of string */ /* BB: check validity of these bytes? 
*/ src = unc + 2; /* delimiter between hostname and sharename is always '\\' now */ delim = strchr(src, '\\'); if (!delim) return ERR_PTR(-EINVAL); len = delim - src; dst = kmalloc((len + 1), GFP_KERNEL); if (dst == NULL) return ERR_PTR(-ENOMEM); memcpy(dst, src, len); dst[len] = '\0'; return dst; } static int get_option_ul(substring_t args[], unsigned long *option) { int rc; char *string; string = match_strdup(args); if (string == NULL) return -ENOMEM; rc = kstrtoul(string, 0, option); kfree(string); return rc; } static int get_option_uid(substring_t args[], kuid_t *result) { unsigned long value; kuid_t uid; int rc; rc = get_option_ul(args, &value); if (rc) return rc; uid = make_kuid(current_user_ns(), value); if (!uid_valid(uid)) return -EINVAL; *result = uid; return 0; } static int get_option_gid(substring_t args[], kgid_t *result) { unsigned long value; kgid_t gid; int rc; rc = get_option_ul(args, &value); if (rc) return rc; gid = make_kgid(current_user_ns(), value); if (!gid_valid(gid)) return -EINVAL; *result = gid; return 0; } static int cifs_parse_security_flavors(char *value, struct smb_vol *vol) { substring_t args[MAX_OPT_ARGS]; /* * With mount options, the last one should win. Reset any existing * settings back to default. 
 */
	vol->sectype = Unspecified;
	vol->sign = false;

	switch (match_token(value, cifs_secflavor_tokens, args)) {
	case Opt_sec_krb5p:
		cifs_dbg(VFS, "sec=krb5p is not supported!\n");
		return 1;
	case Opt_sec_krb5i:
		vol->sign = true;
		/* Fallthrough */
	case Opt_sec_krb5:
		vol->sectype = Kerberos;
		break;
	case Opt_sec_ntlmsspi:
		vol->sign = true;
		/* Fallthrough */
	case Opt_sec_ntlmssp:
		vol->sectype = RawNTLMSSP;
		break;
	case Opt_sec_ntlmi:
		vol->sign = true;
		/* Fallthrough */
	case Opt_ntlm:
		vol->sectype = NTLM;
		break;
	case Opt_sec_ntlmv2i:
		vol->sign = true;
		/* Fallthrough */
	case Opt_sec_ntlmv2:
		vol->sectype = NTLMv2;
		break;
#ifdef CONFIG_CIFS_WEAK_PW_HASH
	case Opt_sec_lanman:
		vol->sectype = LANMAN;
		break;
#endif
	case Opt_sec_none:
		vol->nullauth = 1;
		break;
	default:
		cifs_dbg(VFS, "bad security option: %s\n", value);
		return 1;
	}

	return 0;
}

/*
 * Translate the cache= mount-option value into the (direct_io, strict_io)
 * flag pair on @vol.  Returns 0 on success, 1 on an unrecognized value.
 */
static int
cifs_parse_cache_flavor(char *value, struct smb_vol *vol)
{
	substring_t args[MAX_OPT_ARGS];

	switch (match_token(value, cifs_cacheflavor_tokens, args)) {
	case Opt_cache_loose:
		vol->direct_io = false;
		vol->strict_io = false;
		break;
	case Opt_cache_strict:
		vol->direct_io = false;
		vol->strict_io = true;
		break;
	case Opt_cache_none:
		/* cache=none: bypass the page cache entirely */
		vol->direct_io = true;
		vol->strict_io = false;
		break;
	default:
		cifs_dbg(VFS, "bad cache= option: %s\n", value);
		return 1;
	}
	return 0;
}

/*
 * Translate the vers= mount-option value into the dialect-specific
 * operations/values tables on @vol.  Returns 0 on success, 1 on an
 * unknown or unsupported (not configured in) version string.
 */
static int
cifs_parse_smb_version(char *value, struct smb_vol *vol)
{
	substring_t args[MAX_OPT_ARGS];

	switch (match_token(value, cifs_smb_version_tokens, args)) {
	case Smb_1:
		vol->ops = &smb1_operations;
		vol->vals = &smb1_values;
		break;
#ifdef CONFIG_CIFS_SMB2
	case Smb_20:
		vol->ops = &smb20_operations;
		vol->vals = &smb20_values;
		break;
	case Smb_21:
		vol->ops = &smb21_operations;
		vol->vals = &smb21_values;
		break;
	case Smb_30:
		vol->ops = &smb30_operations;
		vol->vals = &smb30_values;
		break;
	case Smb_302:
		vol->ops = &smb30_operations; /* currently identical with 3.0 */
		vol->vals = &smb302_values;
		break;
#ifdef CONFIG_CIFS_SMB311
	case Smb_311:
		vol->ops = &smb311_operations;
		vol->vals = &smb311_values;
		break;
#endif /* SMB311 */
#endif
	default:
		cifs_dbg(VFS, "Unknown vers= option specified: %s\n", value);
		return 1;
	}
	return 0;
}

/*
 * Parse a devname into substrings and populate the vol->UNC and vol->prepath
 * fields with the result. Returns 0 on success and an error otherwise.
 */
static int
cifs_parse_devname(const char *devname, struct smb_vol *vol)
{
	char *pos;
	const char *delims = "/\\";
	size_t len;

	/* make sure we have a valid UNC double delimiter prefix */
	len = strspn(devname, delims);
	if (len != 2)
		return -EINVAL;

	/* find delimiter between host and sharename */
	pos = strpbrk(devname + 2, delims);
	if (!pos)
		return -EINVAL;

	/* skip past delimiter */
	++pos;

	/* now go until next delimiter or end of string */
	len = strcspn(pos, delims);

	/* move "pos" up to delimiter or NULL */
	pos += len;
	vol->UNC = kstrndup(devname, pos - devname, GFP_KERNEL);
	if (!vol->UNC)
		return -ENOMEM;

	convert_delimiter(vol->UNC, '\\');

	/* skip any delimiter */
	if (*pos == '/' || *pos == '\\')
		pos++;

	/* If pos is NULL then no prepath */
	if (!*pos)
		return 0;

	vol->prepath = kstrdup(pos, GFP_KERNEL);
	if (!vol->prepath)
		return -ENOMEM;

	return 0;
}

/*
 * Parse the comma (or sep=) separated mount-option string @mountdata plus
 * the UNC @devname into a fully-populated, zero-initialized @vol.
 *
 * Returns 0 on success, 1 on any parse/allocation error (after freeing the
 * temporary working copies).  Ownership of the strings allocated into @vol
 * (UNC, prepath, username, password, domainname, iocharset) passes to the
 * caller on success.
 */
static int
cifs_parse_mount_options(const char *mountdata, const char *devname,
			 struct smb_vol *vol)
{
	char *data, *end;
	char *mountdata_copy = NULL, *options;
	unsigned int temp_len, i, j;
	char separator[2];
	short int override_uid = -1;
	short int override_gid = -1;
	bool uid_specified = false;
	bool gid_specified = false;
	bool sloppy = false;
	char *invalid = NULL;
	char *nodename = utsname()->nodename;
	char *string = NULL;
	char *tmp_end, *value;
	char delim;
	bool got_ip = false;
	unsigned short port = 0;
	struct sockaddr *dstaddr = (struct sockaddr *)&vol->dstaddr;

	separator[0] = ',';
	separator[1] = 0;
	delim = separator[0];

	/* ensure we always start with zeroed-out smb_vol */
	memset(vol, 0, sizeof(*vol));

	/*
	 * does not have to be perfect mapping since field is
	 * informational, only used for servers that do not support
	 * port 445 and it can be overridden at mount time
	 */
	memset(vol->source_rfc1001_name, 0x20, RFC1001_NAME_LEN);
	for (i = 0; i < strnlen(nodename, RFC1001_NAME_LEN); i++)
		vol->source_rfc1001_name[i] = toupper(nodename[i]);

	vol->source_rfc1001_name[RFC1001_NAME_LEN] = 0;
	/* null target name indicates to use *SMBSERVR default called name
	   if we end up sending RFC1001 session initialize */
	vol->target_rfc1001_name[0] = 0;
	vol->cred_uid = current_uid();
	vol->linux_uid = current_uid();
	vol->linux_gid = current_gid();

	/*
	 * default to SFM style remapping of seven reserved characters
	 * unless user overrides it or we negotiate CIFS POSIX where
	 * it is unnecessary.  Can not simultaneously use more than one mapping
	 * since then readdir could list files that open could not open
	 */
	vol->remap = true;

	/* default to only allowing write access to owner of the mount */
	vol->dir_mode = vol->file_mode = S_IRUGO | S_IXUGO | S_IWUSR;

	/* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */
	/* default is always to request posix paths. */
	vol->posix_paths = 1;
	/* default to using server inode numbers where available */
	vol->server_ino = 1;

	/* default is to use strict cifs caching semantics */
	vol->strict_io = true;

	vol->actimeo = CIFS_DEF_ACTIMEO;

	/* FIXME: add autonegotiation -- for now, SMB1 is default */
	vol->ops = &smb1_operations;
	vol->vals = &smb1_values;

	vol->echo_interval = SMB_ECHO_INTERVAL_DEFAULT;

	if (!mountdata)
		goto cifs_parse_mount_err;

	/* private working copy: strsep() below mutates it in place */
	mountdata_copy = kstrndup(mountdata, PAGE_SIZE, GFP_KERNEL);
	if (!mountdata_copy)
		goto cifs_parse_mount_err;

	options = mountdata_copy;
	end = options + strlen(options);

	/* sep=X must come first; it changes the option delimiter itself */
	if (strncmp(options, "sep=", 4) == 0) {
		if (options[4] != 0) {
			separator[0] = options[4];
			options += 5;
		} else {
			cifs_dbg(FYI, "Null separator not allowed\n");
		}
	}
	vol->backupuid_specified = false; /* no backup intent for a user */
	vol->backupgid_specified = false; /* no backup intent for a group */

	switch (cifs_parse_devname(devname, vol)) {
	case 0:
		break;
	case -ENOMEM:
		cifs_dbg(VFS, "Unable to allocate memory for devname.\n");
		goto cifs_parse_mount_err;
	case -EINVAL:
		cifs_dbg(VFS, "Malformed UNC in devname.\n");
		goto cifs_parse_mount_err;
	default:
		cifs_dbg(VFS, "Unknown error parsing devname.\n");
		goto cifs_parse_mount_err;
	}

	while ((data = strsep(&options, separator)) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		unsigned long option;
		int token;

		if (!*data)
			continue;

		token = match_token(data, cifs_mount_option_tokens, args);

		switch (token) {

		/* Ignore the following */
		case Opt_ignore:
			break;

		/* Boolean values */
		case Opt_user_xattr:
			vol->no_xattr = 0;
			break;
		case Opt_nouser_xattr:
			vol->no_xattr = 1;
			break;
		case Opt_forceuid:
			override_uid = 1;
			break;
		case Opt_noforceuid:
			override_uid = 0;
			break;
		case Opt_forcegid:
			override_gid = 1;
			break;
		case Opt_noforcegid:
			override_gid = 0;
			break;
		case Opt_noblocksend:
			vol->noblocksnd = 1;
			break;
		case Opt_noautotune:
			vol->noautotune = 1;
			break;
		case Opt_hard:
			vol->retry = 1;
			break;
		case Opt_soft:
			vol->retry = 0;
			break;
		case Opt_perm:
			vol->noperm = 0;
			break;
		case Opt_noperm:
			vol->noperm = 1;
			break;
		case Opt_mapchars:
			vol->sfu_remap = true;
			vol->remap = false; /* disable SFM mapping */
			break;
		case Opt_nomapchars:
			vol->sfu_remap = false;
			break;
		case Opt_mapposix:
			vol->remap = true;
			vol->sfu_remap = false; /* disable SFU mapping */
			break;
		case Opt_nomapposix:
			vol->remap = false;
			break;
		case Opt_sfu:
			vol->sfu_emul = 1;
			break;
		case Opt_nosfu:
			vol->sfu_emul = 0;
			break;
		case Opt_nodfs:
			vol->nodfs = 1;
			break;
		case Opt_posixpaths:
			vol->posix_paths = 1;
			break;
		case Opt_noposixpaths:
			vol->posix_paths = 0;
			break;
		case Opt_nounix:
			vol->no_linux_ext = 1;
			break;
		case Opt_nocase:
			vol->nocase = 1;
			break;
		case Opt_brl:
			vol->nobrl = 0;
			break;
		case Opt_nobrl:
			vol->nobrl = 1;
			/*
			 * turn off mandatory locking in mode
			 * if remote locking is turned off since the
			 * local vfs will do advisory
			 */
			if (vol->file_mode ==
				(S_IALLUGO & ~(S_ISUID | S_IXGRP)))
				vol->file_mode = S_IALLUGO;
			break;
		case Opt_forcemandatorylock:
			vol->mand_lock = 1;
			break;
		case Opt_setuids:
			vol->setuids = 1;
			break;
		case Opt_nosetuids:
			vol->setuids = 0;
			break;
		case Opt_setuidfromacl:
			vol->setuidfromacl = 1;
			break;
		case Opt_dynperm:
			vol->dynperm = true;
			break;
		case Opt_nodynperm:
			vol->dynperm = false;
			break;
		case Opt_nohard:
			vol->retry = 0;
			break;
		case Opt_nosoft:
			vol->retry = 1;
			break;
		case Opt_nointr:
			vol->intr = 0;
			break;
		case Opt_intr:
			vol->intr = 1;
			break;
		case Opt_nostrictsync:
			vol->nostrictsync = 1;
			break;
		case Opt_strictsync:
			vol->nostrictsync = 0;
			break;
		case Opt_serverino:
			vol->server_ino = 1;
			break;
		case Opt_noserverino:
			vol->server_ino = 0;
			break;
		case Opt_rwpidforward:
			vol->rwpidforward = 1;
			break;
		case Opt_cifsacl:
			vol->cifs_acl = 1;
			break;
		case Opt_nocifsacl:
			vol->cifs_acl = 0;
			break;
		case Opt_acl:
			vol->no_psx_acl = 0;
			break;
		case Opt_noacl:
			vol->no_psx_acl = 1;
			break;
		case Opt_locallease:
			vol->local_lease = 1;
			break;
		case Opt_sign:
			vol->sign = true;
			break;
		case Opt_seal:
			/* we do not do the following in secFlags because seal
			 * is a per tree connection (mount) not a per socket
			 * or per-smb connection option in the protocol
			 * vol->secFlg |= CIFSSEC_MUST_SEAL;
			 */
			vol->seal = 1;
			break;
		case Opt_noac:
			pr_warn("CIFS: Mount option noac not supported. Instead set /proc/fs/cifs/LookupCacheEnabled to 0\n");
			break;
		case Opt_fsc:
#ifndef CONFIG_CIFS_FSCACHE
			cifs_dbg(VFS, "FS-Cache support needs CONFIG_CIFS_FSCACHE kernel config option set\n");
			goto cifs_parse_mount_err;
#endif
			vol->fsc = true;
			break;
		case Opt_mfsymlinks:
			vol->mfsymlinks = true;
			break;
		case Opt_multiuser:
			vol->multiuser = true;
			break;
		case Opt_sloppy:
			sloppy = true;
			break;
		case Opt_nosharesock:
			vol->nosharesock = true;
			break;
		case Opt_nopersistent:
			vol->nopersistent = true;
			if (vol->persistent) {
				cifs_dbg(VFS,
					 "persistenthandles mount options conflict\n");
				goto cifs_parse_mount_err;
			}
			break;
		case Opt_persistent:
			vol->persistent = true;
			if ((vol->nopersistent) || (vol->resilient)) {
				cifs_dbg(VFS,
					 "persistenthandles mount options conflict\n");
				goto cifs_parse_mount_err;
			}
			break;
		case Opt_resilient:
			vol->resilient = true;
			if (vol->persistent) {
				cifs_dbg(VFS,
					 "persistenthandles mount options conflict\n");
				goto cifs_parse_mount_err;
			}
			break;
		case Opt_noresilient:
			vol->resilient = false; /* already the default */
			break;
		case Opt_domainauto:
			vol->domainauto = true;
			break;

		/* Numeric Values */
		case Opt_backupuid:
			if (get_option_uid(args, &vol->backupuid)) {
				cifs_dbg(VFS, "%s: Invalid backupuid value\n",
					 __func__);
				goto cifs_parse_mount_err;
			}
			vol->backupuid_specified = true;
			break;
		case Opt_backupgid:
			if (get_option_gid(args, &vol->backupgid)) {
				cifs_dbg(VFS, "%s: Invalid backupgid value\n",
					 __func__);
				goto cifs_parse_mount_err;
			}
			vol->backupgid_specified = true;
			break;
		case Opt_uid:
			if (get_option_uid(args, &vol->linux_uid)) {
				cifs_dbg(VFS, "%s: Invalid uid value\n",
					 __func__);
				goto cifs_parse_mount_err;
			}
			uid_specified = true;
			break;
		case Opt_cruid:
			if (get_option_uid(args, &vol->cred_uid)) {
				cifs_dbg(VFS, "%s: Invalid cruid value\n",
					 __func__);
				goto cifs_parse_mount_err;
			}
			break;
		case Opt_gid:
			if (get_option_gid(args, &vol->linux_gid)) {
				cifs_dbg(VFS, "%s: Invalid gid value\n",
					 __func__);
				goto cifs_parse_mount_err;
			}
			gid_specified = true;
			break;
		case Opt_file_mode:
			if (get_option_ul(args, &option)) {
				cifs_dbg(VFS, "%s: Invalid file_mode value\n",
					 __func__);
				goto cifs_parse_mount_err;
			}
			vol->file_mode = option;
			break;
		case Opt_dirmode:
			if (get_option_ul(args, &option)) {
				cifs_dbg(VFS, "%s: Invalid dir_mode value\n",
					 __func__);
				goto cifs_parse_mount_err;
			}
			vol->dir_mode = option;
			break;
		case Opt_port:
			if (get_option_ul(args, &option) ||
			    option > USHRT_MAX) {
				cifs_dbg(VFS, "%s: Invalid port value\n",
					 __func__);
				goto cifs_parse_mount_err;
			}
			port = (unsigned short)option;
			break;
		case Opt_rsize:
			if (get_option_ul(args, &option)) {
				cifs_dbg(VFS, "%s: Invalid rsize value\n",
					 __func__);
				goto cifs_parse_mount_err;
			}
			vol->rsize = option;
			break;
		case Opt_wsize:
			if (get_option_ul(args, &option)) {
				cifs_dbg(VFS, "%s: Invalid wsize value\n",
					 __func__);
				goto cifs_parse_mount_err;
			}
			vol->wsize = option;
			break;
		case Opt_actimeo:
			if (get_option_ul(args, &option)) {
				cifs_dbg(VFS, "%s: Invalid actimeo value\n",
					 __func__);
				goto cifs_parse_mount_err;
			}
			/* user supplies seconds; stored in jiffies */
			vol->actimeo = HZ * option;
			if (vol->actimeo > CIFS_MAX_ACTIMEO) {
				cifs_dbg(VFS, "attribute cache timeout too large\n");
				goto cifs_parse_mount_err;
			}
			break;
		case Opt_echo_interval:
			if (get_option_ul(args, &option)) {
				cifs_dbg(VFS, "%s: Invalid echo interval value\n",
					 __func__);
				goto cifs_parse_mount_err;
			}
			vol->echo_interval = option;
			break;
		case Opt_snapshot:
			if (get_option_ul(args, &option)) {
				cifs_dbg(VFS, "%s: Invalid snapshot time\n",
					 __func__);
				goto cifs_parse_mount_err;
			}
			vol->snapshot_time = option;
			break;
		case Opt_max_credits:
			if (get_option_ul(args, &option) || (option < 20) ||
			    (option > 60000)) {
				cifs_dbg(VFS, "%s: Invalid max_credits value\n",
					 __func__);
				goto cifs_parse_mount_err;
			}
			vol->max_credits = option;
			break;

		/* String Arguments */

		case Opt_blank_user:
			/* null user, ie. anonymous authentication */
			vol->nullauth = 1;
			vol->username = NULL;
			break;
		case Opt_user:
			string = match_strdup(args);
			if (string == NULL)
				goto out_nomem;

			if (strnlen(string, CIFS_MAX_USERNAME_LEN) >
							CIFS_MAX_USERNAME_LEN) {
				pr_warn("CIFS: username too long\n");
				goto cifs_parse_mount_err;
			}

			kfree(vol->username);
			vol->username = kstrdup(string, GFP_KERNEL);
			if (!vol->username)
				goto cifs_parse_mount_err;
			break;
		case Opt_blank_pass:
			/* passwords have to be handled differently
			 * to allow the character used for deliminator
			 * to be passed within them
			 */

			/*
			 * Check if this is a case where the  password
			 * starts with a delimiter
			 */
			tmp_end = strchr(data, '=');
			tmp_end++;
			if (!(tmp_end < end && tmp_end[1] == delim)) {
				/* No it is not. Set the password to NULL */
				kfree(vol->password);
				vol->password = NULL;
				break;
			}
			/* Yes it is. Drop down to Opt_pass below.*/
		case Opt_pass:
			/* Obtain the value string */
			value = strchr(data, '=');
			value++;

			/* Set tmp_end to end of the string */
			tmp_end = (char *) value + strlen(value);

			/* Check if following character is the deliminator
			 * If yes, we have encountered a double deliminator
			 * reset the NULL character to the deliminator
			 */
			if (tmp_end < end && tmp_end[1] == delim) {
				tmp_end[0] = delim;

				/* Keep iterating until we get to a single
				 * deliminator OR the end
				 */
				while ((tmp_end = strchr(tmp_end, delim))
					!= NULL && (tmp_end[1] == delim)) {
						tmp_end = (char *) &tmp_end[2];
				}

				/* Reset var options to point to next element */
				if (tmp_end) {
					tmp_end[0] = '\0';
					options = (char *) &tmp_end[1];
				} else
					/* Reached the end of the mount option
					 * string */
					options = end;
			}

			kfree(vol->password);
			/* Now build new password string */
			temp_len = strlen(value);
			vol->password = kzalloc(temp_len+1, GFP_KERNEL);
			if (vol->password == NULL) {
				pr_warn("CIFS: no memory for password\n");
				goto cifs_parse_mount_err;
			}

			/* copy, collapsing doubled delimiters to one */
			for (i = 0, j = 0; i < temp_len; i++, j++) {
				vol->password[j] = value[i];
				if ((value[i] == delim) &&
				     value[i+1] == delim)
					/* skip the second deliminator */
					i++;
			}
			vol->password[j] = '\0';
			break;
		case Opt_blank_ip:
			/* FIXME: should this be an error instead? */
			got_ip = false;
			break;
		case Opt_ip:
			string = match_strdup(args);
			if (string == NULL)
				goto out_nomem;

			if (!cifs_convert_address(dstaddr, string,
					strlen(string))) {
				pr_err("CIFS: bad ip= option (%s).\n", string);
				goto cifs_parse_mount_err;
			}
			got_ip = true;
			break;
		case Opt_domain:
			string = match_strdup(args);
			if (string == NULL)
				goto out_nomem;

			if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN)
					== CIFS_MAX_DOMAINNAME_LEN) {
				pr_warn("CIFS: domain name too long\n");
				goto cifs_parse_mount_err;
			}

			kfree(vol->domainname);
			vol->domainname = kstrdup(string, GFP_KERNEL);
			if (!vol->domainname) {
				pr_warn("CIFS: no memory for domainname\n");
				goto cifs_parse_mount_err;
			}
			cifs_dbg(FYI, "Domain name set\n");
			break;
		case Opt_srcaddr:
			string = match_strdup(args);
			if (string == NULL)
				goto out_nomem;

			if (!cifs_convert_address(
					(struct sockaddr *)&vol->srcaddr,
					string, strlen(string))) {
				pr_warn("CIFS: Could not parse srcaddr: %s\n",
					string);
				goto cifs_parse_mount_err;
			}
			break;
		case Opt_iocharset:
			string = match_strdup(args);
			if (string == NULL)
				goto out_nomem;

			if (strnlen(string, 1024) >= 65) {
				pr_warn("CIFS: iocharset name too long.\n");
				goto cifs_parse_mount_err;
			}

			if (strncasecmp(string, "default", 7) != 0) {
				kfree(vol->iocharset);
				vol->iocharset = kstrdup(string,
							 GFP_KERNEL);
				if (!vol->iocharset) {
					pr_warn("CIFS: no memory for charset\n");
					goto cifs_parse_mount_err;
				}
			}
			/* if iocharset not set then load_nls_default
			 * is used by caller
			 */
			cifs_dbg(FYI, "iocharset set to %s\n", string);
			break;
		case Opt_netbiosname:
			string = match_strdup(args);
			if (string == NULL)
				goto out_nomem;

			memset(vol->source_rfc1001_name, 0x20,
				RFC1001_NAME_LEN);
			/*
			 * FIXME: are there cases in which a comma can
			 * be valid in workstation netbios name (and
			 * need special handling)?
			 */
			for (i = 0; i < RFC1001_NAME_LEN; i++) {
				/* don't ucase netbiosname for user */
				if (string[i] == 0)
					break;
				vol->source_rfc1001_name[i] = string[i];
			}
			/* The string has 16th byte zero still from
			 * set at top of the function
			 */
			if (i == RFC1001_NAME_LEN && string[i] != 0)
				pr_warn("CIFS: netbiosname longer than 15 truncated.\n");
			break;
		case Opt_servern:
			/* servernetbiosname specified override *SMBSERVER */
			string = match_strdup(args);
			if (string == NULL)
				goto out_nomem;

			/* last byte, type, is 0x20 for servr type */
			memset(vol->target_rfc1001_name, 0x20,
				RFC1001_NAME_LEN_WITH_NULL);

			/* BB are there cases in which a comma can be
			   valid in this workstation netbios name
			   (and need special handling)? */

			/* user or mount helper must uppercase the
			   netbios name */
			for (i = 0; i < 15; i++) {
				if (string[i] == 0)
					break;
				vol->target_rfc1001_name[i] = string[i];
			}
			/* The string has 16th byte zero still from
			   set at top of the function  */
			if (i == RFC1001_NAME_LEN && string[i] != 0)
				pr_warn("CIFS: server netbiosname longer than 15 truncated.\n");
			break;
		case Opt_ver:
			string = match_strdup(args);
			if (string == NULL)
				goto out_nomem;

			if (strncasecmp(string, "1", 1) == 0) {
				/* This is the default */
				break;
			}
			/* For all other value, error */
			pr_warn("CIFS: Invalid version specified\n");
			goto cifs_parse_mount_err;
		case Opt_vers:
			string = match_strdup(args);
			if (string == NULL)
				goto out_nomem;

			if (cifs_parse_smb_version(string, vol) != 0)
				goto cifs_parse_mount_err;
			break;
		case Opt_sec:
			string = match_strdup(args);
			if (string == NULL)
				goto out_nomem;

			if (cifs_parse_security_flavors(string, vol) != 0)
				goto cifs_parse_mount_err;
			break;
		case Opt_cache:
			string = match_strdup(args);
			if (string == NULL)
				goto out_nomem;

			if (cifs_parse_cache_flavor(string, vol) != 0)
				goto cifs_parse_mount_err;
			break;
		default:
			/*
			 * An option we don't recognize. Save it off for later
			 * if we haven't already found one
			 */
			if (!invalid)
				invalid = data;
			break;
		}
		/* Free up any allocated string */
		kfree(string);
		string = NULL;
	}

	if (!sloppy && invalid) {
		pr_err("CIFS: Unknown mount option \"%s\"\n", invalid);
		goto cifs_parse_mount_err;
	}

#ifndef CONFIG_KEYS
	/* Multiuser mounts require CONFIG_KEYS support */
	if (vol->multiuser) {
		cifs_dbg(VFS, "Multiuser mounts require kernels with CONFIG_KEYS enabled\n");
		goto cifs_parse_mount_err;
	}
#endif
	if (!vol->UNC) {
		cifs_dbg(VFS, "CIFS mount error: No usable UNC path provided in device string!\n");
		goto cifs_parse_mount_err;
	}

	/* make sure UNC has a share name */
	if (!strchr(vol->UNC + 3, '\\')) {
		cifs_dbg(VFS, "Malformed UNC. Unable to find share name.\n");
		goto cifs_parse_mount_err;
	}

	if (!got_ip) {
		/* No ip= option specified? Try to get it from UNC */
		if (!cifs_convert_address(dstaddr, &vol->UNC[2],
						strlen(&vol->UNC[2]))) {
			pr_err("Unable to determine destination address.\n");
			goto cifs_parse_mount_err;
		}
	}

	/* set the port that we got earlier */
	cifs_set_port(dstaddr, port);

	if (uid_specified)
		vol->override_uid = override_uid;
	else if (override_uid == 1)
		pr_notice("CIFS: ignoring forceuid mount option specified with no uid= option.\n");

	if (gid_specified)
		vol->override_gid = override_gid;
	else if (override_gid == 1)
		pr_notice("CIFS: ignoring forcegid mount option specified with no gid= option.\n");

	kfree(mountdata_copy);
	return 0;

out_nomem:
	pr_warn("Could not allocate temporary buffer\n");
cifs_parse_mount_err:
	kfree(string);
	kfree(mountdata_copy);
	return 1;
}

/** Returns true if srcaddr isn't specified and rhs isn't
 * specified, or if srcaddr is specified and
 * matches the IP address of the rhs argument.
*/ static bool srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs) { switch (srcaddr->sa_family) { case AF_UNSPEC: return (rhs->sa_family == AF_UNSPEC); case AF_INET: { struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr; struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs; return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr); } case AF_INET6: { struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr; struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs; return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr); } default: WARN_ON(1); return false; /* don't expect to be here */ } } /* * If no port is specified in addr structure, we try to match with 445 port * and if it fails - with 139 ports. It should be called only if address * families of server and addr are equal. */ static bool match_port(struct TCP_Server_Info *server, struct sockaddr *addr) { __be16 port, *sport; switch (addr->sa_family) { case AF_INET: sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port; port = ((struct sockaddr_in *) addr)->sin_port; break; case AF_INET6: sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port; port = ((struct sockaddr_in6 *) addr)->sin6_port; break; default: WARN_ON(1); return false; } if (!port) { port = htons(CIFS_PORT); if (port == *sport) return true; port = htons(RFC1001_PORT); } return port == *sport; } static bool match_address(struct TCP_Server_Info *server, struct sockaddr *addr, struct sockaddr *srcaddr) { switch (addr->sa_family) { case AF_INET: { struct sockaddr_in *addr4 = (struct sockaddr_in *)addr; struct sockaddr_in *srv_addr4 = (struct sockaddr_in *)&server->dstaddr; if (addr4->sin_addr.s_addr != srv_addr4->sin_addr.s_addr) return false; break; } case AF_INET6: { struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr; struct sockaddr_in6 *srv_addr6 = (struct sockaddr_in6 *)&server->dstaddr; if (!ipv6_addr_equal(&addr6->sin6_addr, &srv_addr6->sin6_addr)) return false; if (addr6->sin6_scope_id != 
srv_addr6->sin6_scope_id) return false; break; } default: WARN_ON(1); return false; /* don't expect to be here */ } if (!srcip_matches(srcaddr, (struct sockaddr *)&server->srcaddr)) return false; return true; } static bool match_security(struct TCP_Server_Info *server, struct smb_vol *vol) { /* * The select_sectype function should either return the vol->sectype * that was specified, or "Unspecified" if that sectype was not * compatible with the given NEGOTIATE request. */ if (server->ops->select_sectype(server, vol->sectype) == Unspecified) return false; /* * Now check if signing mode is acceptable. No need to check * global_secflags at this point since if MUST_SIGN is set then * the server->sign had better be too. */ if (vol->sign && !server->sign) return false; return true; } static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol) { struct sockaddr *addr = (struct sockaddr *)&vol->dstaddr; if (vol->nosharesock) return 0; if ((server->vals != vol->vals) || (server->ops != vol->ops)) return 0; if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns)) return 0; if (!match_address(server, addr, (struct sockaddr *)&vol->srcaddr)) return 0; if (!match_port(server, addr)) return 0; if (!match_security(server, vol)) return 0; if (server->echo_interval != vol->echo_interval * HZ) return 0; return 1; } static struct TCP_Server_Info * cifs_find_tcp_session(struct smb_vol *vol) { struct TCP_Server_Info *server; spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { if (!match_server(server, vol)) continue; ++server->srv_count; spin_unlock(&cifs_tcp_ses_lock); cifs_dbg(FYI, "Existing tcp session with server found\n"); return server; } spin_unlock(&cifs_tcp_ses_lock); return NULL; } void cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect) { struct task_struct *task; spin_lock(&cifs_tcp_ses_lock); if (--server->srv_count > 0) { spin_unlock(&cifs_tcp_ses_lock); return; } 
put_net(cifs_net_ns(server)); list_del_init(&server->tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); cancel_delayed_work_sync(&server->echo); #ifdef CONFIG_CIFS_SMB2 if (from_reconnect) /* * Avoid deadlock here: reconnect work calls * cifs_put_tcp_session() at its end. Need to be sure * that reconnect work does nothing with server pointer after * that step. */ cancel_delayed_work(&server->reconnect); else cancel_delayed_work_sync(&server->reconnect); #endif spin_lock(&GlobalMid_Lock); server->tcpStatus = CifsExiting; spin_unlock(&GlobalMid_Lock); cifs_crypto_secmech_release(server); cifs_fscache_release_client_cookie(server); kfree(server->session_key.response); server->session_key.response = NULL; server->session_key.len = 0; task = xchg(&server->tsk, NULL); if (task) force_sig(SIGKILL, task); } static struct TCP_Server_Info * cifs_get_tcp_session(struct smb_vol *volume_info) { struct TCP_Server_Info *tcp_ses = NULL; int rc; cifs_dbg(FYI, "UNC: %s\n", volume_info->UNC); /* see if we already have a matching tcp_ses */ tcp_ses = cifs_find_tcp_session(volume_info); if (tcp_ses) return tcp_ses; tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL); if (!tcp_ses) { rc = -ENOMEM; goto out_err; } tcp_ses->ops = volume_info->ops; tcp_ses->vals = volume_info->vals; cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns)); tcp_ses->hostname = extract_hostname(volume_info->UNC); if (IS_ERR(tcp_ses->hostname)) { rc = PTR_ERR(tcp_ses->hostname); goto out_err_crypto_release; } tcp_ses->noblocksnd = volume_info->noblocksnd; tcp_ses->noautotune = volume_info->noautotune; tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay; tcp_ses->in_flight = 0; tcp_ses->credits = 1; init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); INIT_LIST_HEAD(&tcp_ses->pending_mid_q); mutex_init(&tcp_ses->srv_mutex); memcpy(tcp_ses->workstation_RFC1001_name, volume_info->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); memcpy(tcp_ses->server_RFC1001_name, 
volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); tcp_ses->session_estab = false; tcp_ses->sequence_number = 0; tcp_ses->lstrp = jiffies; spin_lock_init(&tcp_ses->req_lock); INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); INIT_LIST_HEAD(&tcp_ses->smb_ses_list); INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); #ifdef CONFIG_CIFS_SMB2 INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server); mutex_init(&tcp_ses->reconnect_mutex); #endif memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr, sizeof(tcp_ses->srcaddr)); memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr, sizeof(tcp_ses->dstaddr)); #ifdef CONFIG_CIFS_SMB2 generate_random_uuid(tcp_ses->client_guid); #endif /* * at this point we are the only ones with the pointer * to the struct since the kernel thread not created yet * no need to spinlock this init of tcpStatus or srv_count */ tcp_ses->tcpStatus = CifsNew; ++tcp_ses->srv_count; if (volume_info->echo_interval >= SMB_ECHO_INTERVAL_MIN && volume_info->echo_interval <= SMB_ECHO_INTERVAL_MAX) tcp_ses->echo_interval = volume_info->echo_interval * HZ; else tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ; rc = ip_connect(tcp_ses); if (rc < 0) { cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n"); goto out_err_crypto_release; } /* * since we're in a cifs function already, we know that * this will succeed. No need for try_module_get(). 
*/ __module_get(THIS_MODULE); tcp_ses->tsk = kthread_run(cifs_demultiplex_thread, tcp_ses, "cifsd"); if (IS_ERR(tcp_ses->tsk)) { rc = PTR_ERR(tcp_ses->tsk); cifs_dbg(VFS, "error %d create cifsd thread\n", rc); module_put(THIS_MODULE); goto out_err_crypto_release; } tcp_ses->tcpStatus = CifsNeedNegotiate; /* thread spawned, put it on the list */ spin_lock(&cifs_tcp_ses_lock); list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); cifs_fscache_get_client_cookie(tcp_ses); /* queue echo request delayed work */ queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval); return tcp_ses; out_err_crypto_release: cifs_crypto_secmech_release(tcp_ses); put_net(cifs_net_ns(tcp_ses)); out_err: if (tcp_ses) { if (!IS_ERR(tcp_ses->hostname)) kfree(tcp_ses->hostname); if (tcp_ses->ssocket) sock_release(tcp_ses->ssocket); kfree(tcp_ses); } return ERR_PTR(rc); } static int match_session(struct cifs_ses *ses, struct smb_vol *vol) { if (vol->sectype != Unspecified && vol->sectype != ses->sectype) return 0; switch (ses->sectype) { case Kerberos: if (!uid_eq(vol->cred_uid, ses->cred_uid)) return 0; break; default: /* NULL username means anonymous session */ if (ses->user_name == NULL) { if (!vol->nullauth) return 0; break; } /* anything else takes username/password */ if (strncmp(ses->user_name, vol->username ? vol->username : "", CIFS_MAX_USERNAME_LEN)) return 0; if ((vol->username && strlen(vol->username) != 0) && ses->password != NULL && strncmp(ses->password, vol->password ? 
vol->password : "", CIFS_MAX_PASSWORD_LEN)) return 0; } return 1; } static struct cifs_ses * cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol) { struct cifs_ses *ses; spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { if (ses->status == CifsExiting) continue; if (!match_session(ses, vol)) continue; ++ses->ses_count; spin_unlock(&cifs_tcp_ses_lock); return ses; } spin_unlock(&cifs_tcp_ses_lock); return NULL; } static void cifs_put_smb_ses(struct cifs_ses *ses) { unsigned int rc, xid; struct TCP_Server_Info *server = ses->server; cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count); spin_lock(&cifs_tcp_ses_lock); if (ses->status == CifsExiting) { spin_unlock(&cifs_tcp_ses_lock); return; } if (--ses->ses_count > 0) { spin_unlock(&cifs_tcp_ses_lock); return; } if (ses->status == CifsGood) ses->status = CifsExiting; spin_unlock(&cifs_tcp_ses_lock); if (ses->status == CifsExiting && server->ops->logoff) { xid = get_xid(); rc = server->ops->logoff(xid, ses); if (rc) cifs_dbg(VFS, "%s: Session Logoff failure rc=%d\n", __func__, rc); _free_xid(xid); } spin_lock(&cifs_tcp_ses_lock); list_del_init(&ses->smb_ses_list); spin_unlock(&cifs_tcp_ses_lock); sesInfoFree(ses); cifs_put_tcp_session(server, 0); } #ifdef CONFIG_KEYS /* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */ #define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1) /* Populate username and pw fields from keyring if possible */ static int cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) { int rc = 0; const char *delim, *payload; char *desc; ssize_t len; struct key *key; struct TCP_Server_Info *server = ses->server; struct sockaddr_in *sa; struct sockaddr_in6 *sa6; const struct user_key_payload *upayload; desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL); if (!desc) return -ENOMEM; /* try to find an address key first */ switch (server->dstaddr.ss_family) { case AF_INET: sa = (struct sockaddr_in *)&server->dstaddr; 
sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr); break; case AF_INET6: sa6 = (struct sockaddr_in6 *)&server->dstaddr; sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr); break; default: cifs_dbg(FYI, "Bad ss_family (%hu)\n", server->dstaddr.ss_family); rc = -EINVAL; goto out_err; } cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc); key = request_key(&key_type_logon, desc, ""); if (IS_ERR(key)) { if (!ses->domainName) { cifs_dbg(FYI, "domainName is NULL\n"); rc = PTR_ERR(key); goto out_err; } /* didn't work, try to find a domain key */ sprintf(desc, "cifs:d:%s", ses->domainName); cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc); key = request_key(&key_type_logon, desc, ""); if (IS_ERR(key)) { rc = PTR_ERR(key); goto out_err; } } down_read(&key->sem); upayload = user_key_payload_locked(key); if (IS_ERR_OR_NULL(upayload)) { rc = upayload ? PTR_ERR(upayload) : -EINVAL; goto out_key_put; } /* find first : in payload */ payload = upayload->data; delim = strnchr(payload, upayload->datalen, ':'); cifs_dbg(FYI, "payload=%s\n", payload); if (!delim) { cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n", upayload->datalen); rc = -EINVAL; goto out_key_put; } len = delim - payload; if (len > CIFS_MAX_USERNAME_LEN || len <= 0) { cifs_dbg(FYI, "Bad value from username search (len=%zd)\n", len); rc = -EINVAL; goto out_key_put; } vol->username = kstrndup(payload, len, GFP_KERNEL); if (!vol->username) { cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n", len); rc = -ENOMEM; goto out_key_put; } cifs_dbg(FYI, "%s: username=%s\n", __func__, vol->username); len = key->datalen - (len + 1); if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) { cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len); rc = -EINVAL; kfree(vol->username); vol->username = NULL; goto out_key_put; } ++delim; vol->password = kstrndup(delim, len, GFP_KERNEL); if (!vol->password) { cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n", len); rc = -ENOMEM; kfree(vol->username); vol->username 
= NULL; goto out_key_put; } out_key_put: up_read(&key->sem); key_put(key); out_err: kfree(desc); cifs_dbg(FYI, "%s: returning %d\n", __func__, rc); return rc; } #else /* ! CONFIG_KEYS */ static inline int cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)), struct cifs_ses *ses __attribute__((unused))) { return -ENOSYS; } #endif /* CONFIG_KEYS */ static struct cifs_ses * cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) { int rc = -ENOMEM; unsigned int xid; struct cifs_ses *ses; struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; xid = get_xid(); ses = cifs_find_smb_ses(server, volume_info); if (ses) { cifs_dbg(FYI, "Existing smb sess found (status=%d)\n", ses->status); mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(xid, ses); if (rc) { mutex_unlock(&ses->session_mutex); /* problem -- put our ses reference */ cifs_put_smb_ses(ses); free_xid(xid); return ERR_PTR(rc); } if (ses->need_reconnect) { cifs_dbg(FYI, "Session needs reconnect\n"); rc = cifs_setup_session(xid, ses, volume_info->local_nls); if (rc) { mutex_unlock(&ses->session_mutex); /* problem -- put our reference */ cifs_put_smb_ses(ses); free_xid(xid); return ERR_PTR(rc); } } mutex_unlock(&ses->session_mutex); /* existing SMB ses has a server reference already */ cifs_put_tcp_session(server, 0); free_xid(xid); return ses; } cifs_dbg(FYI, "Existing smb sess not found\n"); ses = sesInfoAlloc(); if (ses == NULL) goto get_ses_fail; /* new SMB session uses our server ref */ ses->server = server; if (server->dstaddr.ss_family == AF_INET6) sprintf(ses->serverName, "%pI6", &addr6->sin6_addr); else sprintf(ses->serverName, "%pI4", &addr->sin_addr); if (volume_info->username) { ses->user_name = kstrdup(volume_info->username, GFP_KERNEL); if (!ses->user_name) goto get_ses_fail; } /* volume_info->password freed at unmount */ if (volume_info->password) { ses->password = 
			kstrdup(volume_info->password, GFP_KERNEL);
		if (!ses->password)
			goto get_ses_fail;
	}
	if (volume_info->domainname) {
		ses->domainName = kstrdup(volume_info->domainname, GFP_KERNEL);
		if (!ses->domainName)
			goto get_ses_fail;
	}
	if (volume_info->domainauto)
		ses->domainAuto = volume_info->domainauto;
	ses->cred_uid = volume_info->cred_uid;
	ses->linux_uid = volume_info->linux_uid;

	ses->sectype = volume_info->sectype;
	ses->sign = volume_info->sign;

	mutex_lock(&ses->session_mutex);
	rc = cifs_negotiate_protocol(xid, ses);
	if (!rc)
		rc = cifs_setup_session(xid, ses, volume_info->local_nls);
	mutex_unlock(&ses->session_mutex);
	if (rc)
		goto get_ses_fail;

	/* success, put it on the list */
	spin_lock(&cifs_tcp_ses_lock);
	list_add(&ses->smb_ses_list, &server->smb_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	free_xid(xid);
	return ses;

get_ses_fail:
	sesInfoFree(ses);
	free_xid(xid);
	return ERR_PTR(rc);
}

/*
 * Return 1 if @tcon matches the share described by @volume_info:
 * same UNC tree name, same seal setting and (SMB2+) same snapshot time.
 */
static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	if (tcon->tidStatus == CifsExiting)
		return 0;
	if (strncmp(tcon->treeName, volume_info->UNC, MAX_TREE_SIZE))
		return 0;
	if (tcon->seal != volume_info->seal)
		return 0;
#ifdef CONFIG_CIFS_SMB2
	if (tcon->snapshot_time != volume_info->snapshot_time)
		return 0;
#endif /* CONFIG_CIFS_SMB2 */
	return 1;
}

/*
 * Look up a matching tree connection on @ses.  On success an extra
 * tc_count reference is taken (under cifs_tcp_ses_lock) before the
 * tcon is returned; NULL when no match exists.
 */
static struct cifs_tcon *
cifs_find_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
{
	struct list_head *tmp;
	struct cifs_tcon *tcon;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &ses->tcon_list) {
		tcon = list_entry(tmp, struct cifs_tcon, tcon_list);
		if (!match_tcon(tcon, volume_info))
			continue;
		++tcon->tc_count;
		spin_unlock(&cifs_tcp_ses_lock);
		return tcon;
	}
	spin_unlock(&cifs_tcp_ses_lock);
	return NULL;
}

/*
 * Drop a tc_count reference on @tcon.  On the last put the tcon is
 * unlinked, tree-disconnected from the server, freed, and its session
 * reference released.
 */
void
cifs_put_tcon(struct cifs_tcon *tcon)
{
	unsigned int xid;
	struct cifs_ses *ses = tcon->ses;

	cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
	spin_lock(&cifs_tcp_ses_lock);
	if (--tcon->tc_count > 0) {
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}

	list_del_init(&tcon->tcon_list);
	spin_unlock(&cifs_tcp_ses_lock);

	xid = get_xid();
	if (ses->server->ops->tree_disconnect)
		ses->server->ops->tree_disconnect(xid, tcon);
	_free_xid(xid);

	cifs_fscache_release_super_cookie(tcon);
	tconInfoFree(tcon);
	cifs_put_smb_ses(ses);
}

/*
 * Find an existing tree connection matching @volume_info on @ses, or
 * allocate one and TREE_CONNECT it to the server.  A reused tcon
 * absorbs the caller's session reference.  Returns an ERR_PTR on
 * failure.
 */
static struct cifs_tcon *
cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
{
	int rc, xid;
	struct cifs_tcon *tcon;

	tcon = cifs_find_tcon(ses, volume_info);
	if (tcon) {
		cifs_dbg(FYI, "Found match on UNC path\n");
		/* existing tcon already has a reference */
		cifs_put_smb_ses(ses);
		return tcon;
	}

	if (!ses->server->ops->tree_connect) {
		rc = -ENOSYS;
		goto out_fail;
	}

	tcon = tconInfoAlloc();
	if (tcon == NULL) {
		rc = -ENOMEM;
		goto out_fail;
	}

	if (volume_info->snapshot_time) {
#ifdef CONFIG_CIFS_SMB2
		/* snapshot mounts need the SMB2+ protocol family */
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
			     "Use SMB2 or later for snapshot mount option\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else
			tcon->snapshot_time = volume_info->snapshot_time;
#else
		cifs_dbg(VFS, "Snapshot mount option requires SMB2 support\n");
		rc = -EOPNOTSUPP;
		goto out_fail;
#endif /* CONFIG_CIFS_SMB2 */
	}

	tcon->ses = ses;
	if (volume_info->password) {
		tcon->password = kstrdup(volume_info->password, GFP_KERNEL);
		if (!tcon->password) {
			rc = -ENOMEM;
			goto out_fail;
		}
	}

	/*
	 * BB Do we need to wrap session_mutex around this TCon call and Unix
	 * SetFS as we do on SessSetup and reconnect?
	 */
	xid = get_xid();
	rc = ses->server->ops->tree_connect(xid, ses, volume_info->UNC, tcon,
					    volume_info->local_nls);
	free_xid(xid);
	cifs_dbg(FYI, "Tcon rc = %d\n", rc);
	if (rc)
		goto out_fail;

	if (volume_info->nodfs) {
		tcon->Flags &= ~SMB_SHARE_IS_IN_DFS;
		cifs_dbg(FYI, "DFS disabled (%d)\n", tcon->Flags);
	}
	tcon->use_persistent = false;
	/* check if SMB2 or later, CIFS does not support persistent handles */
	if (volume_info->persistent) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
			     "SMB3 or later required for persistent handles\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
#ifdef CONFIG_CIFS_SMB2
		} else if (ses->server->capabilities &
			   SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
			tcon->use_persistent = true;
		else /* persistent handles requested but not supported */ {
			cifs_dbg(VFS,
				"Persistent handles not supported on share\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
#endif /* CONFIG_CIFS_SMB2 */
		}
#ifdef CONFIG_CIFS_SMB2
	} else if ((tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
	     && (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
	     && (volume_info->nopersistent == false)) {
		/* share advertises CA and the user did not opt out */
		cifs_dbg(FYI, "enabling persistent handles\n");
		tcon->use_persistent = true;
#endif /* CONFIG_CIFS_SMB2 */
	} else if (volume_info->resilient) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
			     "SMB2.1 or later required for resilient handles\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
		tcon->use_resilient = true;
	}

	if (volume_info->seal) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS, "SMB3 or later required for encryption\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
#ifdef CONFIG_CIFS_SMB2
		} else if (tcon->ses->server->capabilities &
					SMB2_GLOBAL_CAP_ENCRYPTION)
			tcon->seal = true;
		else {
			cifs_dbg(VFS, "Encryption is not supported on share\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
#endif /* CONFIG_CIFS_SMB2 */
		}
	}

	/*
	 * We can have only one retry value for a connection to a share so for
	 * resources mounted more than once to the same server share the last
	 * value passed in for the
	 * retry flag is used.
	 */
	tcon->retry = volume_info->retry;
	tcon->nocase = volume_info->nocase;
	tcon->local_lease = volume_info->local_lease;
	INIT_LIST_HEAD(&tcon->pending_opens);

	spin_lock(&cifs_tcp_ses_lock);
	list_add(&tcon->tcon_list, &ses->tcon_list);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_fscache_get_super_cookie(tcon);

	return tcon;

out_fail:
	tconInfoFree(tcon);
	return ERR_PTR(rc);
}

/*
 * Release a reference on @tlink.  The underlying tcon is only dropped
 * once the link count reaches zero and the link has left the tlink
 * tree; otherwise the last-use time is refreshed for the pruner.
 */
void
cifs_put_tlink(struct tcon_link *tlink)
{
	if (!tlink || IS_ERR(tlink))
		return;

	if (!atomic_dec_and_test(&tlink->tl_count) ||
	    test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
		tlink->tl_time = jiffies;
		return;
	}

	if (!IS_ERR(tlink_tcon(tlink)))
		cifs_put_tcon(tlink_tcon(tlink));
	kfree(tlink);
	return;
}

/* Accessor for the superblock's master tcon link. */
static inline struct tcon_link *
cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
{
	return cifs_sb->master_tlink;
}

/*
 * Return 1 when the mount options of existing superblock @sb are
 * compatible with the new mount request in @mnt_data, i.e. the
 * superblock may be shared; 0 otherwise.
 */
static int
compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
{
	struct cifs_sb_info *old = CIFS_SB(sb);
	struct cifs_sb_info *new = mnt_data->cifs_sb;

	if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
		return 0;

	if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) !=
	    (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
		return 0;

	/*
	 * We want to share sb only if we don't specify an r/wsize or
	 * specified r/wsize is greater than or equal to existing one.
	 */
	if (new->wsize && new->wsize < old->wsize)
		return 0;

	if (new->rsize && new->rsize < old->rsize)
		return 0;

	if (!uid_eq(old->mnt_uid, new->mnt_uid) ||
	    !gid_eq(old->mnt_gid, new->mnt_gid))
		return 0;

	if (old->mnt_file_mode != new->mnt_file_mode ||
	    old->mnt_dir_mode != new->mnt_dir_mode)
		return 0;

	if (strcmp(old->local_nls->charset, new->local_nls->charset))
		return 0;

	if (old->actimeo != new->actimeo)
		return 0;

	return 1;
}

/*
 * Return 1 if the prefix paths of superblock @sb and the new mount
 * request agree: both must use a prefix path and the strings must
 * compare equal.
 */
static int
match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
{
	struct cifs_sb_info *old = CIFS_SB(sb);
	struct cifs_sb_info *new = mnt_data->cifs_sb;

	if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) {
		if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH))
			return 0;
		/* The prepath should be null terminated strings */
		if (strcmp(new->prepath, old->prepath))
			return 0;

		return 1;
	}
	return 0;
}

/*
 * sget() comparison callback: return nonzero when existing superblock
 * @sb matches the mount request carried in @data (same server, session,
 * tcon, prefix path and compatible mount options).
 */
int
cifs_match_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = (struct cifs_mnt_data *)data;
	struct smb_vol *volume_info;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *tcp_srv;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	int rc = 0;

	spin_lock(&cifs_tcp_ses_lock);
	cifs_sb = CIFS_SB(sb);
	tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
	if (IS_ERR(tlink)) {
		spin_unlock(&cifs_tcp_ses_lock);
		return rc;
	}
	tcon = tlink_tcon(tlink);
	ses = tcon->ses;
	tcp_srv = ses->server;

	volume_info = mnt_data->vol;

	if (!match_server(tcp_srv, volume_info) ||
	    !match_session(ses, volume_info) ||
	    !match_tcon(tcon, volume_info) ||
	    !match_prepath(sb, mnt_data)) {
		rc = 0;
		goto out;
	}

	rc = compare_mount_options(sb, mnt_data);
out:
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_put_tlink(tlink);
	return rc;
}

/*
 * Issue a DFS referral query for @old_path.  If no IPC tid exists yet,
 * first tree-connect to the server's IPC$ share.  Results come back
 * through @num_referrals/@referrals.
 */
int
get_dfs_path(const unsigned int xid, struct cifs_ses *ses, const char *old_path,
	     const struct nls_table *nls_codepage, unsigned int *num_referrals,
	     struct dfs_info3_param **referrals, int remap)
{
	char *temp_unc;
	int rc = 0;

	if (!ses->server->ops->tree_connect || !ses->server->ops->get_dfs_refer)
		return -ENOSYS;

	*num_referrals = 0;
	*referrals = NULL;

	if (ses->ipc_tid == 0) {
		/* build "\\<server>\IPC$" for the connect */
		temp_unc = kmalloc(2 /* for slashes */ +
			strnlen(ses->serverName, SERVER_NAME_LEN_WITH_NULL * 2)
				+ 1 + 4 /* slash IPC$ */ + 2, GFP_KERNEL);
		if (temp_unc == NULL)
			return -ENOMEM;
		temp_unc[0] = '\\';
		temp_unc[1] = '\\';
		strcpy(temp_unc + 2, ses->serverName);
		strcpy(temp_unc + 2 + strlen(ses->serverName), "\\IPC$");
		rc = ses->server->ops->tree_connect(xid, ses, temp_unc, NULL,
						    nls_codepage);
		cifs_dbg(FYI, "Tcon rc = %d ipc_tid = %d\n", rc, ses->ipc_tid);
		kfree(temp_unc);
	}
	if (rc == 0)
		rc = ses->server->ops->get_dfs_refer(xid, ses, old_path,
						     referrals, num_referrals,
						     nls_codepage, remap);
	/*
	 * BB - map targetUNCs to dfs_info3 structures, here or in
	 * ses->server->ops->get_dfs_refer.
	 */

	return rc;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key cifs_key[2];
static struct lock_class_key cifs_slock_key[2];

/* Put CIFS-owned IPv4 sockets in their own lockdep class. */
static inline void
cifs_reclassify_socket4(struct socket *sock)
{
	struct sock *sk = sock->sk;

	BUG_ON(!sock_allow_reclassification(sk));
	sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
		&cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
}

/* Put CIFS-owned IPv6 sockets in their own lockdep class. */
static inline void
cifs_reclassify_socket6(struct socket *sock)
{
	struct sock *sk = sock->sk;

	BUG_ON(!sock_allow_reclassification(sk));
	sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
		&cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
}
#else
static inline void
cifs_reclassify_socket4(struct socket *sock)
{
}

static inline void
cifs_reclassify_socket6(struct socket *sock)
{
}
#endif

/* See RFC1001 section 14 on representation of Netbios names */
static void rfc1002mangle(char *target, char *source, unsigned int length)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < (length); i++) {
		/* mask a nibble at a time and encode */
		target[j] = 'A' + (0x0F & (source[i] >> 4));
		target[j+1] = 'A' + (0x0F & source[i]);
		j += 2;
	}
}

/*
 * Bind the server's socket to the source address selected by the
 * "srcaddr=" mount option, when one was supplied.
 */
static int
bind_socket(struct TCP_Server_Info *server)
{
	if (server->srcaddr.ss_family
	    != AF_UNSPEC) {
		/* Bind to the specified local IP address */
		struct socket *socket = server->ssocket;
		rc = socket->ops->bind(socket,
				       (struct sockaddr *) &server->srcaddr,
				       sizeof(server->srcaddr));
		if (rc < 0) {
			struct sockaddr_in *saddr4;
			struct sockaddr_in6 *saddr6;
			saddr4 = (struct sockaddr_in *)&server->srcaddr;
			saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
			if (saddr6->sin6_family == AF_INET6)
				cifs_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
					 &saddr6->sin6_addr, rc);
			else
				cifs_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
					 &saddr4->sin_addr.s_addr, rc);
		}
	}
	return rc;
}

/*
 * Send the RFC 1001 session request that NetBIOS-style servers expect
 * before any SMB traffic on port 139.  Called/calling names are taken
 * from the server/workstation RFC1001 names when set, with built-in
 * defaults otherwise.
 */
static int
ip_rfc1001_connect(struct TCP_Server_Info *server)
{
	int rc = 0;
	/*
	 * some servers require RFC1001 sessinit before sending
	 * negprot - BB check reconnection in case where second
	 * sessinit is sent but no second negprot
	 */
	struct rfc1002_session_packet *ses_init_buf;
	struct smb_hdr *smb_buf;
	ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet),
			       GFP_KERNEL);
	if (ses_init_buf) {
		ses_init_buf->trailer.session_req.called_len = 32;

		if (server->server_RFC1001_name[0] != 0)
			rfc1002mangle(ses_init_buf->trailer.
				      session_req.called_name,
				      server->server_RFC1001_name,
				      RFC1001_NAME_LEN_WITH_NULL);
		else
			rfc1002mangle(ses_init_buf->trailer.
				      session_req.called_name,
				      DEFAULT_CIFS_CALLED_NAME,
				      RFC1001_NAME_LEN_WITH_NULL);

		ses_init_buf->trailer.session_req.calling_len = 32;

		/*
		 * calling name ends in null (byte 16) from old smb
		 * convention.
		 */
		if (server->workstation_RFC1001_name[0] != 0)
			rfc1002mangle(ses_init_buf->trailer.
				      session_req.calling_name,
				      server->workstation_RFC1001_name,
				      RFC1001_NAME_LEN_WITH_NULL);
		else
			rfc1002mangle(ses_init_buf->trailer.
				      session_req.calling_name,
				      "LINUX_CIFS_CLNT",
				      RFC1001_NAME_LEN_WITH_NULL);

		ses_init_buf->trailer.session_req.scope1 = 0;
		ses_init_buf->trailer.session_req.scope2 = 0;
		smb_buf = (struct smb_hdr *)ses_init_buf;

		/* sizeof RFC1002_SESSION_REQUEST with no scope */
		smb_buf->smb_buf_length = cpu_to_be32(0x81000044);
		rc = smb_send(server, smb_buf, 0x44);
		kfree(ses_init_buf);
		/*
		 * RFC1001 layer in at least one server
		 * requires very short break before negprot
		 * presumably because not expecting negprot
		 * to follow so fast.  This is a simple
		 * solution that works without
		 * complicating the code and causes no
		 * significant slowing down on mount
		 * for everyone else
		 */
		usleep_range(1000, 2000);
	}
	/*
	 * else the negprot may still work without this
	 * even though malloc failed
	 */

	return rc;
}

/*
 * Create (if necessary), bind and connect the TCP socket for @server,
 * then run the RFC 1001 session setup when the destination is the
 * NetBIOS session port.
 */
static int
generic_ip_connect(struct TCP_Server_Info *server)
{
	int rc = 0;
	__be16 sport;
	int slen, sfamily;
	struct socket *socket = server->ssocket;
	struct sockaddr *saddr;

	saddr = (struct sockaddr *) &server->dstaddr;

	if (server->dstaddr.ss_family == AF_INET6) {
		sport = ((struct sockaddr_in6 *) saddr)->sin6_port;
		slen = sizeof(struct sockaddr_in6);
		sfamily = AF_INET6;
	} else {
		sport = ((struct sockaddr_in *) saddr)->sin_port;
		slen = sizeof(struct sockaddr_in);
		sfamily = AF_INET;
	}

	if (socket == NULL) {
		rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
				   IPPROTO_TCP, &socket, 1);
		if (rc < 0) {
			cifs_dbg(VFS, "Error %d creating socket\n", rc);
			server->ssocket = NULL;
			return rc;
		}

		/* BB other socket options to set KEEPALIVE, NODELAY? */
		cifs_dbg(FYI, "Socket created\n");
		server->ssocket = socket;
		socket->sk->sk_allocation = GFP_NOFS;
		if (sfamily == AF_INET6)
			cifs_reclassify_socket6(socket);
		else
			cifs_reclassify_socket4(socket);
	}

	rc = bind_socket(server);
	if (rc < 0)
		return rc;

	/*
	 * Eventually check for other socket options to change from
	 * the default.
	 * sock_setsockopt not used because it expects
	 * user space buffer
	 */
	socket->sk->sk_rcvtimeo = 7 * HZ;
	socket->sk->sk_sndtimeo = 5 * HZ;

	/* make the bufsizes depend on wsize/rsize and max requests */
	if (server->noautotune) {
		if (socket->sk->sk_sndbuf < (200 * 1024))
			socket->sk->sk_sndbuf = 200 * 1024;
		if (socket->sk->sk_rcvbuf < (140 * 1024))
			socket->sk->sk_rcvbuf = 140 * 1024;
	}

	if (server->tcp_nodelay) {
		int val = 1;
		rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
				(char *)&val, sizeof(val));
		if (rc)
			cifs_dbg(FYI, "set TCP_NODELAY socket option error %d\n",
				 rc);
	}

	cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n",
		 socket->sk->sk_sndbuf,
		 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);

	rc = socket->ops->connect(socket, saddr, slen, 0);
	if (rc < 0) {
		cifs_dbg(FYI, "Error %d connecting to server\n", rc);
		sock_release(socket);
		server->ssocket = NULL;
		return rc;
	}

	if (sport == htons(RFC1001_PORT))
		rc = ip_rfc1001_connect(server);

	return rc;
}

/*
 * Connect to the server.  When no destination port was specified, try
 * the direct SMB port (445) first and fall back to the RFC 1001 port
 * (139) if that fails.
 */
static int
ip_connect(struct TCP_Server_Info *server)
{
	__be16 *sport;
	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;

	if (server->dstaddr.ss_family == AF_INET6)
		sport = &addr6->sin6_port;
	else
		sport = &addr->sin_port;

	if (*sport == 0) {
		int rc;

		/* try with 445 port at first */
		*sport = htons(CIFS_PORT);

		rc = generic_ip_connect(server);
		if (rc >= 0)
			return rc;

		/* if it failed, try with 139 port */
		*sport = htons(RFC1001_PORT);
	}

	return generic_ip_connect(server);
}

/*
 * Query and (re)negotiate the Unix/POSIX extension capabilities on
 * @tcon, honouring the mount options in @vol_info.  @vol_info is NULL
 * on reconnect, in which case the previously negotiated capabilities
 * are preserved as far as possible.
 */
void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
			  struct cifs_sb_info *cifs_sb, struct smb_vol *vol_info)
{
	/* if we are reconnecting then should we check to see if
	 * any requested capabilities changed locally e.g.
	 * via
	 * remount but we can not do much about it here
	 * if they have (even if we could detect it by the following)
	 * Perhaps we could add a backpointer to array of sb from tcon
	 * or if we change to make all sb to same share the same
	 * sb as NFS - then we only have one backpointer to sb.
	 * What if we wanted to mount the server share twice once with
	 * and once without posixacls or posix paths?
	 */
	__u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);

	if (vol_info && vol_info->no_linux_ext) {
		tcon->fsUnixInfo.Capability = 0;
		tcon->unix_ext = 0; /* Unix Extensions disabled */
		cifs_dbg(FYI, "Linux protocol extensions disabled\n");
		return;
	} else if (vol_info)
		tcon->unix_ext = 1; /* Unix Extensions supported */

	if (tcon->unix_ext == 0) {
		cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
		return;
	}

	if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
		__u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
		cifs_dbg(FYI, "unix caps which server supports %lld\n", cap);
		/* check for reconnect case in which we do not
		   want to change the mount behavior if we can avoid it */
		if (vol_info == NULL) {
			/* turn off POSIX ACL and PATHNAMES if not set
			   originally at mount time */
			if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
				cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
			if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
				if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
					cifs_dbg(VFS, "POSIXPATH support change\n");
				cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
			} else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
				cifs_dbg(VFS, "possible reconnect error\n");
				cifs_dbg(VFS, "server disabled POSIX path support\n");
			}
		}

		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
			cifs_dbg(VFS, "per-share encryption not supported yet\n");

		cap &= CIFS_UNIX_CAP_MASK;
		if (vol_info && vol_info->no_psx_acl)
			cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
		else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
			cifs_dbg(FYI, "negotiated posix acl support\n");
			if (cifs_sb)
				cifs_sb->mnt_cifs_flags |=
					CIFS_MOUNT_POSIXACL;
		}

		if (vol_info &&
		    vol_info->posix_paths == 0)
			cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
		else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
			cifs_dbg(FYI, "negotiate posix pathnames\n");
			if (cifs_sb)
				cifs_sb->mnt_cifs_flags |=
					CIFS_MOUNT_POSIX_PATHS;
		}

		cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap);
#ifdef CONFIG_CIFS_DEBUG2
		if (cap & CIFS_UNIX_FCNTL_CAP)
			cifs_dbg(FYI, "FCNTL cap\n");
		if (cap & CIFS_UNIX_EXTATTR_CAP)
			cifs_dbg(FYI, "EXTATTR cap\n");
		if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
			cifs_dbg(FYI, "POSIX path cap\n");
		if (cap & CIFS_UNIX_XATTR_CAP)
			cifs_dbg(FYI, "XATTR cap\n");
		if (cap & CIFS_UNIX_POSIX_ACL_CAP)
			cifs_dbg(FYI, "POSIX ACL cap\n");
		if (cap & CIFS_UNIX_LARGE_READ_CAP)
			cifs_dbg(FYI, "very large read cap\n");
		if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
			cifs_dbg(FYI, "very large write cap\n");
		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
			cifs_dbg(FYI, "transport encryption cap\n");
		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
			cifs_dbg(FYI, "mandatory transport encryption cap\n");
#endif /* CIFS_DEBUG2 */
		if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
			if (vol_info == NULL) {
				cifs_dbg(FYI, "resetting capabilities failed\n");
			} else
				cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n");

		}
	}
}

/*
 * Translate parsed mount options (@pvolume_info) into the in-core
 * cifs_sb_info flags and settings for a new superblock.  Returns 0 on
 * success or -ENOMEM if the prefix path cannot be duplicated.
 */
int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
			struct cifs_sb_info *cifs_sb)
{
	INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);

	spin_lock_init(&cifs_sb->tlink_tree_lock);
	cifs_sb->tlink_tree = RB_ROOT;

	/*
	 * Temporarily set r/wsize for matching superblock. If we end up using
	 * new sb then client will later negotiate it downward if needed.
	 */
	cifs_sb->rsize = pvolume_info->rsize;
	cifs_sb->wsize = pvolume_info->wsize;

	cifs_sb->mnt_uid = pvolume_info->linux_uid;
	cifs_sb->mnt_gid = pvolume_info->linux_gid;
	cifs_sb->mnt_file_mode = pvolume_info->file_mode;
	cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
	cifs_dbg(FYI, "file mode: 0x%hx dir mode: 0x%hx\n",
		 cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);

	cifs_sb->actimeo = pvolume_info->actimeo;
	cifs_sb->local_nls = pvolume_info->local_nls;

	/* map each boolean mount option onto its CIFS_MOUNT_* flag */
	if (pvolume_info->noperm)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
	if (pvolume_info->setuids)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID;
	if (pvolume_info->setuidfromacl)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UID_FROM_ACL;
	if (pvolume_info->server_ino)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM;
	if (pvolume_info->remap)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SFM_CHR;
	if (pvolume_info->sfu_remap)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SPECIAL_CHR;
	if (pvolume_info->no_xattr)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR;
	if (pvolume_info->sfu_emul)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL;
	if (pvolume_info->nobrl)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL;
	if (pvolume_info->nostrictsync)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC;
	if (pvolume_info->mand_lock)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL;
	if (pvolume_info->rwpidforward)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD;
	if (pvolume_info->cifs_acl)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
	if (pvolume_info->backupuid_specified) {
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID;
		cifs_sb->mnt_backupuid = pvolume_info->backupuid;
	}
	if (pvolume_info->backupgid_specified) {
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID;
		cifs_sb->mnt_backupgid = pvolume_info->backupgid;
	}
	if (pvolume_info->override_uid)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID;
	if (pvolume_info->override_gid)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID;
	if (pvolume_info->dynperm)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM;
	if
	   (pvolume_info->fsc)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE;
	if (pvolume_info->multiuser)
		cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER |
					    CIFS_MOUNT_NO_PERM);
	if (pvolume_info->strict_io)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO;
	if (pvolume_info->direct_io) {
		cifs_dbg(FYI, "mounting share using direct i/o\n");
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
	}
	if (pvolume_info->mfsymlinks) {
		if (pvolume_info->sfu_emul) {
			/*
			 * Our SFU ("Services for Unix" emulation does not allow
			 * creating symlinks but does allow reading existing SFU
			 * symlinks (it does allow both creating and reading SFU
			 * style mknod and FIFOs though). When "mfsymlinks" and
			 * "sfu" are both enabled at the same time, it allows
			 * reading both types of symlinks, but will only create
			 * them with mfsymlinks format. This allows better
			 * Apple compatibility (probably better for Samba too)
			 * while still recognizing old Windows style symlinks.
			 */
			cifs_dbg(VFS, "mount options mfsymlinks and sfu both enabled\n");
		}
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MF_SYMLINKS;
	}

	if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
		cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");

	if (pvolume_info->prepath) {
		cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL);
		if (cifs_sb->prepath == NULL)
			return -ENOMEM;
	}

	return 0;
}

/* Free the string members of @volume_info (the password is zeroed first). */
static void
cleanup_volume_info_contents(struct smb_vol *volume_info)
{
	kfree(volume_info->username);
	kzfree(volume_info->password);
	kfree(volume_info->UNC);
	kfree(volume_info->domainname);
	kfree(volume_info->iocharset);
	kfree(volume_info->prepath);
}

/* Free @volume_info and everything it owns; NULL is allowed. */
void
cifs_cleanup_volume_info(struct smb_vol *volume_info)
{
	if (!volume_info)
		return;
	cleanup_volume_info_contents(volume_info);
	kfree(volume_info);
}


#ifdef CONFIG_CIFS_DFS_UPCALL
/*
 * cifs_build_path_to_root returns full path to root when we do not have an
 * existing connection (tcon)
 */
static char *
build_unc_path_to_root(const struct smb_vol *vol,
		       const struct cifs_sb_info *cifs_sb)
{
	char
	     *full_path, *pos;
	unsigned int pplen = vol->prepath ? strlen(vol->prepath) + 1 : 0;
	unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);

	/* UNC + optional separator + prefix path + trailing NUL */
	full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	strncpy(full_path, vol->UNC, unc_len);
	pos = full_path + unc_len;

	if (pplen) {
		*pos = CIFS_DIR_SEP(cifs_sb);
		strncpy(pos + 1, vol->prepath, pplen);
		pos += pplen;
	}

	*pos = '\0'; /* add trailing null */
	convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
	cifs_dbg(FYI, "%s: full_path=%s\n", __func__, full_path);
	return full_path;
}

/*
 * Perform a dfs referral query for a share and (optionally) prefix
 *
 * If a referral is found, cifs_sb->mountdata will be (re-)allocated
 * to a string containing updated options for the submount.  Otherwise it
 * will be left untouched.
 *
 * Returns the rc from get_dfs_path to the caller, which can be used to
 * determine whether there were referrals.
 */
static int
expand_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
		    struct smb_vol *volume_info, struct cifs_sb_info *cifs_sb,
		    int check_prefix)
{
	int rc;
	unsigned int num_referrals = 0;
	struct dfs_info3_param *referrals = NULL;
	char *full_path = NULL, *ref_path = NULL, *mdata = NULL;

	full_path = build_unc_path_to_root(volume_info, cifs_sb);
	if (IS_ERR(full_path))
		return PTR_ERR(full_path);

	/* For DFS paths, skip the first '\' of the UNC */
	ref_path = check_prefix ?
		full_path + 1 : volume_info->UNC + 1;

	rc = get_dfs_path(xid, ses, ref_path, cifs_sb->local_nls,
			  &num_referrals, &referrals, cifs_remap(cifs_sb));

	if (!rc && num_referrals > 0) {
		char *fake_devname = NULL;

		/* rebuild the mount options string from the referral */
		mdata = cifs_compose_mount_options(cifs_sb->mountdata,
						   full_path + 1, referrals,
						   &fake_devname);

		free_dfs_info_array(referrals, num_referrals);

		if (IS_ERR(mdata)) {
			rc = PTR_ERR(mdata);
			mdata = NULL;
		} else {
			cleanup_volume_info_contents(volume_info);
			rc = cifs_setup_volume_info(volume_info, mdata,
							fake_devname);
		}
		kfree(fake_devname);
		kfree(cifs_sb->mountdata);
		cifs_sb->mountdata = mdata;
	}
	kfree(full_path);
	return rc;
}
#endif

/*
 * Parse @mount_data/@devname into @volume_info and load the charset
 * used for ASCII-to-Unicode conversion.  Anonymous (null-auth) mounts
 * drop any username; a missing username is an error.
 */
static int
cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
			const char *devname)
{
	int rc = 0;

	if (cifs_parse_mount_options(mount_data, devname, volume_info))
		return -EINVAL;

	if (volume_info->nullauth) {
		cifs_dbg(FYI, "Anonymous login\n");
		kfree(volume_info->username);
		volume_info->username = NULL;
	} else if (volume_info->username) {
		/* BB fixme parse for domain name here */
		cifs_dbg(FYI, "Username: %s\n", volume_info->username);
	} else {
		cifs_dbg(VFS, "No username specified\n");
	/* In userspace mount helper we can get user name from alternate
	   locations such as env variables and files on disk */
		return -EINVAL;
	}

	/* this is needed for ASCII cp to Unicode converts */
	if (volume_info->iocharset == NULL) {
		/* load_nls_default cannot return null */
		volume_info->local_nls = load_nls_default();
	} else {
		volume_info->local_nls = load_nls(volume_info->iocharset);
		if (volume_info->local_nls == NULL) {
			cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n",
				 volume_info->iocharset);
			return -ELIBACC;
		}
	}

	return rc;
}

/*
 * Allocate a smb_vol and fill it from @mount_data/@devname; returns an
 * ERR_PTR on parse failure.
 */
struct smb_vol *
cifs_get_volume_info(char *mount_data, const char *devname)
{
	int rc;
	struct smb_vol *volume_info;

	/*
	 * NOTE(review): mainline uses kzalloc() here; with kmalloc() any
	 * smb_vol fields not explicitly set by the option parser remain
	 * uninitialized -- verify against cifs_parse_mount_options().
	 */
	volume_info = kmalloc(sizeof(struct smb_vol), GFP_KERNEL);
	if (!volume_info)
		return ERR_PTR(-ENOMEM);

	rc = cifs_setup_volume_info(volume_info, mount_data, devname);
	if (rc) {
		cifs_cleanup_volume_info(volume_info);
		volume_info = ERR_PTR(rc);
	}

	return volume_info;
}

/*
 * Walk @full_path component by component, verifying that every
 * directory between the share root and the final path answers an
 * is_path_accessible query.
 */
static int
cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
					unsigned int xid,
					struct cifs_tcon *tcon,
					struct cifs_sb_info *cifs_sb,
					char *full_path)
{
	int rc;
	char *s;
	char sep, tmp;

	sep = CIFS_DIR_SEP(cifs_sb);
	s = full_path;

	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
	while (rc == 0) {
		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		/* next separator */
		while (*s && *s != sep)
			s++;

		/*
		 * temporarily null-terminate the path at the end of
		 * the current component
		 */
		tmp = *s;
		*s = 0;
		rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
						     full_path);
		*s = tmp;
	}
	return rc;
}

/*
 * Top-level mount worker: acquire the TCP server, SMB session and tree
 * connection, chase DFS referrals when needed, and hang the master
 * tcon link off the superblock.
 */
int
cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
{
	int rc;
	unsigned int xid;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *full_path;
	struct tcon_link *tlink;
#ifdef CONFIG_CIFS_DFS_UPCALL
	int referral_walks_count = 0;
#endif

	rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs");
	if (rc)
		return rc;

#ifdef CONFIG_CIFS_DFS_UPCALL
try_mount_again:
	/* cleanup activities if we're chasing a referral */
	if (referral_walks_count) {
		if (tcon)
			cifs_put_tcon(tcon);
		else if (ses)
			cifs_put_smb_ses(ses);

		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;

		free_xid(xid);
	}
#endif
	rc = 0;
	tcon = NULL;
	ses = NULL;
	server = NULL;
	full_path = NULL;
	tlink = NULL;

	xid = get_xid();

	/* get a reference to a tcp session */
	server = cifs_get_tcp_session(volume_info);
	if (IS_ERR(server)) {
		rc = PTR_ERR(server);
		bdi_destroy(&cifs_sb->bdi);
		goto out;
	}
	/* clamp out-of-range credit requests to the default */
	if ((volume_info->max_credits < 20) ||
	     (volume_info->max_credits > 60000))
		server->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
	else
		server->max_credits = volume_info->max_credits;
	/* get a reference to a SMB session */
	ses = cifs_get_smb_ses(server, volume_info);
	if (IS_ERR(ses)) {
		rc = PTR_ERR(ses);
		ses = NULL;
		goto mount_fail_check;
	}

#ifdef CONFIG_CIFS_SMB2
	if
	   ((volume_info->persistent == true) && ((ses->server->capabilities &
	      SMB2_GLOBAL_CAP_PERSISTENT_HANDLES) == 0)) {
		cifs_dbg(VFS, "persistent handles not supported by server\n");
		rc = -EOPNOTSUPP;
		goto mount_fail_check;
	}
#endif /* CONFIG_CIFS_SMB2*/

	/* search for existing tcon to this server share */
	tcon = cifs_get_tcon(ses, volume_info);
	if (IS_ERR(tcon)) {
		rc = PTR_ERR(tcon);
		tcon = NULL;
		goto remote_path_check;
	}

	/* tell server which Unix caps we support */
	if (cap_unix(tcon->ses)) {
		/* reset of caps checks mount to see if unix extensions
		   disabled for just this mount */
		reset_cifs_unix_caps(xid, tcon, cifs_sb, volume_info);
		if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
		    (le64_to_cpu(tcon->fsUnixInfo.Capability) &
		     CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
			rc = -EACCES;
			goto mount_fail_check;
		}
	} else
		tcon->unix_ext = 0; /* server does not support them */

	/* do not care if a following call succeed - informational */
	if (!tcon->ipc && server->ops->qfs_tcon)
		server->ops->qfs_tcon(xid, tcon);

	cifs_sb->wsize = server->ops->negotiate_wsize(tcon, volume_info);
	cifs_sb->rsize = server->ops->negotiate_rsize(tcon, volume_info);

	/* tune readahead according to rsize */
	cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_SIZE;

remote_path_check:
#ifdef CONFIG_CIFS_DFS_UPCALL
	/*
	 * Perform an unconditional check for whether there are DFS
	 * referrals for this path without prefix, to provide support
	 * for DFS referrals from w2k8 servers which don't seem to respond
	 * with PATH_NOT_COVERED to requests that include the prefix.
	 * Chase the referral if found, otherwise continue normally.
	 */
	if (referral_walks_count == 0) {
		int refrc = expand_dfs_referral(xid, ses, volume_info, cifs_sb,
						false);
		if (!refrc) {
			referral_walks_count++;
			goto try_mount_again;
		}
	}
#endif

	/* check if a whole path is not remote */
	if (!rc && tcon) {
		if (!server->ops->is_path_accessible) {
			rc = -ENOSYS;
			goto mount_fail_check;
		}
		/*
		 * cifs_build_path_to_root works only when we have a valid tcon
		 */
		full_path = cifs_build_path_to_root(volume_info, cifs_sb, tcon,
					tcon->Flags & SMB_SHARE_IS_IN_DFS);
		if (full_path == NULL) {
			rc = -ENOMEM;
			goto mount_fail_check;
		}
		rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
						     full_path);
		if (rc != 0 && rc != -EREMOTE) {
			kfree(full_path);
			goto mount_fail_check;
		}

		if (rc != -EREMOTE) {
			rc = cifs_are_all_path_components_accessible(server,
							     xid, tcon, cifs_sb,
							     full_path);
			if (rc != 0) {
				/* fall back to addressing the final path
				   directly rather than per-component */
				cifs_dbg(VFS, "cannot query dirs between root and final path, "
					 "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
				cifs_sb->mnt_cifs_flags |=
					CIFS_MOUNT_USE_PREFIX_PATH;
				rc = 0;
			}
		}
		kfree(full_path);
	}

	/* get referral if needed */
	if (rc == -EREMOTE) {
#ifdef CONFIG_CIFS_DFS_UPCALL
		if (referral_walks_count > MAX_NESTED_LINKS) {
			/*
			 * BB: when we implement proper loop detection,
			 *     we will remove this check. But now we need it
			 *     to prevent an indefinite loop if 'DFS tree' is
			 *     misconfigured (i.e. has loops).
			 */
			rc = -ELOOP;
			goto mount_fail_check;
		}

		rc = expand_dfs_referral(xid, ses, volume_info, cifs_sb, true);

		if (!rc) {
			referral_walks_count++;
			goto try_mount_again;
		}
		goto mount_fail_check;
#else /* No DFS support, return error on mount */
		rc = -EOPNOTSUPP;
#endif
	}

	if (rc)
		goto mount_fail_check;

	/* now, hang the tcon off of the superblock */
	tlink = kzalloc(sizeof *tlink, GFP_KERNEL);
	if (tlink == NULL) {
		rc = -ENOMEM;
		goto mount_fail_check;
	}

	tlink->tl_uid = ses->linux_uid;
	tlink->tl_tcon = tcon;
	tlink->tl_time = jiffies;
	set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
	set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);

	cifs_sb->master_tlink = tlink;
	spin_lock(&cifs_sb->tlink_tree_lock);
	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
	spin_unlock(&cifs_sb->tlink_tree_lock);

	/* periodically prune idle non-master tlinks */
	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
				TLINK_IDLE_EXPIRE);

mount_fail_check:
	/* on error free sesinfo and tcon struct if needed */
	if (rc) {
		/* If find_unc succeeded then rc == 0 so we can not end */
		/* up accidentally freeing someone elses tcon struct */
		if (tcon)
			cifs_put_tcon(tcon);
		else if (ses)
			cifs_put_smb_ses(ses);
		else
			cifs_put_tcp_session(server, 0);
		bdi_destroy(&cifs_sb->bdi);
	}

out:
	free_xid(xid);
	return rc;
}

/*
 * Issue a TREE_CONNECT request.  Note that for IPC$ shares, that the tcon
 * pointer may be NULL.
 */
*/ int CIFSTCon(const unsigned int xid, struct cifs_ses *ses, const char *tree, struct cifs_tcon *tcon, const struct nls_table *nls_codepage) { struct smb_hdr *smb_buffer; struct smb_hdr *smb_buffer_response; TCONX_REQ *pSMB; TCONX_RSP *pSMBr; unsigned char *bcc_ptr; int rc = 0; int length; __u16 bytes_left, count; if (ses == NULL) return -EIO; smb_buffer = cifs_buf_get(); if (smb_buffer == NULL) return -ENOMEM; smb_buffer_response = smb_buffer; header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, NULL /*no tid */ , 4 /*wct */ ); smb_buffer->Mid = get_next_mid(ses->server); smb_buffer->Uid = ses->Suid; pSMB = (TCONX_REQ *) smb_buffer; pSMBr = (TCONX_RSP *) smb_buffer_response; pSMB->AndXCommand = 0xFF; pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); bcc_ptr = &pSMB->Password[0]; if (!tcon || (ses->server->sec_mode & SECMODE_USER)) { pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ *bcc_ptr = 0; /* password is null byte */ bcc_ptr++; /* skip password */ /* already aligned so no need to do it below */ } else { pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); /* BB FIXME add code to fail this if NTLMv2 or Kerberos specified as required (when that support is added to the vfs in the future) as only NTLM or the much weaker LANMAN (which we do not send by default) is accepted by Samba (not sure whether other servers allow NTLMv2 password here) */ #ifdef CONFIG_CIFS_WEAK_PW_HASH if ((global_secflags & CIFSSEC_MAY_LANMAN) && (ses->sectype == LANMAN)) calc_lanman_hash(tcon->password, ses->server->cryptkey, ses->server->sec_mode & SECMODE_PW_ENCRYPT ? true : false, bcc_ptr); else #endif /* CIFS_WEAK_PW_HASH */ rc = SMBNTencrypt(tcon->password, ses->server->cryptkey, bcc_ptr, nls_codepage); if (rc) { cifs_dbg(FYI, "%s Can't generate NTLM rsp. 
Error: %d\n", __func__, rc); cifs_buf_release(smb_buffer); return rc; } bcc_ptr += CIFS_AUTH_RESP_SIZE; if (ses->capabilities & CAP_UNICODE) { /* must align unicode strings */ *bcc_ptr = 0; /* null byte password */ bcc_ptr++; } } if (ses->server->sign) smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; if (ses->capabilities & CAP_STATUS32) { smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS; } if (ses->capabilities & CAP_DFS) { smb_buffer->Flags2 |= SMBFLG2_DFS; } if (ses->capabilities & CAP_UNICODE) { smb_buffer->Flags2 |= SMBFLG2_UNICODE; length = cifs_strtoUTF16((__le16 *) bcc_ptr, tree, 6 /* max utf8 char length in bytes */ * (/* server len*/ + 256 /* share len */), nls_codepage); bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */ bcc_ptr += 2; /* skip trailing null */ } else { /* ASCII */ strcpy(bcc_ptr, tree); bcc_ptr += strlen(tree) + 1; } strcpy(bcc_ptr, "?????"); bcc_ptr += strlen("?????"); bcc_ptr += 1; count = bcc_ptr - &pSMB->Password[0]; pSMB->hdr.smb_buf_length = cpu_to_be32(be32_to_cpu( pSMB->hdr.smb_buf_length) + count); pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, 0); /* above now done in SendReceive */ if ((rc == 0) && (tcon != NULL)) { bool is_unicode; tcon->tidStatus = CifsGood; tcon->need_reconnect = false; tcon->tid = smb_buffer_response->Tid; bcc_ptr = pByteArea(smb_buffer_response); bytes_left = get_bcc(smb_buffer_response); length = strnlen(bcc_ptr, bytes_left - 2); if (smb_buffer->Flags2 & SMBFLG2_UNICODE) is_unicode = true; else is_unicode = false; /* skip service field (NB: this field is always ASCII) */ if (length == 3) { if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') && (bcc_ptr[2] == 'C')) { cifs_dbg(FYI, "IPC connection\n"); tcon->ipc = 1; } } else if (length == 2) { if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) { /* the most common case */ cifs_dbg(FYI, "disk share connection\n"); } } bcc_ptr += length + 1; bytes_left -= (length + 1); strlcpy(tcon->treeName, tree, 
sizeof(tcon->treeName)); /* mostly informational -- no need to fail on error here */ kfree(tcon->nativeFileSystem); tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr, bytes_left, is_unicode, nls_codepage); cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem); if ((smb_buffer_response->WordCount == 3) || (smb_buffer_response->WordCount == 7)) /* field is in same location */ tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport); else tcon->Flags = 0; cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags); } else if ((rc == 0) && tcon == NULL) { /* all we need to save for IPC$ connection */ ses->ipc_tid = smb_buffer_response->Tid; } cifs_buf_release(smb_buffer); return rc; } static void delayed_free(struct rcu_head *p) { struct cifs_sb_info *sbi = container_of(p, struct cifs_sb_info, rcu); unload_nls(sbi->local_nls); kfree(sbi); } void cifs_umount(struct cifs_sb_info *cifs_sb) { struct rb_root *root = &cifs_sb->tlink_tree; struct rb_node *node; struct tcon_link *tlink; cancel_delayed_work_sync(&cifs_sb->prune_tlinks); spin_lock(&cifs_sb->tlink_tree_lock); while ((node = rb_first(root))) { tlink = rb_entry(node, struct tcon_link, tl_rbnode); cifs_get_tlink(tlink); clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); rb_erase(node, root); spin_unlock(&cifs_sb->tlink_tree_lock); cifs_put_tlink(tlink); spin_lock(&cifs_sb->tlink_tree_lock); } spin_unlock(&cifs_sb->tlink_tree_lock); bdi_destroy(&cifs_sb->bdi); kfree(cifs_sb->mountdata); kfree(cifs_sb->prepath); call_rcu(&cifs_sb->rcu, delayed_free); } int cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses) { int rc = 0; struct TCP_Server_Info *server = ses->server; if (!server->ops->need_neg || !server->ops->negotiate) return -ENOSYS; /* only send once per connect */ if (!server->ops->need_neg(server)) return 0; set_credits(server, 1); rc = server->ops->negotiate(xid, ses); if (rc == 0) { spin_lock(&GlobalMid_Lock); if (server->tcpStatus == CifsNeedNegotiate) server->tcpStatus = CifsGood; else rc = 
-EHOSTDOWN; spin_unlock(&GlobalMid_Lock); } return rc; } int cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, struct nls_table *nls_info) { int rc = -ENOSYS; struct TCP_Server_Info *server = ses->server; ses->capabilities = server->capabilities; if (linuxExtEnabled == 0) ses->capabilities &= (~server->vals->cap_unix); cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n", server->sec_mode, server->capabilities, server->timeAdj); if (server->ops->sess_setup) rc = server->ops->sess_setup(xid, ses, nls_info); if (rc) cifs_dbg(VFS, "Send error in SessSetup = %d\n", rc); return rc; } static int cifs_set_vol_auth(struct smb_vol *vol, struct cifs_ses *ses) { vol->sectype = ses->sectype; /* krb5 is special, since we don't need username or pw */ if (vol->sectype == Kerberos) return 0; return cifs_set_cifscreds(vol, ses); } static struct cifs_tcon * cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) { int rc; struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb); struct cifs_ses *ses; struct cifs_tcon *tcon = NULL; struct smb_vol *vol_info; vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL); if (vol_info == NULL) return ERR_PTR(-ENOMEM); vol_info->local_nls = cifs_sb->local_nls; vol_info->linux_uid = fsuid; vol_info->cred_uid = fsuid; vol_info->UNC = master_tcon->treeName; vol_info->retry = master_tcon->retry; vol_info->nocase = master_tcon->nocase; vol_info->local_lease = master_tcon->local_lease; vol_info->no_linux_ext = !master_tcon->unix_ext; vol_info->sectype = master_tcon->ses->sectype; vol_info->sign = master_tcon->ses->sign; rc = cifs_set_vol_auth(vol_info, master_tcon->ses); if (rc) { tcon = ERR_PTR(rc); goto out; } /* get a reference for the same TCP session */ spin_lock(&cifs_tcp_ses_lock); ++master_tcon->ses->server->srv_count; spin_unlock(&cifs_tcp_ses_lock); ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info); if (IS_ERR(ses)) { tcon = (struct cifs_tcon *)ses; 
cifs_put_tcp_session(master_tcon->ses->server, 0); goto out; } tcon = cifs_get_tcon(ses, vol_info); if (IS_ERR(tcon)) { cifs_put_smb_ses(ses); goto out; } if (cap_unix(ses)) reset_cifs_unix_caps(0, tcon, NULL, vol_info); out: kfree(vol_info->username); kfree(vol_info->password); kfree(vol_info); return tcon; } struct cifs_tcon * cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb) { return tlink_tcon(cifs_sb_master_tlink(cifs_sb)); } /* find and return a tlink with given uid */ static struct tcon_link * tlink_rb_search(struct rb_root *root, kuid_t uid) { struct rb_node *node = root->rb_node; struct tcon_link *tlink; while (node) { tlink = rb_entry(node, struct tcon_link, tl_rbnode); if (uid_gt(tlink->tl_uid, uid)) node = node->rb_left; else if (uid_lt(tlink->tl_uid, uid)) node = node->rb_right; else return tlink; } return NULL; } /* insert a tcon_link into the tree */ static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink) { struct rb_node **new = &(root->rb_node), *parent = NULL; struct tcon_link *tlink; while (*new) { tlink = rb_entry(*new, struct tcon_link, tl_rbnode); parent = *new; if (uid_gt(tlink->tl_uid, new_tlink->tl_uid)) new = &((*new)->rb_left); else new = &((*new)->rb_right); } rb_link_node(&new_tlink->tl_rbnode, parent, new); rb_insert_color(&new_tlink->tl_rbnode, root); } /* * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the * current task. * * If the superblock doesn't refer to a multiuser mount, then just return * the master tcon for the mount. * * First, search the rbtree for an existing tcon for this fsuid. If one * exists, then check to see if it's pending construction. If it is then wait * for construction to complete. Once it's no longer pending, check to see if * it failed and either return an error or retry construction, depending on * the timeout. * * If one doesn't exist then insert a new tcon_link struct into the tree and * try to construct a new one. 
*/ struct tcon_link * cifs_sb_tlink(struct cifs_sb_info *cifs_sb) { int ret; kuid_t fsuid = current_fsuid(); struct tcon_link *tlink, *newtlink; if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); spin_lock(&cifs_sb->tlink_tree_lock); tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid); if (tlink) cifs_get_tlink(tlink); spin_unlock(&cifs_sb->tlink_tree_lock); if (tlink == NULL) { newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL); if (newtlink == NULL) return ERR_PTR(-ENOMEM); newtlink->tl_uid = fsuid; newtlink->tl_tcon = ERR_PTR(-EACCES); set_bit(TCON_LINK_PENDING, &newtlink->tl_flags); set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags); cifs_get_tlink(newtlink); spin_lock(&cifs_sb->tlink_tree_lock); /* was one inserted after previous search? */ tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid); if (tlink) { cifs_get_tlink(tlink); spin_unlock(&cifs_sb->tlink_tree_lock); kfree(newtlink); goto wait_for_construction; } tlink = newtlink; tlink_rb_insert(&cifs_sb->tlink_tree, tlink); spin_unlock(&cifs_sb->tlink_tree_lock); } else { wait_for_construction: ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING, TASK_INTERRUPTIBLE); if (ret) { cifs_put_tlink(tlink); return ERR_PTR(-ERESTARTSYS); } /* if it's good, return it */ if (!IS_ERR(tlink->tl_tcon)) return tlink; /* return error if we tried this already recently */ if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) { cifs_put_tlink(tlink); return ERR_PTR(-EACCES); } if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags)) goto wait_for_construction; } tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid); clear_bit(TCON_LINK_PENDING, &tlink->tl_flags); wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING); if (IS_ERR(tlink->tl_tcon)) { cifs_put_tlink(tlink); return ERR_PTR(-EACCES); } return tlink; } /* * periodic workqueue job that scans tcon_tree for a superblock and closes * out tcons. 
*/ static void cifs_prune_tlinks(struct work_struct *work) { struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info, prune_tlinks.work); struct rb_root *root = &cifs_sb->tlink_tree; struct rb_node *node = rb_first(root); struct rb_node *tmp; struct tcon_link *tlink; /* * Because we drop the spinlock in the loop in order to put the tlink * it's not guarded against removal of links from the tree. The only * places that remove entries from the tree are this function and * umounts. Because this function is non-reentrant and is canceled * before umount can proceed, this is safe. */ spin_lock(&cifs_sb->tlink_tree_lock); node = rb_first(root); while (node != NULL) { tmp = node; node = rb_next(tmp); tlink = rb_entry(tmp, struct tcon_link, tl_rbnode); if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) || atomic_read(&tlink->tl_count) != 0 || time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies)) continue; cifs_get_tlink(tlink); clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); rb_erase(tmp, root); spin_unlock(&cifs_sb->tlink_tree_lock); cifs_put_tlink(tlink); spin_lock(&cifs_sb->tlink_tree_lock); } spin_unlock(&cifs_sb->tlink_tree_lock); queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks, TLINK_IDLE_EXPIRE); }
null
null
null
null
76,956
25,960
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
25,960
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef EXTENSIONS_BROWSER_API_SOCKETS_TCP_SOCKETS_TCP_API_H_ #define EXTENSIONS_BROWSER_API_SOCKETS_TCP_SOCKETS_TCP_API_H_ #include <stddef.h> #include "base/gtest_prod_util.h" #include "base/macros.h" #include "extensions/browser/api/socket/socket_api.h" #include "extensions/common/api/sockets_tcp.h" namespace extensions { class ResumableTCPSocket; class TLSSocket; } namespace extensions { namespace api { class TCPSocketEventDispatcher; class TCPSocketAsyncApiFunction : public SocketAsyncApiFunction { protected: ~TCPSocketAsyncApiFunction() override; std::unique_ptr<SocketResourceManagerInterface> CreateSocketResourceManager() override; ResumableTCPSocket* GetTcpSocket(int socket_id); }; class TCPSocketExtensionWithDnsLookupFunction : public SocketExtensionWithDnsLookupFunction { protected: ~TCPSocketExtensionWithDnsLookupFunction() override; std::unique_ptr<SocketResourceManagerInterface> CreateSocketResourceManager() override; ResumableTCPSocket* GetTcpSocket(int socket_id); }; class SocketsTcpCreateFunction : public TCPSocketAsyncApiFunction { public: DECLARE_EXTENSION_FUNCTION("sockets.tcp.create", SOCKETS_TCP_CREATE) SocketsTcpCreateFunction(); protected: ~SocketsTcpCreateFunction() override; // AsyncApiFunction: bool Prepare() override; void Work() override; private: FRIEND_TEST_ALL_PREFIXES(SocketsTcpUnitTest, Create); std::unique_ptr<sockets_tcp::Create::Params> params_; }; class SocketsTcpUpdateFunction : public TCPSocketAsyncApiFunction { public: DECLARE_EXTENSION_FUNCTION("sockets.tcp.update", SOCKETS_TCP_UPDATE) SocketsTcpUpdateFunction(); protected: ~SocketsTcpUpdateFunction() override; // AsyncApiFunction: bool Prepare() override; void Work() override; private: std::unique_ptr<sockets_tcp::Update::Params> params_; }; class SocketsTcpSetPausedFunction : public TCPSocketAsyncApiFunction { 
public: DECLARE_EXTENSION_FUNCTION("sockets.tcp.setPaused", SOCKETS_TCP_SETPAUSED) SocketsTcpSetPausedFunction(); protected: ~SocketsTcpSetPausedFunction() override; // AsyncApiFunction bool Prepare() override; void Work() override; private: std::unique_ptr<sockets_tcp::SetPaused::Params> params_; TCPSocketEventDispatcher* socket_event_dispatcher_; }; class SocketsTcpSetKeepAliveFunction : public TCPSocketAsyncApiFunction { public: DECLARE_EXTENSION_FUNCTION("sockets.tcp.setKeepAlive", SOCKETS_TCP_SETKEEPALIVE) SocketsTcpSetKeepAliveFunction(); protected: ~SocketsTcpSetKeepAliveFunction() override; // AsyncApiFunction bool Prepare() override; void Work() override; private: std::unique_ptr<sockets_tcp::SetKeepAlive::Params> params_; }; class SocketsTcpSetNoDelayFunction : public TCPSocketAsyncApiFunction { public: DECLARE_EXTENSION_FUNCTION("sockets.tcp.setNoDelay", SOCKETS_TCP_SETNODELAY) SocketsTcpSetNoDelayFunction(); protected: ~SocketsTcpSetNoDelayFunction() override; // AsyncApiFunction bool Prepare() override; void Work() override; private: std::unique_ptr<sockets_tcp::SetNoDelay::Params> params_; }; class SocketsTcpConnectFunction : public TCPSocketExtensionWithDnsLookupFunction { public: DECLARE_EXTENSION_FUNCTION("sockets.tcp.connect", SOCKETS_TCP_CONNECT) SocketsTcpConnectFunction(); protected: ~SocketsTcpConnectFunction() override; // AsyncApiFunction: bool Prepare() override; void AsyncWorkStart() override; // SocketExtensionWithDnsLookupFunction: void AfterDnsLookup(int lookup_result) override; private: void StartConnect(); void OnCompleted(int net_result); std::unique_ptr<sockets_tcp::Connect::Params> params_; TCPSocketEventDispatcher* socket_event_dispatcher_; }; class SocketsTcpDisconnectFunction : public TCPSocketAsyncApiFunction { public: DECLARE_EXTENSION_FUNCTION("sockets.tcp.disconnect", SOCKETS_TCP_DISCONNECT) SocketsTcpDisconnectFunction(); protected: ~SocketsTcpDisconnectFunction() override; // AsyncApiFunction: bool Prepare() override; void 
Work() override; private: std::unique_ptr<sockets_tcp::Disconnect::Params> params_; }; class SocketsTcpSendFunction : public TCPSocketAsyncApiFunction { public: DECLARE_EXTENSION_FUNCTION("sockets.tcp.send", SOCKETS_TCP_SEND) SocketsTcpSendFunction(); protected: ~SocketsTcpSendFunction() override; // AsyncApiFunction: bool Prepare() override; void AsyncWorkStart() override; private: void OnCompleted(int net_result); void SetSendResult(int net_result, int bytes_sent); std::unique_ptr<sockets_tcp::Send::Params> params_; scoped_refptr<net::IOBuffer> io_buffer_; size_t io_buffer_size_; }; class SocketsTcpCloseFunction : public TCPSocketAsyncApiFunction { public: DECLARE_EXTENSION_FUNCTION("sockets.tcp.close", SOCKETS_TCP_CLOSE) SocketsTcpCloseFunction(); protected: ~SocketsTcpCloseFunction() override; // AsyncApiFunction: bool Prepare() override; void Work() override; private: std::unique_ptr<sockets_tcp::Close::Params> params_; }; class SocketsTcpGetInfoFunction : public TCPSocketAsyncApiFunction { public: DECLARE_EXTENSION_FUNCTION("sockets.tcp.getInfo", SOCKETS_TCP_GETINFO) SocketsTcpGetInfoFunction(); protected: ~SocketsTcpGetInfoFunction() override; // AsyncApiFunction: bool Prepare() override; void Work() override; private: std::unique_ptr<sockets_tcp::GetInfo::Params> params_; }; class SocketsTcpGetSocketsFunction : public TCPSocketAsyncApiFunction { public: DECLARE_EXTENSION_FUNCTION("sockets.tcp.getSockets", SOCKETS_TCP_GETSOCKETS) SocketsTcpGetSocketsFunction(); protected: ~SocketsTcpGetSocketsFunction() override; // AsyncApiFunction: bool Prepare() override; void Work() override; }; class SocketsTcpSecureFunction : public TCPSocketAsyncApiFunction { public: DECLARE_EXTENSION_FUNCTION("sockets.tcp.secure", SOCKETS_TCP_SECURE); SocketsTcpSecureFunction(); protected: ~SocketsTcpSecureFunction() override; bool Prepare() override; void AsyncWorkStart() override; private: virtual void TlsConnectDone(std::unique_ptr<extensions::TLSSocket> sock, int result); bool 
paused_; bool persistent_; std::unique_ptr<sockets_tcp::Secure::Params> params_; scoped_refptr<net::URLRequestContextGetter> url_request_getter_; DISALLOW_COPY_AND_ASSIGN(SocketsTcpSecureFunction); }; } // namespace api } // namespace extensions #endif // EXTENSIONS_BROWSER_API_SOCKETS_TCP_SOCKETS_TCP_API_H_
null
null
null
null
22,823
65,400
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
65,400
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ui/ash/launcher/settings_window_observer.h" #include "ash/public/cpp/app_list/internal_app_id_constants.h" #include "ash/public/cpp/shelf_item.h" #include "ash/public/cpp/window_properties.h" #include "ash/resources/grit/ash_resources.h" #include "chrome/browser/ui/browser.h" #include "chrome/browser/ui/browser_window.h" #include "chrome/browser/ui/settings_window_manager_chromeos.h" #include "components/strings/grit/components_strings.h" #include "services/ui/public/interfaces/window_manager.mojom.h" #include "ui/aura/client/aura_constants.h" #include "ui/aura/window.h" #include "ui/base/class_property.h" #include "ui/base/l10n/l10n_util.h" #include "ui/base/resource/resource_bundle.h" #include "ui/gfx/image/image_skia.h" namespace { // This class is only used in classic ash to rename the Settings window. class AuraWindowSettingsTitleTracker : public aura::WindowTracker { public: AuraWindowSettingsTitleTracker() {} ~AuraWindowSettingsTitleTracker() override {} // aura::WindowTracker: void OnWindowTitleChanged(aura::Window* window) override { // Name the window "Settings" instead of "Google Chrome - Settings". 
window->SetTitle(l10n_util::GetStringUTF16(IDS_SETTINGS_TITLE)); } private: DISALLOW_COPY_AND_ASSIGN(AuraWindowSettingsTitleTracker); }; } // namespace SettingsWindowObserver::SettingsWindowObserver() { aura_window_tracker_ = std::make_unique<AuraWindowSettingsTitleTracker>(); chrome::SettingsWindowManager::GetInstance()->AddObserver(this); } SettingsWindowObserver::~SettingsWindowObserver() { chrome::SettingsWindowManager::GetInstance()->RemoveObserver(this); } void SettingsWindowObserver::OnNewSettingsWindow(Browser* settings_browser) { aura::Window* window = settings_browser->window()->GetNativeWindow(); window->SetTitle(l10n_util::GetStringUTF16(IDS_SETTINGS_TITLE)); const ash::ShelfID shelf_id(app_list::kInternalAppIdSettings); window->SetProperty(ash::kShelfIDKey, new std::string(shelf_id.Serialize())); window->SetProperty<int>(ash::kShelfItemTypeKey, ash::TYPE_DIALOG); ui::ResourceBundle& rb = ui::ResourceBundle::GetSharedInstance(); // The new gfx::ImageSkia instance is owned by the window itself. window->SetProperty( aura::client::kWindowIconKey, new gfx::ImageSkia(*rb.GetImageSkiaNamed(IDR_SETTINGS_LOGO_192))); window->SetProperty(aura::client::kHasOverviewIcon, true); aura_window_tracker_->Add(window); }
null
null
null
null
62,263
31,217
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
31,217
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_RENDERER_CORE_FRAME_CSP_SOURCE_LIST_DIRECTIVE_H_ #define THIRD_PARTY_BLINK_RENDERER_CORE_FRAME_CSP_SOURCE_LIST_DIRECTIVE_H_ #include "base/macros.h" #include "third_party/blink/public/platform/web_content_security_policy.h" #include "third_party/blink/renderer/core/core_export.h" #include "third_party/blink/renderer/core/frame/csp/csp_directive.h" #include "third_party/blink/renderer/core/frame/csp/csp_source.h" #include "third_party/blink/renderer/platform/crypto.h" #include "third_party/blink/renderer/platform/loader/fetch/resource_request.h" #include "third_party/blink/renderer/platform/network/content_security_policy_parsers.h" #include "third_party/blink/renderer/platform/wtf/hash_set.h" #include "third_party/blink/renderer/platform/wtf/text/wtf_string.h" namespace blink { class ContentSecurityPolicy; class KURL; class CORE_EXPORT SourceListDirective final : public CSPDirective { public: SourceListDirective(const String& name, const String& value, ContentSecurityPolicy*); void Trace(blink::Visitor*); void Parse(const UChar* begin, const UChar* end); bool Matches(const KURL&, ResourceRequest::RedirectStatus = ResourceRequest::RedirectStatus::kNoRedirect) const; bool Allows(const KURL&, ResourceRequest::RedirectStatus = ResourceRequest::RedirectStatus::kNoRedirect) const; bool AllowInline() const; bool AllowEval() const; bool AllowWasmEval() const; bool AllowDynamic() const; bool AllowNonce(const String& nonce) const; bool AllowHash(const CSPHashValue&) const; bool AllowHashedAttributes() const; bool AllowReportSample() const; bool IsNone() const; bool IsHashOrNoncePresent() const; uint8_t HashAlgorithmsUsed() const; bool AllowAllInline() const; // The algorothm is described more extensively here: // https://w3c.github.io/webappsec-csp/embedded/#subsume-source-list bool 
Subsumes(const HeapVector<Member<SourceListDirective>>&) const; // Export a subset of the source list that affect navigation. // It contains every source-expressions, '*', 'none' and 'self'. // It doesn't contain 'unsafe-inline' or 'unsafe-eval' for instance. WebContentSecurityPolicySourceList ExposeForNavigationalChecks() const; String DirectiveName() const { return directive_name_; } private: FRIEND_TEST_ALL_PREFIXES(SourceListDirectiveTest, GetIntersectCSPSources); FRIEND_TEST_ALL_PREFIXES(SourceListDirectiveTest, GetIntersectCSPSourcesSchemes); FRIEND_TEST_ALL_PREFIXES(SourceListDirectiveTest, GetIntersectNonces); FRIEND_TEST_ALL_PREFIXES(SourceListDirectiveTest, GetIntersectHashes); FRIEND_TEST_ALL_PREFIXES(SourceListDirectiveTest, GetSources); FRIEND_TEST_ALL_PREFIXES(SourceListDirectiveTest, ParseHost); FRIEND_TEST_ALL_PREFIXES(CSPDirectiveListTest, GetSourceVector); FRIEND_TEST_ALL_PREFIXES(CSPDirectiveListTest, OperativeDirectiveGivenType); bool ParseSource(const UChar* begin, const UChar* end, String& scheme, String& host, int& port, String& path, CSPSource::WildcardDisposition&, CSPSource::WildcardDisposition&); bool ParseScheme(const UChar* begin, const UChar* end, String& scheme); static bool ParseHost(const UChar* begin, const UChar* end, String& host, CSPSource::WildcardDisposition&); bool ParsePort(const UChar* begin, const UChar* end, int& port, CSPSource::WildcardDisposition&); bool ParsePath(const UChar* begin, const UChar* end, String& path); bool ParseNonce(const UChar* begin, const UChar* end, String& nonce); bool ParseHash(const UChar* begin, const UChar* end, DigestValue& hash, ContentSecurityPolicyHashAlgorithm&); void AddSourceSelf(); void AddSourceStar(); void AddSourceUnsafeInline(); void AddSourceUnsafeEval(); void AddSourceWasmEval(); void AddSourceStrictDynamic(); void AddSourceUnsafeHashedAttributes(); void AddReportSample(); void AddSourceNonce(const String& nonce); void AddSourceHash(const ContentSecurityPolicyHashAlgorithm&, const 
DigestValue& hash); static void AddSourceToMap(HeapHashMap<String, Member<CSPSource>>&, CSPSource*); bool HasSourceMatchInList(const KURL&, ResourceRequest::RedirectStatus) const; HashSet<String> GetIntersectNonces(const HashSet<String>& other) const; HashSet<CSPHashValue> GetIntersectHashes( const HashSet<CSPHashValue>& other) const; HeapVector<Member<CSPSource>> GetIntersectCSPSources( const HeapVector<Member<CSPSource>>& other) const; HeapHashMap<String, Member<CSPSource>> GetIntersectSchemesOnly( const HeapVector<Member<CSPSource>>& other) const; bool SubsumesNoncesAndHashes(const HashSet<String>& nonces, const HashSet<CSPHashValue> hashes) const; HeapVector<Member<CSPSource>> GetSources(Member<CSPSource>) const; Member<ContentSecurityPolicy> policy_; HeapVector<Member<CSPSource>> list_; String directive_name_; bool allow_self_; bool allow_star_; bool allow_inline_; bool allow_eval_; bool allow_wasm_eval_; bool allow_dynamic_; bool allow_hashed_attributes_; bool report_sample_; HashSet<String> nonces_; HashSet<CSPHashValue> hashes_; uint8_t hash_algorithms_used_; DISALLOW_COPY_AND_ASSIGN(SourceListDirective); }; } // namespace blink #endif
null
null
null
null
28,080
17,368
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
182,363
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * linux/arch/unicore32/boot/compressed/misc.c * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <asm/unaligned.h> #include <mach/uncompress.h> /* * gzip delarations */ unsigned char *output_data; unsigned long output_ptr; unsigned int free_mem_ptr; unsigned int free_mem_end_ptr; #define STATIC static #define STATIC_RW_DATA /* non-static please */ /* * arch-dependent implementations */ #ifndef ARCH_HAVE_DECOMP_ERROR #define arch_decomp_error(x) #endif #ifndef ARCH_HAVE_DECOMP_SETUP #define arch_decomp_setup() #endif #ifndef ARCH_HAVE_DECOMP_PUTS #define arch_decomp_puts(p) #endif void *memcpy(void *dest, const void *src, size_t n) { int i = 0; unsigned char *d = (unsigned char *)dest, *s = (unsigned char *)src; for (i = n >> 3; i > 0; i--) { *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; } if (n & 1 << 2) { *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; } if (n & 1 << 1) { *d++ = *s++; *d++ = *s++; } if (n & 1) *d++ = *s++; return dest; } void error(char *x) { arch_decomp_puts("\n\n"); arch_decomp_puts(x); arch_decomp_puts("\n\n -- System halted"); arch_decomp_error(x); for (;;) ; /* Halt */ } /* Heap size should be adjusted for different decompress method */ #ifdef CONFIG_KERNEL_GZIP #include "../../../../lib/decompress_inflate.c" #endif #ifdef CONFIG_KERNEL_BZIP2 #include "../../../../lib/decompress_bunzip2.c" #endif #ifdef CONFIG_KERNEL_LZO #include "../../../../lib/decompress_unlzo.c" #endif #ifdef CONFIG_KERNEL_LZMA #include "../../../../lib/decompress_unlzma.c" #endif unsigned long decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p, unsigned long free_mem_ptr_end_p) { unsigned char *tmp; output_data = (unsigned char *)output_start; 
free_mem_ptr = free_mem_ptr_p; free_mem_end_ptr = free_mem_ptr_end_p; arch_decomp_setup(); tmp = (unsigned char *) (((unsigned long)input_data_end) - 4); output_ptr = get_unaligned_le32(tmp); arch_decomp_puts("Uncompressing Linux..."); __decompress(input_data, input_data_end - input_data, NULL, NULL, output_data, 0, NULL, error); arch_decomp_puts(" done, booting the kernel.\n"); return output_ptr; }
null
null
null
null
90,710
35,753
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
35,753
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_RENDERER_CORE_EDITING_COMMANDS_DELETE_SELECTION_OPTIONS_H_ #define THIRD_PARTY_BLINK_RENDERER_CORE_EDITING_COMMANDS_DELETE_SELECTION_OPTIONS_H_ #include "base/macros.h" #include "third_party/blink/renderer/core/core_export.h" #include "third_party/blink/renderer/platform/wtf/allocator.h" namespace blink { // DeleteSelectionOptions of |DeleteSelectionCommand|. class CORE_EXPORT DeleteSelectionOptions final { DISALLOW_NEW(); public: class Builder; DeleteSelectionOptions(const DeleteSelectionOptions&); bool IsExpandForSpecialElements() const; bool IsMergeBlocksAfterDelete() const; bool IsSanitizeMarkup() const; bool IsSmartDelete() const; static DeleteSelectionOptions NormalDelete(); static DeleteSelectionOptions SmartDelete(); private: DeleteSelectionOptions(); bool is_expand_for_special_elements_ = false; bool is_merge_blocks_after_delete_ = false; bool is_sanitize_markup_ = false; bool is_smart_delete_ = false; }; // Build |DeleteSelectionCommand::Options|. class CORE_EXPORT DeleteSelectionOptions::Builder final { DISALLOW_NEW(); public: Builder(); DeleteSelectionOptions Build() const; Builder& SetExpandForSpecialElements(bool); Builder& SetMergeBlocksAfterDelete(bool); Builder& SetSanitizeMarkup(bool); Builder& SetSmartDelete(bool); private: DeleteSelectionOptions options_; DISALLOW_COPY_AND_ASSIGN(Builder); }; } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_CORE_EDITING_COMMANDS_DELETE_SELECTION_OPTIONS_H_
null
null
null
null
32,616
29,368
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
194,363
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Voltage regulator support for AMS AS3722 PMIC * * Copyright (C) 2013 ams * * Author: Florian Lobmaier <[email protected]> * Author: Laxman Dewangan <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mfd/as3722.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/regulator/of_regulator.h> #include <linux/slab.h> /* Regulator IDs */ enum as3722_regulators_id { AS3722_REGULATOR_ID_SD0, AS3722_REGULATOR_ID_SD1, AS3722_REGULATOR_ID_SD2, AS3722_REGULATOR_ID_SD3, AS3722_REGULATOR_ID_SD4, AS3722_REGULATOR_ID_SD5, AS3722_REGULATOR_ID_SD6, AS3722_REGULATOR_ID_LDO0, AS3722_REGULATOR_ID_LDO1, AS3722_REGULATOR_ID_LDO2, AS3722_REGULATOR_ID_LDO3, AS3722_REGULATOR_ID_LDO4, AS3722_REGULATOR_ID_LDO5, AS3722_REGULATOR_ID_LDO6, AS3722_REGULATOR_ID_LDO7, AS3722_REGULATOR_ID_LDO9, AS3722_REGULATOR_ID_LDO10, AS3722_REGULATOR_ID_LDO11, AS3722_REGULATOR_ID_MAX, }; struct as3722_register_mapping { u8 regulator_id; const char *name; const char *sname; u8 vsel_reg; u8 vsel_mask; int n_voltages; u32 enable_reg; u8 enable_mask; u32 control_reg; u8 mode_mask; u32 sleep_ctrl_reg; u8 sleep_ctrl_mask; }; struct 
as3722_regulator_config_data { struct regulator_init_data *reg_init; bool enable_tracking; int ext_control; }; struct as3722_regulators { struct device *dev; struct as3722 *as3722; struct regulator_dev *rdevs[AS3722_REGULATOR_ID_MAX]; struct regulator_desc desc[AS3722_REGULATOR_ID_MAX]; struct as3722_regulator_config_data reg_config_data[AS3722_REGULATOR_ID_MAX]; }; static const struct as3722_register_mapping as3722_reg_lookup[] = { { .regulator_id = AS3722_REGULATOR_ID_SD0, .name = "as3722-sd0", .vsel_reg = AS3722_SD0_VOLTAGE_REG, .vsel_mask = AS3722_SD_VSEL_MASK, .enable_reg = AS3722_SD_CONTROL_REG, .enable_mask = AS3722_SDn_CTRL(0), .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG, .sleep_ctrl_mask = AS3722_SD0_EXT_ENABLE_MASK, .control_reg = AS3722_SD0_CONTROL_REG, .mode_mask = AS3722_SD0_MODE_FAST, }, { .regulator_id = AS3722_REGULATOR_ID_SD1, .name = "as3722-sd1", .vsel_reg = AS3722_SD1_VOLTAGE_REG, .vsel_mask = AS3722_SD_VSEL_MASK, .enable_reg = AS3722_SD_CONTROL_REG, .enable_mask = AS3722_SDn_CTRL(1), .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG, .sleep_ctrl_mask = AS3722_SD1_EXT_ENABLE_MASK, .control_reg = AS3722_SD1_CONTROL_REG, .mode_mask = AS3722_SD1_MODE_FAST, }, { .regulator_id = AS3722_REGULATOR_ID_SD2, .name = "as3722-sd2", .sname = "vsup-sd2", .vsel_reg = AS3722_SD2_VOLTAGE_REG, .vsel_mask = AS3722_SD_VSEL_MASK, .enable_reg = AS3722_SD_CONTROL_REG, .enable_mask = AS3722_SDn_CTRL(2), .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG, .sleep_ctrl_mask = AS3722_SD2_EXT_ENABLE_MASK, .control_reg = AS3722_SD23_CONTROL_REG, .mode_mask = AS3722_SD2_MODE_FAST, .n_voltages = AS3722_SD2_VSEL_MAX + 1, }, { .regulator_id = AS3722_REGULATOR_ID_SD3, .name = "as3722-sd3", .sname = "vsup-sd3", .vsel_reg = AS3722_SD3_VOLTAGE_REG, .vsel_mask = AS3722_SD_VSEL_MASK, .enable_reg = AS3722_SD_CONTROL_REG, .enable_mask = AS3722_SDn_CTRL(3), .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG, .sleep_ctrl_mask = AS3722_SD3_EXT_ENABLE_MASK, .control_reg = AS3722_SD23_CONTROL_REG, .mode_mask = 
AS3722_SD3_MODE_FAST, .n_voltages = AS3722_SD2_VSEL_MAX + 1, }, { .regulator_id = AS3722_REGULATOR_ID_SD4, .name = "as3722-sd4", .sname = "vsup-sd4", .vsel_reg = AS3722_SD4_VOLTAGE_REG, .vsel_mask = AS3722_SD_VSEL_MASK, .enable_reg = AS3722_SD_CONTROL_REG, .enable_mask = AS3722_SDn_CTRL(4), .sleep_ctrl_reg = AS3722_ENABLE_CTRL2_REG, .sleep_ctrl_mask = AS3722_SD4_EXT_ENABLE_MASK, .control_reg = AS3722_SD4_CONTROL_REG, .mode_mask = AS3722_SD4_MODE_FAST, .n_voltages = AS3722_SD2_VSEL_MAX + 1, }, { .regulator_id = AS3722_REGULATOR_ID_SD5, .name = "as3722-sd5", .sname = "vsup-sd5", .vsel_reg = AS3722_SD5_VOLTAGE_REG, .vsel_mask = AS3722_SD_VSEL_MASK, .enable_reg = AS3722_SD_CONTROL_REG, .enable_mask = AS3722_SDn_CTRL(5), .sleep_ctrl_reg = AS3722_ENABLE_CTRL2_REG, .sleep_ctrl_mask = AS3722_SD5_EXT_ENABLE_MASK, .control_reg = AS3722_SD5_CONTROL_REG, .mode_mask = AS3722_SD5_MODE_FAST, .n_voltages = AS3722_SD2_VSEL_MAX + 1, }, { .regulator_id = AS3722_REGULATOR_ID_SD6, .name = "as3722-sd6", .vsel_reg = AS3722_SD6_VOLTAGE_REG, .vsel_mask = AS3722_SD_VSEL_MASK, .enable_reg = AS3722_SD_CONTROL_REG, .enable_mask = AS3722_SDn_CTRL(6), .sleep_ctrl_reg = AS3722_ENABLE_CTRL2_REG, .sleep_ctrl_mask = AS3722_SD6_EXT_ENABLE_MASK, .control_reg = AS3722_SD6_CONTROL_REG, .mode_mask = AS3722_SD6_MODE_FAST, }, { .regulator_id = AS3722_REGULATOR_ID_LDO0, .name = "as3722-ldo0", .sname = "vin-ldo0", .vsel_reg = AS3722_LDO0_VOLTAGE_REG, .vsel_mask = AS3722_LDO0_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO0_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG, .sleep_ctrl_mask = AS3722_LDO0_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO0_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO1, .name = "as3722-ldo1", .sname = "vin-ldo1-6", .vsel_reg = AS3722_LDO1_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO1_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG, .sleep_ctrl_mask = AS3722_LDO1_EXT_ENABLE_MASK, 
.n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO2, .name = "as3722-ldo2", .sname = "vin-ldo2-5-7", .vsel_reg = AS3722_LDO2_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO2_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG, .sleep_ctrl_mask = AS3722_LDO2_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO3, .name = "as3722-ldo3", .sname = "vin-ldo3-4", .vsel_reg = AS3722_LDO3_VOLTAGE_REG, .vsel_mask = AS3722_LDO3_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO3_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG, .sleep_ctrl_mask = AS3722_LDO3_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO3_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO4, .name = "as3722-ldo4", .sname = "vin-ldo3-4", .vsel_reg = AS3722_LDO4_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO4_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG, .sleep_ctrl_mask = AS3722_LDO4_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO5, .name = "as3722-ldo5", .sname = "vin-ldo2-5-7", .vsel_reg = AS3722_LDO5_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO5_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG, .sleep_ctrl_mask = AS3722_LDO5_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO6, .name = "as3722-ldo6", .sname = "vin-ldo1-6", .vsel_reg = AS3722_LDO6_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO6_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG, .sleep_ctrl_mask = AS3722_LDO6_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO7, .name = "as3722-ldo7", .sname = "vin-ldo2-5-7", .vsel_reg = AS3722_LDO7_VOLTAGE_REG, .vsel_mask = 
AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO7_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG, .sleep_ctrl_mask = AS3722_LDO7_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO9, .name = "as3722-ldo9", .sname = "vin-ldo9-10", .vsel_reg = AS3722_LDO9_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL1_REG, .enable_mask = AS3722_LDO9_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL5_REG, .sleep_ctrl_mask = AS3722_LDO9_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO10, .name = "as3722-ldo10", .sname = "vin-ldo9-10", .vsel_reg = AS3722_LDO10_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL1_REG, .enable_mask = AS3722_LDO10_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL5_REG, .sleep_ctrl_mask = AS3722_LDO10_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO11, .name = "as3722-ldo11", .sname = "vin-ldo11", .vsel_reg = AS3722_LDO11_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL1_REG, .enable_mask = AS3722_LDO11_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL5_REG, .sleep_ctrl_mask = AS3722_LDO11_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, }; static const int as3722_ldo_current[] = { 150000, 300000 }; static const int as3722_sd016_current[] = { 2500000, 3000000, 3500000 }; static int as3722_current_to_index(int min_uA, int max_uA, const int *curr_table, int n_currents) { int i; for (i = n_currents - 1; i >= 0; i--) { if ((min_uA <= curr_table[i]) && (curr_table[i] <= max_uA)) return i; } return -EINVAL; } static int as3722_ldo_get_current_limit(struct regulator_dev *rdev) { struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev); struct as3722 *as3722 = as3722_regs->as3722; int id = rdev_get_id(rdev); u32 val; int ret; ret = as3722_read(as3722, as3722_reg_lookup[id].vsel_reg, &val); if (ret < 0) { 
dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n", as3722_reg_lookup[id].vsel_reg, ret); return ret; } if (val & AS3722_LDO_ILIMIT_MASK) return 300000; return 150000; } static int as3722_ldo_set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA) { struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev); struct as3722 *as3722 = as3722_regs->as3722; int id = rdev_get_id(rdev); int ret; u32 reg = 0; ret = as3722_current_to_index(min_uA, max_uA, as3722_ldo_current, ARRAY_SIZE(as3722_ldo_current)); if (ret < 0) { dev_err(as3722_regs->dev, "Current range min:max = %d:%d does not support\n", min_uA, max_uA); return ret; } if (ret) reg = AS3722_LDO_ILIMIT_BIT; return as3722_update_bits(as3722, as3722_reg_lookup[id].vsel_reg, AS3722_LDO_ILIMIT_MASK, reg); } static const struct regulator_ops as3722_ldo0_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .list_voltage = regulator_list_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_current_limit = as3722_ldo_get_current_limit, .set_current_limit = as3722_ldo_set_current_limit, }; static const struct regulator_ops as3722_ldo0_extcntrl_ops = { .list_voltage = regulator_list_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_current_limit = as3722_ldo_get_current_limit, .set_current_limit = as3722_ldo_set_current_limit, }; static int as3722_ldo3_set_tracking_mode(struct as3722_regulators *as3722_reg, int id, u8 mode) { struct as3722 *as3722 = as3722_reg->as3722; switch (mode) { case AS3722_LDO3_MODE_PMOS: case AS3722_LDO3_MODE_PMOS_TRACKING: case AS3722_LDO3_MODE_NMOS: case AS3722_LDO3_MODE_SWITCH: return as3722_update_bits(as3722, as3722_reg_lookup[id].vsel_reg, AS3722_LDO3_MODE_MASK, mode); default: return -EINVAL; } } static int as3722_ldo3_get_current_limit(struct 
regulator_dev *rdev) { return 150000; } static const struct regulator_ops as3722_ldo3_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .list_voltage = regulator_list_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_current_limit = as3722_ldo3_get_current_limit, }; static const struct regulator_ops as3722_ldo3_extcntrl_ops = { .list_voltage = regulator_list_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_current_limit = as3722_ldo3_get_current_limit, }; static const struct regulator_ops as3722_ldo6_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .map_voltage = regulator_map_voltage_linear_range, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear_range, .get_current_limit = as3722_ldo_get_current_limit, .set_current_limit = as3722_ldo_set_current_limit, .get_bypass = regulator_get_bypass_regmap, .set_bypass = regulator_set_bypass_regmap, }; static const struct regulator_ops as3722_ldo6_extcntrl_ops = { .map_voltage = regulator_map_voltage_linear_range, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear_range, .get_current_limit = as3722_ldo_get_current_limit, .set_current_limit = as3722_ldo_set_current_limit, .get_bypass = regulator_get_bypass_regmap, .set_bypass = regulator_set_bypass_regmap, }; static const struct regulator_linear_range as3722_ldo_ranges[] = { REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0), REGULATOR_LINEAR_RANGE(825000, 0x01, 0x24, 25000), REGULATOR_LINEAR_RANGE(1725000, 0x40, 0x7F, 25000), }; static const struct regulator_ops as3722_ldo_ops = { 
.is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .map_voltage = regulator_map_voltage_linear_range, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear_range, .get_current_limit = as3722_ldo_get_current_limit, .set_current_limit = as3722_ldo_set_current_limit, }; static const struct regulator_ops as3722_ldo_extcntrl_ops = { .map_voltage = regulator_map_voltage_linear_range, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear_range, .get_current_limit = as3722_ldo_get_current_limit, .set_current_limit = as3722_ldo_set_current_limit, }; static unsigned int as3722_sd_get_mode(struct regulator_dev *rdev) { struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev); struct as3722 *as3722 = as3722_regs->as3722; int id = rdev_get_id(rdev); u32 val; int ret; if (!as3722_reg_lookup[id].control_reg) return -ENOTSUPP; ret = as3722_read(as3722, as3722_reg_lookup[id].control_reg, &val); if (ret < 0) { dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n", as3722_reg_lookup[id].control_reg, ret); return ret; } if (val & as3722_reg_lookup[id].mode_mask) return REGULATOR_MODE_FAST; else return REGULATOR_MODE_NORMAL; } static int as3722_sd_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev); struct as3722 *as3722 = as3722_regs->as3722; u8 id = rdev_get_id(rdev); u8 val = 0; int ret; if (!as3722_reg_lookup[id].control_reg) return -ERANGE; switch (mode) { case REGULATOR_MODE_FAST: val = as3722_reg_lookup[id].mode_mask; case REGULATOR_MODE_NORMAL: /* fall down */ break; default: return -EINVAL; } ret = as3722_update_bits(as3722, as3722_reg_lookup[id].control_reg, as3722_reg_lookup[id].mode_mask, val); if (ret < 0) { dev_err(as3722_regs->dev, "Reg 
0x%02x update failed: %d\n", as3722_reg_lookup[id].control_reg, ret); return ret; } return ret; } static int as3722_sd016_get_current_limit(struct regulator_dev *rdev) { struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev); struct as3722 *as3722 = as3722_regs->as3722; int id = rdev_get_id(rdev); u32 val, reg; int mask; int ret; switch (id) { case AS3722_REGULATOR_ID_SD0: reg = AS3722_OVCURRENT_REG; mask = AS3722_OVCURRENT_SD0_TRIP_MASK; break; case AS3722_REGULATOR_ID_SD1: reg = AS3722_OVCURRENT_REG; mask = AS3722_OVCURRENT_SD1_TRIP_MASK; break; case AS3722_REGULATOR_ID_SD6: reg = AS3722_OVCURRENT_DEB_REG; mask = AS3722_OVCURRENT_SD6_TRIP_MASK; break; default: return -EINVAL; } ret = as3722_read(as3722, reg, &val); if (ret < 0) { dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n", reg, ret); return ret; } val &= mask; val >>= ffs(mask) - 1; if (val == 3) return -EINVAL; return as3722_sd016_current[val]; } static int as3722_sd016_set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA) { struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev); struct as3722 *as3722 = as3722_regs->as3722; int id = rdev_get_id(rdev); int ret; int val; int mask; u32 reg; ret = as3722_current_to_index(min_uA, max_uA, as3722_sd016_current, ARRAY_SIZE(as3722_sd016_current)); if (ret < 0) { dev_err(as3722_regs->dev, "Current range min:max = %d:%d does not support\n", min_uA, max_uA); return ret; } switch (id) { case AS3722_REGULATOR_ID_SD0: reg = AS3722_OVCURRENT_REG; mask = AS3722_OVCURRENT_SD0_TRIP_MASK; break; case AS3722_REGULATOR_ID_SD1: reg = AS3722_OVCURRENT_REG; mask = AS3722_OVCURRENT_SD1_TRIP_MASK; break; case AS3722_REGULATOR_ID_SD6: reg = AS3722_OVCURRENT_DEB_REG; mask = AS3722_OVCURRENT_SD6_TRIP_MASK; break; default: return -EINVAL; } ret <<= ffs(mask) - 1; val = ret & mask; return as3722_update_bits(as3722, reg, mask, val); } static bool as3722_sd0_is_low_voltage(struct as3722_regulators *as3722_regs) { int err; unsigned val; err = 
as3722_read(as3722_regs->as3722, AS3722_FUSE7_REG, &val); if (err < 0) { dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n", AS3722_FUSE7_REG, err); return false; } if (val & AS3722_FUSE7_SD0_LOW_VOLTAGE) return true; return false; } static const struct regulator_linear_range as3722_sd2345_ranges[] = { REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0), REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500), REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000), REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7F, 50000), }; static const struct regulator_ops as3722_sd016_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_current_limit = as3722_sd016_get_current_limit, .set_current_limit = as3722_sd016_set_current_limit, .get_mode = as3722_sd_get_mode, .set_mode = as3722_sd_set_mode, }; static const struct regulator_ops as3722_sd016_extcntrl_ops = { .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_current_limit = as3722_sd016_get_current_limit, .set_current_limit = as3722_sd016_set_current_limit, .get_mode = as3722_sd_get_mode, .set_mode = as3722_sd_set_mode, }; static const struct regulator_ops as3722_sd2345_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .list_voltage = regulator_list_voltage_linear_range, .map_voltage = regulator_map_voltage_linear_range, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .get_mode = as3722_sd_get_mode, .set_mode = as3722_sd_set_mode, }; static const struct regulator_ops as3722_sd2345_extcntrl_ops = { 
.list_voltage = regulator_list_voltage_linear_range, .map_voltage = regulator_map_voltage_linear_range, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .get_mode = as3722_sd_get_mode, .set_mode = as3722_sd_set_mode, }; static int as3722_extreg_init(struct as3722_regulators *as3722_regs, int id, int ext_pwr_ctrl) { int ret; unsigned int val; if ((ext_pwr_ctrl < AS3722_EXT_CONTROL_ENABLE1) || (ext_pwr_ctrl > AS3722_EXT_CONTROL_ENABLE3)) return -EINVAL; val = ext_pwr_ctrl << (ffs(as3722_reg_lookup[id].sleep_ctrl_mask) - 1); ret = as3722_update_bits(as3722_regs->as3722, as3722_reg_lookup[id].sleep_ctrl_reg, as3722_reg_lookup[id].sleep_ctrl_mask, val); if (ret < 0) dev_err(as3722_regs->dev, "Reg 0x%02x update failed: %d\n", as3722_reg_lookup[id].sleep_ctrl_reg, ret); return ret; } static struct of_regulator_match as3722_regulator_matches[] = { { .name = "sd0", }, { .name = "sd1", }, { .name = "sd2", }, { .name = "sd3", }, { .name = "sd4", }, { .name = "sd5", }, { .name = "sd6", }, { .name = "ldo0", }, { .name = "ldo1", }, { .name = "ldo2", }, { .name = "ldo3", }, { .name = "ldo4", }, { .name = "ldo5", }, { .name = "ldo6", }, { .name = "ldo7", }, { .name = "ldo9", }, { .name = "ldo10", }, { .name = "ldo11", }, }; static int as3722_get_regulator_dt_data(struct platform_device *pdev, struct as3722_regulators *as3722_regs) { struct device_node *np; struct as3722_regulator_config_data *reg_config; u32 prop; int id; int ret; np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators"); if (!np) { dev_err(&pdev->dev, "Device is not having regulators node\n"); return -ENODEV; } pdev->dev.of_node = np; ret = of_regulator_match(&pdev->dev, np, as3722_regulator_matches, ARRAY_SIZE(as3722_regulator_matches)); of_node_put(np); if (ret < 0) { dev_err(&pdev->dev, "Parsing of regulator node failed: %d\n", ret); return ret; } for (id = 0; id < ARRAY_SIZE(as3722_regulator_matches); ++id) { struct device_node *reg_node; 
reg_config = &as3722_regs->reg_config_data[id]; reg_config->reg_init = as3722_regulator_matches[id].init_data; reg_node = as3722_regulator_matches[id].of_node; if (!reg_config->reg_init || !reg_node) continue; ret = of_property_read_u32(reg_node, "ams,ext-control", &prop); if (!ret) { if (prop < 3) reg_config->ext_control = prop; else dev_warn(&pdev->dev, "ext-control have invalid option: %u\n", prop); } reg_config->enable_tracking = of_property_read_bool(reg_node, "ams,enable-tracking"); } return 0; } static int as3722_regulator_probe(struct platform_device *pdev) { struct as3722 *as3722 = dev_get_drvdata(pdev->dev.parent); struct as3722_regulators *as3722_regs; struct as3722_regulator_config_data *reg_config; struct regulator_dev *rdev; struct regulator_config config = { }; const struct regulator_ops *ops; int id; int ret; as3722_regs = devm_kzalloc(&pdev->dev, sizeof(*as3722_regs), GFP_KERNEL); if (!as3722_regs) return -ENOMEM; as3722_regs->dev = &pdev->dev; as3722_regs->as3722 = as3722; platform_set_drvdata(pdev, as3722_regs); ret = as3722_get_regulator_dt_data(pdev, as3722_regs); if (ret < 0) return ret; config.dev = &pdev->dev; config.driver_data = as3722_regs; config.regmap = as3722->regmap; for (id = 0; id < AS3722_REGULATOR_ID_MAX; id++) { reg_config = &as3722_regs->reg_config_data[id]; as3722_regs->desc[id].name = as3722_reg_lookup[id].name; as3722_regs->desc[id].supply_name = as3722_reg_lookup[id].sname; as3722_regs->desc[id].id = as3722_reg_lookup[id].regulator_id; as3722_regs->desc[id].n_voltages = as3722_reg_lookup[id].n_voltages; as3722_regs->desc[id].type = REGULATOR_VOLTAGE; as3722_regs->desc[id].owner = THIS_MODULE; as3722_regs->desc[id].enable_reg = as3722_reg_lookup[id].enable_reg; as3722_regs->desc[id].enable_mask = as3722_reg_lookup[id].enable_mask; as3722_regs->desc[id].vsel_reg = as3722_reg_lookup[id].vsel_reg; as3722_regs->desc[id].vsel_mask = as3722_reg_lookup[id].vsel_mask; switch (id) { case AS3722_REGULATOR_ID_LDO0: if 
(reg_config->ext_control) ops = &as3722_ldo0_extcntrl_ops; else ops = &as3722_ldo0_ops; as3722_regs->desc[id].min_uV = 825000; as3722_regs->desc[id].uV_step = 25000; as3722_regs->desc[id].linear_min_sel = 1; as3722_regs->desc[id].enable_time = 500; break; case AS3722_REGULATOR_ID_LDO3: if (reg_config->ext_control) ops = &as3722_ldo3_extcntrl_ops; else ops = &as3722_ldo3_ops; as3722_regs->desc[id].min_uV = 620000; as3722_regs->desc[id].uV_step = 20000; as3722_regs->desc[id].linear_min_sel = 1; as3722_regs->desc[id].enable_time = 500; if (reg_config->enable_tracking) { ret = as3722_ldo3_set_tracking_mode(as3722_regs, id, AS3722_LDO3_MODE_PMOS_TRACKING); if (ret < 0) { dev_err(&pdev->dev, "LDO3 tracking failed: %d\n", ret); return ret; } } break; case AS3722_REGULATOR_ID_LDO6: if (reg_config->ext_control) ops = &as3722_ldo6_extcntrl_ops; else ops = &as3722_ldo6_ops; as3722_regs->desc[id].enable_time = 500; as3722_regs->desc[id].bypass_reg = AS3722_LDO6_VOLTAGE_REG; as3722_regs->desc[id].bypass_mask = AS3722_LDO_VSEL_MASK; as3722_regs->desc[id].bypass_val_on = AS3722_LDO6_VSEL_BYPASS; as3722_regs->desc[id].bypass_val_off = AS3722_LDO6_VSEL_BYPASS; as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges; as3722_regs->desc[id].n_linear_ranges = ARRAY_SIZE(as3722_ldo_ranges); break; case AS3722_REGULATOR_ID_SD0: case AS3722_REGULATOR_ID_SD1: case AS3722_REGULATOR_ID_SD6: if (reg_config->ext_control) ops = &as3722_sd016_extcntrl_ops; else ops = &as3722_sd016_ops; if (id == AS3722_REGULATOR_ID_SD0 && as3722_sd0_is_low_voltage(as3722_regs)) { as3722_regs->desc[id].n_voltages = AS3722_SD0_VSEL_LOW_VOL_MAX + 1; as3722_regs->desc[id].min_uV = 410000; } else { as3722_regs->desc[id].n_voltages = AS3722_SD0_VSEL_MAX + 1, as3722_regs->desc[id].min_uV = 610000; } as3722_regs->desc[id].uV_step = 10000; as3722_regs->desc[id].linear_min_sel = 1; as3722_regs->desc[id].enable_time = 600; break; case AS3722_REGULATOR_ID_SD2: case AS3722_REGULATOR_ID_SD3: case AS3722_REGULATOR_ID_SD4: case 
AS3722_REGULATOR_ID_SD5: if (reg_config->ext_control) ops = &as3722_sd2345_extcntrl_ops; else ops = &as3722_sd2345_ops; as3722_regs->desc[id].linear_ranges = as3722_sd2345_ranges; as3722_regs->desc[id].n_linear_ranges = ARRAY_SIZE(as3722_sd2345_ranges); break; default: if (reg_config->ext_control) ops = &as3722_ldo_extcntrl_ops; else ops = &as3722_ldo_ops; as3722_regs->desc[id].enable_time = 500; as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges; as3722_regs->desc[id].n_linear_ranges = ARRAY_SIZE(as3722_ldo_ranges); break; } as3722_regs->desc[id].ops = ops; config.init_data = reg_config->reg_init; config.of_node = as3722_regulator_matches[id].of_node; rdev = devm_regulator_register(&pdev->dev, &as3722_regs->desc[id], &config); if (IS_ERR(rdev)) { ret = PTR_ERR(rdev); dev_err(&pdev->dev, "regulator %d register failed %d\n", id, ret); return ret; } as3722_regs->rdevs[id] = rdev; if (reg_config->ext_control) { ret = regulator_enable_regmap(rdev); if (ret < 0) { dev_err(&pdev->dev, "Regulator %d enable failed: %d\n", id, ret); return ret; } ret = as3722_extreg_init(as3722_regs, id, reg_config->ext_control); if (ret < 0) { dev_err(&pdev->dev, "AS3722 ext control failed: %d", ret); return ret; } } } return 0; } static const struct of_device_id of_as3722_regulator_match[] = { { .compatible = "ams,as3722-regulator", }, {}, }; MODULE_DEVICE_TABLE(of, of_as3722_regulator_match); static struct platform_driver as3722_regulator_driver = { .driver = { .name = "as3722-regulator", .of_match_table = of_as3722_regulator_match, }, .probe = as3722_regulator_probe, }; module_platform_driver(as3722_regulator_driver); MODULE_ALIAS("platform:as3722-regulator"); MODULE_DESCRIPTION("AS3722 regulator driver"); MODULE_AUTHOR("Florian Lobmaier <[email protected]>"); MODULE_AUTHOR("Laxman Dewangan <[email protected]>"); MODULE_LICENSE("GPL");
null
null
null
null
102,710
2,898
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
167,893
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef __NET_WIRELESS_REG_H #define __NET_WIRELESS_REG_H /* * Copyright 2008-2011 Luis R. Rodriguez <[email protected]> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ enum ieee80211_regd_source { REGD_SOURCE_INTERNAL_DB, REGD_SOURCE_CRDA, }; extern const struct ieee80211_regdomain __rcu *cfg80211_regdomain; bool reg_is_valid_request(const char *alpha2); bool is_world_regdom(const char *alpha2); bool reg_supported_dfs_region(enum nl80211_dfs_regions dfs_region); enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy); int regulatory_hint_user(const char *alpha2, enum nl80211_user_reg_hint_type user_reg_hint_type); /** * regulatory_hint_indoor - hint operation in indoor env. or not * @is_indoor: if true indicates that user space thinks that the * device is operating in an indoor environment. * @portid: the netlink port ID on which the hint was given. 
*/ int regulatory_hint_indoor(bool is_indoor, u32 portid); /** * regulatory_netlink_notify - notify on released netlink socket * @portid: the netlink socket port ID */ void regulatory_netlink_notify(u32 portid); void wiphy_regulatory_register(struct wiphy *wiphy); void wiphy_regulatory_deregister(struct wiphy *wiphy); int __init regulatory_init(void); void regulatory_exit(void); int set_regdom(const struct ieee80211_regdomain *rd, enum ieee80211_regd_source regd_src); unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd, const struct ieee80211_reg_rule *rule); bool reg_last_request_cell_base(void); const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy); /** * regulatory_hint_found_beacon - hints a beacon was found on a channel * @wiphy: the wireless device where the beacon was found on * @beacon_chan: the channel on which the beacon was found on * @gfp: context flags * * This informs the wireless core that a beacon from an AP was found on * the channel provided. This allows the wireless core to make educated * guesses on regulatory to help with world roaming. This is only used for * world roaming -- when we do not know our current location. This is * only useful on channels 12, 13 and 14 on the 2 GHz band as channels * 1-11 are already enabled by the world regulatory domain; and on * non-radar 5 GHz channels. * * Drivers do not need to call this, cfg80211 will do it for after a scan * on a newly found BSS. If you cannot make use of this feature you can * set the wiphy->disable_beacon_hints to true. */ int regulatory_hint_found_beacon(struct wiphy *wiphy, struct ieee80211_channel *beacon_chan, gfp_t gfp); /** * regulatory_hint_country_ie - hints a country IE as a regulatory domain * @wiphy: the wireless device giving the hint (used only for reporting * conflicts) * @band: the band on which the country IE was received on. This determines * the band we'll process the country IE channel triplets for. 
* @country_ie: pointer to the country IE * @country_ie_len: length of the country IE * * We will intersect the rd with the what CRDA tells us should apply * for the alpha2 this country IE belongs to, this prevents APs from * sending us incorrect or outdated information against a country. * * The AP is expected to provide Country IE channel triplets for the * band it is on. It is technically possible for APs to send channel * country IE triplets even for channels outside of the band they are * in but for that they would have to use the regulatory extension * in combination with a triplet but this behaviour is currently * not observed. For this reason if a triplet is seen with channel * information for a band the BSS is not present in it will be ignored. */ void regulatory_hint_country_ie(struct wiphy *wiphy, enum nl80211_band band, const u8 *country_ie, u8 country_ie_len); /** * regulatory_hint_disconnect - informs all devices have been disconneted * * Regulotory rules can be enhanced further upon scanning and upon * connection to an AP. These rules become stale if we disconnect * and go to another country, whether or not we suspend and resume. * If we suspend, go to another country and resume we'll automatically * get disconnected shortly after resuming and things will be reset as well. * This routine is a helper to restore regulatory settings to how they were * prior to our first connect attempt. This includes ignoring country IE and * beacon regulatory hints. The ieee80211_regdom module parameter will always * be respected but if a user had set the regulatory domain that will take * precedence. * * Must be called from process context. */ void regulatory_hint_disconnect(void); /** * cfg80211_get_unii - get the U-NII band for the frequency * @freq: the frequency for which we want to get the UNII band. * Get a value specifying the U-NII band frequency belongs to. * U-NII bands are defined by the FCC in C.F.R 47 part 15. 
* * Returns -EINVAL if freq is invalid, 0 for UNII-1, 1 for UNII-2A, * 2 for UNII-2B, 3 for UNII-2C and 4 for UNII-3. */ int cfg80211_get_unii(int freq); /** * regulatory_indoor_allowed - is indoor operation allowed */ bool regulatory_indoor_allowed(void); #endif /* __NET_WIRELESS_REG_H */
null
null
null
null
76,241
4,819
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
4,819
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef IOS_CHROME_BROWSER_UI_OVERLAYS_OVERLAY_QUEUE_H_ #define IOS_CHROME_BROWSER_UI_OVERLAYS_OVERLAY_QUEUE_H_ #import <Foundation/Foundation.h> #include <vector> #include "base/observer_list.h" #import "ios/chrome/browser/ui/overlays/overlay_queue_observer.h" @class OverlayCoordinator; namespace web { class WebState; } // Class used to enqueue OverlayCoordinators. It communicates changes in the // queue to registered OverlayQueueObservers. class OverlayQueue { public: virtual ~OverlayQueue(); // Adds and removes OverlayQueueObservers. void AddObserver(OverlayQueueObserver* observer); void RemoveObserver(OverlayQueueObserver* observer); // Starts the next overlay in the queue. If GetWebState() returns non-null, // it is expected that its content area is visible before this is called. virtual void StartNextOverlay() = 0; // Tells the OverlayQueue that |overlay_coordinator| was stopped. virtual void OverlayWasStopped(OverlayCoordinator* overlay_coordinator); // Removes the currently displayed overlay and adds |overlay_coordinator| to // the front of the queue to be displayed immediately. virtual void ReplaceVisibleOverlay(OverlayCoordinator* overlay_coordinator); // Returns whether there are any queued overlays. bool HasQueuedOverlays() const; // Returns whether an overlay is curently started. bool IsShowingOverlay() const; // Cancels all queued overlays for this queue. If one is being displayed, it // will also be stopped void CancelOverlays(); // Some OverlayQueues require that a particular WebState's content area is // visible before its queued BrowserCoordinators can be started. If this // queue's overlays require showing a WebState, this function will return that // WebState. 
virtual web::WebState* GetWebState() const; protected: // Adds |overlay_coordinator| to the queue and schedules its presentation. void AddOverlay(OverlayCoordinator* overlay_coordinator); // Returns the number of overlays in the queue. NSUInteger GetCount() const; // Returns the first BrowserCoordinator in the queue. OverlayCoordinator* GetFirstOverlay(); // Called when the first overlay in the queue is started. void OverlayWasStarted(); // Default constructor. OverlayQueue(); private: // The observers for this queue. base::ObserverList<OverlayQueueObserver> observers_; // The queue of overlays that were added for this WebState. __strong NSMutableArray<OverlayCoordinator*>* overlays_; // Whether an overlay is currently started. If this is true, the first // BrowserCoordinator in |overlays_| has been started. bool showing_overlay_; DISALLOW_COPY_AND_ASSIGN(OverlayQueue); }; #endif // IOS_CHROME_BROWSER_UI_OVERLAYS_OVERLAY_QUEUE_H_
null
null
null
null
1,682
47,114
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
47,114
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CC_TEST_FAKE_PAINTED_SCROLLBAR_LAYER_H_ #define CC_TEST_FAKE_PAINTED_SCROLLBAR_LAYER_H_ #include <stddef.h> #include <memory> #include "cc/layers/painted_scrollbar_layer.h" #include "cc/test/fake_scrollbar.h" namespace base { template<typename T> class AutoReset; } namespace cc { class FakePaintedScrollbarLayer : public PaintedScrollbarLayer { public: static scoped_refptr<FakePaintedScrollbarLayer> Create( bool paint_during_update, bool has_thumb, ElementId scrolling_element_id = ElementId()); static scoped_refptr<FakePaintedScrollbarLayer> Create( bool paint_during_update, bool has_thumb, ScrollbarOrientation orientation, bool is_left_side_vertical_scrollbar, bool is_overlay, ElementId scrolling_element_id = ElementId()); int update_count() const { return update_count_; } void reset_update_count() { update_count_ = 0; } bool Update() override; void PushPropertiesTo(LayerImpl* layer) override; std::unique_ptr<base::AutoReset<bool>> IgnoreSetNeedsCommit(); size_t push_properties_count() const { return push_properties_count_; } void reset_push_properties_count() { push_properties_count_ = 0; } // For unit tests UIResourceId track_resource_id() { return PaintedScrollbarLayer::track_resource_id(); } UIResourceId thumb_resource_id() { return PaintedScrollbarLayer::thumb_resource_id(); } FakeScrollbar* fake_scrollbar() { return fake_scrollbar_; } using PaintedScrollbarLayer::UpdateInternalContentScale; using PaintedScrollbarLayer::UpdateThumbAndTrackGeometry; private: FakePaintedScrollbarLayer(FakeScrollbar* fake_scrollbar, ElementId scrolling_element_id); ~FakePaintedScrollbarLayer() override; int update_count_; size_t push_properties_count_; FakeScrollbar* fake_scrollbar_; }; } // namespace cc #endif // CC_TEST_FAKE_PAINTED_SCROLLBAR_LAYER_H_
null
null
null
null
43,977
9,749
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
9,749
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROMECAST_BASE_METRICS_MOCK_CAST_METRICS_HELPER_H_ #define CHROMECAST_BASE_METRICS_MOCK_CAST_METRICS_HELPER_H_ #include <string> #include "base/macros.h" #include "chromecast/base/metrics/cast_metrics_helper.h" #include "testing/gmock/include/gmock/gmock.h" namespace chromecast { namespace metrics { class MockCastMetricsHelper : public CastMetricsHelper { public: MockCastMetricsHelper(); ~MockCastMetricsHelper() override; MOCK_METHOD2(UpdateCurrentAppInfo, void(const std::string& app_id, const std::string& session_id)); MOCK_METHOD1(UpdateSDKInfo, void(const std::string& sdk_version)); MOCK_METHOD0(LogMediaPlay, void()); MOCK_METHOD0(LogMediaPause, void()); MOCK_METHOD1(RecordSimpleAction, void(const std::string& action)); MOCK_METHOD2(RecordEventWithValue, void(const std::string& action, int value)); MOCK_METHOD1(RecordApplicationEvent, void(const std::string& event)); MOCK_METHOD2(RecordApplicationEventWithValue, void(const std::string& event, int value)); MOCK_METHOD0(LogTimeToFirstPaint, void()); MOCK_METHOD0(LogTimeToFirstAudio, void()); MOCK_METHOD2(LogTimeToBufferAv, void(BufferingType buffering_type, base::TimeDelta time)); MOCK_CONST_METHOD2(GetMetricsNameWithAppName, std::string(const std::string& prefix, const std::string& suffix)); MOCK_METHOD1(SetMetricsSink, void(MetricsSink* delegate)); MOCK_METHOD1(SetRecordActionCallback, void(RecordActionCallback callback)); private: DISALLOW_COPY_AND_ASSIGN(MockCastMetricsHelper); }; } // namespace metrics } // namespace chromecast #endif // CHROMECAST_BASE_METRICS_MOCK_CAST_METRICS_HELPER_H_
null
null
null
null
6,612
14,073
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
179,068
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * bitops.c: atomic operations which got too long to be inlined all over * the place. * * Copyright 1999 Philipp Rumpf ([email protected]) * Copyright 2000 Grant Grundler ([email protected]) */ #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/atomic.h> #ifdef CONFIG_SMP arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED }; #endif #ifdef CONFIG_64BIT unsigned long __xchg64(unsigned long x, unsigned long *ptr) { unsigned long temp, flags; _atomic_spin_lock_irqsave(ptr, flags); temp = *ptr; *ptr = x; _atomic_spin_unlock_irqrestore(ptr, flags); return temp; } #endif unsigned long __xchg32(int x, int *ptr) { unsigned long flags; long temp; _atomic_spin_lock_irqsave(ptr, flags); temp = (long) *ptr; /* XXX - sign extension wanted? */ *ptr = x; _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)temp; } unsigned long __xchg8(char x, char *ptr) { unsigned long flags; long temp; _atomic_spin_lock_irqsave(ptr, flags); temp = (long) *ptr; /* XXX - sign extension wanted? */ *ptr = x; _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)temp; } u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new) { unsigned long flags; u64 prev; _atomic_spin_lock_irqsave(ptr, flags); if ((prev = *ptr) == old) *ptr = new; _atomic_spin_unlock_irqrestore(ptr, flags); return prev; } unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new) { unsigned long flags; unsigned int prev; _atomic_spin_lock_irqsave(ptr, flags); if ((prev = *ptr) == old) *ptr = new; _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)prev; }
null
null
null
null
87,415
28,044
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
193,039
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Networking AIM - Networking Application Interface Module for MostCore * * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * This file is licensed under GPLv2. */ #ifndef _NETWORKING_H_ #define _NETWORKING_H_ #include "mostcore.h" void most_deliver_netinfo(struct most_interface *iface, unsigned char link_stat, unsigned char *mac_addr); #endif
null
null
null
null
101,386
46,908
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
46,908
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef GIN_PER_CONTEXT_DATA_H_ #define GIN_PER_CONTEXT_DATA_H_ #include "base/macros.h" #include "base/supports_user_data.h" #include "gin/gin_export.h" #include "v8/include/v8.h" namespace gin { class ContextHolder; class Runner; // There is one instance of PerContextData per v8::Context managed by Gin. This // class stores all the Gin-related data that varies per context. Arbitrary data // can be associated with this class by way of the SupportsUserData methods. // Instances of this class (and any associated user data) are destroyed before // the associated v8::Context. class GIN_EXPORT PerContextData : public base::SupportsUserData { public: PerContextData(ContextHolder* context_holder, v8::Local<v8::Context> context); ~PerContextData() override; // Can return NULL after the ContextHolder has detached from context. static PerContextData* From(v8::Local<v8::Context> context); // The Runner associated with this context. To execute script in this context, // please use the appropriate API on Runner. Runner* runner() const { return runner_; } void set_runner(Runner* runner) { runner_ = runner; } ContextHolder* context_holder() { return context_holder_; } private: ContextHolder* context_holder_; Runner* runner_; DISALLOW_COPY_AND_ASSIGN(PerContextData); }; } // namespace gin #endif // GIN_PER_CONTEXT_DATA_H_
null
null
null
null
43,771
29,861
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
194,856
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (c) 2008 Sascha Hauer <[email protected]>, Pengutronix * Copyright (c) 2009 Daniel Mack <[email protected]> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/usb/otg.h> #include <linux/usb/ulpi.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <linux/platform_data/usb-ehci-mxc.h> #include "ehci.h" #define DRIVER_DESC "Freescale On-Chip EHCI Host driver" static const char hcd_name[] = "ehci-mxc"; #define ULPI_VIEWPORT_OFFSET 0x170 struct ehci_mxc_priv { struct clk *usbclk, *ahbclk, *phyclk; }; static struct hc_driver __read_mostly ehci_mxc_hc_driver; static const struct ehci_driver_overrides ehci_mxc_overrides __initconst = { .extra_priv_size = sizeof(struct ehci_mxc_priv), }; static int ehci_mxc_drv_probe(struct platform_device *pdev) { struct mxc_usbh_platform_data *pdata = dev_get_platdata(&pdev->dev); struct usb_hcd *hcd; struct resource *res; int irq, ret; struct ehci_mxc_priv *priv; struct device *dev = &pdev->dev; struct ehci_hcd *ehci; if (!pdata) { dev_err(dev, "No platform data given, bailing out.\n"); return -EINVAL; } irq = platform_get_irq(pdev, 0); hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, 
dev_name(dev)); if (!hcd) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hcd->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(hcd->regs)) { ret = PTR_ERR(hcd->regs); goto err_alloc; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); hcd->has_tt = 1; ehci = hcd_to_ehci(hcd); priv = (struct ehci_mxc_priv *) ehci->priv; /* enable clocks */ priv->usbclk = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(priv->usbclk)) { ret = PTR_ERR(priv->usbclk); goto err_alloc; } clk_prepare_enable(priv->usbclk); priv->ahbclk = devm_clk_get(&pdev->dev, "ahb"); if (IS_ERR(priv->ahbclk)) { ret = PTR_ERR(priv->ahbclk); goto err_clk_ahb; } clk_prepare_enable(priv->ahbclk); /* "dr" device has its own clock on i.MX51 */ priv->phyclk = devm_clk_get(&pdev->dev, "phy"); if (IS_ERR(priv->phyclk)) priv->phyclk = NULL; if (priv->phyclk) clk_prepare_enable(priv->phyclk); /* call platform specific init function */ if (pdata->init) { ret = pdata->init(pdev); if (ret) { dev_err(dev, "platform init failed\n"); goto err_init; } /* platforms need some time to settle changed IO settings */ mdelay(10); } /* EHCI registers start at offset 0x100 */ ehci->caps = hcd->regs + 0x100; ehci->regs = hcd->regs + 0x100 + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); /* set up the PORTSCx register */ ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]); /* is this really needed? 
*/ msleep(10); /* Initialize the transceiver */ if (pdata->otg) { pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET; ret = usb_phy_init(pdata->otg); if (ret) { dev_err(dev, "unable to init transceiver, probably missing\n"); ret = -ENODEV; goto err_add; } ret = otg_set_vbus(pdata->otg->otg, 1); if (ret) { dev_err(dev, "unable to enable vbus on transceiver\n"); goto err_add; } } platform_set_drvdata(pdev, hcd); ret = usb_add_hcd(hcd, irq, IRQF_SHARED); if (ret) goto err_add; device_wakeup_enable(hcd->self.controller); return 0; err_add: if (pdata && pdata->exit) pdata->exit(pdev); err_init: if (priv->phyclk) clk_disable_unprepare(priv->phyclk); clk_disable_unprepare(priv->ahbclk); err_clk_ahb: clk_disable_unprepare(priv->usbclk); err_alloc: usb_put_hcd(hcd); return ret; } static int ehci_mxc_drv_remove(struct platform_device *pdev) { struct mxc_usbh_platform_data *pdata = dev_get_platdata(&pdev->dev); struct usb_hcd *hcd = platform_get_drvdata(pdev); struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct ehci_mxc_priv *priv = (struct ehci_mxc_priv *) ehci->priv; usb_remove_hcd(hcd); if (pdata && pdata->exit) pdata->exit(pdev); if (pdata && pdata->otg) usb_phy_shutdown(pdata->otg); clk_disable_unprepare(priv->usbclk); clk_disable_unprepare(priv->ahbclk); if (priv->phyclk) clk_disable_unprepare(priv->phyclk); usb_put_hcd(hcd); return 0; } MODULE_ALIAS("platform:mxc-ehci"); static struct platform_driver ehci_mxc_driver = { .probe = ehci_mxc_drv_probe, .remove = ehci_mxc_drv_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "mxc-ehci", }, }; static int __init ehci_mxc_init(void) { if (usb_disabled()) return -ENODEV; pr_info("%s: " DRIVER_DESC "\n", hcd_name); ehci_init_driver(&ehci_mxc_hc_driver, &ehci_mxc_overrides); return platform_driver_register(&ehci_mxc_driver); } module_init(ehci_mxc_init); static void __exit ehci_mxc_cleanup(void) { platform_driver_unregister(&ehci_mxc_driver); } module_exit(ehci_mxc_cleanup); MODULE_DESCRIPTION(DRIVER_DESC); 
MODULE_AUTHOR("Sascha Hauer"); MODULE_LICENSE("GPL");
null
null
null
null
103,203
50,647
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
50,647
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef UI_OZONE_PLATFORM_CAST_OZONE_PLATFORM_CAST_H #define UI_OZONE_PLATFORM_CAST_OZONE_PLATFORM_CAST_H namespace ui { class OzonePlatform; // Constructor hook for use in ozone_platform_list.cc OzonePlatform* CreateOzonePlatformCast(); } // namespace ui #endif // UI_OZONE_PLATFORM_CAST_OZONE_PLATFORM_CAST_H
null
null
null
null
47,510
44,769
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
44,769
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef PPAPI_PROXY_PLUGIN_DISPATCHER_H_ #define PPAPI_PROXY_PLUGIN_DISPATCHER_H_ #include <stdint.h> #include <set> #include <string> #include <unordered_map> #include "base/containers/hash_tables.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "base/memory/weak_ptr.h" #include "base/process/process.h" #include "build/build_config.h" #include "ipc/ipc_sync_channel.h" #include "ppapi/c/pp_instance.h" #include "ppapi/c/pp_rect.h" #include "ppapi/c/ppb_console.h" #include "ppapi/proxy/dispatcher.h" #include "ppapi/proxy/message_handler.h" #include "ppapi/shared_impl/ppapi_preferences.h" #include "ppapi/shared_impl/ppb_view_shared.h" #include "ppapi/shared_impl/singleton_resource_id.h" #include "ppapi/shared_impl/tracked_callback.h" namespace IPC { class SyncMessageFilter; } namespace ppapi { struct Preferences; class Resource; namespace thunk { class PPB_Instance_API; class ResourceCreationAPI; } namespace proxy { // Used to keep track of per-instance data. struct PPAPI_PROXY_EXPORT InstanceData { InstanceData(); ~InstanceData(); ViewData view; // When non-NULL, indicates the callback to execute when mouse lock is lost. scoped_refptr<TrackedCallback> mouse_lock_callback; // A map of singleton resources which are lazily created. typedef std::map<SingletonResourceID, scoped_refptr<Resource>> SingletonResourceMap; SingletonResourceMap singleton_resources; // Calls to |RequestSurroundingText()| are done by posted tasks. Track whether // a) a task is pending, to avoid redundant calls, and b) whether we should // actually call |RequestSurroundingText()|, to avoid stale calls (i.e., // calling when we shouldn't). 
bool is_request_surrounding_text_pending; bool should_do_request_surrounding_text; // The message handler which should handle JavaScript->Plugin messages, if // one has been registered, otherwise NULL. std::unique_ptr<MessageHandler> message_handler; // Flush info for PpapiCommandBufferProxy::OrderingBarrier(). struct PPAPI_PROXY_EXPORT FlushInfo { FlushInfo(); ~FlushInfo(); bool flush_pending; HostResource resource; int32_t put_offset; }; FlushInfo flush_info; }; class PPAPI_PROXY_EXPORT LockedSender { public: // Unlike |Send()|, this function continues to hold the Pepper proxy lock // until we are finished sending |msg|, even if it is a synchronous message. virtual bool SendAndStayLocked(IPC::Message* msg) = 0; protected: virtual ~LockedSender() {} }; class PPAPI_PROXY_EXPORT PluginDispatcher : public Dispatcher, public LockedSender, public base::SupportsWeakPtr<PluginDispatcher> { public: class PPAPI_PROXY_EXPORT PluginDelegate : public ProxyChannel::Delegate { public: // Returns the set used for globally uniquifying PP_Instances. This same // set must be returned for all channels. // // DEREFERENCE ONLY ON THE I/O THREAD. virtual std::set<PP_Instance>* GetGloballySeenInstanceIDSet() = 0; // Registers the plugin dispatcher and returns an ID. // Plugin dispatcher IDs will be used to dispatch messages from the browser. // Each call to Register() has to be matched with a call to Unregister(). 
virtual uint32_t Register(PluginDispatcher* plugin_dispatcher) = 0; virtual void Unregister(uint32_t plugin_dispatcher_id) = 0; }; class Sender : public IPC::Sender, public base::RefCountedThreadSafe<PluginDispatcher::Sender> { public: Sender(base::WeakPtr<PluginDispatcher> plugin_dispatcher, scoped_refptr<IPC::SyncMessageFilter> sync_filter); ~Sender() override; bool SendMessage(IPC::Message* msg); // IPC::Sender bool Send(IPC::Message* msg) override; private: base::WeakPtr<PluginDispatcher> plugin_dispatcher_; scoped_refptr<IPC::SyncMessageFilter> sync_filter_; DISALLOW_COPY_AND_ASSIGN(Sender); }; // Constructor for the plugin side. The init and shutdown functions will be // will be automatically called when requested by the renderer side. The // module ID will be set upon receipt of the InitializeModule message. // // Note about permissions: On the plugin side, the dispatcher and the plugin // run in the same address space (including in nacl). This means that the // permissions here are subject to malicious modification and bypass, and // an exploited or malicious plugin could send any IPC messages and just // bypass the permissions. All permissions must be checked "for realz" in the // host process when receiving messages. We check them on the plugin side // primarily to keep honest plugins honest, especially with respect to // dev interfaces that they "shouldn't" be using. // // You must call InitPluginWithChannel after the constructor. PluginDispatcher(PP_GetInterface_Func get_interface, const PpapiPermissions& permissions, bool incognito); virtual ~PluginDispatcher(); // The plugin side maintains a mapping from PP_Instance to Dispatcher so // that we can send the messages to the right channel if there are multiple // renderers sharing the same plugin. This mapping is maintained by // DidCreateInstance/DidDestroyInstance. 
static PluginDispatcher* GetForInstance(PP_Instance instance); // Same as GetForInstance but retrieves the instance from the given resource // object as a convenience. Returns NULL on failure. static PluginDispatcher* GetForResource(const Resource* resource); // Implements the GetInterface function for the plugin to call to retrieve // a browser interface. static const void* GetBrowserInterface(const char* interface_name); // Logs the given log message to the given instance, or, if the instance is // invalid, to all instances associated with all dispatchers. Used for // global log messages. static void LogWithSource(PP_Instance instance, PP_LogLevel level, const std::string& source, const std::string& value); const void* GetPluginInterface(const std::string& interface_name); // You must call this function before anything else. Returns true on success. // The delegate pointer must outlive this class, ownership is not // transferred. bool InitPluginWithChannel(PluginDelegate* delegate, base::ProcessId peer_pid, const IPC::ChannelHandle& channel_handle, bool is_client); // Dispatcher overrides. bool IsPlugin() const override; // Send the message to the renderer. If |msg| is a synchronous message, we // will unlock the ProxyLock so that we can handle incoming messages from the // renderer. bool Send(IPC::Message* msg) override; // Unlike |Send()|, this function continues to hold the Pepper proxy lock // until we are finished sending |msg|, even if it is a synchronous message. bool SendAndStayLocked(IPC::Message* msg) override; // IPC::Listener implementation. bool OnMessageReceived(const IPC::Message& msg) override; void OnChannelError() override; // Keeps track of which dispatcher to use for each instance, active instances // and tracks associated data like the current size. 
void DidCreateInstance(PP_Instance instance); void DidDestroyInstance(PP_Instance instance); // Gets the data for an existing instance, or NULL if the instance id doesn't // correspond to a known instance. InstanceData* GetInstanceData(PP_Instance instance); // Returns the corresponding API. These are APIs not associated with a // resource. Guaranteed non-NULL. thunk::PPB_Instance_API* GetInstanceAPI(); thunk::ResourceCreationAPI* GetResourceCreationAPI(); // Returns the Preferences. const Preferences& preferences() const { return preferences_; } uint32_t plugin_dispatcher_id() const { return plugin_dispatcher_id_; } bool incognito() const { return incognito_; } scoped_refptr<Sender> sender() { return sender_; } private: friend class PluginDispatcherTest; // Notifies all live instances that they're now closed. This is used when // a renderer crashes or some other error is received. void ForceFreeAllInstances(); // IPC message handlers. void OnMsgSupportsInterface(const std::string& interface_name, bool* result); void OnMsgSetPreferences(const Preferences& prefs); PluginDelegate* plugin_delegate_; // Contains all the plugin interfaces we've queried. The mapped value will // be the pointer to the interface pointer supplied by the plugin if it's // supported, or NULL if it's not supported. This allows us to cache failures // and not req-query if a plugin doesn't support the interface. typedef base::hash_map<std::string, const void*> InterfaceMap; InterfaceMap plugin_interfaces_; typedef std::unordered_map<PP_Instance, std::unique_ptr<InstanceData>> InstanceDataMap; InstanceDataMap instance_map_; // The preferences sent from the host. We only want to set this once, which // is what the received_preferences_ indicates. See OnMsgSetPreferences. bool received_preferences_; Preferences preferences_; uint32_t plugin_dispatcher_id_; // Set to true when the instances associated with this dispatcher are // incognito mode. 
bool incognito_; scoped_refptr<Sender> sender_; DISALLOW_COPY_AND_ASSIGN(PluginDispatcher); }; } // namespace proxy } // namespace ppapi #endif // PPAPI_PROXY_PLUGIN_DISPATCHER_H_
null
null
null
null
41,632
38,384
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
38,384
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * Copyright (C) 2010 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "third_party/blink/renderer/platform/blob/blob_registry.h" #include <memory> #include "base/location.h" #include "base/memory/scoped_refptr.h" #include "third_party/blink/public/platform/platform.h" #include "third_party/blink/public/platform/web_blob_registry.h" #include "third_party/blink/public/platform/web_string.h" #include "third_party/blink/renderer/platform/blob/blob_data.h" #include "third_party/blink/renderer/platform/blob/blob_url.h" #include "third_party/blink/renderer/platform/cross_thread_functional.h" #include "third_party/blink/renderer/platform/weborigin/security_origin.h" #include "third_party/blink/renderer/platform/wtf/assertions.h" #include "third_party/blink/renderer/platform/wtf/text/string_hash.h" #include "third_party/blink/renderer/platform/wtf/text/wtf_string.h" #include "third_party/blink/renderer/platform/wtf/threading.h" namespace blink { static WebBlobRegistry* GetBlobRegistry() { return Platform::Current()->GetBlobRegistry(); } void BlobRegistry::RegisterPublicBlobURL(SecurityOrigin* origin, const KURL& url, scoped_refptr<BlobDataHandle> handle) { GetBlobRegistry()->RegisterPublicBlobURL(url, handle->Uuid()); } void BlobRegistry::RevokePublicBlobURL(const KURL& url) { GetBlobRegistry()->RevokePublicBlobURL(url); } } // namespace blink
null
null
null
null
35,247
47,673
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
47,673
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <vector> #include "base/command_line.h" #include "base/threading/thread_task_runner_handle.h" #include "build/build_config.h" #include "cc/paint/image_transfer_cache_entry.h" #include "cc/paint/raw_memory_transfer_cache_entry.h" #include "cc/paint/transfer_cache_entry.h" #include "cc/test/test_in_process_context_provider.h" #include "gpu/command_buffer/client/client_transfer_cache.h" #include "gpu/command_buffer/client/gles2_cmd_helper.h" #include "gpu/command_buffer/client/gles2_implementation.h" #include "gpu/command_buffer/client/gles2_interface.h" #include "gpu/command_buffer/client/shared_memory_limits.h" #include "gpu/command_buffer/common/context_creation_attribs.h" #include "gpu/command_buffer/service/service_transfer_cache.h" #include "gpu/config/gpu_switches.h" #include "gpu/ipc/gl_in_process_context.h" #include "testing/gtest/include/gtest/gtest.h" #include "third_party/skia/include/core/SkImage.h" #include "ui/gl/gl_implementation.h" namespace cc { namespace { class TransferCacheTest : public testing::Test { public: TransferCacheTest() : testing::Test(), test_client_entry_(std::vector<uint8_t>(100)) {} void SetUp() override { bool is_offscreen = true; gpu::ContextCreationAttribs attribs; attribs.alpha_size = -1; attribs.depth_size = 24; attribs.stencil_size = 8; attribs.samples = 0; attribs.sample_buffers = 0; attribs.fail_if_major_perf_caveat = false; attribs.bind_generates_resource = false; // Enable OOP rasterization. attribs.enable_oop_rasterization = true; // Add an OOP rasterization command line flag so that we set // |chromium_raster_transport| features flag. // TODO(vmpstr): Is there a better way to do this? 
if (!base::CommandLine::ForCurrentProcess()->HasSwitch( switches::kEnableOOPRasterization)) { base::CommandLine::ForCurrentProcess()->AppendSwitch( switches::kEnableOOPRasterization); } context_ = gpu::GLInProcessContext::CreateWithoutInit(); auto result = context_->Initialize( nullptr, nullptr, is_offscreen, gpu::kNullSurfaceHandle, nullptr, attribs, gpu::SharedMemoryLimits(), &gpu_memory_buffer_manager_, &image_factory_, nullptr, base::ThreadTaskRunnerHandle::Get()); ASSERT_EQ(result, gpu::ContextResult::kSuccess); ASSERT_TRUE(context_->GetCapabilities().supports_oop_raster); } void TearDown() override { context_.reset(); } gpu::ServiceTransferCache* ServiceTransferCache() { return context_->GetTransferCacheForTest(); } gpu::gles2::GLES2Implementation* Gl() { return context_->GetImplementation(); } gpu::ContextSupport* ContextSupport() { return context_->GetImplementation(); } const ClientRawMemoryTransferCacheEntry& test_client_entry() const { return test_client_entry_; } void CreateEntry(const ClientTransferCacheEntry& entry) { auto* context_support = ContextSupport(); size_t size = entry.SerializedSize(); void* data = context_support->MapTransferCacheEntry(size); ASSERT_TRUE(data); entry.Serialize(base::make_span(static_cast<uint8_t*>(data), size)); context_support->UnmapAndCreateTransferCacheEntry(entry.UnsafeType(), entry.Id()); } private: viz::TestGpuMemoryBufferManager gpu_memory_buffer_manager_; TestImageFactory image_factory_; std::unique_ptr<gpu::GLInProcessContext> context_; gl::DisableNullDrawGLBindings enable_pixel_output_; ClientRawMemoryTransferCacheEntry test_client_entry_; }; TEST_F(TransferCacheTest, Basic) { auto* service_cache = ServiceTransferCache(); auto* gl = Gl(); auto* context_support = ContextSupport(); // Create an entry. const auto& entry = test_client_entry(); CreateEntry(entry); gl->Finish(); // Validate service-side state. 
EXPECT_NE(nullptr, service_cache->GetEntry(entry.Type(), entry.Id())); // Unlock on client side and flush to service. context_support->UnlockTransferCacheEntries( {{entry.UnsafeType(), entry.Id()}}); gl->Finish(); // Re-lock on client side and validate state. No need to flush as lock is // local. EXPECT_TRUE(context_support->ThreadsafeLockTransferCacheEntry( entry.UnsafeType(), entry.Id())); // Delete on client side, flush, and validate that deletion reaches service. context_support->DeleteTransferCacheEntry(entry.UnsafeType(), entry.Id()); gl->Finish(); EXPECT_EQ(nullptr, service_cache->GetEntry(entry.Type(), entry.Id())); } TEST_F(TransferCacheTest, Eviction) { auto* service_cache = ServiceTransferCache(); auto* gl = Gl(); auto* context_support = ContextSupport(); const auto& entry = test_client_entry(); // Create an entry. CreateEntry(entry); gl->Finish(); // Validate service-side state. EXPECT_NE(nullptr, service_cache->GetEntry(entry.Type(), entry.Id())); // Unlock on client side and flush to service. context_support->UnlockTransferCacheEntries( {{entry.UnsafeType(), entry.Id()}}); gl->Finish(); // Evict on the service side. service_cache->SetCacheSizeLimitForTesting(0); EXPECT_EQ(nullptr, service_cache->GetEntry(entry.Type(), entry.Id())); // Try to re-lock on the client side. This should fail. EXPECT_FALSE(context_support->ThreadsafeLockTransferCacheEntry( entry.UnsafeType(), entry.Id())); } TEST_F(TransferCacheTest, RawMemoryTransfer) { auto* service_cache = ServiceTransferCache(); auto* gl = Gl(); // Create an entry with some initialized data. std::vector<uint8_t> data; data.resize(100); for (size_t i = 0; i < data.size(); ++i) { data[i] = i; } // Add the entry to the transfer cache ClientRawMemoryTransferCacheEntry client_entry(data); CreateEntry(client_entry); gl->Finish(); // Validate service-side data matches. 
ServiceTransferCacheEntry* service_entry = service_cache->GetEntry(client_entry.Type(), client_entry.Id()); EXPECT_EQ(service_entry->Type(), client_entry.Type()); const std::vector<uint8_t> service_data = static_cast<ServiceRawMemoryTransferCacheEntry*>(service_entry)->data(); EXPECT_EQ(data, service_data); } TEST_F(TransferCacheTest, ImageMemoryTransfer) { // TODO(ericrk): This test doesn't work on Android. crbug.com/777628 #if defined(OS_ANDROID) return; #endif auto* service_cache = ServiceTransferCache(); auto* gl = Gl(); // Create a 10x10 image. SkImageInfo info = SkImageInfo::MakeN32Premul(10, 10); std::vector<uint8_t> data; data.resize(info.width() * info.height() * 4); for (size_t i = 0; i < data.size(); ++i) { data[i] = i; } SkPixmap pixmap(info, data.data(), info.minRowBytes()); // Add the entry to the transfer cache ClientImageTransferCacheEntry client_entry(&pixmap, nullptr); CreateEntry(client_entry); gl->Finish(); // Validate service-side data matches. ServiceTransferCacheEntry* service_entry = service_cache->GetEntry(client_entry.Type(), client_entry.Id()); EXPECT_EQ(service_entry->Type(), client_entry.Type()); sk_sp<SkImage> service_image = static_cast<ServiceImageTransferCacheEntry*>(service_entry)->image(); EXPECT_TRUE(service_image->isTextureBacked()); std::vector<uint8_t> service_data; service_data.resize(data.size()); service_image->readPixels(info, service_data.data(), info.minRowBytes(), 0, 0); EXPECT_EQ(data, service_data); } } // namespace } // namespace cc
null
null
null
null
44,536
21,298
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
21,298
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/renderer/media_recorder/audio_track_recorder.h" #include <stdint.h> #include <string> #include "base/bind.h" #include "base/bind_helpers.h" #include "base/macros.h" #include "base/run_loop.h" #include "base/stl_util.h" #include "base/test/scoped_task_environment.h" #include "base/time/time.h" #include "content/renderer/media/stream/media_stream_audio_source.h" #include "media/audio/simple_sources.h" #include "media/base/audio_sample_types.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" #include "third_party/blink/public/platform/web_string.h" #include "third_party/blink/public/web/web_heap.h" #include "third_party/opus/src/include/opus.h" using ::testing::_; using ::testing::DoAll; using ::testing::InSequence; using ::testing::Mock; using ::testing::Return; using ::testing::SaveArg; using ::testing::TestWithParam; using ::testing::ValuesIn; using base::TimeTicks; namespace { const int kDefaultBitsPerSample = 16; const int kDefaultSampleRate = 48000; // The |frames_per_buffer| field of AudioParameters is not used by ATR. const int kIgnoreFramesPerBuffer = 1; // The following parameters replicate those in audio_track_recorder.cc, see this // file for explanations. 
const int kMediaStreamAudioTrackBufferDurationMs = 10; const int kOpusBufferDurationMs = 60; const int kRatioInputToOutputFrames = kOpusBufferDurationMs / kMediaStreamAudioTrackBufferDurationMs; const int kFramesPerBuffer = kOpusBufferDurationMs * kDefaultSampleRate / 1000; } // namespace namespace content { ACTION_P(RunClosure, closure) { closure.Run(); } struct ATRTestParams { const media::AudioParameters::Format input_format; const media::ChannelLayout channel_layout; const int sample_rate; const int bits_per_sample; const AudioTrackRecorder::CodecId codec; }; const ATRTestParams kATRTestParams[] = { // Equivalent to default settings: {media::AudioParameters::AUDIO_PCM_LOW_LATENCY, /* input format */ media::CHANNEL_LAYOUT_STEREO, /* channel layout */ kDefaultSampleRate, /* sample rate */ kDefaultBitsPerSample, /* bits per sample */ AudioTrackRecorder::CodecId::OPUS}, /* codec for encoding */ // Change to mono: {media::AudioParameters::AUDIO_PCM_LOW_LATENCY, media::CHANNEL_LAYOUT_MONO, kDefaultSampleRate, kDefaultBitsPerSample, AudioTrackRecorder::CodecId::OPUS}, // Different sampling rate as well: {media::AudioParameters::AUDIO_PCM_LOW_LATENCY, media::CHANNEL_LAYOUT_MONO, 24000, kDefaultBitsPerSample, AudioTrackRecorder::CodecId::OPUS}, {media::AudioParameters::AUDIO_PCM_LOW_LATENCY, media::CHANNEL_LAYOUT_STEREO, 8000, kDefaultBitsPerSample, AudioTrackRecorder::CodecId::OPUS}, // Using a non-default Opus sampling rate (48, 24, 16, 12, or 8 kHz). 
{media::AudioParameters::AUDIO_PCM_LOW_LATENCY, media::CHANNEL_LAYOUT_MONO, 22050, kDefaultBitsPerSample, AudioTrackRecorder::CodecId::OPUS}, {media::AudioParameters::AUDIO_PCM_LOW_LATENCY, media::CHANNEL_LAYOUT_STEREO, 44100, kDefaultBitsPerSample, AudioTrackRecorder::CodecId::OPUS}, {media::AudioParameters::AUDIO_PCM_LOW_LATENCY, media::CHANNEL_LAYOUT_STEREO, 96000, kDefaultBitsPerSample, AudioTrackRecorder::CodecId::OPUS}, {media::AudioParameters::AUDIO_PCM_LOW_LATENCY, media::CHANNEL_LAYOUT_MONO, kDefaultSampleRate, kDefaultBitsPerSample, AudioTrackRecorder::CodecId::PCM}, {media::AudioParameters::AUDIO_PCM_LOW_LATENCY, media::CHANNEL_LAYOUT_STEREO, kDefaultSampleRate, kDefaultBitsPerSample, AudioTrackRecorder::CodecId::PCM}, }; class AudioTrackRecorderTest : public TestWithParam<ATRTestParams> { public: // Initialize |first_params_| based on test parameters, and |second_params_| // to always be the same thing. AudioTrackRecorderTest() : codec_(GetParam().codec), first_params_(GetParam().input_format, GetParam().channel_layout, GetParam().sample_rate, GetParam().bits_per_sample, kIgnoreFramesPerBuffer), second_params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, media::CHANNEL_LAYOUT_STEREO, kDefaultSampleRate, kDefaultBitsPerSample, kIgnoreFramesPerBuffer), first_source_(first_params_.channels(), /* # channels */ 440, /* frequency */ first_params_.sample_rate()), /* sample rate */ second_source_(second_params_.channels(), 440, second_params_.sample_rate()), opus_decoder_(nullptr), first_source_cache_pos_(0) { ResetDecoder(first_params_); PrepareBlinkTrack(); audio_track_recorder_.reset(new AudioTrackRecorder( codec_, blink_track_, base::Bind(&AudioTrackRecorderTest::OnEncodedAudio, base::Unretained(this)), 0 /* bits_per_second */)); } ~AudioTrackRecorderTest() { opus_decoder_destroy(opus_decoder_); opus_decoder_ = nullptr; blink_track_.Reset(); blink::WebHeap::CollectAllGarbageForTesting(); audio_track_recorder_.reset(); // Let the message loop run to finish 
destroying the recorder properly. base::RunLoop().RunUntilIdle(); } void ResetDecoder(const media::AudioParameters& params) { if (opus_decoder_) { opus_decoder_destroy(opus_decoder_); opus_decoder_ = nullptr; } int error; opus_decoder_ = opus_decoder_create(kDefaultSampleRate, params.channels(), &error); EXPECT_TRUE(error == OPUS_OK && opus_decoder_); opus_buffer_.reset(new float[kFramesPerBuffer * params.channels()]); } std::unique_ptr<media::AudioBus> GetFirstSourceAudioBus() { std::unique_ptr<media::AudioBus> bus(media::AudioBus::Create( first_params_.channels(), first_params_.sample_rate() * kMediaStreamAudioTrackBufferDurationMs / base::Time::kMillisecondsPerSecond)); first_source_.OnMoreData(base::TimeDelta(), base::TimeTicks::Now(), 0, bus.get()); // Save the samples that we read into the first_source_cache_. std::unique_ptr<media::AudioBus> cache_bus( media::AudioBus::Create(bus->channels(), bus->frames())); bus->CopyTo(cache_bus.get()); int current_size = first_source_cache_.size(); first_source_cache_.resize(current_size + cache_bus->frames() * cache_bus->channels()); cache_bus->ToInterleaved<media::Float32SampleTypeTraits>( cache_bus->frames(), &first_source_cache_[current_size]); return bus; } std::unique_ptr<media::AudioBus> GetSecondSourceAudioBus() { std::unique_ptr<media::AudioBus> bus(media::AudioBus::Create( second_params_.channels(), second_params_.sample_rate() * kMediaStreamAudioTrackBufferDurationMs / base::Time::kMillisecondsPerSecond)); second_source_.OnMoreData(base::TimeDelta(), base::TimeTicks::Now(), 0, bus.get()); return bus; } MOCK_METHOD3(DoOnEncodedAudio, void(const media::AudioParameters& params, std::string encoded_data, TimeTicks timestamp)); void OnEncodedAudio(const media::AudioParameters& params, std::unique_ptr<std::string> encoded_data, TimeTicks timestamp) { EXPECT_TRUE(!encoded_data->empty()); if (codec_ == AudioTrackRecorder::CodecId::OPUS) { // Decode |encoded_data| and check we get the expected number of frames // per 
buffer. EXPECT_EQ( kDefaultSampleRate * kOpusBufferDurationMs / 1000, opus_decode_float(opus_decoder_, reinterpret_cast<uint8_t*>( base::string_as_array(encoded_data.get())), encoded_data->size(), opus_buffer_.get(), kFramesPerBuffer, 0)); } else if (codec_ == AudioTrackRecorder::CodecId::PCM) { // Manually confirm that we're getting the same data out as what we // generated from the sine wave. for (size_t b = 0; b + 3 < encoded_data->size() && first_source_cache_pos_ < first_source_cache_.size(); b += sizeof(first_source_cache_[0]), ++first_source_cache_pos_) { float sample; memcpy(&sample, &(*encoded_data)[b], 4); ASSERT_FLOAT_EQ(sample, first_source_cache_[first_source_cache_pos_]) << "(Sample " << first_source_cache_pos_ << ")"; } } DoOnEncodedAudio(params, *encoded_data, timestamp); } const base::test::ScopedTaskEnvironment scoped_task_environment_; // ATR and WebMediaStreamTrack for fooling it. std::unique_ptr<AudioTrackRecorder> audio_track_recorder_; blink::WebMediaStreamTrack blink_track_; // The codec we'll use for compression the audio. const AudioTrackRecorder::CodecId codec_; // Two different sets of AudioParameters for testing re-init of ATR. const media::AudioParameters first_params_; const media::AudioParameters second_params_; // AudioSources for creating AudioBuses. media::SineWaveAudioSource first_source_; media::SineWaveAudioSource second_source_; // Decoder for verifying data was properly encoded. OpusDecoder* opus_decoder_; std::unique_ptr<float[]> opus_buffer_; // Save the data we generate from the first source so that we might compare it // later if we happen to be using the PCM encoder. std::vector<float> first_source_cache_; size_t first_source_cache_pos_; private: // Prepares a blink track of a given MediaStreamType and attaches the native // track, which can be used to capture audio data and pass it to the producer. // Adapted from media::WebRTCLocalAudioSourceProviderTest. 
void PrepareBlinkTrack() { blink::WebMediaStreamSource audio_source; audio_source.Initialize(blink::WebString::FromUTF8("dummy_source_id"), blink::WebMediaStreamSource::kTypeAudio, blink::WebString::FromUTF8("dummy_source_name"), false /* remote */); audio_source.SetExtraData(new MediaStreamAudioSource(true)); blink_track_.Initialize(blink::WebString::FromUTF8("audio_track"), audio_source); CHECK(MediaStreamAudioSource::From(audio_source) ->ConnectToTrack(blink_track_)); } DISALLOW_COPY_AND_ASSIGN(AudioTrackRecorderTest); }; TEST_P(AudioTrackRecorderTest, OnDataOpus) { if (codec_ != AudioTrackRecorder::CodecId::OPUS) return; InSequence s; base::RunLoop run_loop; base::Closure quit_closure = run_loop.QuitClosure(); // Give ATR initial audio parameters. audio_track_recorder_->OnSetFormat(first_params_); // TODO(ajose): consider adding WillOnce(SaveArg...) and inspecting, as done // in VTR unittests. http://crbug.com/548856 EXPECT_CALL(*this, DoOnEncodedAudio(_, _, _)).Times(1); audio_track_recorder_->OnData(*GetFirstSourceAudioBus(), TimeTicks::Now()); for (int i = 0; i < kRatioInputToOutputFrames - 1; ++i) audio_track_recorder_->OnData(*GetFirstSourceAudioBus(), TimeTicks::Now()); EXPECT_CALL(*this, DoOnEncodedAudio(_, _, _)) .Times(1) // Only reset the decoder once we've heard back: .WillOnce(RunClosure(base::Bind(&AudioTrackRecorderTest::ResetDecoder, base::Unretained(this), second_params_))); audio_track_recorder_->OnData(*GetFirstSourceAudioBus(), TimeTicks::Now()); for (int i = 0; i < kRatioInputToOutputFrames - 1; ++i) audio_track_recorder_->OnData(*GetFirstSourceAudioBus(), TimeTicks::Now()); // If the amount of samples/10ms buffer is not an integer (e.g. 22050Hz) we // need an extra OnData() to account for the round-off error. if (GetParam().sample_rate % 100) audio_track_recorder_->OnData(*GetFirstSourceAudioBus(), TimeTicks::Now()); // Give ATR new audio parameters. audio_track_recorder_->OnSetFormat(second_params_); // Send audio with different params. 
EXPECT_CALL(*this, DoOnEncodedAudio(_, _, _)) .Times(1) .WillOnce(RunClosure(std::move(quit_closure))); audio_track_recorder_->OnData(*GetSecondSourceAudioBus(), TimeTicks::Now()); for (int i = 0; i < kRatioInputToOutputFrames - 1; ++i) audio_track_recorder_->OnData(*GetSecondSourceAudioBus(), TimeTicks::Now()); run_loop.Run(); Mock::VerifyAndClearExpectations(this); } TEST_P(AudioTrackRecorderTest, OnDataPcm) { if (codec_ != AudioTrackRecorder::CodecId::PCM) return; InSequence s; base::RunLoop run_loop; base::Closure quit_closure = run_loop.QuitClosure(); audio_track_recorder_->OnSetFormat(first_params_); EXPECT_CALL(*this, DoOnEncodedAudio(_, _, _)).Times(5); EXPECT_CALL(*this, DoOnEncodedAudio(_, _, _)) .WillOnce(RunClosure(std::move(quit_closure))); audio_track_recorder_->OnData(*GetFirstSourceAudioBus(), TimeTicks::Now()); for (int i = 0; i < kRatioInputToOutputFrames - 1; ++i) audio_track_recorder_->OnData(*GetFirstSourceAudioBus(), TimeTicks::Now()); run_loop.Run(); Mock::VerifyAndClearExpectations(this); } TEST_P(AudioTrackRecorderTest, PauseResume) { if (codec_ != AudioTrackRecorder::CodecId::OPUS) return; InSequence s; base::RunLoop run_loop; base::Closure quit_closure = run_loop.QuitClosure(); // Give ATR initial audio parameters. 
audio_track_recorder_->OnSetFormat(first_params_); audio_track_recorder_->Pause(); EXPECT_CALL(*this, DoOnEncodedAudio(_, _, _)).Times(0); audio_track_recorder_->OnData(*GetFirstSourceAudioBus(), TimeTicks::Now()); for (int i = 0; i < kRatioInputToOutputFrames - 1; ++i) audio_track_recorder_->OnData(*GetFirstSourceAudioBus(), TimeTicks::Now()); audio_track_recorder_->Resume(); EXPECT_CALL(*this, DoOnEncodedAudio(_, _, _)) .Times(1) .WillOnce(RunClosure(std::move(quit_closure))); audio_track_recorder_->OnData(*GetFirstSourceAudioBus(), TimeTicks::Now()); for (int i = 0; i < kRatioInputToOutputFrames - 1; ++i) audio_track_recorder_->OnData(*GetFirstSourceAudioBus(), TimeTicks::Now()); if (GetParam().sample_rate % 100) audio_track_recorder_->OnData(*GetFirstSourceAudioBus(), TimeTicks::Now()); run_loop.Run(); Mock::VerifyAndClearExpectations(this); } INSTANTIATE_TEST_CASE_P(, AudioTrackRecorderTest, ValuesIn(kATRTestParams)); } // namespace content
null
null
null
null
18,161
23,896
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
23,896
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CONTENT_BROWSER_MEMORY_BROWSER_MEMORY_MONITOR_H_ #define CONTENT_BROWSER_MEMORY_BROWSER_MEMORY_MONITOR_H_ #include <memory> #include "base/macros.h" #include "base/memory/singleton.h" #include "content/common/content_export.h" namespace base { struct SystemMemoryInfoKB; } namespace content { // A simple class that monitors the amount of free memory available on a system. // This is an interface to facilitate dependency injection for testing. class CONTENT_EXPORT MemoryMonitor { public: MemoryMonitor() {} virtual ~MemoryMonitor() {} // Returns the amount of free memory available on the system until the system // will be in a critical state. Critical is as defined by the OS (swapping // will occur, or physical memory will run out, etc). It is possible for this // to return negative values, in which case that much memory would have to be // freed in order to exit a critical memory state. virtual int GetFreeMemoryUntilCriticalMB() = 0; private: DISALLOW_COPY_AND_ASSIGN(MemoryMonitor); }; // Factory function for creating a monitor for the current platform. CONTENT_EXPORT std::unique_ptr<MemoryMonitor> CreateMemoryMonitor(); // A class for fetching system information used by a memory monitor. This can // be subclassed for testing or if a particular MemoryMonitor implementation // needs additional functionality. class CONTENT_EXPORT MemoryMonitorDelegate { public: static MemoryMonitorDelegate* GetInstance(); MemoryMonitorDelegate() {} virtual ~MemoryMonitorDelegate(); // Returns system memory information. virtual void GetSystemMemoryInfo(base::SystemMemoryInfoKB* mem_info); private: friend struct base::DefaultSingletonTraits<MemoryMonitorDelegate>; DISALLOW_COPY_AND_ASSIGN(MemoryMonitorDelegate); }; } // namespace content #endif // CONTENT_BROWSER_MEMORY_BROWSER_MEMORY_MONITOR_H_
null
null
null
null
20,759
7,101
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
7,101
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "net/quic/core/quic_server_session_base.h" #include <cstdint> #include <memory> #include "base/macros.h" #include "net/quic/core/crypto/quic_crypto_server_config.h" #include "net/quic/core/crypto/quic_random.h" #include "net/quic/core/proto/cached_network_parameters.pb.h" #include "net/quic/core/quic_connection.h" #include "net/quic/core/quic_crypto_server_stream.h" #include "net/quic/core/quic_utils.h" #include "net/quic/core/tls_server_handshaker.h" #include "net/quic/platform/api/quic_flags.h" #include "net/quic/platform/api/quic_ptr_util.h" #include "net/quic/platform/api/quic_socket_address.h" #include "net/quic/platform/api/quic_string.h" #include "net/quic/platform/api/quic_test.h" #include "net/quic/test_tools/crypto_test_utils.h" #include "net/quic/test_tools/fake_proof_source.h" #include "net/quic/test_tools/quic_config_peer.h" #include "net/quic/test_tools/quic_connection_peer.h" #include "net/quic/test_tools/quic_crypto_server_config_peer.h" #include "net/quic/test_tools/quic_sent_packet_manager_peer.h" #include "net/quic/test_tools/quic_session_peer.h" #include "net/quic/test_tools/quic_spdy_session_peer.h" #include "net/quic/test_tools/quic_spdy_stream_peer.h" #include "net/quic/test_tools/quic_stream_peer.h" #include "net/quic/test_tools/quic_sustained_bandwidth_recorder_peer.h" #include "net/quic/test_tools/quic_test_utils.h" #include "net/test/gtest_util.h" #include "net/tools/quic/quic_epoll_connection_helper.h" #include "net/tools/quic/quic_simple_server_stream.h" #include "net/tools/quic/test_tools/mock_quic_session_visitor.h" using testing::_; using testing::StrictMock; namespace net { namespace test { class QuicServerSessionBasePeer { public: static QuicStream* GetOrCreateDynamicStream(QuicServerSessionBase* s, QuicStreamId id) { return s->GetOrCreateDynamicStream(id); } 
static void SetCryptoStream(QuicServerSessionBase* s, QuicCryptoServerStream* crypto_stream) { s->crypto_stream_.reset(crypto_stream); s->static_streams()[kCryptoStreamId] = crypto_stream; } static bool IsBandwidthResumptionEnabled(QuicServerSessionBase* s) { return s->bandwidth_resumption_enabled_; } }; namespace { class TestServerSession : public QuicServerSessionBase { public: TestServerSession(const QuicConfig& config, QuicConnection* connection, QuicSession::Visitor* visitor, QuicCryptoServerStream::Helper* helper, const QuicCryptoServerConfig* crypto_config, QuicCompressedCertsCache* compressed_certs_cache, QuicHttpResponseCache* response_cache) : QuicServerSessionBase(config, connection, visitor, helper, crypto_config, compressed_certs_cache), response_cache_(response_cache) {} ~TestServerSession() override { delete connection(); }; protected: QuicSpdyStream* CreateIncomingDynamicStream(QuicStreamId id) override { if (!ShouldCreateIncomingDynamicStream(id)) { return nullptr; } QuicSpdyStream* stream = new QuicSimpleServerStream(id, this, response_cache_); ActivateStream(QuicWrapUnique(stream)); return stream; } QuicSpdyStream* CreateOutgoingDynamicStream() override { if (!ShouldCreateOutgoingDynamicStream()) { return nullptr; } QuicSpdyStream* stream = new QuicSimpleServerStream( GetNextOutgoingStreamId(), this, response_cache_); ActivateStream(QuicWrapUnique(stream)); return stream; } QuicCryptoServerStreamBase* CreateQuicCryptoServerStream( const QuicCryptoServerConfig* crypto_config, QuicCompressedCertsCache* compressed_certs_cache) override { return new QuicCryptoServerStream( crypto_config, compressed_certs_cache, GetQuicReloadableFlag(enable_quic_stateless_reject_support), this, stream_helper()); } private: QuicHttpResponseCache* response_cache_; // Owned by QuicServerSessionBaseTest }; const size_t kMaxStreamsForTest = 10; class QuicServerSessionBaseTest : public QuicTestWithParam<ParsedQuicVersion> { protected: QuicServerSessionBaseTest() : 
QuicServerSessionBaseTest(crypto_test_utils::ProofSourceForTesting()) {} explicit QuicServerSessionBaseTest(std::unique_ptr<ProofSource> proof_source) : crypto_config_(QuicCryptoServerConfig::TESTING, QuicRandom::GetInstance(), std::move(proof_source), TlsServerHandshaker::CreateSslCtx()), compressed_certs_cache_( QuicCompressedCertsCache::kQuicCompressedCertsCacheSize) { config_.SetMaxStreamsPerConnection(kMaxStreamsForTest, kMaxStreamsForTest); config_.SetMaxIncomingDynamicStreamsToSend(kMaxStreamsForTest); QuicConfigPeer::SetReceivedMaxIncomingDynamicStreams(&config_, kMaxStreamsForTest); config_.SetInitialStreamFlowControlWindowToSend( kInitialStreamFlowControlWindowForTest); config_.SetInitialSessionFlowControlWindowToSend( kInitialSessionFlowControlWindowForTest); ParsedQuicVersionVector supported_versions = SupportedVersions(GetParam()); connection_ = new StrictMock<MockQuicConnection>( &helper_, &alarm_factory_, Perspective::IS_SERVER, supported_versions); session_ = QuicMakeUnique<TestServerSession>( config_, connection_, &owner_, &stream_helper_, &crypto_config_, &compressed_certs_cache_, &response_cache_); MockClock clock; handshake_message_.reset(crypto_config_.AddDefaultConfig( QuicRandom::GetInstance(), &clock, QuicCryptoServerConfig::ConfigOptions())); session_->Initialize(); QuicSessionPeer::GetMutableCryptoStream(session_.get()) ->OnSuccessfulVersionNegotiation(supported_versions.front()); visitor_ = QuicConnectionPeer::GetVisitor(connection_); } QuicStreamId GetNthClientInitiatedId(int n) { return QuicSpdySessionPeer::GetNthClientInitiatedStreamId(*session_, n); } QuicStreamId GetNthServerInitiatedId(int n) { return QuicSpdySessionPeer::GetNthServerInitiatedStreamId(*session_, n); } StrictMock<MockQuicSessionVisitor> owner_; StrictMock<MockQuicCryptoServerStreamHelper> stream_helper_; MockQuicConnectionHelper helper_; MockAlarmFactory alarm_factory_; StrictMock<MockQuicConnection>* connection_; QuicConfig config_; QuicCryptoServerConfig 
crypto_config_; QuicCompressedCertsCache compressed_certs_cache_; QuicHttpResponseCache response_cache_; std::unique_ptr<TestServerSession> session_; std::unique_ptr<CryptoHandshakeMessage> handshake_message_; QuicConnectionVisitorInterface* visitor_; }; // Compares CachedNetworkParameters. MATCHER_P(EqualsProto, network_params, "") { CachedNetworkParameters reference(network_params); return (arg->bandwidth_estimate_bytes_per_second() == reference.bandwidth_estimate_bytes_per_second() && arg->bandwidth_estimate_bytes_per_second() == reference.bandwidth_estimate_bytes_per_second() && arg->max_bandwidth_estimate_bytes_per_second() == reference.max_bandwidth_estimate_bytes_per_second() && arg->max_bandwidth_timestamp_seconds() == reference.max_bandwidth_timestamp_seconds() && arg->min_rtt_ms() == reference.min_rtt_ms() && arg->previous_connection_state() == reference.previous_connection_state()); } INSTANTIATE_TEST_CASE_P(Tests, QuicServerSessionBaseTest, ::testing::ValuesIn(AllSupportedVersions())); TEST_P(QuicServerSessionBaseTest, CloseStreamDueToReset) { // Open a stream, then reset it. // Send two bytes of payload to open it. QuicStreamFrame data1(GetNthClientInitiatedId(0), false, 0, QuicStringPiece("HT")); session_->OnStreamFrame(data1); EXPECT_EQ(1u, session_->GetNumOpenIncomingStreams()); // Send a reset (and expect the peer to send a RST in response). QuicRstStreamFrame rst1(kInvalidControlFrameId, GetNthClientInitiatedId(0), QUIC_ERROR_PROCESSING_STREAM, 0); EXPECT_CALL(owner_, OnRstStreamReceived(_)).Times(1); EXPECT_CALL(*connection_, SendControlFrame(_)); EXPECT_CALL(*connection_, OnStreamReset(GetNthClientInitiatedId(0), QUIC_RST_ACKNOWLEDGEMENT)); visitor_->OnRstStream(rst1); EXPECT_EQ(0u, session_->GetNumOpenIncomingStreams()); // Send the same two bytes of payload in a new packet. visitor_->OnStreamFrame(data1); // The stream should not be re-opened. 
EXPECT_EQ(0u, session_->GetNumOpenIncomingStreams()); EXPECT_TRUE(connection_->connected()); } TEST_P(QuicServerSessionBaseTest, NeverOpenStreamDueToReset) { // Send a reset (and expect the peer to send a RST in response). QuicRstStreamFrame rst1(kInvalidControlFrameId, GetNthClientInitiatedId(0), QUIC_ERROR_PROCESSING_STREAM, 0); EXPECT_CALL(owner_, OnRstStreamReceived(_)).Times(1); EXPECT_CALL(*connection_, SendControlFrame(_)); EXPECT_CALL(*connection_, OnStreamReset(GetNthClientInitiatedId(0), QUIC_RST_ACKNOWLEDGEMENT)); visitor_->OnRstStream(rst1); EXPECT_EQ(0u, session_->GetNumOpenIncomingStreams()); // Send two bytes of payload. QuicStreamFrame data1(GetNthClientInitiatedId(0), false, 0, QuicStringPiece("HT")); visitor_->OnStreamFrame(data1); // The stream should never be opened, now that the reset is received. EXPECT_EQ(0u, session_->GetNumOpenIncomingStreams()); EXPECT_TRUE(connection_->connected()); } TEST_P(QuicServerSessionBaseTest, AcceptClosedStream) { // Send (empty) compressed headers followed by two bytes of data. QuicStreamFrame frame1(GetNthClientInitiatedId(0), false, 0, QuicStringPiece("\1\0\0\0\0\0\0\0HT")); QuicStreamFrame frame2(GetNthClientInitiatedId(1), false, 0, QuicStringPiece("\2\0\0\0\0\0\0\0HT")); visitor_->OnStreamFrame(frame1); visitor_->OnStreamFrame(frame2); EXPECT_EQ(2u, session_->GetNumOpenIncomingStreams()); // Send a reset (and expect the peer to send a RST in response). QuicRstStreamFrame rst(kInvalidControlFrameId, GetNthClientInitiatedId(0), QUIC_ERROR_PROCESSING_STREAM, 0); EXPECT_CALL(owner_, OnRstStreamReceived(_)).Times(1); EXPECT_CALL(*connection_, SendControlFrame(_)); EXPECT_CALL(*connection_, OnStreamReset(GetNthClientInitiatedId(0), QUIC_RST_ACKNOWLEDGEMENT)); visitor_->OnRstStream(rst); // If we were tracking, we'd probably want to reject this because it's data // past the reset point of stream 3. 
As it's a closed stream we just drop the // data on the floor, but accept the packet because it has data for stream 5. QuicStreamFrame frame3(GetNthClientInitiatedId(0), false, 2, QuicStringPiece("TP")); QuicStreamFrame frame4(GetNthClientInitiatedId(1), false, 2, QuicStringPiece("TP")); visitor_->OnStreamFrame(frame3); visitor_->OnStreamFrame(frame4); // The stream should never be opened, now that the reset is received. EXPECT_EQ(1u, session_->GetNumOpenIncomingStreams()); EXPECT_TRUE(connection_->connected()); } TEST_P(QuicServerSessionBaseTest, MaxOpenStreams) { // Test that the server refuses if a client attempts to open too many data // streams. The server accepts slightly more than the negotiated stream limit // to deal with rare cases where a client FIN/RST is lost. // The slightly increased stream limit is set during config negotiation. It // is either an increase of 10 over negotiated limit, or a fixed percentage // scaling, whichever is larger. Test both before continuing. session_->OnConfigNegotiated(); EXPECT_LT(kMaxStreamsMultiplier * kMaxStreamsForTest, kMaxStreamsForTest + kMaxStreamsMinimumIncrement); EXPECT_EQ(kMaxStreamsForTest + kMaxStreamsMinimumIncrement, session_->max_open_incoming_streams()); EXPECT_EQ(0u, session_->GetNumOpenIncomingStreams()); QuicStreamId stream_id = GetNthClientInitiatedId(0); // Open the max configured number of streams, should be no problem. for (size_t i = 0; i < kMaxStreamsForTest; ++i) { EXPECT_TRUE(QuicServerSessionBasePeer::GetOrCreateDynamicStream( session_.get(), stream_id)); stream_id += QuicSpdySessionPeer::NextStreamId(*session_); } // Open more streams: server should accept slightly more than the limit. for (size_t i = 0; i < kMaxStreamsMinimumIncrement; ++i) { EXPECT_TRUE(QuicServerSessionBasePeer::GetOrCreateDynamicStream( session_.get(), stream_id)); stream_id += QuicSpdySessionPeer::NextStreamId(*session_); } // Now violate the server's internal stream limit. 
stream_id += QuicSpdySessionPeer::NextStreamId(*session_); EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0); EXPECT_CALL(*connection_, SendControlFrame(_)); EXPECT_CALL(*connection_, OnStreamReset(stream_id, QUIC_REFUSED_STREAM)); // Even if the connection remains open, the stream creation should fail. EXPECT_FALSE(QuicServerSessionBasePeer::GetOrCreateDynamicStream( session_.get(), stream_id)); } TEST_P(QuicServerSessionBaseTest, MaxAvailableStreams) { // Test that the server closes the connection if a client makes too many data // streams available. The server accepts slightly more than the negotiated // stream limit to deal with rare cases where a client FIN/RST is lost. session_->OnConfigNegotiated(); const size_t kAvailableStreamLimit = session_->MaxAvailableStreams(); EXPECT_EQ( session_->max_open_incoming_streams() * kMaxAvailableStreamsMultiplier, session_->MaxAvailableStreams()); // The protocol specification requires that there can be at least 10 times // as many available streams as the connection's maximum open streams. EXPECT_LE(10 * kMaxStreamsForTest, kAvailableStreamLimit); EXPECT_EQ(0u, session_->GetNumOpenIncomingStreams()); EXPECT_TRUE(QuicServerSessionBasePeer::GetOrCreateDynamicStream( session_.get(), GetNthClientInitiatedId(0))); // Establish available streams up to the server's limit. QuicStreamId next_id = QuicSpdySessionPeer::NextStreamId(*session_); const int kLimitingStreamId = GetNthClientInitiatedId(kAvailableStreamLimit + 1); EXPECT_TRUE(QuicServerSessionBasePeer::GetOrCreateDynamicStream( session_.get(), kLimitingStreamId)); // A further available stream will result in connection close. EXPECT_CALL(*connection_, CloseConnection(QUIC_TOO_MANY_AVAILABLE_STREAMS, _, _)); // This forces stream kLimitingStreamId + 2 to become available, which // violates the quota. 
EXPECT_FALSE(QuicServerSessionBasePeer::GetOrCreateDynamicStream( session_.get(), kLimitingStreamId + 2 * next_id)); } TEST_P(QuicServerSessionBaseTest, GetEvenIncomingError) { // Incoming streams on the server session must be odd. EXPECT_CALL(*connection_, CloseConnection(QUIC_INVALID_STREAM_ID, _, _)); EXPECT_EQ(nullptr, QuicServerSessionBasePeer::GetOrCreateDynamicStream( session_.get(), GetNthServerInitiatedId(0))); } TEST_P(QuicServerSessionBaseTest, GetStreamDisconnected) { // Don't create new streams if the connection is disconnected. QuicConnectionPeer::TearDownLocalConnectionState(connection_); EXPECT_QUIC_BUG(QuicServerSessionBasePeer::GetOrCreateDynamicStream( session_.get(), GetNthClientInitiatedId(0)), "ShouldCreateIncomingDynamicStream called when disconnected"); } class MockQuicCryptoServerStream : public QuicCryptoServerStream { public: explicit MockQuicCryptoServerStream( const QuicCryptoServerConfig* crypto_config, QuicCompressedCertsCache* compressed_certs_cache, QuicServerSessionBase* session, QuicCryptoServerStream::Helper* helper) : QuicCryptoServerStream( crypto_config, compressed_certs_cache, GetQuicReloadableFlag(enable_quic_stateless_reject_support), session, helper) {} ~MockQuicCryptoServerStream() override {} MOCK_METHOD1(SendServerConfigUpdate, void(const CachedNetworkParameters* cached_network_parameters)); private: DISALLOW_COPY_AND_ASSIGN(MockQuicCryptoServerStream); }; TEST_P(QuicServerSessionBaseTest, BandwidthEstimates) { // Test that bandwidth estimate updates are sent to the client, only when // bandwidth resumption is enabled, the bandwidth estimate has changed // sufficiently, enough time has passed, // and we don't have any other data to write. // Client has sent kBWRE connection option to trigger bandwidth resumption. 
QuicTagVector copt; copt.push_back(kBWRE); QuicConfigPeer::SetReceivedConnectionOptions(session_->config(), copt); session_->OnConfigNegotiated(); EXPECT_TRUE( QuicServerSessionBasePeer::IsBandwidthResumptionEnabled(session_.get())); int32_t bandwidth_estimate_kbytes_per_second = 123; int32_t max_bandwidth_estimate_kbytes_per_second = 134; int32_t max_bandwidth_estimate_timestamp = 1122334455; const QuicString serving_region = "not a real region"; session_->set_serving_region(serving_region); if (GetQuicReloadableFlag(quic_register_streams_early2) && GetQuicReloadableFlag(quic_register_static_streams)) { session_->UnregisterStreamPriority(kHeadersStreamId, /*is_static=*/true); } QuicServerSessionBasePeer::SetCryptoStream(session_.get(), nullptr); MockQuicCryptoServerStream* crypto_stream = new MockQuicCryptoServerStream(&crypto_config_, &compressed_certs_cache_, session_.get(), &stream_helper_); QuicServerSessionBasePeer::SetCryptoStream(session_.get(), crypto_stream); if (GetQuicReloadableFlag(quic_register_streams_early2) && GetQuicReloadableFlag(quic_register_static_streams)) { session_->RegisterStreamPriority(kHeadersStreamId, /*is_static=*/true, QuicStream::kDefaultPriority); } // Set some initial bandwidth values. QuicSentPacketManager* sent_packet_manager = QuicConnectionPeer::GetSentPacketManager(session_->connection()); QuicSustainedBandwidthRecorder& bandwidth_recorder = QuicSentPacketManagerPeer::GetBandwidthRecorder(sent_packet_manager); // Seed an rtt measurement equal to the initial default rtt. 
RttStats* rtt_stats = const_cast<RttStats*>(sent_packet_manager->GetRttStats()); rtt_stats->UpdateRtt(rtt_stats->initial_rtt(), QuicTime::Delta::Zero(), QuicTime::Zero()); QuicSustainedBandwidthRecorderPeer::SetBandwidthEstimate( &bandwidth_recorder, bandwidth_estimate_kbytes_per_second); QuicSustainedBandwidthRecorderPeer::SetMaxBandwidthEstimate( &bandwidth_recorder, max_bandwidth_estimate_kbytes_per_second, max_bandwidth_estimate_timestamp); // Queue up some pending data. session_->MarkConnectionLevelWriteBlocked(kCryptoStreamId); EXPECT_TRUE(session_->HasDataToWrite()); // There will be no update sent yet - not enough time has passed. QuicTime now = QuicTime::Zero(); session_->OnCongestionWindowChange(now); // Bandwidth estimate has now changed sufficiently but not enough time has // passed to send a Server Config Update. bandwidth_estimate_kbytes_per_second = bandwidth_estimate_kbytes_per_second * 1.6; session_->OnCongestionWindowChange(now); // Bandwidth estimate has now changed sufficiently and enough time has passed, // but not enough packets have been sent. int64_t srtt_ms = sent_packet_manager->GetRttStats()->smoothed_rtt().ToMilliseconds(); now = now + QuicTime::Delta::FromMilliseconds( kMinIntervalBetweenServerConfigUpdatesRTTs * srtt_ms); session_->OnCongestionWindowChange(now); // The connection no longer has pending data to be written. session_->OnCanWrite(); EXPECT_FALSE(session_->HasDataToWrite()); session_->OnCongestionWindowChange(now); // Bandwidth estimate has now changed sufficiently, enough time has passed, // and enough packets have been sent. SerializedPacket packet(1 + kMinPacketsBetweenServerConfigUpdates, PACKET_4BYTE_PACKET_NUMBER, nullptr, 1000, false, false); sent_packet_manager->OnPacketSent(&packet, 0, now, NOT_RETRANSMISSION, HAS_RETRANSMITTABLE_DATA); // Verify that the proto has exactly the values we expect. 
CachedNetworkParameters expected_network_params; expected_network_params.set_bandwidth_estimate_bytes_per_second( bandwidth_recorder.BandwidthEstimate().ToBytesPerSecond()); expected_network_params.set_max_bandwidth_estimate_bytes_per_second( bandwidth_recorder.MaxBandwidthEstimate().ToBytesPerSecond()); expected_network_params.set_max_bandwidth_timestamp_seconds( bandwidth_recorder.MaxBandwidthTimestamp()); expected_network_params.set_min_rtt_ms(session_->connection() ->sent_packet_manager() .GetRttStats() ->min_rtt() .ToMilliseconds()); expected_network_params.set_previous_connection_state( CachedNetworkParameters::CONGESTION_AVOIDANCE); expected_network_params.set_timestamp( session_->connection()->clock()->WallNow().ToUNIXSeconds()); expected_network_params.set_serving_region(serving_region); EXPECT_CALL(*crypto_stream, SendServerConfigUpdate(EqualsProto(expected_network_params))) .Times(1); EXPECT_CALL(*connection_, OnSendConnectionState(_)).Times(1); session_->OnCongestionWindowChange(now); } TEST_P(QuicServerSessionBaseTest, BandwidthResumptionExperiment) { // Test that if a client provides a CachedNetworkParameters with the same // serving region as the current server, and which was made within an hour of // now, that this data is passed down to the send algorithm. // Client has sent kBWRE connection option to trigger bandwidth resumption. QuicTagVector copt; copt.push_back(kBWRE); QuicConfigPeer::SetReceivedConnectionOptions(session_->config(), copt); const QuicString kTestServingRegion = "a serving region"; session_->set_serving_region(kTestServingRegion); // Set the time to be one hour + one second from the 0 baseline. connection_->AdvanceTime( QuicTime::Delta::FromSeconds(kNumSecondsPerHour + 1)); QuicCryptoServerStream* crypto_stream = static_cast<QuicCryptoServerStream*>( QuicSessionPeer::GetMutableCryptoStream(session_.get())); // No effect if no CachedNetworkParameters provided. 
EXPECT_CALL(*connection_, ResumeConnectionState(_, _)).Times(0); session_->OnConfigNegotiated(); // No effect if CachedNetworkParameters provided, but different serving // regions. CachedNetworkParameters cached_network_params; cached_network_params.set_bandwidth_estimate_bytes_per_second(1); cached_network_params.set_serving_region("different serving region"); crypto_stream->SetPreviousCachedNetworkParams(cached_network_params); EXPECT_CALL(*connection_, ResumeConnectionState(_, _)).Times(0); session_->OnConfigNegotiated(); // Same serving region, but timestamp is too old, should have no effect. cached_network_params.set_serving_region(kTestServingRegion); cached_network_params.set_timestamp(0); crypto_stream->SetPreviousCachedNetworkParams(cached_network_params); EXPECT_CALL(*connection_, ResumeConnectionState(_, _)).Times(0); session_->OnConfigNegotiated(); // Same serving region, and timestamp is recent: estimate is stored. cached_network_params.set_timestamp( connection_->clock()->WallNow().ToUNIXSeconds()); crypto_stream->SetPreviousCachedNetworkParams(cached_network_params); EXPECT_CALL(*connection_, ResumeConnectionState(_, _)).Times(1); session_->OnConfigNegotiated(); } TEST_P(QuicServerSessionBaseTest, BandwidthMaxEnablesResumption) { EXPECT_FALSE( QuicServerSessionBasePeer::IsBandwidthResumptionEnabled(session_.get())); // Client has sent kBWMX connection option to trigger bandwidth resumption. 
QuicTagVector copt; copt.push_back(kBWMX); QuicConfigPeer::SetReceivedConnectionOptions(session_->config(), copt); session_->OnConfigNegotiated(); EXPECT_TRUE( QuicServerSessionBasePeer::IsBandwidthResumptionEnabled(session_.get())); } TEST_P(QuicServerSessionBaseTest, NoBandwidthResumptionByDefault) { EXPECT_FALSE( QuicServerSessionBasePeer::IsBandwidthResumptionEnabled(session_.get())); session_->OnConfigNegotiated(); EXPECT_FALSE( QuicServerSessionBasePeer::IsBandwidthResumptionEnabled(session_.get())); } // Tests which check the lifetime management of data members of // QuicCryptoServerStream objects when async GetProof is in use. class StreamMemberLifetimeTest : public QuicServerSessionBaseTest { public: StreamMemberLifetimeTest() : QuicServerSessionBaseTest( std::unique_ptr<FakeProofSource>(new FakeProofSource())), crypto_config_peer_(&crypto_config_) { GetFakeProofSource()->Activate(); } FakeProofSource* GetFakeProofSource() const { return static_cast<FakeProofSource*>(crypto_config_peer_.GetProofSource()); } private: QuicCryptoServerConfigPeer crypto_config_peer_; }; INSTANTIATE_TEST_CASE_P(StreamMemberLifetimeTests, StreamMemberLifetimeTest, ::testing::ValuesIn(AllSupportedVersions())); // Trigger an operation which causes an async invocation of // ProofSource::GetProof. Delay the completion of the operation until after the // stream has been destroyed, and verify that there are no memory bugs. 
TEST_P(StreamMemberLifetimeTest, Basic) { SetQuicReloadableFlag(enable_quic_stateless_reject_support, true); SetQuicReloadableFlag(quic_use_cheap_stateless_rejects, true); const QuicClock* clock = helper_.GetClock(); ParsedQuicVersion version = AllSupportedVersions().front(); CryptoHandshakeMessage chlo = crypto_test_utils::GenerateDefaultInchoateCHLO( clock, version.transport_version, &crypto_config_); chlo.SetVector(kCOPT, QuicTagVector{kSREJ}); std::vector<ParsedQuicVersion> packet_version_list = {version}; std::unique_ptr<QuicEncryptedPacket> packet(ConstructEncryptedPacket( 1, true, false, 1, QuicString(chlo.GetSerialized(Perspective::IS_CLIENT) .AsStringPiece() .as_string()), PACKET_8BYTE_CONNECTION_ID, PACKET_4BYTE_PACKET_NUMBER, &packet_version_list)); EXPECT_CALL(stream_helper_, CanAcceptClientHello(_, _, _)) .WillOnce(testing::Return(true)); EXPECT_CALL(stream_helper_, GenerateConnectionIdForReject(_)) .WillOnce(testing::Return(12345)); // Set the current packet QuicConnectionPeer::SetCurrentPacket(session_->connection(), packet->AsStringPiece().as_string()); // Yes, this is horrible. But it's the easiest way to trigger the behavior we // need to exercise. QuicCryptoServerStreamBase* crypto_stream = const_cast<QuicCryptoServerStreamBase*>(session_->crypto_stream()); // Feed the CHLO into the crypto stream, which will trigger a call to // ProofSource::GetProof crypto_test_utils::SendHandshakeMessageToStream(crypto_stream, chlo, Perspective::IS_CLIENT); ASSERT_EQ(GetFakeProofSource()->NumPendingCallbacks(), 1); // Destroy the stream session_.reset(); // Allow the async ProofSource::GetProof call to complete. Verify (under // asan) that this does not result in accesses to any freed memory from the // session or its subobjects. GetFakeProofSource()->InvokePendingCallback(0); } } // namespace } // namespace test } // namespace net
null
null
null
null
3,964
50,937
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
50,937
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef UI_WM_CORE_BASE_FOCUS_RULES_H_ #define UI_WM_CORE_BASE_FOCUS_RULES_H_ #include "base/compiler_specific.h" #include "base/macros.h" #include "ui/wm/core/focus_rules.h" namespace wm { // A set of basic focus and activation rules. Specializations should most likely // subclass this and call up to these methods rather than reimplementing them. class WM_CORE_EXPORT BaseFocusRules : public FocusRules { protected: BaseFocusRules(); ~BaseFocusRules() override; // Returns true if the children of |window| can be activated. virtual bool SupportsChildActivation(aura::Window* window) const = 0; // Returns true if |window| is considered visible for activation purposes. virtual bool IsWindowConsideredVisibleForActivation( aura::Window* window) const; // Overridden from FocusRules: bool IsToplevelWindow(aura::Window* window) const override; bool CanActivateWindow(aura::Window* window) const override; bool CanFocusWindow(aura::Window* window, const ui::Event* event) const override; aura::Window* GetToplevelWindow(aura::Window* window) const override; aura::Window* GetActivatableWindow(aura::Window* window) const override; aura::Window* GetFocusableWindow(aura::Window* window) const override; aura::Window* GetNextActivatableWindow(aura::Window* ignore) const override; private: DISALLOW_COPY_AND_ASSIGN(BaseFocusRules); }; } // namespace wm #endif // UI_WM_CORE_BASE_FOCUS_RULES_H_
null
null
null
null
47,800
1,536
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
166,531
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#include <linux/ceph/ceph_debug.h> #include <linux/err.h> #include <linux/module.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/ceph/decode.h> #include <linux/ceph/auth.h> #include "auth_none.h" static void reset(struct ceph_auth_client *ac) { struct ceph_auth_none_info *xi = ac->private; xi->starting = true; } static void destroy(struct ceph_auth_client *ac) { kfree(ac->private); ac->private = NULL; } static int is_authenticated(struct ceph_auth_client *ac) { struct ceph_auth_none_info *xi = ac->private; return !xi->starting; } static int should_authenticate(struct ceph_auth_client *ac) { struct ceph_auth_none_info *xi = ac->private; return xi->starting; } static int ceph_auth_none_build_authorizer(struct ceph_auth_client *ac, struct ceph_none_authorizer *au) { void *p = au->buf; void *const end = p + sizeof(au->buf); int ret; ceph_encode_8_safe(&p, end, 1, e_range); ret = ceph_auth_entity_name_encode(ac->name, &p, end); if (ret < 0) return ret; ceph_encode_64_safe(&p, end, ac->global_id, e_range); au->buf_len = p - (void *)au->buf; dout("%s built authorizer len %d\n", __func__, au->buf_len); return 0; e_range: return -ERANGE; } static int build_request(struct ceph_auth_client *ac, void *buf, void *end) { return 0; } /* * the generic auth code decode the global_id, and we carry no actual * authenticate state, so nothing happens here. */ static int handle_reply(struct ceph_auth_client *ac, int result, void *buf, void *end) { struct ceph_auth_none_info *xi = ac->private; xi->starting = false; return result; } static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a) { kfree(a); } /* * build an 'authorizer' with our entity_name and global_id. it is * identical for all services we connect to. 
*/ static int ceph_auth_none_create_authorizer( struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth) { struct ceph_none_authorizer *au; int ret; au = kmalloc(sizeof(*au), GFP_NOFS); if (!au) return -ENOMEM; au->base.destroy = ceph_auth_none_destroy_authorizer; ret = ceph_auth_none_build_authorizer(ac, au); if (ret) { kfree(au); return ret; } auth->authorizer = (struct ceph_authorizer *) au; auth->authorizer_buf = au->buf; auth->authorizer_buf_len = au->buf_len; auth->authorizer_reply_buf = au->reply_buf; auth->authorizer_reply_buf_len = sizeof (au->reply_buf); return 0; } static const struct ceph_auth_client_ops ceph_auth_none_ops = { .name = "none", .reset = reset, .destroy = destroy, .is_authenticated = is_authenticated, .should_authenticate = should_authenticate, .build_request = build_request, .handle_reply = handle_reply, .create_authorizer = ceph_auth_none_create_authorizer, }; int ceph_auth_none_init(struct ceph_auth_client *ac) { struct ceph_auth_none_info *xi; dout("ceph_auth_none_init %p\n", ac); xi = kzalloc(sizeof(*xi), GFP_NOFS); if (!xi) return -ENOMEM; xi->starting = true; ac->protocol = CEPH_AUTH_NONE; ac->private = xi; ac->ops = &ceph_auth_none_ops; return 0; }
null
null
null
null
74,879
44,762
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
44,762
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef PPAPI_PROXY_COMPOSITOR_LAYER_RESOURCE_H_ #define PPAPI_PROXY_COMPOSITOR_LAYER_RESOURCE_H_ #include <stdint.h> #include "base/macros.h" #include "ppapi/c/ppb_compositor_layer.h" #include "ppapi/proxy/plugin_resource.h" #include "ppapi/proxy/ppapi_proxy_export.h" #include "ppapi/shared_impl/compositor_layer_data.h" #include "ppapi/shared_impl/scoped_pp_resource.h" #include "ppapi/thunk/ppb_compositor_layer_api.h" namespace gpu { struct SyncToken; } namespace ppapi { namespace proxy { class CompositorResource; class PPAPI_PROXY_EXPORT CompositorLayerResource : public PluginResource, public thunk::PPB_CompositorLayer_API { public: // Release callback for texture or image layer. typedef base::Callback<void(int32_t, const gpu::SyncToken&, bool)> ReleaseCallback; CompositorLayerResource(Connection connection, PP_Instance instance, const CompositorResource* compositor); const CompositorLayerData& data() const { return data_; } const ReleaseCallback& release_callback() const { return release_callback_; } void ResetReleaseCallback() { release_callback_.Reset(); } void Invalidate() { compositor_ = NULL; } private: enum LayerType { TYPE_COLOR, TYPE_TEXTURE, TYPE_IMAGE, }; ~CompositorLayerResource() override; // Resource overrides: thunk::PPB_CompositorLayer_API* AsPPB_CompositorLayer_API() override; // thunk::PPB_Compositor_API overrides: int32_t SetColor(float red, float green, float blue, float alpha, const PP_Size* size) override; int32_t SetTexture0_1( PP_Resource context, uint32_t texture, const PP_Size* size, const scoped_refptr<ppapi::TrackedCallback>& callback) override; int32_t SetTexture( PP_Resource context, uint32_t target, uint32_t texture, const PP_Size* size, const scoped_refptr<TrackedCallback>& callback) override; int32_t SetImage( PP_Resource image_data, const PP_Size* size, const 
scoped_refptr<TrackedCallback>& callback) override; int32_t SetClipRect(const PP_Rect* rect) override; int32_t SetTransform(const float matrix[16]) override; int32_t SetOpacity(float opacity) override; int32_t SetBlendMode(PP_BlendMode mode) override; int32_t SetSourceRect(const PP_FloatRect* rect) override; int32_t SetPremultipliedAlpha(PP_Bool premult) override; bool SetType(LayerType type); int32_t CheckForSetTextureAndImage( LayerType type, const scoped_refptr<TrackedCallback>& release_callback); // The CompositorResource which own the layer. The layer is invalidated if // compositor_ is NULL. const CompositorResource* compositor_; // Release callback for uncommitted texture or image. When CommitLayers() on // the compositor_ is called, the callback will be copied into a map in the // compositor_, and it will be reset to NULL. ReleaseCallback release_callback_; // Size of texture or image. It is used to verify the rect arg of // SetSourceRect(). PP_FloatSize source_size_; // Layer data. CompositorLayerData data_; DISALLOW_COPY_AND_ASSIGN(CompositorLayerResource); }; } // namespace proxy } // namespace ppapi #endif // PPAPI_PROXY_COMPOSITOR_LAYER_RESOURCE_H_
null
null
null
null
41,625
16,157
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
181,152
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (c) 2003-2012 Broadcom Corporation * All Rights Reserved * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the Broadcom * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _NETLOGIC_MULTI_NODE_H_ #define _NETLOGIC_MULTI_NODE_H_ #ifndef CONFIG_NLM_MULTINODE #define NLM_NR_NODES 1 #else #if defined(CONFIG_NLM_MULTINODE_2) #define NLM_NR_NODES 2 #elif defined(CONFIG_NLM_MULTINODE_4) #define NLM_NR_NODES 4 #else #define NLM_NR_NODES 1 #endif #endif #define NLM_THREADS_PER_CORE 4 struct nlm_soc_info { unsigned long coremask; /* cores enabled on the soc */ unsigned long ebase; /* not used now */ uint64_t irqmask; /* EIMR for the node */ uint64_t sysbase; /* only for XLP - sys block base */ uint64_t picbase; /* PIC block base */ spinlock_t piclock; /* lock for PIC access */ cpumask_t cpumask; /* logical cpu mask for node */ unsigned int socbus; }; extern struct nlm_soc_info nlm_nodes[NLM_NR_NODES]; #define nlm_get_node(i) (&nlm_nodes[i]) #define nlm_node_present(n) ((n) >= 0 && (n) < NLM_NR_NODES && \ nlm_get_node(n)->coremask != 0) #ifdef CONFIG_CPU_XLR #define nlm_current_node() (&nlm_nodes[0]) #else #define nlm_current_node() (&nlm_nodes[nlm_nodeid()]) #endif void nlm_node_init(int node); #endif
null
null
null
null
89,499
36,308
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
201,303
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (C) 2011 Dmitry Eremin-Solenikov * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <[email protected]> * and Markus Demleitner <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs, * that is iMac G5 and latest single CPU desktop. */ #undef DEBUG #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/cpufreq.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/mutex.h> #include <linux/time.h> #include <linux/of_device.h> #define DBG(fmt...) pr_debug(fmt) /* see 970FX user manual */ #define SCOM_PCR 0x0aa001 /* PCR scom addr */ #define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */ #define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */ #define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */ #define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */ #define PCR_SPEED_MASK 0x000e0000U /* speed mask */ #define PCR_SPEED_SHIFT 17 #define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */ #define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */ #define PCR_TARGET_TIME_MASK 0x00006000U /* target time */ #define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */ #define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */ #define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */ #define SCOM_PSR 0x408001 /* PSR scom addr */ /* warning: PSR is a 64 bits register */ #define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */ #define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */ #define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */ #define PSR_CUR_SPEED_SHIFT (56) /* * The G5 only supports two frequencies 
(Quarter speed is not supported) */ #define CPUFREQ_HIGH 0 #define CPUFREQ_LOW 1 static struct cpufreq_frequency_table maple_cpu_freqs[] = { {0, CPUFREQ_HIGH, 0}, {0, CPUFREQ_LOW, 0}, {0, 0, CPUFREQ_TABLE_END}, }; /* Power mode data is an array of the 32 bits PCR values to use for * the various frequencies, retrieved from the device-tree */ static int maple_pmode_cur; static const u32 *maple_pmode_data; static int maple_pmode_max; /* * SCOM based frequency switching for 970FX rev3 */ static int maple_scom_switch_freq(int speed_mode) { unsigned long flags; int to; local_irq_save(flags); /* Clear PCR high */ scom970_write(SCOM_PCR, 0); /* Clear PCR low */ scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0); /* Set PCR low */ scom970_write(SCOM_PCR, PCR_HILO_SELECT | maple_pmode_data[speed_mode]); /* Wait for completion */ for (to = 0; to < 10; to++) { unsigned long psr = scom970_read(SCOM_PSR); if ((psr & PSR_CMD_RECEIVED) == 0 && (((psr >> PSR_CUR_SPEED_SHIFT) ^ (maple_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3) == 0) break; if (psr & PSR_CMD_COMPLETED) break; udelay(100); } local_irq_restore(flags); maple_pmode_cur = speed_mode; ppc_proc_freq = maple_cpu_freqs[speed_mode].frequency * 1000ul; return 0; } static int maple_scom_query_freq(void) { unsigned long psr = scom970_read(SCOM_PSR); int i; for (i = 0; i <= maple_pmode_max; i++) if ((((psr >> PSR_CUR_SPEED_SHIFT) ^ (maple_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0) break; return i; } /* * Common interface to the cpufreq core */ static int maple_cpufreq_target(struct cpufreq_policy *policy, unsigned int index) { return maple_scom_switch_freq(index); } static unsigned int maple_cpufreq_get_speed(unsigned int cpu) { return maple_cpu_freqs[maple_pmode_cur].frequency; } static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy) { return cpufreq_generic_init(policy, maple_cpu_freqs, 12000); } static struct cpufreq_driver maple_cpufreq_driver = { .name = "maple", .flags = CPUFREQ_CONST_LOOPS, .init = 
maple_cpufreq_cpu_init, .verify = cpufreq_generic_frequency_table_verify, .target_index = maple_cpufreq_target, .get = maple_cpufreq_get_speed, .attr = cpufreq_generic_attr, }; static int __init maple_cpufreq_init(void) { struct device_node *cpunode; unsigned int psize; unsigned long max_freq; const u32 *valp; u32 pvr_hi; int rc = -ENODEV; /* * Behave here like powermac driver which checks machine compatibility * to ease merging of two drivers in future. */ if (!of_machine_is_compatible("Momentum,Maple") && !of_machine_is_compatible("Momentum,Apache")) return 0; /* Get first CPU node */ cpunode = of_cpu_device_node_get(0); if (cpunode == NULL) { pr_err("Can't find any CPU 0 node\n"); goto bail_noprops; } /* Check 970FX for now */ /* we actually don't care on which CPU to access PVR */ pvr_hi = PVR_VER(mfspr(SPRN_PVR)); if (pvr_hi != 0x3c && pvr_hi != 0x44) { pr_err("Unsupported CPU version (%x)\n", pvr_hi); goto bail_noprops; } /* Look for the powertune data in the device-tree */ /* * On Maple this property is provided by PIBS in dual-processor config, * not provided by PIBS in CPU0 config and also not provided by SLOF, * so YMMV */ maple_pmode_data = of_get_property(cpunode, "power-mode-data", &psize); if (!maple_pmode_data) { DBG("No power-mode-data !\n"); goto bail_noprops; } maple_pmode_max = psize / sizeof(u32) - 1; /* * From what I see, clock-frequency is always the maximal frequency. * The current driver can not slew sysclk yet, so we really only deal * with powertune steps for now. We also only implement full freq and * half freq in this version. So far, I haven't yet seen a machine * supporting anything else. */ valp = of_get_property(cpunode, "clock-frequency", NULL); if (!valp) return -ENODEV; max_freq = (*valp)/1000; maple_cpu_freqs[0].frequency = max_freq; maple_cpu_freqs[1].frequency = max_freq/2; /* Force apply current frequency to make sure everything is in * sync (voltage is right for example). Firmware may leave us with * a strange setting ... 
*/ msleep(10); maple_pmode_cur = -1; maple_scom_switch_freq(maple_scom_query_freq()); pr_info("Registering Maple CPU frequency driver\n"); pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", maple_cpu_freqs[1].frequency/1000, maple_cpu_freqs[0].frequency/1000, maple_cpu_freqs[maple_pmode_cur].frequency/1000); rc = cpufreq_register_driver(&maple_cpufreq_driver); of_node_put(cpunode); return rc; bail_noprops: of_node_put(cpunode); return rc; } module_init(maple_cpufreq_init); MODULE_LICENSE("GPL");
null
null
null
null
109,650
18,513
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
18,513
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/subresource_filter/content/browser/subresource_filter_observer_test_utils.h" #include "base/logging.h" #include "content/public/browser/navigation_handle.h" namespace subresource_filter { TestSubresourceFilterObserver::TestSubresourceFilterObserver( content::WebContents* web_contents) : scoped_observer_(this) { auto* manager = SubresourceFilterObserverManager::FromWebContents(web_contents); DCHECK(manager); scoped_observer_.Add(manager); Observe(web_contents); } TestSubresourceFilterObserver::~TestSubresourceFilterObserver() {} void TestSubresourceFilterObserver::OnSubresourceFilterGoingAway() { scoped_observer_.RemoveAll(); } void TestSubresourceFilterObserver::OnSafeBrowsingCheckComplete( content::NavigationHandle* navigation_handle, safe_browsing::SBThreatType threat_type, const safe_browsing::ThreatMetadata& threat_metadata) { DCHECK(navigation_handle->IsInMainFrame()); safe_browsing_checks_[navigation_handle->GetURL()] = std::make_pair(threat_type, threat_metadata); } void TestSubresourceFilterObserver::OnPageActivationComputed( content::NavigationHandle* navigation_handle, ActivationDecision activation_decision, const ActivationState& activation_state) { DCHECK(navigation_handle->IsInMainFrame()); page_activations_[navigation_handle->GetURL()] = activation_decision; pending_activations_[navigation_handle] = activation_decision; } void TestSubresourceFilterObserver::OnSubframeNavigationEvaluated( content::NavigationHandle* navigation_handle, LoadPolicy load_policy) { subframe_load_evaluations_[navigation_handle->GetURL()] = load_policy; } void TestSubresourceFilterObserver::DidFinishNavigation( content::NavigationHandle* navigation_handle) { auto it = pending_activations_.find(navigation_handle); bool did_compute = it != pending_activations_.end(); if 
(!navigation_handle->IsInMainFrame() || !navigation_handle->HasCommitted() || navigation_handle->IsErrorPage()) { if (did_compute) pending_activations_.erase(it); return; } if (did_compute) { last_committed_activation_ = it->second; pending_activations_.erase(it); } else { last_committed_activation_ = base::Optional<ActivationDecision>(); } } base::Optional<ActivationDecision> TestSubresourceFilterObserver::GetPageActivation(const GURL& url) const { auto it = page_activations_.find(url); if (it != page_activations_.end()) return it->second; return base::Optional<ActivationDecision>(); } base::Optional<LoadPolicy> TestSubresourceFilterObserver::GetSubframeLoadPolicy( const GURL& url) const { auto it = subframe_load_evaluations_.find(url); if (it != subframe_load_evaluations_.end()) return it->second; return base::Optional<LoadPolicy>(); } base::Optional<ActivationDecision> TestSubresourceFilterObserver::GetPageActivationForLastCommittedLoad() const { return last_committed_activation_; } base::Optional<TestSubresourceFilterObserver::SafeBrowsingCheck> TestSubresourceFilterObserver::GetSafeBrowsingResult(const GURL& url) const { auto it = safe_browsing_checks_.find(url); if (it != safe_browsing_checks_.end()) return it->second; return base::Optional<SafeBrowsingCheck>(); } } // namespace subresource_filter
null
null
null
null
15,376
1,790
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
166,785
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (C) ST-Ericsson AB 2010 * Author: Sjur Brendeland * License terms: GNU General Public License (GPL) version 2 */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ #include <linux/string.h> #include <linux/skbuff.h> #include <linux/hardirq.h> #include <linux/export.h> #include <net/caif/cfpkt.h> #define PKT_PREFIX 48 #define PKT_POSTFIX 2 #define PKT_LEN_WHEN_EXTENDING 128 #define PKT_ERROR(pkt, errmsg) \ do { \ cfpkt_priv(pkt)->erronous = true; \ skb_reset_tail_pointer(&pkt->skb); \ pr_warn(errmsg); \ } while (0) struct cfpktq { struct sk_buff_head head; atomic_t count; /* Lock protects count updates */ spinlock_t lock; }; /* * net/caif/ is generic and does not * understand SKB, so we do this typecast */ struct cfpkt { struct sk_buff skb; }; /* Private data inside SKB */ struct cfpkt_priv_data { struct dev_info dev_info; bool erronous; }; static inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt) { return (struct cfpkt_priv_data *) pkt->skb.cb; } static inline bool is_erronous(struct cfpkt *pkt) { return cfpkt_priv(pkt)->erronous; } static inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt) { return &pkt->skb; } static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) { return (struct cfpkt *) skb; } struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt) { struct cfpkt *pkt = skb_to_pkt(nativepkt); cfpkt_priv(pkt)->erronous = false; return pkt; } EXPORT_SYMBOL(cfpkt_fromnative); void *cfpkt_tonative(struct cfpkt *pkt) { return (void *) pkt; } EXPORT_SYMBOL(cfpkt_tonative); static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx) { struct sk_buff *skb; if (likely(in_interrupt())) skb = alloc_skb(len + pfx, GFP_ATOMIC); else skb = alloc_skb(len + pfx, GFP_KERNEL); if (unlikely(skb == NULL)) return NULL; skb_reserve(skb, pfx); return skb_to_pkt(skb); } inline struct cfpkt *cfpkt_create(u16 len) { return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); } void cfpkt_destroy(struct cfpkt *pkt) { struct sk_buff *skb = 
pkt_to_skb(pkt); kfree_skb(skb); } inline bool cfpkt_more(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); return skb->len > 0; } int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); if (skb_headlen(skb) >= len) { memcpy(data, skb->data, len); return 0; } return !cfpkt_extr_head(pkt, data, len) && !cfpkt_add_head(pkt, data, len); } int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); u8 *from; if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(len > skb->len)) { PKT_ERROR(pkt, "read beyond end of packet\n"); return -EPROTO; } if (unlikely(len > skb_headlen(skb))) { if (unlikely(skb_linearize(skb) != 0)) { PKT_ERROR(pkt, "linearize failed\n"); return -EPROTO; } } from = skb_pull(skb, len); from -= len; if (data) memcpy(data, from, len); return 0; } EXPORT_SYMBOL(cfpkt_extr_head); int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); u8 *data = dta; u8 *from; if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(skb_linearize(skb) != 0)) { PKT_ERROR(pkt, "linearize failed\n"); return -EPROTO; } if (unlikely(skb->data + len > skb_tail_pointer(skb))) { PKT_ERROR(pkt, "read beyond end of packet\n"); return -EPROTO; } from = skb_tail_pointer(skb) - len; skb_trim(skb, skb->len - len); memcpy(data, from, len); return 0; } int cfpkt_pad_trail(struct cfpkt *pkt, u16 len) { return cfpkt_add_body(pkt, NULL, len); } int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); struct sk_buff *lastskb; u8 *to; u16 addlen = 0; if (unlikely(is_erronous(pkt))) return -EPROTO; lastskb = skb; /* Check whether we need to add space at the tail */ if (unlikely(skb_tailroom(skb) < len)) { if (likely(len < PKT_LEN_WHEN_EXTENDING)) addlen = PKT_LEN_WHEN_EXTENDING; else addlen = len; } /* Check whether we need to change the SKB before writing to the tail */ if (unlikely((addlen > 
0) || skb_cloned(skb) || skb_shared(skb))) { /* Make sure data is writable */ if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) { PKT_ERROR(pkt, "cow failed\n"); return -EPROTO; } } /* All set to put the last SKB and optionally write data there. */ to = pskb_put(skb, lastskb, len); if (likely(data)) memcpy(to, data, len); return 0; } inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data) { return cfpkt_add_body(pkt, &data, 1); } int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); struct sk_buff *lastskb; u8 *to; const u8 *data = data2; int ret; if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(skb_headroom(skb) < len)) { PKT_ERROR(pkt, "no headroom\n"); return -EPROTO; } /* Make sure data is writable */ ret = skb_cow_data(skb, 0, &lastskb); if (unlikely(ret < 0)) { PKT_ERROR(pkt, "cow failed\n"); return ret; } to = skb_push(skb, len); memcpy(to, data, len); return 0; } EXPORT_SYMBOL(cfpkt_add_head); inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len) { return cfpkt_add_body(pkt, data, len); } inline u16 cfpkt_getlen(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); return skb->len; } int cfpkt_iterate(struct cfpkt *pkt, u16 (*iter_func)(u16, void *, u16), u16 data) { /* * Don't care about the performance hit of linearizing, * Checksum should not be used on high-speed interfaces anyway. 
*/ if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(skb_linearize(&pkt->skb) != 0)) { PKT_ERROR(pkt, "linearize failed\n"); return -EPROTO; } return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt)); } int cfpkt_setlen(struct cfpkt *pkt, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); if (unlikely(is_erronous(pkt))) return -EPROTO; if (likely(len <= skb->len)) { if (unlikely(skb->data_len)) ___pskb_trim(skb, len); else skb_trim(skb, len); return cfpkt_getlen(pkt); } /* Need to expand SKB */ if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len))) PKT_ERROR(pkt, "skb_pad_trail failed\n"); return cfpkt_getlen(pkt); } struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, struct cfpkt *addpkt, u16 expectlen) { struct sk_buff *dst = pkt_to_skb(dstpkt); struct sk_buff *add = pkt_to_skb(addpkt); u16 addlen = skb_headlen(add); u16 neededtailspace; struct sk_buff *tmp; u16 dstlen; u16 createlen; if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) { return dstpkt; } if (expectlen > addlen) neededtailspace = expectlen; else neededtailspace = addlen; if (dst->tail + neededtailspace > dst->end) { /* Create a dumplicate of 'dst' with more tail space */ struct cfpkt *tmppkt; dstlen = skb_headlen(dst); createlen = dstlen + neededtailspace; tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX); if (tmppkt == NULL) return NULL; tmp = pkt_to_skb(tmppkt); skb_set_tail_pointer(tmp, dstlen); tmp->len = dstlen; memcpy(tmp->data, dst->data, dstlen); cfpkt_destroy(dstpkt); dst = tmp; } memcpy(skb_tail_pointer(dst), add->data, skb_headlen(add)); cfpkt_destroy(addpkt); dst->tail += addlen; dst->len += addlen; return skb_to_pkt(dst); } struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) { struct sk_buff *skb2; struct sk_buff *skb = pkt_to_skb(pkt); struct cfpkt *tmppkt; u8 *split = skb->data + pos; u16 len2nd = skb_tail_pointer(skb) - split; if (unlikely(is_erronous(pkt))) return NULL; if (skb->data + pos > skb_tail_pointer(skb)) { PKT_ERROR(pkt, "trying to split beyond end 
of packet\n"); return NULL; } /* Create a new packet for the second part of the data */ tmppkt = cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, PKT_PREFIX); if (tmppkt == NULL) return NULL; skb2 = pkt_to_skb(tmppkt); if (skb2 == NULL) return NULL; /* Reduce the length of the original packet */ skb_set_tail_pointer(skb, pos); skb->len = pos; memcpy(skb2->data, split, len2nd); skb2->tail += len2nd; skb2->len += len2nd; skb2->priority = skb->priority; return skb_to_pkt(skb2); } bool cfpkt_erroneous(struct cfpkt *pkt) { return cfpkt_priv(pkt)->erronous; } struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) { return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; } EXPORT_SYMBOL(cfpkt_info); void cfpkt_set_prio(struct cfpkt *pkt, int prio) { pkt_to_skb(pkt)->priority = prio; } EXPORT_SYMBOL(cfpkt_set_prio);
null
null
null
null
75,133
626
22,29,39
train_val
23adbe12ef7d3d4195e80800ab36b37bee28cd03
165,621
linux
1
https://github.com/torvalds/linux
2014-06-10 13:57:22-07:00
int inode_change_ok(const struct inode *inode, struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; /* * First check size constraints. These can't be overriden using * ATTR_FORCE. */ if (ia_valid & ATTR_SIZE) { int error = inode_newsize_ok(inode, attr->ia_size); if (error) return error; } /* If force is set do it anyway. */ if (ia_valid & ATTR_FORCE) return 0; /* Make sure a caller can chown. */ if ((ia_valid & ATTR_UID) && (!uid_eq(current_fsuid(), inode->i_uid) || !uid_eq(attr->ia_uid, inode->i_uid)) && !inode_capable(inode, CAP_CHOWN)) return -EPERM; /* Make sure caller can chgrp. */ if ((ia_valid & ATTR_GID) && (!uid_eq(current_fsuid(), inode->i_uid) || (!in_group_p(attr->ia_gid) && !gid_eq(attr->ia_gid, inode->i_gid))) && !inode_capable(inode, CAP_CHOWN)) return -EPERM; /* Make sure a caller can chmod. */ if (ia_valid & ATTR_MODE) { if (!inode_owner_or_capable(inode)) return -EPERM; /* Also check the setgid bit! */ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : inode->i_gid) && !inode_capable(inode, CAP_FSETID)) attr->ia_mode &= ~S_ISGID; } /* Check for setting the inode time. */ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) { if (!inode_owner_or_capable(inode)) return -EPERM; } return 0; }
CVE-2014-4014
CWE-264
https://github.com/torvalds/linux/commit/23adbe12ef7d3d4195e80800ab36b37bee28cd03
High
3,515
67,754
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
67,754
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef REMOTING_IOS_CLIENT_GESTURES_H_ #define REMOTING_IOS_CLIENT_GESTURES_H_ #import <Foundation/Foundation.h> #import <UIKit/UIKit.h> @class RemotingClient; @protocol ClientGesturesDelegate<NSObject> - (void)keyboardShouldShow; - (void)keyboardShouldHide; - (void)menuShouldShow; @end @interface ClientGestures : NSObject - (instancetype)initWithView:(UIView*)view client:(RemotingClient*)client; @property(weak, nonatomic) id<ClientGesturesDelegate> delegate; @end #endif // REMOTING_IOS_CLIENT_GESTURES_H_
null
null
null
null
64,617
26,922
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
191,917
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * DMA driver for Xilinx ZynqMP DMA Engine * * Copyright (C) 2016 Xilinx, Inc. All rights reserved. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. */ #include <linux/bitops.h> #include <linux/dmapool.h> #include <linux/dma/xilinx_dma.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_dma.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/io-64-nonatomic-lo-hi.h> #include "../dmaengine.h" /* Register Offsets */ #define ZYNQMP_DMA_ISR 0x100 #define ZYNQMP_DMA_IMR 0x104 #define ZYNQMP_DMA_IER 0x108 #define ZYNQMP_DMA_IDS 0x10C #define ZYNQMP_DMA_CTRL0 0x110 #define ZYNQMP_DMA_CTRL1 0x114 #define ZYNQMP_DMA_DATA_ATTR 0x120 #define ZYNQMP_DMA_DSCR_ATTR 0x124 #define ZYNQMP_DMA_SRC_DSCR_WRD0 0x128 #define ZYNQMP_DMA_SRC_DSCR_WRD1 0x12C #define ZYNQMP_DMA_SRC_DSCR_WRD2 0x130 #define ZYNQMP_DMA_SRC_DSCR_WRD3 0x134 #define ZYNQMP_DMA_DST_DSCR_WRD0 0x138 #define ZYNQMP_DMA_DST_DSCR_WRD1 0x13C #define ZYNQMP_DMA_DST_DSCR_WRD2 0x140 #define ZYNQMP_DMA_DST_DSCR_WRD3 0x144 #define ZYNQMP_DMA_SRC_START_LSB 0x158 #define ZYNQMP_DMA_SRC_START_MSB 0x15C #define ZYNQMP_DMA_DST_START_LSB 0x160 #define ZYNQMP_DMA_DST_START_MSB 0x164 #define ZYNQMP_DMA_RATE_CTRL 0x18C #define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190 #define ZYNQMP_DMA_IRQ_DST_ACCT 0x194 #define ZYNQMP_DMA_CTRL2 0x200 /* Interrupt registers bit field definitions */ #define ZYNQMP_DMA_DONE BIT(10) #define ZYNQMP_DMA_AXI_WR_DATA BIT(9) #define ZYNQMP_DMA_AXI_RD_DATA BIT(8) #define ZYNQMP_DMA_AXI_RD_DST_DSCR BIT(7) #define ZYNQMP_DMA_AXI_RD_SRC_DSCR BIT(6) #define ZYNQMP_DMA_IRQ_DST_ACCT_ERR BIT(5) #define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR BIT(4) #define 
ZYNQMP_DMA_BYTE_CNT_OVRFL BIT(3) #define ZYNQMP_DMA_DST_DSCR_DONE BIT(2) #define ZYNQMP_DMA_INV_APB BIT(0) /* Control 0 register bit field definitions */ #define ZYNQMP_DMA_OVR_FETCH BIT(7) #define ZYNQMP_DMA_POINT_TYPE_SG BIT(6) #define ZYNQMP_DMA_RATE_CTRL_EN BIT(3) /* Control 1 register bit field definitions */ #define ZYNQMP_DMA_SRC_ISSUE GENMASK(4, 0) /* Data Attribute register bit field definitions */ #define ZYNQMP_DMA_ARBURST GENMASK(27, 26) #define ZYNQMP_DMA_ARCACHE GENMASK(25, 22) #define ZYNQMP_DMA_ARCACHE_OFST 22 #define ZYNQMP_DMA_ARQOS GENMASK(21, 18) #define ZYNQMP_DMA_ARQOS_OFST 18 #define ZYNQMP_DMA_ARLEN GENMASK(17, 14) #define ZYNQMP_DMA_ARLEN_OFST 14 #define ZYNQMP_DMA_AWBURST GENMASK(13, 12) #define ZYNQMP_DMA_AWCACHE GENMASK(11, 8) #define ZYNQMP_DMA_AWCACHE_OFST 8 #define ZYNQMP_DMA_AWQOS GENMASK(7, 4) #define ZYNQMP_DMA_AWQOS_OFST 4 #define ZYNQMP_DMA_AWLEN GENMASK(3, 0) #define ZYNQMP_DMA_AWLEN_OFST 0 /* Descriptor Attribute register bit field definitions */ #define ZYNQMP_DMA_AXCOHRNT BIT(8) #define ZYNQMP_DMA_AXCACHE GENMASK(7, 4) #define ZYNQMP_DMA_AXCACHE_OFST 4 #define ZYNQMP_DMA_AXQOS GENMASK(3, 0) #define ZYNQMP_DMA_AXQOS_OFST 0 /* Control register 2 bit field definitions */ #define ZYNQMP_DMA_ENABLE BIT(0) /* Buffer Descriptor definitions */ #define ZYNQMP_DMA_DESC_CTRL_STOP 0x10 #define ZYNQMP_DMA_DESC_CTRL_COMP_INT 0x4 #define ZYNQMP_DMA_DESC_CTRL_SIZE_256 0x2 #define ZYNQMP_DMA_DESC_CTRL_COHRNT 0x1 /* Interrupt Mask specific definitions */ #define ZYNQMP_DMA_INT_ERR (ZYNQMP_DMA_AXI_RD_DATA | \ ZYNQMP_DMA_AXI_WR_DATA | \ ZYNQMP_DMA_AXI_RD_DST_DSCR | \ ZYNQMP_DMA_AXI_RD_SRC_DSCR | \ ZYNQMP_DMA_INV_APB) #define ZYNQMP_DMA_INT_OVRFL (ZYNQMP_DMA_BYTE_CNT_OVRFL | \ ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \ ZYNQMP_DMA_IRQ_DST_ACCT_ERR) #define ZYNQMP_DMA_INT_DONE (ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE) #define ZYNQMP_DMA_INT_EN_DEFAULT_MASK (ZYNQMP_DMA_INT_DONE | \ ZYNQMP_DMA_INT_ERR | \ ZYNQMP_DMA_INT_OVRFL | \ ZYNQMP_DMA_DST_DSCR_DONE) /* 
Max number of descriptors per channel */ #define ZYNQMP_DMA_NUM_DESCS 32 /* Max transfer size per descriptor */ #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 /* Reset values for data attributes */ #define ZYNQMP_DMA_AXCACHE_VAL 0xF #define ZYNQMP_DMA_ARLEN_RST_VAL 0xF #define ZYNQMP_DMA_AWLEN_RST_VAL 0xF #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F #define ZYNQMP_DMA_IDS_DEFAULT_MASK 0xFFF /* Bus width in bits */ #define ZYNQMP_DMA_BUS_WIDTH_64 64 #define ZYNQMP_DMA_BUS_WIDTH_128 128 #define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size) #define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \ common) #define tx_to_desc(tx) container_of(tx, struct zynqmp_dma_desc_sw, \ async_tx) /** * struct zynqmp_dma_desc_ll - Hw linked list descriptor * @addr: Buffer address * @size: Size of the buffer * @ctrl: Control word * @nxtdscraddr: Next descriptor base address * @rsvd: Reserved field and for Hw internal use. */ struct zynqmp_dma_desc_ll { u64 addr; u32 size; u32 ctrl; u64 nxtdscraddr; u64 rsvd; }; __aligned(64) /** * struct zynqmp_dma_desc_sw - Per Transaction structure * @src: Source address for simple mode dma * @dst: Destination address for simple mode dma * @len: Transfer length for simple mode dma * @node: Node in the channel descriptor list * @tx_list: List head for the current transfer * @async_tx: Async transaction descriptor * @src_v: Virtual address of the src descriptor * @src_p: Physical address of the src descriptor * @dst_v: Virtual address of the dst descriptor * @dst_p: Physical address of the dst descriptor */ struct zynqmp_dma_desc_sw { u64 src; u64 dst; u32 len; struct list_head node; struct list_head tx_list; struct dma_async_tx_descriptor async_tx; struct zynqmp_dma_desc_ll *src_v; dma_addr_t src_p; struct zynqmp_dma_desc_ll *dst_v; dma_addr_t dst_p; }; /** * struct zynqmp_dma_chan - Driver specific DMA channel structure * @zdev: Driver specific device structure * @regs: Control registers offset * @lock: Descriptor operation lock * @pending_list: 
Descriptors waiting * @free_list: Descriptors free * @active_list: Descriptors active * @sw_desc_pool: SW descriptor pool * @done_list: Complete descriptors * @common: DMA common channel * @desc_pool_v: Statically allocated descriptor base * @desc_pool_p: Physical allocated descriptor base * @desc_free_cnt: Descriptor available count * @dev: The dma device * @irq: Channel IRQ * @is_dmacoherent: Tells whether dma operations are coherent or not * @tasklet: Cleanup work after irq * @idle : Channel status; * @desc_size: Size of the low level descriptor * @err: Channel has errors * @bus_width: Bus width * @src_burst_len: Source burst length * @dst_burst_len: Dest burst length * @clk_main: Pointer to main clock * @clk_apb: Pointer to apb clock */ struct zynqmp_dma_chan { struct zynqmp_dma_device *zdev; void __iomem *regs; spinlock_t lock; struct list_head pending_list; struct list_head free_list; struct list_head active_list; struct zynqmp_dma_desc_sw *sw_desc_pool; struct list_head done_list; struct dma_chan common; void *desc_pool_v; dma_addr_t desc_pool_p; u32 desc_free_cnt; struct device *dev; int irq; bool is_dmacoherent; struct tasklet_struct tasklet; bool idle; u32 desc_size; bool err; u32 bus_width; u32 src_burst_len; u32 dst_burst_len; struct clk *clk_main; struct clk *clk_apb; }; /** * struct zynqmp_dma_device - DMA device structure * @dev: Device Structure * @common: DMA device structure * @chan: Driver specific DMA channel */ struct zynqmp_dma_device { struct device *dev; struct dma_device common; struct zynqmp_dma_chan *chan; }; static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg, u64 value) { lo_hi_writeq(value, chan->regs + reg); } /** * zynqmp_dma_update_desc_to_ctrlr - Updates descriptor to the controller * @chan: ZynqMP DMA DMA channel pointer * @desc: Transaction descriptor pointer */ static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan, struct zynqmp_dma_desc_sw *desc) { dma_addr_t addr; addr = desc->src_p; 
zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr); addr = desc->dst_p; zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr); } /** * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor * @chan: ZynqMP DMA channel pointer * @desc: Hw descriptor pointer */ static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan, void *desc) { struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc; hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP; hw++; hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP; } /** * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor * @chan: ZynqMP DMA channel pointer * @sdesc: Hw descriptor pointer * @src: Source buffer address * @dst: Destination buffer address * @len: Transfer length * @prev: Previous hw descriptor pointer */ static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan, struct zynqmp_dma_desc_ll *sdesc, dma_addr_t src, dma_addr_t dst, size_t len, struct zynqmp_dma_desc_ll *prev) { struct zynqmp_dma_desc_ll *ddesc = sdesc + 1; sdesc->size = ddesc->size = len; sdesc->addr = src; ddesc->addr = dst; sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256; if (chan->is_dmacoherent) { sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT; ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT; } if (prev) { dma_addr_t addr = chan->desc_pool_p + ((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v); ddesc = prev + 1; prev->nxtdscraddr = addr; ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan); } } /** * zynqmp_dma_init - Initialize the channel * @chan: ZynqMP DMA channel pointer */ static void zynqmp_dma_init(struct zynqmp_dma_chan *chan) { u32 val; writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); val = readl(chan->regs + ZYNQMP_DMA_ISR); writel(val, chan->regs + ZYNQMP_DMA_ISR); if (chan->is_dmacoherent) { val = ZYNQMP_DMA_AXCOHRNT; val = (val & ~ZYNQMP_DMA_AXCACHE) | (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST); writel(val, chan->regs + 
ZYNQMP_DMA_DSCR_ATTR); } val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); if (chan->is_dmacoherent) { val = (val & ~ZYNQMP_DMA_ARCACHE) | (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST); val = (val & ~ZYNQMP_DMA_AWCACHE) | (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST); } writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); /* Clearing the interrupt account rgisters */ val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT); val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); chan->idle = true; } /** * zynqmp_dma_tx_submit - Submit DMA transaction * @tx: Async transaction descriptor pointer * * Return: cookie value */ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx) { struct zynqmp_dma_chan *chan = to_chan(tx->chan); struct zynqmp_dma_desc_sw *desc, *new; dma_cookie_t cookie; new = tx_to_desc(tx); spin_lock_bh(&chan->lock); cookie = dma_cookie_assign(tx); if (!list_empty(&chan->pending_list)) { desc = list_last_entry(&chan->pending_list, struct zynqmp_dma_desc_sw, node); if (!list_empty(&desc->tx_list)) desc = list_last_entry(&desc->tx_list, struct zynqmp_dma_desc_sw, node); desc->src_v->nxtdscraddr = new->src_p; desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP; desc->dst_v->nxtdscraddr = new->dst_p; desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP; } list_add_tail(&new->node, &chan->pending_list); spin_unlock_bh(&chan->lock); return cookie; } /** * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool * @chan: ZynqMP DMA channel pointer * * Return: The sw descriptor */ static struct zynqmp_dma_desc_sw * zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan) { struct zynqmp_dma_desc_sw *desc; spin_lock_bh(&chan->lock); desc = list_first_entry(&chan->free_list, struct zynqmp_dma_desc_sw, node); list_del(&desc->node); spin_unlock_bh(&chan->lock); INIT_LIST_HEAD(&desc->tx_list); /* Clear the src and dst descriptor memory */ memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan)); memset((void *)desc->dst_v, 0, 
ZYNQMP_DMA_DESC_SIZE(chan)); return desc; } /** * zynqmp_dma_free_descriptor - Issue pending transactions * @chan: ZynqMP DMA channel pointer * @sdesc: Transaction descriptor pointer */ static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan, struct zynqmp_dma_desc_sw *sdesc) { struct zynqmp_dma_desc_sw *child, *next; chan->desc_free_cnt++; list_add_tail(&sdesc->node, &chan->free_list); list_for_each_entry_safe(child, next, &sdesc->tx_list, node) { chan->desc_free_cnt++; list_move_tail(&child->node, &chan->free_list); } } /** * zynqmp_dma_free_desc_list - Free descriptors list * @chan: ZynqMP DMA channel pointer * @list: List to parse and delete the descriptor */ static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan, struct list_head *list) { struct zynqmp_dma_desc_sw *desc, *next; list_for_each_entry_safe(desc, next, list, node) zynqmp_dma_free_descriptor(chan, desc); } /** * zynqmp_dma_alloc_chan_resources - Allocate channel resources * @dchan: DMA channel * * Return: Number of descriptors on success and failure value on error */ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) { struct zynqmp_dma_chan *chan = to_chan(dchan); struct zynqmp_dma_desc_sw *desc; int i; chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS, GFP_KERNEL); if (!chan->sw_desc_pool) return -ENOMEM; chan->idle = true; chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS; INIT_LIST_HEAD(&chan->free_list); for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) { desc = chan->sw_desc_pool + i; dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); desc->async_tx.tx_submit = zynqmp_dma_tx_submit; list_add_tail(&desc->node, &chan->free_list); } chan->desc_pool_v = dma_zalloc_coherent(chan->dev, (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), &chan->desc_pool_p, GFP_KERNEL); if (!chan->desc_pool_v) return -ENOMEM; for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) { desc = chan->sw_desc_pool + i; desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v + (i * 
ZYNQMP_DMA_DESC_SIZE(chan) * 2)); desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1); desc->src_p = chan->desc_pool_p + (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2); desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan); } return ZYNQMP_DMA_NUM_DESCS; } /** * zynqmp_dma_start - Start DMA channel * @chan: ZynqMP DMA channel pointer */ static void zynqmp_dma_start(struct zynqmp_dma_chan *chan) { writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER); chan->idle = false; writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2); } /** * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt * @chan: ZynqMP DMA channel pointer * @status: Interrupt status value */ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) { u32 val; if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR) val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR) val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT); } static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) { u32 val; val = readl(chan->regs + ZYNQMP_DMA_CTRL0); val |= ZYNQMP_DMA_POINT_TYPE_SG; writel(val, chan->regs + ZYNQMP_DMA_CTRL0); val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); val = (val & ~ZYNQMP_DMA_ARLEN) | (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); val = (val & ~ZYNQMP_DMA_AWLEN) | (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); } /** * zynqmp_dma_device_config - Zynqmp dma device configuration * @dchan: DMA channel * @config: DMA device config */ static int zynqmp_dma_device_config(struct dma_chan *dchan, struct dma_slave_config *config) { struct zynqmp_dma_chan *chan = to_chan(dchan); chan->src_burst_len = config->src_maxburst; chan->dst_burst_len = config->dst_maxburst; return 0; } /** * zynqmp_dma_start_transfer - Initiate the new transfer * @chan: ZynqMP DMA channel pointer */ static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan) { struct zynqmp_dma_desc_sw *desc; if 
(!chan->idle) return; zynqmp_dma_config(chan); desc = list_first_entry_or_null(&chan->pending_list, struct zynqmp_dma_desc_sw, node); if (!desc) return; list_splice_tail_init(&chan->pending_list, &chan->active_list); zynqmp_dma_update_desc_to_ctrlr(chan, desc); zynqmp_dma_start(chan); } /** * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors * @chan: ZynqMP DMA channel */ static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan) { struct zynqmp_dma_desc_sw *desc, *next; list_for_each_entry_safe(desc, next, &chan->done_list, node) { dma_async_tx_callback callback; void *callback_param; list_del(&desc->node); callback = desc->async_tx.callback; callback_param = desc->async_tx.callback_param; if (callback) { spin_unlock(&chan->lock); callback(callback_param); spin_lock(&chan->lock); } /* Run any dependencies, then free the descriptor */ zynqmp_dma_free_descriptor(chan, desc); } } /** * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete * @chan: ZynqMP DMA channel pointer */ static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan) { struct zynqmp_dma_desc_sw *desc; desc = list_first_entry_or_null(&chan->active_list, struct zynqmp_dma_desc_sw, node); if (!desc) return; list_del(&desc->node); dma_cookie_complete(&desc->async_tx); list_add_tail(&desc->node, &chan->done_list); } /** * zynqmp_dma_issue_pending - Issue pending transactions * @dchan: DMA channel pointer */ static void zynqmp_dma_issue_pending(struct dma_chan *dchan) { struct zynqmp_dma_chan *chan = to_chan(dchan); spin_lock_bh(&chan->lock); zynqmp_dma_start_transfer(chan); spin_unlock_bh(&chan->lock); } /** * zynqmp_dma_free_descriptors - Free channel descriptors * @dchan: DMA channel pointer */ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan) { zynqmp_dma_free_desc_list(chan, &chan->active_list); zynqmp_dma_free_desc_list(chan, &chan->pending_list); zynqmp_dma_free_desc_list(chan, &chan->done_list); } /** * 
zynqmp_dma_free_chan_resources - Free channel resources * @dchan: DMA channel pointer */ static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan) { struct zynqmp_dma_chan *chan = to_chan(dchan); spin_lock_bh(&chan->lock); zynqmp_dma_free_descriptors(chan); spin_unlock_bh(&chan->lock); dma_free_coherent(chan->dev, (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS), chan->desc_pool_v, chan->desc_pool_p); kfree(chan->sw_desc_pool); } /** * zynqmp_dma_reset - Reset the channel * @chan: ZynqMP DMA channel pointer */ static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan) { writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); zynqmp_dma_complete_descriptor(chan); zynqmp_dma_chan_desc_cleanup(chan); zynqmp_dma_free_descriptors(chan); zynqmp_dma_init(chan); } /** * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler * @irq: IRQ number * @data: Pointer to the ZynqMP DMA channel structure * * Return: IRQ_HANDLED/IRQ_NONE */ static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data) { struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data; u32 isr, imr, status; irqreturn_t ret = IRQ_NONE; isr = readl(chan->regs + ZYNQMP_DMA_ISR); imr = readl(chan->regs + ZYNQMP_DMA_IMR); status = isr & ~imr; writel(isr, chan->regs + ZYNQMP_DMA_ISR); if (status & ZYNQMP_DMA_INT_DONE) { tasklet_schedule(&chan->tasklet); ret = IRQ_HANDLED; } if (status & ZYNQMP_DMA_DONE) chan->idle = true; if (status & ZYNQMP_DMA_INT_ERR) { chan->err = true; tasklet_schedule(&chan->tasklet); dev_err(chan->dev, "Channel %p has errors\n", chan); ret = IRQ_HANDLED; } if (status & ZYNQMP_DMA_INT_OVRFL) { zynqmp_dma_handle_ovfl_int(chan, status); dev_info(chan->dev, "Channel %p overflow interrupt\n", chan); ret = IRQ_HANDLED; } return ret; } /** * zynqmp_dma_do_tasklet - Schedule completion tasklet * @data: Pointer to the ZynqMP DMA channel structure */ static void zynqmp_dma_do_tasklet(unsigned long data) { struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data; u32 
count; spin_lock(&chan->lock); if (chan->err) { zynqmp_dma_reset(chan); chan->err = false; goto unlock; } count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); while (count) { zynqmp_dma_complete_descriptor(chan); zynqmp_dma_chan_desc_cleanup(chan); count--; } if (chan->idle) zynqmp_dma_start_transfer(chan); unlock: spin_unlock(&chan->lock); } /** * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel * @dchan: DMA channel pointer * * Return: Always '0' */ static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan) { struct zynqmp_dma_chan *chan = to_chan(dchan); spin_lock_bh(&chan->lock); writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); zynqmp_dma_free_descriptors(chan); spin_unlock_bh(&chan->lock); return 0; } /** * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction * @dchan: DMA channel * @dma_dst: Destination buffer address * @dma_src: Source buffer address * @len: Transfer length * @flags: transfer ack flags * * Return: Async transaction descriptor on success and NULL on failure */ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy( struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, size_t len, ulong flags) { struct zynqmp_dma_chan *chan; struct zynqmp_dma_desc_sw *new, *first = NULL; void *desc = NULL, *prev = NULL; size_t copy; u32 desc_cnt; chan = to_chan(dchan); if (len > ZYNQMP_DMA_MAX_TRANS_LEN) return NULL; desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN); spin_lock_bh(&chan->lock); if (desc_cnt > chan->desc_free_cnt) { spin_unlock_bh(&chan->lock); dev_dbg(chan->dev, "chan %p descs are not available\n", chan); return NULL; } chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt; spin_unlock_bh(&chan->lock); do { /* Allocate and populate the descriptor */ new = zynqmp_dma_get_descriptor(chan); copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN); desc = (struct zynqmp_dma_desc_ll *)new->src_v; zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst, copy, prev); prev = 
desc; len -= copy; dma_src += copy; dma_dst += copy; if (!first) first = new; else list_add_tail(&new->node, &first->tx_list); } while (len); zynqmp_dma_desc_config_eod(chan, desc); async_tx_ack(&first->async_tx); first->async_tx.flags = flags; return &first->async_tx; } /** * zynqmp_dma_prep_slave_sg - prepare descriptors for a memory sg transaction * @dchan: DMA channel * @dst_sg: Destination scatter list * @dst_sg_len: Number of entries in destination scatter list * @src_sg: Source scatter list * @src_sg_len: Number of entries in source scatter list * @flags: transfer ack flags * * Return: Async transaction descriptor on success and NULL on failure */ static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg( struct dma_chan *dchan, struct scatterlist *dst_sg, unsigned int dst_sg_len, struct scatterlist *src_sg, unsigned int src_sg_len, unsigned long flags) { struct zynqmp_dma_desc_sw *new, *first = NULL; struct zynqmp_dma_chan *chan = to_chan(dchan); void *desc = NULL, *prev = NULL; size_t len, dst_avail, src_avail; dma_addr_t dma_dst, dma_src; u32 desc_cnt = 0, i; struct scatterlist *sg; for_each_sg(src_sg, sg, src_sg_len, i) desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), ZYNQMP_DMA_MAX_TRANS_LEN); spin_lock_bh(&chan->lock); if (desc_cnt > chan->desc_free_cnt) { spin_unlock_bh(&chan->lock); dev_dbg(chan->dev, "chan %p descs are not available\n", chan); return NULL; } chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt; spin_unlock_bh(&chan->lock); dst_avail = sg_dma_len(dst_sg); src_avail = sg_dma_len(src_sg); /* Run until we are out of scatterlist entries */ while (true) { /* Allocate and populate the descriptor */ new = zynqmp_dma_get_descriptor(chan); desc = (struct zynqmp_dma_desc_ll *)new->src_v; len = min_t(size_t, src_avail, dst_avail); len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN); if (len == 0) goto fetch; dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail; dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail; 
zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst, len, prev); prev = desc; dst_avail -= len; src_avail -= len; if (!first) first = new; else list_add_tail(&new->node, &first->tx_list); fetch: /* Fetch the next dst scatterlist entry */ if (dst_avail == 0) { if (dst_sg_len == 0) break; dst_sg = sg_next(dst_sg); if (dst_sg == NULL) break; dst_sg_len--; dst_avail = sg_dma_len(dst_sg); } /* Fetch the next src scatterlist entry */ if (src_avail == 0) { if (src_sg_len == 0) break; src_sg = sg_next(src_sg); if (src_sg == NULL) break; src_sg_len--; src_avail = sg_dma_len(src_sg); } } zynqmp_dma_desc_config_eod(chan, desc); first->async_tx.flags = flags; return &first->async_tx; } /** * zynqmp_dma_chan_remove - Channel remove function * @chan: ZynqMP DMA channel pointer */ static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan) { if (!chan) return; devm_free_irq(chan->zdev->dev, chan->irq, chan); tasklet_kill(&chan->tasklet); list_del(&chan->common.device_node); clk_disable_unprepare(chan->clk_apb); clk_disable_unprepare(chan->clk_main); } /** * zynqmp_dma_chan_probe - Per Channel Probing * @zdev: Driver specific device structure * @pdev: Pointer to the platform_device structure * * Return: '0' on success and failure value on error */ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, struct platform_device *pdev) { struct zynqmp_dma_chan *chan; struct resource *res; struct device_node *node = pdev->dev.of_node; int err; chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL); if (!chan) return -ENOMEM; chan->dev = zdev->dev; chan->zdev = zdev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); chan->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(chan->regs)) return PTR_ERR(chan->regs); chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); if (err < 0) { dev_err(&pdev->dev, 
"missing xlnx,bus-width property\n"); return err; } if (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64 && chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128) { dev_err(zdev->dev, "invalid bus-width value"); return -EINVAL; } chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent"); zdev->chan = chan; tasklet_init(&chan->tasklet, zynqmp_dma_do_tasklet, (ulong)chan); spin_lock_init(&chan->lock); INIT_LIST_HEAD(&chan->active_list); INIT_LIST_HEAD(&chan->pending_list); INIT_LIST_HEAD(&chan->done_list); INIT_LIST_HEAD(&chan->free_list); dma_cookie_init(&chan->common); chan->common.device = &zdev->common; list_add_tail(&chan->common.device_node, &zdev->common.channels); zynqmp_dma_init(chan); chan->irq = platform_get_irq(pdev, 0); if (chan->irq < 0) return -ENXIO; err = devm_request_irq(&pdev->dev, chan->irq, zynqmp_dma_irq_handler, 0, "zynqmp-dma", chan); if (err) return err; chan->clk_main = devm_clk_get(&pdev->dev, "clk_main"); if (IS_ERR(chan->clk_main)) { dev_err(&pdev->dev, "main clock not found.\n"); return PTR_ERR(chan->clk_main); } chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb"); if (IS_ERR(chan->clk_apb)) { dev_err(&pdev->dev, "apb clock not found.\n"); return PTR_ERR(chan->clk_apb); } err = clk_prepare_enable(chan->clk_main); if (err) { dev_err(&pdev->dev, "Unable to enable main clock.\n"); return err; } err = clk_prepare_enable(chan->clk_apb); if (err) { clk_disable_unprepare(chan->clk_main); dev_err(&pdev->dev, "Unable to enable apb clock.\n"); return err; } chan->desc_size = sizeof(struct zynqmp_dma_desc_ll); chan->idle = true; return 0; } /** * of_zynqmp_dma_xlate - Translation function * @dma_spec: Pointer to DMA specifier as found in the device tree * @ofdma: Pointer to DMA controller data * * Return: DMA channel pointer on success and NULL on error */ static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct zynqmp_dma_device *zdev = ofdma->of_dma_data; return 
dma_get_slave_channel(&zdev->chan->common); } /** * zynqmp_dma_probe - Driver probe function * @pdev: Pointer to the platform_device structure * * Return: '0' on success and failure value on error */ static int zynqmp_dma_probe(struct platform_device *pdev) { struct zynqmp_dma_device *zdev; struct dma_device *p; int ret; zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL); if (!zdev) return -ENOMEM; zdev->dev = &pdev->dev; INIT_LIST_HEAD(&zdev->common.channels); dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); dma_cap_set(DMA_SG, zdev->common.cap_mask); dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask); p = &zdev->common; p->device_prep_dma_sg = zynqmp_dma_prep_sg; p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy; p->device_terminate_all = zynqmp_dma_device_terminate_all; p->device_issue_pending = zynqmp_dma_issue_pending; p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources; p->device_free_chan_resources = zynqmp_dma_free_chan_resources; p->device_tx_status = dma_cookie_status; p->device_config = zynqmp_dma_device_config; p->dev = &pdev->dev; platform_set_drvdata(pdev, zdev); ret = zynqmp_dma_chan_probe(zdev, pdev); if (ret) { dev_err(&pdev->dev, "Probing channel failed\n"); goto free_chan_resources; } p->dst_addr_widths = BIT(zdev->chan->bus_width / 8); p->src_addr_widths = BIT(zdev->chan->bus_width / 8); dma_async_device_register(&zdev->common); ret = of_dma_controller_register(pdev->dev.of_node, of_zynqmp_dma_xlate, zdev); if (ret) { dev_err(&pdev->dev, "Unable to register DMA to DT\n"); dma_async_device_unregister(&zdev->common); goto free_chan_resources; } dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n"); return 0; free_chan_resources: zynqmp_dma_chan_remove(zdev->chan); return ret; } /** * zynqmp_dma_remove - Driver remove function * @pdev: Pointer to the platform_device structure * * Return: Always '0' */ static int zynqmp_dma_remove(struct platform_device *pdev) { struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev); 
of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&zdev->common); zynqmp_dma_chan_remove(zdev->chan); return 0; } static const struct of_device_id zynqmp_dma_of_match[] = { { .compatible = "xlnx,zynqmp-dma-1.0", }, {} }; MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match); static struct platform_driver zynqmp_dma_driver = { .driver = { .name = "xilinx-zynqmp-dma", .of_match_table = zynqmp_dma_of_match, }, .probe = zynqmp_dma_probe, .remove = zynqmp_dma_remove, }; module_platform_driver(zynqmp_dma_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Xilinx, Inc."); MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver");
null
null
null
null
100,264
68,392
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
68,392
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "remoting/host/file_proxy_wrapper.h" #include <memory> #include <queue> #include "base/bind.h" #include "base/bind_helpers.h" #include "base/containers/queue.h" #include "base/files/file_util.h" #include "base/files/scoped_temp_dir.h" #include "base/run_loop.h" #include "base/test/scoped_task_environment.h" #include "net/base/io_buffer.h" #include "remoting/base/compound_buffer.h" #include "testing/gtest/include/gtest/gtest.h" namespace { constexpr char kTestFilename[] = "test-file.txt"; constexpr char kTestFilenameSecondary[] = "test-file (1).txt"; const std::string& kTestDataOne = "this is the first test string"; const std::string& kTestDataTwo = "this is the second test string"; const std::string& kTestDataThree = "this is the third test string"; std::unique_ptr<remoting::CompoundBuffer> ToBuffer(const std::string& data) { std::unique_ptr<remoting::CompoundBuffer> buffer = std::make_unique<remoting::CompoundBuffer>(); buffer->Append(new net::WrappedIOBuffer(data.data()), data.size()); return buffer; } } // namespace namespace remoting { class FileProxyWrapperLinuxTest : public testing::Test { public: FileProxyWrapperLinuxTest(); ~FileProxyWrapperLinuxTest() override; // testing::Test implementation. 
void SetUp() override; void TearDown() override; const base::FilePath& TestDir() const { return dir_.GetPath(); } const base::FilePath TestFilePath() const { return dir_.GetPath().Append(kTestFilename); } void StatusCallback( FileProxyWrapper::State state, base::Optional<protocol::FileTransferResponse_ErrorCode> error); void OpenFileCallback(int64_t filesize); void ReadChunkCallback(std::unique_ptr<std::vector<char>> chunk); protected: base::test::ScopedTaskEnvironment scoped_task_environment_; base::ScopedTempDir dir_; std::unique_ptr<FileProxyWrapper> file_proxy_wrapper_; base::Optional<protocol::FileTransferResponse_ErrorCode> error_; FileProxyWrapper::State final_state_; bool done_callback_succeeded_; base::queue<std::vector<char>> read_chunks_; int64_t read_filesize_; }; FileProxyWrapperLinuxTest::FileProxyWrapperLinuxTest() : scoped_task_environment_( base::test::ScopedTaskEnvironment::MainThreadType::DEFAULT, base::test::ScopedTaskEnvironment::ExecutionMode::QUEUED) {} FileProxyWrapperLinuxTest::~FileProxyWrapperLinuxTest() = default; void FileProxyWrapperLinuxTest::SetUp() { ASSERT_TRUE(dir_.CreateUniqueTempDir()); file_proxy_wrapper_ = FileProxyWrapper::Create(); file_proxy_wrapper_->Init(base::BindOnce( &FileProxyWrapperLinuxTest::StatusCallback, base::Unretained(this))); error_ = base::Optional<protocol::FileTransferResponse_ErrorCode>(); final_state_ = FileProxyWrapper::kUninitialized; done_callback_succeeded_ = false; read_chunks_ = base::queue<std::vector<char>>(); read_filesize_ = 0; } void FileProxyWrapperLinuxTest::TearDown() { file_proxy_wrapper_.reset(); } void FileProxyWrapperLinuxTest::StatusCallback( FileProxyWrapper::State state, base::Optional<protocol::FileTransferResponse_ErrorCode> error) { final_state_ = state; error_ = error; done_callback_succeeded_ = !error_.has_value(); } void FileProxyWrapperLinuxTest::OpenFileCallback(int64_t filesize) { read_filesize_ = filesize; } void FileProxyWrapperLinuxTest::ReadChunkCallback( 
std::unique_ptr<std::vector<char>> chunk) { read_chunks_.push(*chunk); } // Verifies that FileProxyWrapper can write three chunks to a file without // throwing any errors. TEST_F(FileProxyWrapperLinuxTest, WriteThreeChunks) { file_proxy_wrapper_->CreateFile(TestDir(), kTestFilename); file_proxy_wrapper_->WriteChunk(ToBuffer(kTestDataOne)); file_proxy_wrapper_->WriteChunk(ToBuffer(kTestDataTwo)); file_proxy_wrapper_->WriteChunk(ToBuffer(kTestDataThree)); file_proxy_wrapper_->Close(); scoped_task_environment_.RunUntilIdle(); ASSERT_FALSE(error_); ASSERT_EQ(final_state_, FileProxyWrapper::kClosed); ASSERT_TRUE(done_callback_succeeded_); std::string actual_file_data; ASSERT_TRUE(base::ReadFileToString(TestFilePath(), &actual_file_data)); ASSERT_TRUE(kTestDataOne + kTestDataTwo + kTestDataThree == actual_file_data); } // Verifies that calling Cancel() deletes any temporary or destination files. TEST_F(FileProxyWrapperLinuxTest, CancelDeletesFiles) { file_proxy_wrapper_->CreateFile(TestDir(), kTestFilename); file_proxy_wrapper_->WriteChunk(ToBuffer(kTestDataOne)); scoped_task_environment_.RunUntilIdle(); file_proxy_wrapper_->Cancel(); file_proxy_wrapper_.reset(); scoped_task_environment_.RunUntilIdle(); ASSERT_TRUE(base::IsDirectoryEmpty(TestDir())); } // Verifies that FileProxyWrapper will write to a file named "file (1).txt" when // "file.txt" already exists. 
TEST_F(FileProxyWrapperLinuxTest, FileAlreadyExists) { WriteFile(TestFilePath(), kTestDataOne.data(), kTestDataOne.size()); file_proxy_wrapper_->CreateFile(TestDir(), kTestFilename); file_proxy_wrapper_->WriteChunk(ToBuffer(kTestDataTwo)); file_proxy_wrapper_->Close(); scoped_task_environment_.RunUntilIdle(); std::string actual_file_data; base::FilePath secondary_filepath = TestDir().Append(kTestFilenameSecondary); ASSERT_TRUE(base::ReadFileToString(secondary_filepath, &actual_file_data)); ASSERT_STREQ(kTestDataTwo.data(), actual_file_data.data()); ASSERT_FALSE(error_); ASSERT_EQ(final_state_, FileProxyWrapper::kClosed); } // Verifies that FileProxyWrapper can read chunks from a file. TEST_F(FileProxyWrapperLinuxTest, ReadThreeChunks) { std::string test_data = kTestDataOne + kTestDataTwo + kTestDataThree; WriteFile(TestFilePath(), test_data.data(), test_data.size()); file_proxy_wrapper_->OpenFile( TestFilePath(), base::Bind(&FileProxyWrapperLinuxTest::OpenFileCallback, base::Unretained(this))); scoped_task_environment_.RunUntilIdle(); ASSERT_FALSE(error_); ASSERT_EQ(static_cast<uint64_t>(read_filesize_), test_data.size()); file_proxy_wrapper_->ReadChunk( kTestDataOne.size(), base::BindOnce(&FileProxyWrapperLinuxTest::ReadChunkCallback, base::Unretained(this))); scoped_task_environment_.RunUntilIdle(); file_proxy_wrapper_->ReadChunk( kTestDataTwo.size(), base::BindOnce(&FileProxyWrapperLinuxTest::ReadChunkCallback, base::Unretained(this))); scoped_task_environment_.RunUntilIdle(); file_proxy_wrapper_->ReadChunk( kTestDataThree.size(), base::BindOnce(&FileProxyWrapperLinuxTest::ReadChunkCallback, base::Unretained(this))); scoped_task_environment_.RunUntilIdle(); file_proxy_wrapper_->Close(); scoped_task_environment_.RunUntilIdle(); base::queue<std::vector<char>> expected_read_chunks; expected_read_chunks.push( std::vector<char>(kTestDataOne.begin(), kTestDataOne.end())); expected_read_chunks.push( std::vector<char>(kTestDataTwo.begin(), kTestDataTwo.end())); 
expected_read_chunks.push( std::vector<char>(kTestDataThree.begin(), kTestDataThree.end())); ASSERT_FALSE(error_); ASSERT_EQ(expected_read_chunks.size(), read_chunks_.size()); while (!expected_read_chunks.empty()) { ASSERT_EQ(expected_read_chunks.front(), read_chunks_.front()); expected_read_chunks.pop(); read_chunks_.pop(); } } // Verifies that FileProxyWrapper fails to open a file for reading if the file // doesn't exist. TEST_F(FileProxyWrapperLinuxTest, FileDoesntExist) { file_proxy_wrapper_->OpenFile( TestFilePath(), base::Bind(&FileProxyWrapperLinuxTest::OpenFileCallback, base::Unretained(this))); scoped_task_environment_.RunUntilIdle(); ASSERT_EQ(error_, protocol::FileTransferResponse_ErrorCode_FILE_IO_ERROR); } } // namespace remoting
null
null
null
null
65,255
62,634
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
62,634
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <stddef.h> #include <memory> #include "base/macros.h" #include "chrome/browser/autocomplete/chrome_autocomplete_provider_client.h" #include "chrome/browser/autocomplete/chrome_autocomplete_scheme_classifier.h" #include "components/omnibox/browser/autocomplete_controller.h" #include "components/omnibox/browser/autocomplete_provider.h" #include "components/omnibox/browser/omnibox_client.h" #include "components/omnibox/browser/omnibox_controller.h" #include "components/omnibox/browser/test_omnibox_client.h" #include "components/sessions/core/session_id.h" #include "content/public/test/test_browser_thread_bundle.h" #include "testing/gtest/include/gtest/gtest.h" class OmniboxControllerTest : public testing::Test { protected: OmniboxControllerTest(); ~OmniboxControllerTest() override; void CreateController(); void AssertProviders(int expected_providers); const AutocompleteController::Providers& GetAutocompleteProviders() const { return omnibox_controller_->autocomplete_controller()->providers(); } private: // testing::Test: void SetUp() override; void TearDown() override; content::TestBrowserThreadBundle thread_bundle_; std::unique_ptr<TestOmniboxClient> omnibox_client_; std::unique_ptr<OmniboxController> omnibox_controller_; DISALLOW_COPY_AND_ASSIGN(OmniboxControllerTest); }; OmniboxControllerTest::OmniboxControllerTest() { } OmniboxControllerTest::~OmniboxControllerTest() { } void OmniboxControllerTest::CreateController() { DCHECK(omnibox_client_); omnibox_controller_ = std::make_unique<OmniboxController>(nullptr, omnibox_client_.get()); } // Checks that the list of autocomplete providers used by the OmniboxController // matches the one in the |expected_providers| bit field. 
void OmniboxControllerTest::AssertProviders(int expected_providers) { const AutocompleteController::Providers& providers = GetAutocompleteProviders(); for (size_t i = 0; i < providers.size(); ++i) { // Ensure this is a provider we wanted. int type = providers[i]->type(); ASSERT_TRUE(expected_providers & type); // Remove it from expectations so we fail if it's there twice. expected_providers &= ~type; } // Ensure we saw all the providers we expected. ASSERT_EQ(0, expected_providers); } void OmniboxControllerTest::SetUp() { omnibox_client_ = std::make_unique<TestOmniboxClient>(); } void OmniboxControllerTest::TearDown() { omnibox_controller_.reset(); omnibox_client_.reset(); } TEST_F(OmniboxControllerTest, CheckDefaultAutocompleteProviders) { CreateController(); // First collect the basic providers. int observed_providers = 0; const AutocompleteController::Providers& providers = GetAutocompleteProviders(); for (size_t i = 0; i < providers.size(); ++i) observed_providers |= providers[i]->type(); // Ensure we have at least one provider. ASSERT_NE(0, observed_providers); // Ensure instant extended includes all the provides in classic Chrome. int providers_with_instant_extended = observed_providers; // TODO(beaudoin): remove TYPE_SEARCH once it's no longer needed to pass // the Instant suggestion through via FinalizeInstantQuery. CreateController(); AssertProviders(providers_with_instant_extended); }
null
null
null
null
59,497
987
null
train_val
83ed75feba32e46f736fcce0d96a0445f29b96c2
162,831
krb5
0
https://github.com/krb5/krb5
2016-01-27 15:43:28-05:00
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* lib/krb5/ccache/cc_retr.c */ /* * Copyright 1990,1991,1999,2007,2008 by the Massachusetts Institute of Technology. * All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. 
*/ #include "k5-int.h" #include "cc-int.h" #include "../krb/int-proto.h" #define KRB5_OK 0 #define set(bits) (whichfields & bits) #define flags_match(a,b) (((a) & (b)) == (a)) static int times_match_exact(const krb5_ticket_times *t1, const krb5_ticket_times *t2) { return (t1->authtime == t2->authtime && t1->starttime == t2->starttime && t1->endtime == t2->endtime && t1->renew_till == t2->renew_till); } static krb5_boolean times_match(const krb5_ticket_times *t1, const krb5_ticket_times *t2) { if (t1->renew_till) { if (t1->renew_till > t2->renew_till) return FALSE; /* this one expires too late */ } if (t1->endtime) { if (t1->endtime > t2->endtime) return FALSE; /* this one expires too late */ } /* only care about expiration on a times_match */ return TRUE; } static krb5_boolean standard_fields_match(krb5_context context, const krb5_creds *mcreds, const krb5_creds *creds) { return (krb5_principal_compare(context, mcreds->client,creds->client) && krb5_principal_compare(context, mcreds->server,creds->server)); } /* only match the server name portion, not the server realm portion */ static krb5_boolean srvname_match(krb5_context context, const krb5_creds *mcreds, const krb5_creds *creds) { krb5_boolean retval; krb5_principal_data p1, p2; retval = krb5_principal_compare(context, mcreds->client,creds->client); if (retval != TRUE) return retval; /* * Hack to ignore the server realm for the purposes of the compare. 
*/ p1 = *mcreds->server; p2 = *creds->server; p1.realm = p2.realm; return krb5_principal_compare(context, &p1, &p2); } static krb5_boolean authdata_match(krb5_authdata *const *mdata, krb5_authdata *const *data) { const krb5_authdata *mdatap, *datap; if (mdata == data) return TRUE; if (mdata == NULL) return *data == NULL; if (data == NULL) return *mdata == NULL; while ((mdatap = *mdata) && (datap = *data)) { if ((mdatap->ad_type != datap->ad_type) || (mdatap->length != datap->length) || (memcmp ((char *)mdatap->contents, (char *)datap->contents, (unsigned) mdatap->length) != 0)) return FALSE; mdata++; data++; } return (*mdata == NULL) && (*data == NULL); } static krb5_boolean data_match(const krb5_data *data1, const krb5_data *data2) { if (!data1) { if (!data2) return TRUE; else return FALSE; } if (!data2) return FALSE; return data_eq(*data1, *data2) ? TRUE : FALSE; } static int pref (krb5_enctype my_ktype, int nktypes, krb5_enctype *ktypes) { int i; for (i = 0; i < nktypes; i++) if (my_ktype == ktypes[i]) return i; return -1; } /* * Effects: * Searches the credentials cache for a credential matching mcreds, * with the fields specified by whichfields. If one if found, it is * returned in creds, which should be freed by the caller with * krb5_free_credentials(). * * The fields are interpreted in the following way (all constants are * preceded by KRB5_TC_). MATCH_IS_SKEY requires the is_skey field to * match exactly. MATCH_TIMES requires the requested lifetime to be * at least as great as that specified; MATCH_TIMES_EXACT requires the * requested lifetime to be exactly that specified. MATCH_FLAGS * requires only the set bits in mcreds be set in creds; * MATCH_FLAGS_EXACT requires all bits to match. * * Flag SUPPORTED_KTYPES means check all matching entries that have * any supported enctype (according to tgs_enctypes) and return the one * with the enctype listed earliest. Return CC_NOT_KTYPE if a match * is found *except* for having a supported enctype. 
* * Errors: * system errors * permission errors * KRB5_CC_NOMEM * KRB5_CC_NOT_KTYPE */ krb5_boolean krb5int_cc_creds_match_request(krb5_context context, krb5_flags whichfields, krb5_creds *mcreds, krb5_creds *creds) { if (((set(KRB5_TC_MATCH_SRV_NAMEONLY) && srvname_match(context, mcreds, creds)) || standard_fields_match(context, mcreds, creds)) && (! set(KRB5_TC_MATCH_IS_SKEY) || mcreds->is_skey == creds->is_skey) && (! set(KRB5_TC_MATCH_FLAGS_EXACT) || mcreds->ticket_flags == creds->ticket_flags) && (! set(KRB5_TC_MATCH_FLAGS) || flags_match(mcreds->ticket_flags, creds->ticket_flags)) && (! set(KRB5_TC_MATCH_TIMES_EXACT) || times_match_exact(&mcreds->times, &creds->times)) && (! set(KRB5_TC_MATCH_TIMES) || times_match(&mcreds->times, &creds->times)) && ( ! set(KRB5_TC_MATCH_AUTHDATA) || authdata_match(mcreds->authdata, creds->authdata)) && (! set(KRB5_TC_MATCH_2ND_TKT) || data_match (&mcreds->second_ticket, &creds->second_ticket)) && ((! set(KRB5_TC_MATCH_KTYPE))|| (mcreds->keyblock.enctype == creds->keyblock.enctype))) return TRUE; return FALSE; } static krb5_error_code krb5_cc_retrieve_cred_seq (krb5_context context, krb5_ccache id, krb5_flags whichfields, krb5_creds *mcreds, krb5_creds *creds, int nktypes, krb5_enctype *ktypes) { /* This function could be considerably faster if it kept indexing */ /* information.. sounds like a "next version" idea to me. 
:-) */ krb5_cc_cursor cursor; krb5_error_code kret; krb5_error_code nomatch_err = KRB5_CC_NOTFOUND; struct { krb5_creds creds; int pref; } fetched, best; int have_creds = 0; krb5_flags oflags = 0; #define fetchcreds (fetched.creds) kret = krb5_cc_start_seq_get(context, id, &cursor); if (kret != KRB5_OK) return kret; while (krb5_cc_next_cred(context, id, &cursor, &fetchcreds) == KRB5_OK) { if (krb5int_cc_creds_match_request(context, whichfields, mcreds, &fetchcreds)) { if (ktypes) { fetched.pref = pref (fetchcreds.keyblock.enctype, nktypes, ktypes); if (fetched.pref < 0) nomatch_err = KRB5_CC_NOT_KTYPE; else if (!have_creds || fetched.pref < best.pref) { if (have_creds) krb5_free_cred_contents (context, &best.creds); else have_creds = 1; best = fetched; continue; } } else { krb5_cc_end_seq_get(context, id, &cursor); *creds = fetchcreds; return KRB5_OK; } } /* This one doesn't match */ krb5_free_cred_contents(context, &fetchcreds); } /* If we get here, a match wasn't found */ krb5_cc_end_seq_get(context, id, &cursor); if (have_creds) { *creds = best.creds; return KRB5_OK; } else return nomatch_err; } krb5_error_code k5_cc_retrieve_cred_default(krb5_context context, krb5_ccache id, krb5_flags flags, krb5_creds *mcreds, krb5_creds *creds) { krb5_enctype *ktypes; int nktypes; krb5_error_code ret; if (flags & KRB5_TC_SUPPORTED_KTYPES) { ret = krb5_get_tgs_ktypes (context, mcreds->server, &ktypes); if (ret) return ret; nktypes = k5_count_etypes (ktypes); ret = krb5_cc_retrieve_cred_seq (context, id, flags, mcreds, creds, nktypes, ktypes); free (ktypes); return ret; } else { return krb5_cc_retrieve_cred_seq (context, id, flags, mcreds, creds, 0, 0); } }
null
null
null
null
74,139
9,026
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
9,026
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "net/http2/decoder/payload_decoders/headers_payload_decoder.h" #include <stddef.h> #include "base/logging.h" #include "base/macros.h" #include "net/http2/decoder/decode_buffer.h" #include "net/http2/decoder/http2_frame_decoder_listener.h" #include "net/http2/http2_constants.h" #include "net/http2/http2_structures.h" #include "net/http2/tools/http2_bug_tracker.h" namespace net { std::ostream& operator<<(std::ostream& out, HeadersPayloadDecoder::PayloadState v) { switch (v) { case HeadersPayloadDecoder::PayloadState::kReadPadLength: return out << "kReadPadLength"; case HeadersPayloadDecoder::PayloadState::kStartDecodingPriorityFields: return out << "kStartDecodingPriorityFields"; case HeadersPayloadDecoder::PayloadState::kResumeDecodingPriorityFields: return out << "kResumeDecodingPriorityFields"; case HeadersPayloadDecoder::PayloadState::kReadPayload: return out << "kReadPayload"; case HeadersPayloadDecoder::PayloadState::kSkipPadding: return out << "kSkipPadding"; } // Since the value doesn't come over the wire, only a programming bug should // result in reaching this point. 
int unknown = static_cast<int>(v); HTTP2_BUG << "Invalid HeadersPayloadDecoder::PayloadState: " << unknown; return out << "HeadersPayloadDecoder::PayloadState(" << unknown << ")"; } DecodeStatus HeadersPayloadDecoder::StartDecodingPayload( FrameDecoderState* state, DecodeBuffer* db) { const Http2FrameHeader& frame_header = state->frame_header(); const uint32_t total_length = frame_header.payload_length; DVLOG(2) << "HeadersPayloadDecoder::StartDecodingPayload: " << frame_header; DCHECK_EQ(Http2FrameType::HEADERS, frame_header.type); DCHECK_LE(db->Remaining(), total_length); DCHECK_EQ(0, frame_header.flags & ~(Http2FrameFlag::END_STREAM | Http2FrameFlag::END_HEADERS | Http2FrameFlag::PADDED | Http2FrameFlag::PRIORITY)); // Special case for HEADERS frames that contain only the HPACK block // (fragment or whole) and that fit fully into the decode buffer. // Why? Unencoded browser GET requests are typically under 1K and HPACK // commonly shrinks request headers by 80%, so we can expect this to // be common. // TODO(jamessynge) Add counters here and to Spdy for determining how // common this situation is. A possible approach is to create a // Http2FrameDecoderListener that counts the callbacks and then forwards // them on to another listener, which makes it easy to add and remove // counting on a connection or even frame basis. // PADDED and PRIORITY both extra steps to decode, but if neither flag is // set then we can decode faster. const auto payload_flags = Http2FrameFlag::PADDED | Http2FrameFlag::PRIORITY; if (!frame_header.HasAnyFlags(payload_flags)) { DVLOG(2) << "StartDecodingPayload !IsPadded && !HasPriority"; if (db->Remaining() == total_length) { DVLOG(2) << "StartDecodingPayload all present"; // Note that we don't cache the listener field so that the callee can // replace it if the frame is bad. // If this case is common enough, consider combining the 3 callbacks // into one, especially if END_HEADERS is also set. 
state->listener()->OnHeadersStart(frame_header); if (total_length > 0) { state->listener()->OnHpackFragment(db->cursor(), total_length); db->AdvanceCursor(total_length); } state->listener()->OnHeadersEnd(); return DecodeStatus::kDecodeDone; } payload_state_ = PayloadState::kReadPayload; } else if (frame_header.IsPadded()) { payload_state_ = PayloadState::kReadPadLength; } else { DCHECK(frame_header.HasPriority()) << frame_header; payload_state_ = PayloadState::kStartDecodingPriorityFields; } state->InitializeRemainders(); state->listener()->OnHeadersStart(frame_header); return ResumeDecodingPayload(state, db); } DecodeStatus HeadersPayloadDecoder::ResumeDecodingPayload( FrameDecoderState* state, DecodeBuffer* db) { DVLOG(2) << "HeadersPayloadDecoder::ResumeDecodingPayload " << "remaining_payload=" << state->remaining_payload() << "; db->Remaining=" << db->Remaining(); const Http2FrameHeader& frame_header = state->frame_header(); DCHECK_EQ(Http2FrameType::HEADERS, frame_header.type); DCHECK_LE(state->remaining_payload_and_padding(), frame_header.payload_length); DCHECK_LE(db->Remaining(), state->remaining_payload_and_padding()); DecodeStatus status; size_t avail; while (true) { DVLOG(2) << "HeadersPayloadDecoder::ResumeDecodingPayload payload_state_=" << payload_state_; switch (payload_state_) { case PayloadState::kReadPadLength: // ReadPadLength handles the OnPadLength callback, and updating the // remaining_payload and remaining_padding fields. If the amount of // padding is too large to fit in the frame's payload, ReadPadLength // instead calls OnPaddingTooLong and returns kDecodeError. 
status = state->ReadPadLength(db, /*report_pad_length*/ true); if (status != DecodeStatus::kDecodeDone) { return status; } if (!frame_header.HasPriority()) { payload_state_ = PayloadState::kReadPayload; continue; } FALLTHROUGH; case PayloadState::kStartDecodingPriorityFields: status = state->StartDecodingStructureInPayload(&priority_fields_, db); if (status != DecodeStatus::kDecodeDone) { payload_state_ = PayloadState::kResumeDecodingPriorityFields; return status; } state->listener()->OnHeadersPriority(priority_fields_); FALLTHROUGH; case PayloadState::kReadPayload: avail = state->AvailablePayload(db); if (avail > 0) { state->listener()->OnHpackFragment(db->cursor(), avail); db->AdvanceCursor(avail); state->ConsumePayload(avail); } if (state->remaining_payload() > 0) { payload_state_ = PayloadState::kReadPayload; return DecodeStatus::kDecodeInProgress; } FALLTHROUGH; case PayloadState::kSkipPadding: // SkipPadding handles the OnPadding callback. if (state->SkipPadding(db)) { state->listener()->OnHeadersEnd(); return DecodeStatus::kDecodeDone; } payload_state_ = PayloadState::kSkipPadding; return DecodeStatus::kDecodeInProgress; case PayloadState::kResumeDecodingPriorityFields: status = state->ResumeDecodingStructureInPayload(&priority_fields_, db); if (status != DecodeStatus::kDecodeDone) { return status; } state->listener()->OnHeadersPriority(priority_fields_); payload_state_ = PayloadState::kReadPayload; continue; } HTTP2_BUG << "PayloadState: " << payload_state_; } } } // namespace net
null
null
null
null
5,889
18,906
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
183,901
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* * bfa_attr.c Linux driver configuration interface module. */ #include "bfad_drv.h" #include "bfad_im.h" /* * FC transport template entry, get SCSI target port ID. */ static void bfad_im_get_starget_port_id(struct scsi_target *starget) { struct Scsi_Host *shost; struct bfad_im_port_s *im_port; struct bfad_s *bfad; struct bfad_itnim_s *itnim = NULL; u32 fc_id = -1; unsigned long flags; shost = dev_to_shost(starget->dev.parent); im_port = (struct bfad_im_port_s *) shost->hostdata[0]; bfad = im_port->bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); itnim = bfad_get_itnim(im_port, starget->id); if (itnim) fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim); fc_starget_port_id(starget) = fc_id; spin_unlock_irqrestore(&bfad->bfad_lock, flags); } /* * FC transport template entry, get SCSI target nwwn. 
*/ static void bfad_im_get_starget_node_name(struct scsi_target *starget) { struct Scsi_Host *shost; struct bfad_im_port_s *im_port; struct bfad_s *bfad; struct bfad_itnim_s *itnim = NULL; u64 node_name = 0; unsigned long flags; shost = dev_to_shost(starget->dev.parent); im_port = (struct bfad_im_port_s *) shost->hostdata[0]; bfad = im_port->bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); itnim = bfad_get_itnim(im_port, starget->id); if (itnim) node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim); fc_starget_node_name(starget) = cpu_to_be64(node_name); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } /* * FC transport template entry, get SCSI target pwwn. */ static void bfad_im_get_starget_port_name(struct scsi_target *starget) { struct Scsi_Host *shost; struct bfad_im_port_s *im_port; struct bfad_s *bfad; struct bfad_itnim_s *itnim = NULL; u64 port_name = 0; unsigned long flags; shost = dev_to_shost(starget->dev.parent); im_port = (struct bfad_im_port_s *) shost->hostdata[0]; bfad = im_port->bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); itnim = bfad_get_itnim(im_port, starget->id); if (itnim) port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); fc_starget_port_name(starget) = cpu_to_be64(port_name); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } /* * FC transport template entry, get SCSI host port ID. */ static void bfad_im_get_host_port_id(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; fc_host_port_id(shost) = bfa_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port)); } /* * FC transport template entry, get SCSI host port type. 
*/ static void bfad_im_get_host_port_type(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_lport_attr_s port_attr; bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); switch (port_attr.port_type) { case BFA_PORT_TYPE_NPORT: fc_host_port_type(shost) = FC_PORTTYPE_NPORT; break; case BFA_PORT_TYPE_NLPORT: fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; break; case BFA_PORT_TYPE_P2P: fc_host_port_type(shost) = FC_PORTTYPE_PTP; break; case BFA_PORT_TYPE_LPORT: fc_host_port_type(shost) = FC_PORTTYPE_LPORT; break; default: fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; break; } } /* * FC transport template entry, get SCSI host port state. */ static void bfad_im_get_host_port_state(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_port_attr_s attr; bfa_fcport_get_attr(&bfad->bfa, &attr); switch (attr.port_state) { case BFA_PORT_ST_LINKDOWN: fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; break; case BFA_PORT_ST_LINKUP: fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; break; case BFA_PORT_ST_DISABLED: case BFA_PORT_ST_STOPPED: case BFA_PORT_ST_IOCDOWN: case BFA_PORT_ST_IOCDIS: fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; break; case BFA_PORT_ST_UNINIT: case BFA_PORT_ST_ENABLING_QWAIT: case BFA_PORT_ST_ENABLING: case BFA_PORT_ST_DISABLING_QWAIT: case BFA_PORT_ST_DISABLING: default: fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; break; } } /* * FC transport template entry, get SCSI host active fc4s. 
*/ static void bfad_im_get_host_active_fc4s(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; memset(fc_host_active_fc4s(shost), 0, sizeof(fc_host_active_fc4s(shost))); if (port->supported_fc4s & BFA_LPORT_ROLE_FCP_IM) fc_host_active_fc4s(shost)[2] = 1; fc_host_active_fc4s(shost)[7] = 1; } /* * FC transport template entry, get SCSI host link speed. */ static void bfad_im_get_host_speed(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_port_attr_s attr; bfa_fcport_get_attr(&bfad->bfa, &attr); switch (attr.speed) { case BFA_PORT_SPEED_10GBPS: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; case BFA_PORT_SPEED_16GBPS: fc_host_speed(shost) = FC_PORTSPEED_16GBIT; break; case BFA_PORT_SPEED_8GBPS: fc_host_speed(shost) = FC_PORTSPEED_8GBIT; break; case BFA_PORT_SPEED_4GBPS: fc_host_speed(shost) = FC_PORTSPEED_4GBIT; break; case BFA_PORT_SPEED_2GBPS: fc_host_speed(shost) = FC_PORTSPEED_2GBIT; break; case BFA_PORT_SPEED_1GBPS: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; break; default: fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } } /* * FC transport template entry, get SCSI host port type. */ static void bfad_im_get_host_fabric_name(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; wwn_t fabric_nwwn = 0; fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port); fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn); } /* * FC transport template entry, get BFAD statistics. 
*/ static struct fc_host_statistics * bfad_im_get_stats(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfad_hal_comp fcomp; union bfa_port_stats_u *fcstats; struct fc_host_statistics *hstats; bfa_status_t rc; unsigned long flags; fcstats = kzalloc(sizeof(union bfa_port_stats_u), GFP_KERNEL); if (fcstats == NULL) return NULL; hstats = &bfad->link_stats; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); memset(hstats, 0, sizeof(struct fc_host_statistics)); rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa), fcstats, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) return NULL; wait_for_completion(&fcomp.comp); /* Fill the fc_host_statistics structure */ hstats->seconds_since_last_reset = fcstats->fc.secs_reset; hstats->tx_frames = fcstats->fc.tx_frames; hstats->tx_words = fcstats->fc.tx_words; hstats->rx_frames = fcstats->fc.rx_frames; hstats->rx_words = fcstats->fc.rx_words; hstats->lip_count = fcstats->fc.lip_count; hstats->nos_count = fcstats->fc.nos_count; hstats->error_frames = fcstats->fc.error_frames; hstats->dumped_frames = fcstats->fc.dropped_frames; hstats->link_failure_count = fcstats->fc.link_failures; hstats->loss_of_sync_count = fcstats->fc.loss_of_syncs; hstats->loss_of_signal_count = fcstats->fc.loss_of_signals; hstats->prim_seq_protocol_err_count = fcstats->fc.primseq_errs; hstats->invalid_crc_count = fcstats->fc.invalid_crcs; kfree(fcstats); return hstats; } /* * FC transport template entry, reset BFAD statistics. 
*/ static void bfad_im_reset_stats(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfad_hal_comp fcomp; unsigned long flags; bfa_status_t rc; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) return; wait_for_completion(&fcomp.comp); return; } /* * FC transport template entry, set rport loss timeout. * Update dev_loss_tmo based on the value pushed down by the stack * In case it is lesser than path_tov of driver, set it to path_tov + 1 * to ensure that the driver times out before the application */ static void bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) { struct bfad_itnim_data_s *itnim_data = rport->dd_data; struct bfad_itnim_s *itnim = itnim_data->itnim; struct bfad_s *bfad = itnim->im->bfad; uint16_t path_tov = bfa_fcpim_path_tov_get(&bfad->bfa); rport->dev_loss_tmo = timeout; if (timeout < path_tov) rport->dev_loss_tmo = path_tov + 1; } static int bfad_im_vport_create(struct fc_vport *fc_vport, bool disable) { char *vname = fc_vport->symbolic_name; struct Scsi_Host *shost = fc_vport->shost; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_lport_cfg_s port_cfg; struct bfad_vport_s *vp; int status = 0, rc; unsigned long flags; memset(&port_cfg, 0, sizeof(port_cfg)); u64_to_wwn(fc_vport->node_name, (u8 *)&port_cfg.nwwn); u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn); if (strlen(vname) > 0) strcpy((char *)&port_cfg.sym_name, vname); port_cfg.roles = BFA_LPORT_ROLE_FCP_IM; spin_lock_irqsave(&bfad->bfad_lock, flags); list_for_each_entry(vp, &bfad->pbc_vport_list, list_entry) { if (port_cfg.pwwn == vp->fcs_vport.lport.port_cfg.pwwn) { port_cfg.preboot_vp = vp->fcs_vport.lport.port_cfg.preboot_vp; 
break; } } spin_unlock_irqrestore(&bfad->bfad_lock, flags); rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev); if (rc == BFA_STATUS_OK) { struct bfad_vport_s *vport; struct bfa_fcs_vport_s *fcs_vport; struct Scsi_Host *vshost; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, port_cfg.pwwn); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (fcs_vport == NULL) return VPCERR_BAD_WWN; fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); if (disable) { spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_vport_stop(fcs_vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); } vport = fcs_vport->vport_drv; vshost = vport->drv_port.im_port->shost; fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn); fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn); fc_host_supported_classes(vshost) = FC_COS_CLASS3; memset(fc_host_supported_fc4s(vshost), 0, sizeof(fc_host_supported_fc4s(vshost))); /* For FCP type 0x08 */ if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM) fc_host_supported_fc4s(vshost)[2] = 1; /* For fibre channel services type 0x20 */ fc_host_supported_fc4s(vshost)[7] = 1; fc_host_supported_speeds(vshost) = bfad_im_supported_speeds(&bfad->bfa); fc_host_maxframe_size(vshost) = bfa_fcport_get_maxfrsize(&bfad->bfa); fc_vport->dd_data = vport; vport->drv_port.im_port->fc_vport = fc_vport; } else if (rc == BFA_STATUS_INVALID_WWN) return VPCERR_BAD_WWN; else if (rc == BFA_STATUS_VPORT_EXISTS) return VPCERR_BAD_WWN; else if (rc == BFA_STATUS_VPORT_MAX) return VPCERR_NO_FABRIC_SUPP; else if (rc == BFA_STATUS_VPORT_WWN_BP) return VPCERR_BAD_WWN; else return FC_VPORT_FAILED; return status; } int bfad_im_issue_fc_host_lip(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfad_hal_comp fcomp; unsigned long flags; uint32_t status; init_completion(&fcomp.comp); 
spin_lock_irqsave(&bfad->bfad_lock, flags); status = bfa_port_disable(&bfad->bfa.modules.port, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (status != BFA_STATUS_OK) return -EIO; wait_for_completion(&fcomp.comp); if (fcomp.status != BFA_STATUS_OK) return -EIO; spin_lock_irqsave(&bfad->bfad_lock, flags); status = bfa_port_enable(&bfad->bfa.modules.port, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (status != BFA_STATUS_OK) return -EIO; wait_for_completion(&fcomp.comp); if (fcomp.status != BFA_STATUS_OK) return -EIO; return 0; } static int bfad_im_vport_delete(struct fc_vport *fc_vport) { struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) vport->drv_port.im_port; struct bfad_s *bfad = im_port->bfad; struct bfad_port_s *port; struct bfa_fcs_vport_s *fcs_vport; struct Scsi_Host *vshost; wwn_t pwwn; int rc; unsigned long flags; struct completion fcomp; if (im_port->flags & BFAD_PORT_DELETE) { bfad_scsi_host_free(bfad, im_port); list_del(&vport->list_entry); kfree(vport); return 0; } port = im_port->port; vshost = vport->drv_port.im_port->shost; u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn); spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (fcs_vport == NULL) return VPCERR_BAD_WWN; vport->drv_port.flags |= BFAD_PORT_DELETE; vport->comp_del = &fcomp; init_completion(vport->comp_del); spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_fcs_vport_delete(&vport->fcs_vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc == BFA_STATUS_PBC) { vport->drv_port.flags &= ~BFAD_PORT_DELETE; vport->comp_del = NULL; return -1; } wait_for_completion(vport->comp_del); bfad_scsi_host_free(bfad, im_port); list_del(&vport->list_entry); kfree(vport); return 0; } static int bfad_im_vport_disable(struct fc_vport *fc_vport, bool 
disable) { struct bfad_vport_s *vport; struct bfad_s *bfad; struct bfa_fcs_vport_s *fcs_vport; struct Scsi_Host *vshost; wwn_t pwwn; unsigned long flags; vport = (struct bfad_vport_s *)fc_vport->dd_data; bfad = vport->drv_port.bfad; vshost = vport->drv_port.im_port->shost; u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn); spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (fcs_vport == NULL) return VPCERR_BAD_WWN; if (disable) { bfa_fcs_vport_stop(fcs_vport); fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); } else { bfa_fcs_vport_start(fcs_vport); fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); } return 0; } void bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport) { struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *)vport->drv_port.im_port; struct bfad_s *bfad = im_port->bfad; struct Scsi_Host *vshost = vport->drv_port.im_port->shost; char *sym_name = fc_vport->symbolic_name; struct bfa_fcs_vport_s *fcs_vport; wwn_t pwwn; unsigned long flags; u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn); spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (fcs_vport == NULL) return; spin_lock_irqsave(&bfad->bfad_lock, flags); if (strlen(sym_name) > 0) bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } struct fc_function_template bfad_im_fc_function_template = { /* Target dynamic attributes */ .get_starget_port_id = bfad_im_get_starget_port_id, .show_starget_port_id = 1, .get_starget_node_name = bfad_im_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = bfad_im_get_starget_port_name, .show_starget_port_name = 1, /* Host dynamic attribute */ .get_host_port_id = bfad_im_get_host_port_id, .show_host_port_id = 1, 
/* Host fixed attributes */ .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, /* More host dynamic attributes */ .show_host_port_type = 1, .get_host_port_type = bfad_im_get_host_port_type, .show_host_port_state = 1, .get_host_port_state = bfad_im_get_host_port_state, .show_host_active_fc4s = 1, .get_host_active_fc4s = bfad_im_get_host_active_fc4s, .show_host_speed = 1, .get_host_speed = bfad_im_get_host_speed, .show_host_fabric_name = 1, .get_host_fabric_name = bfad_im_get_host_fabric_name, .show_host_symbolic_name = 1, /* Statistics */ .get_fc_host_stats = bfad_im_get_stats, .reset_fc_host_stats = bfad_im_reset_stats, /* Allocation length for host specific data */ .dd_fcrport_size = sizeof(struct bfad_itnim_data_s *), /* Remote port fixed attributes */ .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_rport_dev_loss_tmo = 1, .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, .issue_fc_host_lip = bfad_im_issue_fc_host_lip, .vport_create = bfad_im_vport_create, .vport_delete = bfad_im_vport_delete, .vport_disable = bfad_im_vport_disable, .set_vport_symbolic_name = bfad_im_vport_set_symbolic_name, .bsg_request = bfad_im_bsg_request, .bsg_timeout = bfad_im_bsg_timeout, }; struct fc_function_template bfad_im_vport_fc_function_template = { /* Target dynamic attributes */ .get_starget_port_id = bfad_im_get_starget_port_id, .show_starget_port_id = 1, .get_starget_node_name = bfad_im_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = bfad_im_get_starget_port_name, .show_starget_port_name = 1, /* Host dynamic attribute */ .get_host_port_id = bfad_im_get_host_port_id, .show_host_port_id = 1, /* Host fixed attributes */ .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, 
/* More host dynamic attributes */ .show_host_port_type = 1, .get_host_port_type = bfad_im_get_host_port_type, .show_host_port_state = 1, .get_host_port_state = bfad_im_get_host_port_state, .show_host_active_fc4s = 1, .get_host_active_fc4s = bfad_im_get_host_active_fc4s, .show_host_speed = 1, .get_host_speed = bfad_im_get_host_speed, .show_host_fabric_name = 1, .get_host_fabric_name = bfad_im_get_host_fabric_name, .show_host_symbolic_name = 1, /* Statistics */ .get_fc_host_stats = bfad_im_get_stats, .reset_fc_host_stats = bfad_im_reset_stats, /* Allocation length for host specific data */ .dd_fcrport_size = sizeof(struct bfad_itnim_data_s *), /* Remote port fixed attributes */ .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_rport_dev_loss_tmo = 1, .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, }; /* * Scsi_Host_attrs SCSI host attributes */ static ssize_t bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; bfa_get_adapter_serial_num(&bfad->bfa, serial_num); return snprintf(buf, PAGE_SIZE, "%s\n", serial_num); } static ssize_t bfad_im_model_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char model[BFA_ADAPTER_MODEL_NAME_LEN]; bfa_get_adapter_model(&bfad->bfa, model); return snprintf(buf, PAGE_SIZE, "%s\n", model); } static ssize_t bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char 
model[BFA_ADAPTER_MODEL_NAME_LEN]; char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; int nports = 0; bfa_get_adapter_model(&bfad->bfa, model); nports = bfa_get_nports(&bfad->bfa); if (!strcmp(model, "QLogic-425")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 4Gbps PCIe dual port FC HBA"); else if (!strcmp(model, "QLogic-825")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps PCIe dual port FC HBA"); else if (!strcmp(model, "QLogic-42B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 4Gbps PCIe dual port FC HBA for HP"); else if (!strcmp(model, "QLogic-82B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps PCIe dual port FC HBA for HP"); else if (!strcmp(model, "QLogic-1010")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps single port CNA"); else if (!strcmp(model, "QLogic-1020")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps dual port CNA"); else if (!strcmp(model, "QLogic-1007")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps CNA for IBM Blade Center"); else if (!strcmp(model, "QLogic-415")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 4Gbps PCIe single port FC HBA"); else if (!strcmp(model, "QLogic-815")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps PCIe single port FC HBA"); else if (!strcmp(model, "QLogic-41B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 4Gbps PCIe single port FC HBA for HP"); else if (!strcmp(model, "QLogic-81B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps PCIe single port FC HBA for HP"); else if (!strcmp(model, "QLogic-804")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps FC HBA for HP Bladesystem C-class"); else if (!strcmp(model, "QLogic-1741")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps 
CNA for Dell M-Series Blade Servers"); else if (strstr(model, "QLogic-1860")) { if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps single port CNA"); else if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 16Gbps PCIe single port FC HBA"); else if (nports == 2 && bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps dual port CNA"); else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 16Gbps PCIe dual port FC HBA"); } else if (!strcmp(model, "QLogic-1867")) { if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 16Gbps PCIe single port FC HBA for IBM"); else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 16Gbps PCIe dual port FC HBA for IBM"); } else snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "Invalid Model"); return snprintf(buf, PAGE_SIZE, "%s\n", model_descr); } static ssize_t bfad_im_node_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; u64 nwwn; nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port); return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn)); } static ssize_t bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_lport_attr_s port_attr; char symname[BFA_SYMNAME_MAXLEN]; bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); 
strncpy(symname, port_attr.port_cfg.sym_name.symname, BFA_SYMNAME_MAXLEN); return snprintf(buf, PAGE_SIZE, "%s\n", symname); } static ssize_t bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char hw_ver[BFA_VERSION_LEN]; bfa_get_pci_chip_rev(&bfad->bfa, hw_ver); return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver); } static ssize_t bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION); } static ssize_t bfad_im_optionrom_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char optrom_ver[BFA_VERSION_LEN]; bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver); return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver); } static ssize_t bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char fw_ver[BFA_VERSION_LEN]; bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver); } static ssize_t bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; return snprintf(buf, PAGE_SIZE, "%d\n", bfa_get_nports(&bfad->bfa)); } static ssize_t bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME); } static 
ssize_t bfad_im_num_of_discovered_ports_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; struct bfad_s *bfad = im_port->bfad; int nrports = 2048; struct bfa_rport_qualifier_s *rports = NULL; unsigned long flags; rports = kzalloc(sizeof(struct bfa_rport_qualifier_s) * nrports, GFP_ATOMIC); if (rports == NULL) return snprintf(buf, PAGE_SIZE, "Failed\n"); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports); spin_unlock_irqrestore(&bfad->bfad_lock, flags); kfree(rports); return snprintf(buf, PAGE_SIZE, "%d\n", nrports); } static DEVICE_ATTR(serial_number, S_IRUGO, bfad_im_serial_num_show, NULL); static DEVICE_ATTR(model, S_IRUGO, bfad_im_model_show, NULL); static DEVICE_ATTR(model_description, S_IRUGO, bfad_im_model_desc_show, NULL); static DEVICE_ATTR(node_name, S_IRUGO, bfad_im_node_name_show, NULL); static DEVICE_ATTR(symbolic_name, S_IRUGO, bfad_im_symbolic_name_show, NULL); static DEVICE_ATTR(hardware_version, S_IRUGO, bfad_im_hw_version_show, NULL); static DEVICE_ATTR(driver_version, S_IRUGO, bfad_im_drv_version_show, NULL); static DEVICE_ATTR(option_rom_version, S_IRUGO, bfad_im_optionrom_version_show, NULL); static DEVICE_ATTR(firmware_version, S_IRUGO, bfad_im_fw_version_show, NULL); static DEVICE_ATTR(number_of_ports, S_IRUGO, bfad_im_num_of_ports_show, NULL); static DEVICE_ATTR(driver_name, S_IRUGO, bfad_im_drv_name_show, NULL); static DEVICE_ATTR(number_of_discovered_ports, S_IRUGO, bfad_im_num_of_discovered_ports_show, NULL); struct device_attribute *bfad_im_host_attrs[] = { &dev_attr_serial_number, &dev_attr_model, &dev_attr_model_description, &dev_attr_node_name, &dev_attr_symbolic_name, &dev_attr_hardware_version, &dev_attr_driver_version, &dev_attr_option_rom_version, &dev_attr_firmware_version, 
&dev_attr_number_of_ports, &dev_attr_driver_name, &dev_attr_number_of_discovered_ports, NULL, }; struct device_attribute *bfad_im_vport_attrs[] = { &dev_attr_serial_number, &dev_attr_model, &dev_attr_model_description, &dev_attr_node_name, &dev_attr_symbolic_name, &dev_attr_hardware_version, &dev_attr_driver_version, &dev_attr_option_rom_version, &dev_attr_firmware_version, &dev_attr_number_of_ports, &dev_attr_driver_name, &dev_attr_number_of_discovered_ports, NULL, };
null
null
null
null
92,248
60,026
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
60,026
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_OFFLINE_PAGES_OFFLINE_PAGE_TAB_HELPER_H_ #define CHROME_BROWSER_OFFLINE_PAGES_OFFLINE_PAGE_TAB_HELPER_H_ #include "base/macros.h" #include "base/memory/weak_ptr.h" #include "base/time/time.h" #include "chrome/browser/offline_pages/offline_page_utils.h" #include "chrome/common/mhtml_page_notifier.mojom.h" #include "components/offline_pages/core/request_header/offline_page_header.h" #include "content/public/browser/web_contents_binding_set.h" #include "content/public/browser/web_contents_observer.h" #include "content/public/browser/web_contents_user_data.h" #include "services/service_manager/public/cpp/binder_registry.h" #include "url/gurl.h" namespace content { class WebContents; } namespace offline_pages { struct OfflinePageItem; class PrefetchService; // This enum is used for UMA reporting. It contains all possible trusted states // of the offline page. // NOTE: because this is used for UMA reporting, these values should not be // changed or reused; new values should be ended immediately before the MAX // value. Make sure to update the histogram enum (OfflinePageTrustedState in // enums.xml) accordingly. enum class OfflinePageTrustedState { // Trusted because the archive file is in internal directory. TRUSTED_AS_IN_INTERNAL_DIR, // Trusted because the archive file is in public directory without // modification. TRUSTED_AS_UNMODIFIED_AND_IN_PUBLIC_DIR, // No trusted because the archive file is in public directory and it is // modified. UNTRUSTED, TRUSTED_STATE_MAX }; // Per-tab class that monitors the navigations and stores the necessary info // to facilitate the synchronous access to offline information. 
class OfflinePageTabHelper : public content::WebContentsObserver, public content::WebContentsUserData<OfflinePageTabHelper>, public mojom::MhtmlPageNotifier { public: ~OfflinePageTabHelper() override; // Creates the Mojo service that can listen to the renderer's archive events. void CreateMhtmlPageNotifier(mojom::MhtmlPageNotifierRequest request); // MhtmlPageNotifier overrides. void NotifyIsMhtmlPage(const GURL& main_frame_url, base::Time date_header_time) override; void SetOfflinePage(const OfflinePageItem& offline_page, const OfflinePageHeader& offline_header, OfflinePageTrustedState trusted_state, bool is_offline_preview); void ClearOfflinePage(); const OfflinePageItem* offline_page() { return offline_info_.offline_page.get(); } const OfflinePageHeader& offline_header() const { return offline_info_.offline_header; } OfflinePageTrustedState trusted_state() const { return offline_info_.trusted_state; } // Returns whether a trusted offline page is being displayed. bool IsShowingTrustedOfflinePage() const; // Returns nullptr if the page is not an offline preview. Returns the // OfflinePageItem related to the page if the page is an offline preview. const OfflinePageItem* GetOfflinePreviewItem() const; // Returns provisional offline page since actual navigation does not happen // during unit tests. const OfflinePageItem* GetOfflinePageForTest() const; // Returns trusted state of provisional offline page. OfflinePageTrustedState GetTrustedStateForTest() const; // Sets the target frame, useful for unit testing the MhtmlPageNotifier // interface. void SetCurrentTargetFrameForTest( content::RenderFrameHost* render_frame_host); // Helper function which normally should only be called by // OfflinePageUtils::ScheduleDownload to do the work. This is because we need // to ensure |web_contents| is still valid after returning from the // asynchronous call of duplicate checking function. 
The lifetime of // OfflinePageTabHelper instance is tied with the associated |web_contents| // and thus the callback will be automatically invalidated if |web_contents| // is gone. void ScheduleDownloadHelper(content::WebContents* web_contents, const std::string& name_space, const GURL& url, OfflinePageUtils::DownloadUIActionFlags ui_action, const std::string& request_origin); private: friend class content::WebContentsUserData<OfflinePageTabHelper>; // Contains the info about the offline page being loaded. struct LoadedOfflinePageInfo { LoadedOfflinePageInfo(); ~LoadedOfflinePageInfo(); // Constructs a valid but untrusted LoadedOfflinePageInfo with |url| as the // online URL. static LoadedOfflinePageInfo MakeUntrusted(); LoadedOfflinePageInfo& operator=(LoadedOfflinePageInfo&& other); LoadedOfflinePageInfo(LoadedOfflinePageInfo&& other); // The cached copy of OfflinePageItem. Note that if |is_trusted| is false, // offline_page may contain information derived from the MHTML itself and // should be exposed to the user as untrusted. std::unique_ptr<OfflinePageItem> offline_page; // The offline header that is provided when offline page is loaded. OfflinePageHeader offline_header; // The trusted state of the page. OfflinePageTrustedState trusted_state; // Whether the page is an offline preview. Offline page previews are shown // when a user's effective connection type is prohibitively slow. bool is_showing_offline_preview = false; // Returns true if this contains an offline page. When constructed, // LoadedOfflinePageInfo objects are invalid until filled with an offline // page. bool IsValid() const; void Clear(); }; explicit OfflinePageTabHelper(content::WebContents* web_contents); // Overridden from content::WebContentsObserver: void DidStartNavigation( content::NavigationHandle* navigation_handle) override; void DidFinishNavigation( content::NavigationHandle* navigation_handle) override; // Finalize the offline info when the navigation is done. 
void FinalizeOfflineInfo(content::NavigationHandle* navigation_handle); void ReportOfflinePageMetrics(); // Report the metrics essential to PrefetchService. void ReportPrefetchMetrics(content::NavigationHandle* navigation_handle); // Reload the URL in order to fetch the offline page on certain net errors. void TryLoadingOfflinePageOnNetError( content::NavigationHandle* navigation_handle); // Creates an offline info with an invalid offline ID and the given URL. LoadedOfflinePageInfo MakeUntrustedOfflineInfo(const GURL& url); void SelectPagesForURLDone(const std::vector<OfflinePageItem>& offline_pages); void DuplicateCheckDoneForScheduleDownload( content::WebContents* web_contents, const std::string& name_space, const GURL& url, OfflinePageUtils::DownloadUIActionFlags ui_action, const std::string& request_origin, OfflinePageUtils::DuplicateCheckResult result); void DoDownloadPageLater(content::WebContents* web_contents, const std::string& name_space, const GURL& url, OfflinePageUtils::DownloadUIActionFlags ui_action, const std::string& request_origin); // The provisional info about the offline page being loaded. This is set when // the offline interceptor decides to serve the offline page and it will be // moved to |offline_info_| once the navigation is committed without error. LoadedOfflinePageInfo provisional_offline_info_; // The info about offline page being loaded. This is set from // |provisional_offline_info_| when the navigation is committed without error. // This can be used to by the Tab to synchronously ask about the offline // info. LoadedOfflinePageInfo offline_info_; bool reloading_url_on_net_error_ = false; // Service, overlives this object. PrefetchService* prefetch_service_ = nullptr; // Table of OfflinePages policies. // TODO(dimich): When we only have one shared version of PolicyController, // replace this instance with access to a shared one. 
ClientPolicyController policy_controller_; // TODO(crbug.com/827215): We only really want interface messages for the main // frame but this is not easily done with the current helper classes. content::WebContentsFrameBindingSet<mojom::MhtmlPageNotifier> mhtml_page_notifier_bindings_; base::WeakPtrFactory<OfflinePageTabHelper> weak_ptr_factory_; DISALLOW_COPY_AND_ASSIGN(OfflinePageTabHelper); }; } // namespace offline_pages #endif // CHROME_BROWSER_OFFLINE_PAGES_OFFLINE_PAGE_TAB_HELPER_H_
null
null
null
null
56,889
599
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
153,656
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * Copyright (c) 2012 Nicolas George * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with FFmpeg; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/channel_layout.h" #include "libavutil/avassert.h" #include "audio.h" #include "avfilter.h" #include "internal.h" typedef struct VolDetectContext { /** * Number of samples at each PCM value. * histogram[0x8000 + i] is the number of samples at value i. * The extra element is there for symmetry. 
*/ uint64_t histogram[0x10001]; } VolDetectContext; static int query_formats(AVFilterContext *ctx) { static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_NONE }; AVFilterFormats *formats; AVFilterChannelLayouts *layouts; int ret; if (!(formats = ff_make_format_list(sample_fmts))) return AVERROR(ENOMEM); layouts = ff_all_channel_counts(); if (!layouts) return AVERROR(ENOMEM); ret = ff_set_common_channel_layouts(ctx, layouts); if (ret < 0) return ret; return ff_set_common_formats(ctx, formats); } static int filter_frame(AVFilterLink *inlink, AVFrame *samples) { AVFilterContext *ctx = inlink->dst; VolDetectContext *vd = ctx->priv; int nb_samples = samples->nb_samples; int nb_channels = samples->channels; int nb_planes = nb_channels; int plane, i; int16_t *pcm; if (!av_sample_fmt_is_planar(samples->format)) { nb_samples *= nb_channels; nb_planes = 1; } for (plane = 0; plane < nb_planes; plane++) { pcm = (int16_t *)samples->extended_data[plane]; for (i = 0; i < nb_samples; i++) vd->histogram[pcm[i] + 0x8000]++; } return ff_filter_frame(inlink->dst->outputs[0], samples); } #define MAX_DB 91 static inline double logdb(uint64_t v) { double d = v / (double)(0x8000 * 0x8000); if (!v) return MAX_DB; return -log10(d) * 10; } static void print_stats(AVFilterContext *ctx) { VolDetectContext *vd = ctx->priv; int i, max_volume, shift; uint64_t nb_samples = 0, power = 0, nb_samples_shift = 0, sum = 0; uint64_t histdb[MAX_DB + 1] = { 0 }; for (i = 0; i < 0x10000; i++) nb_samples += vd->histogram[i]; av_log(ctx, AV_LOG_INFO, "n_samples: %"PRId64"\n", nb_samples); if (!nb_samples) return; /* If nb_samples > 1<<34, there is a risk of overflow in the multiplication or the sum: shift all histogram values to avoid that. The total number of samples must be recomputed to avoid rounding errors. 
*/ shift = av_log2(nb_samples >> 33); for (i = 0; i < 0x10000; i++) { nb_samples_shift += vd->histogram[i] >> shift; power += (i - 0x8000) * (i - 0x8000) * (vd->histogram[i] >> shift); } if (!nb_samples_shift) return; power = (power + nb_samples_shift / 2) / nb_samples_shift; av_assert0(power <= 0x8000 * 0x8000); av_log(ctx, AV_LOG_INFO, "mean_volume: %.1f dB\n", -logdb(power)); max_volume = 0x8000; while (max_volume > 0 && !vd->histogram[0x8000 + max_volume] && !vd->histogram[0x8000 - max_volume]) max_volume--; av_log(ctx, AV_LOG_INFO, "max_volume: %.1f dB\n", -logdb(max_volume * max_volume)); for (i = 0; i < 0x10000; i++) histdb[(int)logdb((i - 0x8000) * (i - 0x8000))] += vd->histogram[i]; for (i = 0; i <= MAX_DB && !histdb[i]; i++); for (; i <= MAX_DB && sum < nb_samples / 1000; i++) { av_log(ctx, AV_LOG_INFO, "histogram_%ddb: %"PRId64"\n", i, histdb[i]); sum += histdb[i]; } } static av_cold void uninit(AVFilterContext *ctx) { print_stats(ctx); } static const AVFilterPad volumedetect_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_AUDIO, .filter_frame = filter_frame, }, { NULL } }; static const AVFilterPad volumedetect_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_AUDIO, }, { NULL } }; AVFilter ff_af_volumedetect = { .name = "volumedetect", .description = NULL_IF_CONFIG_SMALL("Detect audio volume."), .priv_size = sizeof(VolDetectContext), .query_formats = query_formats, .uninit = uninit, .inputs = volumedetect_inputs, .outputs = volumedetect_outputs, };
null
null
null
null
69,711
39,109
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
39,109
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "third_party/blink/renderer/modules/vr/vr_controller.h" #include "services/metrics/public/cpp/ukm_builders.h" #include "services/service_manager/public/cpp/interface_provider.h" #include "third_party/blink/renderer/bindings/core/v8/script_promise_resolver.h" #include "third_party/blink/renderer/core/dom/document.h" #include "third_party/blink/renderer/core/dom/dom_exception.h" #include "third_party/blink/renderer/core/frame/local_frame.h" #include "third_party/blink/renderer/modules/vr/navigator_vr.h" #include "third_party/blink/renderer/modules/vr/vr_get_devices_callback.h" #include "third_party/blink/renderer/platform/wtf/assertions.h" namespace blink { VRController::VRController(NavigatorVR* navigator_vr) : ContextLifecycleObserver(navigator_vr->GetDocument()), navigator_vr_(navigator_vr), display_synced_(false), binding_(this) { navigator_vr->GetDocument()->GetFrame()->GetInterfaceProvider().GetInterface( mojo::MakeRequest(&service_)); service_.set_connection_error_handler( WTF::Bind(&VRController::Dispose, WrapWeakPersistent(this))); device::mojom::blink::VRServiceClientPtr client; binding_.Bind(mojo::MakeRequest(&client)); service_->SetClient( std::move(client), WTF::Bind(&VRController::OnDisplaysSynced, WrapPersistent(this))); } VRController::~VRController() = default; void VRController::GetDisplays(ScriptPromiseResolver* resolver) { // If we've previously synced the VRDisplays or no longer have a valid service // connection just return the current list. In the case of the service being // disconnected this will be an empty array. if (!service_ || display_synced_) { LogGetDisplayResult(); resolver->Resolve(displays_); return; } // Otherwise we're still waiting for the full list of displays to be populated // so queue up the promise for resolution when onDisplaysSynced is called. 
pending_get_devices_callbacks_.push_back( std::make_unique<VRGetDevicesCallback>(resolver)); } void VRController::SetListeningForActivate(bool listening) { if (service_) service_->SetListeningForActivate(listening); } // Each time a new VRDisplay is connected we'll receive a VRDisplayPtr for it // here. Upon calling SetClient in the constructor we should receive one call // for each VRDisplay that was already connected at the time. void VRController::OnDisplayConnected( device::mojom::blink::VRMagicWindowProviderPtr magic_window_provider, device::mojom::blink::VRDisplayHostPtr display, device::mojom::blink::VRDisplayClientRequest request, device::mojom::blink::VRDisplayInfoPtr display_info) { VRDisplay* vr_display = new VRDisplay(navigator_vr_, std::move(magic_window_provider), std::move(display), std::move(request)); vr_display->Update(display_info); vr_display->OnConnected(); vr_display->FocusChanged(); has_presentation_capable_display_ = display_info->capabilities->canPresent; has_display_ = true; displays_.push_back(vr_display); } void VRController::FocusChanged() { for (const auto& display : displays_) display->FocusChanged(); } // Called when the VRService has called OnDisplayConnected for all active // VRDisplays. 
void VRController::OnDisplaysSynced() { display_synced_ = true; OnGetDisplays(); } void VRController::LogGetDisplayResult() { Document* doc = navigator_vr_->GetDocument(); if (has_display_ && doc && doc->IsInMainFrame()) { ukm::builders::XR_WebXR ukm_builder(doc->UkmSourceID()); ukm_builder.SetReturnedDevice(1); if (has_presentation_capable_display_) { ukm_builder.SetReturnedPresentationCapableDevice(1); } ukm_builder.Record(doc->UkmRecorder()); } } void VRController::OnGetDisplays() { while (!pending_get_devices_callbacks_.IsEmpty()) { LogGetDisplayResult(); std::unique_ptr<VRGetDevicesCallback> callback = pending_get_devices_callbacks_.TakeFirst(); callback->OnSuccess(displays_); } } void VRController::ContextDestroyed(ExecutionContext*) { Dispose(); } void VRController::Dispose() { // If the document context was destroyed, shut down the client connection // and never call the mojo service again. service_.reset(); binding_.Close(); // Shutdown all displays' message pipe for (const auto& display : displays_) display->Dispose(); displays_.clear(); // Ensure that any outstanding getDisplays promises are resolved. OnGetDisplays(); } void VRController::Trace(blink::Visitor* visitor) { visitor->Trace(navigator_vr_); visitor->Trace(displays_); ContextLifecycleObserver::Trace(visitor); } } // namespace blink
null
null
null
null
35,972
21,701
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
186,696
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/****************************************************************************** * * Copyright(c) 2009-2013 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <[email protected]> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Created on 2010/ 5/18, 1:41 * * Larry Finger <[email protected]> * *****************************************************************************/ #ifndef __RTL92CE_TABLE__H_ #define __RTL92CE_TABLE__H_ #include <linux/types.h> #define RTL8188EEPHY_REG_1TARRAYLEN 382 extern u32 RTL8188EEPHY_REG_1TARRAY[]; #define RTL8188EEPHY_REG_ARRAY_PGLEN 264 extern u32 RTL8188EEPHY_REG_ARRAY_PG[]; #define RTL8188EE_RADIOA_1TARRAYLEN 190 extern u32 RTL8188EE_RADIOA_1TARRAY[]; #define RTL8188EEMAC_1T_ARRAYLEN 180 extern u32 RTL8188EEMAC_1T_ARRAY[]; #define RTL8188EEAGCTAB_1TARRAYLEN 256 extern u32 RTL8188EEAGCTAB_1TARRAY[]; #endif
null
null
null
null
95,043
12,831
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
12,831
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_STORAGE_MONITOR_STORAGE_MONITOR_MAC_H_ #define COMPONENTS_STORAGE_MONITOR_STORAGE_MONITOR_MAC_H_ #include <DiskArbitration/DiskArbitration.h> #include <map> #include <memory> #include "base/mac/scoped_cftyperef.h" #include "base/macros.h" #include "base/memory/weak_ptr.h" #include "components/storage_monitor/storage_monitor.h" namespace storage_monitor { class ImageCaptureDeviceManager; // This class posts notifications to listeners when a new disk // is attached, removed, or changed. class StorageMonitorMac : public StorageMonitor, public base::SupportsWeakPtr<StorageMonitorMac> { public: enum UpdateType { UPDATE_DEVICE_ADDED, UPDATE_DEVICE_CHANGED, UPDATE_DEVICE_REMOVED, }; // Should only be called by browser start up code. Use GetInstance() instead. StorageMonitorMac(); ~StorageMonitorMac() override; void Init() override; void UpdateDisk(UpdateType update_type, std::string* bsd_name, const StorageInfo& info); bool GetStorageInfoForPath(const base::FilePath& path, StorageInfo* device_info) const override; void EjectDevice(const std::string& device_id, base::Callback<void(EjectStatus)> callback) override; private: static void DiskAppearedCallback(DADiskRef disk, void* context); static void DiskDisappearedCallback(DADiskRef disk, void* context); static void DiskDescriptionChangedCallback(DADiskRef disk, CFArrayRef keys, void* context); void GetDiskInfoAndUpdate(DADiskRef disk, UpdateType update_type); bool ShouldPostNotificationForDisk(const StorageInfo& info) const; bool FindDiskWithMountPoint(const base::FilePath& mount_point, StorageInfo* info) const; base::ScopedCFTypeRef<DASessionRef> session_; // Maps disk bsd names to disk info objects. This map tracks all mountable // devices on the system, though only notifications for removable devices are // posted. 
std::map<std::string, StorageInfo> disk_info_map_; int pending_disk_updates_; std::unique_ptr<ImageCaptureDeviceManager> image_capture_device_manager_; DISALLOW_COPY_AND_ASSIGN(StorageMonitorMac); }; } // namespace storage_monitor #endif // COMPONENTS_STORAGE_MONITOR_STORAGE_MONITOR_MAC_H_
null
null
null
null
9,694
1,384
null
train_val
83ed75feba32e46f736fcce0d96a0445f29b96c2
163,228
krb5
0
https://github.com/krb5/krb5
2016-01-27 15:43:28-05:00
/* * Copyright (c) 2001, Dr Brian Gladman <[email protected]>, Worcester, UK. * All rights reserved. * * LICENSE TERMS * * The free distribution and use of this software in both source and binary * form is allowed (with or without changes) provided that: * * 1. distributions of this source code include the above copyright * notice, this list of conditions and the following disclaimer; * * 2. distributions in binary form include the above copyright * notice, this list of conditions and the following disclaimer * in the documentation and/or other associated materials; * * 3. the copyright holder's name is not used to endorse products * built using this software without specific written permission. * * DISCLAIMER * * This software is provided 'as is' with no explcit or implied warranties * in respect of any properties, including, but not limited to, correctness * and fitness for purpose. */ /* * Issue Date: 21/01/2002 * * This file contains the code for implementing encryption and decryption * for AES (Rijndael) for block and key sizes of 16, 20, 24, 28 and 32 bytes. * It can optionally be replaced by code written in assembler using NASM. 
*/ #include "aesopt.h" #define unused 77 /* Sunset Strip */ #define si(y,x,k,c) s(y,c) = word_in(x + 4 * c) ^ k[c] #define so(y,x,c) word_out(y + 4 * c, s(x,c)) #if BLOCK_SIZE == 16 #if defined(ARRAYS) #define locals(y,x) x[4],y[4] #else #define locals(y,x) x##0,x##1,x##2,x##3,y##0,y##1,y##2,y##3 /* the following defines prevent the compiler requiring the declaration of generated but unused variables in the fwd_var and inv_var macros */ #define b04 unused #define b05 unused #define b06 unused #define b07 unused #define b14 unused #define b15 unused #define b16 unused #define b17 unused #endif #define l_copy(y, x) s(y,0) = s(x,0); s(y,1) = s(x,1); \ s(y,2) = s(x,2); s(y,3) = s(x,3); #define state_in(y,x,k) si(y,x,k,0); si(y,x,k,1); si(y,x,k,2); si(y,x,k,3) #define state_out(y,x) so(y,x,0); so(y,x,1); so(y,x,2); so(y,x,3) #define round(rm,y,x,k) rm(y,x,k,0); rm(y,x,k,1); rm(y,x,k,2); rm(y,x,k,3) #elif BLOCK_SIZE == 20 #if defined(ARRAYS) #define locals(y,x) x[5],y[5] #else #define locals(y,x) x##0,x##1,x##2,x##3,x##4,y##0,y##1,y##2,y##3,y##4 #define b05 unused #define b06 unused #define b07 unused #define b15 unused #define b16 unused #define b17 unused #endif #define l_copy(y, x) s(y,0) = s(x,0); s(y,1) = s(x,1); \ s(y,2) = s(x,2); s(y,3) = s(x,3); s(y,4) = s(x,4); #define state_in(y,x,k) si(y,x,k,0); si(y,x,k,1); si(y,x,k,2); si(y,x,k,3); si(y,x,k,4) #define state_out(y,x) so(y,x,0); so(y,x,1); so(y,x,2); so(y,x,3); so(y,x,4) #define round(rm,y,x,k) rm(y,x,k,0); rm(y,x,k,1); rm(y,x,k,2); rm(y,x,k,3); rm(y,x,k,4) #elif BLOCK_SIZE == 24 #if defined(ARRAYS) #define locals(y,x) x[6],y[6] #else #define locals(y,x) x##0,x##1,x##2,x##3,x##4,x##5, \ y##0,y##1,y##2,y##3,y##4,y##5 #define b06 unused #define b07 unused #define b16 unused #define b17 unused #endif #define l_copy(y, x) s(y,0) = s(x,0); s(y,1) = s(x,1); \ s(y,2) = s(x,2); s(y,3) = s(x,3); \ s(y,4) = s(x,4); s(y,5) = s(x,5); #define state_in(y,x,k) si(y,x,k,0); si(y,x,k,1); si(y,x,k,2); \ si(y,x,k,3); 
si(y,x,k,4); si(y,x,k,5) #define state_out(y,x) so(y,x,0); so(y,x,1); so(y,x,2); \ so(y,x,3); so(y,x,4); so(y,x,5) #define round(rm,y,x,k) rm(y,x,k,0); rm(y,x,k,1); rm(y,x,k,2); \ rm(y,x,k,3); rm(y,x,k,4); rm(y,x,k,5) #elif BLOCK_SIZE == 28 #if defined(ARRAYS) #define locals(y,x) x[7],y[7] #else #define locals(y,x) x##0,x##1,x##2,x##3,x##4,x##5,x##6 \ y##0,y##1,y##2,y##3,y##4,y##5,y##6 #define b07 unused #define b17 unused #endif #define l_copy(y, x) s(y,0) = s(x,0); s(y,1) = s(x,1); \ s(y,2) = s(x,2); s(y,3) = s(x,3); \ s(y,4) = s(x,4); s(y,5) = s(x,5);; s(y,6) = s(x,6); #define state_in(y,x,k) si(y,x,k,0); si(y,x,k,1); si(y,x,k,2); \ si(y,x,k,3); si(y,x,k,4); si(y,x,k,5); si(y,x,k,6) #define state_out(y,x) so(y,x,0); so(y,x,1); so(y,x,2); \ so(y,x,3); so(y,x,4); so(y,x,5); so(y,x,6) #define round(rm,y,x,k) rm(y,x,k,0); rm(y,x,k,1); rm(y,x,k,2); \ rm(y,x,k,3); rm(y,x,k,4); rm(y,x,k,5); rm(y,x,k,6) #else #if defined(ARRAYS) #define locals(y,x) x[8],y[8] #else #define locals(y,x) x##0,x##1,x##2,x##3,x##4,x##5,x##6,x##7, \ y##0,y##1,y##2,y##3,y##4,y##5,y##6,y##7 #endif #define l_copy(y, x) s(y,0) = s(x,0); s(y,1) = s(x,1); \ s(y,2) = s(x,2); s(y,3) = s(x,3); \ s(y,4) = s(x,4); s(y,5) = s(x,5); \ s(y,6) = s(x,6); s(y,7) = s(x,7); #if BLOCK_SIZE == 32 #define state_in(y,x,k) si(y,x,k,0); si(y,x,k,1); si(y,x,k,2); si(y,x,k,3); \ si(y,x,k,4); si(y,x,k,5); si(y,x,k,6); si(y,x,k,7) #define state_out(y,x) so(y,x,0); so(y,x,1); so(y,x,2); so(y,x,3); \ so(y,x,4); so(y,x,5); so(y,x,6); so(y,x,7) #define round(rm,y,x,k) rm(y,x,k,0); rm(y,x,k,1); rm(y,x,k,2); rm(y,x,k,3); \ rm(y,x,k,4); rm(y,x,k,5); rm(y,x,k,6); rm(y,x,k,7) #else #define state_in(y,x,k) \ switch(nc) \ { case 8: si(y,x,k,7); \ case 7: si(y,x,k,6); \ case 6: si(y,x,k,5); \ case 5: si(y,x,k,4); \ case 4: si(y,x,k,3); si(y,x,k,2); \ si(y,x,k,1); si(y,x,k,0); \ } #define state_out(y,x) \ switch(nc) \ { case 8: so(y,x,7); \ case 7: so(y,x,6); \ case 6: so(y,x,5); \ case 5: so(y,x,4); \ case 4: so(y,x,3); so(y,x,2); \ 
so(y,x,1); so(y,x,0); \ } #if defined(FAST_VARIABLE) #define round(rm,y,x,k) \ switch(nc) \ { case 8: rm(y,x,k,7); rm(y,x,k,6); \ rm(y,x,k,5); rm(y,x,k,4); \ rm(y,x,k,3); rm(y,x,k,2); \ rm(y,x,k,1); rm(y,x,k,0); \ break; \ case 7: rm(y,x,k,6); rm(y,x,k,5); \ rm(y,x,k,4); rm(y,x,k,3); \ rm(y,x,k,2); rm(y,x,k,1); \ rm(y,x,k,0); \ break; \ case 6: rm(y,x,k,5); rm(y,x,k,4); \ rm(y,x,k,3); rm(y,x,k,2); \ rm(y,x,k,1); rm(y,x,k,0); \ break; \ case 5: rm(y,x,k,4); rm(y,x,k,3); \ rm(y,x,k,2); rm(y,x,k,1); \ rm(y,x,k,0); \ break; \ case 4: rm(y,x,k,3); rm(y,x,k,2); \ rm(y,x,k,1); rm(y,x,k,0); \ break; \ } #else #define round(rm,y,x,k) \ switch(nc) \ { case 8: rm(y,x,k,7); \ case 7: rm(y,x,k,6); \ case 6: rm(y,x,k,5); \ case 5: rm(y,x,k,4); \ case 4: rm(y,x,k,3); rm(y,x,k,2); \ rm(y,x,k,1); rm(y,x,k,0); \ } #endif #endif #endif #if defined(ENCRYPTION) /* I am grateful to Frank Yellin for the following construction (and that for decryption) which, given the column (c) of the output state variable, gives the input state variables which are needed for each row (r) of the state. For the fixed block size options, compilers should reduce these two expressions to fixed variable references. But for variable block size code conditional clauses will sometimes be returned. y = output word, x = input word, r = row, c = column for r = 0, 1, 2 and 3 = column accessed for row r. */ #define fwd_var(x,r,c) \ ( r==0 ? \ ( c==0 ? s(x,0) \ : c==1 ? s(x,1) \ : c==2 ? s(x,2) \ : c==3 ? s(x,3) \ : c==4 ? s(x,4) \ : c==5 ? s(x,5) \ : c==6 ? s(x,6) \ : s(x,7)) \ : r==1 ? \ ( c==0 ? s(x,1) \ : c==1 ? s(x,2) \ : c==2 ? s(x,3) \ : c==3 ? nc==4 ? s(x,0) : s(x,4) \ : c==4 ? nc==5 ? s(x,0) : s(x,5) \ : c==5 ? nc==6 ? s(x,0) : s(x,6) \ : c==6 ? nc==7 ? s(x,0) : s(x,7) \ : s(x,0)) \ : r==2 ? \ ( c==0 ? nc==8 ? s(x,3) : s(x,2) \ : c==1 ? nc==8 ? s(x,4) : s(x,3) \ : c==2 ? nc==8 ? s(x,5) : nc==4 ? s(x,0) : s(x,4) \ : c==3 ? nc==8 ? s(x,6) : nc==5 ? s(x,0) : nc==4 ? s(x,1) : s(x,5) \ : c==4 ? nc==8 ? 
s(x,7) : nc==7 ? s(x,6) : nc==6 ? s(x,0) : s(x,1) \ : c==5 ? nc==6 ? s(x,1) : s(x,0) \ : c==6 ? s(x,1) \ : s(x,2)) \ : \ ( c==0 ? nc>6 ? s(x,4) : s(x,3) \ : c==1 ? nc>6 ? s(x,5) : nc==4 ? s(x,0) : s(x,4) \ : c==2 ? nc>6 ? s(x,6) : nc==6 ? s(x,5) : nc==5 ? s(x,0) : s(x,1) \ : c==3 ? nc==8 ? s(x,7) : nc==5 ? s(x,1) : nc==4 ? s(x,2) : s(x,0) \ : c==4 ? nc==8 ? s(x,0) : nc==5 ? s(x,2) : s(x,1) \ : c==5 ? nc==8 ? s(x,1) : s(x,2) \ : c==6 ? nc==8 ? s(x,2) : s(x,3) \ : s(x,3))) #if defined(FT4_SET) #undef dec_fmvars #define dec_fmvars #define fwd_rnd(y,x,k,c) s(y,c)= (k)[c] ^ four_tables(x,ft_tab,fwd_var,rf1,c) #elif defined(FT1_SET) #undef dec_fmvars #define dec_fmvars #define fwd_rnd(y,x,k,c) s(y,c)= (k)[c] ^ one_table(x,upr,ft_tab,fwd_var,rf1,c) #else #define fwd_rnd(y,x,k,c) s(y,c) = fwd_mcol(no_table(x,s_box,fwd_var,rf1,c)) ^ (k)[c] #endif #if defined(FL4_SET) #define fwd_lrnd(y,x,k,c) s(y,c)= (k)[c] ^ four_tables(x,fl_tab,fwd_var,rf1,c) #elif defined(FL1_SET) #define fwd_lrnd(y,x,k,c) s(y,c)= (k)[c] ^ one_table(x,ups,fl_tab,fwd_var,rf1,c) #else #define fwd_lrnd(y,x,k,c) s(y,c) = no_table(x,s_box,fwd_var,rf1,c) ^ (k)[c] #endif aes_rval aes_enc_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1]) { uint32_t locals(b0, b1); const uint32_t *kp = cx->k_sch; dec_fmvars /* declare variables for fwd_mcol() if needed */ if(!(cx->n_blk & 1)) return aes_bad; #if (ENC_UNROLL == FULL) state_in((cx->n_rnd & 1 ? 
b1 : b0), in_blk, kp); kp += (cx->n_rnd - 9) * nc; switch(cx->n_rnd) { case 14: round(fwd_rnd, b1, b0, kp - 4 * nc); case 13: round(fwd_rnd, b0, b1, kp - 3 * nc); case 12: round(fwd_rnd, b1, b0, kp - 2 * nc); case 11: round(fwd_rnd, b0, b1, kp - nc); case 10: round(fwd_rnd, b1, b0, kp ); round(fwd_rnd, b0, b1, kp + nc); round(fwd_rnd, b1, b0, kp + 2 * nc); round(fwd_rnd, b0, b1, kp + 3 * nc); round(fwd_rnd, b1, b0, kp + 4 * nc); round(fwd_rnd, b0, b1, kp + 5 * nc); round(fwd_rnd, b1, b0, kp + 6 * nc); round(fwd_rnd, b0, b1, kp + 7 * nc); round(fwd_rnd, b1, b0, kp + 8 * nc); round(fwd_lrnd, b0, b1, kp + 9 * nc); } #else { uint32_t rnd; state_in(b0, in_blk, kp); #if (ENC_UNROLL == PARTIAL) for(rnd = 0; rnd < (cx->n_rnd - 1) >> 1; ++rnd) { kp += nc; round(fwd_rnd, b1, b0, kp); kp += nc; round(fwd_rnd, b0, b1, kp); } if(cx->n_rnd & 1) { l_copy(b1, b0); } else { kp += nc; round(fwd_rnd, b1, b0, kp); } #else for(rnd = 0; rnd < cx->n_rnd - 1; ++rnd) { kp += nc; round(fwd_rnd, b1, b0, kp); l_copy(b0, b1); } #endif kp += nc; round(fwd_lrnd, b0, b1, kp); } #endif state_out(out_blk, b0); return aes_good; } #endif #if defined(DECRYPTION) #define inv_var(x,r,c) \ ( r==0 ? \ ( c==0 ? s(x,0) \ : c==1 ? s(x,1) \ : c==2 ? s(x,2) \ : c==3 ? s(x,3) \ : c==4 ? s(x,4) \ : c==5 ? s(x,5) \ : c==6 ? s(x,6) \ : s(x,7)) \ : r==1 ? \ ( c==0 ? nc==8 ? s(x,7) : nc==7 ? s(x,6) : nc==6 ? s(x,5) : nc==5 ? s(x,4) : s(x,3) \ : c==1 ? s(x,0) \ : c==2 ? s(x,1) \ : c==3 ? s(x,2) \ : c==4 ? s(x,3) \ : c==5 ? s(x,4) \ : c==6 ? s(x,5) \ : s(x,6)) \ : r==2 ? \ ( c==0 ? nc>6 ? s(x,5) : nc==6 ? s(x,4) : nc==5 ? s(x,3) : s(x,2) \ : c==1 ? nc>6 ? s(x,6) : nc==6 ? s(x,5) : nc==5 ? s(x,4) : s(x,3) \ : c==2 ? nc==8 ? s(x,7) : s(x,0) \ : c==3 ? nc==8 ? s(x,0) : s(x,1) \ : c==4 ? nc==8 ? s(x,1) : s(x,2) \ : c==5 ? nc==8 ? s(x,2) : s(x,3) \ : c==6 ? nc==8 ? s(x,3) : s(x,4) \ : s(x,4)) \ : \ ( c==0 ? nc==8 ? s(x,4) : nc==5 ? s(x,2) : nc==4 ? s(x,1) : s(x,3) \ : c==1 ? nc==8 ? s(x,5) : nc==5 ? s(x,3) : nc==4 ? 
s(x,2) : s(x,4) \ : c==2 ? nc==8 ? s(x,6) : nc==5 ? s(x,4) : nc==4 ? s(x,3) : s(x,5) \ : c==3 ? nc==8 ? s(x,7) : nc==7 ? s(x,6) : s(x,0) \ : c==4 ? nc>6 ? s(x,0) : s(x,1) \ : c==5 ? nc==6 ? s(x,2) : s(x,1) \ : c==6 ? s(x,2) \ : s(x,3))) #if defined(IT4_SET) #undef dec_imvars #define dec_imvars #define inv_rnd(y,x,k,c) s(y,c)= (k)[c] ^ four_tables(x,it_tab,inv_var,rf1,c) #elif defined(IT1_SET) #undef dec_imvars #define dec_imvars #define inv_rnd(y,x,k,c) s(y,c)= (k)[c] ^ one_table(x,upr,it_tab,inv_var,rf1,c) #else #define inv_rnd(y,x,k,c) s(y,c) = inv_mcol(no_table(x,inv_s_box,inv_var,rf1,c) ^ (k)[c]) #endif #if defined(IL4_SET) #define inv_lrnd(y,x,k,c) s(y,c)= (k)[c] ^ four_tables(x,il_tab,inv_var,rf1,c) #elif defined(IL1_SET) #define inv_lrnd(y,x,k,c) s(y,c)= (k)[c] ^ one_table(x,ups,il_tab,inv_var,rf1,c) #else #define inv_lrnd(y,x,k,c) s(y,c) = no_table(x,inv_s_box,inv_var,rf1,c) ^ (k)[c] #endif aes_rval aes_dec_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1]) { uint32_t locals(b0, b1); const uint32_t *kp = cx->k_sch + nc * cx->n_rnd; dec_imvars /* declare variables for inv_mcol() if needed */ if(!(cx->n_blk & 2)) return aes_bad; #if (DEC_UNROLL == FULL) state_in((cx->n_rnd & 1 ? 
b1 : b0), in_blk, kp); kp = cx->k_sch + 9 * nc; switch(cx->n_rnd) { case 14: round(inv_rnd, b1, b0, kp + 4 * nc); case 13: round(inv_rnd, b0, b1, kp + 3 * nc); case 12: round(inv_rnd, b1, b0, kp + 2 * nc); case 11: round(inv_rnd, b0, b1, kp + nc); case 10: round(inv_rnd, b1, b0, kp ); round(inv_rnd, b0, b1, kp - nc); round(inv_rnd, b1, b0, kp - 2 * nc); round(inv_rnd, b0, b1, kp - 3 * nc); round(inv_rnd, b1, b0, kp - 4 * nc); round(inv_rnd, b0, b1, kp - 5 * nc); round(inv_rnd, b1, b0, kp - 6 * nc); round(inv_rnd, b0, b1, kp - 7 * nc); round(inv_rnd, b1, b0, kp - 8 * nc); round(inv_lrnd, b0, b1, kp - 9 * nc); } #else { uint32_t rnd; state_in(b0, in_blk, kp); #if (DEC_UNROLL == PARTIAL) for(rnd = 0; rnd < (cx->n_rnd - 1) >> 1; ++rnd) { kp -= nc; round(inv_rnd, b1, b0, kp); kp -= nc; round(inv_rnd, b0, b1, kp); } if(cx->n_rnd & 1) { l_copy(b1, b0); } else { kp -= nc; round(inv_rnd, b1, b0, kp); } #else for(rnd = 0; rnd < cx->n_rnd - 1; ++rnd) { kp -= nc; round(inv_rnd, b1, b0, kp); l_copy(b0, b1); } #endif kp -= nc; round(inv_lrnd, b0, b1, kp); } #endif state_out(out_blk, b0); return aes_good; } #endif
null
null
null
null
74,536
24,714
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
24,714
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "extensions/shell/browser/api/identity/identity_api.h" #include <set> #include <string> #include "base/guid.h" #include "base/memory/ptr_util.h" #include "content/public/browser/browser_context.h" #include "content/public/browser/storage_partition.h" #include "extensions/common/manifest_handlers/oauth2_manifest_handler.h" #include "extensions/shell/browser/shell_oauth2_token_service.h" #include "extensions/shell/common/api/identity.h" #include "google_apis/gaia/gaia_auth_util.h" namespace extensions { namespace shell { namespace { const char kIdentityApiId[] = "identity_api"; const char kErrorNoUserAccount[] = "No user account."; const char kErrorNoRefreshToken[] = "No refresh token."; const char kErrorNoScopesInManifest[] = "No scopes in manifest."; const char kErrorUserPermissionRequired[] = "User permission required but not available in app_shell"; } // namespace IdentityAPI::IdentityAPI(content::BrowserContext* context) : device_id_(base::GenerateGUID()) { } IdentityAPI::~IdentityAPI() { } // static IdentityAPI* IdentityAPI::Get(content::BrowserContext* context) { return BrowserContextKeyedAPIFactory<IdentityAPI>::Get(context); } // static BrowserContextKeyedAPIFactory<IdentityAPI>* IdentityAPI::GetFactoryInstance() { static base::LazyInstance< BrowserContextKeyedAPIFactory<IdentityAPI>>::DestructorAtExit factory = LAZY_INSTANCE_INITIALIZER; return factory.Pointer(); } /////////////////////////////////////////////////////////////////////////////// IdentityGetAuthTokenFunction::IdentityGetAuthTokenFunction() : OAuth2TokenService::Consumer(kIdentityApiId) { } IdentityGetAuthTokenFunction::~IdentityGetAuthTokenFunction() { } void IdentityGetAuthTokenFunction::SetMintTokenFlowForTesting( OAuth2MintTokenFlow* flow) { mint_token_flow_.reset(flow); } ExtensionFunction::ResponseAction 
IdentityGetAuthTokenFunction::Run() { std::unique_ptr<api::identity::GetAuthToken::Params> params( api::identity::GetAuthToken::Params::Create(*args_)); EXTENSION_FUNCTION_VALIDATE(params.get()); ShellOAuth2TokenService* service = ShellOAuth2TokenService::GetInstance(); std::string account_id = service->AccountId(); if (account_id.empty()) return RespondNow(Error(kErrorNoUserAccount)); if (!service->RefreshTokenIsAvailable(account_id)) return RespondNow(Error(kErrorNoRefreshToken)); // Verify that we have scopes. const OAuth2Info& oauth2_info = OAuth2Info::GetOAuth2Info(extension()); if (oauth2_info.scopes.empty()) return RespondNow(Error(kErrorNoScopesInManifest)); // Balanced in OnGetTokenFailure() and in the OAuth2MintTokenFlow callbacks. AddRef(); // First, fetch a logged-in-user access token for the Chrome project client ID // and client secret. This token is used later to get a second access token // that will be returned to the app. std::set<std::string> no_scopes; access_token_request_ = service->StartRequest(service->AccountId(), no_scopes, this); return RespondLater(); } void IdentityGetAuthTokenFunction::OnGetTokenSuccess( const OAuth2TokenService::Request* request, const std::string& access_token, const base::Time& expiration_time) { // Tests may override the mint token flow. if (!mint_token_flow_) { const OAuth2Info& oauth2_info = OAuth2Info::GetOAuth2Info(extension()); DCHECK(!oauth2_info.scopes.empty()); mint_token_flow_.reset(new OAuth2MintTokenFlow( this, OAuth2MintTokenFlow::Parameters( extension()->id(), oauth2_info.client_id, oauth2_info.scopes, IdentityAPI::Get(browser_context())->device_id(), OAuth2MintTokenFlow::MODE_MINT_TOKEN_FORCE))); } // Use the logging-in-user access token to mint an access token for this app. 
mint_token_flow_->Start( content::BrowserContext::GetDefaultStoragePartition(browser_context())-> GetURLRequestContext(), access_token); } void IdentityGetAuthTokenFunction::OnGetTokenFailure( const OAuth2TokenService::Request* request, const GoogleServiceAuthError& error) { Respond(Error(error.ToString())); Release(); // Balanced in Run(). } void IdentityGetAuthTokenFunction::OnMintTokenSuccess( const std::string& access_token, int time_to_live) { Respond(OneArgument(std::make_unique<base::Value>(access_token))); Release(); // Balanced in Run(). } void IdentityGetAuthTokenFunction::OnIssueAdviceSuccess( const IssueAdviceInfo& issue_advice) { Respond(Error(kErrorUserPermissionRequired)); Release(); // Balanced in Run(). } void IdentityGetAuthTokenFunction::OnMintTokenFailure( const GoogleServiceAuthError& error) { Respond(Error(error.ToString())); Release(); // Balanced in Run(). } /////////////////////////////////////////////////////////////////////////////// IdentityRemoveCachedAuthTokenFunction::IdentityRemoveCachedAuthTokenFunction() { } IdentityRemoveCachedAuthTokenFunction:: ~IdentityRemoveCachedAuthTokenFunction() { } ExtensionFunction::ResponseAction IdentityRemoveCachedAuthTokenFunction::Run() { std::unique_ptr<api::identity::RemoveCachedAuthToken::Params> params( api::identity::RemoveCachedAuthToken::Params::Create(*args_)); EXTENSION_FUNCTION_VALIDATE(params.get()); // This stub identity API does not maintain a token cache, so there is nothing // to remove. return RespondNow(NoArguments()); } } // namespace shell } // namespace extensions
null
null
null
null
21,577
19,221
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
19,221
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <utility> #include "components/consent_auditor/fake_consent_auditor.h" namespace consent_auditor { FakeConsentAuditor::FakeConsentAuditor( PrefService* pref_service, syncer::UserEventService* user_event_service) : ConsentAuditor(pref_service, user_event_service, std::string(), std::string()) {} FakeConsentAuditor::~FakeConsentAuditor() {} void FakeConsentAuditor::RecordGaiaConsent( consent_auditor::Feature feature, const std::vector<int>& description_grd_ids, int confirmation_grd_id, consent_auditor::ConsentStatus status) { std::vector<int> ids = description_grd_ids; ids.push_back(confirmation_grd_id); recorded_id_vectors_.push_back(std::move(ids)); recorded_features_.push_back(feature); recorded_statuses_.push_back(status); } } // namespace consent_auditor
null
null
null
null
16,084
9,116
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
174,111
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * omap_hwmod_2xxx_3xxx_interconnect_data.c - common interconnect data, OMAP2/3 * * Copyright (C) 2009-2011 Nokia Corporation * Paul Walmsley * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * XXX handle crossbar/shared link difference for L3? * XXX these should be marked initdata for multi-OMAP kernels */ #include <asm/sizes.h> #include "omap_hwmod.h" #include "omap_hwmod_common_data.h" struct omap_hwmod_addr_space omap2_dma_system_addrs[] = { { .pa_start = 0x48056000, .pa_end = 0x48056000 + SZ_4K - 1, .flags = ADDR_TYPE_RT, }, { }, };
null
null
null
null
82,458
6,942
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
6,942
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "net/quic/test_tools/simulator/traffic_policer.h" #include <algorithm> using std::string; namespace net { namespace simulator { TrafficPolicer::TrafficPolicer(Simulator* simulator, string name, QuicByteCount initial_bucket_size, QuicByteCount max_bucket_size, QuicBandwidth target_bandwidth, Endpoint* input) : PacketFilter(simulator, name, input), initial_bucket_size_(initial_bucket_size), max_bucket_size_(max_bucket_size), target_bandwidth_(target_bandwidth), last_refill_time_(clock_->Now()) {} TrafficPolicer::~TrafficPolicer() {} void TrafficPolicer::Refill() { QuicTime::Delta time_passed = clock_->Now() - last_refill_time_; QuicByteCount refill_size = time_passed * target_bandwidth_; for (auto& bucket : token_buckets_) { bucket.second = std::min(bucket.second + refill_size, max_bucket_size_); } last_refill_time_ = clock_->Now(); } bool TrafficPolicer::FilterPacket(const Packet& packet) { // Refill existing buckets. Refill(); // Create a new bucket if one does not exist. if (token_buckets_.count(packet.destination) == 0) { token_buckets_.insert( std::make_pair(packet.destination, initial_bucket_size_)); } auto bucket = token_buckets_.find(packet.destination); DCHECK(bucket != token_buckets_.end()); // Silently drop the packet on the floor if out of tokens if (bucket->second < packet.size) { return false; } bucket->second -= packet.size; return true; } } // namespace simulator } // namespace net
null
null
null
null
3,805
39,572
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
204,567
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Power trace points * * Copyright (C) 2009 Ming Lei <[email protected]> */ #include <linux/string.h> #include <linux/types.h> #include <linux/workqueue.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/usb.h> #define CREATE_TRACE_POINTS #include <trace/events/rpm.h> EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_return_int); EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_idle); EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_suspend); EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_resume);
null
null
null
null
112,914
9,261
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
9,261
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "net/http/http_response_headers.h" #include <stdint.h> #include <algorithm> #include <iostream> #include <limits> #include <memory> #include <unordered_set> #include "base/pickle.h" #include "base/time/time.h" #include "base/values.h" #include "net/http/http_byte_range.h" #include "net/http/http_util.h" #include "net/log/net_log_capture_mode.h" #include "testing/gtest/include/gtest/gtest.h" namespace net { namespace { struct TestData { const char* raw_headers; const char* expected_headers; HttpVersion expected_version; int expected_response_code; const char* expected_status_text; }; class HttpResponseHeadersTest : public testing::Test { }; // Transform "normal"-looking headers (\n-separated) to the appropriate // input format for ParseRawHeaders (\0-separated). void HeadersToRaw(std::string* headers) { std::replace(headers->begin(), headers->end(), '\n', '\0'); if (!headers->empty()) *headers += '\0'; } class HttpResponseHeadersCacheControlTest : public HttpResponseHeadersTest { protected: // Make tests less verbose. typedef base::TimeDelta TimeDelta; // Initilise the headers() value with a Cache-Control header set to // |cache_control|. |cache_control| is copied and so can safely be a // temporary. void InitializeHeadersWithCacheControl(const char* cache_control) { std::string raw_headers("HTTP/1.1 200 OK\n"); raw_headers += "Cache-Control: "; raw_headers += cache_control; raw_headers += "\n"; HeadersToRaw(&raw_headers); headers_ = new HttpResponseHeaders(raw_headers); } const scoped_refptr<HttpResponseHeaders>& headers() { return headers_; } // Return a pointer to a TimeDelta object. For use when the value doesn't // matter. TimeDelta* TimeDeltaPointer() { return &delta_; } // Get the max-age value. 
This should only be used in tests where a valid // max-age parameter is expected to be present. TimeDelta GetMaxAgeValue() { DCHECK(headers_.get()) << "Call InitializeHeadersWithCacheControl() first"; TimeDelta max_age_value; EXPECT_TRUE(headers()->GetMaxAgeValue(&max_age_value)); return max_age_value; } private: scoped_refptr<HttpResponseHeaders> headers_; TimeDelta delta_; }; class CommonHttpResponseHeadersTest : public HttpResponseHeadersTest, public ::testing::WithParamInterface<TestData> { }; // Returns a simple text serialization of the given // |HttpResponseHeaders|. This is used by tests to verify that an // |HttpResponseHeaders| matches an expectation string. // // * One line per header, written as: // HEADER_NAME: HEADER_VALUE\n // * The original case of header names is preserved. // * Whitespace around head names/values is stripped. // * Repeated headers are not aggregated. // * Headers are listed in their original order. std::string ToSimpleString(const scoped_refptr<HttpResponseHeaders>& parsed) { std::string result = parsed->GetStatusLine() + "\n"; size_t iter = 0; std::string name; std::string value; while (parsed->EnumerateHeaderLines(&iter, &name, &value)) { std::string new_line = name + ": " + value + "\n"; // Verify that |name| and |value| do not contain ':' or '\n' (if they did // it would make this serialized format ambiguous). 
if (std::count(new_line.begin(), new_line.end(), '\n') != 1 || std::count(new_line.begin(), new_line.end(), ':') != 1) { ADD_FAILURE() << "Unexpected characters in the header name or value: " << new_line; return result; } result += new_line; } return result; } TEST_P(CommonHttpResponseHeadersTest, TestCommon) { const TestData test = GetParam(); std::string raw_headers(test.raw_headers); HeadersToRaw(&raw_headers); std::string expected_headers(test.expected_headers); scoped_refptr<HttpResponseHeaders> parsed( new HttpResponseHeaders(raw_headers)); std::string headers = ToSimpleString(parsed); // Transform to readable output format (so it's easier to see diffs). std::replace(headers.begin(), headers.end(), ' ', '_'); std::replace(headers.begin(), headers.end(), '\n', '\\'); std::replace(expected_headers.begin(), expected_headers.end(), ' ', '_'); std::replace(expected_headers.begin(), expected_headers.end(), '\n', '\\'); EXPECT_EQ(expected_headers, headers); EXPECT_TRUE(test.expected_version == parsed->GetHttpVersion()); EXPECT_EQ(test.expected_response_code, parsed->response_code()); EXPECT_EQ(test.expected_status_text, parsed->GetStatusText()); } TestData response_headers_tests[] = { {// Normalize whitespace. "HTTP/1.1 202 Accepted \n" "Content-TYPE : text/html; charset=utf-8 \n" "Set-Cookie: a \n" "Set-Cookie: b \n", "HTTP/1.1 202 Accepted\n" "Content-TYPE: text/html; charset=utf-8\n" "Set-Cookie: a\n" "Set-Cookie: b\n", HttpVersion(1, 1), 202, "Accepted"}, {// Normalize leading whitespace. "HTTP/1.1 202 Accepted \n" // Starts with space -- will be skipped as invalid. " Content-TYPE : text/html; charset=utf-8 \n" "Set-Cookie: a \n" "Set-Cookie: b \n", "HTTP/1.1 202 Accepted\n" "Set-Cookie: a\n" "Set-Cookie: b\n", HttpVersion(1, 1), 202, "Accepted"}, {// Keep whitespace within status text. "HTTP/1.0 404 Not found \n", "HTTP/1.0 404 Not found\n", HttpVersion(1, 0), 404, "Not found"}, {// Normalize blank headers. 
"HTTP/1.1 200 OK\n" "Header1 : \n" "Header2: \n" "Header3:\n" "Header4\n" "Header5 :\n", "HTTP/1.1 200 OK\n" "Header1: \n" "Header2: \n" "Header3: \n" "Header5: \n", HttpVersion(1, 1), 200, "OK"}, {// Don't believe the http/0.9 version if there are headers! "hTtP/0.9 201\n" "Content-TYPE: text/html; charset=utf-8\n", "HTTP/1.0 201\n" "Content-TYPE: text/html; charset=utf-8\n", HttpVersion(1, 0), 201, ""}, {// Accept the HTTP/0.9 version number if there are no headers. // This is how HTTP/0.9 responses get constructed from // HttpNetworkTransaction. "hTtP/0.9 200 OK\n", "HTTP/0.9 200 OK\n", HttpVersion(0, 9), 200, "OK"}, {// Do not add missing status text. "HTTP/1.1 201\n" "Content-TYPE: text/html; charset=utf-8\n", "HTTP/1.1 201\n" "Content-TYPE: text/html; charset=utf-8\n", HttpVersion(1, 1), 201, ""}, {// Normalize bad status line. "SCREWED_UP_STATUS_LINE\n" "Content-TYPE: text/html; charset=utf-8\n", "HTTP/1.0 200 OK\n" "Content-TYPE: text/html; charset=utf-8\n", HttpVersion(1, 0), 200, "OK"}, {// Normalize bad status line. "Foo bar.", "HTTP/1.0 200\n", HttpVersion(1, 0), 200, ""}, {// Normalize invalid status code. "HTTP/1.1 -1 Unknown\n", "HTTP/1.1 200\n", HttpVersion(1, 1), 200, ""}, {// Normalize empty header. "", "HTTP/1.0 200 OK\n", HttpVersion(1, 0), 200, "OK"}, {// Normalize headers that start with a colon. "HTTP/1.1 202 Accepted \n" "foo: bar\n" ": a \n" " : b\n" "baz: blat \n", "HTTP/1.1 202 Accepted\n" "foo: bar\n" "baz: blat\n", HttpVersion(1, 1), 202, "Accepted"}, {// Normalize headers that end with a colon. "HTTP/1.1 202 Accepted \n" "foo: \n" "bar:\n" "baz: blat \n" "zip:\n", "HTTP/1.1 202 Accepted\n" "foo: \n" "bar: \n" "baz: blat\n" "zip: \n", HttpVersion(1, 1), 202, "Accepted"}, {// Normalize whitespace headers. "\n \n", "HTTP/1.0 200 OK\n", HttpVersion(1, 0), 200, "OK"}, {// Has multiple Set-Cookie headers. 
"HTTP/1.1 200 OK\n" "Set-Cookie: x=1\n" "Set-Cookie: y=2\n", "HTTP/1.1 200 OK\n" "Set-Cookie: x=1\n" "Set-Cookie: y=2\n", HttpVersion(1, 1), 200, "OK"}, {// Has multiple cache-control headers. "HTTP/1.1 200 OK\n" "Cache-control: private\n" "cache-Control: no-store\n", "HTTP/1.1 200 OK\n" "Cache-control: private\n" "cache-Control: no-store\n", HttpVersion(1, 1), 200, "OK"}, }; INSTANTIATE_TEST_CASE_P(HttpResponseHeaders, CommonHttpResponseHeadersTest, testing::ValuesIn(response_headers_tests)); struct PersistData { HttpResponseHeaders::PersistOptions options; const char* raw_headers; const char* expected_headers; }; class PersistenceTest : public HttpResponseHeadersTest, public ::testing::WithParamInterface<PersistData> { }; TEST_P(PersistenceTest, Persist) { const PersistData test = GetParam(); std::string headers = test.raw_headers; HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed1(new HttpResponseHeaders(headers)); base::Pickle pickle; parsed1->Persist(&pickle, test.options); base::PickleIterator iter(pickle); scoped_refptr<HttpResponseHeaders> parsed2(new HttpResponseHeaders(&iter)); EXPECT_EQ(std::string(test.expected_headers), ToSimpleString(parsed2)); } const struct PersistData persistence_tests[] = { {HttpResponseHeaders::PERSIST_ALL, "HTTP/1.1 200 OK\n" "Cache-control:private\n" "cache-Control:no-store\n", "HTTP/1.1 200 OK\n" "Cache-control: private\n" "cache-Control: no-store\n"}, {HttpResponseHeaders::PERSIST_SANS_HOP_BY_HOP, "HTTP/1.1 200 OK\n" "connection: keep-alive\n" "server: blah\n", "HTTP/1.1 200 OK\n" "server: blah\n"}, {HttpResponseHeaders::PERSIST_SANS_NON_CACHEABLE | HttpResponseHeaders::PERSIST_SANS_HOP_BY_HOP, "HTTP/1.1 200 OK\n" "fOo: 1\n" "Foo: 2\n" "Transfer-Encoding: chunked\n" "CoNnection: keep-alive\n" "cache-control: private, no-cache=\"foo\"\n", "HTTP/1.1 200 OK\n" "cache-control: private, no-cache=\"foo\"\n"}, {HttpResponseHeaders::PERSIST_SANS_NON_CACHEABLE, "HTTP/1.1 200 OK\n" "Foo: 2\n" "Cache-Control: 
private,no-cache=\"foo, bar\"\n" "bar", "HTTP/1.1 200 OK\n" "Cache-Control: private,no-cache=\"foo, bar\"\n"}, // Ignore bogus no-cache value. {HttpResponseHeaders::PERSIST_SANS_NON_CACHEABLE, "HTTP/1.1 200 OK\n" "Foo: 2\n" "Cache-Control: private,no-cache=foo\n", "HTTP/1.1 200 OK\n" "Foo: 2\n" "Cache-Control: private,no-cache=foo\n"}, // Ignore bogus no-cache value. {HttpResponseHeaders::PERSIST_SANS_NON_CACHEABLE, "HTTP/1.1 200 OK\n" "Foo: 2\n" "Cache-Control: private, no-cache=\n", "HTTP/1.1 200 OK\n" "Foo: 2\n" "Cache-Control: private, no-cache=\n"}, // Ignore empty no-cache value. {HttpResponseHeaders::PERSIST_SANS_NON_CACHEABLE, "HTTP/1.1 200 OK\n" "Foo: 2\n" "Cache-Control: private, no-cache=\"\"\n", "HTTP/1.1 200 OK\n" "Foo: 2\n" "Cache-Control: private, no-cache=\"\"\n"}, // Ignore wrong quotes no-cache value. {HttpResponseHeaders::PERSIST_SANS_NON_CACHEABLE, "HTTP/1.1 200 OK\n" "Foo: 2\n" "Cache-Control: private, no-cache=\'foo\'\n", "HTTP/1.1 200 OK\n" "Foo: 2\n" "Cache-Control: private, no-cache=\'foo\'\n"}, // Ignore unterminated quotes no-cache value. {HttpResponseHeaders::PERSIST_SANS_NON_CACHEABLE, "HTTP/1.1 200 OK\n" "Foo: 2\n" "Cache-Control: private, no-cache=\"foo\n", "HTTP/1.1 200 OK\n" "Foo: 2\n" "Cache-Control: private, no-cache=\"foo\n"}, // Accept sloppy LWS. {HttpResponseHeaders::PERSIST_SANS_NON_CACHEABLE, "HTTP/1.1 200 OK\n" "Foo: 2\n" "Cache-Control: private, no-cache=\" foo\t, bar\"\n", "HTTP/1.1 200 OK\n" "Cache-Control: private, no-cache=\" foo\t, bar\"\n"}, // Header name appears twice, separated by another header. {HttpResponseHeaders::PERSIST_ALL, "HTTP/1.1 200 OK\n" "Foo: 1\n" "Bar: 2\n" "Foo: 3\n", "HTTP/1.1 200 OK\n" "Foo: 1\n" "Bar: 2\n" "Foo: 3\n"}, // Header name appears twice, separated by another header (type 2). {HttpResponseHeaders::PERSIST_ALL, "HTTP/1.1 200 OK\n" "Foo: 1, 3\n" "Bar: 2\n" "Foo: 4\n", "HTTP/1.1 200 OK\n" "Foo: 1, 3\n" "Bar: 2\n" "Foo: 4\n"}, // Test filtering of cookie headers. 
{HttpResponseHeaders::PERSIST_SANS_COOKIES, "HTTP/1.1 200 OK\n" "Set-Cookie: foo=bar; httponly\n" "Set-Cookie: bar=foo\n" "Bar: 1\n" "Set-Cookie2: bar2=foo2\n", "HTTP/1.1 200 OK\n" "Bar: 1\n"}, {HttpResponseHeaders::PERSIST_SANS_COOKIES, "HTTP/1.1 200 OK\n" "Set-Cookie: foo=bar\n" "Foo: 2\n" "Clear-Site-Data: { \"types\" : [ \"cookies\" ] }\n" "Bar: 3\n", "HTTP/1.1 200 OK\n" "Foo: 2\n" "Bar: 3\n"}, // Test LWS at the end of a header. {HttpResponseHeaders::PERSIST_ALL, "HTTP/1.1 200 OK\n" "Content-Length: 450 \n" "Content-Encoding: gzip\n", "HTTP/1.1 200 OK\n" "Content-Length: 450\n" "Content-Encoding: gzip\n"}, // Test LWS at the end of a header. {HttpResponseHeaders::PERSIST_RAW, "HTTP/1.1 200 OK\n" "Content-Length: 450 \n" "Content-Encoding: gzip\n", "HTTP/1.1 200 OK\n" "Content-Length: 450\n" "Content-Encoding: gzip\n"}, // Test filtering of transport security state headers. {HttpResponseHeaders::PERSIST_SANS_SECURITY_STATE, "HTTP/1.1 200 OK\n" "Strict-Transport-Security: max-age=1576800\n" "Bar: 1\n" "Public-Key-Pins: max-age=100000; " "pin-sha256=\"1111111111111111111111111111111111111111111=\";" "pin-sha256=\"2222222222222222222222222222222222222222222=\"", "HTTP/1.1 200 OK\n" "Bar: 1\n"}, }; INSTANTIATE_TEST_CASE_P(HttpResponseHeaders, PersistenceTest, testing::ValuesIn(persistence_tests)); TEST(HttpResponseHeadersTest, EnumerateHeader_Coalesced) { // Ensure that commas in quoted strings are not regarded as value separators. // Ensure that whitespace following a value is trimmed properly. 
std::string headers = "HTTP/1.1 200 OK\n" "Cache-control:private , no-cache=\"set-cookie,server\" \n" "cache-Control: no-store\n"; HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); size_t iter = 0; std::string value; EXPECT_TRUE(parsed->EnumerateHeader(&iter, "cache-control", &value)); EXPECT_EQ("private", value); EXPECT_TRUE(parsed->EnumerateHeader(&iter, "cache-control", &value)); EXPECT_EQ("no-cache=\"set-cookie,server\"", value); EXPECT_TRUE(parsed->EnumerateHeader(&iter, "cache-control", &value)); EXPECT_EQ("no-store", value); EXPECT_FALSE(parsed->EnumerateHeader(&iter, "cache-control", &value)); } TEST(HttpResponseHeadersTest, EnumerateHeader_Challenge) { // Even though WWW-Authenticate has commas, it should not be treated as // coalesced values. std::string headers = "HTTP/1.1 401 OK\n" "WWW-Authenticate:Digest realm=foobar, nonce=x, domain=y\n" "WWW-Authenticate:Basic realm=quatar\n"; HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); size_t iter = 0; std::string value; EXPECT_TRUE(parsed->EnumerateHeader(&iter, "WWW-Authenticate", &value)); EXPECT_EQ("Digest realm=foobar, nonce=x, domain=y", value); EXPECT_TRUE(parsed->EnumerateHeader(&iter, "WWW-Authenticate", &value)); EXPECT_EQ("Basic realm=quatar", value); EXPECT_FALSE(parsed->EnumerateHeader(&iter, "WWW-Authenticate", &value)); } TEST(HttpResponseHeadersTest, EnumerateHeader_DateValued) { // The comma in a date valued header should not be treated as a // field-value separator. 
std::string headers = "HTTP/1.1 200 OK\n" "Date: Tue, 07 Aug 2007 23:10:55 GMT\n" "Last-Modified: Wed, 01 Aug 2007 23:23:45 GMT\n"; HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); std::string value; EXPECT_TRUE(parsed->EnumerateHeader(NULL, "date", &value)); EXPECT_EQ("Tue, 07 Aug 2007 23:10:55 GMT", value); EXPECT_TRUE(parsed->EnumerateHeader(NULL, "last-modified", &value)); EXPECT_EQ("Wed, 01 Aug 2007 23:23:45 GMT", value); } TEST(HttpResponseHeadersTest, DefaultDateToGMT) { // Verify we make the best interpretation when parsing dates that incorrectly // do not end in "GMT" as RFC2616 requires. std::string headers = "HTTP/1.1 200 OK\n" "Date: Tue, 07 Aug 2007 23:10:55\n" "Last-Modified: Tue, 07 Aug 2007 19:10:55 EDT\n" "Expires: Tue, 07 Aug 2007 23:10:55 UTC\n"; HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); base::Time expected_value; ASSERT_TRUE(base::Time::FromString("Tue, 07 Aug 2007 23:10:55 GMT", &expected_value)); base::Time value; // When the timezone is missing, GMT is a good guess as its what RFC2616 // requires. EXPECT_TRUE(parsed->GetDateValue(&value)); EXPECT_EQ(expected_value, value); // If GMT is missing but an RFC822-conforming one is present, use that. EXPECT_TRUE(parsed->GetLastModifiedValue(&value)); EXPECT_EQ(expected_value, value); // If an unknown timezone is present, treat like a missing timezone and // default to GMT. The only example of a web server not specifying "GMT" // used "UTC" which is equivalent to GMT. 
if (parsed->GetExpiresValue(&value)) EXPECT_EQ(expected_value, value); } TEST(HttpResponseHeadersTest, GetAgeValue10) { std::string headers = "HTTP/1.1 200 OK\n" "Age: 10\n"; HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); base::TimeDelta age; ASSERT_TRUE(parsed->GetAgeValue(&age)); EXPECT_EQ(10, age.InSeconds()); } TEST(HttpResponseHeadersTest, GetAgeValue0) { std::string headers = "HTTP/1.1 200 OK\n" "Age: 0\n"; HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); base::TimeDelta age; ASSERT_TRUE(parsed->GetAgeValue(&age)); EXPECT_EQ(0, age.InSeconds()); } TEST(HttpResponseHeadersTest, GetAgeValueBogus) { std::string headers = "HTTP/1.1 200 OK\n" "Age: donkey\n"; HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); base::TimeDelta age; ASSERT_FALSE(parsed->GetAgeValue(&age)); } TEST(HttpResponseHeadersTest, GetAgeValueNegative) { std::string headers = "HTTP/1.1 200 OK\n" "Age: -10\n"; HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); base::TimeDelta age; ASSERT_FALSE(parsed->GetAgeValue(&age)); } TEST(HttpResponseHeadersTest, GetAgeValueLeadingPlus) { std::string headers = "HTTP/1.1 200 OK\n" "Age: +10\n"; HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); base::TimeDelta age; ASSERT_FALSE(parsed->GetAgeValue(&age)); } TEST(HttpResponseHeadersTest, GetAgeValueOverflow) { std::string headers = "HTTP/1.1 200 OK\n" "Age: 999999999999999999999999999999999999999999\n"; HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); base::TimeDelta age; ASSERT_TRUE(parsed->GetAgeValue(&age)); // Should have saturated to 2^32 - 1. 
EXPECT_EQ(static_cast<int64_t>(0xFFFFFFFFL), age.InSeconds()); } struct ContentTypeTestData { const std::string raw_headers; const std::string mime_type; const bool has_mimetype; const std::string charset; const bool has_charset; const std::string all_content_type; }; class ContentTypeTest : public HttpResponseHeadersTest, public ::testing::WithParamInterface<ContentTypeTestData> { }; TEST_P(ContentTypeTest, GetMimeType) { const ContentTypeTestData test = GetParam(); std::string headers(test.raw_headers); HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); std::string value; EXPECT_EQ(test.has_mimetype, parsed->GetMimeType(&value)); EXPECT_EQ(test.mime_type, value); value.clear(); EXPECT_EQ(test.has_charset, parsed->GetCharset(&value)); EXPECT_EQ(test.charset, value); EXPECT_TRUE(parsed->GetNormalizedHeader("content-type", &value)); EXPECT_EQ(test.all_content_type, value); } // clang-format off const ContentTypeTestData mimetype_tests[] = { { "HTTP/1.1 200 OK\n" "Content-type: text/html\n", "text/html", true, "", false, "text/html" }, // Multiple content-type headers should give us the last one. { "HTTP/1.1 200 OK\n" "Content-type: text/html\n" "Content-type: text/html\n", "text/html", true, "", false, "text/html, text/html" }, { "HTTP/1.1 200 OK\n" "Content-type: text/plain\n" "Content-type: text/html\n" "Content-type: text/plain\n" "Content-type: text/html\n", "text/html", true, "", false, "text/plain, text/html, text/plain, text/html" }, // Test charset parsing. { "HTTP/1.1 200 OK\n" "Content-type: text/html\n" "Content-type: text/html; charset=ISO-8859-1\n", "text/html", true, "iso-8859-1", true, "text/html, text/html; charset=ISO-8859-1" }, // Test charset in double quotes. 
{ "HTTP/1.1 200 OK\n" "Content-type: text/html\n" "Content-type: text/html; charset=\"ISO-8859-1\"\n", "text/html", true, "iso-8859-1", true, "text/html, text/html; charset=\"ISO-8859-1\"" }, // If there are multiple matching content-type headers, we carry // over the charset value. { "HTTP/1.1 200 OK\n" "Content-type: text/html;charset=utf-8\n" "Content-type: text/html\n", "text/html", true, "utf-8", true, "text/html;charset=utf-8, text/html" }, // Regression test for https://crbug.com/772350: // Single quotes are not delimiters but must be treated as part of charset. { "HTTP/1.1 200 OK\n" "Content-type: text/html;charset='utf-8'\n" "Content-type: text/html\n", "text/html", true, "'utf-8'", true, "text/html;charset='utf-8', text/html" }, // Last charset wins if matching content-type. { "HTTP/1.1 200 OK\n" "Content-type: text/html;charset=utf-8\n" "Content-type: text/html;charset=iso-8859-1\n", "text/html", true, "iso-8859-1", true, "text/html;charset=utf-8, text/html;charset=iso-8859-1" }, // Charset is ignored if the content types change. { "HTTP/1.1 200 OK\n" "Content-type: text/plain;charset=utf-8\n" "Content-type: text/html\n", "text/html", true, "", false, "text/plain;charset=utf-8, text/html" }, // Empty content-type. { "HTTP/1.1 200 OK\n" "Content-type: \n", "", false, "", false, "" }, // Emtpy charset. { "HTTP/1.1 200 OK\n" "Content-type: text/html;charset=\n", "text/html", true, "", false, "text/html;charset=" }, // Multiple charsets, last one wins. { "HTTP/1.1 200 OK\n" "Content-type: text/html;charset=utf-8; charset=iso-8859-1\n", "text/html", true, "iso-8859-1", true, "text/html;charset=utf-8; charset=iso-8859-1" }, // Multiple params. 
{ "HTTP/1.1 200 OK\n" "Content-type: text/html; foo=utf-8; charset=iso-8859-1\n", "text/html", true, "iso-8859-1", true, "text/html; foo=utf-8; charset=iso-8859-1" }, { "HTTP/1.1 200 OK\n" "Content-type: text/html ; charset=utf-8 ; bar=iso-8859-1\n", "text/html", true, "utf-8", true, "text/html ; charset=utf-8 ; bar=iso-8859-1" }, // Comma embeded in quotes. { "HTTP/1.1 200 OK\n" "Content-type: text/html ; charset=\"utf-8,text/plain\" ;\n", "text/html", true, "utf-8,text/plain", true, "text/html ; charset=\"utf-8,text/plain\" ;" }, // Charset with leading spaces. { "HTTP/1.1 200 OK\n" "Content-type: text/html ; charset= \"utf-8\" ;\n", "text/html", true, "utf-8", true, "text/html ; charset= \"utf-8\" ;" }, // Media type comments in mime-type. { "HTTP/1.1 200 OK\n" "Content-type: text/html (html)\n", "text/html", true, "", false, "text/html (html)" }, // Incomplete charset= param. { "HTTP/1.1 200 OK\n" "Content-type: text/html; char=\n", "text/html", true, "", false, "text/html; char=" }, // Invalid media type: no slash. { "HTTP/1.1 200 OK\n" "Content-type: texthtml\n", "", false, "", false, "texthtml" }, // Invalid media type: "*/*". 
{ "HTTP/1.1 200 OK\n" "Content-type: */*\n", "", false, "", false, "*/*" }, }; // clang-format on INSTANTIATE_TEST_CASE_P(HttpResponseHeaders, ContentTypeTest, testing::ValuesIn(mimetype_tests)); struct RequiresValidationTestData { const char* headers; bool requires_validation; }; class RequiresValidationTest : public HttpResponseHeadersTest, public ::testing::WithParamInterface<RequiresValidationTestData> { }; TEST_P(RequiresValidationTest, RequiresValidation) { const RequiresValidationTestData test = GetParam(); base::Time request_time, response_time, current_time; ASSERT_TRUE( base::Time::FromString("Wed, 28 Nov 2007 00:40:09 GMT", &request_time)); ASSERT_TRUE( base::Time::FromString("Wed, 28 Nov 2007 00:40:12 GMT", &response_time)); ASSERT_TRUE( base::Time::FromString("Wed, 28 Nov 2007 00:45:20 GMT", &current_time)); std::string headers(test.headers); HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); bool requires_validation = parsed->RequiresValidation(request_time, response_time, current_time); EXPECT_EQ(test.requires_validation, requires_validation); } const struct RequiresValidationTestData requires_validation_tests[] = { // No expiry info: expires immediately. { "HTTP/1.1 200 OK\n" "\n", true }, // No expiry info: expires immediately. { "HTTP/1.1 200 OK\n" "\n", true }, // Valid for a little while. { "HTTP/1.1 200 OK\n" "cache-control: max-age=10000\n" "\n", false }, // Expires in the future. { "HTTP/1.1 200 OK\n" "date: Wed, 28 Nov 2007 00:40:11 GMT\n" "expires: Wed, 28 Nov 2007 01:00:00 GMT\n" "\n", false }, // Already expired. { "HTTP/1.1 200 OK\n" "date: Wed, 28 Nov 2007 00:40:11 GMT\n" "expires: Wed, 28 Nov 2007 00:00:00 GMT\n" "\n", true }, // Max-age trumps expires. { "HTTP/1.1 200 OK\n" "date: Wed, 28 Nov 2007 00:40:11 GMT\n" "expires: Wed, 28 Nov 2007 00:00:00 GMT\n" "cache-control: max-age=10000\n" "\n", false }, // Last-modified heuristic: modified a while ago. 
{ "HTTP/1.1 200 OK\n" "date: Wed, 28 Nov 2007 00:40:11 GMT\n" "last-modified: Wed, 27 Nov 2007 08:00:00 GMT\n" "\n", false }, { "HTTP/1.1 203 Non-Authoritative Information\n" "date: Wed, 28 Nov 2007 00:40:11 GMT\n" "last-modified: Wed, 27 Nov 2007 08:00:00 GMT\n" "\n", false }, { "HTTP/1.1 206 Partial Content\n" "date: Wed, 28 Nov 2007 00:40:11 GMT\n" "last-modified: Wed, 27 Nov 2007 08:00:00 GMT\n" "\n", false }, // Last-modified heuristic: modified recently. { "HTTP/1.1 200 OK\n" "date: Wed, 28 Nov 2007 00:40:11 GMT\n" "last-modified: Wed, 28 Nov 2007 00:40:10 GMT\n" "\n", true }, { "HTTP/1.1 203 Non-Authoritative Information\n" "date: Wed, 28 Nov 2007 00:40:11 GMT\n" "last-modified: Wed, 28 Nov 2007 00:40:10 GMT\n" "\n", true }, { "HTTP/1.1 206 Partial Content\n" "date: Wed, 28 Nov 2007 00:40:11 GMT\n" "last-modified: Wed, 28 Nov 2007 00:40:10 GMT\n" "\n", true }, // Cached permanent redirect. { "HTTP/1.1 301 Moved Permanently\n" "\n", false }, // Another cached permanent redirect. { "HTTP/1.1 308 Permanent Redirect\n" "\n", false }, // Cached redirect: not reusable even though by default it would be. { "HTTP/1.1 300 Multiple Choices\n" "Cache-Control: no-cache\n" "\n", true }, // Cached forever by default. { "HTTP/1.1 410 Gone\n" "\n", false }, // Cached temporary redirect: not reusable. { "HTTP/1.1 302 Found\n" "\n", true }, // Cached temporary redirect: reusable. { "HTTP/1.1 302 Found\n" "cache-control: max-age=10000\n" "\n", false }, // Cache-control: max-age=N overrides expires: date in the past. { "HTTP/1.1 200 OK\n" "date: Wed, 28 Nov 2007 00:40:11 GMT\n" "expires: Wed, 28 Nov 2007 00:20:11 GMT\n" "cache-control: max-age=10000\n" "\n", false }, // Cache-control: no-store overrides expires: in the future. { "HTTP/1.1 200 OK\n" "date: Wed, 28 Nov 2007 00:40:11 GMT\n" "expires: Wed, 29 Nov 2007 00:40:11 GMT\n" "cache-control: no-store,private,no-cache=\"foo\"\n" "\n", true }, // Pragma: no-cache overrides last-modified heuristic. 
{ "HTTP/1.1 200 OK\n" "date: Wed, 28 Nov 2007 00:40:11 GMT\n" "last-modified: Wed, 27 Nov 2007 08:00:00 GMT\n" "pragma: no-cache\n" "\n", true }, // max-age has expired, needs synchronous revalidation { "HTTP/1.1 200 OK\n" "date: Wed, 28 Nov 2007 00:40:11 GMT\n" "cache-control: max-age=300\n" "\n", true }, // TODO(darin): Add many many more tests here. }; INSTANTIATE_TEST_CASE_P(HttpResponseHeaders, RequiresValidationTest, testing::ValuesIn(requires_validation_tests)); struct UpdateTestData { const char* orig_headers; const char* new_headers; const char* expected_headers; }; class UpdateTest : public HttpResponseHeadersTest, public ::testing::WithParamInterface<UpdateTestData> { }; TEST_P(UpdateTest, Update) { const UpdateTestData test = GetParam(); std::string orig_headers(test.orig_headers); HeadersToRaw(&orig_headers); scoped_refptr<HttpResponseHeaders> parsed( new HttpResponseHeaders(orig_headers)); std::string new_headers(test.new_headers); HeadersToRaw(&new_headers); scoped_refptr<HttpResponseHeaders> new_parsed( new HttpResponseHeaders(new_headers)); parsed->Update(*new_parsed.get()); EXPECT_EQ(std::string(test.expected_headers), ToSimpleString(parsed)); } const UpdateTestData update_tests[] = { { "HTTP/1.1 200 OK\n", "HTTP/1/1 304 Not Modified\n" "connection: keep-alive\n" "Cache-control: max-age=10000\n", "HTTP/1.1 200 OK\n" "Cache-control: max-age=10000\n" }, { "HTTP/1.1 200 OK\n" "Foo: 1\n" "Cache-control: private\n", "HTTP/1/1 304 Not Modified\n" "connection: keep-alive\n" "Cache-control: max-age=10000\n", "HTTP/1.1 200 OK\n" "Cache-control: max-age=10000\n" "Foo: 1\n" }, { "HTTP/1.1 200 OK\n" "Foo: 1\n" "Cache-control: private\n", "HTTP/1/1 304 Not Modified\n" "connection: keep-alive\n" "Cache-CONTROL: max-age=10000\n", "HTTP/1.1 200 OK\n" "Cache-CONTROL: max-age=10000\n" "Foo: 1\n" }, { "HTTP/1.1 200 OK\n" "Content-Length: 450\n", "HTTP/1/1 304 Not Modified\n" "connection: keep-alive\n" "Cache-control: max-age=10001 \n", "HTTP/1.1 200 OK\n" 
"Cache-control: max-age=10001\n" "Content-Length: 450\n" }, { "HTTP/1.1 200 OK\n" "X-Frame-Options: DENY\n", "HTTP/1/1 304 Not Modified\n" "X-Frame-Options: ALLOW\n", "HTTP/1.1 200 OK\n" "X-Frame-Options: DENY\n", }, { "HTTP/1.1 200 OK\n" "X-WebKit-CSP: default-src 'none'\n", "HTTP/1/1 304 Not Modified\n" "X-WebKit-CSP: default-src *\n", "HTTP/1.1 200 OK\n" "X-WebKit-CSP: default-src 'none'\n", }, { "HTTP/1.1 200 OK\n" "X-XSS-Protection: 1\n", "HTTP/1/1 304 Not Modified\n" "X-XSS-Protection: 0\n", "HTTP/1.1 200 OK\n" "X-XSS-Protection: 1\n", }, { "HTTP/1.1 200 OK\n", "HTTP/1/1 304 Not Modified\n" "X-Content-Type-Options: nosniff\n", "HTTP/1.1 200 OK\n" }, }; INSTANTIATE_TEST_CASE_P(HttpResponseHeaders, UpdateTest, testing::ValuesIn(update_tests)); struct EnumerateHeaderTestData { const char* headers; const char* expected_lines; }; class EnumerateHeaderLinesTest : public HttpResponseHeadersTest, public ::testing::WithParamInterface<EnumerateHeaderTestData> { }; TEST_P(EnumerateHeaderLinesTest, EnumerateHeaderLines) { const EnumerateHeaderTestData test = GetParam(); std::string headers(test.headers); HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); std::string name, value, lines; size_t iter = 0; while (parsed->EnumerateHeaderLines(&iter, &name, &value)) { lines.append(name); lines.append(": "); lines.append(value); lines.append("\n"); } EXPECT_EQ(std::string(test.expected_lines), lines); } const EnumerateHeaderTestData enumerate_header_tests[] = { { "HTTP/1.1 200 OK\n", "" }, { "HTTP/1.1 200 OK\n" "Foo: 1\n", "Foo: 1\n" }, { "HTTP/1.1 200 OK\n" "Foo: 1\n" "Bar: 2\n" "Foo: 3\n", "Foo: 1\nBar: 2\nFoo: 3\n" }, { "HTTP/1.1 200 OK\n" "Foo: 1, 2, 3\n", "Foo: 1, 2, 3\n" }, }; INSTANTIATE_TEST_CASE_P(HttpResponseHeaders, EnumerateHeaderLinesTest, testing::ValuesIn(enumerate_header_tests)); struct IsRedirectTestData { const char* headers; const char* location; bool is_redirect; }; class IsRedirectTest : public 
HttpResponseHeadersTest, public ::testing::WithParamInterface<IsRedirectTestData> { }; TEST_P(IsRedirectTest, IsRedirect) { const IsRedirectTestData test = GetParam(); std::string headers(test.headers); HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); std::string location; EXPECT_EQ(parsed->IsRedirect(&location), test.is_redirect); EXPECT_EQ(location, test.location); } const IsRedirectTestData is_redirect_tests[] = { { "HTTP/1.1 200 OK\n", "", false }, { "HTTP/1.1 301 Moved\n" "Location: http://foopy/\n", "http://foopy/", true }, { "HTTP/1.1 301 Moved\n" "Location: \t \n", "", false }, // We use the first location header as the target of the redirect. { "HTTP/1.1 301 Moved\n" "Location: http://foo/\n" "Location: http://bar/\n", "http://foo/", true }, // We use the first _valid_ location header as the target of the redirect. { "HTTP/1.1 301 Moved\n" "Location: \n" "Location: http://bar/\n", "http://bar/", true }, // Bug 1050541 (location header with an unescaped comma). { "HTTP/1.1 301 Moved\n" "Location: http://foo/bar,baz.html\n", "http://foo/bar,baz.html", true }, // Bug 1224617 (location header with non-ASCII bytes). { "HTTP/1.1 301 Moved\n" "Location: http://foo/bar?key=\xE4\xF6\xFC\n", "http://foo/bar?key=%E4%F6%FC", true }, // Shift_JIS, Big5, and GBK contain multibyte characters with the trailing // byte falling in the ASCII range. 
{ "HTTP/1.1 301 Moved\n" "Location: http://foo/bar?key=\x81\x5E\xD8\xBF\n", "http://foo/bar?key=%81^%D8%BF", true }, { "HTTP/1.1 301 Moved\n" "Location: http://foo/bar?key=\x82\x40\xBD\xC4\n", "http://foo/bar?key=%82@%BD%C4", true }, { "HTTP/1.1 301 Moved\n" "Location: http://foo/bar?key=\x83\x5C\x82\x5D\xCB\xD7\n", "http://foo/bar?key=%83\\%82]%CB%D7", true }, }; INSTANTIATE_TEST_CASE_P(HttpResponseHeaders, IsRedirectTest, testing::ValuesIn(is_redirect_tests)); struct ContentLengthTestData { const char* headers; int64_t expected_len; }; class GetContentLengthTest : public HttpResponseHeadersTest, public ::testing::WithParamInterface<ContentLengthTestData> { }; TEST_P(GetContentLengthTest, GetContentLength) { const ContentLengthTestData test = GetParam(); std::string headers(test.headers); HeadersToRaw(&headers); scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers)); EXPECT_EQ(test.expected_len, parsed->GetContentLength()); } const ContentLengthTestData content_length_tests[] = { {"HTTP/1.1 200 OK\n", -1}, {"HTTP/1.1 200 OK\n" "Content-Length: 10\n", 10}, {"HTTP/1.1 200 OK\n" "Content-Length: \n", -1}, {"HTTP/1.1 200 OK\n" "Content-Length: abc\n", -1}, {"HTTP/1.1 200 OK\n" "Content-Length: -10\n", -1}, {"HTTP/1.1 200 OK\n" "Content-Length: +10\n", -1}, {"HTTP/1.1 200 OK\n" "Content-Length: 23xb5\n", -1}, {"HTTP/1.1 200 OK\n" "Content-Length: 0xA\n", -1}, {"HTTP/1.1 200 OK\n" "Content-Length: 010\n", 10}, // Content-Length too big, will overflow an int64_t. 
    {"HTTP/1.1 200 OK\n"
     "Content-Length: 40000000000000000000\n",
     -1},
    {"HTTP/1.1 200 OK\n"
     "Content-Length: 10\n",
     10},
    // Leading/trailing space and tab are tolerated HTTP whitespace ...
    {"HTTP/1.1 200 OK\n"
     "Content-Length: 10 \n",
     10},
    {"HTTP/1.1 200 OK\n"
     "Content-Length: \t10\n",
     10},
    // ... but vertical tab and form feed are not.
    {"HTTP/1.1 200 OK\n"
     "Content-Length: \v10\n",
     -1},
    {"HTTP/1.1 200 OK\n"
     "Content-Length: \f10\n",
     -1},
    // Header-name matching is case-insensitive.
    {"HTTP/1.1 200 OK\n"
     "cOnTeNt-LENgth: 33\n",
     33},
    // An embedded CR makes the value unparsable.
    {"HTTP/1.1 200 OK\n"
     "Content-Length: 34\r\n",
     -1},
};

INSTANTIATE_TEST_CASE_P(HttpResponseHeaders,
                        GetContentLengthTest,
                        testing::ValuesIn(content_length_tests));

// Expected outputs of GetContentRangeFor206() for a given header block.
struct ContentRangeTestData {
  const char* headers;
  bool expected_return_value;
  int64_t expected_first_byte_position;
  int64_t expected_last_byte_position;
  int64_t expected_instance_size;
};

class ContentRangeTest
    : public HttpResponseHeadersTest,
      public ::testing::WithParamInterface<ContentRangeTestData> {
};

// Verifies Content-Range parsing for 206 responses, including the values
// reported through the three out-parameters on both success and failure.
TEST_P(ContentRangeTest, GetContentRangeFor206) {
  const ContentRangeTestData test = GetParam();

  std::string headers(test.headers);
  HeadersToRaw(&headers);
  scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers));

  int64_t first_byte_position;
  int64_t last_byte_position;
  int64_t instance_size;
  bool return_value = parsed->GetContentRangeFor206(
      &first_byte_position, &last_byte_position, &instance_size);
  EXPECT_EQ(test.expected_return_value, return_value);
  EXPECT_EQ(test.expected_first_byte_position, first_byte_position);
  EXPECT_EQ(test.expected_last_byte_position, last_byte_position);
  EXPECT_EQ(test.expected_instance_size, instance_size);
}

const ContentRangeTestData content_range_tests[] = {
    {"HTTP/1.1 206 Partial Content", false, -1, -1, -1},
    {"HTTP/1.1 206 Partial Content\n"
     "Content-Range:",
     false, -1, -1, -1},
    {"HTTP/1.1 206 Partial Content\n"
     "Content-Range: bytes 0-50/51",
     true, 0, 50, 51},
    // A reversed range (first > last) is rejected.
    {"HTTP/1.1 206 Partial Content\n"
     "Content-Range: bytes 50-0/51",
     false, -1, -1, -1},
    {"HTTP/1.1 416 Requested range not satisfiable\n"
     "Content-Range: bytes */*",
     false, -1, -1, -1},
    {"HTTP/1.1 206 Partial Content\n"
     "Content-Range: bytes 0-50/*",
     false, -1, -1, -1},
};

INSTANTIATE_TEST_CASE_P(HttpResponseHeaders,
                        ContentRangeTest,
                        testing::ValuesIn(content_range_tests));

// Raw headers plus the expected result of IsKeepAlive().
struct KeepAliveTestData {
  const char* headers;
  bool expected_keep_alive;
};

// Enable GTest to print KeepAliveTestData in an intelligible way if the test
// fails.
void PrintTo(const KeepAliveTestData& keep_alive_test_data, std::ostream* os) {
  *os << "{\"" << keep_alive_test_data.headers << "\", " << std::boolalpha
      << keep_alive_test_data.expected_keep_alive << "}";
}

class IsKeepAliveTest
    : public HttpResponseHeadersTest,
      public ::testing::WithParamInterface<KeepAliveTestData> {
};

// Checks IsKeepAlive() across HTTP versions and Connection/Proxy-Connection
// header combinations.
TEST_P(IsKeepAliveTest, IsKeepAlive) {
  const KeepAliveTestData test = GetParam();

  std::string headers(test.headers);
  HeadersToRaw(&headers);
  scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers));

  EXPECT_EQ(test.expected_keep_alive, parsed->IsKeepAlive());
}

const KeepAliveTestData keepalive_tests[] = {
  // The status line fabricated by HttpNetworkTransaction for a 0.9 response.
  // Treated as 0.9.
  { "HTTP/0.9 200 OK",
    false
  },
  // This could come from a broken server. Treated as 1.0 because it has a
  // header.
  { "HTTP/0.9 200 OK\n"
    "connection: keep-alive\n",
    true
  },
  // HTTP/1.1 defaults to keep-alive; HTTP/1.0 defaults to close.
  { "HTTP/1.1 200 OK\n",
    true
  },
  { "HTTP/1.0 200 OK\n",
    false
  },
  { "HTTP/1.0 200 OK\n"
    "connection: close\n",
    false
  },
  { "HTTP/1.0 200 OK\n"
    "connection: keep-alive\n",
    true
  },
  // Token matching is case-insensitive, but must be an exact token match.
  { "HTTP/1.0 200 OK\n"
    "connection: kEeP-AliVe\n",
    true
  },
  { "HTTP/1.0 200 OK\n"
    "connection: keep-aliveX\n",
    false
  },
  { "HTTP/1.1 200 OK\n"
    "connection: close\n",
    false
  },
  { "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n",
    true
  },
  { "HTTP/1.0 200 OK\n"
    "proxy-connection: close\n",
    false
  },
  { "HTTP/1.0 200 OK\n"
    "proxy-connection: keep-alive\n",
    true
  },
  { "HTTP/1.1 200 OK\n"
    "proxy-connection: close\n",
    false
  },
  { "HTTP/1.1 200 OK\n"
    "proxy-connection: keep-alive\n",
    true
  },
  // "Upgrade" mixed into the Connection value must not mask close/keep-alive.
  { "HTTP/1.1 200 OK\n"
    "Connection: Upgrade, close\n",
    false
  },
  { "HTTP/1.1 200 OK\n"
    "Connection: Upgrade, keep-alive\n",
    true
  },
  { "HTTP/1.1 200 OK\n"
    "Connection: Upgrade\n"
    "Connection: close\n",
    false
  },
  { "HTTP/1.1 200 OK\n"
    "Connection: Upgrade\n"
    "Connection: keep-alive\n",
    true
  },
  { "HTTP/1.1 200 OK\n"
    "Connection: close, Upgrade\n",
    false
  },
  { "HTTP/1.1 200 OK\n"
    "Connection: keep-alive, Upgrade\n",
    true
  },
  { "HTTP/1.1 200 OK\n"
    "Connection: Upgrade\n"
    "Proxy-Connection: close\n",
    false
  },
  { "HTTP/1.1 200 OK\n"
    "Connection: Upgrade\n"
    "Proxy-Connection: keep-alive\n",
    true
  },
  // In situations where the response headers conflict with themselves, use the
  // first one for backwards-compatibility.
  { "HTTP/1.1 200 OK\n"
    "Connection: close\n"
    "Connection: keep-alive\n",
    false
  },
  { "HTTP/1.1 200 OK\n"
    "Connection: keep-alive\n"
    "Connection: close\n",
    true
  },
  { "HTTP/1.0 200 OK\n"
    "Connection: close\n"
    "Connection: keep-alive\n",
    false
  },
  { "HTTP/1.0 200 OK\n"
    "Connection: keep-alive\n"
    "Connection: close\n",
    true
  },
  // Ignore the Proxy-Connection header if at all possible.
  { "HTTP/1.0 200 OK\n"
    "Proxy-Connection: keep-alive\n"
    "Connection: close\n",
    false
  },
  { "HTTP/1.1 200 OK\n"
    "Proxy-Connection: close\n"
    "Connection: keep-alive\n",
    true
  },
  // Older versions of Chrome would have ignored Proxy-Connection in this case,
  // but it doesn't seem safe.
  { "HTTP/1.1 200 OK\n"
    "Proxy-Connection: close\n"
    "Connection: Transfer-Encoding\n",
    false
  },
};

INSTANTIATE_TEST_CASE_P(HttpResponseHeaders,
                        IsKeepAliveTest,
                        testing::ValuesIn(keepalive_tests));

// Raw headers plus the expected result of HasStrongValidators().
struct HasStrongValidatorsTestData {
  const char* headers;
  bool expected_result;
};

class HasStrongValidatorsTest
    : public HttpResponseHeadersTest,
      public ::testing::WithParamInterface<HasStrongValidatorsTestData> {
};

// Checks strong-validator detection: a strong ETag, or a Last-Modified that
// is sufficiently older than Date, on an HTTP/1.1+ response.
TEST_P(HasStrongValidatorsTest, HasStrongValidators) {
  const HasStrongValidatorsTestData test = GetParam();

  std::string headers(test.headers);
  HeadersToRaw(&headers);
  scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers));

  EXPECT_EQ(test.expected_result, parsed->HasStrongValidators());
}

const HasStrongValidatorsTestData strong_validators_tests[] = {
  { "HTTP/0.9 200 OK",
    false
  },
  // HTTP/1.0 responses never have strong validators.
  { "HTTP/1.0 200 OK\n"
    "Date: Wed, 28 Nov 2007 01:40:10 GMT\n"
    "Last-Modified: Wed, 28 Nov 2007 00:40:10 GMT\n"
    "ETag: \"foo\"\n",
    false
  },
  { "HTTP/1.1 200 OK\n"
    "Date: Wed, 28 Nov 2007 01:40:10 GMT\n"
    "Last-Modified: Wed, 28 Nov 2007 00:40:10 GMT\n"
    "ETag: \"foo\"\n",
    true
  },
  // Last-Modified a full minute before Date counts as strong ...
  { "HTTP/1.1 200 OK\n"
    "Date: Wed, 28 Nov 2007 00:41:10 GMT\n"
    "Last-Modified: Wed, 28 Nov 2007 00:40:10 GMT\n",
    true
  },
  // ... but 59 seconds does not.
  { "HTTP/1.1 200 OK\n"
    "Date: Wed, 28 Nov 2007 00:41:09 GMT\n"
    "Last-Modified: Wed, 28 Nov 2007 00:40:10 GMT\n",
    false
  },
  { "HTTP/1.1 200 OK\n"
    "ETag: \"foo\"\n",
    true
  },
  // This is not really a weak etag:
  { "HTTP/1.1 200 OK\n"
    "etag: \"w/foo\"\n",
    true
  },
  // This is a weak etag:
  { "HTTP/1.1 200 OK\n"
    "etag: w/\"foo\"\n",
    false
  },
  { "HTTP/1.1 200 OK\n"
    "etag: W / \"foo\"\n",
    false
  }
};

INSTANTIATE_TEST_CASE_P(HttpResponseHeaders,
                        HasStrongValidatorsTest,
                        testing::ValuesIn(strong_validators_tests));
// No ETag and no Last-Modified => no validators at all.
TEST(HttpResponseHeadersTest, HasValidatorsNone) {
  std::string headers("HTTP/1.1 200 OK");
  HeadersToRaw(&headers);
  scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers));
  EXPECT_FALSE(parsed->HasValidators());
}

TEST(HttpResponseHeadersTest, HasValidatorsEtag) {
  std::string headers(
      "HTTP/1.1 200 OK\n"
      "etag: \"anything\"");
  HeadersToRaw(&headers);
  scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers));
  EXPECT_TRUE(parsed->HasValidators());
}

TEST(HttpResponseHeadersTest, HasValidatorsLastModified) {
  std::string headers(
      "HTTP/1.1 200 OK\n"
      "Last-Modified: Wed, 28 Nov 2007 00:40:10 GMT");
  HeadersToRaw(&headers);
  scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers));
  EXPECT_TRUE(parsed->HasValidators());
}

// A weak ETag is still a validator (unlike a *strong* validator).
TEST(HttpResponseHeadersTest, HasValidatorsWeakEtag) {
  std::string headers(
      "HTTP/1.1 200 OK\n"
      "etag: W/\"anything\"");
  HeadersToRaw(&headers);
  scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers));
  EXPECT_TRUE(parsed->HasValidators());
}

// Original headers, the header line to add, and the expected merged result.
struct AddHeaderTestData {
  const char* orig_headers;
  const char* new_header;
  const char* expected_headers;
};

class AddHeaderTest
    : public HttpResponseHeadersTest,
      public ::testing::WithParamInterface<AddHeaderTestData> {
};

// AddHeader() appends a header line; trailing whitespace is normalized away.
TEST_P(AddHeaderTest, AddHeader) {
  const AddHeaderTestData test = GetParam();

  std::string orig_headers(test.orig_headers);
  HeadersToRaw(&orig_headers);
  scoped_refptr<HttpResponseHeaders> parsed(
      new HttpResponseHeaders(orig_headers));

  std::string new_header(test.new_header);
  parsed->AddHeader(new_header);

  EXPECT_EQ(std::string(test.expected_headers), ToSimpleString(parsed));
}

const AddHeaderTestData add_header_tests[] = {
  { "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n"
    "Cache-control: max-age=10000\n",

    "Content-Length: 450",

    "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n"
    "Cache-control: max-age=10000\n"
    "Content-Length: 450\n"
  },
  { "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n"
    "Cache-control: max-age=10000 \n",
    "Content-Length: 450 ",

    "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n"
    "Cache-control: max-age=10000\n"
    "Content-Length: 450\n"
  },
};

INSTANTIATE_TEST_CASE_P(HttpResponseHeaders,
                        AddHeaderTest,
                        testing::ValuesIn(add_header_tests));

// Original headers, the header name to strip, and the expected result.
struct RemoveHeaderTestData {
  const char* orig_headers;
  const char* to_remove;
  const char* expected_headers;
};

class RemoveHeaderTest
    : public HttpResponseHeadersTest,
      public ::testing::WithParamInterface<RemoveHeaderTestData> {
};

// RemoveHeader() drops every instance of the named header.
TEST_P(RemoveHeaderTest, RemoveHeader) {
  const RemoveHeaderTestData test = GetParam();

  std::string orig_headers(test.orig_headers);
  HeadersToRaw(&orig_headers);
  scoped_refptr<HttpResponseHeaders> parsed(
      new HttpResponseHeaders(orig_headers));

  std::string name(test.to_remove);
  parsed->RemoveHeader(name);

  EXPECT_EQ(std::string(test.expected_headers), ToSimpleString(parsed));
}

const RemoveHeaderTestData remove_header_tests[] = {
  { "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n"
    "Cache-control: max-age=10000\n"
    "Content-Length: 450\n",

    "Content-Length",

    "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n"
    "Cache-control: max-age=10000\n"
  },
  // Whitespace around names/values is normalized during parsing.
  { "HTTP/1.1 200 OK\n"
    "connection: keep-alive  \n"
    "Content-Length  : 450  \n"
    "Cache-control: max-age=10000\n",

    "Content-Length",

    "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n"
    "Cache-control: max-age=10000\n"
  },
};

INSTANTIATE_TEST_CASE_P(HttpResponseHeaders,
                        RemoveHeaderTest,
                        testing::ValuesIn(remove_header_tests));

// Original headers, up to two header names to strip, and the expected result.
struct RemoveHeadersTestData {
  const char* orig_headers;
  const char* to_remove[2];
  const char* expected_headers;
};

class RemoveHeadersTest
    : public HttpResponseHeadersTest,
      public ::testing::WithParamInterface<RemoveHeadersTestData> {};

// RemoveHeaders() removes a whole set of header names at once.
TEST_P(RemoveHeadersTest, RemoveHeaders) {
  const RemoveHeadersTestData test = GetParam();

  std::string orig_headers(test.orig_headers);
  HeadersToRaw(&orig_headers);
  scoped_refptr<HttpResponseHeaders> parsed(
      new HttpResponseHeaders(orig_headers));

  std::unordered_set<std::string> to_remove;
  for (auto* header : test.to_remove) {
    if
    (header)
      to_remove.insert(header);
  }
  parsed->RemoveHeaders(to_remove);

  EXPECT_EQ(std::string(test.expected_headers), ToSimpleString(parsed));
}

const RemoveHeadersTestData remove_headers_tests[] = {
    // Name matching is case-insensitive.
    {"HTTP/1.1 200 OK\n"
     "connection: keep-alive\n"
     "Cache-control: max-age=10000\n"
     "Content-Length: 450\n",

     {"Content-Length", "CACHE-control"},

     "HTTP/1.1 200 OK\n"
     "connection: keep-alive\n"},

    // Removing names that are absent is a no-op.
    {"HTTP/1.1 200 OK\n"
     "connection: keep-alive\n"
     "Content-Length: 450\n",

     {"foo", "bar"},

     "HTTP/1.1 200 OK\n"
     "connection: keep-alive\n"
     "Content-Length: 450\n"},

    // An empty removal set is also a no-op.
    {"HTTP/1.1 404 Kinda not OK\n"
     "connection: keep-alive  \n",

     {},

     "HTTP/1.1 404 Kinda not OK\n"
     "connection: keep-alive\n"},
};

INSTANTIATE_TEST_CASE_P(HttpResponseHeaders,
                        RemoveHeadersTest,
                        testing::ValuesIn(remove_headers_tests));

// Original headers plus the exact name/value pair to remove; only a line
// matching both is dropped.
struct RemoveIndividualHeaderTestData {
  const char* orig_headers;
  const char* to_remove_name;
  const char* to_remove_value;
  const char* expected_headers;
};

class RemoveIndividualHeaderTest
    : public HttpResponseHeadersTest,
      public ::testing::WithParamInterface<RemoveIndividualHeaderTestData> {
};

// RemoveHeaderLine() removes only the line whose name AND value both match.
TEST_P(RemoveIndividualHeaderTest, RemoveIndividualHeader) {
  const RemoveIndividualHeaderTestData test = GetParam();

  std::string orig_headers(test.orig_headers);
  HeadersToRaw(&orig_headers);
  scoped_refptr<HttpResponseHeaders> parsed(
      new HttpResponseHeaders(orig_headers));

  std::string name(test.to_remove_name);
  std::string value(test.to_remove_value);
  parsed->RemoveHeaderLine(name, value);

  EXPECT_EQ(std::string(test.expected_headers), ToSimpleString(parsed));
}

const RemoveIndividualHeaderTestData remove_individual_header_tests[] = {
  { "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n"
    "Cache-control: max-age=10000\n"
    "Content-Length: 450\n",

    "Content-Length",

    "450",

    "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n"
    "Cache-control: max-age=10000\n"
  },
  { "HTTP/1.1 200 OK\n"
    "connection: keep-alive  \n"
    "Content-Length  : 450  \n"
    "Cache-control: max-age=10000\n",

    "Content-Length",

    "450",

    "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n"
    "Cache-control: max-age=10000\n"
  },
  { "HTTP/1.1 200 OK\n"
    "connection: keep-alive  \n"
    "Content-Length: 450\n"
    "Cache-control: max-age=10000\n",

    "Content-Length",  // Matching name.

    "999",  // Mismatching value.

    "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n"
    "Content-Length: 450\n"
    "Cache-control: max-age=10000\n"
  },
  { "HTTP/1.1 200 OK\n"
    "connection: keep-alive  \n"
    "Foo: bar, baz\n"
    "Foo: bar\n"
    "Cache-control: max-age=10000\n",

    "Foo",

    "bar, baz",  // Space in value.

    "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n"
    "Foo: bar\n"
    "Cache-control: max-age=10000\n"
  },
  { "HTTP/1.1 200 OK\n"
    "connection: keep-alive  \n"
    "Foo: bar, baz\n"
    "Cache-control: max-age=10000\n",

    "Foo",

    "baz",  // Only partial match -> ignored.

    "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n"
    "Foo: bar, baz\n"
    "Cache-control: max-age=10000\n"
  },
};

INSTANTIATE_TEST_CASE_P(HttpResponseHeaders,
                        RemoveIndividualHeaderTest,
                        testing::ValuesIn(remove_individual_header_tests));

// Original headers, the replacement status line, and the expected result.
struct ReplaceStatusTestData {
  const char* orig_headers;
  const char* new_status;
  const char* expected_headers;
};

class ReplaceStatusTest
    : public HttpResponseHeadersTest,
      public ::testing::WithParamInterface<ReplaceStatusTestData> {
};

// ReplaceStatusLine() swaps the status line, re-parsing (and repairing)
// malformed replacements.
TEST_P(ReplaceStatusTest, ReplaceStatus) {
  const ReplaceStatusTestData test = GetParam();

  std::string orig_headers(test.orig_headers);
  HeadersToRaw(&orig_headers);
  scoped_refptr<HttpResponseHeaders> parsed(
      new HttpResponseHeaders(orig_headers));

  std::string name(test.new_status);
  parsed->ReplaceStatusLine(name);

  EXPECT_EQ(std::string(test.expected_headers), ToSimpleString(parsed));
}

const ReplaceStatusTestData replace_status_tests[] = {
  { "HTTP/1.1 206 Partial Content\n"
    "connection: keep-alive\n"
    "Cache-control: max-age=10000\n"
    "Content-Length: 450\n",

    "HTTP/1.1 200 OK",

    "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n"
    "Cache-control: max-age=10000\n"
    "Content-Length: 450\n"
  },
  { "HTTP/1.1 200 OK\n"
    "connection: keep-alive\n",

    "HTTP/1.1 304 Not Modified",

    "HTTP/1.1 304 Not Modified\n"
    "connection: keep-alive\n"
  },
  // A malformed version ("HTTP/1//1") is normalized down to HTTP/1.0.
  { "HTTP/1.1 200 OK\n"
    "connection: keep-alive  \n"
    "Content-Length  : 450  \n"
    "Cache-control: max-age=10000\n",

    "HTTP/1//1 304 Not Modified",

    "HTTP/1.0 304 Not Modified\n"
    "connection: keep-alive\n"
    "Content-Length: 450\n"
    "Cache-control: max-age=10000\n"
  },
};

INSTANTIATE_TEST_CASE_P(HttpResponseHeaders,
                        ReplaceStatusTest,
                        testing::ValuesIn(replace_status_tests));

// Original headers plus expected results of UpdateWithNewRange() with and
// without status-line replacement.
struct UpdateWithNewRangeTestData {
  const char* orig_headers;
  const char* expected_headers;
  const char* expected_headers_with_replaced_status;
};

class UpdateWithNewRangeTest
    : public HttpResponseHeadersTest,
      public ::testing::WithParamInterface<UpdateWithNewRangeTestData> {
};

// Applies a fixed byte range (3-5) and checks the rewritten Content-Range /
// Content-Length, first keeping then replacing the status line.
TEST_P(UpdateWithNewRangeTest, UpdateWithNewRange) {
  const UpdateWithNewRangeTestData test = GetParam();

  const HttpByteRange range = HttpByteRange::Bounded(3, 5);

  std::string orig_headers(test.orig_headers);
  std::replace(orig_headers.begin(), orig_headers.end(), '\n', '\0');
  scoped_refptr<HttpResponseHeaders> parsed(
      new HttpResponseHeaders(orig_headers + '\0'));
  int64_t content_size = parsed->GetContentLength();

  // Update headers without replacing status line.
  parsed->UpdateWithNewRange(range, content_size, false);
  EXPECT_EQ(std::string(test.expected_headers), ToSimpleString(parsed));

  // Replace status line too.
  parsed->UpdateWithNewRange(range, content_size, true);
  EXPECT_EQ(std::string(test.expected_headers_with_replaced_status),
            ToSimpleString(parsed));
}

const UpdateWithNewRangeTestData update_range_tests[] = {
  { "HTTP/1.1 200 OK\n"
    "Content-Length: 450\n",

    "HTTP/1.1 200 OK\n"
    "Content-Range: bytes 3-5/450\n"
    "Content-Length: 3\n",

    "HTTP/1.1 206 Partial Content\n"
    "Content-Range: bytes 3-5/450\n"
    "Content-Length: 3\n",
  },
  { "HTTP/1.1 200 OK\n"
    "Content-Length: 5\n",

    "HTTP/1.1 200 OK\n"
    "Content-Range: bytes 3-5/5\n"
    "Content-Length: 3\n",

    "HTTP/1.1 206 Partial Content\n"
    "Content-Range: bytes 3-5/5\n"
    "Content-Length: 3\n",
  },
};

INSTANTIATE_TEST_CASE_P(HttpResponseHeaders,
                        UpdateWithNewRangeTest,
                        testing::ValuesIn(update_range_tests));

// GetMaxAgeValue() fails when Cache-Control has no max-age directive.
TEST_F(HttpResponseHeadersCacheControlTest, AbsentMaxAgeReturnsFalse) {
  InitializeHeadersWithCacheControl("nocache");
  EXPECT_FALSE(headers()->GetMaxAgeValue(TimeDeltaPointer()));
}

// Malformed max-age parameters (empty, whitespace, space before '=') fail.
TEST_F(HttpResponseHeadersCacheControlTest, MaxAgeWithNoParameterRejected) {
  InitializeHeadersWithCacheControl("max-age=,private");
  EXPECT_FALSE(headers()->GetMaxAgeValue(TimeDeltaPointer()));
}

TEST_F(HttpResponseHeadersCacheControlTest, MaxAgeWithSpaceParameterRejected) {
  InitializeHeadersWithCacheControl("max-age= ,private");
  EXPECT_FALSE(headers()->GetMaxAgeValue(TimeDeltaPointer()));
}

TEST_F(HttpResponseHeadersCacheControlTest,
       MaxAgeWithSpaceBeforeEqualsIsRejected) {
  InitializeHeadersWithCacheControl("max-age = 7");
  EXPECT_FALSE(headers()->GetMaxAgeValue(TimeDeltaPointer()));
}

// When the directive appears twice, the first occurrence wins.
TEST_F(HttpResponseHeadersCacheControlTest, MaxAgeFirstMatchUsed) {
  InitializeHeadersWithCacheControl("max-age=10, max-age=20");
  EXPECT_EQ(TimeDelta::FromSeconds(10), GetMaxAgeValue());
}

TEST_F(HttpResponseHeadersCacheControlTest, MaxAgeBogusFirstMatchUsed) {
  // "max-age10" isn't parsed as "max-age"; "max-age=now" is parsed as
  // "max-age=0" and so "max-age=20" is not used.
  InitializeHeadersWithCacheControl("max-age10, max-age=now, max-age=20");
  EXPECT_EQ(TimeDelta::FromSeconds(0), GetMaxAgeValue());
}

// Directive-name matching is case-insensitive.
TEST_F(HttpResponseHeadersCacheControlTest, MaxAgeCaseInsensitive) {
  InitializeHeadersWithCacheControl("Max-aGe=15");
  EXPECT_EQ(TimeDelta::FromSeconds(15), GetMaxAgeValue());
}

// A max-age value string and the number of seconds it should parse to.
struct MaxAgeTestData {
  const char* max_age_string;
  const int64_t expected_seconds;
};

class MaxAgeEdgeCasesTest
    : public HttpResponseHeadersCacheControlTest,
      public ::testing::WithParamInterface<MaxAgeTestData> {
};

// Feeds "max-age=<value>" through the parser and checks the seconds value.
TEST_P(MaxAgeEdgeCasesTest, MaxAgeEdgeCases) {
  const MaxAgeTestData test = GetParam();

  std::string max_age = "max-age=";
  InitializeHeadersWithCacheControl(
      (max_age + test.max_age_string).c_str());
  EXPECT_EQ(test.expected_seconds, GetMaxAgeValue().InSeconds())
      << " for max-age=" << test.max_age_string;
}

const MaxAgeTestData max_age_tests[] = {
    {" 1 ", 1},  // Spaces are ignored.
    {"-1", -1},  // Negative numbers are passed through.
    {"--1", 0},  // Leading junk gives 0.
    {"2s", 2},   // Trailing junk is ignored.
    {"3 days", 3},
    {"'4'", 0},    // Single quotes don't work.
    {"\"5\"", 0},  // Double quotes don't work.
    {"0x6", 0},    // Hex not parsed as hex.
    {"7F", 7},     // Hex without 0x still not parsed as hex.
    {"010", 10},   // Octal not parsed as octal.
    {"9223372036854", 9223372036854},
    // {"9223372036855", -9223372036854},  // Undefined behaviour.
    // {"9223372036854775806", -2},  // Undefined behaviour.
    {"9223372036854775807", 9223372036854775807},
    {"20000000000000000000",
     std::numeric_limits<int64_t>::max()},  // Overflow int64_t.
};
INSTANTIATE_TEST_CASE_P(HttpResponseHeadersCacheControl,
                        MaxAgeEdgeCasesTest,
                        testing::ValuesIn(max_age_tests));

// Headers plus the three clocks GetCurrentAge() consumes (RFC 7234 age
// calculation inputs) and the expected age in seconds.
struct GetCurrentAgeTestData {
  const char* headers;
  const char* request_time;
  const char* response_time;
  const char* current_time;
  const int expected_age;
};

class GetCurrentAgeTest
    : public HttpResponseHeadersTest,
      public ::testing::WithParamInterface<GetCurrentAgeTestData> {
};

// Exercises the current-age computation across combinations of Date and Age
// headers (including Date values ahead of the response time).
TEST_P(GetCurrentAgeTest, GetCurrentAge) {
  const GetCurrentAgeTestData test = GetParam();

  base::Time request_time, response_time, current_time;
  ASSERT_TRUE(base::Time::FromString(test.request_time, &request_time));
  ASSERT_TRUE(base::Time::FromString(test.response_time, &response_time));
  ASSERT_TRUE(base::Time::FromString(test.current_time, &current_time));

  std::string headers(test.headers);
  HeadersToRaw(&headers);
  scoped_refptr<HttpResponseHeaders> parsed(new HttpResponseHeaders(headers));

  base::TimeDelta age =
      parsed->GetCurrentAge(request_time, response_time, current_time);
  EXPECT_EQ(test.expected_age, age.InSeconds());
}

const struct GetCurrentAgeTestData get_current_age_tests[] = {
    // Without Date header.
    {"HTTP/1.1 200 OK\n"
     "Age: 2",
     "Fri, 20 Jan 2011 10:40:08 GMT", "Fri, 20 Jan 2011 10:40:12 GMT",
     "Fri, 20 Jan 2011 10:40:14 GMT", 8},
    // Without Age header.
    {"HTTP/1.1 200 OK\n"
     "Date: Fri, 20 Jan 2011 10:40:10 GMT\n",
     "Fri, 20 Jan 2011 10:40:08 GMT", "Fri, 20 Jan 2011 10:40:12 GMT",
     "Fri, 20 Jan 2011 10:40:14 GMT", 6},
    // date_value > response_time with Age header.
    {"HTTP/1.1 200 OK\n"
     "Date: Fri, 20 Jan 2011 10:40:14 GMT\n"
     "Age: 2\n",
     "Fri, 20 Jan 2011 10:40:08 GMT", "Fri, 20 Jan 2011 10:40:12 GMT",
     "Fri, 20 Jan 2011 10:40:14 GMT", 8},
    // date_value > response_time without Age header.
    {"HTTP/1.1 200 OK\n"
     "Date: Fri, 20 Jan 2011 10:40:14 GMT\n",
     "Fri, 20 Jan 2011 10:40:08 GMT", "Fri, 20 Jan 2011 10:40:12 GMT",
     "Fri, 20 Jan 2011 10:40:14 GMT", 6},
    // apparent_age > corrected_age_value
    {"HTTP/1.1 200 OK\n"
     "Date: Fri, 20 Jan 2011 10:40:07 GMT\n"
     "Age: 0\n",
     "Fri, 20 Jan 2011 10:40:08 GMT", "Fri, 20 Jan 2011 10:40:12 GMT",
     "Fri, 20 Jan 2011 10:40:14 GMT", 7}};

INSTANTIATE_TEST_CASE_P(HttpResponseHeaders,
                        GetCurrentAgeTest,
                        testing::ValuesIn(get_current_age_tests));

}  // namespace

}  // namespace net
null
null
null
null
6,124
52,950
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
52,950
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <stddef.h> #include <memory> #include "base/bind.h" #include "base/bind_helpers.h" #include "base/macros.h" #include "base/run_loop.h" #include "base/test/scoped_task_environment.h" #include "media/base/audio_latency.h" #include "media/base/audio_renderer_mixer.h" #include "media/base/audio_renderer_mixer_input.h" #include "media/base/audio_renderer_mixer_pool.h" #include "media/base/fake_audio_render_callback.h" #include "media/base/mock_audio_renderer_sink.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" namespace { void LogUma(int value) {} } namespace media { static const int kBitsPerChannel = 16; static const int kSampleRate = 48000; static const int kBufferSize = 8192; static const int kRenderFrameId = 42; static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO; static const char kDefaultDeviceId[] = "default"; static const char kAnotherDeviceId[] = "another"; static const char kUnauthorizedDeviceId[] = "unauthorized"; static const char kNonexistentDeviceId[] = "nonexistent"; class AudioRendererMixerInputTest : public testing::Test, AudioRendererMixerPool { public: AudioRendererMixerInputTest() { audio_parameters_ = AudioParameters( AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate, kBitsPerChannel, kBufferSize); CreateMixerInput(kDefaultDeviceId); fake_callback_.reset(new FakeAudioRenderCallback(0, kSampleRate)); audio_bus_ = AudioBus::Create(audio_parameters_); } void CreateMixerInput(const std::string& device_id) { mixer_input_ = new AudioRendererMixerInput(this, kRenderFrameId, device_id, url::Origin(), AudioLatency::LATENCY_PLAYBACK); } AudioRendererMixer* GetMixer(int owner_id, const AudioParameters& params, AudioLatency::LatencyType latency, const std::string& device_id, const url::Origin& security_origin, 
OutputDeviceStatus* device_status) { EXPECT_TRUE(params.IsValid()); if (device_id == kNonexistentDeviceId) { if (device_status) *device_status = OUTPUT_DEVICE_STATUS_ERROR_NOT_FOUND; return nullptr; } if (device_id == kUnauthorizedDeviceId) { if (device_status) *device_status = OUTPUT_DEVICE_STATUS_ERROR_NOT_AUTHORIZED; return nullptr; } size_t idx = (device_id == kDefaultDeviceId) ? 0 : 1; if (!mixers_[idx]) { sinks_[idx] = new MockAudioRendererSink(device_id, OUTPUT_DEVICE_STATUS_OK); EXPECT_CALL(*(sinks_[idx].get()), Start()); EXPECT_CALL(*(sinks_[idx].get()), Stop()); mixers_[idx].reset(new AudioRendererMixer( audio_parameters_, sinks_[idx].get(), base::Bind(&LogUma))); } EXPECT_CALL(*this, ReturnMixer(mixers_[idx].get())); if (device_status) *device_status = OUTPUT_DEVICE_STATUS_OK; return mixers_[idx].get(); } double ProvideInput() { return mixer_input_->ProvideInput(audio_bus_.get(), 0); } OutputDeviceInfo GetOutputDeviceInfo(int source_render_frame_id, int session_id, const std::string& device_id, const url::Origin& security_origin) { OutputDeviceStatus status = OUTPUT_DEVICE_STATUS_OK; if (device_id == kNonexistentDeviceId) status = OUTPUT_DEVICE_STATUS_ERROR_NOT_FOUND; else if (device_id == kUnauthorizedDeviceId) status = OUTPUT_DEVICE_STATUS_ERROR_NOT_AUTHORIZED; GetOutputDeviceInfoCalled(device_id); return OutputDeviceInfo(device_id, status, AudioParameters::UnavailableDeviceParams()); } MOCK_METHOD1(GetOutputDeviceInfoCalled, void(const std::string&)); MOCK_METHOD1(ReturnMixer, void(AudioRendererMixer*)); MOCK_METHOD1(SwitchCallbackCalled, void(OutputDeviceStatus)); void SwitchCallback(base::RunLoop* loop, OutputDeviceStatus result) { SwitchCallbackCalled(result); loop->Quit(); } AudioRendererMixer* GetInputMixer() { return mixer_input_->mixer_; } protected: virtual ~AudioRendererMixerInputTest() = default; base::test::ScopedTaskEnvironment scoped_task_environment_; AudioParameters audio_parameters_; scoped_refptr<MockAudioRendererSink> sinks_[2]; 
std::unique_ptr<AudioRendererMixer> mixers_[2]; scoped_refptr<AudioRendererMixerInput> mixer_input_; std::unique_ptr<FakeAudioRenderCallback> fake_callback_; std::unique_ptr<AudioBus> audio_bus_; private: DISALLOW_COPY_AND_ASSIGN(AudioRendererMixerInputTest); }; TEST_F(AudioRendererMixerInputTest, GetDeviceInfo) { EXPECT_CALL(*this, GetOutputDeviceInfoCalled(kDefaultDeviceId)) .Times(testing::Exactly(1)); mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); // Calling GetOutputDeviceInfo() should result in the mock call, since there // is no mixer created yet for mixer input. mixer_input_->GetOutputDeviceInfo(); mixer_input_->Start(); // This call should be directed to the mixer and should not result in the mock // call. EXPECT_STREQ(kDefaultDeviceId, mixer_input_->GetOutputDeviceInfo().device_id().c_str()); mixer_input_->Stop(); } // Test that getting and setting the volume work as expected. The volume is // returned from ProvideInput() only when playing. TEST_F(AudioRendererMixerInputTest, GetSetVolume) { mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); mixer_input_->Start(); mixer_input_->Play(); // Starting volume should be 1.0. EXPECT_DOUBLE_EQ(ProvideInput(), 1); const double kVolume = 0.5; EXPECT_TRUE(mixer_input_->SetVolume(kVolume)); EXPECT_DOUBLE_EQ(ProvideInput(), kVolume); mixer_input_->Stop(); } // Test Start()/Play()/Pause()/Stop()/playing() all work as expected. Also // implicitly tests that AddMixerInput() and RemoveMixerInput() work without // crashing; functional tests for these methods are in AudioRendererMixerTest. TEST_F(AudioRendererMixerInputTest, StartPlayPauseStopPlaying) { mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); mixer_input_->Start(); mixer_input_->Play(); EXPECT_DOUBLE_EQ(ProvideInput(), 1); mixer_input_->Pause(); mixer_input_->Play(); EXPECT_DOUBLE_EQ(ProvideInput(), 1); mixer_input_->Stop(); } // Test that Stop() can be called before Initialize() and Start(). 
TEST_F(AudioRendererMixerInputTest, StopBeforeInitializeOrStart) { mixer_input_->Stop(); // Verify Stop() works without Initialize() or Start(). CreateMixerInput(kDefaultDeviceId); mixer_input_->Stop(); } // Test that Start() can be called after Stop(). TEST_F(AudioRendererMixerInputTest, StartAfterStop) { mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); mixer_input_->Stop(); mixer_input_->Start(); mixer_input_->Stop(); } // Test that Initialize() can be called again after Stop(). TEST_F(AudioRendererMixerInputTest, InitializeAfterStop) { mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); mixer_input_->Start(); mixer_input_->Stop(); mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); mixer_input_->Stop(); } // Test SwitchOutputDevice(). TEST_F(AudioRendererMixerInputTest, SwitchOutputDevice) { EXPECT_CALL(*this, GetOutputDeviceInfoCalled(testing::_)) .Times(testing::Exactly(0)); mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); mixer_input_->Start(); const std::string kDeviceId("mock-device-id"); EXPECT_CALL(*this, SwitchCallbackCalled(OUTPUT_DEVICE_STATUS_OK)); AudioRendererMixer* old_mixer = GetInputMixer(); EXPECT_EQ(old_mixer, mixers_[0].get()); base::RunLoop run_loop; mixer_input_->SwitchOutputDevice( kDeviceId, url::Origin(), base::Bind(&AudioRendererMixerInputTest::SwitchCallback, base::Unretained(this), &run_loop)); run_loop.Run(); AudioRendererMixer* new_mixer = GetInputMixer(); EXPECT_EQ(new_mixer, mixers_[1].get()); EXPECT_NE(old_mixer, new_mixer); mixer_input_->Stop(); } // Test SwitchOutputDevice() to the same device as the current (default) device TEST_F(AudioRendererMixerInputTest, SwitchOutputDeviceToSameDevice) { EXPECT_CALL(*this, GetOutputDeviceInfoCalled(testing::_)) .Times(testing::Exactly(0)); mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); mixer_input_->Start(); EXPECT_CALL(*this, SwitchCallbackCalled(OUTPUT_DEVICE_STATUS_OK)); AudioRendererMixer* old_mixer = 
GetInputMixer(); base::RunLoop run_loop; mixer_input_->SwitchOutputDevice( kDefaultDeviceId, url::Origin(), base::Bind(&AudioRendererMixerInputTest::SwitchCallback, base::Unretained(this), &run_loop)); run_loop.Run(); AudioRendererMixer* new_mixer = GetInputMixer(); EXPECT_EQ(old_mixer, new_mixer); mixer_input_->Stop(); } // Test SwitchOutputDevice() to the new device TEST_F(AudioRendererMixerInputTest, SwitchOutputDeviceToAnotherDevice) { EXPECT_CALL(*this, GetOutputDeviceInfoCalled(testing::_)) .Times(testing::Exactly(0)); mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); mixer_input_->Start(); EXPECT_CALL(*this, SwitchCallbackCalled(OUTPUT_DEVICE_STATUS_OK)); AudioRendererMixer* old_mixer = GetInputMixer(); base::RunLoop run_loop; mixer_input_->SwitchOutputDevice( kAnotherDeviceId, url::Origin(), base::Bind(&AudioRendererMixerInputTest::SwitchCallback, base::Unretained(this), &run_loop)); run_loop.Run(); AudioRendererMixer* new_mixer = GetInputMixer(); EXPECT_NE(old_mixer, new_mixer); mixer_input_->Stop(); } // Test that SwitchOutputDevice() to a nonexistent device fails. TEST_F(AudioRendererMixerInputTest, SwitchOutputDeviceToNonexistentDevice) { EXPECT_CALL(*this, GetOutputDeviceInfoCalled(testing::_)) .Times(testing::Exactly(0)); mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); mixer_input_->Start(); EXPECT_CALL(*this, SwitchCallbackCalled(OUTPUT_DEVICE_STATUS_ERROR_NOT_FOUND)); base::RunLoop run_loop; mixer_input_->SwitchOutputDevice( kNonexistentDeviceId, url::Origin(), base::Bind(&AudioRendererMixerInputTest::SwitchCallback, base::Unretained(this), &run_loop)); run_loop.Run(); mixer_input_->Stop(); } // Test that SwitchOutputDevice() to an unauthorized device fails. 
TEST_F(AudioRendererMixerInputTest, SwitchOutputDeviceToUnauthorizedDevice) { EXPECT_CALL(*this, GetOutputDeviceInfoCalled(testing::_)) .Times(testing::Exactly(0)); mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); mixer_input_->Start(); EXPECT_CALL(*this, SwitchCallbackCalled(OUTPUT_DEVICE_STATUS_ERROR_NOT_AUTHORIZED)); base::RunLoop run_loop; mixer_input_->SwitchOutputDevice( kUnauthorizedDeviceId, url::Origin(), base::Bind(&AudioRendererMixerInputTest::SwitchCallback, base::Unretained(this), &run_loop)); run_loop.Run(); mixer_input_->Stop(); } // Test that calling SwitchOutputDevice() before Start() succeeds. TEST_F(AudioRendererMixerInputTest, SwitchOutputDeviceBeforeStart) { EXPECT_CALL(*this, GetOutputDeviceInfoCalled(kAnotherDeviceId)) .Times(testing::Exactly(1)); mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); base::RunLoop run_loop; EXPECT_CALL(*this, SwitchCallbackCalled(OUTPUT_DEVICE_STATUS_OK)); mixer_input_->SwitchOutputDevice( kAnotherDeviceId, url::Origin(), base::Bind(&AudioRendererMixerInputTest::SwitchCallback, base::Unretained(this), &run_loop)); mixer_input_->Start(); run_loop.Run(); mixer_input_->Stop(); } // Test that calling SwitchOutputDevice() succeeds even if Start() is never // called. TEST_F(AudioRendererMixerInputTest, SwitchOutputDeviceWithoutStart) { EXPECT_CALL(*this, GetOutputDeviceInfoCalled(kAnotherDeviceId)) .Times(testing::Exactly(1)); mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); base::RunLoop run_loop; EXPECT_CALL(*this, SwitchCallbackCalled(OUTPUT_DEVICE_STATUS_OK)); mixer_input_->SwitchOutputDevice( kAnotherDeviceId, url::Origin(), base::Bind(&AudioRendererMixerInputTest::SwitchCallback, base::Unretained(this), &run_loop)); mixer_input_->Stop(); run_loop.Run(); } // Test creation with an invalid device. OnRenderError() should be called. // Play(), Pause() and SwitchOutputDevice() should not cause crashes, even if // they have no effect. 
TEST_F(AudioRendererMixerInputTest, CreateWithInvalidDevice) { EXPECT_CALL(*this, GetOutputDeviceInfoCalled(kDefaultDeviceId)) .Times(testing::Exactly(1)); // |mixer_input_| was initialized during construction. mixer_input_->Stop(); CreateMixerInput(kNonexistentDeviceId); EXPECT_CALL(*fake_callback_, OnRenderError()); mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); mixer_input_->Start(); mixer_input_->Play(); mixer_input_->Pause(); base::RunLoop run_loop; EXPECT_CALL(*this, SwitchCallbackCalled(testing::_)); mixer_input_->SwitchOutputDevice( kDefaultDeviceId, url::Origin(), base::Bind(&AudioRendererMixerInputTest::SwitchCallback, base::Unretained(this), &run_loop)); mixer_input_->Stop(); run_loop.Run(); } // Test that calling SwitchOutputDevice() works after calling Stop(), and that // restarting works after the call to SwitchOutputDevice(). TEST_F(AudioRendererMixerInputTest, SwitchOutputDeviceAfterStopBeforeRestart) { EXPECT_CALL(*this, GetOutputDeviceInfoCalled(kAnotherDeviceId)) .Times(testing::Exactly(1)); mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); mixer_input_->Start(); mixer_input_->Stop(); base::RunLoop run_loop; EXPECT_CALL(*this, SwitchCallbackCalled(OUTPUT_DEVICE_STATUS_OK)); mixer_input_->SwitchOutputDevice( kAnotherDeviceId, url::Origin(), base::Bind(&AudioRendererMixerInputTest::SwitchCallback, base::Unretained(this), &run_loop)); run_loop.Run(); mixer_input_->Start(); mixer_input_->Stop(); } // Test that calling SwitchOutputDevice() works before calling Initialize(), // and that initialization and restart work after the call to // SwitchOutputDevice(). 
TEST_F(AudioRendererMixerInputTest, SwitchOutputDeviceBeforeInitialize) { EXPECT_CALL(*this, GetOutputDeviceInfoCalled(kAnotherDeviceId)) .Times(testing::Exactly(1)); base::RunLoop run_loop; EXPECT_CALL(*this, SwitchCallbackCalled(OUTPUT_DEVICE_STATUS_OK)); mixer_input_->SwitchOutputDevice( kAnotherDeviceId, url::Origin(), base::Bind(&AudioRendererMixerInputTest::SwitchCallback, base::Unretained(this), &run_loop)); run_loop.Run(); mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); mixer_input_->Start(); mixer_input_->Stop(); } } // namespace media
null
null
null
null
49,813
24,435
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
24,435
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/browser/accessibility/browser_accessibility_manager.h" #include <stddef.h> #include <utility> #include "base/debug/crash_logging.h" #include "base/logging.h" #include "build/build_config.h" #include "content/browser/accessibility/browser_accessibility.h" #include "content/common/accessibility_messages.h" #include "content/public/common/use_zoom_for_dsf_policy.h" #include "ui/accessibility/ax_tree_data.h" #include "ui/accessibility/ax_tree_serializer.h" namespace content { namespace { // Search the tree recursively from |node| and return any node that has // a child tree ID of |ax_tree_id|. BrowserAccessibility* FindNodeWithChildTreeId(BrowserAccessibility* node, int ax_tree_id) { if (!node) return nullptr; if (node->GetIntAttribute(ax::mojom::IntAttribute::kChildTreeId) == ax_tree_id) return node; for (unsigned int i = 0; i < node->InternalChildCount(); ++i) { BrowserAccessibility* child = node->InternalGetChild(i); BrowserAccessibility* result = FindNodeWithChildTreeId(child, ax_tree_id); if (result) return result; } return nullptr; } // Map from AXTreeID to BrowserAccessibilityManager using AXTreeIDMap = base::hash_map<ui::AXTreeIDRegistry::AXTreeID, BrowserAccessibilityManager*>; base::LazyInstance<AXTreeIDMap>::Leaky g_ax_tree_id_map = LAZY_INSTANCE_INITIALIZER; // A function to call when focus changes, for testing only. 
base::LazyInstance<base::Closure>::DestructorAtExit g_focus_change_callback_for_testing = LAZY_INSTANCE_INITIALIZER; } // namespace ui::AXTreeUpdate MakeAXTreeUpdate( const ui::AXNodeData& node1, const ui::AXNodeData& node2 /* = ui::AXNodeData() */, const ui::AXNodeData& node3 /* = ui::AXNodeData() */, const ui::AXNodeData& node4 /* = ui::AXNodeData() */, const ui::AXNodeData& node5 /* = ui::AXNodeData() */, const ui::AXNodeData& node6 /* = ui::AXNodeData() */, const ui::AXNodeData& node7 /* = ui::AXNodeData() */, const ui::AXNodeData& node8 /* = ui::AXNodeData() */, const ui::AXNodeData& node9 /* = ui::AXNodeData() */, const ui::AXNodeData& node10 /* = ui::AXNodeData() */, const ui::AXNodeData& node11 /* = ui::AXNodeData() */, const ui::AXNodeData& node12 /* = ui::AXNodeData() */) { CR_DEFINE_STATIC_LOCAL(ui::AXNodeData, empty_data, ()); int32_t no_id = empty_data.id; ui::AXTreeUpdate update; ui::AXTreeData tree_data; tree_data.tree_id = 1; tree_data.focused_tree_id = 1; update.tree_data = tree_data; update.has_tree_data = true; update.root_id = node1.id; update.nodes.push_back(node1); if (node2.id != no_id) update.nodes.push_back(node2); if (node3.id != no_id) update.nodes.push_back(node3); if (node4.id != no_id) update.nodes.push_back(node4); if (node5.id != no_id) update.nodes.push_back(node5); if (node6.id != no_id) update.nodes.push_back(node6); if (node7.id != no_id) update.nodes.push_back(node7); if (node8.id != no_id) update.nodes.push_back(node8); if (node9.id != no_id) update.nodes.push_back(node9); if (node10.id != no_id) update.nodes.push_back(node10); if (node11.id != no_id) update.nodes.push_back(node11); if (node12.id != no_id) update.nodes.push_back(node12); return update; } BrowserAccessibility* BrowserAccessibilityFactory::Create() { return BrowserAccessibility::Create(); } BrowserAccessibilityFindInPageInfo::BrowserAccessibilityFindInPageInfo() : request_id(-1), match_index(-1), start_id(-1), start_offset(0), end_id(-1), end_offset(-1), 
active_request_id(-1) {} #if !defined(PLATFORM_HAS_NATIVE_ACCESSIBILITY_IMPL) // static BrowserAccessibilityManager* BrowserAccessibilityManager::Create( const ui::AXTreeUpdate& initial_tree, BrowserAccessibilityDelegate* delegate, BrowserAccessibilityFactory* factory) { return new BrowserAccessibilityManager(initial_tree, delegate, factory); } #endif // static BrowserAccessibilityManager* BrowserAccessibilityManager::FromID( ui::AXTreeIDRegistry::AXTreeID ax_tree_id) { AXTreeIDMap* ax_tree_id_map = g_ax_tree_id_map.Pointer(); auto iter = ax_tree_id_map->find(ax_tree_id); return iter == ax_tree_id_map->end() ? nullptr : iter->second; } BrowserAccessibilityManager::BrowserAccessibilityManager( BrowserAccessibilityDelegate* delegate, BrowserAccessibilityFactory* factory) : AXEventGenerator(), delegate_(delegate), factory_(factory), tree_(new ui::AXSerializableTree()), user_is_navigating_away_(false), last_focused_node_(nullptr), last_focused_manager_(nullptr), connected_to_parent_tree_node_(false), ax_tree_id_(ui::AXTreeIDRegistry::kNoAXTreeID), parent_node_id_from_parent_tree_(0), device_scale_factor_(1.0f), use_custom_device_scale_factor_for_testing_(false) { SetTree(tree_.get()); } BrowserAccessibilityManager::BrowserAccessibilityManager( const ui::AXTreeUpdate& initial_tree, BrowserAccessibilityDelegate* delegate, BrowserAccessibilityFactory* factory) : AXEventGenerator(), delegate_(delegate), factory_(factory), tree_(new ui::AXSerializableTree()), user_is_navigating_away_(false), last_focused_node_(nullptr), last_focused_manager_(nullptr), ax_tree_id_(ui::AXTreeIDRegistry::kNoAXTreeID), parent_node_id_from_parent_tree_(0), device_scale_factor_(1.0f), use_custom_device_scale_factor_for_testing_(false) { SetTree(tree_.get()); Initialize(initial_tree); } BrowserAccessibilityManager::~BrowserAccessibilityManager() { tree_.reset(nullptr); ReleaseTree(); g_ax_tree_id_map.Get().erase(ax_tree_id_); } void BrowserAccessibilityManager::Initialize( const ui::AXTreeUpdate& 
initial_tree) { if (!tree_->Unserialize(initial_tree)) { static auto* ax_tree_error = base::debug::AllocateCrashKeyString( "ax_tree_error", base::debug::CrashKeySize::Size32); static auto* ax_tree_update = base::debug::AllocateCrashKeyString( "ax_tree_update", base::debug::CrashKeySize::Size64); // Temporarily log some additional crash keys so we can try to // figure out why we're getting bad accessibility trees here. // http://crbug.com/765490 // Be sure to re-enable BrowserAccessibilityManagerTest.TestFatalError // when done (or delete it if no longer needed). base::debug::SetCrashKeyString(ax_tree_error, tree_->error()); base::debug::SetCrashKeyString(ax_tree_update, initial_tree.ToString()); LOG(FATAL) << tree_->error(); } } // A flag for use in tests to ensure events aren't suppressed or delayed. // static bool BrowserAccessibilityManager::never_suppress_or_delay_events_for_testing_ = false; // static ui::AXTreeUpdate BrowserAccessibilityManager::GetEmptyDocument() { ui::AXNodeData empty_document; empty_document.id = 0; empty_document.role = ax::mojom::Role::kRootWebArea; ui::AXTreeUpdate update; update.nodes.push_back(empty_document); return update; } void BrowserAccessibilityManager::FireFocusEventsIfNeeded() { BrowserAccessibility* focus = GetFocus(); // Don't fire focus events if the window itself doesn't have focus. // Bypass this check for some tests. if (!never_suppress_or_delay_events_for_testing_ && !g_focus_change_callback_for_testing.Get()) { if (delegate_ && !delegate_->AccessibilityViewHasFocus()) focus = nullptr; if (!CanFireEvents()) focus = nullptr; } // Don't allow the document to be focused if it has no children and // hasn't finished loading yet. Wait for at least a tiny bit of content, // or for the document to actually finish loading. 
if (focus && focus == focus->manager()->GetRoot() && focus->PlatformChildCount() == 0 && !focus->GetBoolAttribute(ax::mojom::BoolAttribute::kBusy) && !focus->manager()->GetTreeData().loaded) { focus = nullptr; } if (focus && focus != last_focused_node_) FireFocusEvent(focus); last_focused_node_ = focus; last_focused_manager_ = focus ? focus->manager() : nullptr; } bool BrowserAccessibilityManager::CanFireEvents() { return true; } void BrowserAccessibilityManager::FireFocusEvent(BrowserAccessibility* node) { if (g_focus_change_callback_for_testing.Get()) g_focus_change_callback_for_testing.Get().Run(); } BrowserAccessibility* BrowserAccessibilityManager::GetRoot() { // tree_ can be null during destruction. if (!tree_) return nullptr; // tree_->root() can be null during AXTreeDelegate callbacks. ui::AXNode* root = tree_->root(); return root ? GetFromAXNode(root) : nullptr; } BrowserAccessibility* BrowserAccessibilityManager::GetFromAXNode( const ui::AXNode* node) const { if (!node) return nullptr; return GetFromID(node->id()); } BrowserAccessibility* BrowserAccessibilityManager::GetFromID(int32_t id) const { const auto iter = id_wrapper_map_.find(id); if (iter != id_wrapper_map_.end()) return iter->second; return nullptr; } BrowserAccessibility* BrowserAccessibilityManager::GetParentNodeFromParentTree() { if (!GetRoot()) return nullptr; int parent_tree_id = GetTreeData().parent_tree_id; BrowserAccessibilityManager* parent_manager = BrowserAccessibilityManager::FromID(parent_tree_id); if (!parent_manager) return nullptr; // Try to use the cached parent node from the most recent time this // was called. 
if (parent_node_id_from_parent_tree_) { BrowserAccessibility* parent_node = parent_manager->GetFromID( parent_node_id_from_parent_tree_); if (parent_node) { int parent_child_tree_id = parent_node->GetIntAttribute(ax::mojom::IntAttribute::kChildTreeId); if (parent_child_tree_id == ax_tree_id_) return parent_node; } } // If that fails, search for it and cache it for next time. BrowserAccessibility* parent_node = FindNodeWithChildTreeId( parent_manager->GetRoot(), ax_tree_id_); if (parent_node) { parent_node_id_from_parent_tree_ = parent_node->GetId(); return parent_node; } return nullptr; } const ui::AXTreeData& BrowserAccessibilityManager::GetTreeData() { return tree_->data(); } void BrowserAccessibilityManager::OnWindowFocused() { if (this == GetRootManager()) FireFocusEventsIfNeeded(); } void BrowserAccessibilityManager::OnWindowBlurred() { if (this == GetRootManager()) { last_focused_node_ = nullptr; last_focused_manager_ = nullptr; } } void BrowserAccessibilityManager::UserIsNavigatingAway() { user_is_navigating_away_ = true; } void BrowserAccessibilityManager::UserIsReloading() { user_is_navigating_away_ = true; } void BrowserAccessibilityManager::NavigationSucceeded() { user_is_navigating_away_ = false; } void BrowserAccessibilityManager::NavigationFailed() { user_is_navigating_away_ = false; } void BrowserAccessibilityManager::DidStopLoading() { user_is_navigating_away_ = false; } bool BrowserAccessibilityManager::UseRootScrollOffsetsWhenComputingBounds() { return true; } void BrowserAccessibilityManager::OnAccessibilityEvents( const std::vector<AXEventNotificationDetails>& details) { TRACE_EVENT0("accessibility", "BrowserAccessibilityManager::OnAccessibilityEvents"); // Update the cached device scale factor. if (delegate_ && !use_custom_device_scale_factor_for_testing_) device_scale_factor_ = delegate_->AccessibilityGetDeviceScaleFactor(); // Process all changes to the accessibility tree first. 
for (uint32_t index = 0; index < details.size(); ++index) { const AXEventNotificationDetails& detail = details[index]; if (!tree_->Unserialize(detail.update)) { if (delegate_) { LOG(ERROR) << tree_->error(); delegate_->AccessibilityFatalError(); } else { CHECK(false) << tree_->error(); } return; } } // If this page is hidden by an interstitial, suppress all events. if (GetRootManager()->hidden_by_interstitial_page()) { ClearEvents(); return; } // If the root's parent is in another accessibility tree but it wasn't // previously connected, post the proper notifications on the parent. BrowserAccessibility* parent = GetParentNodeFromParentTree(); if (parent) { if (!connected_to_parent_tree_node_) { parent->OnDataChanged(); parent->UpdatePlatformAttributes(); FireGeneratedEvent(Event::CHILDREN_CHANGED, parent); connected_to_parent_tree_node_ = true; } } else { connected_to_parent_tree_node_ = false; } // Based on the changes to the tree, fire focus events if needed. // Screen readers might not do the right thing if they're not aware of what // has focus, so always try that first. Nothing will be fired if the window // itself isn't focused or if focus hasn't changed. GetRootManager()->FireFocusEventsIfNeeded(); // Fire any events related to changes to the tree. for (auto targeted_event : *this) { BrowserAccessibility* event_target = GetFromAXNode(targeted_event.node); if (!event_target) continue; FireGeneratedEvent(targeted_event.event, event_target); } ClearEvents(); // Fire events from Blink. for (uint32_t index = 0; index < details.size(); index++) { const AXEventNotificationDetails& detail = details[index]; // Fire the native event. 
BrowserAccessibility* event_target = GetFromID(detail.id); if (!event_target) return; if (detail.event_type == ax::mojom::Event::kHover) GetRootManager()->CacheHitTestResult(event_target); FireBlinkEvent(detail.event_type, event_target); } } void BrowserAccessibilityManager::OnLocationChanges( const std::vector<AccessibilityHostMsg_LocationChangeParams>& params) { for (size_t i = 0; i < params.size(); ++i) { BrowserAccessibility* obj = GetFromID(params[i].id); if (!obj) continue; ui::AXNode* node = obj->node(); node->SetLocation(params[i].new_location.offset_container_id, params[i].new_location.bounds, params[i].new_location.transform.get()); } SendLocationChangeEvents(params); } void BrowserAccessibilityManager::SendLocationChangeEvents( const std::vector<AccessibilityHostMsg_LocationChangeParams>& params) { for (size_t i = 0; i < params.size(); ++i) { BrowserAccessibility* obj = GetFromID(params[i].id); if (obj) obj->OnLocationChanged(); } } void BrowserAccessibilityManager::OnFindInPageResult( int request_id, int match_index, int start_id, int start_offset, int end_id, int end_offset) { find_in_page_info_.request_id = request_id; find_in_page_info_.match_index = match_index; find_in_page_info_.start_id = start_id; find_in_page_info_.start_offset = start_offset; find_in_page_info_.end_id = end_id; find_in_page_info_.end_offset = end_offset; if (find_in_page_info_.active_request_id == request_id) ActivateFindInPageResult(request_id); } void BrowserAccessibilityManager::ActivateFindInPageResult( int request_id) { find_in_page_info_.active_request_id = request_id; if (find_in_page_info_.request_id != request_id) return; BrowserAccessibility* node = GetFromID(find_in_page_info_.start_id); if (!node) return; // If an ancestor of this node is a leaf node, fire the notification on that. node = node->GetClosestPlatformObject(); // The "scrolled to anchor" notification is a great way to get a // screen reader to jump directly to a specific location in a document. 
FireBlinkEvent(ax::mojom::Event::kScrolledToAnchor, node); } BrowserAccessibility* BrowserAccessibilityManager::GetActiveDescendant( BrowserAccessibility* focus) { if (!focus) return nullptr; int32_t active_descendant_id; BrowserAccessibility* active_descendant = nullptr; if (focus->GetIntAttribute(ax::mojom::IntAttribute::kActivedescendantId, &active_descendant_id)) { active_descendant = focus->manager()->GetFromID(active_descendant_id); } if (focus->GetRole() == ax::mojom::Role::kPopUpButton) { BrowserAccessibility* child = focus->InternalGetChild(0); if (child && child->GetRole() == ax::mojom::Role::kMenuListPopup) { // The active descendant is found on the menu list popup, i.e. on the // actual list and not on the button that opens it. // If there is no active descendant, focus should stay on the button so // that Windows screen readers would enable their virtual cursor. if (child->GetIntAttribute(ax::mojom::IntAttribute::kActivedescendantId, &active_descendant_id)) { active_descendant = child->manager()->GetFromID(active_descendant_id); } } } if (active_descendant) return active_descendant; return focus; } bool BrowserAccessibilityManager::NativeViewHasFocus() { BrowserAccessibilityDelegate* delegate = GetDelegateFromRootManager(); return delegate && delegate->AccessibilityViewHasFocus(); } BrowserAccessibility* BrowserAccessibilityManager::GetFocus() { BrowserAccessibilityManager* root_manager = GetRootManager(); if (!root_manager) root_manager = this; int32_t focused_tree_id = root_manager->GetTreeData().focused_tree_id; BrowserAccessibilityManager* focused_manager = nullptr; if (focused_tree_id) focused_manager = BrowserAccessibilityManager::FromID(focused_tree_id); // BrowserAccessibilityManager::FromID(focused_tree_id) may return nullptr // if the tree is not created or has been destroyed. 
if (!focused_manager) focused_manager = root_manager; return focused_manager->GetFocusFromThisOrDescendantFrame(); } BrowserAccessibility* BrowserAccessibilityManager::GetFocusFromThisOrDescendantFrame() { int32_t focus_id = GetTreeData().focus_id; BrowserAccessibility* obj = GetFromID(focus_id); if (!obj) return GetRoot(); if (obj->HasIntAttribute(ax::mojom::IntAttribute::kChildTreeId)) { BrowserAccessibilityManager* child_manager = BrowserAccessibilityManager::FromID( obj->GetIntAttribute(ax::mojom::IntAttribute::kChildTreeId)); if (child_manager) return child_manager->GetFocusFromThisOrDescendantFrame(); } return obj; } void BrowserAccessibilityManager::SetFocus(const BrowserAccessibility& node) { if (!delegate_) return; ui::AXActionData action_data; action_data.action = ax::mojom::Action::kFocus; action_data.target_node_id = node.GetId(); delegate_->AccessibilityPerformAction(action_data); } void BrowserAccessibilityManager::SetFocusLocallyForTesting( BrowserAccessibility* node) { ui::AXTreeData data = GetTreeData(); data.focus_id = node->GetId(); tree_->UpdateData(data); } // static void BrowserAccessibilityManager::SetFocusChangeCallbackForTesting( const base::Closure& callback) { g_focus_change_callback_for_testing.Get() = callback; } // static void BrowserAccessibilityManager::NeverSuppressOrDelayEventsForTesting() { never_suppress_or_delay_events_for_testing_ = true; } void BrowserAccessibilityManager::Decrement( const BrowserAccessibility& node) { if (!delegate_) return; ui::AXActionData action_data; action_data.action = ax::mojom::Action::kDecrement; action_data.target_node_id = node.GetId(); delegate_->AccessibilityPerformAction(action_data); } void BrowserAccessibilityManager::DoDefaultAction( const BrowserAccessibility& node) { if (!delegate_) return; ui::AXActionData action_data; action_data.action = ax::mojom::Action::kDoDefault; action_data.target_node_id = node.GetId(); delegate_->AccessibilityPerformAction(action_data); } void 
BrowserAccessibilityManager::GetImageData( const BrowserAccessibility& node, const gfx::Size& max_size) { if (!delegate_) return; ui::AXActionData action_data; action_data.action = ax::mojom::Action::kGetImageData; action_data.target_node_id = node.GetId(); action_data.target_rect = gfx::Rect(gfx::Point(), max_size); delegate_->AccessibilityPerformAction(action_data); } void BrowserAccessibilityManager::Increment( const BrowserAccessibility& node) { if (!delegate_) return; ui::AXActionData action_data; action_data.action = ax::mojom::Action::kIncrement; action_data.target_node_id = node.GetId(); delegate_->AccessibilityPerformAction(action_data); } void BrowserAccessibilityManager::ShowContextMenu( const BrowserAccessibility& node) { if (!delegate_) return; ui::AXActionData action_data; action_data.action = ax::mojom::Action::kShowContextMenu; action_data.target_node_id = node.GetId(); delegate_->AccessibilityPerformAction(action_data); } void BrowserAccessibilityManager::ScrollToMakeVisible( const BrowserAccessibility& node, gfx::Rect subfocus) { if (!delegate_) return; ui::AXActionData action_data; action_data.target_node_id = node.GetId(); action_data.action = ax::mojom::Action::kScrollToMakeVisible; action_data.target_rect = subfocus; delegate_->AccessibilityPerformAction(action_data); } void BrowserAccessibilityManager::ScrollToPoint( const BrowserAccessibility& node, gfx::Point point) { if (!delegate_) return; ui::AXActionData action_data; action_data.target_node_id = node.GetId(); action_data.action = ax::mojom::Action::kScrollToPoint; action_data.target_point = point; delegate_->AccessibilityPerformAction(action_data); } void BrowserAccessibilityManager::SetScrollOffset( const BrowserAccessibility& node, gfx::Point offset) { if (!delegate_) return; ui::AXActionData action_data; action_data.target_node_id = node.GetId(); action_data.action = ax::mojom::Action::kSetScrollOffset; action_data.target_point = offset; 
delegate_->AccessibilityPerformAction(action_data); } void BrowserAccessibilityManager::SetValue( const BrowserAccessibility& node, const base::string16& value) { if (!delegate_) return; ui::AXActionData action_data; action_data.target_node_id = node.GetId(); action_data.action = ax::mojom::Action::kSetValue; action_data.value = value; delegate_->AccessibilityPerformAction(action_data); } void BrowserAccessibilityManager::SetSelection(AXPlatformRange range) { if (!delegate_ || range.IsNull()) return; ui::AXActionData action_data; action_data.anchor_node_id = range.anchor()->anchor_id(); action_data.anchor_offset = range.anchor()->text_offset(); action_data.focus_node_id = range.focus()->anchor_id(); action_data.focus_offset = range.focus()->text_offset(); action_data.action = ax::mojom::Action::kSetSelection; delegate_->AccessibilityPerformAction(action_data); } void BrowserAccessibilityManager::LoadInlineTextBoxes( const BrowserAccessibility& node) { if (!delegate_) return; ui::AXActionData action_data; action_data.action = ax::mojom::Action::kLoadInlineTextBoxes; action_data.target_node_id = node.GetId(); delegate_->AccessibilityPerformAction(action_data); } void BrowserAccessibilityManager::HitTest(const gfx::Point& point) { if (!delegate_) return; ui::AXActionData action_data; action_data.action = ax::mojom::Action::kHitTest; action_data.target_point = point; action_data.hit_test_event_to_fire = ax::mojom::Event::kHover; delegate_->AccessibilityPerformAction(action_data); } gfx::Rect BrowserAccessibilityManager::GetViewBounds() { BrowserAccessibilityDelegate* delegate = GetDelegateFromRootManager(); if (delegate) return delegate->AccessibilityGetViewBounds(); return gfx::Rect(); } // static // Next object in tree using depth-first pre-order traversal. 
BrowserAccessibility* BrowserAccessibilityManager::NextInTreeOrder( const BrowserAccessibility* object) { if (!object) return nullptr; if (object->PlatformChildCount()) return object->PlatformGetChild(0); while (object) { BrowserAccessibility* sibling = object->GetNextSibling(); if (sibling) return sibling; object = object->PlatformGetParent(); } return nullptr; } // static // Previous object in tree using depth-first pre-order traversal. BrowserAccessibility* BrowserAccessibilityManager::PreviousInTreeOrder( const BrowserAccessibility* object, bool can_wrap_to_last_element) { if (!object) return nullptr; // For android, this needs to be handled carefully. If not, there is a chance // of getting into infinite loop. if (can_wrap_to_last_element && object->GetRole() == ax::mojom::Role::kRootWebArea && object->PlatformChildCount() != 0) { return object->PlatformDeepestLastChild(); } BrowserAccessibility* sibling = object->GetPreviousSibling(); if (!sibling) return object->PlatformGetParent(); if (sibling->PlatformChildCount()) return sibling->PlatformDeepestLastChild(); return sibling; } // static BrowserAccessibility* BrowserAccessibilityManager::PreviousTextOnlyObject( const BrowserAccessibility* object) { BrowserAccessibility* previous_object = PreviousInTreeOrder(object, false); while (previous_object && !previous_object->IsTextOnlyObject()) previous_object = PreviousInTreeOrder(previous_object, false); return previous_object; } // static BrowserAccessibility* BrowserAccessibilityManager::NextTextOnlyObject( const BrowserAccessibility* object) { BrowserAccessibility* next_object = NextInTreeOrder(object); while (next_object && !next_object->IsTextOnlyObject()) next_object = NextInTreeOrder(next_object); return next_object; } // static bool BrowserAccessibilityManager::FindIndicesInCommonParent( const BrowserAccessibility& object1, const BrowserAccessibility& object2, BrowserAccessibility** common_parent, int* child_index1, int* child_index2) { DCHECK(common_parent 
&& child_index1 && child_index2); auto* ancestor1 = const_cast<BrowserAccessibility*>(&object1); auto* ancestor2 = const_cast<BrowserAccessibility*>(&object2); do { *child_index1 = ancestor1->GetIndexInParent(); ancestor1 = ancestor1->PlatformGetParent(); } while ( ancestor1 && // |BrowserAccessibility::IsAncestorOf| returns true if objects are equal. (ancestor1 == ancestor2 || !ancestor2->IsDescendantOf(ancestor1))); if (!ancestor1) { *common_parent = nullptr; *child_index1 = -1; *child_index2 = -1; return false; } do { *child_index2 = ancestor2->GetIndexInParent(); ancestor2 = ancestor2->PlatformGetParent(); } while (ancestor1 != ancestor2); *common_parent = ancestor1; return true; } // static ax::mojom::TreeOrder BrowserAccessibilityManager::CompareNodes( const BrowserAccessibility& object1, const BrowserAccessibility& object2) { if (&object1 == &object2) return ax::mojom::TreeOrder::kEqual; BrowserAccessibility* common_parent; int child_index1; int child_index2; if (FindIndicesInCommonParent( object1, object2, &common_parent, &child_index1, &child_index2)) { if (child_index1 < child_index2) return ax::mojom::TreeOrder::kBefore; if (child_index1 > child_index2) return ax::mojom::TreeOrder::kAfter; } if (object2.IsDescendantOf(&object1)) return ax::mojom::TreeOrder::kBefore; if (object1.IsDescendantOf(&object2)) return ax::mojom::TreeOrder::kAfter; return ax::mojom::TreeOrder::kUndefined; } std::vector<const BrowserAccessibility*> BrowserAccessibilityManager::FindTextOnlyObjectsInRange( const BrowserAccessibility& start_object, const BrowserAccessibility& end_object) { std::vector<const BrowserAccessibility*> text_only_objects; int child_index1 = -1; int child_index2 = -1; if (&start_object != &end_object) { BrowserAccessibility* common_parent; if (!FindIndicesInCommonParent(start_object, end_object, &common_parent, &child_index1, &child_index2)) { return text_only_objects; } DCHECK(common_parent); DCHECK_GE(child_index1, 0); DCHECK_GE(child_index2, 0); // If the 
child indices are equal, one object is a descendant of the other. DCHECK(child_index1 != child_index2 || start_object.IsDescendantOf(&end_object) || end_object.IsDescendantOf(&start_object)); } const BrowserAccessibility* start_text_object = nullptr; const BrowserAccessibility* end_text_object = nullptr; if (&start_object == &end_object && start_object.IsPlainTextField()) { // We need to get to the shadow DOM that is inside the text control in order // to find the text-only objects. if (!start_object.InternalChildCount()) return text_only_objects; start_text_object = start_object.InternalGetChild(0); end_text_object = start_object.InternalGetChild(start_object.InternalChildCount() - 1); } else if (child_index1 <= child_index2 || end_object.IsDescendantOf(&start_object)) { start_text_object = &start_object; end_text_object = &end_object; } else if (child_index1 > child_index2 || start_object.IsDescendantOf(&end_object)) { start_text_object = &end_object; end_text_object = &start_object; } // Pre-order traversal might leave some text-only objects behind if we don't // start from the deepest children of the end object. 
if (!end_text_object->PlatformIsLeaf()) end_text_object = end_text_object->PlatformDeepestLastChild(); if (!start_text_object->IsTextOnlyObject()) start_text_object = NextTextOnlyObject(start_text_object); if (!end_text_object->IsTextOnlyObject()) end_text_object = PreviousTextOnlyObject(end_text_object); if (!start_text_object || !end_text_object) return text_only_objects; while (start_text_object && start_text_object != end_text_object) { text_only_objects.push_back(start_text_object); start_text_object = NextTextOnlyObject(start_text_object); } text_only_objects.push_back(end_text_object); return text_only_objects; } // static base::string16 BrowserAccessibilityManager::GetTextForRange( const BrowserAccessibility& start_object, const BrowserAccessibility& end_object) { return GetTextForRange(start_object, 0, end_object, end_object.GetText().length()); } // static base::string16 BrowserAccessibilityManager::GetTextForRange( const BrowserAccessibility& start_object, int start_offset, const BrowserAccessibility& end_object, int end_offset) { DCHECK_GE(start_offset, 0); DCHECK_GE(end_offset, 0); if (&start_object == &end_object && start_object.IsPlainTextField()) { if (start_offset > end_offset) std::swap(start_offset, end_offset); if (start_offset >= static_cast<int>(start_object.GetText().length()) || end_offset > static_cast<int>(start_object.GetText().length())) { return base::string16(); } return start_object.GetText().substr(start_offset, end_offset - start_offset); } std::vector<const BrowserAccessibility*> text_only_objects = FindTextOnlyObjectsInRange(start_object, end_object); if (text_only_objects.empty()) return base::string16(); if (text_only_objects.size() == 1) { // Be a little permissive with the start and end offsets. 
if (start_offset > end_offset) std::swap(start_offset, end_offset); const BrowserAccessibility* text_object = text_only_objects[0]; if (start_offset < static_cast<int>(text_object->GetText().length()) && end_offset <= static_cast<int>(text_object->GetText().length())) { return text_object->GetText().substr(start_offset, end_offset - start_offset); } return text_object->GetText(); } base::string16 text; const BrowserAccessibility* start_text_object = text_only_objects[0]; // Figure out if the start and end positions have been reversed. const BrowserAccessibility* first_object = &start_object; if (!first_object->IsTextOnlyObject()) first_object = NextTextOnlyObject(first_object); if (!first_object || first_object != start_text_object) std::swap(start_offset, end_offset); if (start_offset < static_cast<int>(start_text_object->GetText().length())) { text += start_text_object->GetText().substr(start_offset); } else { text += start_text_object->GetText(); } for (size_t i = 1; i < text_only_objects.size() - 1; ++i) { text += text_only_objects[i]->GetText(); } const BrowserAccessibility* end_text_object = text_only_objects.back(); if (end_offset <= static_cast<int>(end_text_object->GetText().length())) { text += end_text_object->GetText().substr(0, end_offset); } else { text += end_text_object->GetText(); } return text; } // static gfx::Rect BrowserAccessibilityManager::GetPageBoundsForRange( const BrowserAccessibility& start_object, int start_offset, const BrowserAccessibility& end_object, int end_offset) { DCHECK_GE(start_offset, 0); DCHECK_GE(end_offset, 0); if (&start_object == &end_object && start_object.IsPlainTextField()) { if (start_offset > end_offset) std::swap(start_offset, end_offset); if (start_offset >= static_cast<int>(start_object.GetText().length()) || end_offset > static_cast<int>(start_object.GetText().length())) { return gfx::Rect(); } return start_object.GetPageBoundsForRange( start_offset, end_offset - start_offset); } gfx::Rect result; const 
BrowserAccessibility* first = &start_object; const BrowserAccessibility* last = &end_object; switch (CompareNodes(*first, *last)) { case ax::mojom::TreeOrder::kBefore: case ax::mojom::TreeOrder::kEqual: break; case ax::mojom::TreeOrder::kAfter: std::swap(first, last); std::swap(start_offset, end_offset); break; default: return gfx::Rect(); } const BrowserAccessibility* current = first; do { if (current->IsTextOnlyObject()) { int len = static_cast<int>(current->GetText().size()); int start_char_index = 0; int end_char_index = len; if (current == first) start_char_index = start_offset; if (current == last) end_char_index = end_offset; result.Union(current->GetPageBoundsForRange( start_char_index, end_char_index - start_char_index)); } else { result.Union(current->GetPageBoundsRect()); } if (current == last) break; current = NextInTreeOrder(current); } while (current); return result; } void BrowserAccessibilityManager::OnNodeWillBeDeleted(ui::AXTree* tree, ui::AXNode* node) { AXEventGenerator::OnNodeWillBeDeleted(tree, node); DCHECK(node); if (id_wrapper_map_.find(node->id()) == id_wrapper_map_.end()) return; GetFromAXNode(node)->Destroy(); id_wrapper_map_.erase(node->id()); } void BrowserAccessibilityManager::OnSubtreeWillBeDeleted(ui::AXTree* tree, ui::AXNode* node) { AXEventGenerator::OnSubtreeWillBeDeleted(tree, node); DCHECK(node); BrowserAccessibility* obj = GetFromAXNode(node); if (obj) obj->OnSubtreeWillBeDeleted(); } void BrowserAccessibilityManager::OnNodeWillBeReparented(ui::AXTree* tree, ui::AXNode* node) { AXEventGenerator::OnNodeWillBeReparented(tree, node); // BrowserAccessibility should probably ask the tree source for the AXNode via // an id rather than weakly holding a pointer to a AXNode that might have been // destroyed under the hood and re-created later on. Treat this as a delete to // make things work. 
if (id_wrapper_map_.find(node->id()) == id_wrapper_map_.end()) return; GetFromAXNode(node)->Destroy(); id_wrapper_map_.erase(node->id()); } void BrowserAccessibilityManager::OnSubtreeWillBeReparented(ui::AXTree* tree, ui::AXNode* node) { AXEventGenerator::OnSubtreeWillBeReparented(tree, node); // BrowserAccessibility should probably ask the tree source for the AXNode via // an id rather than weakly holding a pointer to a AXNode that might have been // destroyed under the hood and re-created later on. Treat this as a delete to // make things work. DCHECK(node); BrowserAccessibility* obj = GetFromAXNode(node); if (obj) obj->OnSubtreeWillBeDeleted(); } void BrowserAccessibilityManager::OnNodeCreated(ui::AXTree* tree, ui::AXNode* node) { AXEventGenerator::OnNodeCreated(tree, node); BrowserAccessibility* wrapper = factory_->Create(); wrapper->Init(this, node); id_wrapper_map_[node->id()] = wrapper; wrapper->OnDataChanged(); } void BrowserAccessibilityManager::OnNodeReparented(ui::AXTree* tree, ui::AXNode* node) { AXEventGenerator::OnNodeReparented(tree, node); // BrowserAccessibility should probably ask the tree source for the AXNode via // an id rather than weakly holding a pointer to a AXNode that might have been // destroyed under the hood and re-created later on. Treat this as a create to // make things work. 
BrowserAccessibility* wrapper = factory_->Create(); wrapper->Init(this, node); id_wrapper_map_[node->id()] = wrapper; wrapper->OnDataChanged(); } void BrowserAccessibilityManager::OnNodeChanged(ui::AXTree* tree, ui::AXNode* node) { AXEventGenerator::OnNodeChanged(tree, node); DCHECK(node); GetFromAXNode(node)->OnDataChanged(); } void BrowserAccessibilityManager::OnAtomicUpdateFinished( ui::AXTree* tree, bool root_changed, const std::vector<ui::AXTreeDelegate::Change>& changes) { AXEventGenerator::OnAtomicUpdateFinished(tree, root_changed, changes); bool ax_tree_id_changed = false; if (GetTreeData().tree_id != -1 && GetTreeData().tree_id != ax_tree_id_) { g_ax_tree_id_map.Get().erase(ax_tree_id_); ax_tree_id_ = GetTreeData().tree_id; g_ax_tree_id_map.Get().insert(std::make_pair(ax_tree_id_, this)); ax_tree_id_changed = true; } // Whenever the tree ID or the root of this tree changes we may need to // fire an event on our parent node in the parent tree to ensure that // we're properly connected. if (ax_tree_id_changed || root_changed) connected_to_parent_tree_node_ = false; // When the root changes and this is the root manager, we may need to // fire a new focus event. 
if (root_changed && last_focused_manager_ == this) { last_focused_node_ = nullptr; last_focused_manager_ = nullptr; } } BrowserAccessibilityManager* BrowserAccessibilityManager::GetRootManager() { BrowserAccessibility* parent = GetParentNodeFromParentTree(); if (!parent) return this; return parent->manager()->GetRootManager(); } BrowserAccessibilityDelegate* BrowserAccessibilityManager::GetDelegateFromRootManager() { BrowserAccessibilityManager* root_manager = GetRootManager(); if (root_manager) return root_manager->delegate(); return nullptr; } bool BrowserAccessibilityManager::IsRootTree() { return delegate() && delegate()->AccessibilityGetAcceleratedWidget(); } ui::AXTreeUpdate BrowserAccessibilityManager::SnapshotAXTreeForTesting() { std::unique_ptr< ui::AXTreeSource<const ui::AXNode*, ui::AXNodeData, ui::AXTreeData>> tree_source(tree_->CreateTreeSource()); ui::AXTreeSerializer<const ui::AXNode*, ui::AXNodeData, ui::AXTreeData> serializer(tree_source.get()); ui::AXTreeUpdate update; serializer.SerializeChanges(tree_->root(), &update); return update; } void BrowserAccessibilityManager::UseCustomDeviceScaleFactorForTesting( float device_scale_factor) { use_custom_device_scale_factor_for_testing_ = true; device_scale_factor_ = device_scale_factor; } BrowserAccessibility* BrowserAccessibilityManager::CachingAsyncHitTest( const gfx::Point& screen_point) { gfx::Point scaled_point = IsUseZoomForDSFEnabled() ? ScaleToRoundedPoint(screen_point, device_scale_factor()) : screen_point; BrowserAccessibilityManager* root_manager = GetRootManager(); if (root_manager && root_manager != this) return root_manager->CachingAsyncHitTest(scaled_point); if (delegate()) { // This triggers an asynchronous request to compute the true object that's // under |scaled_point|. HitTest(scaled_point - GetViewBounds().OffsetFromOrigin()); // Unfortunately we still have to return an answer synchronously because // the APIs were designed that way. 
The best case scenario is that the // screen point is within the bounds of the last result we got from a // call to AccessibilityHitTest - in that case, we can return that object! if (last_hover_bounds_.Contains(scaled_point)) { BrowserAccessibilityManager* manager = BrowserAccessibilityManager::FromID(last_hover_ax_tree_id_); if (manager) { BrowserAccessibility* node = manager->GetFromID(last_hover_node_id_); if (node) return node; } } } // If that test failed we have to fall back on searching the accessibility // tree locally for the best bounding box match. This is generally right // for simple pages but wrong in cases of z-index, overflow, and other // more complicated layouts. The hope is that if the user is moving the // mouse, this fallback will only be used transiently, and the asynchronous // result will be used for the next call. return GetRoot()->ApproximateHitTest(screen_point); } void BrowserAccessibilityManager::CacheHitTestResult( BrowserAccessibility* hit_test_result) { // Walk up to the highest ancestor that's a leaf node; we don't want to // return a node that's hidden from the tree. BrowserAccessibility* parent = hit_test_result->PlatformGetParent(); while (parent) { if (parent->PlatformChildCount() == 0) hit_test_result = parent; parent = parent->PlatformGetParent(); } last_hover_ax_tree_id_ = hit_test_result->manager()->ax_tree_id(); last_hover_node_id_ = hit_test_result->GetId(); last_hover_bounds_ = hit_test_result->GetClippedScreenBoundsRect(); } } // namespace content
null
null
null
null
21,298
51,369
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
51,369
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef UI_EVENTS_GESTURE_DETECTION_GESTURE_PROVIDER_H_ #define UI_EVENTS_GESTURE_DETECTION_GESTURE_PROVIDER_H_ #include <memory> #include "ui/display/display.h" #include "ui/events/gesture_detection/gesture_detection_export.h" #include "ui/events/gesture_detection/gesture_detector.h" #include "ui/events/gesture_detection/gesture_event_data.h" #include "ui/events/gesture_detection/gesture_touch_uma_histogram.h" #include "ui/events/gesture_detection/scale_gesture_detector.h" #include "ui/events/gesture_detection/snap_scroll_controller.h" namespace ui { class GESTURE_DETECTION_EXPORT GestureProviderClient { public: virtual ~GestureProviderClient() {} virtual void OnGestureEvent(const GestureEventData& gesture) = 0; }; // Given a stream of |MotionEvent|'s, provides gesture detection and gesture // event dispatch. class GESTURE_DETECTION_EXPORT GestureProvider { public: struct GESTURE_DETECTION_EXPORT Config { Config(); Config(const Config& other); ~Config(); display::Display display; GestureDetector::Config gesture_detector_config; ScaleGestureDetector::Config scale_gesture_detector_config; // Whether double-tap detection is supported by the platform. If disabled, // there will be no delay before tap events. Defaults to true. bool double_tap_support_for_platform_enabled; // If |gesture_begin_end_types_enabled| is true, fire an ET_GESTURE_BEGIN // event for every added touch point, and an ET_GESTURE_END event for every // removed touch point. This requires one ACTION_CANCEL event to be sent per // touch point, which only occurs on Aura. Defaults to false. bool gesture_begin_end_types_enabled; // The min and max size (both length and width, in dips) of the generated // bounding box for all gesture types. This is useful for touch streams // that may report zero or unreasonably small or large touch sizes. 
// Note that these bounds are only applied for touch or unknown tool types; // mouse and stylus-derived gestures will not be affected. // Both values default to 0 (disabled). float min_gesture_bounds_length; float max_gesture_bounds_length; }; GestureProvider(const Config& config, GestureProviderClient* client); ~GestureProvider(); // Handle the incoming MotionEvent, returning false if the event could not // be handled. bool OnTouchEvent(const MotionEvent& event); // Reset any active gesture detection, including detection of timeout-based // events (e.g., double-tap or delayed tap) for which the pointer has already // been released. void ResetDetection(); // Update whether multi-touch pinch zoom is supported by the platform. void SetMultiTouchZoomSupportEnabled(bool enabled); // Update whether double-tap gestures are supported by the platform. void SetDoubleTapSupportForPlatformEnabled(bool enabled); // Update whether double-tap gesture detection should be suppressed, e.g., // if the page scale is fixed or the page has a mobile viewport. This disables // the tap delay, allowing rapid and responsive single-tap gestures. void SetDoubleTapSupportForPageEnabled(bool enabled); // Whether a scroll gesture is in-progress. bool IsScrollInProgress() const; // Whether a pinch gesture is in-progress (i.e. a pinch update has been // forwarded and detection is still active). bool IsPinchInProgress() const; // Whether a double-tap gesture is in-progress (either double-tap or // double-tap drag zoom). bool IsDoubleTapInProgress() const; // May be NULL if there is no currently active touch sequence. 
const ui::MotionEvent* current_down_event() const { return current_down_event_.get(); } private: bool CanHandle(const MotionEvent& event) const; void OnTouchEventHandlingBegin(const MotionEvent& event); void OnTouchEventHandlingEnd(const MotionEvent& event); void UpdateDoubleTapDetectionSupport(); class GestureListenerImpl; std::unique_ptr<GestureListenerImpl> gesture_listener_; std::unique_ptr<MotionEvent> current_down_event_; // Logs information on touch and gesture events. GestureTouchUMAHistogram uma_histogram_; // Whether double-tap gesture detection is currently supported. bool double_tap_support_for_page_; bool double_tap_support_for_platform_; const bool gesture_begin_end_types_enabled_; }; } // namespace ui #endif // UI_EVENTS_GESTURE_DETECTION_GESTURE_PROVIDER_H_
null
null
null
null
48,232
27,373
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
192,368
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * addi_apci_16xx.c * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module. * Project manager: S. Weber * * ADDI-DATA GmbH * Dieselstrasse 3 * D-77833 Ottersweier * Tel: +19(0)7223/9493-0 * Fax: +49(0)7223/9493-92 * http://www.addi-data.com * [email protected] * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/module.h> #include "../comedi_pci.h" /* * Register I/O map */ #define APCI16XX_IN_REG(x) (((x) * 4) + 0x08) #define APCI16XX_OUT_REG(x) (((x) * 4) + 0x14) #define APCI16XX_DIR_REG(x) (((x) * 4) + 0x20) enum apci16xx_boardid { BOARD_APCI1648, BOARD_APCI1696, }; struct apci16xx_boardinfo { const char *name; int n_chan; }; static const struct apci16xx_boardinfo apci16xx_boardtypes[] = { [BOARD_APCI1648] = { .name = "apci1648", .n_chan = 48, /* 2 subdevices */ }, [BOARD_APCI1696] = { .name = "apci1696", .n_chan = 96, /* 3 subdevices */ }, }; static int apci16xx_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); unsigned int mask; int ret; if (chan < 8) mask = 0x000000ff; else if (chan < 16) mask = 0x0000ff00; else if (chan < 24) mask = 0x00ff0000; else mask = 0xff000000; ret = comedi_dio_insn_config(dev, s, insn, data, mask); if (ret) return ret; outl(s->io_bits, dev->iobase + APCI16XX_DIR_REG(s->index)); return insn->n; } static int apci16xx_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if 
(comedi_dio_update_state(s, data)) outl(s->state, dev->iobase + APCI16XX_OUT_REG(s->index)); data[1] = inl(dev->iobase + APCI16XX_IN_REG(s->index)); return insn->n; } static int apci16xx_auto_attach(struct comedi_device *dev, unsigned long context) { struct pci_dev *pcidev = comedi_to_pci_dev(dev); const struct apci16xx_boardinfo *board = NULL; struct comedi_subdevice *s; unsigned int n_subdevs; unsigned int last; int i; int ret; if (context < ARRAY_SIZE(apci16xx_boardtypes)) board = &apci16xx_boardtypes[context]; if (!board) return -ENODEV; dev->board_ptr = board; dev->board_name = board->name; ret = comedi_pci_enable(dev); if (ret) return ret; dev->iobase = pci_resource_start(pcidev, 0); /* * Work out the nubmer of subdevices needed to support all the * digital i/o channels on the board. Each subdevice supports * up to 32 channels. */ n_subdevs = board->n_chan / 32; if ((n_subdevs * 32) < board->n_chan) { last = board->n_chan - (n_subdevs * 32); n_subdevs++; } else { last = 0; } ret = comedi_alloc_subdevices(dev, n_subdevs); if (ret) return ret; /* Initialize the TTL digital i/o subdevices */ for (i = 0; i < n_subdevs; i++) { s = &dev->subdevices[i]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = ((i * 32) < board->n_chan) ? 
32 : last; s->maxdata = 1; s->range_table = &range_digital; s->insn_config = apci16xx_insn_config; s->insn_bits = apci16xx_dio_insn_bits; /* Default all channels to inputs */ s->io_bits = 0; outl(s->io_bits, dev->iobase + APCI16XX_DIR_REG(i)); } return 0; } static struct comedi_driver apci16xx_driver = { .driver_name = "addi_apci_16xx", .module = THIS_MODULE, .auto_attach = apci16xx_auto_attach, .detach = comedi_pci_detach, }; static int apci16xx_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { return comedi_pci_auto_config(dev, &apci16xx_driver, id->driver_data); } static const struct pci_device_id apci16xx_pci_table[] = { { PCI_VDEVICE(ADDIDATA, 0x1009), BOARD_APCI1648 }, { PCI_VDEVICE(ADDIDATA, 0x100a), BOARD_APCI1696 }, { 0 } }; MODULE_DEVICE_TABLE(pci, apci16xx_pci_table); static struct pci_driver apci16xx_pci_driver = { .name = "addi_apci_16xx", .id_table = apci16xx_pci_table, .probe = apci16xx_pci_probe, .remove = comedi_pci_auto_unconfig, }; module_comedi_pci_driver(apci16xx_driver, apci16xx_pci_driver); MODULE_DESCRIPTION("ADDI-DATA APCI-1648/1696, TTL I/O boards"); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_LICENSE("GPL");
null
null
null
null
100,715
34,351
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
34,351
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_RENDERER_CORE_PAINT_LIST_ITEM_PAINTER_H_ #define THIRD_PARTY_BLINK_RENDERER_CORE_PAINT_LIST_ITEM_PAINTER_H_ #include "third_party/blink/renderer/platform/wtf/allocator.h" namespace blink { struct PaintInfo; class LayoutListItem; class LayoutPoint; class ListItemPainter { STACK_ALLOCATED(); public: ListItemPainter(const LayoutListItem& layout_list_item) : layout_list_item_(layout_list_item) {} void Paint(const PaintInfo&, const LayoutPoint& paint_offset); private: const LayoutListItem& layout_list_item_; }; } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_CORE_PAINT_LIST_ITEM_PAINTER_H_
null
null
null
null
31,214
41,297
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
206,292
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef _LINUX_KERNEL_VTIME_H #define _LINUX_KERNEL_VTIME_H #include <linux/context_tracking_state.h> #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #include <asm/vtime.h> #endif struct task_struct; /* * vtime_accounting_cpu_enabled() definitions/declarations */ #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) static inline bool vtime_accounting_cpu_enabled(void) { return true; } #elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN) /* * Checks if vtime is enabled on some CPU. Cputime readers want to be careful * in that case and compute the tickless cputime. * For now vtime state is tied to context tracking. We might want to decouple * those later if necessary. */ static inline bool vtime_accounting_enabled(void) { return context_tracking_is_enabled(); } static inline bool vtime_accounting_cpu_enabled(void) { if (vtime_accounting_enabled()) { if (context_tracking_cpu_is_enabled()) return true; } return false; } #else /* !CONFIG_VIRT_CPU_ACCOUNTING */ static inline bool vtime_accounting_cpu_enabled(void) { return false; } #endif /* * Common vtime APIs */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING #ifdef __ARCH_HAS_VTIME_TASK_SWITCH extern void vtime_task_switch(struct task_struct *prev); #else extern void vtime_common_task_switch(struct task_struct *prev); static inline void vtime_task_switch(struct task_struct *prev) { if (vtime_accounting_cpu_enabled()) vtime_common_task_switch(prev); } #endif /* __ARCH_HAS_VTIME_TASK_SWITCH */ extern void vtime_account_system(struct task_struct *tsk); extern void vtime_account_idle(struct task_struct *tsk); #else /* !CONFIG_VIRT_CPU_ACCOUNTING */ static inline void vtime_task_switch(struct task_struct *prev) { } static inline void vtime_account_system(struct task_struct *tsk) { } #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void arch_vtime_task_switch(struct task_struct *tsk); extern void vtime_account_user(struct task_struct *tsk); extern void vtime_user_enter(struct task_struct *tsk); static inline void 
vtime_user_exit(struct task_struct *tsk) { vtime_account_user(tsk); } extern void vtime_guest_enter(struct task_struct *tsk); extern void vtime_guest_exit(struct task_struct *tsk); extern void vtime_init_idle(struct task_struct *tsk, int cpu); #else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */ static inline void vtime_account_user(struct task_struct *tsk) { } static inline void vtime_user_enter(struct task_struct *tsk) { } static inline void vtime_user_exit(struct task_struct *tsk) { } static inline void vtime_guest_enter(struct task_struct *tsk) { } static inline void vtime_guest_exit(struct task_struct *tsk) { } static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } #endif #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE extern void vtime_account_irq_enter(struct task_struct *tsk); static inline void vtime_account_irq_exit(struct task_struct *tsk) { /* On hard|softirq exit we always account to hard|softirq cputime */ vtime_account_system(tsk); } extern void vtime_flush(struct task_struct *tsk); #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ static inline void vtime_account_irq_enter(struct task_struct *tsk) { } static inline void vtime_account_irq_exit(struct task_struct *tsk) { } static inline void vtime_flush(struct task_struct *tsk) { } #endif #ifdef CONFIG_IRQ_TIME_ACCOUNTING extern void irqtime_account_irq(struct task_struct *tsk); #else static inline void irqtime_account_irq(struct task_struct *tsk) { } #endif static inline void account_irq_enter_time(struct task_struct *tsk) { vtime_account_irq_enter(tsk); irqtime_account_irq(tsk); } static inline void account_irq_exit_time(struct task_struct *tsk) { vtime_account_irq_exit(tsk); irqtime_account_irq(tsk); } #endif /* _LINUX_KERNEL_VTIME_H */
null
null
null
null
114,639
70,316
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
70,316
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef DEVICE_USB_MOJO_DEVICE_IMPL_H_ #define DEVICE_USB_MOJO_DEVICE_IMPL_H_ #include <stdint.h> #include <vector> #include "base/callback_forward.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "base/memory/weak_ptr.h" #include "base/scoped_observer.h" #include "device/usb/mojo/permission_provider.h" #include "device/usb/public/mojom/device.mojom.h" #include "device/usb/usb_device.h" #include "device/usb/usb_device_handle.h" #include "mojo/public/cpp/bindings/strong_binding.h" namespace device { namespace usb { class PermissionProvider; // Implementation of the public Device interface. Instances of this class are // constructed by DeviceManagerImpl and are strongly bound to their MessagePipe // lifetime. class DeviceImpl : public mojom::UsbDevice, public device::UsbDevice::Observer { public: static void Create(scoped_refptr<device::UsbDevice> device, base::WeakPtr<PermissionProvider> permission_provider, mojom::UsbDeviceRequest request); ~DeviceImpl() override; private: DeviceImpl(scoped_refptr<device::UsbDevice> device, base::WeakPtr<PermissionProvider> permission_provider); // Closes the device if it's open. This will always set |device_handle_| to // null. void CloseHandle(); // Checks interface permissions for control transfers. bool HasControlTransferPermission( mojom::UsbControlTransferRecipient recipient, uint16_t index); // Handles completion of an open request. 
static void OnOpen(base::WeakPtr<DeviceImpl> device, OpenCallback callback, scoped_refptr<device::UsbDeviceHandle> handle); void OnPermissionGrantedForOpen(OpenCallback callback, bool granted); // Device implementation: void Open(OpenCallback callback) override; void Close(CloseCallback callback) override; void SetConfiguration(uint8_t value, SetConfigurationCallback callback) override; void ClaimInterface(uint8_t interface_number, ClaimInterfaceCallback callback) override; void ReleaseInterface(uint8_t interface_number, ReleaseInterfaceCallback callback) override; void SetInterfaceAlternateSetting( uint8_t interface_number, uint8_t alternate_setting, SetInterfaceAlternateSettingCallback callback) override; void Reset(ResetCallback callback) override; void ClearHalt(uint8_t endpoint, ClearHaltCallback callback) override; void ControlTransferIn(mojom::UsbControlTransferParamsPtr params, uint32_t length, uint32_t timeout, ControlTransferInCallback callback) override; void ControlTransferOut(mojom::UsbControlTransferParamsPtr params, const std::vector<uint8_t>& data, uint32_t timeout, ControlTransferOutCallback callback) override; void GenericTransferIn(uint8_t endpoint_number, uint32_t length, uint32_t timeout, GenericTransferInCallback callback) override; void GenericTransferOut(uint8_t endpoint_number, const std::vector<uint8_t>& data, uint32_t timeout, GenericTransferOutCallback callback) override; void IsochronousTransferIn(uint8_t endpoint_number, const std::vector<uint32_t>& packet_lengths, uint32_t timeout, IsochronousTransferInCallback callback) override; void IsochronousTransferOut(uint8_t endpoint_number, const std::vector<uint8_t>& data, const std::vector<uint32_t>& packet_lengths, uint32_t timeout, IsochronousTransferOutCallback callback) override; // device::UsbDevice::Observer implementation: void OnDeviceRemoved(scoped_refptr<device::UsbDevice> device) override; const scoped_refptr<device::UsbDevice> device_; base::WeakPtr<PermissionProvider> 
permission_provider_; ScopedObserver<device::UsbDevice, device::UsbDevice::Observer> observer_; // The device handle. Will be null before the device is opened and after it // has been closed. scoped_refptr<UsbDeviceHandle> device_handle_; mojo::StrongBindingPtr<mojom::UsbDevice> binding_; base::WeakPtrFactory<DeviceImpl> weak_factory_; DISALLOW_COPY_AND_ASSIGN(DeviceImpl); }; } // namespace usb } // namespace device #endif // DEVICE_USB_MOJO_DEVICE_IMPL_H_
null
null
null
null
67,179
11,375
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
176,370
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel_stat.h> #include <linux/uaccess.h> #include <hv/drv_pcie_rc_intf.h> #include <arch/spr_def.h> #include <asm/traps.h> #include <linux/perf_event.h> /* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */ #define IS_HW_CLEARED 1 /* * The set of interrupts we enable for arch_local_irq_enable(). * This is initialized to have just a single interrupt that the kernel * doesn't actually use as a sentinel. During kernel init, * interrupts are added as the kernel gets prepared to support them. * NOTE: we could probably initialize them all statically up front. */ DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) = INITIAL_INTERRUPTS_ENABLED; EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask); /* Define per-tile device interrupt statistics state. */ DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp; EXPORT_PER_CPU_SYMBOL(irq_stat); /* * Define per-tile irq disable mask; the hardware/HV only has a single * mask that we use to implement both masking and disabling. */ static DEFINE_PER_CPU(unsigned long, irq_disable_mask) ____cacheline_internodealigned_in_smp; /* * Per-tile IRQ nesting depth. Used to make sure we enable newly * enabled IRQs before exiting the outermost interrupt. */ static DEFINE_PER_CPU(int, irq_depth); #if CHIP_HAS_IPI() /* Use SPRs to manipulate device interrupts. 
*/ #define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask) #define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask) #define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask) #else /* Use HV to manipulate device interrupts. */ #define mask_irqs(irq_mask) hv_disable_intr(irq_mask) #define unmask_irqs(irq_mask) hv_enable_intr(irq_mask) #define clear_irqs(irq_mask) hv_clear_intr(irq_mask) #endif /* * The interrupt handling path, implemented in terms of HV interrupt * emulation on TILEPro, and IPI hardware on TILE-Gx. * Entered with interrupts disabled. */ void tile_dev_intr(struct pt_regs *regs, int intnum) { int depth = __this_cpu_inc_return(irq_depth); unsigned long original_irqs; unsigned long remaining_irqs; struct pt_regs *old_regs; #if CHIP_HAS_IPI() /* * Pending interrupts are listed in an SPR. We might be * nested, so be sure to only handle irqs that weren't already * masked by a previous interrupt. Then, mask out the ones * we're going to handle. */ unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K); original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked; __insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs); #else /* * Hypervisor performs the equivalent of the Gx code above and * then puts the pending interrupt mask into a system save reg * for us to find. */ original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3); #endif remaining_irqs = original_irqs; /* Track time spent here in an interrupt context. */ old_regs = set_irq_regs(regs); irq_enter(); #ifdef CONFIG_DEBUG_STACKOVERFLOW /* Debugging check for stack overflow: less than 1/8th stack free? 
*/ { long sp = stack_pointer - (long) current_thread_info(); if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { pr_emerg("%s: stack overflow: %ld\n", __func__, sp - sizeof(struct thread_info)); dump_stack(); } } #endif while (remaining_irqs) { unsigned long irq = __ffs(remaining_irqs); remaining_irqs &= ~(1UL << irq); /* Count device irqs; Linux IPIs are counted elsewhere. */ if (irq != IRQ_RESCHEDULE) __this_cpu_inc(irq_stat.irq_dev_intr_count); generic_handle_irq(irq); } /* * If we weren't nested, turn on all enabled interrupts, * including any that were reenabled during interrupt * handling. */ if (depth == 1) unmask_irqs(~__this_cpu_read(irq_disable_mask)); __this_cpu_dec(irq_depth); /* * Track time spent against the current process again and * process any softirqs if they are waiting. */ irq_exit(); set_irq_regs(old_regs); } /* * Remove an irq from the disabled mask. If we're in an interrupt * context, defer enabling the HW interrupt until we leave. */ static void tile_irq_chip_enable(struct irq_data *d) { get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq); if (__this_cpu_read(irq_depth) == 0) unmask_irqs(1UL << d->irq); put_cpu_var(irq_disable_mask); } /* * Add an irq to the disabled mask. We disable the HW interrupt * immediately so that there's no possibility of it firing. If we're * in an interrupt context, the return path is careful to avoid * unmasking a newly disabled interrupt. */ static void tile_irq_chip_disable(struct irq_data *d) { get_cpu_var(irq_disable_mask) |= (1UL << d->irq); mask_irqs(1UL << d->irq); put_cpu_var(irq_disable_mask); } /* Mask an interrupt. */ static void tile_irq_chip_mask(struct irq_data *d) { mask_irqs(1UL << d->irq); } /* Unmask an interrupt. */ static void tile_irq_chip_unmask(struct irq_data *d) { unmask_irqs(1UL << d->irq); } /* * Clear an interrupt before processing it so that any new assertions * will trigger another irq. 
*/ static void tile_irq_chip_ack(struct irq_data *d) { if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED) clear_irqs(1UL << d->irq); } /* * For per-cpu interrupts, we need to avoid unmasking any interrupts * that we disabled via disable_percpu_irq(). */ static void tile_irq_chip_eoi(struct irq_data *d) { if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq))) unmask_irqs(1UL << d->irq); } static struct irq_chip tile_irq_chip = { .name = "tile_irq_chip", .irq_enable = tile_irq_chip_enable, .irq_disable = tile_irq_chip_disable, .irq_ack = tile_irq_chip_ack, .irq_eoi = tile_irq_chip_eoi, .irq_mask = tile_irq_chip_mask, .irq_unmask = tile_irq_chip_unmask, }; void __init init_IRQ(void) { ipi_init(); } void setup_irq_regs(void) { /* Enable interrupt delivery. */ unmask_irqs(~0UL); #if CHIP_HAS_IPI() arch_local_irq_unmask(INT_IPI_K); #endif } void tile_irq_activate(unsigned int irq, int tile_irq_type) { /* * We use handle_level_irq() by default because the pending * interrupt vector (whether modeled by the HV on * TILEPro or implemented in hardware on TILE-Gx) has * level-style semantics for each bit. An interrupt fires * whenever a bit is high, not just at edges. */ irq_flow_handler_t handle = handle_level_irq; if (tile_irq_type == TILE_IRQ_PERCPU) handle = handle_percpu_irq; irq_set_chip_and_handler(irq, &tile_irq_chip, handle); /* * Flag interrupts that are hardware-cleared so that ack() * won't clear them. 
*/ if (tile_irq_type == TILE_IRQ_HW_CLEAR) irq_set_chip_data(irq, (void *)IS_HW_CLEARED); } EXPORT_SYMBOL(tile_irq_activate); void ack_bad_irq(unsigned int irq) { pr_err("unexpected IRQ trap at vector %02x\n", irq); } /* * /proc/interrupts printing: */ int arch_show_interrupts(struct seq_file *p, int prec) { #ifdef CONFIG_PERF_EVENTS int i; seq_printf(p, "%*s: ", prec, "PMI"); for_each_online_cpu(i) seq_printf(p, "%10llu ", per_cpu(perf_irqs, i)); seq_puts(p, " perf_events\n"); #endif return 0; } #if CHIP_HAS_IPI() int arch_setup_hwirq(unsigned int irq, int node) { return irq >= NR_IRQS ? -EINVAL : 0; } void arch_teardown_hwirq(unsigned int irq) { } #endif
null
null
null
null
84,717
19,218
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
184,213
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* $Id: cosa.h,v 1.6 1999/01/06 14:02:44 kas Exp $ */ /* * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef COSA_H__ #define COSA_H__ #include <linux/ioctl.h> #ifdef __KERNEL__ /* status register - output bits */ #define SR_RX_DMA_ENA 0x04 /* receiver DMA enable bit */ #define SR_TX_DMA_ENA 0x08 /* transmitter DMA enable bit */ #define SR_RST 0x10 /* SRP reset */ #define SR_USR_INT_ENA 0x20 /* user interrupt enable bit */ #define SR_TX_INT_ENA 0x40 /* transmitter interrupt enable bit */ #define SR_RX_INT_ENA 0x80 /* receiver interrupt enable bit */ /* status register - input bits */ #define SR_USR_RQ 0x20 /* user interrupt request pending */ #define SR_TX_RDY 0x40 /* transmitter empty (ready) */ #define SR_RX_RDY 0x80 /* receiver data ready */ #define SR_UP_REQUEST 0x02 /* request from SRP to transfer data up to PC */ #define SR_DOWN_REQUEST 0x01 /* SRP is able to transfer data down from PC to SRP */ #define SR_END_OF_TRANSFER 0x03 /* SRP signalize end of transfer (up or down) */ #define SR_CMD_FROM_SRP_MASK 0x03 /* mask to get SRP command */ /* bits in driver status byte definitions : */ #define SR_RDY_RCV 0x01 /* ready to receive packet */ #define SR_RDY_SND 0x02 /* ready to send packet */ #define SR_CMD_PND 0x04 /* command pending */ /* not currently used */ /* 
???? */ #define SR_PKT_UP 0x01 /* transfer of packet up in progress */ #define SR_PKT_DOWN 0x02 /* transfer of packet down in progress */ #endif /* __KERNEL__ */ #define SR_LOAD_ADDR 0x4400 /* SRP microcode load address */ #define SR_START_ADDR 0x4400 /* SRP microcode start address */ #define COSA_LOAD_ADDR 0x400 /* SRP microcode load address */ #define COSA_MAX_FIRMWARE_SIZE 0x10000 /* ioctls */ struct cosa_download { int addr, len; char __user *code; }; /* Reset the device */ #define COSAIORSET _IO('C',0xf0) /* Start microcode at given address */ #define COSAIOSTRT _IOW('C',0xf1, int) /* Read the block from the device memory */ #define COSAIORMEM _IOWR('C',0xf2, struct cosa_download *) /* actually the struct cosa_download itself; this is to keep * the ioctl number same as in 2.4 in order to keep the user-space * utils compatible. */ /* Write the block to the device memory (i.e. download the microcode) */ #define COSAIODOWNLD _IOW('C',0xf2, struct cosa_download *) /* actually the struct cosa_download itself; this is to keep * the ioctl number same as in 2.4 in order to keep the user-space * utils compatible. */ /* Read the device type (one of "srp", "cosa", and "cosa8" for now) */ #define COSAIORTYPE _IOR('C',0xf3, char *) /* Read the device identification string */ #define COSAIORIDSTR _IOR('C',0xf4, char *) /* Maximum length of the identification string. 
*/ #define COSA_MAX_ID_STRING 128 /* Increment/decrement the module usage count :-) */ /* #define COSAIOMINC _IO('C',0xf5) */ /* #define COSAIOMDEC _IO('C',0xf6) */ /* Get the total number of cards installed */ #define COSAIONRCARDS _IO('C',0xf7) /* Get the number of channels on this card */ #define COSAIONRCHANS _IO('C',0xf8) /* Set the driver for the bus-master operations */ #define COSAIOBMSET _IOW('C', 0xf9, unsigned short) #define COSA_BM_OFF 0 /* Bus-mastering off - use ISA DMA (default) */ #define COSA_BM_ON 1 /* Bus-mastering on - faster but untested */ /* Gets the busmaster status */ #define COSAIOBMGET _IO('C', 0xfa) #endif /* !COSA_H__ */
null
null
null
null
92,560
25,223
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
25,223
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef EXTENSIONS_COMMON_API_SOCKETS_SOCKETS_MANIFEST_DATA_H_ #define EXTENSIONS_COMMON_API_SOCKETS_SOCKETS_MANIFEST_DATA_H_ #include <vector> #include "base/strings/string16.h" #include "extensions/common/extension.h" #include "extensions/common/manifest_handler.h" namespace content { struct SocketPermissionRequest; } namespace extensions { class SocketsManifestPermission; } namespace extensions { // The parsed form of the "sockets" manifest entry. class SocketsManifestData : public Extension::ManifestData { public: explicit SocketsManifestData( std::unique_ptr<SocketsManifestPermission> permission); ~SocketsManifestData() override; // Gets the SocketsManifestData for |extension|, or NULL if none was // specified. static SocketsManifestData* Get(const Extension* extension); static bool CheckRequest(const Extension* extension, const content::SocketPermissionRequest& request); // Tries to construct the info based on |value|, as it would have appeared in // the manifest. Sets |error| and returns an empty scoped_ptr on failure. static std::unique_ptr<SocketsManifestData> FromValue( const base::Value& value, base::string16* error); const SocketsManifestPermission* permission() const { return permission_.get(); } private: std::unique_ptr<SocketsManifestPermission> permission_; }; } // namespace extensions #endif // EXTENSIONS_COMMON_API_SOCKETS_SOCKETS_MANIFEST_DATA_H_
null
null
null
null
22,086
20,842
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
20,842
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/shell/browser/layout_test/layout_test_javascript_dialog_manager.h" #include <utility> #include "base/command_line.h" #include "base/logging.h" #include "base/strings/utf_string_conversions.h" #include "content/public/browser/web_contents.h" #include "content/shell/browser/layout_test/blink_test_controller.h" #include "content/shell/browser/shell_javascript_dialog.h" #include "content/shell/common/shell_switches.h" namespace content { LayoutTestJavaScriptDialogManager::LayoutTestJavaScriptDialogManager() { } LayoutTestJavaScriptDialogManager::~LayoutTestJavaScriptDialogManager() { } void LayoutTestJavaScriptDialogManager::RunJavaScriptDialog( WebContents* web_contents, RenderFrameHost* render_frame_host, JavaScriptDialogType dialog_type, const base::string16& message_text, const base::string16& default_prompt_text, DialogClosedCallback callback, bool* did_suppress_message) { std::move(callback).Run(true, base::string16()); } void LayoutTestJavaScriptDialogManager::RunBeforeUnloadDialog( WebContents* web_contents, RenderFrameHost* render_frame_host, bool is_reload, DialogClosedCallback callback) { std::move(callback).Run(true, base::string16()); } } // namespace content
null
null
null
null
17,705
25,304
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
25,304
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef EXTENSIONS_COMMON_MANIFEST_HANDLERS_MIME_TYPES_HANDLER_H_ #define EXTENSIONS_COMMON_MANIFEST_HANDLERS_MIME_TYPES_HANDLER_H_ #include <set> #include <string> #include <vector> #include "extensions/common/extension.h" #include "extensions/common/manifest_handler.h" class MimeTypesHandler { public: // Returns list of extensions' ids that are allowed to use MIME type filters. static std::vector<std::string> GetMIMETypeWhitelist(); static MimeTypesHandler* GetHandler(const extensions::Extension* extension); MimeTypesHandler(); ~MimeTypesHandler(); // extension id std::string extension_id() const { return extension_id_; } void set_extension_id(const std::string& extension_id) { extension_id_ = extension_id; } // Adds a MIME type filter to the handler. void AddMIMEType(const std::string& mime_type); // Tests if the handler has registered a filter for the MIME type. bool CanHandleMIMEType(const std::string& mime_type) const; // Set the URL that will be used to handle MIME type requests. void set_handler_url(const std::string& handler_url) { handler_url_ = handler_url; } // The URL that will be used to handle MIME type requests. const std::string& handler_url() const { return handler_url_; } const std::set<std::string>& mime_type_set() const { return mime_type_set_; } // Returns true if this MimeTypesHandler has a plugin associated with it (for // the mimeHandlerPrivate API). Returns false if the MimeTypesHandler is for // the streamsPrivate API. bool HasPlugin() const; // If HasPlugin() returns true, this will return the plugin path for the // plugin associated with this MimeTypesHandler. base::FilePath GetPluginPath() const; private: // The id for the extension this action belongs to (as defined in the // extension manifest). std::string extension_id_; // A list of MIME type filters. 
std::set<std::string> mime_type_set_; std::string handler_url_; }; class MimeTypesHandlerParser : public extensions::ManifestHandler { public: MimeTypesHandlerParser(); ~MimeTypesHandlerParser() override; bool Parse(extensions::Extension* extension, base::string16* error) override; private: const std::vector<std::string> Keys() const override; }; #endif // EXTENSIONS_COMMON_MANIFEST_HANDLERS_MIME_TYPES_HANDLER_H_
null
null
null
null
22,167
33,211
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
33,211
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * Copyright (C) 2010 Google, Inc. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef THIRD_PARTY_BLINK_RENDERER_CORE_DOM_PARSER_CONTENT_POLICY_H_ #define THIRD_PARTY_BLINK_RENDERER_CORE_DOM_PARSER_CONTENT_POLICY_H_ namespace blink { enum ParserContentPolicy { kDisallowScriptingAndPluginContent, kAllowScriptingContent, kAllowScriptingContentAndDoNotMarkAlreadyStarted, }; static inline bool ScriptingContentIsAllowed( ParserContentPolicy parser_content_policy) { return parser_content_policy == kAllowScriptingContent || parser_content_policy == kAllowScriptingContentAndDoNotMarkAlreadyStarted; } static inline bool PluginContentIsAllowed( ParserContentPolicy parser_content_policy) { return parser_content_policy != kDisallowScriptingAndPluginContent; } } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_CORE_DOM_PARSER_CONTENT_POLICY_H_
null
null
null
null
30,074
44,198
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
209,193
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright 2014 Maxime Ripard * * Maxime Ripard <[email protected]> * * This file is dual-licensed: you can use it either under the terms * of the GPL or the X11 license, at your option. Note that this dual * licensing only applies to this file, and not this project as a * whole. * * a) This file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This file is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this file; if not, write to the Free * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, * MA 02110-1301 USA * * Or, alternatively, * * b) Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #ifndef __DT_BINDINGS_DMA_SUN4I_A10_H_ #define __DT_BINDINGS_DMA_SUN4I_A10_H_ #define SUN4I_DMA_NORMAL 0 #define SUN4I_DMA_DEDICATED 1 #endif /* __DT_BINDINGS_DMA_SUN4I_A10_H_ */
null
null
null
null
117,540
26,261
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
191,256
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright © 2016 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/frame.h> #include <asm/hypervisor.h> #include "drmP.h" #include "vmwgfx_msg.h" #define MESSAGE_STATUS_SUCCESS 0x0001 #define MESSAGE_STATUS_DORECV 0x0002 #define MESSAGE_STATUS_CPT 0x0010 #define MESSAGE_STATUS_HB 0x0080 #define RPCI_PROTOCOL_NUM 0x49435052 #define GUESTMSG_FLAG_COOKIE 0x80000000 #define RETRIES 3 #define VMW_HYPERVISOR_MAGIC 0x564D5868 #define VMW_HYPERVISOR_PORT 0x5658 #define VMW_HYPERVISOR_HB_PORT 0x5659 #define VMW_PORT_CMD_MSG 30 #define VMW_PORT_CMD_HB_MSG 0 #define VMW_PORT_CMD_OPEN_CHANNEL (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG) #define VMW_PORT_CMD_CLOSE_CHANNEL (MSG_TYPE_CLOSE << 16 | VMW_PORT_CMD_MSG) #define VMW_PORT_CMD_SENDSIZE (MSG_TYPE_SENDSIZE << 16 | VMW_PORT_CMD_MSG) #define VMW_PORT_CMD_RECVSIZE (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG) #define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG) #define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16) static u32 vmw_msg_enabled = 1; enum rpc_msg_type { MSG_TYPE_OPEN, MSG_TYPE_SENDSIZE, MSG_TYPE_SENDPAYLOAD, MSG_TYPE_RECVSIZE, MSG_TYPE_RECVPAYLOAD, MSG_TYPE_RECVSTATUS, MSG_TYPE_CLOSE, }; struct rpc_channel { u16 channel_id; u32 cookie_high; u32 cookie_low; }; /** * vmw_open_channel * * @channel: RPC channel * @protocol: * * Returns: 0 on success */ static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol) { unsigned long eax, ebx, ecx, edx, si = 0, di = 0; VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL, (protocol | GUESTMSG_FLAG_COOKIE), si, di, VMW_HYPERVISOR_PORT, VMW_HYPERVISOR_MAGIC, eax, ebx, ecx, edx, si, di); if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) return -EINVAL; channel->channel_id = HIGH_WORD(edx); channel->cookie_high = si; channel->cookie_low = di; return 0; } /** * vmw_close_channel * * @channel: RPC channel * * Returns: 0 on success */ static int vmw_close_channel(struct rpc_channel *channel) { unsigned long eax, ebx, ecx, edx, si, di; 
/* Set up additional parameters */ si = channel->cookie_high; di = channel->cookie_low; VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL, 0, si, di, (VMW_HYPERVISOR_PORT | (channel->channel_id << 16)), VMW_HYPERVISOR_MAGIC, eax, ebx, ecx, edx, si, di); if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) return -EINVAL; return 0; } /** * vmw_send_msg: Sends a message to the host * * @channel: RPC channel * @logmsg: NULL terminated string * * Returns: 0 on success */ static int vmw_send_msg(struct rpc_channel *channel, const char *msg) { unsigned long eax, ebx, ecx, edx, si, di, bp; size_t msg_len = strlen(msg); int retries = 0; while (retries < RETRIES) { retries++; /* Set up additional parameters */ si = channel->cookie_high; di = channel->cookie_low; VMW_PORT(VMW_PORT_CMD_SENDSIZE, msg_len, si, di, VMW_HYPERVISOR_PORT | (channel->channel_id << 16), VMW_HYPERVISOR_MAGIC, eax, ebx, ecx, edx, si, di); if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 || (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) { /* Expected success + high-bandwidth. Give up. */ return -EINVAL; } /* Send msg */ si = (uintptr_t) msg; di = channel->cookie_low; bp = channel->cookie_high; VMW_PORT_HB_OUT( (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, msg_len, si, di, VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16), VMW_HYPERVISOR_MAGIC, bp, eax, ebx, ecx, edx, si, di); if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) { return 0; } else if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry. */ continue; } else { break; } } return -EINVAL; } STACK_FRAME_NON_STANDARD(vmw_send_msg); /** * vmw_recv_msg: Receives a message from the host * * Note: It is the caller's responsibility to call kfree() on msg. 
* * @channel: channel opened by vmw_open_channel * @msg: [OUT] message received from the host * @msg_len: message length */ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, size_t *msg_len) { unsigned long eax, ebx, ecx, edx, si, di, bp; char *reply; size_t reply_len; int retries = 0; *msg_len = 0; *msg = NULL; while (retries < RETRIES) { retries++; /* Set up additional parameters */ si = channel->cookie_high; di = channel->cookie_low; VMW_PORT(VMW_PORT_CMD_RECVSIZE, 0, si, di, (VMW_HYPERVISOR_PORT | (channel->channel_id << 16)), VMW_HYPERVISOR_MAGIC, eax, ebx, ecx, edx, si, di); if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 || (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) { DRM_ERROR("Failed to get reply size\n"); return -EINVAL; } /* No reply available. This is okay. */ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_DORECV) == 0) return 0; reply_len = ebx; reply = kzalloc(reply_len + 1, GFP_KERNEL); if (reply == NULL) { DRM_ERROR("Cannot allocate memory for reply\n"); return -ENOMEM; } /* Receive buffer */ si = channel->cookie_high; di = (uintptr_t) reply; bp = channel->cookie_low; VMW_PORT_HB_IN( (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, reply_len, si, di, VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16), VMW_HYPERVISOR_MAGIC, bp, eax, ebx, ecx, edx, si, di); if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) { kfree(reply); if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry. */ continue; } return -EINVAL; } reply[reply_len] = '\0'; /* Ack buffer */ si = channel->cookie_high; di = channel->cookie_low; VMW_PORT(VMW_PORT_CMD_RECVSTATUS, MESSAGE_STATUS_SUCCESS, si, di, (VMW_HYPERVISOR_PORT | (channel->channel_id << 16)), VMW_HYPERVISOR_MAGIC, eax, ebx, ecx, edx, si, di); if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) { kfree(reply); if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry. 
*/ continue; } return -EINVAL; } break; } if (retries == RETRIES) return -EINVAL; *msg_len = reply_len; *msg = reply; return 0; } STACK_FRAME_NON_STANDARD(vmw_recv_msg); /** * vmw_host_get_guestinfo: Gets a GuestInfo parameter * * Gets the value of a GuestInfo.* parameter. The value returned will be in * a string, and it is up to the caller to post-process. * * @guest_info_param: Parameter to get, e.g. GuestInfo.svga.gl3 * @buffer: if NULL, *reply_len will contain reply size. * @length: size of the reply_buf. Set to size of reply upon return * * Returns: 0 on success */ int vmw_host_get_guestinfo(const char *guest_info_param, char *buffer, size_t *length) { struct rpc_channel channel; char *msg, *reply = NULL; size_t msg_len, reply_len = 0; int ret = 0; if (!vmw_msg_enabled) return -ENODEV; if (!guest_info_param || !length) return -EINVAL; msg_len = strlen(guest_info_param) + strlen("info-get ") + 1; msg = kzalloc(msg_len, GFP_KERNEL); if (msg == NULL) { DRM_ERROR("Cannot allocate memory to get %s", guest_info_param); return -ENOMEM; } sprintf(msg, "info-get %s", guest_info_param); if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) || vmw_send_msg(&channel, msg) || vmw_recv_msg(&channel, (void *) &reply, &reply_len) || vmw_close_channel(&channel)) { DRM_ERROR("Failed to get %s", guest_info_param); ret = -EINVAL; } if (buffer && reply && reply_len > 0) { /* Remove reply code, which are the first 2 characters of * the reply */ reply_len = max(reply_len - 2, (size_t) 0); reply_len = min(reply_len, *length); if (reply_len > 0) memcpy(buffer, reply + 2, reply_len); } *length = reply_len; kfree(reply); kfree(msg); return ret; } /** * vmw_host_log: Sends a log message to the host * * @log: NULL terminated string * * Returns: 0 on success */ int vmw_host_log(const char *log) { struct rpc_channel channel; char *msg; int msg_len; int ret = 0; if (!vmw_msg_enabled) return -ENODEV; if (!log) return ret; msg_len = strlen(log) + strlen("log ") + 1; msg = kzalloc(msg_len, 
GFP_KERNEL); if (msg == NULL) { DRM_ERROR("Cannot allocate memory for log message\n"); return -ENOMEM; } sprintf(msg, "log %s", log); if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) || vmw_send_msg(&channel, msg) || vmw_close_channel(&channel)) { DRM_ERROR("Failed to send log\n"); ret = -EINVAL; } kfree(msg); return ret; }
null
null
null
null
99,603
1,098
null
train_val
c536b6be1a72aefd632d5530106a67c516cb9f4b
257,485
openssl
0
https://github.com/openssl/openssl
2016-09-22 23:12:38+01:00
/* * Copyright 2001-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include <openssl/opensslconf.h> #ifdef OPENSSL_NO_OCSP NON_EMPTY_TRANSLATION_UNIT #else # ifdef OPENSSL_SYS_VMS # define _XOPEN_SOURCE_EXTENDED/* So fd_set and friends get properly defined * on OpenVMS */ # endif # define USE_SOCKETS # include <stdio.h> # include <stdlib.h> # include <string.h> # include <time.h> # include <ctype.h> /* Needs to be included before the openssl headers */ # include "apps.h" # include <openssl/e_os2.h> # include <openssl/crypto.h> # include <openssl/err.h> # include <openssl/ssl.h> # include <openssl/evp.h> # include <openssl/bn.h> # include <openssl/x509v3.h> # if defined(NETWARE_CLIB) # ifdef NETWARE_BSDSOCK # include <sys/socket.h> # include <sys/bsdskt.h> # else # include <novsock2.h> # endif # elif defined(NETWARE_LIBC) # ifdef NETWARE_BSDSOCK # include <sys/select.h> # else # include <novsock2.h> # endif # endif /* Maximum leeway in validity period: default 5 minutes */ # define MAX_VALIDITY_PERIOD (5 * 60) static int add_ocsp_cert(OCSP_REQUEST **req, X509 *cert, const EVP_MD *cert_id_md, X509 *issuer, STACK_OF(OCSP_CERTID) *ids); static int add_ocsp_serial(OCSP_REQUEST **req, char *serial, const EVP_MD *cert_id_md, X509 *issuer, STACK_OF(OCSP_CERTID) *ids); static void print_ocsp_summary(BIO *out, OCSP_BASICRESP *bs, OCSP_REQUEST *req, STACK_OF(OPENSSL_STRING) *names, STACK_OF(OCSP_CERTID) *ids, long nsec, long maxage); static void make_ocsp_response(OCSP_RESPONSE **resp, OCSP_REQUEST *req, CA_DB *db, X509 *ca, X509 *rcert, EVP_PKEY *rkey, const EVP_MD *md, STACK_OF(X509) *rother, unsigned long flags, int nmin, int ndays, int badsig); static char **lookup_serial(CA_DB *db, ASN1_INTEGER *ser); static BIO 
*init_responder(const char *port); static int do_responder(OCSP_REQUEST **preq, BIO **pcbio, BIO *acbio); static int send_ocsp_response(BIO *cbio, OCSP_RESPONSE *resp); # ifndef OPENSSL_NO_SOCK static OCSP_RESPONSE *query_responder(BIO *cbio, const char *host, const char *path, const STACK_OF(CONF_VALUE) *headers, OCSP_REQUEST *req, int req_timeout); # endif typedef enum OPTION_choice { OPT_ERR = -1, OPT_EOF = 0, OPT_HELP, OPT_OUTFILE, OPT_TIMEOUT, OPT_URL, OPT_HOST, OPT_PORT, OPT_IGNORE_ERR, OPT_NOVERIFY, OPT_NONCE, OPT_NO_NONCE, OPT_RESP_NO_CERTS, OPT_RESP_KEY_ID, OPT_NO_CERTS, OPT_NO_SIGNATURE_VERIFY, OPT_NO_CERT_VERIFY, OPT_NO_CHAIN, OPT_NO_CERT_CHECKS, OPT_NO_EXPLICIT, OPT_TRUST_OTHER, OPT_NO_INTERN, OPT_BADSIG, OPT_TEXT, OPT_REQ_TEXT, OPT_RESP_TEXT, OPT_REQIN, OPT_RESPIN, OPT_SIGNER, OPT_VAFILE, OPT_SIGN_OTHER, OPT_VERIFY_OTHER, OPT_CAFILE, OPT_CAPATH, OPT_NOCAFILE, OPT_NOCAPATH, OPT_VALIDITY_PERIOD, OPT_STATUS_AGE, OPT_SIGNKEY, OPT_REQOUT, OPT_RESPOUT, OPT_PATH, OPT_ISSUER, OPT_CERT, OPT_SERIAL, OPT_INDEX, OPT_CA, OPT_NMIN, OPT_REQUEST, OPT_NDAYS, OPT_RSIGNER, OPT_RKEY, OPT_ROTHER, OPT_RMD, OPT_HEADER, OPT_V_ENUM, OPT_MD } OPTION_CHOICE; OPTIONS ocsp_options[] = { {"help", OPT_HELP, '-', "Display this summary"}, {"out", OPT_OUTFILE, '>', "Output filename"}, {"timeout", OPT_TIMEOUT, 'p', "Connection timeout (in seconds) to the OCSP responder"}, {"url", OPT_URL, 's', "Responder URL"}, {"host", OPT_HOST, 's', "TCP/IP hostname:port to connect to"}, {"port", OPT_PORT, 'p', "Port to run responder on"}, {"ignore_err", OPT_IGNORE_ERR, '-', "Ignore Error response from OCSP responder, and retry "}, {"noverify", OPT_NOVERIFY, '-', "Don't verify response at all"}, {"nonce", OPT_NONCE, '-', "Add OCSP nonce to request"}, {"no_nonce", OPT_NO_NONCE, '-', "Don't add OCSP nonce to request"}, {"resp_no_certs", OPT_RESP_NO_CERTS, '-', "Don't include any certificates in response"}, {"resp_key_id", OPT_RESP_KEY_ID, '-', "Identify response by signing certificate key ID"}, 
{"no_certs", OPT_NO_CERTS, '-', "Don't include any certificates in signed request"}, {"no_signature_verify", OPT_NO_SIGNATURE_VERIFY, '-', "Don't check signature on response"}, {"no_cert_verify", OPT_NO_CERT_VERIFY, '-', "Don't check signing certificate"}, {"no_chain", OPT_NO_CHAIN, '-', "Don't chain verify response"}, {"no_cert_checks", OPT_NO_CERT_CHECKS, '-', "Don't do additional checks on signing certificate"}, {"no_explicit", OPT_NO_EXPLICIT, '-', "Do not explicitly check the chain, just verify the root"}, {"trust_other", OPT_TRUST_OTHER, '-', "Don't verify additional certificates"}, {"no_intern", OPT_NO_INTERN, '-', "Don't search certificates contained in response for signer"}, {"badsig", OPT_BADSIG, '-', "Corrupt last byte of loaded OSCP response signature (for test)"}, {"text", OPT_TEXT, '-', "Print text form of request and response"}, {"req_text", OPT_REQ_TEXT, '-', "Print text form of request"}, {"resp_text", OPT_RESP_TEXT, '-', "Print text form of response"}, {"reqin", OPT_REQIN, 's', "File with the DER-encoded request"}, {"respin", OPT_RESPIN, 's', "File with the DER-encoded response"}, {"signer", OPT_SIGNER, '<', "Certificate to sign OCSP request with"}, {"VAfile", OPT_VAFILE, '<', "Validator certificates file"}, {"sign_other", OPT_SIGN_OTHER, '<', "Additional certificates to include in signed request"}, {"verify_other", OPT_VERIFY_OTHER, '<', "Additional certificates to search for signer"}, {"CAfile", OPT_CAFILE, '<', "Trusted certificates file"}, {"CApath", OPT_CAPATH, '<', "Trusted certificates directory"}, {"no-CAfile", OPT_NOCAFILE, '-', "Do not load the default certificates file"}, {"no-CApath", OPT_NOCAPATH, '-', "Do not load certificates from the default certificates directory"}, {"validity_period", OPT_VALIDITY_PERIOD, 'u', "Maximum validity discrepancy in seconds"}, {"status_age", OPT_STATUS_AGE, 'p', "Maximum status age in seconds"}, {"signkey", OPT_SIGNKEY, 's', "Private key to sign OCSP request with"}, {"reqout", OPT_REQOUT, 's', "Output 
file for the DER-encoded request"}, {"respout", OPT_RESPOUT, 's', "Output file for the DER-encoded response"}, {"path", OPT_PATH, 's', "Path to use in OCSP request"}, {"issuer", OPT_ISSUER, '<', "Issuer certificate"}, {"cert", OPT_CERT, '<', "Certificate to check"}, {"serial", OPT_SERIAL, 's', "Serial number to check"}, {"index", OPT_INDEX, '<', "Certificate status index file"}, {"CA", OPT_CA, '<', "CA certificate"}, {"nmin", OPT_NMIN, 'p', "Number of minutes before next update"}, {"nrequest", OPT_REQUEST, 'p', "Number of requests to accept (default unlimited)"}, {"ndays", OPT_NDAYS, 'p', "Number of days before next update"}, {"rsigner", OPT_RSIGNER, '<', "Responder certificate to sign responses with"}, {"rkey", OPT_RKEY, '<', "Responder key to sign responses with"}, {"rother", OPT_ROTHER, '<', "Other certificates to include in response"}, {"rmd", OPT_RMD, 's', "Digest Algorithm to use in signature of OCSP response"}, {"header", OPT_HEADER, 's', "key=value header to add"}, {"", OPT_MD, '-', "Any supported digest algorithm (sha1,sha256, ... 
)"}, OPT_V_OPTIONS, {NULL} }; int ocsp_main(int argc, char **argv) { BIO *acbio = NULL, *cbio = NULL, *derbio = NULL, *out = NULL; const EVP_MD *cert_id_md = NULL, *rsign_md = NULL; int trailing_md = 0; CA_DB *rdb = NULL; EVP_PKEY *key = NULL, *rkey = NULL; OCSP_BASICRESP *bs = NULL; OCSP_REQUEST *req = NULL; OCSP_RESPONSE *resp = NULL; STACK_OF(CONF_VALUE) *headers = NULL; STACK_OF(OCSP_CERTID) *ids = NULL; STACK_OF(OPENSSL_STRING) *reqnames = NULL; STACK_OF(X509) *sign_other = NULL, *verify_other = NULL, *rother = NULL; STACK_OF(X509) *issuers = NULL; X509 *issuer = NULL, *cert = NULL, *rca_cert = NULL; X509 *signer = NULL, *rsigner = NULL; X509_STORE *store = NULL; X509_VERIFY_PARAM *vpm = NULL; const char *CAfile = NULL, *CApath = NULL; char *header, *value; char *host = NULL, *port = NULL, *path = "/", *outfile = NULL; char *rca_filename = NULL, *reqin = NULL, *respin = NULL; char *reqout = NULL, *respout = NULL, *ridx_filename = NULL; char *rsignfile = NULL, *rkeyfile = NULL; char *sign_certfile = NULL, *verify_certfile = NULL, *rcertfile = NULL; char *signfile = NULL, *keyfile = NULL; char *thost = NULL, *tport = NULL, *tpath = NULL; int noCAfile = 0, noCApath = 0; int accept_count = -1, add_nonce = 1, noverify = 0, use_ssl = -1; int vpmtouched = 0, badsig = 0, i, ignore_err = 0, nmin = 0, ndays = -1; int req_text = 0, resp_text = 0, ret = 1; #ifndef OPENSSL_NO_SOCK int req_timeout = -1; #endif long nsec = MAX_VALIDITY_PERIOD, maxage = -1; unsigned long sign_flags = 0, verify_flags = 0, rflags = 0; OPTION_CHOICE o; char *prog; reqnames = sk_OPENSSL_STRING_new_null(); if (!reqnames) goto end; ids = sk_OCSP_CERTID_new_null(); if (!ids) goto end; if ((vpm = X509_VERIFY_PARAM_new()) == NULL) return 1; prog = opt_init(argc, argv, ocsp_options); while ((o = opt_next()) != OPT_EOF) { switch (o) { case OPT_EOF: case OPT_ERR: opthelp: BIO_printf(bio_err, "%s: Use -help for summary.\n", prog); goto end; case OPT_HELP: ret = 0; opt_help(ocsp_options); goto end; case 
OPT_OUTFILE: outfile = opt_arg(); break; case OPT_TIMEOUT: #ifndef OPENSSL_NO_SOCK req_timeout = atoi(opt_arg()); #endif break; case OPT_URL: OPENSSL_free(thost); OPENSSL_free(tport); OPENSSL_free(tpath); thost = tport = tpath = NULL; if (!OCSP_parse_url(opt_arg(), &host, &port, &path, &use_ssl)) { BIO_printf(bio_err, "%s Error parsing URL\n", prog); goto end; } thost = host; tport = port; tpath = path; break; case OPT_HOST: host = opt_arg(); break; case OPT_PORT: port = opt_arg(); break; case OPT_IGNORE_ERR: ignore_err = 1; break; case OPT_NOVERIFY: noverify = 1; break; case OPT_NONCE: add_nonce = 2; break; case OPT_NO_NONCE: add_nonce = 0; break; case OPT_RESP_NO_CERTS: rflags |= OCSP_NOCERTS; break; case OPT_RESP_KEY_ID: rflags |= OCSP_RESPID_KEY; break; case OPT_NO_CERTS: sign_flags |= OCSP_NOCERTS; break; case OPT_NO_SIGNATURE_VERIFY: verify_flags |= OCSP_NOSIGS; break; case OPT_NO_CERT_VERIFY: verify_flags |= OCSP_NOVERIFY; break; case OPT_NO_CHAIN: verify_flags |= OCSP_NOCHAIN; break; case OPT_NO_CERT_CHECKS: verify_flags |= OCSP_NOCHECKS; break; case OPT_NO_EXPLICIT: verify_flags |= OCSP_NOEXPLICIT; break; case OPT_TRUST_OTHER: verify_flags |= OCSP_TRUSTOTHER; break; case OPT_NO_INTERN: verify_flags |= OCSP_NOINTERN; break; case OPT_BADSIG: badsig = 1; break; case OPT_TEXT: req_text = resp_text = 1; break; case OPT_REQ_TEXT: req_text = 1; break; case OPT_RESP_TEXT: resp_text = 1; break; case OPT_REQIN: reqin = opt_arg(); break; case OPT_RESPIN: respin = opt_arg(); break; case OPT_SIGNER: signfile = opt_arg(); break; case OPT_VAFILE: verify_certfile = opt_arg(); verify_flags |= OCSP_TRUSTOTHER; break; case OPT_SIGN_OTHER: sign_certfile = opt_arg(); break; case OPT_VERIFY_OTHER: verify_certfile = opt_arg(); break; case OPT_CAFILE: CAfile = opt_arg(); break; case OPT_CAPATH: CApath = opt_arg(); break; case OPT_NOCAFILE: noCAfile = 1; break; case OPT_NOCAPATH: noCApath = 1; break; case OPT_V_CASES: if (!opt_verify(o, vpm)) goto end; vpmtouched++; break; case 
OPT_VALIDITY_PERIOD: opt_long(opt_arg(), &nsec); break; case OPT_STATUS_AGE: opt_long(opt_arg(), &maxage); break; case OPT_SIGNKEY: keyfile = opt_arg(); break; case OPT_REQOUT: reqout = opt_arg(); break; case OPT_RESPOUT: respout = opt_arg(); break; case OPT_PATH: path = opt_arg(); break; case OPT_ISSUER: issuer = load_cert(opt_arg(), FORMAT_PEM, "issuer certificate"); if (issuer == NULL) goto end; if (issuers == NULL) { if ((issuers = sk_X509_new_null()) == NULL) goto end; } sk_X509_push(issuers, issuer); break; case OPT_CERT: X509_free(cert); cert = load_cert(opt_arg(), FORMAT_PEM, "certificate"); if (cert == NULL) goto end; if (cert_id_md == NULL) cert_id_md = EVP_sha1(); if (!add_ocsp_cert(&req, cert, cert_id_md, issuer, ids)) goto end; if (!sk_OPENSSL_STRING_push(reqnames, opt_arg())) goto end; trailing_md = 0; break; case OPT_SERIAL: if (cert_id_md == NULL) cert_id_md = EVP_sha1(); if (!add_ocsp_serial(&req, opt_arg(), cert_id_md, issuer, ids)) goto end; if (!sk_OPENSSL_STRING_push(reqnames, opt_arg())) goto end; trailing_md = 0; break; case OPT_INDEX: ridx_filename = opt_arg(); break; case OPT_CA: rca_filename = opt_arg(); break; case OPT_NMIN: opt_int(opt_arg(), &nmin); if (ndays == -1) ndays = 0; break; case OPT_REQUEST: opt_int(opt_arg(), &accept_count); break; case OPT_NDAYS: ndays = atoi(opt_arg()); break; case OPT_RSIGNER: rsignfile = opt_arg(); break; case OPT_RKEY: rkeyfile = opt_arg(); break; case OPT_ROTHER: rcertfile = opt_arg(); break; case OPT_RMD: /* Response MessageDigest */ if (!opt_md(opt_arg(), &rsign_md)) goto end; break; case OPT_HEADER: header = opt_arg(); value = strchr(header, '='); if (value == NULL) { BIO_printf(bio_err, "Missing = in header key=value\n"); goto opthelp; } *value++ = '\0'; if (!X509V3_add_value(header, value, &headers)) goto end; break; case OPT_MD: if (trailing_md) { BIO_printf(bio_err, "%s: Digest must be before -cert or -serial\n", prog); goto opthelp; } if (!opt_md(opt_unknown(), &cert_id_md)) goto opthelp; 
trailing_md = 1; break; } } if (trailing_md) { BIO_printf(bio_err, "%s: Digest must be before -cert or -serial\n", prog); goto opthelp; } argc = opt_num_rest(); if (argc != 0) goto opthelp; /* Have we anything to do? */ if (!req && !reqin && !respin && !(port && ridx_filename)) goto opthelp; out = bio_open_default(outfile, 'w', FORMAT_TEXT); if (out == NULL) goto end; if (!req && (add_nonce != 2)) add_nonce = 0; if (!req && reqin) { derbio = bio_open_default(reqin, 'r', FORMAT_ASN1); if (derbio == NULL) goto end; req = d2i_OCSP_REQUEST_bio(derbio, NULL); BIO_free(derbio); if (!req) { BIO_printf(bio_err, "Error reading OCSP request\n"); goto end; } } if (!req && port) { acbio = init_responder(port); if (!acbio) goto end; } if (rsignfile) { if (!rkeyfile) rkeyfile = rsignfile; rsigner = load_cert(rsignfile, FORMAT_PEM, "responder certificate"); if (!rsigner) { BIO_printf(bio_err, "Error loading responder certificate\n"); goto end; } rca_cert = load_cert(rca_filename, FORMAT_PEM, "CA certificate"); if (rcertfile) { if (!load_certs(rcertfile, &rother, FORMAT_PEM, NULL, "responder other certificates")) goto end; } rkey = load_key(rkeyfile, FORMAT_PEM, 0, NULL, NULL, "responder private key"); if (!rkey) goto end; } if (acbio) BIO_printf(bio_err, "Waiting for OCSP client connections...\n"); redo_accept: if (acbio) { if (!do_responder(&req, &cbio, acbio)) goto end; if (!req) { resp = OCSP_response_create(OCSP_RESPONSE_STATUS_MALFORMEDREQUEST, NULL); send_ocsp_response(cbio, resp); goto done_resp; } } if (!req && (signfile || reqout || host || add_nonce || ridx_filename)) { BIO_printf(bio_err, "Need an OCSP request for this operation!\n"); goto end; } if (req && add_nonce) OCSP_request_add1_nonce(req, NULL, -1); if (signfile) { if (!keyfile) keyfile = signfile; signer = load_cert(signfile, FORMAT_PEM, "signer certificate"); if (!signer) { BIO_printf(bio_err, "Error loading signer certificate\n"); goto end; } if (sign_certfile) { if (!load_certs(sign_certfile, &sign_other, 
FORMAT_PEM, NULL, "signer certificates")) goto end; } key = load_key(keyfile, FORMAT_PEM, 0, NULL, NULL, "signer private key"); if (!key) goto end; if (!OCSP_request_sign (req, signer, key, NULL, sign_other, sign_flags)) { BIO_printf(bio_err, "Error signing OCSP request\n"); goto end; } } if (req_text && req) OCSP_REQUEST_print(out, req, 0); if (reqout) { derbio = bio_open_default(reqout, 'w', FORMAT_ASN1); if (derbio == NULL) goto end; i2d_OCSP_REQUEST_bio(derbio, req); BIO_free(derbio); } if (ridx_filename && (!rkey || !rsigner || !rca_cert)) { BIO_printf(bio_err, "Need a responder certificate, key and CA for this operation!\n"); goto end; } if (ridx_filename && !rdb) { rdb = load_index(ridx_filename, NULL); if (!rdb) goto end; if (!index_index(rdb)) goto end; } if (rdb) { make_ocsp_response(&resp, req, rdb, rca_cert, rsigner, rkey, rsign_md, rother, rflags, nmin, ndays, badsig); if (cbio) send_ocsp_response(cbio, resp); } else if (host) { # ifndef OPENSSL_NO_SOCK resp = process_responder(req, host, path, port, use_ssl, headers, req_timeout); if (!resp) goto end; # else BIO_printf(bio_err, "Error creating connect BIO - sockets not supported.\n"); goto end; # endif } else if (respin) { derbio = bio_open_default(respin, 'r', FORMAT_ASN1); if (derbio == NULL) goto end; resp = d2i_OCSP_RESPONSE_bio(derbio, NULL); BIO_free(derbio); if (!resp) { BIO_printf(bio_err, "Error reading OCSP response\n"); goto end; } } else { ret = 0; goto end; } done_resp: if (respout) { derbio = bio_open_default(respout, 'w', FORMAT_ASN1); if (derbio == NULL) goto end; i2d_OCSP_RESPONSE_bio(derbio, resp); BIO_free(derbio); } i = OCSP_response_status(resp); if (i != OCSP_RESPONSE_STATUS_SUCCESSFUL) { BIO_printf(out, "Responder Error: %s (%d)\n", OCSP_response_status_str(i), i); if (ignore_err) goto redo_accept; ret = 0; goto end; } if (resp_text) OCSP_RESPONSE_print(out, resp, 0); /* If running as responder don't verify our own response */ if (cbio) { /* If not unlimited, see if we took all 
we should. */ if (accept_count != -1 && --accept_count <= 0) { ret = 0; goto end; } BIO_free_all(cbio); cbio = NULL; OCSP_REQUEST_free(req); req = NULL; OCSP_RESPONSE_free(resp); resp = NULL; goto redo_accept; } if (ridx_filename) { ret = 0; goto end; } if (!store) { store = setup_verify(CAfile, CApath, noCAfile, noCApath); if (!store) goto end; } if (vpmtouched) X509_STORE_set1_param(store, vpm); if (verify_certfile) { if (!load_certs(verify_certfile, &verify_other, FORMAT_PEM, NULL, "validator certificate")) goto end; } bs = OCSP_response_get1_basic(resp); if (!bs) { BIO_printf(bio_err, "Error parsing response\n"); goto end; } ret = 0; if (!noverify) { if (req && ((i = OCSP_check_nonce(req, bs)) <= 0)) { if (i == -1) BIO_printf(bio_err, "WARNING: no nonce in response\n"); else { BIO_printf(bio_err, "Nonce Verify error\n"); ret = 1; goto end; } } i = OCSP_basic_verify(bs, verify_other, store, verify_flags); if (i <= 0 && issuers) { i = OCSP_basic_verify(bs, issuers, store, OCSP_TRUSTOTHER); if (i > 0) ERR_clear_error(); } if (i <= 0) { BIO_printf(bio_err, "Response Verify Failure\n"); ERR_print_errors(bio_err); ret = 1; } else BIO_printf(bio_err, "Response verify OK\n"); } print_ocsp_summary(out, bs, req, reqnames, ids, nsec, maxage); end: ERR_print_errors(bio_err); X509_free(signer); X509_STORE_free(store); X509_VERIFY_PARAM_free(vpm); EVP_PKEY_free(key); EVP_PKEY_free(rkey); X509_free(cert); sk_X509_pop_free(issuers, X509_free); X509_free(rsigner); X509_free(rca_cert); free_index(rdb); BIO_free_all(cbio); BIO_free_all(acbio); BIO_free(out); OCSP_REQUEST_free(req); OCSP_RESPONSE_free(resp); OCSP_BASICRESP_free(bs); sk_OPENSSL_STRING_free(reqnames); sk_OCSP_CERTID_free(ids); sk_X509_pop_free(sign_other, X509_free); sk_X509_pop_free(verify_other, X509_free); sk_CONF_VALUE_pop_free(headers, X509V3_conf_free); OPENSSL_free(thost); OPENSSL_free(tport); OPENSSL_free(tpath); return (ret); } static int add_ocsp_cert(OCSP_REQUEST **req, X509 *cert, const EVP_MD 
*cert_id_md, X509 *issuer, STACK_OF(OCSP_CERTID) *ids) { OCSP_CERTID *id; if (!issuer) { BIO_printf(bio_err, "No issuer certificate specified\n"); return 0; } if (*req == NULL) *req = OCSP_REQUEST_new(); if (*req == NULL) goto err; id = OCSP_cert_to_id(cert_id_md, cert, issuer); if (!id || !sk_OCSP_CERTID_push(ids, id)) goto err; if (!OCSP_request_add0_id(*req, id)) goto err; return 1; err: BIO_printf(bio_err, "Error Creating OCSP request\n"); return 0; } static int add_ocsp_serial(OCSP_REQUEST **req, char *serial, const EVP_MD *cert_id_md, X509 *issuer, STACK_OF(OCSP_CERTID) *ids) { OCSP_CERTID *id; X509_NAME *iname; ASN1_BIT_STRING *ikey; ASN1_INTEGER *sno; if (!issuer) { BIO_printf(bio_err, "No issuer certificate specified\n"); return 0; } if (*req == NULL) *req = OCSP_REQUEST_new(); if (*req == NULL) goto err; iname = X509_get_subject_name(issuer); ikey = X509_get0_pubkey_bitstr(issuer); sno = s2i_ASN1_INTEGER(NULL, serial); if (!sno) { BIO_printf(bio_err, "Error converting serial number %s\n", serial); return 0; } id = OCSP_cert_id_new(cert_id_md, iname, ikey, sno); ASN1_INTEGER_free(sno); if (id == NULL || !sk_OCSP_CERTID_push(ids, id)) goto err; if (!OCSP_request_add0_id(*req, id)) goto err; return 1; err: BIO_printf(bio_err, "Error Creating OCSP request\n"); return 0; } static void print_ocsp_summary(BIO *out, OCSP_BASICRESP *bs, OCSP_REQUEST *req, STACK_OF(OPENSSL_STRING) *names, STACK_OF(OCSP_CERTID) *ids, long nsec, long maxage) { OCSP_CERTID *id; const char *name; int i, status, reason; ASN1_GENERALIZEDTIME *rev, *thisupd, *nextupd; if (!bs || !req || !sk_OPENSSL_STRING_num(names) || !sk_OCSP_CERTID_num(ids)) return; for (i = 0; i < sk_OCSP_CERTID_num(ids); i++) { id = sk_OCSP_CERTID_value(ids, i); name = sk_OPENSSL_STRING_value(names, i); BIO_printf(out, "%s: ", name); if (!OCSP_resp_find_status(bs, id, &status, &reason, &rev, &thisupd, &nextupd)) { BIO_puts(out, "ERROR: No Status found.\n"); continue; } /* * Check validity: if invalid write to output 
BIO so we know which * response this refers to. */ if (!OCSP_check_validity(thisupd, nextupd, nsec, maxage)) { BIO_puts(out, "WARNING: Status times invalid.\n"); ERR_print_errors(out); } BIO_printf(out, "%s\n", OCSP_cert_status_str(status)); BIO_puts(out, "\tThis Update: "); ASN1_GENERALIZEDTIME_print(out, thisupd); BIO_puts(out, "\n"); if (nextupd) { BIO_puts(out, "\tNext Update: "); ASN1_GENERALIZEDTIME_print(out, nextupd); BIO_puts(out, "\n"); } if (status != V_OCSP_CERTSTATUS_REVOKED) continue; if (reason != -1) BIO_printf(out, "\tReason: %s\n", OCSP_crl_reason_str(reason)); BIO_puts(out, "\tRevocation Time: "); ASN1_GENERALIZEDTIME_print(out, rev); BIO_puts(out, "\n"); } } static void make_ocsp_response(OCSP_RESPONSE **resp, OCSP_REQUEST *req, CA_DB *db, X509 *ca, X509 *rcert, EVP_PKEY *rkey, const EVP_MD *rmd, STACK_OF(X509) *rother, unsigned long flags, int nmin, int ndays, int badsig) { ASN1_TIME *thisupd = NULL, *nextupd = NULL; OCSP_CERTID *cid, *ca_id = NULL; OCSP_BASICRESP *bs = NULL; int i, id_count; id_count = OCSP_request_onereq_count(req); if (id_count <= 0) { *resp = OCSP_response_create(OCSP_RESPONSE_STATUS_MALFORMEDREQUEST, NULL); goto end; } bs = OCSP_BASICRESP_new(); thisupd = X509_gmtime_adj(NULL, 0); if (ndays != -1) nextupd = X509_time_adj_ex(NULL, ndays, nmin * 60, NULL); /* Examine each certificate id in the request */ for (i = 0; i < id_count; i++) { OCSP_ONEREQ *one; ASN1_INTEGER *serial; char **inf; ASN1_OBJECT *cert_id_md_oid; const EVP_MD *cert_id_md; one = OCSP_request_onereq_get0(req, i); cid = OCSP_onereq_get0_id(one); OCSP_id_get0_info(NULL, &cert_id_md_oid, NULL, NULL, cid); cert_id_md = EVP_get_digestbyobj(cert_id_md_oid); if (!cert_id_md) { *resp = OCSP_response_create(OCSP_RESPONSE_STATUS_INTERNALERROR, NULL); goto end; } OCSP_CERTID_free(ca_id); ca_id = OCSP_cert_to_id(cert_id_md, NULL, ca); /* Is this request about our CA? 
*/ if (OCSP_id_issuer_cmp(ca_id, cid)) { OCSP_basic_add1_status(bs, cid, V_OCSP_CERTSTATUS_UNKNOWN, 0, NULL, thisupd, nextupd); continue; } OCSP_id_get0_info(NULL, NULL, NULL, &serial, cid); inf = lookup_serial(db, serial); if (!inf) OCSP_basic_add1_status(bs, cid, V_OCSP_CERTSTATUS_UNKNOWN, 0, NULL, thisupd, nextupd); else if (inf[DB_type][0] == DB_TYPE_VAL) OCSP_basic_add1_status(bs, cid, V_OCSP_CERTSTATUS_GOOD, 0, NULL, thisupd, nextupd); else if (inf[DB_type][0] == DB_TYPE_REV) { ASN1_OBJECT *inst = NULL; ASN1_TIME *revtm = NULL; ASN1_GENERALIZEDTIME *invtm = NULL; OCSP_SINGLERESP *single; int reason = -1; unpack_revinfo(&revtm, &reason, &inst, &invtm, inf[DB_rev_date]); single = OCSP_basic_add1_status(bs, cid, V_OCSP_CERTSTATUS_REVOKED, reason, revtm, thisupd, nextupd); if (invtm) OCSP_SINGLERESP_add1_ext_i2d(single, NID_invalidity_date, invtm, 0, 0); else if (inst) OCSP_SINGLERESP_add1_ext_i2d(single, NID_hold_instruction_code, inst, 0, 0); ASN1_OBJECT_free(inst); ASN1_TIME_free(revtm); ASN1_GENERALIZEDTIME_free(invtm); } } OCSP_copy_nonce(bs, req); OCSP_basic_sign(bs, rcert, rkey, rmd, rother, flags); if (badsig) { const ASN1_OCTET_STRING *sig = OCSP_resp_get0_signature(bs); corrupt_signature(sig); } *resp = OCSP_response_create(OCSP_RESPONSE_STATUS_SUCCESSFUL, bs); end: ASN1_TIME_free(thisupd); ASN1_TIME_free(nextupd); OCSP_CERTID_free(ca_id); OCSP_BASICRESP_free(bs); } static char **lookup_serial(CA_DB *db, ASN1_INTEGER *ser) { int i; BIGNUM *bn = NULL; char *itmp, *row[DB_NUMBER], **rrow; for (i = 0; i < DB_NUMBER; i++) row[i] = NULL; bn = ASN1_INTEGER_to_BN(ser, NULL); OPENSSL_assert(bn); /* FIXME: should report an error at this * point and abort */ if (BN_is_zero(bn)) itmp = OPENSSL_strdup("00"); else itmp = BN_bn2hex(bn); row[DB_serial] = itmp; BN_free(bn); rrow = TXT_DB_get_by_index(db->db, DB_serial, row); OPENSSL_free(itmp); return rrow; } /* Quick and dirty OCSP server: read in and parse input request */ static BIO *init_responder(const char *port) 
{ # ifdef OPENSSL_NO_SOCK BIO_printf(bio_err, "Error setting up accept BIO - sockets not supported.\n"); return NULL; # else BIO *acbio = NULL, *bufbio = NULL; bufbio = BIO_new(BIO_f_buffer()); if (bufbio == NULL) goto err; acbio = BIO_new(BIO_s_accept()); if (acbio == NULL || BIO_set_bind_mode(acbio, BIO_BIND_REUSEADDR) < 0 || BIO_set_accept_port(acbio, port) < 0) { BIO_printf(bio_err, "Error setting up accept BIO\n"); ERR_print_errors(bio_err); goto err; } BIO_set_accept_bios(acbio, bufbio); bufbio = NULL; if (BIO_do_accept(acbio) <= 0) { BIO_printf(bio_err, "Error starting accept\n"); ERR_print_errors(bio_err); goto err; } return acbio; err: BIO_free_all(acbio); BIO_free(bufbio); return NULL; # endif } # ifndef OPENSSL_NO_SOCK /* * Decode %xx URL-decoding in-place. Ignores mal-formed sequences. */ static int urldecode(char *p) { unsigned char *out = (unsigned char *)p; unsigned char *save = out; for (; *p; p++) { if (*p != '%') *out++ = *p; else if (isxdigit(_UC(p[1])) && isxdigit(_UC(p[2]))) { /* Don't check, can't fail because of ixdigit() call. */ *out++ = (OPENSSL_hexchar2int(p[1]) << 4) | OPENSSL_hexchar2int(p[2]); p += 2; } else return -1; } *out = '\0'; return (int)(out - save); } # endif static int do_responder(OCSP_REQUEST **preq, BIO **pcbio, BIO *acbio) { # ifdef OPENSSL_NO_SOCK return 0; # else int len; OCSP_REQUEST *req = NULL; char inbuf[2048], reqbuf[2048]; char *p, *q; BIO *cbio = NULL, *getbio = NULL, *b64 = NULL; if (BIO_do_accept(acbio) <= 0) { BIO_printf(bio_err, "Error accepting connection\n"); ERR_print_errors(bio_err); return 0; } cbio = BIO_pop(acbio); *pcbio = cbio; /* Read the request line. */ len = BIO_gets(cbio, reqbuf, sizeof reqbuf); if (len <= 0) return 1; if (strncmp(reqbuf, "GET ", 4) == 0) { /* Expecting GET {sp} /URL {sp} HTTP/1.x */ for (p = reqbuf + 4; *p == ' '; ++p) continue; if (*p != '/') { BIO_printf(bio_err, "Invalid request -- bad URL\n"); return 1; } p++; /* Splice off the HTTP version identifier. 
*/ for (q = p; *q; q++) if (*q == ' ') break; if (strncmp(q, " HTTP/1.", 8) != 0) { BIO_printf(bio_err, "Invalid request -- bad HTTP vesion\n"); return 1; } *q = '\0'; len = urldecode(p); if (len <= 0) { BIO_printf(bio_err, "Invalid request -- bad URL encoding\n"); return 1; } if ((getbio = BIO_new_mem_buf(p, len)) == NULL || (b64 = BIO_new(BIO_f_base64())) == NULL) { BIO_printf(bio_err, "Could not allocate memory\n"); ERR_print_errors(bio_err); return 1; } BIO_set_flags(b64, BIO_FLAGS_BASE64_NO_NL); getbio = BIO_push(b64, getbio); } else if (strncmp(reqbuf, "POST ", 5) != 0) { BIO_printf(bio_err, "Invalid request -- bad HTTP verb\n"); return 1; } /* Read and skip past the headers. */ for (;;) { len = BIO_gets(cbio, inbuf, sizeof inbuf); if (len <= 0) return 1; if ((inbuf[0] == '\r') || (inbuf[0] == '\n')) break; } /* Try to read OCSP request */ if (getbio) { req = d2i_OCSP_REQUEST_bio(getbio, NULL); BIO_free_all(getbio); } else req = d2i_OCSP_REQUEST_bio(cbio, NULL); if (!req) { BIO_printf(bio_err, "Error parsing OCSP request\n"); ERR_print_errors(bio_err); } *preq = req; return 1; # endif } static int send_ocsp_response(BIO *cbio, OCSP_RESPONSE *resp) { char http_resp[] = "HTTP/1.0 200 OK\r\nContent-type: application/ocsp-response\r\n" "Content-Length: %d\r\n\r\n"; if (!cbio) return 0; BIO_printf(cbio, http_resp, i2d_OCSP_RESPONSE(resp, NULL)); i2d_OCSP_RESPONSE_bio(cbio, resp); (void)BIO_flush(cbio); return 1; } # ifndef OPENSSL_NO_SOCK static OCSP_RESPONSE *query_responder(BIO *cbio, const char *host, const char *path, const STACK_OF(CONF_VALUE) *headers, OCSP_REQUEST *req, int req_timeout) { int fd; int rv; int i; int add_host = 1; OCSP_REQ_CTX *ctx = NULL; OCSP_RESPONSE *rsp = NULL; fd_set confds; struct timeval tv; if (req_timeout != -1) BIO_set_nbio(cbio, 1); rv = BIO_do_connect(cbio); if ((rv <= 0) && ((req_timeout == -1) || !BIO_should_retry(cbio))) { BIO_puts(bio_err, "Error connecting BIO\n"); return NULL; } if (BIO_get_fd(cbio, &fd) < 0) { 
BIO_puts(bio_err, "Can't get connection fd\n"); goto err; } if (req_timeout != -1 && rv <= 0) { FD_ZERO(&confds); openssl_fdset(fd, &confds); tv.tv_usec = 0; tv.tv_sec = req_timeout; rv = select(fd + 1, NULL, (void *)&confds, NULL, &tv); if (rv == 0) { BIO_puts(bio_err, "Timeout on connect\n"); return NULL; } } ctx = OCSP_sendreq_new(cbio, path, NULL, -1); if (ctx == NULL) return NULL; for (i = 0; i < sk_CONF_VALUE_num(headers); i++) { CONF_VALUE *hdr = sk_CONF_VALUE_value(headers, i); if (add_host == 1 && strcasecmp("host", hdr->name) == 0) add_host = 0; if (!OCSP_REQ_CTX_add1_header(ctx, hdr->name, hdr->value)) goto err; } if (add_host == 1 && OCSP_REQ_CTX_add1_header(ctx, "Host", host) == 0) goto err; if (!OCSP_REQ_CTX_set1_req(ctx, req)) goto err; for (;;) { rv = OCSP_sendreq_nbio(&rsp, ctx); if (rv != -1) break; if (req_timeout == -1) continue; FD_ZERO(&confds); openssl_fdset(fd, &confds); tv.tv_usec = 0; tv.tv_sec = req_timeout; if (BIO_should_read(cbio)) rv = select(fd + 1, (void *)&confds, NULL, NULL, &tv); else if (BIO_should_write(cbio)) rv = select(fd + 1, NULL, (void *)&confds, NULL, &tv); else { BIO_puts(bio_err, "Unexpected retry condition\n"); goto err; } if (rv == 0) { BIO_puts(bio_err, "Timeout on request\n"); break; } if (rv == -1) { BIO_puts(bio_err, "Select error\n"); break; } } err: OCSP_REQ_CTX_free(ctx); return rsp; } OCSP_RESPONSE *process_responder(OCSP_REQUEST *req, const char *host, const char *path, const char *port, int use_ssl, STACK_OF(CONF_VALUE) *headers, int req_timeout) { BIO *cbio = NULL; SSL_CTX *ctx = NULL; OCSP_RESPONSE *resp = NULL; cbio = BIO_new_connect(host); if (!cbio) { BIO_printf(bio_err, "Error creating connect BIO\n"); goto end; } if (port) BIO_set_conn_port(cbio, port); if (use_ssl == 1) { BIO *sbio; ctx = SSL_CTX_new(TLS_client_method()); if (ctx == NULL) { BIO_printf(bio_err, "Error creating SSL context.\n"); goto end; } SSL_CTX_set_mode(ctx, SSL_MODE_AUTO_RETRY); sbio = BIO_new_ssl(ctx, 1); cbio = BIO_push(sbio, 
cbio); } resp = query_responder(cbio, host, path, headers, req, req_timeout); if (!resp) BIO_printf(bio_err, "Error querying OCSP responder\n"); end: BIO_free_all(cbio); SSL_CTX_free(ctx); return resp; } # endif #endif
null
null
null
null
118,930
23,407
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
23,407
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/browser/indexed_db/list_set.h" #include <memory> #include "base/memory/ptr_util.h" #include "base/memory/ref_counted.h" #include "testing/gtest/include/gtest/gtest.h" namespace content { TEST(ListSetTest, ListSetIterator) { list_set<int> set; for (int i = 3; i > 0; --i) set.insert(i); list_set<int>::iterator it = set.begin(); EXPECT_EQ(3, *it); ++it; EXPECT_EQ(2, *it); it++; EXPECT_EQ(1, *it); --it; EXPECT_EQ(2, *it); it--; EXPECT_EQ(3, *it); ++it; EXPECT_EQ(2, *it); it++; EXPECT_EQ(1, *it); ++it; EXPECT_EQ(set.end(), it); } TEST(ListSetTest, ListSetConstIterator) { list_set<int> set; for (int i = 5; i > 0; --i) set.insert(i); const list_set<int>& ref = set; list_set<int>::const_iterator it = ref.begin(); for (int i = 5; i > 0; --i) { EXPECT_EQ(i, *it); ++it; } EXPECT_EQ(ref.end(), it); } TEST(ListSetTest, ListSetInsertFront) { list_set<int> set; for (int i = 5; i > 0; --i) set.insert(i); for (int i = 6; i <= 10; ++i) set.insert_front(i); const list_set<int>& ref = set; list_set<int>::const_iterator it = ref.begin(); for (int i = 10; i > 0; --i) { EXPECT_EQ(i, *it); ++it; } EXPECT_EQ(ref.end(), it); } TEST(ListSetTest, ListSetPrimitive) { list_set<int> set; EXPECT_TRUE(set.empty()); EXPECT_EQ(0u, set.size()); { list_set<int>::iterator it = set.begin(); EXPECT_EQ(set.end(), it); } for (int i = 5; i > 0; --i) set.insert(i); EXPECT_EQ(5u, set.size()); EXPECT_FALSE(set.empty()); set.erase(3); EXPECT_EQ(4u, set.size()); EXPECT_EQ(1u, set.count(2)); set.erase(2); EXPECT_EQ(0u, set.count(2)); EXPECT_EQ(3u, set.size()); { list_set<int>::iterator it = set.begin(); EXPECT_EQ(5, *it); ++it; EXPECT_EQ(4, *it); ++it; EXPECT_EQ(1, *it); ++it; EXPECT_EQ(set.end(), it); } set.erase(1); set.erase(4); set.erase(5); EXPECT_EQ(0u, set.size()); EXPECT_TRUE(set.empty()); { list_set<int>::iterator it = 
set.begin(); EXPECT_EQ(set.end(), it); } } template <typename T> class Wrapped { public: explicit Wrapped(const T& value) : value_(value) {} explicit Wrapped(const Wrapped<T>& other) : value_(other.value_) {} Wrapped& operator=(const Wrapped<T>& rhs) { value_ = rhs.value_; return *this; } int value() const { return value_; } bool operator<(const Wrapped<T>& rhs) const { return value_ < rhs.value_; } bool operator==(const Wrapped<T>& rhs) const { return value_ == rhs.value_; } private: T value_; }; TEST(ListSetTest, ListSetObject) { list_set<Wrapped<int> > set; EXPECT_EQ(0u, set.size()); { list_set<Wrapped<int> >::iterator it = set.begin(); EXPECT_EQ(set.end(), it); } set.insert(Wrapped<int>(0)); set.insert(Wrapped<int>(1)); set.insert(Wrapped<int>(2)); EXPECT_EQ(3u, set.size()); { list_set<Wrapped<int> >::iterator it = set.begin(); EXPECT_EQ(0, it->value()); ++it; EXPECT_EQ(1, it->value()); ++it; EXPECT_EQ(2, it->value()); ++it; EXPECT_EQ(set.end(), it); } set.erase(Wrapped<int>(0)); set.erase(Wrapped<int>(1)); set.erase(Wrapped<int>(2)); EXPECT_EQ(0u, set.size()); { list_set<Wrapped<int> >::iterator it = set.begin(); EXPECT_EQ(set.end(), it); } } TEST(ListSetTest, ListSetPointer) { std::unique_ptr<Wrapped<int>> w0 = std::make_unique<Wrapped<int>>(0); std::unique_ptr<Wrapped<int>> w1 = std::make_unique<Wrapped<int>>(1); std::unique_ptr<Wrapped<int>> w2 = std::make_unique<Wrapped<int>>(2); list_set<Wrapped<int>*> set; EXPECT_EQ(0u, set.size()); { list_set<Wrapped<int>*>::iterator it = set.begin(); EXPECT_EQ(set.end(), it); } set.insert(w0.get()); set.insert(w1.get()); set.insert(w2.get()); EXPECT_EQ(3u, set.size()); { list_set<Wrapped<int>*>::iterator it = set.begin(); EXPECT_EQ(0, (*it)->value()); ++it; EXPECT_EQ(1, (*it)->value()); ++it; EXPECT_EQ(2, (*it)->value()); ++it; EXPECT_EQ(set.end(), it); } set.erase(w0.get()); set.erase(w1.get()); set.erase(w2.get()); EXPECT_EQ(0u, set.size()); { list_set<Wrapped<int>*>::iterator it = set.begin(); EXPECT_EQ(set.end(), 
it); } } template <typename T> class RefCounted : public base::RefCounted<RefCounted<T> > { public: explicit RefCounted(const T& value) : value_(value) {} T value() { return value_; } private: virtual ~RefCounted() {} friend class base::RefCounted<RefCounted<T> >; T value_; }; TEST(ListSetTest, ListSetRefCounted) { list_set<scoped_refptr<RefCounted<int> > > set; EXPECT_EQ(0u, set.size()); { list_set<scoped_refptr<RefCounted<int> > >::iterator it = set.begin(); EXPECT_EQ(set.end(), it); } scoped_refptr<RefCounted<int> > r0(new RefCounted<int>(0)); scoped_refptr<RefCounted<int> > r1(new RefCounted<int>(1)); scoped_refptr<RefCounted<int> > r2(new RefCounted<int>(2)); set.insert(r0); set.insert(r1); set.insert(r2); EXPECT_EQ(3u, set.size()); { list_set<scoped_refptr<RefCounted<int> > >::iterator it = set.begin(); EXPECT_EQ(0, (*it)->value()); ++it; EXPECT_EQ(1, (*it)->value()); ++it; EXPECT_EQ(2, (*it)->value()); ++it; EXPECT_EQ(set.end(), it); } set.erase(r0); set.erase(r1); set.erase(r2); EXPECT_EQ(0u, set.size()); { list_set<scoped_refptr<RefCounted<int> > >::iterator it = set.begin(); EXPECT_EQ(set.end(), it); } } } // namespace content
null
null
null
null
20,270
14,447
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
179,442
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef __iop_fifo_in_extra_defs_asm_h #define __iop_fifo_in_extra_defs_asm_h /* * This file is autogenerated from * file: ../../inst/io_proc/rtl/iop_fifo_in_extra.r * id: <not found> * last modfied: Mon Apr 11 16:10:08 2005 * * by /n/asic/design/tools/rdesc/src/rdes2c -asm --outfile asm/iop_fifo_in_extra_defs_asm.h ../../inst/io_proc/rtl/iop_fifo_in_extra.r * id: $Id: iop_fifo_in_extra_defs_asm.h,v 1.1 2005/04/24 18:31:06 starvik Exp $ * Any changes here will be lost. * * -*- buffer-read-only: t -*- */ #ifndef REG_FIELD #define REG_FIELD( scope, reg, field, value ) \ REG_FIELD_X_( value, reg_##scope##_##reg##___##field##___lsb ) #define REG_FIELD_X_( value, shift ) ((value) << shift) #endif #ifndef REG_STATE #define REG_STATE( scope, reg, field, symbolic_value ) \ REG_STATE_X_( regk_##scope##_##symbolic_value, reg_##scope##_##reg##___##field##___lsb ) #define REG_STATE_X_( k, shift ) (k << shift) #endif #ifndef REG_MASK #define REG_MASK( scope, reg, field ) \ REG_MASK_X_( reg_##scope##_##reg##___##field##___width, reg_##scope##_##reg##___##field##___lsb ) #define REG_MASK_X_( width, lsb ) (((1 << width)-1) << lsb) #endif #ifndef REG_LSB #define REG_LSB( scope, reg, field ) reg_##scope##_##reg##___##field##___lsb #endif #ifndef REG_BIT #define REG_BIT( scope, reg, field ) reg_##scope##_##reg##___##field##___bit #endif #ifndef REG_ADDR #define REG_ADDR( scope, inst, reg ) REG_ADDR_X_(inst, reg_##scope##_##reg##_offset) #define REG_ADDR_X_( inst, offs ) ((inst) + offs) #endif #ifndef REG_ADDR_VECT #define REG_ADDR_VECT( scope, inst, reg, index ) \ REG_ADDR_VECT_X_(inst, reg_##scope##_##reg##_offset, index, \ STRIDE_##scope##_##reg ) #define REG_ADDR_VECT_X_( inst, offs, index, stride ) \ ((inst) + offs + (index) * stride) #endif /* Register rw_wr_data, scope iop_fifo_in_extra, type rw */ #define reg_iop_fifo_in_extra_rw_wr_data_offset 0 /* Register r_stat, scope iop_fifo_in_extra, type r */ #define reg_iop_fifo_in_extra_r_stat___avail_bytes___lsb 0 #define 
reg_iop_fifo_in_extra_r_stat___avail_bytes___width 4 #define reg_iop_fifo_in_extra_r_stat___last___lsb 4 #define reg_iop_fifo_in_extra_r_stat___last___width 8 #define reg_iop_fifo_in_extra_r_stat___dif_in_en___lsb 12 #define reg_iop_fifo_in_extra_r_stat___dif_in_en___width 1 #define reg_iop_fifo_in_extra_r_stat___dif_in_en___bit 12 #define reg_iop_fifo_in_extra_r_stat___dif_out_en___lsb 13 #define reg_iop_fifo_in_extra_r_stat___dif_out_en___width 1 #define reg_iop_fifo_in_extra_r_stat___dif_out_en___bit 13 #define reg_iop_fifo_in_extra_r_stat_offset 4 /* Register rw_strb_dif_in, scope iop_fifo_in_extra, type rw */ #define reg_iop_fifo_in_extra_rw_strb_dif_in___last___lsb 0 #define reg_iop_fifo_in_extra_rw_strb_dif_in___last___width 2 #define reg_iop_fifo_in_extra_rw_strb_dif_in_offset 8 /* Register rw_intr_mask, scope iop_fifo_in_extra, type rw */ #define reg_iop_fifo_in_extra_rw_intr_mask___urun___lsb 0 #define reg_iop_fifo_in_extra_rw_intr_mask___urun___width 1 #define reg_iop_fifo_in_extra_rw_intr_mask___urun___bit 0 #define reg_iop_fifo_in_extra_rw_intr_mask___last_data___lsb 1 #define reg_iop_fifo_in_extra_rw_intr_mask___last_data___width 1 #define reg_iop_fifo_in_extra_rw_intr_mask___last_data___bit 1 #define reg_iop_fifo_in_extra_rw_intr_mask___dav___lsb 2 #define reg_iop_fifo_in_extra_rw_intr_mask___dav___width 1 #define reg_iop_fifo_in_extra_rw_intr_mask___dav___bit 2 #define reg_iop_fifo_in_extra_rw_intr_mask___avail___lsb 3 #define reg_iop_fifo_in_extra_rw_intr_mask___avail___width 1 #define reg_iop_fifo_in_extra_rw_intr_mask___avail___bit 3 #define reg_iop_fifo_in_extra_rw_intr_mask___orun___lsb 4 #define reg_iop_fifo_in_extra_rw_intr_mask___orun___width 1 #define reg_iop_fifo_in_extra_rw_intr_mask___orun___bit 4 #define reg_iop_fifo_in_extra_rw_intr_mask_offset 12 /* Register rw_ack_intr, scope iop_fifo_in_extra, type rw */ #define reg_iop_fifo_in_extra_rw_ack_intr___urun___lsb 0 #define reg_iop_fifo_in_extra_rw_ack_intr___urun___width 1 #define 
reg_iop_fifo_in_extra_rw_ack_intr___urun___bit 0 #define reg_iop_fifo_in_extra_rw_ack_intr___last_data___lsb 1 #define reg_iop_fifo_in_extra_rw_ack_intr___last_data___width 1 #define reg_iop_fifo_in_extra_rw_ack_intr___last_data___bit 1 #define reg_iop_fifo_in_extra_rw_ack_intr___dav___lsb 2 #define reg_iop_fifo_in_extra_rw_ack_intr___dav___width 1 #define reg_iop_fifo_in_extra_rw_ack_intr___dav___bit 2 #define reg_iop_fifo_in_extra_rw_ack_intr___avail___lsb 3 #define reg_iop_fifo_in_extra_rw_ack_intr___avail___width 1 #define reg_iop_fifo_in_extra_rw_ack_intr___avail___bit 3 #define reg_iop_fifo_in_extra_rw_ack_intr___orun___lsb 4 #define reg_iop_fifo_in_extra_rw_ack_intr___orun___width 1 #define reg_iop_fifo_in_extra_rw_ack_intr___orun___bit 4 #define reg_iop_fifo_in_extra_rw_ack_intr_offset 16 /* Register r_intr, scope iop_fifo_in_extra, type r */ #define reg_iop_fifo_in_extra_r_intr___urun___lsb 0 #define reg_iop_fifo_in_extra_r_intr___urun___width 1 #define reg_iop_fifo_in_extra_r_intr___urun___bit 0 #define reg_iop_fifo_in_extra_r_intr___last_data___lsb 1 #define reg_iop_fifo_in_extra_r_intr___last_data___width 1 #define reg_iop_fifo_in_extra_r_intr___last_data___bit 1 #define reg_iop_fifo_in_extra_r_intr___dav___lsb 2 #define reg_iop_fifo_in_extra_r_intr___dav___width 1 #define reg_iop_fifo_in_extra_r_intr___dav___bit 2 #define reg_iop_fifo_in_extra_r_intr___avail___lsb 3 #define reg_iop_fifo_in_extra_r_intr___avail___width 1 #define reg_iop_fifo_in_extra_r_intr___avail___bit 3 #define reg_iop_fifo_in_extra_r_intr___orun___lsb 4 #define reg_iop_fifo_in_extra_r_intr___orun___width 1 #define reg_iop_fifo_in_extra_r_intr___orun___bit 4 #define reg_iop_fifo_in_extra_r_intr_offset 20 /* Register r_masked_intr, scope iop_fifo_in_extra, type r */ #define reg_iop_fifo_in_extra_r_masked_intr___urun___lsb 0 #define reg_iop_fifo_in_extra_r_masked_intr___urun___width 1 #define reg_iop_fifo_in_extra_r_masked_intr___urun___bit 0 #define 
reg_iop_fifo_in_extra_r_masked_intr___last_data___lsb 1 #define reg_iop_fifo_in_extra_r_masked_intr___last_data___width 1 #define reg_iop_fifo_in_extra_r_masked_intr___last_data___bit 1 #define reg_iop_fifo_in_extra_r_masked_intr___dav___lsb 2 #define reg_iop_fifo_in_extra_r_masked_intr___dav___width 1 #define reg_iop_fifo_in_extra_r_masked_intr___dav___bit 2 #define reg_iop_fifo_in_extra_r_masked_intr___avail___lsb 3 #define reg_iop_fifo_in_extra_r_masked_intr___avail___width 1 #define reg_iop_fifo_in_extra_r_masked_intr___avail___bit 3 #define reg_iop_fifo_in_extra_r_masked_intr___orun___lsb 4 #define reg_iop_fifo_in_extra_r_masked_intr___orun___width 1 #define reg_iop_fifo_in_extra_r_masked_intr___orun___bit 4 #define reg_iop_fifo_in_extra_r_masked_intr_offset 24 /* Constants */ #define regk_iop_fifo_in_extra_fifo_in 0x00000002 #define regk_iop_fifo_in_extra_no 0x00000000 #define regk_iop_fifo_in_extra_rw_intr_mask_default 0x00000000 #define regk_iop_fifo_in_extra_yes 0x00000001 #endif /* __iop_fifo_in_extra_defs_asm_h */
null
null
null
null
87,789
66,067
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
66,067
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_MEDIA_WEBRTC_MEDIA_STREAM_CAPTURE_INDICATOR_H_ #define CHROME_BROWSER_MEDIA_WEBRTC_MEDIA_STREAM_CAPTURE_INDICATOR_H_ #include <unordered_map> #include <vector> #include "base/callback_forward.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "chrome/browser/status_icons/status_icon_menu_model.h" #include "content/public/common/media_stream_request.h" namespace content { class WebContents; } // namespace content namespace gfx { class ImageSkia; } // namespace gfx class StatusIcon; // This indicator is owned by MediaCaptureDevicesDispatcher // (MediaCaptureDevicesDispatcher is a singleton). class MediaStreamCaptureIndicator : public base::RefCountedThreadSafe<MediaStreamCaptureIndicator>, public StatusIconMenuModel::Delegate { public: MediaStreamCaptureIndicator(); // Registers a new media stream for |web_contents| and returns UI object // that's used by the content layer to notify about state of the stream. std::unique_ptr<content::MediaStreamUI> RegisterMediaStream( content::WebContents* web_contents, const content::MediaStreamDevices& devices); // Overrides from StatusIconMenuModel::Delegate implementation. void ExecuteCommand(int command_id, int event_flags) override; // Returns true if the |web_contents| is capturing user media (e.g., webcam or // microphone input). bool IsCapturingUserMedia(content::WebContents* web_contents) const; // Returns true if the |web_contents| is capturing video (e.g., webcam). bool IsCapturingVideo(content::WebContents* web_contents) const; // Returns true if the |web_contents| is capturing audio (e.g., microphone). bool IsCapturingAudio(content::WebContents* web_contents) const; // Returns true if the |web_contents| itself is being mirrored (e.g., a source // of media for remote broadcast). 
bool IsBeingMirrored(content::WebContents* web_contents) const; // Called when STOP button in media capture notification is clicked. void NotifyStopped(content::WebContents* web_contents) const; private: class UIDelegate; class WebContentsDeviceUsage; friend class WebContentsDeviceUsage; friend class base::RefCountedThreadSafe<MediaStreamCaptureIndicator>; ~MediaStreamCaptureIndicator() override; // Following functions/variables are executed/accessed only on UI thread. // Called by WebContentsDeviceUsage when it's about to destroy itself, i.e. // when WebContents is being destroyed. void UnregisterWebContents(content::WebContents* web_contents); // Updates the status tray menu. Called by WebContentsDeviceUsage. void UpdateNotificationUserInterface(); // Helpers to create and destroy status tray icon. Called from // UpdateNotificationUserInterface(). void EnsureStatusTrayIconResources(); void MaybeCreateStatusTrayIcon(bool audio, bool video); void MaybeDestroyStatusTrayIcon(); // Gets the status icon image and the string to use as the tooltip. void GetStatusTrayIconInfo(bool audio, bool video, gfx::ImageSkia* image, base::string16* tool_tip); // Reference to our status icon - owned by the StatusTray. If null, // the platform doesn't support status icons. StatusIcon* status_icon_ = nullptr; // A map that contains the usage counts of the opened capture devices for each // WebContents instance. std::unordered_map<content::WebContents*, std::unique_ptr<WebContentsDeviceUsage>> usage_map_; // A vector which maps command IDs to their associated WebContents // instance. This is rebuilt each time the status tray icon context menu is // updated. typedef std::vector<content::WebContents*> CommandTargets; CommandTargets command_targets_; DISALLOW_COPY_AND_ASSIGN(MediaStreamCaptureIndicator); }; #endif // CHROME_BROWSER_MEDIA_WEBRTC_MEDIA_STREAM_CAPTURE_INDICATOR_H_
null
null
null
null
62,930
31,375
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
31,375
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/** * (C) 1999-2003 Lars Knoll ([email protected]) * Copyright (C) 2004, 2005, 2006 Apple Computer, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public License * along with this library; see the file COPYING.LIB. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. */ #include "third_party/blink/renderer/core/css/css_property_value.h" #include "third_party/blink/renderer/core/style/computed_style_constants.h" #include "third_party/blink/renderer/core/style_property_shorthand.h" namespace blink { struct SameSizeAsCSSPropertyValue { uint32_t bitfields; void* property; Member<void*> value; }; static_assert(sizeof(CSSPropertyValue) == sizeof(SameSizeAsCSSPropertyValue), "CSSPropertyValue should stay small"); CSSPropertyID CSSPropertyValueMetadata::ShorthandID() const { if (!is_set_from_shorthand_) return CSSPropertyInvalid; Vector<StylePropertyShorthand, 4> shorthands; getMatchingShorthandsForLonghand(Property().PropertyID(), &shorthands); DCHECK(shorthands.size()); DCHECK_GE(index_in_shorthands_vector_, 0u); DCHECK_LT(index_in_shorthands_vector_, shorthands.size()); return shorthands.at(index_in_shorthands_vector_).id(); } bool CSSPropertyValue::operator==(const CSSPropertyValue& other) const { return DataEquivalent(value_, other.value_) && IsImportant() == other.IsImportant(); } } // namespace blink
null
null
null
null
28,238
15,048
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
15,048
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/dom_distiller/core/distiller.h" #include <map> #include <memory> #include <utility> #include <vector> #include "base/auto_reset.h" #include "base/bind.h" #include "base/callback.h" #include "base/location.h" #include "base/memory/ptr_util.h" #include "base/metrics/histogram_macros.h" #include "base/single_thread_task_runner.h" #include "base/strings/string_number_conversions.h" #include "base/strings/utf_string_conversions.h" #include "base/threading/thread_task_runner_handle.h" #include "base/values.h" #include "components/dom_distiller/core/distiller_page.h" #include "components/dom_distiller/core/distiller_url_fetcher.h" #include "components/dom_distiller/core/proto/distilled_article.pb.h" #include "components/dom_distiller/core/proto/distilled_page.pb.h" #include "net/url_request/url_request_context_getter.h" namespace { // Maximum number of distilled pages in an article. const size_t kMaxPagesInArticle = 32; } namespace dom_distiller { DistillerFactoryImpl::DistillerFactoryImpl( std::unique_ptr<DistillerURLFetcherFactory> distiller_url_fetcher_factory, const dom_distiller::proto::DomDistillerOptions& dom_distiller_options) : distiller_url_fetcher_factory_(std::move(distiller_url_fetcher_factory)), dom_distiller_options_(dom_distiller_options) {} DistillerFactoryImpl::~DistillerFactoryImpl() {} std::unique_ptr<Distiller> DistillerFactoryImpl::CreateDistillerForUrl( const GURL& unused) { // This default implementation has the same behavior for all URLs. 
std::unique_ptr<DistillerImpl> distiller(new DistillerImpl( *distiller_url_fetcher_factory_, dom_distiller_options_)); return std::move(distiller); } DistillerImpl::DistilledPageData::DistilledPageData() {} DistillerImpl::DistilledPageData::~DistilledPageData() {} DistillerImpl::DistillerImpl( const DistillerURLFetcherFactory& distiller_url_fetcher_factory, const dom_distiller::proto::DomDistillerOptions& dom_distiller_options) : distiller_url_fetcher_factory_(distiller_url_fetcher_factory), dom_distiller_options_(dom_distiller_options), max_pages_in_article_(kMaxPagesInArticle), destruction_allowed_(true), weak_factory_(this) { } DistillerImpl::~DistillerImpl() { DCHECK(destruction_allowed_); } void DistillerImpl::SetMaxNumPagesInArticle(size_t max_num_pages) { max_pages_in_article_ = max_num_pages; } bool DistillerImpl::AreAllPagesFinished() const { return started_pages_index_.empty() && waiting_pages_.empty(); } size_t DistillerImpl::TotalPageCount() const { return waiting_pages_.size() + started_pages_index_.size() + finished_pages_index_.size(); } void DistillerImpl::AddToDistillationQueue(int page_num, const GURL& url) { if (!IsPageNumberInUse(page_num) && url.is_valid() && TotalPageCount() < max_pages_in_article_ && seen_urls_.find(url.spec()) == seen_urls_.end()) { waiting_pages_[page_num] = url; } } bool DistillerImpl::IsPageNumberInUse(int page_num) const { return waiting_pages_.find(page_num) != waiting_pages_.end() || started_pages_index_.find(page_num) != started_pages_index_.end() || finished_pages_index_.find(page_num) != finished_pages_index_.end(); } DistillerImpl::DistilledPageData* DistillerImpl::GetPageAtIndex(size_t index) const { DCHECK_LT(index, pages_.size()); DistilledPageData* page_data = pages_[index].get(); DCHECK(page_data); return page_data; } void DistillerImpl::DistillPage(const GURL& url, std::unique_ptr<DistillerPage> distiller_page, const DistillationFinishedCallback& finished_cb, const DistillationUpdateCallback& update_cb) { 
DCHECK(AreAllPagesFinished()); distiller_page_ = std::move(distiller_page); finished_cb_ = finished_cb; update_cb_ = update_cb; AddToDistillationQueue(0, url); DistillNextPage(); } void DistillerImpl::DistillNextPage() { if (!waiting_pages_.empty()) { std::map<int, GURL>::iterator front = waiting_pages_.begin(); int page_num = front->first; const GURL url = front->second; waiting_pages_.erase(front); DCHECK(url.is_valid()); DCHECK(started_pages_index_.find(page_num) == started_pages_index_.end()); DCHECK(finished_pages_index_.find(page_num) == finished_pages_index_.end()); seen_urls_.insert(url.spec()); pages_.push_back(std::make_unique<DistilledPageData>()); started_pages_index_[page_num] = pages_.size() - 1; distiller_page_->DistillPage( url, dom_distiller_options_, base::Bind(&DistillerImpl::OnPageDistillationFinished, weak_factory_.GetWeakPtr(), page_num, url)); } } void DistillerImpl::OnPageDistillationFinished( int page_num, const GURL& page_url, std::unique_ptr<proto::DomDistillerResult> distiller_result, bool distillation_successful) { DCHECK(started_pages_index_.find(page_num) != started_pages_index_.end()); if (!distillation_successful) { started_pages_index_.erase(page_num); RunDistillerCallbackIfDone(); return; } if (distiller_result->has_statistics_info() && page_num == 0) { if (distiller_result->statistics_info().has_word_count()) { UMA_HISTOGRAM_CUSTOM_COUNTS( "DomDistiller.Statistics.FirstPageWordCount", distiller_result->statistics_info().word_count(), 1, 4000, 50); } } DCHECK(distiller_result.get()); DistilledPageData* page_data = GetPageAtIndex(started_pages_index_[page_num]); page_data->distilled_page_proto = new base::RefCountedData<DistilledPageProto>(); page_data->page_num = page_num; if (distiller_result->has_title()) { page_data->distilled_page_proto->data.set_title( distiller_result->title()); } page_data->distilled_page_proto->data.set_url(page_url.spec()); bool content_empty = true; if (distiller_result->has_distilled_content() && 
distiller_result->distilled_content().has_html()) { page_data->distilled_page_proto->data.set_html( distiller_result->distilled_content().html()); if (!distiller_result->distilled_content().html().empty()) { content_empty = false; } } if (distiller_result->has_timing_info()) { const proto::TimingInfo& distiller_timing_info = distiller_result->timing_info(); DistilledPageProto::TimingInfo timing_info; if (distiller_timing_info.has_markup_parsing_time()) { timing_info.set_name("markup_parsing"); timing_info.set_time(distiller_timing_info.markup_parsing_time()); *page_data->distilled_page_proto->data.add_timing_info() = timing_info; } if (distiller_timing_info.has_document_construction_time()) { timing_info.set_name("document_construction"); timing_info.set_time( distiller_timing_info.document_construction_time()); *page_data->distilled_page_proto->data.add_timing_info() = timing_info; } if (distiller_timing_info.has_article_processing_time()) { timing_info.set_name("article_processing"); timing_info.set_time( distiller_timing_info.article_processing_time()); *page_data->distilled_page_proto->data.add_timing_info() = timing_info; } if (distiller_timing_info.has_formatting_time()) { timing_info.set_name("formatting"); timing_info.set_time( distiller_timing_info.formatting_time()); *page_data->distilled_page_proto->data.add_timing_info() = timing_info; } if (distiller_timing_info.has_total_time()) { timing_info.set_name("total"); timing_info.set_time( distiller_timing_info.total_time()); *page_data->distilled_page_proto->data.add_timing_info() = timing_info; } for (int i = 0; i < distiller_timing_info.other_times_size(); i++) { timing_info.set_name(distiller_timing_info.other_times(i).name()); timing_info.set_time(distiller_timing_info.other_times(i).time()); *page_data->distilled_page_proto->data.add_timing_info() = timing_info; } } if (distiller_result->has_debug_info() && distiller_result->debug_info().has_log()) { 
page_data->distilled_page_proto->data.mutable_debug_info()->set_log( distiller_result->debug_info().log()); } if (distiller_result->has_text_direction()) { page_data->distilled_page_proto->data.set_text_direction( distiller_result->text_direction()); } else { page_data->distilled_page_proto->data.set_text_direction("auto"); } if (distiller_result->has_pagination_info()) { const proto::PaginationInfo& pagination_info = distiller_result->pagination_info(); // Skip the next page if the first page is empty. if (pagination_info.has_next_page() && (page_num != 0 || !content_empty)) { GURL next_page_url(pagination_info.next_page()); if (next_page_url.is_valid()) { // The pages should be in same origin. DCHECK_EQ(next_page_url.GetOrigin(), page_url.GetOrigin()); AddToDistillationQueue(page_num + 1, next_page_url); page_data->distilled_page_proto->data.mutable_pagination_info()-> set_next_page(next_page_url.spec()); } } if (pagination_info.has_prev_page()) { GURL prev_page_url(pagination_info.prev_page()); if (prev_page_url.is_valid()) { DCHECK_EQ(prev_page_url.GetOrigin(), page_url.GetOrigin()); AddToDistillationQueue(page_num - 1, prev_page_url); page_data->distilled_page_proto->data.mutable_pagination_info()-> set_prev_page(prev_page_url.spec()); } } if (pagination_info.has_canonical_page()) { GURL canonical_page_url(pagination_info.canonical_page()); if (canonical_page_url.is_valid()) { page_data->distilled_page_proto->data.mutable_pagination_info()-> set_canonical_page(canonical_page_url.spec()); } } } for (int img_num = 0; img_num < distiller_result->content_images_size(); ++img_num) { std::string image_id = base::IntToString(page_num + 1) + "_" + base::IntToString(img_num); FetchImage(page_num, image_id, distiller_result->content_images(img_num).url()); } AddPageIfDone(page_num); DistillNextPage(); } void DistillerImpl::FetchImage(int page_num, const std::string& image_id, const std::string& image_url) { if (!GURL(image_url).is_valid()) return; 
DCHECK(started_pages_index_.find(page_num) != started_pages_index_.end()); DistilledPageData* page_data = GetPageAtIndex(started_pages_index_[page_num]); DistillerURLFetcher* fetcher = distiller_url_fetcher_factory_.CreateDistillerURLFetcher(); page_data->image_fetchers_.push_back(base::WrapUnique(fetcher)); fetcher->FetchURL(image_url, base::Bind(&DistillerImpl::OnFetchImageDone, weak_factory_.GetWeakPtr(), page_num, base::Unretained(fetcher), image_id, image_url)); } void DistillerImpl::OnFetchImageDone(int page_num, DistillerURLFetcher* url_fetcher, const std::string& id, const std::string& original_url, const std::string& response) { DCHECK(started_pages_index_.find(page_num) != started_pages_index_.end()); DistilledPageData* page_data = GetPageAtIndex(started_pages_index_[page_num]); DCHECK(page_data->distilled_page_proto.get()); DCHECK(url_fetcher); auto fetcher_it = std::find_if( page_data->image_fetchers_.begin(), page_data->image_fetchers_.end(), [url_fetcher](const std::unique_ptr<DistillerURLFetcher>& f) { return url_fetcher == f.get(); }); DCHECK(fetcher_it != page_data->image_fetchers_.end()); // Delete the |url_fetcher| by DeleteSoon since the OnFetchImageDone // callback is invoked by the |url_fetcher|. 
fetcher_it->release(); page_data->image_fetchers_.erase(fetcher_it); base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, url_fetcher); DistilledPageProto_Image* image = page_data->distilled_page_proto->data.add_image(); image->set_name(id); image->set_data(response); image->set_url(original_url); AddPageIfDone(page_num); } void DistillerImpl::AddPageIfDone(int page_num) { DCHECK(started_pages_index_.find(page_num) != started_pages_index_.end()); DCHECK(finished_pages_index_.find(page_num) == finished_pages_index_.end()); DistilledPageData* page_data = GetPageAtIndex(started_pages_index_[page_num]); if (page_data->image_fetchers_.empty()) { finished_pages_index_[page_num] = started_pages_index_[page_num]; started_pages_index_.erase(page_num); const ArticleDistillationUpdate& article_update = CreateDistillationUpdate(); DCHECK_EQ(article_update.GetPagesSize(), finished_pages_index_.size()); update_cb_.Run(article_update); RunDistillerCallbackIfDone(); } } const ArticleDistillationUpdate DistillerImpl::CreateDistillationUpdate() const { bool has_prev_page = false; bool has_next_page = false; if (!finished_pages_index_.empty()) { int prev_page_num = finished_pages_index_.begin()->first - 1; int next_page_num = finished_pages_index_.rbegin()->first + 1; has_prev_page = IsPageNumberInUse(prev_page_num); has_next_page = IsPageNumberInUse(next_page_num); } std::vector<scoped_refptr<ArticleDistillationUpdate::RefCountedPageProto> > update_pages; for (std::map<int, size_t>::const_iterator it = finished_pages_index_.begin(); it != finished_pages_index_.end(); ++it) { update_pages.push_back(pages_[it->second]->distilled_page_proto); } return ArticleDistillationUpdate(update_pages, has_next_page, has_prev_page); } void DistillerImpl::RunDistillerCallbackIfDone() { DCHECK(!finished_cb_.is_null()); if (AreAllPagesFinished()) { bool first_page = true; std::unique_ptr<DistilledArticleProto> article_proto( new DistilledArticleProto()); // Stitch the pages back into the 
article. for (std::map<int, size_t>::iterator it = finished_pages_index_.begin(); it != finished_pages_index_.end();) { DistilledPageData* page_data = GetPageAtIndex(it->second); *(article_proto->add_pages()) = page_data->distilled_page_proto->data; if (first_page) { article_proto->set_title(page_data->distilled_page_proto->data.title()); first_page = false; } finished_pages_index_.erase(it++); } pages_.clear(); DCHECK_LE(static_cast<size_t>(article_proto->pages_size()), max_pages_in_article_); DCHECK(pages_.empty()); DCHECK(finished_pages_index_.empty()); base::AutoReset<bool> dont_delete_this_in_callback(&destruction_allowed_, false); finished_cb_.Run(std::move(article_proto)); finished_cb_.Reset(); } } } // namespace dom_distiller
null
null
null
null
11,911
65,035
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
65,035
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_UI_APP_LIST_SEARCH_ARC_ARC_APP_DATA_SEARCH_RESULT_H_ #define CHROME_BROWSER_UI_APP_LIST_SEARCH_ARC_ARC_APP_DATA_SEARCH_RESULT_H_ #include <memory> #include <string> #include <vector> #include "base/memory/weak_ptr.h" #include "base/optional.h" #include "chrome/browser/ui/app_list/search/chrome_search_result.h" #include "components/arc/common/app.mojom.h" class AppListControllerDelegate; class Profile; namespace app_list { class IconDecodeRequest; class ArcAppDataSearchResult : public ChromeSearchResult { public: ArcAppDataSearchResult(arc::mojom::AppDataResultPtr data, Profile* profile, AppListControllerDelegate* list_controller); ~ArcAppDataSearchResult() override; // ChromeSearchResult: std::unique_ptr<ChromeSearchResult> Duplicate() const override; ui::MenuModel* GetContextMenuModel() override; void Open(int event_flags) override; private: const std::string& launch_intent_uri() const { return data_->launch_intent_uri; } const std::string& label() const { return data_->label; } const base::Optional<std::vector<uint8_t>>& icon_png_data() const { return data_->icon_png_data; } // Apply avatar style to |icon| and use it for SearchResult. void SetIconToAvatarIcon(const gfx::ImageSkia& icon); arc::mojom::AppDataResultPtr data_; std::unique_ptr<IconDecodeRequest> icon_decode_request_; // |profile_| is owned by ProfileInfo. Profile* const profile_; // |list_controller_| is owned by AppListServiceAsh and lives until the // service finishes. AppListControllerDelegate* const list_controller_; base::WeakPtrFactory<ArcAppDataSearchResult> weak_ptr_factory_; DISALLOW_COPY_AND_ASSIGN(ArcAppDataSearchResult); }; } // namespace app_list #endif // CHROME_BROWSER_UI_APP_LIST_SEARCH_ARC_ARC_APP_DATA_SEARCH_RESULT_H_
null
null
null
null
61,898
18,185
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
18,185
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_POLICY_CORE_COMMON_REMOTE_COMMANDS_REMOTE_COMMANDS_FACTORY_H_ #define COMPONENTS_POLICY_CORE_COMMON_REMOTE_COMMANDS_REMOTE_COMMANDS_FACTORY_H_ #include <memory> #include "base/macros.h" #include "components/policy/policy_export.h" #include "components/policy/proto/device_management_backend.pb.h" namespace policy { class RemoteCommandJob; // An interface class for creating remote commands based on command type. class POLICY_EXPORT RemoteCommandsFactory { public: virtual ~RemoteCommandsFactory(); virtual std::unique_ptr<RemoteCommandJob> BuildJobForType( enterprise_management::RemoteCommand_Type type) = 0; private: DISALLOW_ASSIGN(RemoteCommandsFactory); }; } // namespace policy #endif // COMPONENTS_POLICY_CORE_COMMON_REMOTE_COMMANDS_REMOTE_COMMANDS_FACTORY_H_
null
null
null
null
15,048
6,526
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
6,526
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chromeos/network/proxy/ui_proxy_config.h" #include "base/logging.h" #include "base/values.h" #include "components/proxy_config/proxy_config_dictionary.h" #include "net/proxy_resolution/proxy_config.h" #include "url/url_constants.h" namespace { const char kSocksScheme[] = "socks"; } namespace chromeos { UIProxyConfig::UIProxyConfig() : mode(MODE_DIRECT), state(ProxyPrefs::CONFIG_UNSET), user_modifiable(true) {} UIProxyConfig::~UIProxyConfig() = default; void UIProxyConfig::SetPacUrl(const GURL& pac_url) { mode = UIProxyConfig::MODE_PAC_SCRIPT; automatic_proxy.pac_url = pac_url; } void UIProxyConfig::SetSingleProxy(const net::ProxyServer& server) { mode = UIProxyConfig::MODE_SINGLE_PROXY; single_proxy.server = server; } void UIProxyConfig::SetProxyForScheme(const std::string& scheme, const net::ProxyServer& server) { ManualProxy* proxy = MapSchemeToProxy(scheme); if (!proxy) { NOTREACHED() << "Cannot set proxy: invalid scheme [" << scheme << "]"; return; } mode = UIProxyConfig::MODE_PROXY_PER_SCHEME; proxy->server = server; } void UIProxyConfig::SetBypassRules(const net::ProxyBypassRules& rules) { if (mode != UIProxyConfig::MODE_SINGLE_PROXY && mode != UIProxyConfig::MODE_PROXY_PER_SCHEME) { NOTREACHED() << "Cannot set bypass rules for proxy mode [" << mode << "]"; return; } bypass_rules = rules; } bool UIProxyConfig::FromNetProxyConfig(const net::ProxyConfig& net_config) { *this = UIProxyConfig(); // Reset to default. 
const net::ProxyConfig::ProxyRules& rules = net_config.proxy_rules(); switch (rules.type) { case net::ProxyConfig::ProxyRules::Type::EMPTY: if (!net_config.HasAutomaticSettings()) { mode = UIProxyConfig::MODE_DIRECT; } else if (net_config.auto_detect()) { mode = UIProxyConfig::MODE_AUTO_DETECT; } else if (net_config.has_pac_url()) { mode = UIProxyConfig::MODE_PAC_SCRIPT; automatic_proxy.pac_url = net_config.pac_url(); } else { return false; } return true; case net::ProxyConfig::ProxyRules::Type::PROXY_LIST: if (rules.single_proxies.IsEmpty()) return false; mode = MODE_SINGLE_PROXY; single_proxy.server = rules.single_proxies.Get(); bypass_rules = rules.bypass_rules; return true; case net::ProxyConfig::ProxyRules::Type::PROXY_LIST_PER_SCHEME: // Make sure we have valid server for at least one of the protocols. if (rules.proxies_for_http.IsEmpty() && rules.proxies_for_https.IsEmpty() && rules.proxies_for_ftp.IsEmpty() && rules.fallback_proxies.IsEmpty()) { return false; } mode = MODE_PROXY_PER_SCHEME; if (!rules.proxies_for_http.IsEmpty()) http_proxy.server = rules.proxies_for_http.Get(); if (!rules.proxies_for_https.IsEmpty()) https_proxy.server = rules.proxies_for_https.Get(); if (!rules.proxies_for_ftp.IsEmpty()) ftp_proxy.server = rules.proxies_for_ftp.Get(); if (!rules.fallback_proxies.IsEmpty()) socks_proxy.server = rules.fallback_proxies.Get(); bypass_rules = rules.bypass_rules; return true; default: NOTREACHED() << "Unrecognized proxy config mode"; break; } return false; } std::unique_ptr<base::DictionaryValue> UIProxyConfig::ToPrefProxyConfig() const { switch (mode) { case MODE_DIRECT: { return ProxyConfigDictionary::CreateDirect(); } case MODE_AUTO_DETECT: { return ProxyConfigDictionary::CreateAutoDetect(); } case MODE_PAC_SCRIPT: { return ProxyConfigDictionary::CreatePacScript( automatic_proxy.pac_url.spec(), false); } case MODE_SINGLE_PROXY: { std::string spec; if (single_proxy.server.is_valid()) spec = single_proxy.server.ToURI(); return 
ProxyConfigDictionary::CreateFixedServers(spec, bypass_rules.ToString()); } case MODE_PROXY_PER_SCHEME: { std::string spec; ProxyConfigDictionary::EncodeAndAppendProxyServer( url::kHttpScheme, http_proxy.server, &spec); ProxyConfigDictionary::EncodeAndAppendProxyServer( url::kHttpsScheme, https_proxy.server, &spec); ProxyConfigDictionary::EncodeAndAppendProxyServer( url::kFtpScheme, ftp_proxy.server, &spec); ProxyConfigDictionary::EncodeAndAppendProxyServer( kSocksScheme, socks_proxy.server, &spec); return ProxyConfigDictionary::CreateFixedServers(spec, bypass_rules.ToString()); } default: break; } NOTREACHED() << "Unrecognized proxy config mode for preference"; return nullptr; } UIProxyConfig::ManualProxy* UIProxyConfig::MapSchemeToProxy( const std::string& scheme) { if (scheme == url::kHttpScheme) return &http_proxy; if (scheme == url::kHttpsScheme) return &https_proxy; if (scheme == url::kFtpScheme) return &ftp_proxy; if (scheme == kSocksScheme) return &socks_proxy; NOTREACHED() << "Invalid scheme: " << scheme; return NULL; } } // namespace chromeos
null
null
null
null
3,389
18,613
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
183,608
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Linux MegaRAID driver for SAS based RAID controllers * * Copyright (c) 2003-2013 LSI Corporation * Copyright (c) 2013-2014 Avago Technologies * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * Authors: Avago Technologies * Sreenivas Bagalkote * Sumant Patro * Bo Yang * Adam Radford * Kashyap Desai <[email protected]> * Sumit Saxena <[email protected]> * * Send feedback to: [email protected] * * Mail to: Avago Technologies, 350 West Trimble Road, Building 90, * San Jose, California 95131 */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/list.h> #include <linux/moduleparam.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include <linux/fs.h> #include <linux/compat.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/poll.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "megaraid_sas_fusion.h" #include "megaraid_sas.h" /* * Number of sectors per IO command * Will be set in megasas_init_mfi if user does not provide */ static unsigned int max_sectors; module_param_named(max_sectors, max_sectors, int, 0); MODULE_PARM_DESC(max_sectors, "Maximum number of sectors per IO 
command"); static int msix_disable; module_param(msix_disable, int, S_IRUGO); MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); static unsigned int msix_vectors; module_param(msix_vectors, int, S_IRUGO); MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW"); static int allow_vf_ioctls; module_param(allow_vf_ioctls, int, S_IRUGO); MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0"); static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; module_param(throttlequeuedepth, int, S_IRUGO); MODULE_PARM_DESC(throttlequeuedepth, "Adapter queue depth when throttled due to I/O timeout. Default: 16"); unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME; module_param(resetwaittime, int, S_IRUGO); MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout " "before resetting adapter. Default: 180"); int smp_affinity_enable = 1; module_param(smp_affinity_enable, int, S_IRUGO); MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disbale Default: enable(1)"); int rdpq_enable = 1; module_param(rdpq_enable, int, S_IRUGO); MODULE_PARM_DESC(rdpq_enable, " Allocate reply queue in chunks for large queue depth enable/disable Default: disable(0)"); unsigned int dual_qdepth_disable; module_param(dual_qdepth_disable, int, S_IRUGO); MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0"); unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; module_param(scmd_timeout, int, S_IRUGO); MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. 
See megasas_reset_timer."); MODULE_LICENSE("GPL"); MODULE_VERSION(MEGASAS_VERSION); MODULE_AUTHOR("[email protected]"); MODULE_DESCRIPTION("Avago MegaRAID SAS Driver"); int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); static int megasas_get_pd_list(struct megasas_instance *instance); static int megasas_ld_list_query(struct megasas_instance *instance, u8 query_type); static int megasas_issue_init_mfi(struct megasas_instance *instance); static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word); static void megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev); static int megasas_get_target_prop(struct megasas_instance *instance, struct scsi_device *sdev); /* * PCI ID table for all supported controllers */ static struct pci_device_id megasas_pci_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)}, /* xscale IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)}, /* ppc IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, /* ppc IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)}, /* gen2*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, /* gen2*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)}, /* skinny*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)}, /* skinny*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, /* xscale IOP, vega */ {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, /* xscale IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, /* Fusion */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)}, /* Plasma */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)}, /* Invader */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)}, /* Fury */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)}, /* Intruder */ 
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)}, /* Intruder 24 port*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)}, /* VENTURA */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)}, {} }; MODULE_DEVICE_TABLE(pci, megasas_pci_table); static int megasas_mgmt_majorno; struct megasas_mgmt_info megasas_mgmt_info; static struct fasync_struct *megasas_async_queue; static DEFINE_MUTEX(megasas_async_queue_mutex); static int megasas_poll_wait_aen; static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); static u32 support_poll_for_event; u32 megasas_dbg_lvl; static u32 support_device_change; /* define lock for aen poll */ spinlock_t poll_aen_lock; void megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, u8 alt_status); static u32 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs); static int megasas_adp_reset_gen2(struct megasas_instance *instance, struct megasas_register_set __iomem *reg_set); static irqreturn_t megasas_isr(int irq, void *devp); static u32 megasas_init_adapter_mfi(struct megasas_instance *instance); u32 megasas_build_and_issue_cmd(struct megasas_instance *instance, struct scsi_cmnd *scmd); static void megasas_complete_cmd_dpc(unsigned long instance_addr); int wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, int seconds); void megasas_fusion_ocr_wq(struct work_struct *work); static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, int initial); void megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { instance->instancet->fire_cmd(instance, 
cmd->frame_phys_addr, 0, instance->reg_set); return; } /** * megasas_get_cmd - Get a command from the free pool * @instance: Adapter soft state * * Returns a free command from the pool */ struct megasas_cmd *megasas_get_cmd(struct megasas_instance *instance) { unsigned long flags; struct megasas_cmd *cmd = NULL; spin_lock_irqsave(&instance->mfi_pool_lock, flags); if (!list_empty(&instance->cmd_pool)) { cmd = list_entry((&instance->cmd_pool)->next, struct megasas_cmd, list); list_del_init(&cmd->list); } else { dev_err(&instance->pdev->dev, "Command pool empty!\n"); } spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); return cmd; } /** * megasas_return_cmd - Return a cmd to free command pool * @instance: Adapter soft state * @cmd: Command packet to be returned to free command pool */ inline void megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { unsigned long flags; u32 blk_tags; struct megasas_cmd_fusion *cmd_fusion; struct fusion_context *fusion = instance->ctrl_context; /* This flag is used only for fusion adapter. 
* Wait for Interrupt for Polled mode DCMD */ if (cmd->flags & DRV_DCMD_POLLED_MODE) return; spin_lock_irqsave(&instance->mfi_pool_lock, flags); if (fusion) { blk_tags = instance->max_scsi_cmds + cmd->index; cmd_fusion = fusion->cmd_list[blk_tags]; megasas_return_cmd_fusion(instance, cmd_fusion); } cmd->scmd = NULL; cmd->frame_count = 0; cmd->flags = 0; memset(cmd->frame, 0, instance->mfi_frame_size); cmd->frame->io.context = cpu_to_le32(cmd->index); if (!fusion && reset_devices) cmd->frame->hdr.cmd = MFI_CMD_INVALID; list_add(&cmd->list, (&instance->cmd_pool)->next); spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); } static const char * format_timestamp(uint32_t timestamp) { static char buffer[32]; if ((timestamp & 0xff000000) == 0xff000000) snprintf(buffer, sizeof(buffer), "boot + %us", timestamp & 0x00ffffff); else snprintf(buffer, sizeof(buffer), "%us", timestamp); return buffer; } static const char * format_class(int8_t class) { static char buffer[6]; switch (class) { case MFI_EVT_CLASS_DEBUG: return "debug"; case MFI_EVT_CLASS_PROGRESS: return "progress"; case MFI_EVT_CLASS_INFO: return "info"; case MFI_EVT_CLASS_WARNING: return "WARN"; case MFI_EVT_CLASS_CRITICAL: return "CRIT"; case MFI_EVT_CLASS_FATAL: return "FATAL"; case MFI_EVT_CLASS_DEAD: return "DEAD"; default: snprintf(buffer, sizeof(buffer), "%d", class); return buffer; } } /** * megasas_decode_evt: Decode FW AEN event and print critical event * for information. 
* @instance: Adapter soft state */ static void megasas_decode_evt(struct megasas_instance *instance) { struct megasas_evt_detail *evt_detail = instance->evt_detail; union megasas_evt_class_locale class_locale; class_locale.word = le32_to_cpu(evt_detail->cl.word); if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL) dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n", le32_to_cpu(evt_detail->seq_num), format_timestamp(le32_to_cpu(evt_detail->time_stamp)), (class_locale.members.locale), format_class(class_locale.members.class), evt_detail->description); } /** * The following functions are defined for xscale * (deviceid : 1064R, PERC5) controllers */ /** * megasas_enable_intr_xscale - Enables interrupts * @regs: MFI register set */ static inline void megasas_enable_intr_xscale(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; regs = instance->reg_set; writel(0, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_xscale -Disables interrupt * @regs: MFI register set */ static inline void megasas_disable_intr_xscale(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; u32 mask = 0x1f; regs = instance->reg_set; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_xscale - returns the current FW status value * @regs: MFI register set */ static u32 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs) { return readl(&(regs)->outbound_msg_0); } /** * megasas_clear_interrupt_xscale - Check & clear interrupt * @regs: MFI register set */ static int megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) { u32 status; u32 mfiStatus = 0; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (status & MFI_OB_INTR_STATUS_MASK) mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; if (status 
& MFI_XSCALE_OMR0_CHANGE_INTERRUPT) mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; /* * Clear the interrupt by writing back the same value */ if (mfiStatus) writel(status, &regs->outbound_intr_status); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_status); return mfiStatus; } /** * megasas_fire_cmd_xscale - Sends command to the FW * @frame_phys_addr : Physical address of cmd * @frame_count : Number of frames for the command * @regs : MFI register set */ static inline void megasas_fire_cmd_xscale(struct megasas_instance *instance, dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) { unsigned long flags; spin_lock_irqsave(&instance->hba_lock, flags); writel((frame_phys_addr >> 3)|(frame_count), &(regs)->inbound_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); } /** * megasas_adp_reset_xscale - For controller reset * @regs: MFI register set */ static int megasas_adp_reset_xscale(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { u32 i; u32 pcidata; writel(MFI_ADP_RESET, &regs->inbound_doorbell); for (i = 0; i < 3; i++) msleep(1000); /* sleep for 3 secs */ pcidata = 0; pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata); dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata); if (pcidata & 0x2) { dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata); pcidata &= ~0x2; pci_write_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, pcidata); for (i = 0; i < 2; i++) msleep(1000); /* need to wait 2 secs again */ pcidata = 0; pci_read_config_dword(instance->pdev, MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata); dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata); if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) { dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata); pcidata = 0; pci_write_config_dword(instance->pdev, MFI_1068_FW_HANDSHAKE_OFFSET, pcidata); } } return 0; } /** * megasas_check_reset_xscale 
- For controller reset check * @regs: MFI register set */ static int megasas_check_reset_xscale(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) && (le32_to_cpu(*instance->consumer) == MEGASAS_ADPRESET_INPROG_SIGN)) return 1; return 0; } static struct megasas_instance_template megasas_instance_template_xscale = { .fire_cmd = megasas_fire_cmd_xscale, .enable_intr = megasas_enable_intr_xscale, .disable_intr = megasas_disable_intr_xscale, .clear_intr = megasas_clear_intr_xscale, .read_fw_status_reg = megasas_read_fw_status_reg_xscale, .adp_reset = megasas_adp_reset_xscale, .check_reset = megasas_check_reset_xscale, .service_isr = megasas_isr, .tasklet = megasas_complete_cmd_dpc, .init_adapter = megasas_init_adapter_mfi, .build_and_issue_cmd = megasas_build_and_issue_cmd, .issue_dcmd = megasas_issue_dcmd, }; /** * This is the end of set of functions & definitions specific * to xscale (deviceid : 1064R, PERC5) controllers */ /** * The following functions are defined for ppc (deviceid : 0x60) * controllers */ /** * megasas_enable_intr_ppc - Enables interrupts * @regs: MFI register set */ static inline void megasas_enable_intr_ppc(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; regs = instance->reg_set; writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); writel(~0x80000000, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_ppc - Disable interrupt * @regs: MFI register set */ static inline void megasas_disable_intr_ppc(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; u32 mask = 0xFFFFFFFF; regs = instance->reg_set; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_ppc - returns the current FW status value * @regs: MFI register set */ 
static u32
megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_clear_interrupt_ppc -	Check & clear interrupt
 * @regs:				MFI register set
 *
 * Returns a bitmask of MFI_INTR_FLAG_REPLY_MESSAGE and/or
 * MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE, or 0 if the interrupt was not ours.
 * The raw status is written back to the doorbell-clear register to ack.
 */
static int
megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
{
	u32 status, mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush (posted MMIO write) */
	readl(&regs->outbound_doorbell_clear);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_ppc -	Sends command to the FW
 * @instance:			Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 *
 * Frame count is encoded in bits [3:1] and bit 0 is set, per the ppc
 * inbound queue port layout.
 */
static inline void
megasas_fire_cmd_ppc(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_ppc -	For controller reset check
 * @instance:			Adapter soft state
 * @regs:			MFI register set
 *
 * Returns 1 while adapter recovery is in progress, else 0.
 */
static int
megasas_check_reset_ppc(struct megasas_instance *instance,
			struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

/* Instance ops for ppc (deviceid: 0x60) controllers; reuses xscale reset. */
static struct megasas_instance_template megasas_instance_template_ppc = {

	.fire_cmd = megasas_fire_cmd_ppc,
	.enable_intr = megasas_enable_intr_ppc,
	.disable_intr = megasas_disable_intr_ppc,
	.clear_intr = megasas_clear_intr_ppc,
	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_ppc,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * megasas_enable_intr_skinny -	Enables interrupts
 * @instance:			Adapter soft state
 *
 * First masks everything, then unmasks only the skinny interrupt bits.
 */
static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);

	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_skinny -	Disables interrupt
 * @instance:				Adapter soft state
 */
static inline void
megasas_disable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @regs:				 MFI register set
 */
static u32
megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_clear_interrupt_skinny -	Check & clear interrupt
 * @regs:				MFI register set
 *
 * Skinny controllers cannot distinguish reply vs. state-change from the
 * status register alone, so the FW state is consulted: FAULT means a
 * firmware state change, anything else is treated as a reply message.
 */
static int
megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
{
	u32 status;
	u32 mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
		return 0;
	}

	/*
	 * Check if it is our interrupt
	 */
	if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
	    MFI_STATE_FAULT) {
		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	} else
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_intr_status);

	/*
	 * dummy read to flush PCI
	 */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_skinny -	Sends command to the FW
 * @instance:			Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 *
 * 64-bit address is posted as two 32-bit writes (high then low); mmiowb()
 * orders the MMIO writes before the spinlock release.
 */
static inline void
megasas_fire_cmd_skinny(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel(upper_32_bits(frame_phys_addr),
	       &(regs)->inbound_high_queue_port);
	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
	       &(regs)->inbound_low_queue_port);
	mmiowb();
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_skinny -	For controller reset check
 * @instance:			Adapter soft state
 * @regs:			MFI register set
 */
static int
megasas_check_reset_skinny(struct megasas_instance *instance,
				struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

/* Instance ops for skinny controllers; reuses the gen2 reset sequence. */
static struct megasas_instance_template megasas_instance_template_skinny = {

	.fire_cmd = megasas_fire_cmd_skinny,
	.enable_intr = megasas_enable_intr_skinny,
	.disable_intr = megasas_disable_intr_skinny,
	.clear_intr = megasas_clear_intr_skinny,
	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_skinny,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};


/**
 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
 * controllers
 */

/**
 * megasas_enable_intr_gen2 -	Enables interrupts
 * @instance:			Adapter soft state
 */
static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	/* write ~0x00000005 (4 & 1) to the intr mask*/
	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_gen2 - Disables interrupt
 * @instance:			Adapter soft state
 */
static inline void
megasas_disable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @regs:			      MFI register set
 */
static u32
megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_clear_interrupt_gen2 -	Check & clear interrupt
 * @regs:				MFI register set
 *
 * The ack write to the doorbell-clear register happens only when the
 * interrupt was recognized as ours (mfiStatus non-zero).
 */
static int
megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
{
	u32 status;
	u32 mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	}
	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	}

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_gen2 -	Sends command to the FW
 * @instance:			Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
static inline void
megasas_fire_cmd_gen2(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}
/**
 * megasas_adp_reset_gen2 -	For controller reset
 * @instance:			Adapter soft state
 * @reg_set:			MFI register set
 *
 * Writes the magic unlock sequence (0,4,0xb,2,7,0xd) to the sequence
 * register, waits for DIAG_WRITE_ENABLE, then sets DIAG_RESET_ADAPTER and
 * polls until the controller clears it. Skinny controllers use the fusion
 * register offsets for the same procedure. Returns 0 on success, 1 if
 * either polling loop exhausts its retries.
 */
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
			struct megasas_register_set __iomem *reg_set)
{
	u32 retry = 0 ;
	u32 HostDiag;
	u32 __iomem *seq_offset = &reg_set->seq_offset;
	u32 __iomem *hostdiag_offset = &reg_set->host_diag;

	if (instance->instancet == &megasas_instance_template_skinny) {
		seq_offset = &reg_set->fusion_seq_offset;
		hostdiag_offset = &reg_set->fusion_host_diag;
	}

	writel(0, seq_offset);
	writel(4, seq_offset);
	writel(0xb, seq_offset);
	writel(2, seq_offset);
	writel(7, seq_offset);
	writel(0xd, seq_offset);

	msleep(1000);

	HostDiag = (u32)readl(hostdiag_offset);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
			retry, HostDiag);

		if (retry++ >= 100)
			return 1;

	}

	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);

	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);

	ssleep(10);

	HostDiag = (u32)readl(hostdiag_offset);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
				retry, HostDiag);

		if (retry++ >= 1000)
			return 1;

	}
	return 0;
}

/**
 * megasas_check_reset_gen2 -	For controller reset check
 * @instance:			Adapter soft state
 * @regs:			MFI register set
 */
static int
megasas_check_reset_gen2(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

/* Instance ops for gen2 (deviceid: 0x78, 0x79) controllers. */
static struct megasas_instance_template megasas_instance_template_gen2 = {

	.fire_cmd = megasas_fire_cmd_gen2,
	.enable_intr = megasas_enable_intr_gen2,
	.disable_intr = megasas_disable_intr_gen2,
	.clear_intr = megasas_clear_intr_gen2,
	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_gen2,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * This is the end of set of functions & definitions
 * specific to gen2 (deviceid : 0x78, 0x79) controllers
 */

/*
 * Template added for TB (Fusion)
 */
extern struct megasas_instance_template megasas_instance_template_fusion;

/**
 * megasas_issue_polled -	Issues a polling command
 * @instance:			Adapter soft state
 * @cmd:			Command packet to be issued
 *
 * For polling, MFI requires the cmd_status to be set to
 * MFI_STAT_INVALID_STATUS before posting. Returns the result of
 * wait_and_poll(), or DCMD_NOT_FIRED when the HBA is in critical error.
 * VF (requestorId) instances use the shorter routine wait time.
 */
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	struct megasas_header *frame_hdr = &cmd->frame->hdr;

	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	return wait_and_poll(instance, cmd, instance->requestorId ?
			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}

/**
 * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
 * @instance:			Adapter soft state
 * @cmd:			Command to be issued
 * @timeout:			Timeout in seconds
 *
 * This function waits on an event for the command to be returned from ISR.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 * Used to issue ioctl commands.
 */
int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
			  struct megasas_cmd *cmd, int timeout)
{
	int ret = 0;

	/* ISR flips cmd_status_drv away from INVALID when the cmd completes */
	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	if (timeout) {
		ret = wait_event_timeout(instance->int_cmd_wait_q,
				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
		if (!ret) {
			/* On timeout the cmd is deliberately left outstanding;
			 * the caller owns @cmd and decides how to recover. */
			dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
				__func__, __LINE__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->int_cmd_wait_q,
				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);

	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
		DCMD_SUCCESS : DCMD_FAILED;
}

/**
 * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
 * @instance:				Adapter soft state
 * @cmd_to_abort:			Previously issued cmd to be aborted
 * @timeout:				Timeout in seconds
 *
 * MFI firmware can abort previously issued AEN command (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and waits for return status.
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs */ static int megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd_to_abort, int timeout) { struct megasas_cmd *cmd; struct megasas_abort_frame *abort_fr; int ret = 0; cmd = megasas_get_cmd(instance); if (!cmd) return -1; abort_fr = &cmd->frame->abort; /* * Prepare and issue the abort frame */ abort_fr->cmd = MFI_CMD_ABORT; abort_fr->cmd_status = MFI_STAT_INVALID_STATUS; abort_fr->flags = cpu_to_le16(0); abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index); abort_fr->abort_mfi_phys_addr_lo = cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr)); abort_fr->abort_mfi_phys_addr_hi = cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); cmd->sync_cmd = 1; cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return DCMD_NOT_FIRED; } instance->instancet->issue_dcmd(instance, cmd); if (timeout) { ret = wait_event_timeout(instance->abort_cmd_wait_q, cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); if (!ret) { dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n", __func__, __LINE__); return DCMD_TIMEOUT; } } else wait_event(instance->abort_cmd_wait_q, cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS); cmd->sync_cmd = 0; megasas_return_cmd(instance, cmd); return (cmd->cmd_status_drv == MFI_STAT_OK) ? DCMD_SUCCESS : DCMD_FAILED; } /** * megasas_make_sgl32 - Prepares 32-bit SGL * @instance: Adapter soft state * @scp: SCSI command from the mid-layer * @mfi_sgl: SGL to be filled in * * If successful, this function returns the number of SG elements. Otherwise, * it returnes -1. 
 */
static int
megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			/* Each mid-layer SG entry maps 1:1 to a 32-bit MFI SGE */
			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl64 -	Prepares 64-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl_skinny -	Prepares IEEE SGL
 * @instance:			Adapter soft state
 * @scp:			SCSI command from the mid-layer
 * @mfi_sgl:			SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl_skinny(struct megasas_instance *instance,
		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge_skinny[i].length =
				cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge_skinny[i].phys_addr =
				cpu_to_le64(sg_dma_address(os_sgl));
			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
		}
	}
	return sge_count;
}

 /**
 * megasas_get_frame_count - Computes the number of frames
 * @instance:		Adapter soft state
 * @frame_type :	type of frame- io or pthru frame
 * @sge_count :		number of sg elements
 *
 * Returns the number of frames required for the given number of sge's
 * (sge_count). The count is capped at 8; values above 7 collapse to 8.
 */
static u32 megasas_get_frame_count(struct megasas_instance *instance,
			u8 sge_count, u8 frame_type)
{
	int num_cnt;
	int sge_bytes;
	u32 sge_sz;
	u32 frame_count = 0;

	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (instance->flag_ieee) {
		sge_sz = sizeof(struct megasas_sge_skinny);
	}

	/*
	 * Main frame can contain 2 SGEs for 64-bit SGLs and
	 * 3 SGEs for 32-bit SGLs for ldio &
	 * 1 SGEs for 64-bit SGLs and
	 * 2 SGEs for 32-bit SGLs for pthru frame
	 */
	if (unlikely(frame_type == PTHRU_FRAME)) {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 1;
		else
			num_cnt = sge_count - 2;
	} else {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 2;
		else
			num_cnt = sge_count - 3;
	}

	if (num_cnt > 0) {
		/* SGEs that overflow the main frame spill into extra frames */
		sge_bytes = sge_sz * num_cnt;

		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
	}
	/* Main frame */
	frame_count += 1;

	if (frame_count > 7)
		frame_count = 8;
	return frame_count;
}

/**
 * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
 */
static int
megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 is_logical;
	u32 device_id;
	u16 flags = 0;
	struct megasas_pthru_frame *pthru;

	is_logical = MEGASAS_IS_LOGICAL(scp->device);
	device_id = MEGASAS_DEV_INDEX(scp);
	pthru = (struct megasas_pthru_frame *)cmd->frame;

	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		flags = MFI_FRAME_DIR_READ;
	else if (scp->sc_data_direction == PCI_DMA_NONE)
		flags = MFI_FRAME_DIR_NONE;

	if (instance->flag_ieee == 1) {
		flags |= MFI_FRAME_IEEE;
	}

	/*
	 * Prepare the DCDB frame
	 */
	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
	pthru->cmd_status = 0x0;
	pthru->scsi_status = 0x0;
	pthru->target_id = device_id;
	pthru->lun = scp->device->lun;
	pthru->cdb_len = scp->cmd_len;
	pthru->timeout = 0;
	pthru->pad_0 = 0;
	pthru->flags = cpu_to_le16(flags);
	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));

	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);

	/*
	 * If the command is for the tape device, set the
	 * pthru timeout to the os layer timeout value.
	 */
	if (scp->device->type == TYPE_TAPE) {
		/* frame field is 16 bits; clamp longer OS timeouts */
		if ((scp->request->timeout / HZ) > 0xFFFF)
			pthru->timeout = cpu_to_le16(0xFFFF);
		else
			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
	}

	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee == 1) {
		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
						      &pthru->sgl);
	} else if (IS_DMA64) {
		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		pthru->sge_count = megasas_make_sgl64(instance, scp,
						      &pthru->sgl);
	} else
		pthru->sge_count = megasas_make_sgl32(instance, scp,
						      &pthru->sgl);

	if (pthru->sge_count > instance->max_num_sge) {
		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
			pthru->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
	pthru->sense_buf_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
	pthru->sense_buf_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
							PTHRU_FRAME);

	return cmd->frame_count;
}

/**
 * megasas_build_ldio -	Prepares IOs to logical devices
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared
 *
 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
 */
static int
megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 device_id;
	u8 sc = scp->cmnd[0];
	u16 flags = 0;
	struct megasas_io_frame *ldio;

	device_id = MEGASAS_DEV_INDEX(scp);
	ldio = (struct megasas_io_frame *)cmd->frame;

	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		flags = MFI_FRAME_DIR_READ;

	if (instance->flag_ieee == 1) {
		flags |= MFI_FRAME_IEEE;
	}

	/*
	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
	 */
	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
	ldio->cmd_status = 0x0;
	ldio->scsi_status = 0x0;
	ldio->target_id = device_id;
	ldio->timeout = 0;
	ldio->reserved_0 = 0;
	ldio->pad_0 = 0;
	ldio->flags = cpu_to_le16(flags);
	ldio->start_lba_hi = 0;
	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;

	/*
	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
	 */
	if (scp->cmd_len == 6) {
		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
						 ((u32) scp->cmnd[2] << 8) |
						 (u32) scp->cmnd[3]);

		/* 6-byte CDB LBA is only 21 bits wide */
		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
	}

	/*
	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
	 */
	else if (scp->cmd_len == 10) {
		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
					      ((u32) scp->cmnd[7] << 8));
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
	 */
	else if (scp->cmd_len == 12) {
		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
					      ((u32) scp->cmnd[7] << 16) |
					      ((u32) scp->cmnd[8] << 8) |
					      (u32) scp->cmnd[9]);
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
	 */
	else if (scp->cmd_len == 16) {
		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
					      ((u32) scp->cmnd[11] << 16) |
					      ((u32) scp->cmnd[12] << 8) |
					      (u32) scp->cmnd[13]);
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
						 ((u32) scp->cmnd[7] << 16) |
						 ((u32) scp->cmnd[8] << 8) |
						 (u32) scp->cmnd[9]);
		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee) {
		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
					      &ldio->sgl);
	} else if (IS_DMA64) {
		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
	} else
		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);

	if (ldio->sge_count > instance->max_num_sge) {
		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
			ldio->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
	ldio->sense_buf_phys_addr_hi = 0;
	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance,
			ldio->sge_count, IO_FRAME);

	return cmd->frame_count;
}

/**
 * megasas_cmd_type -	Checks if the cmd is for logical drive/sysPD
 *			and whether it's RW or non RW
 * @cmd:		SCSI command
 *
 * Classifies by opcode: the READ_*/WRITE_* family maps to READ_WRITE_LDIO
 * or READ_WRITE_SYSPDIO, everything else to the NON_READ_WRITE variants,
 * depending on whether the target device is a logical drive.
 */
inline int megasas_cmd_type(struct scsi_cmnd *cmd)
{
	int ret;

	switch (cmd->cmnd[0]) {
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
	case READ_6:
	case WRITE_6:
	case READ_16:
	case WRITE_16:
		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
		break;
	default:
		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
	}
	return ret;
}

 /**
 * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
 *					in FW
 * @instance:				Adapter soft state
 *
 * Diagnostic helper: logs frame physical addresses (and, at MEGASAS_DBG_LVL,
 * the SGL entries) of every OS-pending and internal sync command.
 */
static inline void
megasas_dump_pending_frames(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	int i,n;
	union megasas_sgl *mfi_sgl;
	struct megasas_io_frame *ldio;
	struct megasas_pthru_frame *pthru;
	u32 sgcount;
	u16 max_cmd = instance->max_fw_cmds;

	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
	if (IS_DMA64)
		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
	else
		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);

	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		if (!cmd->scmd)
			continue;
		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
			ldio = (struct megasas_io_frame *)cmd->frame;
			mfi_sgl = &ldio->sgl;
			sgcount = ldio->sge_count;
			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
		} else {
			pthru = (struct megasas_pthru_frame *) cmd->frame;
			mfi_sgl = &pthru->sgl;
			sgcount = pthru->sge_count;
			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
		}
		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
			for (n = 0; n < sgcount; n++) {
				if (IS_DMA64)
					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
						le32_to_cpu(mfi_sgl->sge64[n].length),
						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
				else
					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
						le32_to_cpu(mfi_sgl->sge32[n].length),
						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
			}
		}
	} /*for max_cmd*/
	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->sync_cmd == 1)
			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
	}
	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
}

/**
 * megasas_build_and_issue_cmd -	Builds an MFI frame for @scmd and fires it
 * @instance:				Adapter soft state
 * @scmd:				SCSI command from the mid-layer
 *
 * Returns 0 on success, SCSI_MLQUEUE_HOST_BUSY if no command frame is
 * available or the frame could not be built.
 */
u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd)
{
	struct megasas_cmd *cmd;
	u32 frame_count;

	cmd = megasas_get_cmd(instance);
	if (!cmd)
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Logical drive command
	 */
	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
		frame_count = megasas_build_ldio(instance, scmd, cmd);
	else
		frame_count = megasas_build_dcdb(instance, scmd, cmd);

	if (!frame_count)
		goto out_return_cmd;

	cmd->scmd = scmd;
	scmd->SCp.ptr = (char *)cmd;

	/*
	 * Issue the command to the FW
	 */
	atomic_inc(&instance->fw_outstanding);

	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
				cmd->frame_count-1, instance->reg_set);

	return 0;
out_return_cmd:
	megasas_return_cmd(instance, cmd);
	return SCSI_MLQUEUE_HOST_BUSY;
}


/**
 * megasas_queue_command -	Queue entry point
 * @shost:			SCSI host the command arrived on
 * @scmd:			SCSI command to be queued
 *
 * Rejects or requeues commands while the driver is unloading or the
 * adapter is recovering; completes cache-sync fast when the FW handles
 * caching itself; otherwise dispatches via the instance template.
 */
static int
megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	struct megasas_instance *instance;
	struct MR_PRIV_DEVICE *mr_device_priv_data;

	instance = (struct megasas_instance *)
	    scmd->device->host->hostdata;

	if (instance->unload == 1) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	if (instance->issuepend_done == 0)
		return SCSI_MLQUEUE_HOST_BUSY;


	/* Check for an mpio path and adjust behavior */
	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
		if (megasas_check_mpio_paths(instance, scmd) ==
		    (DID_REQUEUE << 16)) {
			return SCSI_MLQUEUE_HOST_BUSY;
		} else {
			scmd->result = DID_NO_CONNECT << 16;
			scmd->scsi_done(scmd);
			return 0;
		}
	}

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	mr_device_priv_data = scmd->device->hostdata;
	if (!mr_device_priv_data) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return SCSI_MLQUEUE_HOST_BUSY;

	if (mr_device_priv_data->tm_busy)
		return SCSI_MLQUEUE_DEVICE_BUSY;


	scmd->result = 0;

	if (MEGASAS_IS_LOGICAL(scmd->device) &&
	    (scmd->device->id >= instance->fw_supported_vd_count ||
		scmd->device->lun)) {
		scmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

	/* FW takes care of flush cache on its own for Virtual Disk.
	 * No need to send it down when fw_sync_cache_support is off. */
	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
	    MEGASAS_IS_LOGICAL(scmd->device) &&
	    (!instance->fw_sync_cache_support)) {
		scmd->result = DID_OK << 16;
		goto out_done;
	}

	return instance->instancet->build_and_issue_cmd(instance, scmd);

 out_done:
	scmd->scsi_done(scmd);
	return 0;
}

/* Map a Scsi_Host number back to its megasas_instance, or NULL. */
static struct megasas_instance *megasas_lookup_instance(u16 host_no)
{
	int i;

	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
		if ((megasas_mgmt_info.instance[i]) &&
		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
			return megasas_mgmt_info.instance[i];
	}

	return NULL;
}

/*
* megasas_set_dynamic_target_properties -
* Device property set by driver may not be static and it is required to be
* updated after OCR
*
* set tm_capable.
* set dma alignment (only for eedp protection enable vd).
*
* @sdev: OS provided scsi device
*
* Returns void
*/
void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
{
	u16 pd_index = 0, ld;
	u32 device_id;
	struct megasas_instance *instance;
	struct fusion_context *fusion;
	struct MR_PRIV_DEVICE *mr_device_priv_data;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	struct MR_LD_RAID *raid;
	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;

	instance = megasas_lookup_instance(sdev->host->host_no);
	fusion = instance->ctrl_context;
	mr_device_priv_data = sdev->hostdata;

	/* Only fusion-class controllers with per-device data need updating */
	if (!fusion || !mr_device_priv_data)
		return;

	if (MEGASAS_IS_LOGICAL(sdev)) {
		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
					+ sdev->id;
		/* map_id & 1 selects the currently active of the two maps */
		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
		if (ld >= instance->fw_supported_vd_count)
			return;
		raid = MR_LdRaidGet(ld, local_map_ptr);

		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

		mr_device_priv_data->is_tm_capable =
			raid->capability.tmCapable;
	} else if (instance->use_seqnum_jbod_fp) {
		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
			sdev->id;
		pd_sync = (void *)fusion->pd_seq_sync
				[(instance->pd_seq_map_id - 1) & 1];
		mr_device_priv_data->is_tm_capable =
			pd_sync->seq[pd_index].capability.tmCapable;
	}
}

/*
 * megasas_set_nvme_device_properties -
 * set nomerges=2
 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
 * set maximum io transfer = MDTS of NVME device provided by MR firmware.
 *
 * MR firmware provides value in KB. Caller of this function converts
 * kb into bytes.
 *
 * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size,
 * MR firmware provides value 128 as (32 * 4K) = 128K.
* * @sdev: scsi device * @max_io_size: maximum io transfer size * */ static inline void megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) { struct megasas_instance *instance; u32 mr_nvme_pg_size; instance = (struct megasas_instance *)sdev->host->hostdata; mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE); blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue); blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); } /* * megasas_set_static_target_properties - * Device property set by driver are static and it is not required to be * updated after OCR. * * set io timeout * set device queue depth * set nvme device properties. see - megasas_set_nvme_device_properties * * @sdev: scsi device * @is_target_prop true, if fw provided target properties. */ static void megasas_set_static_target_properties(struct scsi_device *sdev, bool is_target_prop) { u16 target_index = 0; u8 interface_type; u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; u32 tgt_device_qd; struct megasas_instance *instance; struct MR_PRIV_DEVICE *mr_device_priv_data; instance = megasas_lookup_instance(sdev->host->host_no); mr_device_priv_data = sdev->hostdata; interface_type = mr_device_priv_data->interface_type; /* * The RAID firmware may require extended timeouts. 
	 */
	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);

	/* NOTE(review): target_index is computed but not read afterwards in
	 * this function — looks like a leftover; confirm before removing.
	 */
	target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;

	/* Pick a default queue depth from the device's transport type. */
	switch (interface_type) {
	case SAS_PD:
		device_qd = MEGASAS_SAS_QD;
		break;
	case SATA_PD:
		device_qd = MEGASAS_SATA_QD;
		break;
	case NVME_PD:
		device_qd = MEGASAS_NVME_QD;
		break;
	}

	if (is_target_prop) {
		/* Firmware-provided depth wins if sane (non-zero and within
		 * the host's can_queue).
		 */
		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
		if (tgt_device_qd &&
		    (tgt_device_qd <= instance->host->can_queue))
			device_qd = tgt_device_qd;

		/* max_io_size_kb will be set to non zero for
		 * nvme based vd and syspd.
		 */
		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
	}

	if (instance->nvme_page_size && max_io_size_kb)
		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));

	scsi_change_queue_depth(sdev, device_qd);

}

/*
 * megasas_slave_configure - per-device setup called by the SCSI midlayer.
 * Caches PD info / target properties from firmware (under hba_mutex) and
 * applies static + dynamic device properties.
 */
static int megasas_slave_configure(struct scsi_device *sdev)
{
	u16 pd_index = 0;
	struct megasas_instance *instance;
	int ret_target_prop = DCMD_FAILED;
	bool is_target_prop = false;

	instance = megasas_lookup_instance(sdev->host->host_no);
	if (instance->pd_list_not_supported) {
		/* Without a PD list, only expose system PDs */
		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
				sdev->id;
			if (instance->pd_list[pd_index].driveState !=
				MR_PD_STATE_SYSTEM)
				return -ENXIO;
		}
	}

	mutex_lock(&instance->hba_mutex);
	/* Send DCMD to Firmware and cache the information */
	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
		megasas_get_pd_info(instance, sdev);

	/* Some ventura firmware may not have instance->nvme_page_size set.
	 * Do not send MR_DCMD_DRV_GET_TARGET_PROP
	 */
	if ((instance->tgt_prop) && (instance->nvme_page_size))
		ret_target_prop = megasas_get_target_prop(instance, sdev);

	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ?
				true : false;
	megasas_set_static_target_properties(sdev, is_target_prop);

	mutex_unlock(&instance->hba_mutex);

	/* This sdev property may change post OCR */
	megasas_set_dynamic_target_properties(sdev);

	return 0;
}

/*
 * megasas_slave_alloc - allocate per-device private data.
 * Rejects physical devices that are not SYSTEM PDs (when the PD list is
 * available); the private data is freed in megasas_slave_destroy().
 */
static int megasas_slave_alloc(struct scsi_device *sdev)
{
	u16 pd_index = 0;
	struct megasas_instance *instance ;
	struct MR_PRIV_DEVICE *mr_device_priv_data;

	instance = megasas_lookup_instance(sdev->host->host_no);
	if (!MEGASAS_IS_LOGICAL(sdev)) {
		/*
		 * Open the OS scan to the SYSTEM PD
		 */
		pd_index =
			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
			sdev->id;
		if ((instance->pd_list_not_supported ||
			instance->pd_list[pd_index].driveState ==
			MR_PD_STATE_SYSTEM)) {
			goto scan_target;
		}
		return -ENXIO;
	}

scan_target:
	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
					GFP_KERNEL);
	if (!mr_device_priv_data)
		return -ENOMEM;
	sdev->hostdata = mr_device_priv_data;
	atomic_set(&mr_device_priv_data->r1_ldio_hint,
		   instance->r1_ldio_hint_default);
	return 0;
}

/* Counterpart of megasas_slave_alloc(): release per-device private data. */
static void megasas_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}

/*
* megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
*                                       kill adapter
* @instance:				Adapter soft state
*
*/
static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
{
	int i;
	struct megasas_cmd *cmd_mfi;
	struct megasas_cmd_fusion *cmd_fusion;
	struct fusion_context *fusion = instance->ctrl_context;

	/* Find all outstanding ioctls */
	if (fusion) {
		/* Fusion: sync MFI commands are referenced indirectly via
		 * the fusion command's sync_cmd_idx.
		 */
		for (i = 0; i < instance->max_fw_cmds; i++) {
			cmd_fusion = fusion->cmd_list[i];
			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
				if (cmd_mfi->sync_cmd &&
				    cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
					megasas_complete_cmd(instance,
							     cmd_mfi, DID_OK);
			}
		}
	} else {
		for (i = 0; i < instance->max_fw_cmds; i++) {
			cmd_mfi = instance->cmd_list[i];
			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
				MFI_CMD_ABORT)
				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
		}
	}
}

void
megaraid_sas_kill_hba(struct megasas_instance *instance)
{
	/* Set critical error to block I/O & ioctls in case caller didn't */
	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
	/* Wait 1 second to ensure IO or ioctls in build have posted */
	msleep(1000);
	/* Skinny and fusion controllers use the 'doorbell' register;
	 * older controllers use 'inbound_doorbell' to stop the adapter.
	 */
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
	    (instance->ctrl_context)) {
		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
		/* Flush */
		readl(&instance->reg_set->doorbell);
		if (instance->requestorId && instance->peerIsPresent)
			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
	} else {
		writel(MFI_STOP_ADP,
			&instance->reg_set->inbound_doorbell);
	}
	/* Complete outstanding ioctls when adapter is killed */
	megasas_complete_outstanding_ioctls(instance);
}

 /**
  * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
  *					restored to max value
  * @instance:			Adapter soft state
  *
  */
void
megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
{
	unsigned long flags;

	/* Lift throttling once the FW-busy window (5s) has elapsed and
	 * outstanding commands have drained below the throttle depth.
	 */
	if (instance->flag & MEGASAS_FW_BUSY
	    && time_after(jiffies, instance->last_time + 5 * HZ)
	    && atomic_read(&instance->fw_outstanding) <
	    instance->throttlequeuedepth + 1) {

		spin_lock_irqsave(instance->host->host_lock, flags);
		instance->flag &= ~MEGASAS_FW_BUSY;

		instance->host->can_queue = instance->cur_can_queue;
		spin_unlock_irqrestore(instance->host->host_lock, flags);
	}
}

/**
 * megasas_complete_cmd_dpc	 -	Returns FW's controller structure
 * @instance_addr:			Address of adapter soft state
 *
 * Tasklet to complete cmds
 */
static void megasas_complete_cmd_dpc(unsigned long instance_addr)
{
	u32 producer;
	u32 consumer;
	u32 context;
	struct megasas_cmd *cmd;
	struct megasas_instance *instance =
				(struct megasas_instance *)instance_addr;
	unsigned long flags;

	/* If we have already declared adapter dead, donot complete cmds */
	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
		return;

	spin_lock_irqsave(&instance->completion_lock,
 flags);

	producer = le32_to_cpu(*instance->producer);
	consumer = le32_to_cpu(*instance->consumer);

	/* Drain the reply ring: each entry is a context (command index). */
	while (consumer != producer) {
		context = le32_to_cpu(instance->reply_queue[consumer]);
		if (context >= instance->max_fw_cmds) {
			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
				context);
			BUG();
		}

		cmd = instance->cmd_list[context];

		megasas_complete_cmd(instance, cmd, DID_OK);

		consumer++;
		/* Ring holds max_fw_cmds + 1 slots; wrap around. */
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}

	*instance->consumer = cpu_to_le32(producer);

	spin_unlock_irqrestore(&instance->completion_lock, flags);

	/*
	 * Check if we can restore can_queue
	 */
	megasas_check_and_restore_queue_depth(instance);
}

/**
 * megasas_start_timer - Initializes a timer object
 * @instance:		Adapter soft state
 * @timer:		timer object to be initialized
 * @fn:			timer function
 * @interval:		time interval between timer function call
 *
 */
void megasas_start_timer(struct megasas_instance *instance,
			struct timer_list *timer,
			void *fn, unsigned long interval)
{
	/* Legacy timer API: instance is passed via timer->data. */
	init_timer(timer);
	timer->expires = jiffies + interval;
	timer->data = (unsigned long)instance;
	timer->function = fn;
	add_timer(timer);
}

static void
megasas_internal_reset_defer_cmds(struct megasas_instance *instance);

static void
process_fw_state_change_wq(struct work_struct *work);

/*
 * megasas_do_ocr - kick off an Online Controller Reset: disable interrupts,
 * mark the adapter as in-fault, defer pending commands and schedule the
 * firmware state-change work.
 */
void megasas_do_ocr(struct megasas_instance *instance)
{
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
	}
	instance->instancet->disable_intr(instance);
	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
	instance->issuepend_done = 0;

	atomic_set(&instance->fw_outstanding, 0);
	megasas_internal_reset_defer_cmds(instance);
	process_fw_state_change_wq(&instance->work_init);
}

/*
 * megasas_get_ld_vf_affiliation_111 - fetch LD/VF affiliation from older
 * (PlasmaFW111) firmware; on non-initial calls, compare against the cached
 * copy and return 1 when a rescan is needed.
 */
static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
					    int initial)
{
	struct megasas_cmd *cmd;
	struct
 megasas_dcmd_frame *dcmd;
	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
	dma_addr_t new_affiliation_111_h;
	int ld, retval = 0;
	u8 thisVf;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
		       "Failed to get cmd for scsi%d\n",
			instance->host->host_no);
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	if (!instance->vf_affiliation_111) {
		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
		       "affiliation for scsi%d\n", instance->host->host_no);
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	if (initial)
			/* First call: firmware fills the persistent buffer */
			memset(instance->vf_affiliation_111, 0,
			       sizeof(struct MR_LD_VF_AFFILIATION_111));
	else {
		/* Later calls: fetch into a scratch DMA buffer so the new
		 * map can be diffed against the cached one.
		 */
		new_affiliation_111 =
			pci_alloc_consistent(instance->pdev,
					     sizeof(struct MR_LD_VF_AFFILIATION_111),
					     &new_affiliation_111_h);
		if (!new_affiliation_111) {
			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
			       "memory for new affiliation for scsi%d\n",
			       instance->host->host_no);
			megasas_return_cmd(instance, cmd);
			return -ENOMEM;
		}
		memset(new_affiliation_111, 0,
		       sizeof(struct MR_LD_VF_AFFILIATION_111));
	}

	/* Build the DCMD frame for MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111. */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len =
		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);

	if (initial)
		dcmd->sgl.sge32[0].phys_addr =
			cpu_to_le32(instance->vf_affiliation_111_h);
	else
		dcmd->sgl.sge32[0].phys_addr =
			cpu_to_le32(new_affiliation_111_h);

	dcmd->sgl.sge32[0].length = cpu_to_le32(
		sizeof(struct MR_LD_VF_AFFILIATION_111));

	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
	       "scsi%d\n", instance->host->host_no);

	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
		       " failed with status 0x%x for scsi%d\n",
		       dcmd->cmd_status, instance->host->host_no);
		retval = 1; /* Do a scan if we couldn't get affiliation */
		goto out;
	}

	if (!initial) {
		/* Any per-LD policy change for this VF means the cached map
		 * is stale: copy the new one in and request a rescan.
		 */
		thisVf = new_affiliation_111->thisVf;
		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
			    new_affiliation_111->map[ld].policy[thisVf]) {
				dev_warn(&instance->pdev->dev, "SR-IOV: "
				       "Got new LD/VF affiliation for scsi%d\n",
				       instance->host->host_no);
				memcpy(instance->vf_affiliation_111,
				       new_affiliation_111,
				       sizeof(struct MR_LD_VF_AFFILIATION_111));
				retval = 1;
				goto out;
			}
	}
out:
	if (new_affiliation_111) {
		pci_free_consistent(instance->pdev,
				    sizeof(struct MR_LD_VF_AFFILIATION_111),
				    new_affiliation_111,
				    new_affiliation_111_h);
	}

	megasas_return_cmd(instance, cmd);

	return retval;
}

/*
 * megasas_get_ld_vf_affiliation_12 - fetch LD/VF affiliation from newer
 * firmware (variable-size MR_LD_VF_MAP entries); on non-initial calls,
 * diff both directions against the cached map and return 1 for rescan.
 */
static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
					    int initial)
{
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
	dma_addr_t new_affiliation_h;
	int i, j, retval = 0, found = 0, doscan = 0;
	u8 thisVf;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
		       "Failed to get cmd for scsi%d\n",
		       instance->host->host_no);
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	if (!instance->vf_affiliation) {
		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
		       "affiliation for scsi%d\n", instance->host->host_no);
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	if (initial)
		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
		       sizeof(struct MR_LD_VF_AFFILIATION));
	else {
		/* Scratch DMA buffer for the fresh map, diffed below. */
		new_affiliation =
			pci_alloc_consistent(instance->pdev,
					     (MAX_LOGICAL_DRIVES + 1) *
					     sizeof(struct MR_LD_VF_AFFILIATION),
					     &new_affiliation_h);
		if (!new_affiliation) {
			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
			       "memory for new affiliation for scsi%d\n",
			       instance->host->host_no);
			megasas_return_cmd(instance, cmd);
			return -ENOMEM;
		}
		memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
		       sizeof(struct MR_LD_VF_AFFILIATION));
	}

	/* Build the DCMD frame for MR_DCMD_LD_VF_MAP_GET_ALL_LDS. */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
		sizeof(struct MR_LD_VF_AFFILIATION));
	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);

	if (initial)
		dcmd->sgl.sge32[0].phys_addr =
			cpu_to_le32(instance->vf_affiliation_h);
	else
		dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(new_affiliation_h);

	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
		sizeof(struct MR_LD_VF_AFFILIATION));

	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
	       "scsi%d\n", instance->host->host_no);

	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
		       " failed with status 0x%x for scsi%d\n",
		       dcmd->cmd_status, instance->host->host_no);
		retval = 1; /* Do a scan if we couldn't get affiliation */
		goto out;
	}

	if (!initial) {
		if (!new_affiliation->ldCount) {
			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
				 "affiliation for passive path for scsi%d\n",
				 instance->host->host_no);
			retval = 1;
			goto out;
		}
		/* Pass 1: for each LD in the NEW map, find its match in the
		 * saved map and compare this VF's policy. Map entries are
		 * variable-size, hence the byte-offset walk via ->size.
		 */
		newmap = new_affiliation->map;
		savedmap = instance->vf_affiliation->map;
		thisVf = new_affiliation->thisVf;
		for (i = 0 ; i < new_affiliation->ldCount; i++) {
			found = 0;
			for (j = 0; j < instance->vf_affiliation->ldCount;
			     j++) {
				if (newmap->ref.targetId ==
				    savedmap->ref.targetId) {
					found = 1;
					if (newmap->policy[thisVf] !=
					    savedmap->policy[thisVf]) {
						doscan = 1;
						goto out;
					}
				}
				savedmap = (struct MR_LD_VF_MAP *)
					((unsigned char *)savedmap +
					 savedmap->size);
			}
			if (!found && newmap->policy[thisVf] !=
			    MR_LD_ACCESS_HIDDEN) {
				doscan = 1;
				goto out;
			}
			newmap = (struct MR_LD_VF_MAP *)
				((unsigned char *)newmap + newmap->size);
		}
		/* Pass 2: reverse direction — for each LD in the SAVED map,
		 * look for it in the new map; a vanished or changed entry
		 * also forces a rescan.
		 */
		newmap = new_affiliation->map;
		savedmap = instance->vf_affiliation->map;

		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
			found = 0;
			for (j = 0 ; j < new_affiliation->ldCount; j++) {
				if (savedmap->ref.targetId ==
				    newmap->ref.targetId) {
					found = 1;
					if (savedmap->policy[thisVf] !=
					    newmap->policy[thisVf]) {
						doscan = 1;
						goto out;
					}
				}
				newmap = (struct MR_LD_VF_MAP *)
					((unsigned char *)newmap +
					 newmap->size);
			}
			if (!found && savedmap->policy[thisVf] !=
			    MR_LD_ACCESS_HIDDEN) {
				doscan = 1;
				goto out;
			}
			savedmap = (struct MR_LD_VF_MAP *)
				((unsigned char *)savedmap +
				 savedmap->size);
		}
	}
out:
	if (doscan) {
		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
		       "affiliation for scsi%d\n", instance->host->host_no);
		memcpy(instance->vf_affiliation, new_affiliation,
		       new_affiliation->size);
		retval = 1;
	}

	if (new_affiliation)
		pci_free_consistent(instance->pdev,
				    (MAX_LOGICAL_DRIVES + 1) *
				    sizeof(struct MR_LD_VF_AFFILIATION),
				    new_affiliation, new_affiliation_h);
	megasas_return_cmd(instance, cmd);

	return retval;
}

/* This function will get the current SR-IOV LD/VF affiliation */
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
					int initial)
{
	int retval;

	/* Dispatch by firmware generation (PlasmaFW111 vs newer). */
	if (instance->PlasmaFW111)
		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
	else
		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
	return retval;
}

/* This function will tell FW to start the SR-IOV heartbeat */
int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
					 int initial)
{
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	int retval = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
		       "Failed to get cmd for scsi%d\n",
		       instance->host->host_no);
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	if (initial) {
		/* Shared host memory polled by both driver and firmware
		 * to detect a dead peer; allocated once.
		 */
		instance->hb_host_mem =
			pci_zalloc_consistent(instance->pdev,
					      sizeof(struct MR_CTRL_HB_HOST_MEM),
					      &instance->hb_host_mem_h);
		if (!instance->hb_host_mem) {
			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
			       " memory for heartbeat host memory for scsi%d\n",
			       instance->host->host_no);
			retval = -ENOMEM;
			goto out;
		}
	}

	/* Build the DCMD asking FW to use the shared heartbeat buffer. */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));

	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
	       instance->host->host_no);

	/* Blocked (interrupt-driven) issue when interrupts are live,
	 * polled otherwise.
	 */
	if (instance->ctrl_context && !instance->mask_interrupts)
		retval = megasas_issue_blocked_cmd(instance, cmd,
			MEGASAS_ROUTINE_WAIT_TIME_VF);
	else
		retval = megasas_issue_polled(instance, cmd);

	if (retval) {
		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
			"_MEM_ALLOC DCMD %s for scsi%d\n",
			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
			"timed out" : "failed", instance->host->host_no);
		retval = 1;
	}

out:
	megasas_return_cmd(instance, cmd);

	return retval;
}

/* Handler for SR-IOV heartbeat */
void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
{
	struct megasas_instance *instance =
		(struct megasas_instance *)instance_addr;

	/* Firmware increments fwCounter; if it moved since the last tick,
	 * mirror it into driverCounter and re-arm the timer. A stalled
	 * counter means the PF/firmware is gone — schedule recovery work.
	 */
	if (instance->hb_host_mem->HB.fwCounter !=
	    instance->hb_host_mem->HB.driverCounter) {
		instance->hb_host_mem->HB.driverCounter =
			instance->hb_host_mem->HB.fwCounter;
		mod_timer(&instance->sriov_heartbeat_timer,
			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
	} else {
		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
		       "completed for scsi%d\n", instance->host->host_no);
		schedule_work(&instance->work_init);
	}
}

/**
 * megasas_wait_for_outstanding -	Wait for all outstanding cmds
 * @instance:				Adapter soft state
 *
 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
 * complete all its outstanding commands. Returns error if one or more IOs
 * are pending after this time period. It also marks the controller dead.
 */
static int megasas_wait_for_outstanding(struct megasas_instance *instance)
{
	int i, sl, outstanding;
	u32 reset_index;
	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
	unsigned long flags;
	struct list_head clist_local;
	struct megasas_cmd *reset_cmd;
	u32 fw_state;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
		__func__, __LINE__);
		return FAILED;
	}

	/* An internal reset is in progress: wait for it to finish, then
	 * replay/complete the commands it had parked.
	 */
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {

		/* Take the internal-reset pending queue private first. */
		INIT_LIST_HEAD(&clist_local);
		spin_lock_irqsave(&instance->hba_lock, flags);
		list_splice_init(&instance->internal_reset_pending_q,
				&clist_local);
		spin_unlock_irqrestore(&instance->hba_lock, flags);

		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
		for (i = 0; i < wait_time; i++) {
			msleep(1000);
			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
				break;
		}

		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
			return FAILED;
		}

		reset_index = 0;
		while (!list_empty(&clist_local)) {
			reset_cmd = list_entry((&clist_local)->next,
						struct megasas_cmd, list);
			list_del_init(&reset_cmd->list);
			if (reset_cmd->scmd) {
				/* SCSI command: hand back to midlayer for requeue */
				reset_cmd->scmd->result = DID_REQUEUE << 16;
				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
					reset_index, reset_cmd,
					reset_cmd->scmd->cmnd[0]);

				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
				megasas_return_cmd(instance, reset_cmd);
			} else if (reset_cmd->sync_cmd) {
				/* Internal sync command: re-fire at firmware */
				dev_notice(&instance->pdev->dev, "%p synch cmds"
						"reset queue\n",
						reset_cmd);

				reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
				instance->instancet->fire_cmd(instance,
						reset_cmd->frame_phys_addr,
						0, instance->reg_set);
			} else {
				dev_notice(&instance->pdev->dev, "%p unexpected"
					"cmds lst\n",
					reset_cmd);
			}
			reset_index++;
		}

		return SUCCESS;
	}

	/* Normal path: poll until firmware drains its outstanding I/O. */
	for (i = 0; i < resetwaittime; i++) {
		outstanding = atomic_read(&instance->fw_outstanding);

		if (!outstanding)
			break;
		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
			       "commands to complete\n",i,outstanding);
			/*
			 * Call cmd completion routine. Cmd to be
			 * be completed directly without depending on isr.
			 */
			megasas_complete_cmd_dpc((unsigned long)instance);
		}

		msleep(1000);
	}

	i = 0;
	outstanding = atomic_read(&instance->fw_outstanding);
	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;

	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
		goto no_outstanding;

	if (instance->disableOnlineCtrlReset)
		goto kill_hba_and_failed;
	/* Up to 4 OCR attempts (i = 0..3); a 4th fault kills the HBA. */
	do {
		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
			dev_info(&instance->pdev->dev,
				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
			if (i == 3)
				goto kill_hba_and_failed;
			megasas_do_ocr(instance);

			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
				__func__, __LINE__);
				return FAILED;
			}
			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
				__func__, __LINE__);

			/* Give firmware 5 seconds to settle after OCR. */
			for (sl = 0; sl < 10; sl++)
				msleep(500);

			outstanding = atomic_read(&instance->fw_outstanding);

			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
				goto no_outstanding;
		}
		i++;
	} while (i <= 3);

no_outstanding:

	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
		__func__, __LINE__);
	return SUCCESS;

kill_hba_and_failed:

	/* Reset not supported, kill adapter */
	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
		" disableOnlineCtrlReset %d fw_outstanding %d \n",
		__func__, __LINE__, instance->host->host_no,
		instance->disableOnlineCtrlReset, atomic_read(&instance->fw_outstanding));
	megasas_dump_pending_frames(instance);
	megaraid_sas_kill_hba(instance);

	return FAILED;
}

/**
 * megasas_generic_reset -	Generic reset routine
 * @scmd:			Mid-layer SCSI command
 *
 * This routine implements a generic reset handler for device, bus and host
 * reset requests. Device, bus and host specific reset handlers can use this
 * function after they do their specific tasks.
 */
static int megasas_generic_reset(struct scsi_cmnd *scmd)
{
	int ret_val;
	struct megasas_instance *instance;

	instance = (struct megasas_instance *)scmd->device->host->hostdata;

	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
		 scmd->cmnd[0], scmd->retries);

	/* A previously failed recovery is terminal — don't retry. */
	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
		return FAILED;
	}

	ret_val = megasas_wait_for_outstanding(instance);
	if (ret_val == SUCCESS)
		dev_notice(&instance->pdev->dev, "reset successful\n");
	else
		dev_err(&instance->pdev->dev, "failed to do reset\n");

	return ret_val;
}

/**
 * megasas_reset_timer - quiesce the adapter if required
 * @scmd:		scsi cmnd
 *
 * Sets the FW busy flag and reduces the host->can_queue if the
 * cmd has not been completed within the timeout period.
 */
static enum
blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
{
	struct megasas_instance *instance;
	unsigned long flags;

	/* Past twice the SCSI command timeout, stop extending and let the
	 * error handler take over.
	 */
	if (time_after(jiffies, scmd->jiffies_at_alloc +
				(scmd_timeout * 2) * HZ)) {
		return BLK_EH_NOT_HANDLED;
	}

	instance = (struct megasas_instance *)scmd->device->host->hostdata;
	if (!(instance->flag & MEGASAS_FW_BUSY)) {
		/* FW is busy, throttle IO */
		spin_lock_irqsave(instance->host->host_lock, flags);

		instance->host->can_queue = instance->throttlequeuedepth;
		instance->last_time = jiffies;
		instance->flag |= MEGASAS_FW_BUSY;

		spin_unlock_irqrestore(instance->host->host_lock, flags);
	}
	return BLK_EH_RESET_TIMER;
}

/**
 * megasas_dump_frame -	This function will dump MPT/MFI frame
 */
static inline void
megasas_dump_frame(void *mpi_request, int sz)
{
	int i;
	__le32 *mfp = (__le32 *)mpi_request;

	/* Hex-dump the frame, 8 dwords per line. */
	printk(KERN_INFO "IO request frame:\n\t");
	for (i = 0; i < sz / sizeof(__le32); i++) {
		if (i && ((i % 8) == 0))
			printk("\n\t");
		printk("%08x ", le32_to_cpu(mfp[i]));
	}
	printk("\n");
}

/**
 * megasas_reset_bus_host -	Bus & host reset handler entry point
 */
static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
{
	int ret;
	struct megasas_instance *instance;

	instance = (struct megasas_instance *)scmd->device->host->hostdata;

	scmd_printk(KERN_INFO, scmd,
		"Controller reset is requested due to IO timeout\n"
		"SCSI command pointer: (%p)\t SCSI host state: %d\t"
		" SCSI host busy: %d\t FW outstanding: %d\n",
		scmd, scmd->device->host->shost_state,
		atomic_read((atomic_t *)&scmd->device->host->host_busy),
		atomic_read(&instance->fw_outstanding));

	/*
	 * First wait for all commands to complete
	 */
	if (instance->ctrl_context) {
		/* Fusion path: dump the stuck frame, then full fusion reset */
		struct megasas_cmd_fusion *cmd;
		cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
		if (cmd)
			megasas_dump_frame(cmd->io_request,
				sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
		ret = megasas_reset_fusion(scmd->device->host,
				SCSIIO_TIMEOUT_OCR);
	} else
		ret = megasas_generic_reset(scmd);

	return ret;
}

/**
 * megasas_task_abort - Issues task abort request
 * to firmware
 * (supported only for fusion adapters)
 * @scmd:		SCSI command pointer
 */
static int megasas_task_abort(struct scsi_cmnd *scmd)
{
	int ret;
	struct megasas_instance *instance;

	instance = (struct megasas_instance *)scmd->device->host->hostdata;

	if (instance->ctrl_context)
		ret = megasas_task_abort_fusion(scmd);
	else {
		/* MFI (non-fusion) adapters have no task-abort support */
		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
		ret = FAILED;
	}

	return ret;
}

/**
 * megasas_reset_target:  Issues target reset request to firmware
 *                        (supported only for fusion adapters)
 * @scmd:                 SCSI command pointer
 */
static int megasas_reset_target(struct scsi_cmnd *scmd)
{
	int ret;
	struct megasas_instance *instance;

	instance = (struct megasas_instance *)scmd->device->host->hostdata;

	if (instance->ctrl_context)
		ret = megasas_reset_target_fusion(scmd);
	else {
		/* MFI (non-fusion) adapters have no target-reset support */
		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
		ret = FAILED;
	}

	return ret;
}

/**
 * megasas_bios_param - Returns disk geometry for a disk
 * @sdev:		device handle
 * @bdev:		block device
 * @capacity:		drive capacity
 * @geom:		geometry parameters
 */
static int
megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
		 sector_t capacity, int geom[])
{
	int heads;
	int sectors;
	sector_t cylinders;
	unsigned long tmp;

	/* Default heads (64) & sectors (32) */
	heads = 64;
	sectors = 32;

	tmp = heads * sectors;
	cylinders = capacity;

	sector_div(cylinders, tmp);

	/*
	 * Handle extended translation size for logical drives > 1Gb
	 */

	if (capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		tmp = heads*sectors;
		cylinders = capacity;
		sector_div(cylinders, tmp);
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
}

static void megasas_aen_polling(struct work_struct *work);

/**
 * megasas_service_aen -	Processes an event notification
 * @instance:			Adapter soft state
 * @cmd:			AEN command completed by the ISR
 *
 * For AEN, driver sends a command down to FW that is held by the FW till an
 * event occurs.
 * When an event of interest occurs, FW completes the command
 * that it was previously holding.
 *
 * This routines sends SIGIO signal to processes that have registered with the
 * driver for AEN.
 */
static void
megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	unsigned long flags;

	/*
	 * Don't signal app if it is just an aborted previously registered aen
	 */
	if ((!cmd->abort_aen) && (instance->unload == 0)) {
		spin_lock_irqsave(&poll_aen_lock, flags);
		megasas_poll_wait_aen = 1;
		spin_unlock_irqrestore(&poll_aen_lock, flags);
		wake_up(&megasas_poll_wait);
		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
	}
	else
		cmd->abort_aen = 0;

	instance->aen_cmd = NULL;

	megasas_return_cmd(instance, cmd);

	if ((instance->unload == 0) &&
		((instance->issuepend_done == 1))) {
		struct megasas_aen_event *ev;

		/* Schedule hotplug polling work to act on the event */
		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
		if (!ev) {
			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
		} else {
			ev->instance = instance;
			instance->ev = ev;
			INIT_DELAYED_WORK(&ev->hotplug_work,
					  megasas_aen_polling);
			schedule_delayed_work(&ev->hotplug_work, 0);
		}
	}
}

/*
 * sysfs store for fw_crash_buffer: sets the read offset (in dump-chunk
 * units consumed by the corresponding show handler).
 */
static ssize_t
megasas_fw_crash_buffer_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct megasas_instance *instance =
		(struct megasas_instance *) shost->hostdata;
	int val = 0;
	unsigned long flags;

	if (kstrtoint(buf, 0, &val) != 0)
		return -EINVAL;

	spin_lock_irqsave(&instance->crashdump_lock, flags);
	instance->fw_crash_buffer_offset = val;
	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
	return strlen(buf);
}

/*
 * sysfs show for fw_crash_buffer: copies up to a page of crash-dump data
 * starting at the previously stored offset.
 */
static ssize_t
megasas_fw_crash_buffer_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct megasas_instance *instance =
		(struct megasas_instance *) shost->hostdata;
	u32 size;
	unsigned long buff_addr;
	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
	unsigned long src_addr;
	unsigned long flags;
	u32 buff_offset;
	spin_lock_irqsave(&instance->crashdump_lock, flags);
	buff_offset = instance->fw_crash_buffer_offset;
	if (!instance->crash_dump_buf &&
		!((instance->fw_crash_state == AVAILABLE) ||
		(instance->fw_crash_state == COPYING))) {
		dev_err(&instance->pdev->dev,
			"Firmware crash dump is not available\n");
		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
		return -EINVAL;
	}

	buff_addr = (unsigned long) buf;

	/* NOTE(review): '>' lets buff_offset == total size through, which
	 * makes size 0 below but still indexes crash_buf[] one past the
	 * last chunk when computing src_addr — confirm '>=' isn't intended.
	 */
	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
		dev_err(&instance->pdev->dev,
			"Firmware crash dump offset is out of range\n");
		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
		return 0;
	}

	/* Clamp to just under one page per read; userspace advances the
	 * offset via the store handler between reads.
	 */
	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;

	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
		(buff_offset % dmachunk);
	memcpy(buf, (void *)src_addr, size);
	spin_unlock_irqrestore(&instance->crashdump_lock, flags);

	return size;
}

/* sysfs show: total crash-dump size in bytes (buffer size is in MB). */
static ssize_t
megasas_fw_crash_buffer_size_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct megasas_instance *instance =
		(struct megasas_instance *) shost->hostdata;

	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
}

/*
 * sysfs store for fw_crash_state: the copy application reports progress;
 * COPIED or COPY_ERROR releases the host-side crash buffer.
 */
static ssize_t
megasas_fw_crash_state_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct megasas_instance *instance =
		(struct megasas_instance *) shost->hostdata;
	int val = 0;
	unsigned long flags;

	if (kstrtoint(buf, 0, &val) != 0)
		return -EINVAL;

	if ((val <= AVAILABLE || val > COPY_ERROR)) {
		dev_err(&instance->pdev->dev, "application updates invalid "
			"firmware crash state\n");
		return -EINVAL;
	}

	instance->fw_crash_state = val;

	if ((val == COPIED) || (val == COPY_ERROR)) {
		spin_lock_irqsave(&instance->crashdump_lock, flags);
		megasas_free_host_crash_buffer(instance);
		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
		if (val == COPY_ERROR)
			dev_info(&instance->pdev->dev, "application failed to "
				"copy Firmware crash dump\n");
		else
			dev_info(&instance->pdev->dev, "Firmware crash dump "
				"copied successfully\n");
	}
	return strlen(buf);
}

/* sysfs show: current crash-dump state machine value. */
static ssize_t
megasas_fw_crash_state_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct megasas_instance *instance =
		(struct megasas_instance *) shost->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
}

/* sysfs show: max bytes returned per fw_crash_buffer read (PAGE_SIZE - 1). */
static ssize_t
megasas_page_size_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
}

/* sysfs show: number of outstanding LD I/Os on this adapter. */
static ssize_t
megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
}

static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
	megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
	megasas_fw_crash_buffer_size_show, NULL);
static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
	megasas_fw_crash_state_show, megasas_fw_crash_state_store);
static DEVICE_ATTR(page_size, S_IRUGO,
	megasas_page_size_show, NULL);
static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
	megasas_ldio_outstanding_show, NULL);

/* Host-level sysfs attributes exported via the scsi_host_template. */
struct device_attribute *megaraid_host_attrs[] = {
	&dev_attr_fw_crash_buffer_size,
	&dev_attr_fw_crash_buffer,
	&dev_attr_fw_crash_state,
	&dev_attr_page_size,
	&dev_attr_ldio_outstanding,
	NULL,
};

/*
 * Scsi host template for megaraid_sas driver
 */
static struct scsi_host_template megasas_template = {

	.module = THIS_MODULE,
	.name = "Avago SAS based MegaRAID driver",
	.proc_name = "megaraid_sas",
	.slave_configure = megasas_slave_configure,
.slave_alloc = megasas_slave_alloc, .slave_destroy = megasas_slave_destroy, .queuecommand = megasas_queue_command, .eh_target_reset_handler = megasas_reset_target, .eh_abort_handler = megasas_task_abort, .eh_host_reset_handler = megasas_reset_bus_host, .eh_timed_out = megasas_reset_timer, .shost_attrs = megaraid_host_attrs, .bios_param = megasas_bios_param, .use_clustering = ENABLE_CLUSTERING, .change_queue_depth = scsi_change_queue_depth, .no_write_same = 1, }; /** * megasas_complete_int_cmd - Completes an internal command * @instance: Adapter soft state * @cmd: Command to be completed * * The megasas_issue_blocked_cmd() function waits for a command to complete * after it issues a command. This function wakes up that waiting routine by * calling wake_up() on the wait queue. */ static void megasas_complete_int_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { cmd->cmd_status_drv = cmd->frame->io.cmd_status; wake_up(&instance->int_cmd_wait_q); } /** * megasas_complete_abort - Completes aborting a command * @instance: Adapter soft state * @cmd: Cmd that was issued to abort another cmd * * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q * after it issues an abort on a previously issued command. This function * wakes up all functions waiting on the same wait queue. */ static void megasas_complete_abort(struct megasas_instance *instance, struct megasas_cmd *cmd) { if (cmd->sync_cmd) { cmd->sync_cmd = 0; cmd->cmd_status_drv = 0; wake_up(&instance->abort_cmd_wait_q); } } /** * megasas_complete_cmd - Completes a command * @instance: Adapter soft state * @cmd: Command to be completed * @alt_status: If non-zero, use this value as status to * SCSI mid-layer instead of the value returned * by the FW. 
 *				This should be used if caller wants
 *				an alternate status (as in the case of
 *				aborted commands)
 */
void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		     u8 alt_status)
{
	int exception = 0;
	struct megasas_header *hdr = &cmd->frame->hdr;
	unsigned long flags;
	struct fusion_context *fusion = instance->ctrl_context;
	u32 opcode, status;

	/* flag for the retry reset */
	cmd->retry_for_fw_reset = 0;

	if (cmd->scmd)
		cmd->scmd->SCp.ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		/* Some older 1068 controller FW may keep a pended
		   MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
		   when booting the kdump kernel.  Ignore this command to
		   prevent a kernel panic on shutdown of the kdump kernel. */
		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
			 "completed\n");
		dev_warn(&instance->pdev->dev, "If you have a controller "
			 "other than PERC5, please upgrade your firmware\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:

		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			megasas_complete_int_cmd(instance, cmd);
			break;
		}
		/* fall through - regular I/O completion is shared below */

	case MFI_CMD_LD_READ:
	case MFI_CMD_LD_WRITE:

		if (alt_status) {
			cmd->scmd->result = alt_status << 16;
			exception = 1;
		}

		if (exception) {

			atomic_dec(&instance->fw_outstanding);

			scsi_dma_unmap(cmd->scmd);
			cmd->scmd->scsi_done(cmd->scmd);
			megasas_return_cmd(instance, cmd);

			break;
		}

		/* Map the MFI completion status onto a SCSI midlayer result */
		switch (hdr->cmd_status) {

		case MFI_STAT_OK:
			cmd->scmd->result = DID_OK << 16;
			break;

		case MFI_STAT_SCSI_IO_FAILED:
		case MFI_STAT_LD_INIT_IN_PROGRESS:
			cmd->scmd->result =
			    (DID_ERROR << 16) | hdr->scsi_status;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:

			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;

			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
				memset(cmd->scmd->sense_buffer, 0,
				       SCSI_SENSE_BUFFERSIZE);
				memcpy(cmd->scmd->sense_buffer, cmd->sense,
				       hdr->sense_len);

				cmd->scmd->result |= DRIVER_SENSE << 24;
			}

			break;

		case MFI_STAT_LD_OFFLINE:
		case MFI_STAT_DEVICE_NOT_FOUND:
			cmd->scmd->result = DID_BAD_TARGET << 16;
			break;

		default:
			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
			       hdr->cmd_status);
			cmd->scmd->result = DID_ERROR << 16;
			break;
		}

		atomic_dec(&instance->fw_outstanding);

		scsi_dma_unmap(cmd->scmd);
		cmd->scmd->scsi_done(cmd->scmd);
		megasas_return_cmd(instance, cmd);

		break;

	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
		/* Check for LD map update */
		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
			fusion->fast_path_io = 0;
			spin_lock_irqsave(instance->host->host_lock, flags);
			instance->map_update_cmd = NULL;
			if (cmd->frame->hdr.cmd_status != 0) {
				if (cmd->frame->hdr.cmd_status !=
				    MFI_STAT_NOT_FOUND)
					dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n",
					       cmd->frame->hdr.cmd_status);
				else {
					megasas_return_cmd(instance, cmd);
					spin_unlock_irqrestore(
						instance->host->host_lock,
						flags);
					break;
				}
			} else
				instance->map_id++;
			megasas_return_cmd(instance, cmd);

			/*
			 * Set fast path IO to ZERO.
			 * Validate Map will set proper value.
			 * Meanwhile all IOs will go as LD IO.
			 */
			if (MR_ValidateMapInfo(instance))
				fusion->fast_path_io = 1;
			else
				fusion->fast_path_io = 0;
			megasas_sync_map_info(instance);
			spin_unlock_irqrestore(instance->host->host_lock,
					       flags);
			break;
		}
		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    opcode == MR_DCMD_CTRL_EVENT_GET) {
			spin_lock_irqsave(&poll_aen_lock, flags);
			megasas_poll_wait_aen = 0;
			spin_unlock_irqrestore(&poll_aen_lock, flags);
		}

		/* FW has an updated PD sequence */
		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
			(cmd->frame->dcmd.mbox.b[0] == 1)) {

			spin_lock_irqsave(instance->host->host_lock, flags);
			status = cmd->frame->hdr.cmd_status;
			instance->jbod_seq_cmd = NULL;
			megasas_return_cmd(instance, cmd);

			if (status == MFI_STAT_OK) {
				instance->pd_seq_map_id++;
				/* Re-register a pd sync seq num cmd */
				if (megasas_sync_pd_seq_num(instance, true))
					instance->use_seqnum_jbod_fp = false;
			} else
				instance->use_seqnum_jbod_fp = false;

			spin_unlock_irqrestore(instance->host->host_lock, flags);
			break;
		}

		/*
		 * See if got an event notification
		 */
		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
			megasas_service_aen(instance, cmd);
		else
			megasas_complete_int_cmd(instance, cmd);

		break;

	case MFI_CMD_ABORT:
		/*
		 * Cmd issued to abort another cmd returned
		 */
		megasas_complete_abort(instance, cmd);
		break;

	default:
		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
			 hdr->cmd);
		break;
	}
}

/**
 * megasas_issue_pending_cmds_again -	issue all pending cmds
 *					in FW again because of the fw reset
 * @instance:				Adapter soft state
 */
static inline void
megasas_issue_pending_cmds_again(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	struct list_head clist_local;
	union megasas_evt_class_locale class_locale;
	unsigned long flags;
	u32 seq_num;

	/* Detach the deferred-command list under hba_lock, then work on
	 * the private copy without holding the lock. */
	INIT_LIST_HEAD(&clist_local);
	spin_lock_irqsave(&instance->hba_lock, flags);
	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	while (!list_empty(&clist_local)) {
		cmd = list_entry((&clist_local)->next,
					struct megasas_cmd, list);
		list_del_init(&cmd->list);

		if (cmd->sync_cmd || cmd->scmd) {
			dev_notice(&instance->pdev->dev, "command %p, %p:%d"
				"detected to be pending while HBA reset\n",
					cmd, cmd->scmd, cmd->sync_cmd);

			cmd->retry_for_fw_reset++;

			/* Give up after three reset retries for the same cmd
			 * and take the whole HBA down. */
			if (cmd->retry_for_fw_reset == 3) {
				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d"
					"was tried multiple times during reset."
					"Shutting down the HBA\n",
					cmd, cmd->scmd, cmd->sync_cmd);
				instance->instancet->disable_intr(instance);
				atomic_set(&instance->fw_reset_no_pci_access, 1);
				megaraid_sas_kill_hba(instance);
				return;
			}
		}

		if (cmd->sync_cmd == 1) {
			if (cmd->scmd) {
				dev_notice(&instance->pdev->dev, "unexpected"
					"cmd attached to internal command!\n");
			}
			dev_notice(&instance->pdev->dev, "%p synchronous cmd"
						"on the internal reset queue,"
						"issue it again.\n", cmd);
			cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
			instance->instancet->fire_cmd(instance,
							cmd->frame_phys_addr,
							0, instance->reg_set);
		} else if (cmd->scmd) {
			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]"
			"detected on the internal queue, issue again.\n",
			cmd, cmd->scmd->cmnd[0]);

			atomic_inc(&instance->fw_outstanding);
			instance->instancet->fire_cmd(instance,
					cmd->frame_phys_addr,
					cmd->frame_count-1, instance->reg_set);
		} else {
			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the"
				"internal reset defer list while re-issue!!\n",
				cmd);
		}
	}

	/* Drop any stale AEN command; a fresh one is registered below. */
	if (instance->aen_cmd) {
		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
		megasas_return_cmd(instance, instance->aen_cmd);

		instance->aen_cmd = NULL;
	}

	/*
	 * Initiate AEN (Asynchronous Event Notification)
	 */
	seq_num = instance->last_seq_num;
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	megasas_register_aen(instance, seq_num, class_locale.word);
}

/**
 * Move the internal reset pending commands to a deferred queue.
 *
 * We move the commands pending at internal reset time to a
 * pending queue. This queue would be flushed after successful
 * completion of the internal reset sequence. if the internal reset
 * did not complete in time, the kernel reset handler would flush
 * these commands.
 **/
static void
megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	int i;
	u16 max_cmd = instance->max_fw_cmds;
	u32 defer_index;
	unsigned long flags;

	defer_index = 0;
	/* Walk the whole command array under mfi_pool_lock and park every
	 * in-flight (sync or SCSI-attached) command on the deferred queue. */
	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		if (cmd->sync_cmd == 1 || cmd->scmd) {
			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p"
					"on the defer queue as internal\n",
				defer_index, cmd, cmd->sync_cmd, cmd->scmd);

			/* A busy command should not be on any list; unlink it
			 * defensively before re-queueing. */
			if (!list_empty(&cmd->list)) {
				dev_notice(&instance->pdev->dev, "ERROR while"
					" moving this cmd:%p, %d %p, it was"
					"discovered on some list?\n",
					cmd, cmd->sync_cmd, cmd->scmd);

				list_del_init(&cmd->list);
			}
			defer_index++;
			list_add_tail(&cmd->list,
				&instance->internal_reset_pending_q);
		}
	}
	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
}


/*
 * Workqueue handler that drives the HBA recovery state machine once the
 * ISR has detected a firmware fault (adprecovery == ADPRESET_SM_INFAULT):
 * reset the adapter, wait for it to become ready, re-init MFI and re-issue
 * the deferred commands.
 */
static void
process_fw_state_change_wq(struct work_struct *work)
{
	struct megasas_instance *instance =
		container_of(work, struct megasas_instance, work_init);
	u32 wait;
	unsigned long flags;

	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
				atomic_read(&instance->adprecovery));
		return ;
	}

	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev, "FW detected to be in fault"
					"state, restarting it...\n");

		instance->instancet->disable_intr(instance);
		atomic_set(&instance->fw_outstanding, 0);

		/* Block PCI access while the adapter reset is in progress. */
		atomic_set(&instance->fw_reset_no_pci_access, 1);
		instance->instancet->adp_reset(instance, instance->reg_set);
		atomic_set(&instance->fw_reset_no_pci_access, 0);

		dev_notice(&instance->pdev->dev, "FW restarted successfully,"
					"initiating next stage...\n");

		dev_notice(&instance->pdev->dev, "HBA recovery state machine,"
					"state 2 starting...\n");

		/* waiting for about 20 second before start the second init */
		for (wait = 0; wait < 30; wait++) {
			msleep(1000);
		}

		if (megasas_transition_to_ready(instance, 1)) {
			dev_notice(&instance->pdev->dev, "adapter not ready\n");

			atomic_set(&instance->fw_reset_no_pci_access, 1);
			megaraid_sas_kill_hba(instance);
			return ;
		}

		/* Re-sync the producer/consumer reply-queue indices; older
		 * (1064R/PERC5/Verde-ZCR) parts keep the producer value. */
		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
			) {
			*instance->consumer = *instance->producer;
		} else {
			*instance->consumer = 0;
			*instance->producer = 0;
		}

		megasas_issue_init_mfi(instance);

		spin_lock_irqsave(&instance->hba_lock, flags);
		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		instance->instancet->enable_intr(instance);

		megasas_issue_pending_cmds_again(instance);
		instance->issuepend_done = 1;
	}
}

/**
 * megasas_deplete_reply_queue -	Processes all completed commands
 * @instance:				Adapter soft state
 * @alt_status:				Alternate status to be returned to
 *					SCSI mid-layer instead of the status
 *					returned by the FW
 * Note: this must be called with hba lock held
 */
static int
megasas_deplete_reply_queue(struct megasas_instance *instance,
			    u8 alt_status)
{
	u32 mfiStatus;
	u32 fw_state;

	if ((mfiStatus = instance->instancet->check_reset(instance,
					instance->reg_set)) == 1) {
		return IRQ_HANDLED;
	}

	if ((mfiStatus = instance->instancet->clear_intr(
						instance->reg_set)
						) == 0) {
		/* Hardware may not set outbound_intr_status in MSI-X mode */
		if (!instance->msix_vectors)
			return IRQ_NONE;
	}

	instance->mfiStatus = mfiStatus;

	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
		fw_state = instance->instancet->read_fw_status_reg(
				instance->reg_set) & MFI_STATE_MASK;

		if (fw_state != MFI_STATE_FAULT) {
			dev_notice(&instance->pdev->dev, "fw state:%x\n",
						fw_state);
		}

		/* FW faulted and online controller reset is allowed: defer
		 * in-flight commands and hand recovery to the workqueue. */
		if ((fw_state == MFI_STATE_FAULT) &&
				(instance->disableOnlineCtrlReset == 0)) {
			dev_notice(&instance->pdev->dev, "wait adp restart\n");

			if ((instance->pdev->device ==
					PCI_DEVICE_ID_LSI_SAS1064R) ||
				(instance->pdev->device ==
					PCI_DEVICE_ID_DELL_PERC5) ||
				(instance->pdev->device ==
					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {

				*instance->consumer =
					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
			}


			instance->instancet->disable_intr(instance);
			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
			instance->issuepend_done = 0;

			atomic_set(&instance->fw_outstanding, 0);
			megasas_internal_reset_defer_cmds(instance);

			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
					fw_state, atomic_read(&instance->adprecovery));

			schedule_work(&instance->work_init);
			return IRQ_HANDLED;

		} else {
			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
				fw_state, instance->disableOnlineCtrlReset);
		}
	}

	/* Normal path: completions are drained from tasklet context. */
	tasklet_schedule(&instance->isr_tasklet);
	return IRQ_HANDLED;
}

/**
 * megasas_isr - isr entry point
 */
static irqreturn_t megasas_isr(int irq, void *devp)
{
	struct megasas_irq_context *irq_context = devp;
	struct megasas_instance *instance = irq_context->instance;
	unsigned long flags;
	irqreturn_t rc;

	/* Ignore interrupts while an adapter reset owns the PCI bus. */
	if (atomic_read(&instance->fw_reset_no_pci_access))
		return IRQ_HANDLED;

	spin_lock_irqsave(&instance->hba_lock, flags);
	rc = megasas_deplete_reply_queue(instance, DID_OK);
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	return rc;
}

/**
 * megasas_transition_to_ready -	Move the FW to READY state
 * @instance:				Adapter soft state
 *
 * During the initialization, FW passes can potentially be in any one of
 * several possible states. If the FW in operational, waiting-for-handshake
 * states, driver must take steps to bring it to ready state. Otherwise, it
 * has to wait for the ready state.
 */
int
megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
{
	int i;
	u8 max_wait;
	u32 fw_state;
	u32 cur_state;
	u32 abs_state, curr_abs_state;

	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
	fw_state = abs_state & MFI_STATE_MASK;

	if (fw_state != MFI_STATE_READY)
		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
		       " state\n");

	/* Drive the FW state machine: for each intermediate state, poke the
	 * doorbell if needed, then poll (up to max_wait secs) for a change. */
	while (fw_state != MFI_STATE_READY) {

		switch (fw_state) {

		case MFI_STATE_FAULT:
			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
			if (ocr) {
				max_wait = MEGASAS_RESET_WAIT_TIME;
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;

		case MFI_STATE_WAIT_HANDSHAKE:
			/*
			 * Set the CLR bit in inbound doorbell
			 */
			if ((instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
				(instance->ctrl_context))
				writel(
				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
				  &instance->reg_set->doorbell);
			else
				writel(
				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
					&instance->reg_set->inbound_doorbell);

			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;

		case MFI_STATE_BOOT_MESSAGE_PENDING:
			if ((instance->pdev->device ==
			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
				(instance->pdev->device ==
				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
				(instance->ctrl_context))
				writel(MFI_INIT_HOTPLUG,
				       &instance->reg_set->doorbell);
			else
				writel(MFI_INIT_HOTPLUG,
					&instance->reg_set->inbound_doorbell);

			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;

		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10 secs
			 */
			instance->instancet->disable_intr(instance);
			if ((instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
				(instance->ctrl_context)) {
				writel(MFI_RESET_FLAGS,
					&instance->reg_set->doorbell);

				/* Fusion: wait (up to 10s) for the doorbell
				 * reset bit to clear. */
				if (instance->ctrl_context) {
					for (i = 0; i < (10 * 1000); i += 20) {
						if (readl(
							    &instance->
							    reg_set->
							    doorbell) & 1)
							msleep(20);
						else
							break;
					}
				}
			} else
				writel(MFI_RESET_FLAGS,
					&instance->reg_set->inbound_doorbell);

			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_OPERATIONAL;
			break;

		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2 seconds
			 */
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_UNDEFINED;
			break;

		case MFI_STATE_BB_INIT:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_BB_INIT;
			break;

		case MFI_STATE_FW_INIT:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_FW_INIT;
			break;

		case MFI_STATE_FW_INIT_2:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_FW_INIT_2;
			break;

		case MFI_STATE_DEVICE_SCAN:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;

		case MFI_STATE_FLUSH_CACHE:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;

		default:
			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
			       fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			curr_abs_state = instance->instancet->
				read_fw_status_reg(instance->reg_set);

			if (abs_state == curr_abs_state) {
				msleep(1);
			} else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
			       "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}

		abs_state = curr_abs_state;
		fw_state = curr_abs_state & MFI_STATE_MASK;
	}
	dev_info(&instance->pdev->dev, "FW now in Ready state\n");

	return 0;
}

/**
 * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
 * @instance:				Adapter soft state
 */
static void megasas_teardown_frame_pool(struct megasas_instance *instance)
{
	int i;
	u16 max_cmd = instance->max_mfi_cmds;
	struct megasas_cmd *cmd;

	if (!instance->frame_dma_pool)
		return;

	/*
	 * Return all frames to pool
	 */
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->frame)
			pci_pool_free(instance->frame_dma_pool, cmd->frame,
				      cmd->frame_phys_addr);

		if (cmd->sense)
			pci_pool_free(instance->sense_dma_pool, cmd->sense,
				      cmd->sense_phys_addr);
	}

	/*
	 * Now destroy the pool itself
	 */
	pci_pool_destroy(instance->frame_dma_pool);
	pci_pool_destroy(instance->sense_dma_pool);

	instance->frame_dma_pool = NULL;
	instance->sense_dma_pool = NULL;
}

/**
 * megasas_create_frame_pool -	Creates DMA pool for cmd frames
 * @instance:			Adapter soft state
 *
 * Each command packet has an embedded DMA memory buffer that is used for
 * filling MFI frame and the SG list that immediately follows the frame. This
 * function creates those DMA memory buffers for each command packet by using
 * PCI pool facility.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (partially
 * allocated frames are released via megasas_teardown_frame_pool()).
 */
static int megasas_create_frame_pool(struct megasas_instance *instance)
{
	int i;
	u16 max_cmd;
	u32 sge_sz;
	u32 frame_count;
	struct megasas_cmd *cmd;

	max_cmd = instance->max_mfi_cmds;

	/*
	 * Size of our frame is 64 bytes for MFI frame, followed by max SG
	 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
	 */
	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (instance->flag_ieee)
		sge_sz = sizeof(struct megasas_sge_skinny);

	/*
	 * For MFI controllers.
	 * max_num_sge = 60
	 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny)
	 * Total 960 byte (15 MFI frame of 64 byte)
	 *
	 * Fusion adapter require only 3 extra frame.
	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
	 * max_sge_sz = 12 byte (sizeof megasas_sge64)
	 * Total 192 byte (3 MFI frame of 64 byte)
	 */
	frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
	/*
	 * Use DMA pool facility provided by PCI layer
	 */
	instance->frame_dma_pool = pci_pool_create("megasas frame pool",
					instance->pdev, instance->mfi_frame_size,
					256, 0);

	if (!instance->frame_dma_pool) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
		return -ENOMEM;
	}

	instance->sense_dma_pool = pci_pool_create("megasas sense pool",
						   instance->pdev, 128, 4, 0);

	if (!instance->sense_dma_pool) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");

		pci_pool_destroy(instance->frame_dma_pool);
		instance->frame_dma_pool = NULL;

		return -ENOMEM;
	}

	/*
	 * Allocate and attach a frame to each of the commands in cmd_list.
	 * By making cmd->index as the context instead of the &cmd, we can
	 * always use 32bit context regardless of the architecture
	 */
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
					    GFP_KERNEL, &cmd->frame_phys_addr);

		cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
					    GFP_KERNEL, &cmd->sense_phys_addr);

		/*
		 * megasas_teardown_frame_pool() takes care of freeing
		 * whatever has been allocated
		 */
		if (!cmd->frame || !cmd->sense) {
			dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
			megasas_teardown_frame_pool(instance);
			return -ENOMEM;
		}

		memset(cmd->frame, 0, instance->mfi_frame_size);
		cmd->frame->io.context = cpu_to_le32(cmd->index);
		cmd->frame->io.pad_0 = 0;
		if (!instance->ctrl_context && reset_devices)
			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
	}

	return 0;
}

/**
 * megasas_free_cmds -	Free all the cmds in the free cmd pool
 * @instance:		Adapter soft state
 */
void megasas_free_cmds(struct megasas_instance *instance)
{
	int i;

	/* First free the MFI frame pool */
	megasas_teardown_frame_pool(instance);

	/* Free all the commands in the cmd_list */
	for (i = 0; i < instance->max_mfi_cmds; i++)

		kfree(instance->cmd_list[i]);

	/* Free the cmd_list buffer itself */
	kfree(instance->cmd_list);
	instance->cmd_list = NULL;

	INIT_LIST_HEAD(&instance->cmd_pool);
}

/**
 * megasas_alloc_cmds -	Allocates the command packets
 * @instance:		Adapter soft state
 *
 * Each command that is issued to the FW, whether IO commands from the OS or
 * internal commands like IOCTLs, are wrapped in local data structure called
 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
 * the FW.
 *
 * Each frame has a 32-bit field called context (tag). This context is used
 * to get back the megasas_cmd from the frame when a frame gets completed in
 * the ISR. Typically the address of the megasas_cmd itself would be used as
 * the context. But we wanted to keep the differences between 32 and 64 bit
 * systems to the mininum. We always use 32 bit integers for the context. In
 * this driver, the 32 bit values are the indices into an array cmd_list.
 * This array is used only to look up the megasas_cmd given the context. The
 * free commands themselves are maintained in a linked list called cmd_pool.
 */
int megasas_alloc_cmds(struct megasas_instance *instance)
{
	int i;
	int j;
	u16 max_cmd;
	struct megasas_cmd *cmd;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	max_cmd = instance->max_mfi_cmds;

	/*
	 * instance->cmd_list is an array of struct megasas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kcalloc(max_cmd,
				sizeof(struct megasas_cmd*), GFP_KERNEL);

	if (!instance->cmd_list) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
		return -ENOMEM;
	}

	/* NOTE(review): redundant with kcalloc's zeroing above. */
	memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);

	for (i = 0; i < max_cmd; i++) {
		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
						GFP_KERNEL);

		/* On failure, unwind every command allocated so far. */
		if (!instance->cmd_list[i]) {

			for (j = 0; j < i; j++)
				kfree(instance->cmd_list[j]);

			kfree(instance->cmd_list);
			instance->cmd_list = NULL;

			return -ENOMEM;
		}
	}

	/* Initialize each command and place it on the free pool. */
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		memset(cmd, 0, sizeof(struct megasas_cmd));
		cmd->index = i;
		cmd->scmd = NULL;
		cmd->instance = instance;

		list_add_tail(&cmd->list, &instance->cmd_pool);
	}

	/*
	 * Create a frame pool and assign one frame to each cmd
	 */
	if (megasas_create_frame_pool(instance)) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
		megasas_free_cmds(instance);
	}

	return 0;
}

/*
 * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
 * @instance:				Adapter soft state
 *
 * Return 0 for only Fusion adapter, if driver load/unload is not in progress
 * or FW is not under OCR.
 */
inline int
dcmd_timeout_ocr_possible(struct megasas_instance *instance) {

	if (!instance->ctrl_context)
		return KILL_ADAPTER;
	else if (instance->unload ||
			test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
		return IGNORE_TIMEOUT;
	else
		return INITIATE_OCR;
}

/*
 * megasas_get_pd_info - Fetch MR_PD_INFO for one physical device via DCMD
 * and record its interface type in the device's private data.  On DCMD
 * timeout the recovery action is chosen by dcmd_timeout_ocr_possible().
 */
static void
megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
{
	int ret;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct MR_PRIV_DEVICE *mr_device_priv_data;
	u16 device_id = 0;

	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
		return;
	}

	dcmd = &cmd->frame->dcmd;

	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the MR_DCMD_PD_GET_INFO frame; result lands in the
	 * pre-allocated instance->pd_info DMA buffer (pd_info_h). */
	dcmd->mbox.s[0] = cpu_to_le16(device_id);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));

	if (instance->ctrl_context && !instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	switch (ret) {
	case DCMD_SUCCESS:
		mr_device_priv_data = sdev->hostdata;
		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
		mr_device_priv_data->interface_type =
				instance->pd_info->state.ddf.pdType.intf;
		break;

	case DCMD_TIMEOUT:

		switch (dcmd_timeout_ocr_possible(instance)) {
		case INITIATE_OCR:
			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
			megasas_reset_fusion(instance->host,
				MFI_IO_TIMEOUT_OCR);
			break;
		case KILL_ADAPTER:
			megaraid_sas_kill_hba(instance);
			break;
		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
				__func__, __LINE__);
			break;
		}
		break;
	}

	/* On timeout the cmd may be refired by OCR, so don't return it. */
	if (ret != DCMD_TIMEOUT)
		megasas_return_cmd(instance, cmd);

	return;
}
/*
 * megasas_get_pd_list_info -	Returns FW's pd_list structure
 * @instance:				Adapter soft state
 * @pd_list:				pd_list structure
 *
 * Issues an internal command (DCMD) to get the FW's controller PD
 * list structure.  This information is mainly used to find out SYSTEM
 * supported by the FW.
 */
static int
megasas_get_pd_list(struct megasas_instance *instance)
{
	int ret = 0, pd_index = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct MR_PD_LIST *ci;
	struct MR_PD_ADDRESS *pd_addr;
	dma_addr_t ci_h = 0;

	if (instance->pd_list_not_supported) {
		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
		"not supported by firmware\n");
		return ret;
	}

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	/* Coherent DMA buffer the FW fills with the PD list. */
	ci = pci_alloc_consistent(instance->pdev,
		  MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);

	if (!ci) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(ci, 0, sizeof(*ci));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	dcmd->mbox.b[1] = 0;
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD *
					sizeof(struct MR_PD_LIST));
	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD *
					sizeof(struct MR_PD_LIST));

	if (instance->ctrl_context && !instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd,
			MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	switch (ret) {
	case DCMD_FAILED:
		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
			"failed/not supported by firmware\n");

		if (instance->ctrl_context)
			megaraid_sas_kill_hba(instance);
		else
			instance->pd_list_not_supported = 1;
		break;
	case DCMD_TIMEOUT:

		switch (dcmd_timeout_ocr_possible(instance)) {
		case INITIATE_OCR:
			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
			/*
			 * DCMD failed from AEN path.
			 * AEN path already hold reset_mutex to avoid PCI access
			 * while OCR is in progress.
			 */
			mutex_unlock(&instance->reset_mutex);
			megasas_reset_fusion(instance->host,
						MFI_IO_TIMEOUT_OCR);
			mutex_lock(&instance->reset_mutex);
			break;
		case KILL_ADAPTER:
			megaraid_sas_kill_hba(instance);
			break;
		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
				__func__, __LINE__);
			break;
		}

		break;

	case DCMD_SUCCESS:
		pd_addr = ci->addr;

		/* Sanity-check the FW-reported count before indexing. */
		if ((le32_to_cpu(ci->count) >
			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
			break;

		/* Rebuild the local PD list keyed by deviceId, then publish
		 * it atomically into instance->pd_list. */
		memset(instance->local_pd_list, 0,
				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));

		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
					le16_to_cpu(pd_addr->deviceId);
			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
					pd_addr->scsiDevType;
			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
					MR_PD_STATE_SYSTEM;
			pd_addr++;
		}

		memcpy(instance->pd_list, instance->local_pd_list,
			sizeof(instance->pd_list));
		break;

	}

	pci_free_consistent(instance->pdev,
				MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
				ci, ci_h);

	/* On timeout the cmd may be refired by OCR, so don't return it. */
	if (ret != DCMD_TIMEOUT)
		megasas_return_cmd(instance, cmd);

	return ret;
}

/*
 * megasas_get_ld_list_info -	Returns FW's ld_list structure
 * @instance:				Adapter soft state
 * @ld_list:				ld_list structure
 *
 * Issues an internal command (DCMD) to get the FW's controller PD
 * list structure.  This information is mainly used to find out SYSTEM
 * supported by the FW.
 */
static int
megasas_get_ld_list(struct megasas_instance *instance)
{
	int ret = 0, ld_index = 0, ids = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct MR_LD_LIST *ci;
	dma_addr_t ci_h = 0;
	u32 ld_count;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	/* Coherent DMA buffer the FW fills with the LD list. */
	ci = pci_alloc_consistent(instance->pdev,
				sizeof(struct MR_LD_LIST),
				&ci_h);

	if (!ci) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(ci, 0, sizeof(*ci));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	if (instance->supportmax256vd)
		dcmd->mbox.b[0] = 1;
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
	dcmd->pad_0  = 0;

	if (instance->ctrl_context && !instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd,
			MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	/* NOTE(review): ldCount is read from the DMA buffer before ret is
	 * inspected; it is only consumed on DCMD_SUCCESS below. */
	ld_count = le32_to_cpu(ci->ldCount);

	switch (ret) {
	case DCMD_FAILED:
		megaraid_sas_kill_hba(instance);
		break;
	case DCMD_TIMEOUT:

		switch (dcmd_timeout_ocr_possible(instance)) {
		case INITIATE_OCR:
			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
			/*
			 * DCMD failed from AEN path.
			 * AEN path already hold reset_mutex to avoid PCI access
			 * while OCR is in progress.
			 */
			mutex_unlock(&instance->reset_mutex);
			megasas_reset_fusion(instance->host,
						MFI_IO_TIMEOUT_OCR);
			mutex_lock(&instance->reset_mutex);
			break;
		case KILL_ADAPTER:
			megaraid_sas_kill_hba(instance);
			break;
		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
				__func__, __LINE__);
			break;
		}

		break;

	case DCMD_SUCCESS:
		/* Reject counts beyond what this FW generation supports. */
		if (ld_count > instance->fw_supported_vd_count)
			break;

		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);

		/* Record the target ID of every non-deleted LD. */
		for (ld_index = 0; ld_index < ld_count; ld_index++) {
			if (ci->ldList[ld_index].state != 0) {
				ids = ci->ldList[ld_index].ref.targetId;
				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
			}
		}

		break;
	}

	pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST),
				ci,
				ci_h);

	/* On timeout the cmd may be refired by OCR, so don't return it. */
	if (ret != DCMD_TIMEOUT)
		megasas_return_cmd(instance, cmd);

	return ret;
}

/**
 * megasas_ld_list_query -	Returns FW's ld_list structure
 * @instance:				Adapter soft state
 * @ld_list:				ld_list structure
 *
 * Issues an internal command (DCMD) to get the FW's controller PD
 * list structure.  This information is mainly used to find out SYSTEM
 * supported by the FW.
 */
static int
megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
{
	int ret = 0, ld_index = 0, ids = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct MR_LD_TARGETID_LIST *ci;	/* DMA-coherent buffer the FW fills in */
	dma_addr_t ci_h = 0;
	u32 tgtid_count;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_warn(&instance->pdev->dev,
				"megasas_ld_list_query: Failed to get cmd\n");
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	ci = pci_alloc_consistent(instance->pdev,
				  sizeof(struct MR_LD_TARGETID_LIST), &ci_h);

	if (!ci) {
		dev_warn(&instance->pdev->dev,
				"Failed to alloc mem for ld_list_query\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(ci, 0, sizeof(*ci));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* mbox byte 0 selects the query flavour; byte 2 advertises 256-VD support */
	dcmd->mbox.b[0] = query_type;
	if (instance->supportmax256vd)
		dcmd->mbox.b[2] = 1;

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
	dcmd->pad_0 = 0;

	if (instance->ctrl_context && !instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	switch (ret) {
	case DCMD_FAILED:
		/* Older firmware: fall back to the legacy LD-list DCMD */
		dev_info(&instance->pdev->dev,
			"DCMD not supported by firmware - %s %d\n",
				__func__, __LINE__);
		ret = megasas_get_ld_list(instance);
		break;
	case DCMD_TIMEOUT:
		switch (dcmd_timeout_ocr_possible(instance)) {
		case INITIATE_OCR:
			/* Do not re-fire this frame after the reset */
			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
			/*
			 * DCMD failed from AEN path.
			 * AEN path already hold reset_mutex to avoid PCI access
			 * while OCR is in progress.
			 */
			mutex_unlock(&instance->reset_mutex);
			megasas_reset_fusion(instance->host,
						MFI_IO_TIMEOUT_OCR);
			mutex_lock(&instance->reset_mutex);
			break;
		case KILL_ADAPTER:
			megaraid_sas_kill_hba(instance);
			break;
		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
				__func__, __LINE__);
			break;
		}
		break;
	case DCMD_SUCCESS:
		tgtid_count = le32_to_cpu(ci->count);

		/* Reject counts beyond what this firmware can expose */
		if ((tgtid_count > (instance->fw_supported_vd_count)))
			break;

		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
			ids = ci->targetId[ld_index];
			instance->ld_ids[ids] = ci->targetId[ld_index];
		}

		break;
	}

	pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
		    ci, ci_h);

	/* On timeout the frame is owned by the OCR path; do not return it */
	if (ret != DCMD_TIMEOUT)
		megasas_return_cmd(instance, cmd);

	return ret;
}

/*
 * megasas_update_ext_vd_details : Update details w.r.t Extended VD
 * instance			: Controller's instance
 *
 * Derives driver/firmware VD and PD limits and the RAID map sizes from
 * the controller info previously fetched into instance->ctrl_info.
 * No-op (dummy success) for MFI-only controllers.
 */
static void megasas_update_ext_vd_details(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	u32 ventura_map_sz = 0;

	fusion = instance->ctrl_context;
	/* For MFI based controllers return dummy success */
	if (!fusion)
		return;

	instance->supportmax256vd =
		instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
	/* Below is additional check to address future FW enhancement */
	if (instance->ctrl_info->max_lds > 64)
		instance->supportmax256vd = 1;

	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
					* MEGASAS_MAX_DEV_PER_CHANNEL;
	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
					* MEGASAS_MAX_DEV_PER_CHANNEL;
	if (instance->supportmax256vd) {
		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
	} else {
		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
	}

	dev_info(&instance->pdev->dev,
		"firmware type\t: %s\n",
		instance->supportmax256vd ?
		"Extended VD(240 VD)firmware" :
		"Legacy(64 VD) firmware");

	if (instance->max_raid_mapsize) {
		/* Ventura: map size reported by FW in units of MR_MIN_MAP_SIZE */
		ventura_map_sz = instance->max_raid_mapsize *
						MR_MIN_MAP_SIZE; /* 64k */
		fusion->current_map_sz = ventura_map_sz;
		fusion->max_map_sz = ventura_map_sz;
	} else {
		/* Pre-Ventura: pick legacy or extended FW RAID map layout */
		fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
					(sizeof(struct MR_LD_SPAN_MAP) *
					(instance->fw_supported_vd_count - 1));
		fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);

		fusion->max_map_sz =
			max(fusion->old_map_sz, fusion->new_map_sz);

		if (instance->supportmax256vd)
			fusion->current_map_sz = fusion->new_map_sz;
		else
			fusion->current_map_sz = fusion->old_map_sz;
	}
	/* irrespective of FW raid maps, driver raid map is constant */
	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
}

/**
 * megasas_get_controller_info -	Returns FW's controller structure
 * @instance:				Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller structure.
 * This information is mainly used to find out the maximum IO transfer per
 * command supported by the FW.
*/ int megasas_get_ctrl_info(struct megasas_instance *instance) { int ret = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct megasas_ctrl_info *ci; struct megasas_ctrl_info *ctrl_info; dma_addr_t ci_h = 0; ctrl_info = instance->ctrl_info; cmd = megasas_get_cmd(instance); if (!cmd) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; ci = pci_alloc_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), &ci_h); if (!ci) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n"); megasas_return_cmd(instance, cmd); return -ENOMEM; } memset(ci, 0, sizeof(*ci)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info)); dcmd->mbox.b[0] = 1; if (instance->ctrl_context && !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); switch (ret) { case DCMD_SUCCESS: memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info)); /* Save required controller information in * CPU endianness format. */ le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); le16_to_cpus((u16 *)&ctrl_info->adapter_operations4); /* Update the latest Ext VD info. * From Init path, store current firmware details. * From OCR path, detect any firmware properties changes. * in case of Firmware upgrade without system reboot. 
*/ megasas_update_ext_vd_details(instance); instance->use_seqnum_jbod_fp = ctrl_info->adapterOperations3.useSeqNumJbodFP; instance->support_morethan256jbod = ctrl_info->adapter_operations4.support_pd_map_target_id; /*Check whether controller is iMR or MR */ instance->is_imr = (ctrl_info->memory_size ? 0 : 1); dev_info(&instance->pdev->dev, "controller type\t: %s(%dMB)\n", instance->is_imr ? "iMR" : "MR", le16_to_cpu(ctrl_info->memory_size)); instance->disableOnlineCtrlReset = ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; instance->secure_jbod_support = ctrl_info->adapterOperations3.supportSecurityonJBOD; dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", instance->secure_jbod_support ? "Yes" : "No"); break; case DCMD_TIMEOUT: switch (dcmd_timeout_ocr_possible(instance)) { case INITIATE_OCR: cmd->flags |= DRV_DCMD_SKIP_REFIRE; megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); break; case KILL_ADAPTER: megaraid_sas_kill_hba(instance); break; case IGNORE_TIMEOUT: dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", __func__, __LINE__); break; } case DCMD_FAILED: megaraid_sas_kill_hba(instance); break; } pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), ci, ci_h); megasas_return_cmd(instance, cmd); return ret; } /* * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer * to firmware * * @instance: Adapter soft state * @crash_buf_state - tell FW to turn ON/OFF crash dump feature MR_CRASH_BUF_TURN_OFF = 0 MR_CRASH_BUF_TURN_ON = 1 * @return 0 on success non-zero on failure. * Issues an internal command (DCMD) to set parameters for crash dump feature. * Driver will send address of crash dump DMA buffer and set mbox to tell FW * that driver supports crash dump feature. This DCMD will be sent only if * crash dump feature is supported by the FW. 
 *
 */
int megasas_set_crash_dump_params(struct megasas_instance *instance,
	u8 crash_buf_state)
{
	int ret = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	/* mbox byte 0 carries the requested ON/OFF state */
	dcmd->mbox.b[0] = crash_buf_state;
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
	/* Hand the pre-allocated crash dump DMA buffer address to the FW */
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);

	if (instance->ctrl_context && !instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	if (ret == DCMD_TIMEOUT) {
		switch (dcmd_timeout_ocr_possible(instance)) {
		case INITIATE_OCR:
			/* Do not re-fire this frame after the reset */
			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
			megasas_reset_fusion(instance->host,
					MFI_IO_TIMEOUT_OCR);
			break;
		case KILL_ADAPTER:
			megaraid_sas_kill_hba(instance);
			break;
		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
				__func__, __LINE__);
			break;
		}
	} else
		/* On timeout the frame is owned by the OCR path; skip return */
		megasas_return_cmd(instance, cmd);

	return ret;
}

/**
 * megasas_issue_init_mfi -	Initializes the FW
 * @instance:		Adapter soft state
 *
 * Issues the INIT MFI cmd
 */
static int
megasas_issue_init_mfi(struct megasas_instance *instance)
{
	__le32 context;
	struct megasas_cmd *cmd;
	struct megasas_init_frame *init_frame;
	struct megasas_init_queue_info *initq_info;
	dma_addr_t init_frame_h;
	dma_addr_t initq_info_h;

	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 *
	 * We will not get a NULL command below. We just created the pool.
	 */
	cmd = megasas_get_cmd(instance);

	init_frame = (struct megasas_init_frame *)cmd->frame;
	initq_info = (struct megasas_init_queue_info *)
		((unsigned long)init_frame + 64);

	init_frame_h = cmd->frame_phys_addr;
	initq_info_h = init_frame_h + 64;

	/* Preserve the frame's context across the memset */
	context = init_frame->context;
	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
	init_frame->context = context;

	/* Reply queue holds one more entry than the max outstanding cmds */
	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);

	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);

	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
	init_frame->queue_info_new_phys_addr_lo =
		cpu_to_le32(lower_32_bits(initq_info_h));
	init_frame->queue_info_new_phys_addr_hi =
		cpu_to_le32(upper_32_bits(initq_info_h));

	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));

	/*
	 * disable the intr before firing the init frame to FW
	 */
	instance->instancet->disable_intr(instance);

	/*
	 * Issue the init frame in polled mode
	 */

	if (megasas_issue_polled(instance, cmd)) {
		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
		megasas_return_cmd(instance, cmd);
		goto fail_fw_init;
	}

	megasas_return_cmd(instance, cmd);

	return 0;

fail_fw_init:
	return -EINVAL;
}

/*
 * megasas_init_adapter_mfi -	Reads FW operational limits, allocates the
 *				command pool and reply queue, and sends the
 *				INIT MFI command for classic MFI adapters.
 *
 * Returns 0 on success, 1 on failure.
 */
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *reg_set;
	u32 context_sz;
	u32 reply_q_sz;

	reg_set = instance->reg_set;

	/*
	 * Get various operational parameters from status register
	 */
	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmd that driver may send)
	 * does not exceed max cmds that the FW can support
	 */
	instance->max_fw_cmds = instance->max_fw_cmds-1;
	instance->max_mfi_cmds = instance->max_fw_cmds;
	instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
					0x10;
	/*
	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
	 * are reserved for IOCTL + driver's internal DCMDs.
	 */
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
		instance->max_scsi_cmds = (instance->max_fw_cmds -
			MEGASAS_SKINNY_INT_CMDS);
		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
	} else {
		instance->max_scsi_cmds = (instance->max_fw_cmds -
			MEGASAS_INT_CMDS);
		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
	}

	instance->cur_can_queue = instance->max_scsi_cmds;
	/*
	 * Create a pool of commands
	 */
	if (megasas_alloc_cmds(instance))
		goto fail_alloc_cmds;

	/*
	 * Allocate memory for reply queue. Length of reply queue should
	 * be _one_ more than the maximum commands handled by the firmware.
	 *
	 * Note: When FW completes commands, it places corresponding contex
	 * values in this circular reply queue. This circular queue is a fairly
	 * typical producer-consumer queue. FW is the producer (of completed
	 * commands) and the driver is the consumer.
	 */
	context_sz = sizeof(u32);
	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);

	instance->reply_queue = pci_alloc_consistent(instance->pdev,
						     reply_q_sz,
						     &instance->reply_queue_h);

	if (!instance->reply_queue) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
		goto fail_reply_queue;
	}

	if (megasas_issue_init_mfi(instance))
		goto fail_fw_init;

	if (megasas_get_ctrl_info(instance)) {
		dev_err(&instance->pdev->dev, "(%d): Could get controller info "
			"Fail from %s %d\n", instance->unique_id,
			__func__, __LINE__);
		goto fail_fw_init;
	}

	/* Bit 26 of the status register advertises IEEE SGL support */
	instance->fw_support_ieee = 0;
	instance->fw_support_ieee =
		(instance->instancet->read_fw_status_reg(reg_set) &
		0x04000000);

	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
	       instance->fw_support_ieee);

	if (instance->fw_support_ieee)
		instance->flag_ieee = 1;

	return 0;

fail_fw_init:

	pci_free_consistent(instance->pdev, reply_q_sz,
			    instance->reply_queue, instance->reply_queue_h);
fail_reply_queue:
	megasas_free_cmds(instance);

fail_alloc_cmds:
	return 1;
}

/*
 * megasas_setup_irqs_ioapic -		register legacy interrupts.
 * @instance:				Adapter soft state
 *
 * Do not enable interrupt, only setup ISRs.
 *
 * Return 0 on success.
 */
static int
megasas_setup_irqs_ioapic(struct megasas_instance *instance)
{
	struct pci_dev *pdev;

	pdev = instance->pdev;
	instance->irq_context[0].instance = instance;
	instance->irq_context[0].MSIxIndex = 0;
	if (request_irq(pci_irq_vector(pdev, 0),
			instance->instancet->service_isr, IRQF_SHARED,
			"megasas", &instance->irq_context[0])) {
		dev_err(&instance->pdev->dev,
				"Failed to register IRQ from %s %d\n",
				__func__, __LINE__);
		return -1;
	}
	return 0;
}

/**
 * megasas_setup_irqs_msix -		register MSI-x interrupts.
 * @instance:				Adapter soft state
 * @is_probe:				Driver probe check
 *
 * Do not enable interrupt, only setup ISRs.
 *
 * Return 0 on success.
*/ static int megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) { int i, j; struct pci_dev *pdev; pdev = instance->pdev; /* Try MSI-x */ for (i = 0; i < instance->msix_vectors; i++) { instance->irq_context[i].instance = instance; instance->irq_context[i].MSIxIndex = i; if (request_irq(pci_irq_vector(pdev, i), instance->instancet->service_isr, 0, "megasas", &instance->irq_context[i])) { dev_err(&instance->pdev->dev, "Failed to register IRQ for vector %d.\n", i); for (j = 0; j < i; j++) free_irq(pci_irq_vector(pdev, j), &instance->irq_context[j]); /* Retry irq register for IO_APIC*/ instance->msix_vectors = 0; if (is_probe) { pci_free_irq_vectors(instance->pdev); return megasas_setup_irqs_ioapic(instance); } else { return -1; } } } return 0; } /* * megasas_destroy_irqs- unregister interrupts. * @instance: Adapter soft state * return: void */ static void megasas_destroy_irqs(struct megasas_instance *instance) { int i; if (instance->msix_vectors) for (i = 0; i < instance->msix_vectors; i++) { free_irq(pci_irq_vector(instance->pdev, i), &instance->irq_context[i]); } else free_irq(pci_irq_vector(instance->pdev, 0), &instance->irq_context[0]); } /** * megasas_setup_jbod_map - setup jbod map for FP seq_number. * @instance: Adapter soft state * @is_probe: Driver probe check * * Return 0 on success. 
 */
void
megasas_setup_jbod_map(struct megasas_instance *instance)
{
	int i;
	struct fusion_context *fusion = instance->ctrl_context;
	u32 pd_seq_map_sz;

	/* Flexible-array style sizing: struct already contains one entry */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));

	if (reset_devices || !fusion ||
		!instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
		dev_info(&instance->pdev->dev,
			"Jbod map is not supported %s %d\n",
			__func__, __LINE__);
		instance->use_seqnum_jbod_fp = false;
		return;
	}

	/* Buffers already allocated (e.g. after OCR) - just re-sync */
	if (fusion->pd_seq_sync[0])
		goto skip_alloc;

	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
		fusion->pd_seq_sync[i] = dma_alloc_coherent
			(&instance->pdev->dev, pd_seq_map_sz,
			&fusion->pd_seq_phys[i], GFP_KERNEL);
		if (!fusion->pd_seq_sync[i]) {
			dev_err(&instance->pdev->dev,
				"Failed to allocate memory from %s %d\n",
				__func__, __LINE__);
			/* If the second allocation failed, undo the first */
			if (i == 1) {
				dma_free_coherent(&instance->pdev->dev,
					pd_seq_map_sz, fusion->pd_seq_sync[0],
					fusion->pd_seq_phys[0]);
				fusion->pd_seq_sync[0] = NULL;
			}
			instance->use_seqnum_jbod_fp = false;
			return;
		}
	}

skip_alloc:
	/* Enable the fast path only if both sync DCMDs succeed */
	if (!megasas_sync_pd_seq_num(instance, false) &&
		!megasas_sync_pd_seq_num(instance, true))
		instance->use_seqnum_jbod_fp = true;
	else
		instance->use_seqnum_jbod_fp = false;
}

/**
 * megasas_init_fw -	Initializes the FW
 * @instance:		Adapter soft state
 *
 * This is the main function for initializing firmware.
 * Maps the register set, brings the FW to ready state, sets up MSI-x or
 * legacy interrupts, runs the adapter init sequence and fetches the
 * PD/LD lists and controller limits.
 */

static int megasas_init_fw(struct megasas_instance *instance)
{
	u32 max_sectors_1;
	u32 max_sectors_2, tmp_sectors, msix_enable;
	u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
	resource_size_t base_addr;
	struct megasas_register_set __iomem *reg_set;
	struct megasas_ctrl_info *ctrl_info = NULL;
	unsigned long bar_list;
	int i, j, loop, fw_msix_count = 0;
	struct IOV_111 *iovPtr;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	/* Find first memory bar */
	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
					 "megasas: LSI")) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
		return -EBUSY;
	}

	base_addr = pci_resource_start(instance->pdev, instance->bar);
	instance->reg_set = ioremap_nocache(base_addr, 8192);

	if (!instance->reg_set) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
		goto fail_ioremap;
	}

	reg_set = instance->reg_set;

	/* Pick the per-generation ops table for this adapter */
	if (fusion)
		instance->instancet = &megasas_instance_template_fusion;
	else {
		switch (instance->pdev->device) {
		case PCI_DEVICE_ID_LSI_SAS1078R:
		case PCI_DEVICE_ID_LSI_SAS1078DE:
			instance->instancet = &megasas_instance_template_ppc;
			break;
		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
			instance->instancet = &megasas_instance_template_gen2;
			break;
		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
			instance->instancet = &megasas_instance_template_skinny;
			break;
		case PCI_DEVICE_ID_LSI_SAS1064R:
		case PCI_DEVICE_ID_DELL_PERC5:
		default:
			instance->instancet = &megasas_instance_template_xscale;
			instance->pd_list_not_supported = 1;
			break;
		}
	}

	/* If the FW is not ready, reset the adapter once and retry */
	if (megasas_transition_to_ready(instance, 0)) {
		atomic_set(&instance->fw_reset_no_pci_access, 1);
		instance->instancet->adp_reset
			(instance, instance->reg_set);
		atomic_set(&instance->fw_reset_no_pci_access, 0);
		dev_info(&instance->pdev->dev,
			"FW restarted successfully from %s!\n",
			__func__);

		/*waitting for about 30 second before retry*/
		ssleep(30);

		if (megasas_transition_to_ready(instance, 0))
			goto fail_ready_state;
	}

	if (instance->is_ventura) {
		scratch_pad_3 =
			readl(&instance->reg_set->outbound_scratch_pad_3);
		instance->max_raid_mapsize = ((scratch_pad_3 >>
			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
			MR_MAX_RAID_MAP_SIZE_MASK);
	}

	/* Check if MSI-X is supported while in ready state */
	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
		       0x4000000) >> 0x1a;
	if (msix_enable && !msix_disable) {
		int irq_flags = PCI_IRQ_MSIX;

		scratch_pad_2 = readl
			(&instance->reg_set->outbound_scratch_pad_2);
		/* Check max MSI-X vectors */
		if (fusion) {
			if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/
				instance->msix_vectors = (scratch_pad_2
					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
				fw_msix_count = instance->msix_vectors;
			} else { /* Invader series supports more than 8 MSI-x vectors*/
				instance->msix_vectors = ((scratch_pad_2
					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
				if (instance->msix_vectors > 16)
					instance->msix_combined = true;

				if (rdpq_enable)
					instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
								1 : 0;
				fw_msix_count = instance->msix_vectors;
				/* Save 1-15 reply post index address to local memory
				 * Index 0 is already saved from reg offset
				 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
				 */
				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
					instance->reply_post_host_index_addr[loop] =
						(u32 __iomem *)
						((u8 __iomem *)instance->reg_set +
						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
						+ (loop * 0x10));
				}
			}
			/* Honour the module parameter cap, if set */
			if (msix_vectors)
				instance->msix_vectors = min(msix_vectors,
					instance->msix_vectors);
		} else /* MFI adapters */
			instance->msix_vectors = 1;
		/* Don't bother allocating more MSI-X vectors than cpus */
		instance->msix_vectors = min(instance->msix_vectors,
					     (unsigned int)num_online_cpus());
		if (smp_affinity_enable)
			irq_flags |= PCI_IRQ_AFFINITY;
		i = pci_alloc_irq_vectors(instance->pdev, 1,
					  instance->msix_vectors, irq_flags);
		if (i > 0)
			instance->msix_vectors = i;
		else
			instance->msix_vectors = 0;
	}
	/*
	 * MSI-X host index 0 is common for all adapter.
	 * It is used for all MPT based Adapters.
	 */
	if (instance->msix_combined) {
		instance->reply_post_host_index_addr[0] =
				(u32 *)((u8 *)instance->reg_set +
				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
	} else {
		instance->reply_post_host_index_addr[0] =
			(u32 *)((u8 *)instance->reg_set +
			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
	}

	/* MSI-x unavailable or disabled: fall back to a legacy interrupt */
	if (!instance->msix_vectors) {
		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
		if (i < 0)
			goto fail_setup_irqs;
	}

	dev_info(&instance->pdev->dev,
		"firmware supports msix\t: (%d)", fw_msix_count);
	dev_info(&instance->pdev->dev,
		"current msix/online cpus\t: (%d/%d)\n",
		instance->msix_vectors, (unsigned int)num_online_cpus());
	dev_info(&instance->pdev->dev,
		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");

	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
		(unsigned long)instance);

	instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
				GFP_KERNEL);
	if (instance->ctrl_info == NULL)
		goto fail_init_adapter;

	/*
	 * Below are default value for legacy Firmware.
	 * non-fusion based controllers
	 */
	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
	/* Get operational params, sge flags, send init cmd to controller */
	if (instance->instancet->init_adapter(instance))
		goto fail_init_adapter;

	if (instance->is_ventura) {
		scratch_pad_4 =
			readl(&instance->reg_set->outbound_scratch_pad_4);
		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
			MR_DEFAULT_NVME_PAGE_SHIFT)
			instance->nvme_page_size =
				(1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));

		dev_info(&instance->pdev->dev,
			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
	}

	if (instance->msix_vectors ?
		megasas_setup_irqs_msix(instance, 1) :
		megasas_setup_irqs_ioapic(instance))
		goto fail_init_adapter;

	instance->instancet->enable_intr(instance);

	dev_info(&instance->pdev->dev, "INIT adapter done\n");

	megasas_setup_jbod_map(instance);

	/** for passthrough
	 * the following function will get the PD LIST.
	 */
	memset(instance->pd_list, 0,
		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
	if (megasas_get_pd_list(instance) < 0) {
		dev_err(&instance->pdev->dev, "failed to get PD list\n");
		goto fail_get_ld_pd_list;
	}

	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);

	/* stream detection initialization */
	if (instance->is_ventura && fusion) {
		fusion->stream_detect_by_ld = kzalloc(sizeof(struct LD_STREAM_DETECT *)
		* MAX_LOGICAL_DRIVES_EXT, GFP_KERNEL);
		if (!fusion->stream_detect_by_ld) {
			dev_err(&instance->pdev->dev,
				"unable to allocate stream detection for pool of LDs\n");
			goto fail_get_ld_pd_list;
		}
		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
			fusion->stream_detect_by_ld[i] =
				kmalloc(sizeof(struct LD_STREAM_DETECT),
				GFP_KERNEL);
			if (!fusion->stream_detect_by_ld[i]) {
				dev_err(&instance->pdev->dev,
					"unable to allocate stream detect by LD\n ");
				/* Unwind the per-LD allocations done so far */
				for (j = 0; j < i; ++j)
					kfree(fusion->stream_detect_by_ld[j]);
				kfree(fusion->stream_detect_by_ld);
				fusion->stream_detect_by_ld = NULL;
				goto fail_get_ld_pd_list;
			}
			fusion->stream_detect_by_ld[i]->mru_bit_map
				= MR_STREAM_BITMAP;
		}
	}

	if (megasas_ld_list_query(instance,
				  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
		goto fail_get_ld_pd_list;

	/*
	 * Compute the max allowed sectors per IO: The controller info has two
	 * limits on max sectors. Driver should use the minimum of these two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmwares ( < FW ver 30) didn't report information
	 * to calculate max_sectors_1. So the number ended up as zero always.
	 */
	tmp_sectors = 0;
	ctrl_info = instance->ctrl_info;

	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
		le16_to_cpu(ctrl_info->max_strips_per_io);
	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);

	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);

	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
	instance->passive = ctrl_info->cluster.passive;
	memcpy(instance->clusterId, ctrl_info->clusterId,
		sizeof(instance->clusterId));
	instance->UnevenSpanSupport =
		ctrl_info->adapterOperations2.supportUnevenSpans;
	if (instance->UnevenSpanSupport) {
		struct fusion_context *fusion = instance->ctrl_context;
		if (MR_ValidateMapInfo(instance))
			fusion->fast_path_io = 1;
		else
			fusion->fast_path_io = 0;

	}
	if (ctrl_info->host_interface.SRIOV) {
		instance->requestorId = ctrl_info->iov.requestorId;
		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
			if (!ctrl_info->adapterOperations2.activePassive)
				instance->PlasmaFW111 = 1;

			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
			    instance->PlasmaFW111 ? "1.11" : "new");

			/* 1.11 FW keeps the requestor id at a fixed offset */
			if (instance->PlasmaFW111) {
			    iovPtr = (struct IOV_111 *)
					((unsigned char *)ctrl_info + IOV_111_OFFSET);
			    instance->requestorId = iovPtr->requestorId;
			}
		}
		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
			instance->requestorId);
	}

	instance->crash_dump_fw_support =
		ctrl_info->adapterOperations3.supportCrashDump;
	instance->crash_dump_drv_support =
		(instance->crash_dump_fw_support &&
		instance->crash_dump_buf);
	if (instance->crash_dump_drv_support)
		megasas_set_crash_dump_params(instance,
			MR_CRASH_BUF_TURN_OFF);

	else {
		/* FW does not support crash dump - release the driver buffer */
		if (instance->crash_dump_buf)
			pci_free_consistent(instance->pdev,
				CRASH_DMA_BUF_SIZE,
				instance->crash_dump_buf,
				instance->crash_dump_h);
		instance->crash_dump_buf = NULL;
	}


	dev_info(&instance->pdev->dev,
		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
		le16_to_cpu(ctrl_info->pci.vendor_id),
		le16_to_cpu(ctrl_info->pci.device_id),
		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
		le16_to_cpu(ctrl_info->pci.sub_device_id));
	dev_info(&instance->pdev->dev, "unevenspan support : %s\n",
		instance->UnevenSpanSupport ? "yes" : "no");
	dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
		instance->crash_dump_drv_support ? "yes" : "no");
	dev_info(&instance->pdev->dev, "jbod sync map : %s\n",
		instance->use_seqnum_jbod_fp ? "yes" : "no");


	instance->max_sectors_per_req = instance->max_num_sge *
						SGE_BUFFER_SIZE / 512;
	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
		instance->max_sectors_per_req = tmp_sectors;

	/* Check for valid throttlequeuedepth module parameter */
	if (throttlequeuedepth &&
			throttlequeuedepth <= instance->max_scsi_cmds)
		instance->throttlequeuedepth = throttlequeuedepth;
	else
		instance->throttlequeuedepth =
				MEGASAS_THROTTLE_QUEUE_DEPTH;

	/* Clamp out-of-range module parameters to sane values */
	if (resetwaittime > MEGASAS_RESET_WAIT_TIME)
		resetwaittime = MEGASAS_RESET_WAIT_TIME;

	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;

	/* Launch SR-IOV heartbeat timer */
	if (instance->requestorId) {
		if (!megasas_sriov_start_heartbeat(instance, 1))
			megasas_start_timer(instance,
					    &instance->sriov_heartbeat_timer,
					    megasas_sriov_heartbeat_handler,
					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
		else
			instance->skip_heartbeat_timer_del = 1;
	}

	return 0;

	/* Error unwind: each label releases what was set up before it */
fail_get_ld_pd_list:
	instance->instancet->disable_intr(instance);
fail_init_adapter:
	megasas_destroy_irqs(instance);
fail_setup_irqs:
	if (instance->msix_vectors)
		pci_free_irq_vectors(instance->pdev);
	instance->msix_vectors = 0;
fail_ready_state:
	kfree(instance->ctrl_info);
	instance->ctrl_info = NULL;
	iounmap(instance->reg_set);

fail_ioremap:
	pci_release_selected_regions(instance->pdev, 1<<instance->bar);

	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
		__func__, __LINE__);
	return -EINVAL;
}

/**
 * megasas_release_mfi -	Reverses the FW initialization
 * @instance:			Adapter soft state
 */
static void megasas_release_mfi(struct megasas_instance *instance)
{
	u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);

	if (instance->reply_queue)
		pci_free_consistent(instance->pdev, reply_q_sz,
			    instance->reply_queue, instance->reply_queue_h);

	megasas_free_cmds(instance);

	iounmap(instance->reg_set);

	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
}

/**
 * megasas_get_seq_num -	Gets latest event sequence numbers
 *
@instance:	Adapter soft state
 * @eli:	FW event log sequence numbers information
 *
 * FW maintains a log of all events in a non-volatile area. Upper layers would
 * usually find out the latest sequence number of the events, the seq number at
 * the boot etc. They would "read" all the events below the latest seq number
 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
 * number), they would subscribe to AEN (asynchronous event notification) and
 * wait for the events to happen.
 */
static int
megasas_get_seq_num(struct megasas_instance *instance,
		    struct megasas_evt_log_info *eli)
{
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct megasas_evt_log_info *el_info;
	dma_addr_t el_info_h = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	/* DMA-able scratch buffer the firmware fills with the sequence info. */
	el_info = pci_alloc_consistent(instance->pdev,
				       sizeof(struct megasas_evt_log_info),
				       &el_info_h);

	if (!el_info) {
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(el_info, 0, sizeof(*el_info));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build a blocking DCMD read: MR_DCMD_CTRL_EVENT_GET_INFO. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));

	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
	    DCMD_SUCCESS) {
		/*
		 * Copy the data back into callers buffer
		 */
		eli->newest_seq_num = el_info->newest_seq_num;
		eli->oldest_seq_num = el_info->oldest_seq_num;
		eli->clear_seq_num = el_info->clear_seq_num;
		eli->shutdown_seq_num = el_info->shutdown_seq_num;
		eli->boot_seq_num = el_info->boot_seq_num;
	} else
		dev_err(&instance->pdev->dev, "DCMD failed "
			"from %s\n", __func__);

	/* Free the scratch buffer; the function returns 0 even on DCMD
	 * failure - only allocation failures produce a non-zero return. */
	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h); megasas_return_cmd(instance, cmd); return 0; } /** * megasas_register_aen - Registers for asynchronous event notification * @instance: Adapter soft state * @seq_num: The starting sequence number * @class_locale: Class of the event * * This function subscribes for AEN for events beyond the @seq_num. It requests * to be notified if and only if the event is of type @class_locale */ static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word) { int ret_val; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; union megasas_evt_class_locale curr_aen; union megasas_evt_class_locale prev_aen; /* * If there an AEN pending already (aen_cmd), check if the * class_locale of that pending AEN is inclusive of the new * AEN request we currently have. If it is, then we don't have * to do anything. In other words, whichever events the current * AEN request is subscribing to, have already been subscribed * to. * * If the old_cmd is _not_ inclusive, then we have to abort * that command, form a class_locale that is superset of both * old and current and re-issue to the FW */ curr_aen.word = class_locale_word; if (instance->aen_cmd) { prev_aen.word = le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); /* * A class whose enum value is smaller is inclusive of all * higher values. If a PROGRESS (= -1) was previously * registered, then a new registration requests for higher * classes need not be sent to FW. They are automatically * included. * * Locale numbers don't have such hierarchy. They are bitmap * values */ if ((prev_aen.members.class <= curr_aen.members.class) && !((prev_aen.members.locale & curr_aen.members.locale) ^ curr_aen.members.locale)) { /* * Previously issued event registration includes * current request. Nothing to do. 
*/ return 0; } else { curr_aen.members.locale |= prev_aen.members.locale; if (prev_aen.members.class < curr_aen.members.class) curr_aen.members.class = prev_aen.members.class; instance->aen_cmd->abort_aen = 1; ret_val = megasas_issue_blocked_abort_cmd(instance, instance-> aen_cmd, 30); if (ret_val) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort " "previous AEN command\n"); return ret_val; } } } cmd = megasas_get_cmd(instance); if (!cmd) return -ENOMEM; dcmd = &cmd->frame->dcmd; memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); /* * Prepare DCMD for aen registration */ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); dcmd->mbox.w[0] = cpu_to_le32(seq_num); instance->last_seq_num = seq_num; dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h); dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail)); if (instance->aen_cmd != NULL) { megasas_return_cmd(instance, cmd); return 0; } /* * Store reference to the cmd used to register for AEN. When an * application wants us to register for AEN, we have to abort this * cmd and re-register with a new EVENT LOCALE supplied by that app */ instance->aen_cmd = cmd; /* * Issue the aen registration frame */ instance->instancet->issue_dcmd(instance, cmd); return 0; } /* megasas_get_target_prop - Send DCMD with below details to firmware. * * This DCMD will fetch few properties of LD/system PD defined * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value. * * DCMD send by drivers whenever new target is added to the OS. * * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD. * 0 = system PD, 1 = LD. 
 * dcmd.mbox.s[1] - TargetID for LD/system PD.
 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES.
 *
 * @instance:	Adapter soft state
 * @sdev:	OS provided scsi device
 *
 * Returns 0 on success non-zero on failure.
 */
static int
megasas_get_target_prop(struct megasas_instance *instance,
			struct scsi_device *sdev)
{
	int ret;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	/*
	 * NOTE(review): target id is derived as (channel % 2) + id; this
	 * assumes paired channels share one target numbering space - confirm
	 * against the driver's channel layout.
	 */
	u16 targetId = (sdev->channel % 2) + sdev->id;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_err(&instance->pdev->dev,
			"Failed to get cmd %s\n", __func__);
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	/* mbox.b[0]: 1 = LD, 0 = system PD (see function header above). */
	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
	dcmd->mbox.s[1] = cpu_to_le16(targetId);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len =
		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
	dcmd->sgl.sge32[0].phys_addr =
		cpu_to_le32(instance->tgt_prop_h);
	dcmd->sgl.sge32[0].length =
		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));

	/* Fusion path with interrupts available may block; otherwise poll. */
	if (instance->ctrl_context && !instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance,
						cmd, MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	switch (ret) {
	case DCMD_TIMEOUT:
		/* On timeout the cmd is deliberately NOT returned here: the
		 * OCR/kill-HBA recovery paths below own its reclamation. */
		switch (dcmd_timeout_ocr_possible(instance)) {
		case INITIATE_OCR:
			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
			megasas_reset_fusion(instance->host,
					     MFI_IO_TIMEOUT_OCR);
			break;
		case KILL_ADAPTER:
			megaraid_sas_kill_hba(instance);
			break;
		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev,
				 "Ignore DCMD timeout: %s %d\n",
				 __func__, __LINE__);
			break;
		}
		break;

	default:
		megasas_return_cmd(instance, cmd);
	}
	if (ret != DCMD_SUCCESS)
		dev_err(&instance->pdev->dev,
			"return from %s %d return value %d\n",
			__func__, __LINE__, ret);

	return ret;
}

/**
 * megasas_start_aen -	Subscribes to AEN during driver
load time * @instance: Adapter soft state */ static int megasas_start_aen(struct megasas_instance *instance) { struct megasas_evt_log_info eli; union megasas_evt_class_locale class_locale; /* * Get the latest sequence number from FW */ memset(&eli, 0, sizeof(eli)); if (megasas_get_seq_num(instance, &eli)) return -1; /* * Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; return megasas_register_aen(instance, le32_to_cpu(eli.newest_seq_num) + 1, class_locale.word); } /** * megasas_io_attach - Attaches this driver to SCSI mid-layer * @instance: Adapter soft state */ static int megasas_io_attach(struct megasas_instance *instance) { struct Scsi_Host *host = instance->host; /* * Export parameters required by SCSI mid-layer */ host->unique_id = instance->unique_id; host->can_queue = instance->max_scsi_cmds; host->this_id = instance->init_id; host->sg_tablesize = instance->max_num_sge; if (instance->fw_support_ieee) instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE; /* * Check if the module parameter value for max_sectors can be used */ if (max_sectors && max_sectors < instance->max_sectors_per_req) instance->max_sectors_per_req = max_sectors; else { if (max_sectors) { if (((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) && (max_sectors <= MEGASAS_MAX_SECTORS)) { instance->max_sectors_per_req = max_sectors; } else { dev_info(&instance->pdev->dev, "max_sectors should be > 0" "and <= %d (or < 1MB for GEN2 controller)\n", instance->max_sectors_per_req); } } } host->max_sectors = instance->max_sectors_per_req; host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN; host->max_channel = MEGASAS_MAX_CHANNELS - 1; host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; host->max_lun = MEGASAS_MAX_LUN; host->max_cmd_len = 16; /* * Notify the mid-layer about the new controller */ if 
(scsi_add_host(host, &instance->pdev->dev)) { dev_err(&instance->pdev->dev, "Failed to add host from %s %d\n", __func__, __LINE__); return -ENODEV; } return 0; } static int megasas_set_dma_mask(struct pci_dev *pdev) { /* * All our controllers are capable of performing 64-bit DMA */ if (IS_DMA64) { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) goto fail_set_dma_mask; } } else { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) goto fail_set_dma_mask; } /* * Ensure that all data structures are allocated in 32-bit * memory. */ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { /* Try 32bit DMA mask and 32 bit Consistent dma mask */ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) dev_info(&pdev->dev, "set 32bit DMA mask" "and 32 bit consistent mask\n"); else goto fail_set_dma_mask; } return 0; fail_set_dma_mask: return 1; } /** * megasas_probe_one - PCI hotplug entry point * @pdev: PCI device structure * @id: PCI ids of supported hotplugged adapter */ static int megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { int rval, pos; struct Scsi_Host *host; struct megasas_instance *instance; u16 control = 0; struct fusion_context *fusion = NULL; /* Reset MSI-X in the kdump kernel */ if (reset_devices) { pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); if (pos) { pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &control); if (control & PCI_MSIX_FLAGS_ENABLE) { dev_info(&pdev->dev, "resetting MSI-X\n"); pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, control & ~PCI_MSIX_FLAGS_ENABLE); } } } /* * PCI prepping: enable device set bus mastering and dma mask */ rval = pci_enable_device_mem(pdev); if (rval) { return rval; } pci_set_master(pdev); if (megasas_set_dma_mask(pdev)) goto fail_set_dma_mask; host = scsi_host_alloc(&megasas_template, sizeof(struct megasas_instance)); if (!host) { dev_printk(KERN_DEBUG, &pdev->dev, 
"scsi_host_alloc failed\n"); goto fail_alloc_instance; } instance = (struct megasas_instance *)host->hostdata; memset(instance, 0, sizeof(*instance)); atomic_set(&instance->fw_reset_no_pci_access, 0); instance->pdev = pdev; switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_VENTURA: case PCI_DEVICE_ID_LSI_HARPOON: case PCI_DEVICE_ID_LSI_TOMCAT: case PCI_DEVICE_ID_LSI_VENTURA_4PORT: case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: instance->is_ventura = true; case PCI_DEVICE_ID_LSI_FUSION: case PCI_DEVICE_ID_LSI_PLASMA: case PCI_DEVICE_ID_LSI_INVADER: case PCI_DEVICE_ID_LSI_FURY: case PCI_DEVICE_ID_LSI_INTRUDER: case PCI_DEVICE_ID_LSI_INTRUDER_24: case PCI_DEVICE_ID_LSI_CUTLASS_52: case PCI_DEVICE_ID_LSI_CUTLASS_53: { if (megasas_alloc_fusion_context(instance)) { megasas_free_fusion_context(instance); goto fail_alloc_dma_buf; } fusion = instance->ctrl_context; if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) fusion->adapter_type = THUNDERBOLT_SERIES; else if (instance->is_ventura) fusion->adapter_type = VENTURA_SERIES; else fusion->adapter_type = INVADER_SERIES; } break; default: /* For all other supported controllers */ instance->producer = pci_alloc_consistent(pdev, sizeof(u32), &instance->producer_h); instance->consumer = pci_alloc_consistent(pdev, sizeof(u32), &instance->consumer_h); if (!instance->producer || !instance->consumer) { dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate " "memory for producer, consumer\n"); goto fail_alloc_dma_buf; } *instance->producer = 0; *instance->consumer = 0; break; } /* Crash dump feature related initialisation*/ instance->drv_buf_index = 0; instance->drv_buf_alloc = 0; instance->crash_dump_fw_support = 0; instance->crash_dump_app_support = 0; instance->fw_crash_state = UNAVAILABLE; spin_lock_init(&instance->crashdump_lock); instance->crash_dump_buf = NULL; megasas_poll_wait_aen = 0; instance->flag_ieee = 0; instance->ev = NULL; instance->issuepend_done = 1; 
atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); instance->is_imr = 0; instance->evt_detail = pci_alloc_consistent(pdev, sizeof(struct megasas_evt_detail), &instance->evt_detail_h); if (!instance->evt_detail) { dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for " "event detail structure\n"); goto fail_alloc_dma_buf; } if (!reset_devices) { instance->system_info_buf = pci_zalloc_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO), &instance->system_info_h); if (!instance->system_info_buf) dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n"); instance->pd_info = pci_alloc_consistent(pdev, sizeof(struct MR_PD_INFO), &instance->pd_info_h); instance->pd_info = pci_alloc_consistent(pdev, sizeof(struct MR_PD_INFO), &instance->pd_info_h); instance->tgt_prop = pci_alloc_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h); if (!instance->pd_info) dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); if (!instance->tgt_prop) dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n"); instance->crash_dump_buf = pci_alloc_consistent(pdev, CRASH_DMA_BUF_SIZE, &instance->crash_dump_h); if (!instance->crash_dump_buf) dev_err(&pdev->dev, "Can't allocate Firmware " "crash dump DMA buffer\n"); } /* * Initialize locks and queues */ INIT_LIST_HEAD(&instance->cmd_pool); INIT_LIST_HEAD(&instance->internal_reset_pending_q); atomic_set(&instance->fw_outstanding,0); init_waitqueue_head(&instance->int_cmd_wait_q); init_waitqueue_head(&instance->abort_cmd_wait_q); spin_lock_init(&instance->mfi_pool_lock); spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->stream_lock); spin_lock_init(&instance->completion_lock); mutex_init(&instance->reset_mutex); mutex_init(&instance->hba_mutex); /* * Initialize PCI related and misc parameters */ instance->host = host; instance->unique_id = pdev->bus->number << 8 | pdev->devfn; instance->init_id = MEGASAS_DEFAULT_INIT_ID; instance->ctrl_info = NULL; if 
((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) instance->flag_ieee = 1; megasas_dbg_lvl = 0; instance->flag = 0; instance->unload = 1; instance->last_time = 0; instance->disableOnlineCtrlReset = 1; instance->UnevenSpanSupport = 0; if (instance->ctrl_context) { INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq); } else INIT_WORK(&instance->work_init, process_fw_state_change_wq); /* * Initialize MFI Firmware */ if (megasas_init_fw(instance)) goto fail_init_mfi; if (instance->requestorId) { if (instance->PlasmaFW111) { instance->vf_affiliation_111 = pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111), &instance->vf_affiliation_111_h); if (!instance->vf_affiliation_111) dev_warn(&pdev->dev, "Can't allocate " "memory for VF affiliation buffer\n"); } else { instance->vf_affiliation = pci_alloc_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), &instance->vf_affiliation_h); if (!instance->vf_affiliation) dev_warn(&pdev->dev, "Can't allocate " "memory for VF affiliation buffer\n"); } } /* * Store instance in PCI softstate */ pci_set_drvdata(pdev, instance); /* * Add this controller to megasas_mgmt_info structure so that it * can be exported to management applications */ megasas_mgmt_info.count++; megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; megasas_mgmt_info.max_index++; /* * Register with SCSI mid-layer */ if (megasas_io_attach(instance)) goto fail_io_attach; instance->unload = 0; /* * Trigger SCSI to scan our drives */ scsi_scan_host(host); /* * Initiate AEN (Asynchronous Event Notification) */ if (megasas_start_aen(instance)) { dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); goto fail_start_aen; } /* Get current SR-IOV LD/VF affiliation */ if (instance->requestorId) megasas_get_ld_vf_affiliation(instance, 1); return 0; fail_start_aen: fail_io_attach: 
megasas_mgmt_info.count--; megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; megasas_mgmt_info.max_index--; instance->instancet->disable_intr(instance); megasas_destroy_irqs(instance); if (instance->ctrl_context) megasas_release_fusion(instance); else megasas_release_mfi(instance); if (instance->msix_vectors) pci_free_irq_vectors(instance->pdev); fail_init_mfi: fail_alloc_dma_buf: if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); if (instance->pd_info) pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), instance->pd_info, instance->pd_info_h); if (instance->tgt_prop) pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), instance->tgt_prop, instance->tgt_prop_h); if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); if (instance->consumer) pci_free_consistent(pdev, sizeof(u32), instance->consumer, instance->consumer_h); scsi_host_put(host); fail_alloc_instance: fail_set_dma_mask: pci_disable_device(pdev); return -ENODEV; } /** * megasas_flush_cache - Requests FW to flush all its caches * @instance: Adapter soft state */ static void megasas_flush_cache(struct megasas_instance *instance) { struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) return; cmd = megasas_get_cmd(instance); if (!cmd) return; dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) != DCMD_SUCCESS) { dev_err(&instance->pdev->dev, "return from %s %d\n", __func__, __LINE__); return; } 
megasas_return_cmd(instance, cmd); } /** * megasas_shutdown_controller - Instructs FW to shutdown the controller * @instance: Adapter soft state * @opcode: Shutdown/Hibernate */ static void megasas_shutdown_controller(struct megasas_instance *instance, u32 opcode) { struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) return; cmd = megasas_get_cmd(instance); if (!cmd) return; if (instance->aen_cmd) megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd, MFI_IO_TIMEOUT_SECS); if (instance->map_update_cmd) megasas_issue_blocked_abort_cmd(instance, instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); if (instance->jbod_seq_cmd) megasas_issue_blocked_abort_cmd(instance, instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = cpu_to_le32(opcode); if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) != DCMD_SUCCESS) { dev_err(&instance->pdev->dev, "return from %s %d\n", __func__, __LINE__); return; } megasas_return_cmd(instance, cmd); } #ifdef CONFIG_PM /** * megasas_suspend - driver suspend entry point * @pdev: PCI device structure * @state: PCI power state to suspend routine */ static int megasas_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *host; struct megasas_instance *instance; instance = pci_get_drvdata(pdev); host = instance->host; instance->unload = 1; /* Shutdown SR-IOV heartbeat timer */ if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); /* cancel the delayed work if this work still in queue */ if (instance->ev != NULL) { struct 
megasas_aen_event *ev = instance->ev; cancel_delayed_work_sync(&ev->hotplug_work); instance->ev = NULL; } tasklet_kill(&instance->isr_tasklet); pci_set_drvdata(instance->pdev, instance); instance->instancet->disable_intr(instance); megasas_destroy_irqs(instance); if (instance->msix_vectors) pci_free_irq_vectors(instance->pdev); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } /** * megasas_resume- driver resume entry point * @pdev: PCI device structure */ static int megasas_resume(struct pci_dev *pdev) { int rval; struct Scsi_Host *host; struct megasas_instance *instance; int irq_flags = PCI_IRQ_LEGACY; instance = pci_get_drvdata(pdev); host = instance->host; pci_set_power_state(pdev, PCI_D0); pci_enable_wake(pdev, PCI_D0, 0); pci_restore_state(pdev); /* * PCI prepping: enable device set bus mastering and dma mask */ rval = pci_enable_device_mem(pdev); if (rval) { dev_err(&pdev->dev, "Enable device failed\n"); return rval; } pci_set_master(pdev); if (megasas_set_dma_mask(pdev)) goto fail_set_dma_mask; /* * Initialize MFI Firmware */ atomic_set(&instance->fw_outstanding, 0); /* * We expect the FW state to be READY */ if (megasas_transition_to_ready(instance, 0)) goto fail_ready_state; /* Now re-enable MSI-X */ if (instance->msix_vectors) { irq_flags = PCI_IRQ_MSIX; if (smp_affinity_enable) irq_flags |= PCI_IRQ_AFFINITY; } rval = pci_alloc_irq_vectors(instance->pdev, 1, instance->msix_vectors ? 
instance->msix_vectors : 1, irq_flags); if (rval < 0) goto fail_reenable_msix; if (instance->ctrl_context) { megasas_reset_reply_desc(instance); if (megasas_ioc_init_fusion(instance)) { megasas_free_cmds(instance); megasas_free_cmds_fusion(instance); goto fail_init_mfi; } if (!megasas_get_map_info(instance)) megasas_sync_map_info(instance); } else { *instance->producer = 0; *instance->consumer = 0; if (megasas_issue_init_mfi(instance)) goto fail_init_mfi; } tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); if (instance->msix_vectors ? megasas_setup_irqs_msix(instance, 0) : megasas_setup_irqs_ioapic(instance)) goto fail_init_mfi; /* Re-launch SR-IOV heartbeat timer */ if (instance->requestorId) { if (!megasas_sriov_start_heartbeat(instance, 0)) megasas_start_timer(instance, &instance->sriov_heartbeat_timer, megasas_sriov_heartbeat_handler, MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); else { instance->skip_heartbeat_timer_del = 1; goto fail_init_mfi; } } instance->instancet->enable_intr(instance); megasas_setup_jbod_map(instance); instance->unload = 0; /* * Initiate AEN (Asynchronous Event Notification) */ if (megasas_start_aen(instance)) dev_err(&instance->pdev->dev, "Start AEN failed\n"); return 0; fail_init_mfi: if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); if (instance->pd_info) pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), instance->pd_info, instance->pd_info_h); if (instance->tgt_prop) pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), instance->tgt_prop, instance->tgt_prop_h); if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); if (instance->consumer) pci_free_consistent(pdev, sizeof(u32), instance->consumer, instance->consumer_h); scsi_host_put(host); fail_set_dma_mask: fail_ready_state: fail_reenable_msix: pci_disable_device(pdev); return -ENODEV; } #else #define 
megasas_suspend NULL #define megasas_resume NULL #endif static inline int megasas_wait_for_adapter_operational(struct megasas_instance *instance) { int wait_time = MEGASAS_RESET_WAIT_TIME * 2; int i; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) return 1; for (i = 0; i < wait_time; i++) { if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) break; if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); msleep(1000); } if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n", __func__); return 1; } return 0; } /** * megasas_detach_one - PCI hot"un"plug entry point * @pdev: PCI device structure */ static void megasas_detach_one(struct pci_dev *pdev) { int i; struct Scsi_Host *host; struct megasas_instance *instance; struct fusion_context *fusion; u32 pd_seq_map_sz; instance = pci_get_drvdata(pdev); instance->unload = 1; host = instance->host; fusion = instance->ctrl_context; /* Shutdown SR-IOV heartbeat timer */ if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); if (instance->fw_crash_state != UNAVAILABLE) megasas_free_host_crash_buffer(instance); scsi_remove_host(instance->host); if (megasas_wait_for_adapter_operational(instance)) goto skip_firing_dcmds; megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); skip_firing_dcmds: /* cancel the delayed work if this work still in queue*/ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; cancel_delayed_work_sync(&ev->hotplug_work); instance->ev = NULL; } /* cancel all wait events */ wake_up_all(&instance->int_cmd_wait_q); tasklet_kill(&instance->isr_tasklet); /* * Take the instance off the instance array. Note that we will not * decrement the max_index. 
We let this array be sparse array */ for (i = 0; i < megasas_mgmt_info.max_index; i++) { if (megasas_mgmt_info.instance[i] == instance) { megasas_mgmt_info.count--; megasas_mgmt_info.instance[i] = NULL; break; } } instance->instancet->disable_intr(instance); megasas_destroy_irqs(instance); if (instance->msix_vectors) pci_free_irq_vectors(instance->pdev); if (instance->is_ventura) { for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) kfree(fusion->stream_detect_by_ld[i]); kfree(fusion->stream_detect_by_ld); fusion->stream_detect_by_ld = NULL; } if (instance->ctrl_context) { megasas_release_fusion(instance); pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); for (i = 0; i < 2 ; i++) { if (fusion->ld_map[i]) dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz, fusion->ld_map[i], fusion->ld_map_phys[i]); if (fusion->ld_drv_map[i]) free_pages((ulong)fusion->ld_drv_map[i], fusion->drv_map_pages); if (fusion->pd_seq_sync[i]) dma_free_coherent(&instance->pdev->dev, pd_seq_map_sz, fusion->pd_seq_sync[i], fusion->pd_seq_phys[i]); } megasas_free_fusion_context(instance); } else { megasas_release_mfi(instance); pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); pci_free_consistent(pdev, sizeof(u32), instance->consumer, instance->consumer_h); } kfree(instance->ctrl_info); if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); if (instance->pd_info) pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), instance->pd_info, instance->pd_info_h); if (instance->tgt_prop) pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), instance->tgt_prop, instance->tgt_prop_h); if (instance->vf_affiliation) pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), instance->vf_affiliation, instance->vf_affiliation_h); if (instance->vf_affiliation_111) pci_free_consistent(pdev, 
			    sizeof(struct MR_LD_VF_AFFILIATION_111),
			    instance->vf_affiliation_111,
			    instance->vf_affiliation_111_h);

	if (instance->hb_host_mem)
		pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
				    instance->hb_host_mem,
				    instance->hb_host_mem_h);

	if (instance->crash_dump_buf)
		pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
				    instance->crash_dump_buf,
				    instance->crash_dump_h);

	if (instance->system_info_buf)
		pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
				    instance->system_info_buf,
				    instance->system_info_h);

	scsi_host_put(host);

	pci_disable_device(pdev);
}

/**
 * megasas_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
 *
 * Flushes controller caches and sends the shutdown DCMD (only when the
 * adapter is operational), then quiesces interrupts so the device is safe
 * to power off.  No memory is released here - that is megasas_detach_one().
 */
static void megasas_shutdown(struct pci_dev *pdev)
{
	struct megasas_instance *instance = pci_get_drvdata(pdev);

	/* Refuse new management/AEN work while shutting down. */
	instance->unload = 1;

	if (megasas_wait_for_adapter_operational(instance))
		goto skip_firing_dcmds;

	megasas_flush_cache(instance);
	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);

skip_firing_dcmds:
	instance->instancet->disable_intr(instance);
	megasas_destroy_irqs(instance);

	if (instance->msix_vectors)
		pci_free_irq_vectors(instance->pdev);
}

/**
 * megasas_mgmt_open -	char node "open" entry point
 */
static int megasas_mgmt_open(struct inode *inode, struct file *filep)
{
	/*
	 * Allow only those users with admin rights
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	return 0;
}

/**
 * megasas_mgmt_fasync -	Async notifier registration from applications
 *
 * This function adds the calling process to a driver global queue. When an
 * event occurs, SIGIO will be sent to all processes in this queue.
 */
static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
{
	int rc;

	/* Serialize updates to the driver-global async notification queue. */
	mutex_lock(&megasas_async_queue_mutex);

	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);

	mutex_unlock(&megasas_async_queue_mutex);

	if (rc >= 0) {
		/* For sanity check when we get ioctl */
		filep->private_data = filep;
		return 0;
	}

	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);

	return rc;
}

/**
 * megasas_mgmt_poll -	char node "poll" entry point
 *
 * Reports the node readable when an AEN arrived since the last poll
 * (megasas_poll_wait_aen); the flag is consumed (reset) on every call.
 */
static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
{
	unsigned int mask;
	unsigned long flags;

	poll_wait(file, &megasas_poll_wait, wait);
	spin_lock_irqsave(&poll_aen_lock, flags);
	if (megasas_poll_wait_aen)
		mask = (POLLIN | POLLRDNORM);
	else
		mask = 0;
	/* One-shot indication: clear the pending-AEN flag. */
	megasas_poll_wait_aen = 0;
	spin_unlock_irqrestore(&poll_aen_lock, flags);
	return mask;
}

/*
 * megasas_set_crash_dump_params_ioctl:
 *		Send CRASH_DUMP_MODE DCMD to all controllers
 * @cmd:	MFI command frame
 *
 * Broadcasts the crash-dump mode carried in cmd->frame->dcmd.mbox.w[0] to
 * every registered adapter that advertises crash-dump support.  Note the
 * return value reflects only the LAST capable adapter processed.
 */
static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
{
	struct megasas_instance *local_instance;
	int i, error = 0;
	int crash_support;

	crash_support = cmd->frame->dcmd.mbox.w[0];

	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
		local_instance = megasas_mgmt_info.instance[i];
		if (local_instance &&
			local_instance->crash_dump_drv_support) {
			if ((atomic_read(&local_instance->adprecovery) ==
				MEGASAS_HBA_OPERATIONAL) &&
				!megasas_set_crash_dump_params(local_instance,
					crash_support)) {
				local_instance->crash_dump_app_support =
					crash_support;
				dev_info(&local_instance->pdev->dev,
					"Application firmware crash "
					"dump mode set success\n");
				error = 0;
			} else {
				dev_info(&local_instance->pdev->dev,
					"Application firmware crash "
					"dump mode set failed\n");
				error = -1;
			}
		}
	}
	return error;
}

/**
 * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
 * @instance:			Adapter soft state
 * @user_ioc:			User's ioctl packet (source of copy-out addresses)
 * @ioc:			Kernel copy of the ioctl packet
 */
static int
megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
		      struct megasas_iocpacket __user * user_ioc,
		      struct
megasas_iocpacket *ioc) { struct megasas_sge32 *kern_sge32; struct megasas_cmd *cmd; void *kbuff_arr[MAX_IOCTL_SGE]; dma_addr_t buf_handle = 0; int error = 0, i; void *sense = NULL; dma_addr_t sense_handle; unsigned long *sense_ptr; memset(kbuff_arr, 0, sizeof(kbuff_arr)); if (ioc->sge_count > MAX_IOCTL_SGE) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n", ioc->sge_count, MAX_IOCTL_SGE); return -EINVAL; } cmd = megasas_get_cmd(instance); if (!cmd) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); return -ENOMEM; } /* * User's IOCTL packet has 2 frames (maximum). Copy those two * frames into our cmd's frames. cmd->frame's context will get * overwritten when we copy from user's frames. So set that value * alone separately */ memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); cmd->frame->hdr.context = cpu_to_le32(cmd->index); cmd->frame->hdr.pad_0 = 0; cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 | MFI_FRAME_SENSE64)); if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_SHUTDOWN) { if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { megasas_return_cmd(instance, cmd); return -1; } } if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { error = megasas_set_crash_dump_params_ioctl(cmd); megasas_return_cmd(instance, cmd); return error; } /* * The management interface between applications and the fw uses * MFI frames. E.g, RAID configuration changes, LD property changes * etc are accomplishes through different kinds of MFI frames. The * driver needs to care only about substituting user buffers with * kernel buffers in SGLs. The location of SGL is embedded in the * struct iocpacket itself. 
*/ kern_sge32 = (struct megasas_sge32 *) ((unsigned long)cmd->frame + ioc->sgl_off); /* * For each user buffer, create a mirror buffer and copy in */ for (i = 0; i < ioc->sge_count; i++) { if (!ioc->sgl[i].iov_len) continue; kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, ioc->sgl[i].iov_len, &buf_handle, GFP_KERNEL); if (!kbuff_arr[i]) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " "kernel SGL buffer for IOCTL\n"); error = -ENOMEM; goto out; } /* * We don't change the dma_coherent_mask, so * pci_alloc_consistent only returns 32bit addresses */ kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); /* * We created a kernel buffer corresponding to the * user buffer. Now copy in from the user buffer */ if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, (u32) (ioc->sgl[i].iov_len))) { error = -EFAULT; goto out; } } if (ioc->sense_len) { sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, &sense_handle, GFP_KERNEL); if (!sense) { error = -ENOMEM; goto out; } sense_ptr = (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); *sense_ptr = cpu_to_le32(sense_handle); } /* * Set the sync_cmd flag so that the ISR knows not to complete this * cmd to the SCSI mid-layer */ cmd->sync_cmd = 1; if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) { cmd->sync_cmd = 0; dev_err(&instance->pdev->dev, "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n", __func__, __LINE__, cmd->frame->dcmd.opcode, cmd->cmd_status_drv); return -EBUSY; } cmd->sync_cmd = 0; if (instance->unload == 1) { dev_info(&instance->pdev->dev, "Driver unload is in progress " "don't submit data to application\n"); goto out; } /* * copy out the kernel buffers to user buffers */ for (i = 0; i < ioc->sge_count; i++) { if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], ioc->sgl[i].iov_len)) { error = -EFAULT; goto out; } } /* * copy out the sense */ if (ioc->sense_len) { /* * sense_ptr 
points to the location that has the user * sense buffer address */ sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + ioc->sense_off); if (copy_to_user((void __user *)((unsigned long) get_unaligned((unsigned long *)sense_ptr)), sense, ioc->sense_len)) { dev_err(&instance->pdev->dev, "Failed to copy out to user " "sense data\n"); error = -EFAULT; goto out; } } /* * copy the status codes returned by the fw */ if (copy_to_user(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status, sizeof(u8))) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n"); error = -EFAULT; } out: if (sense) { dma_free_coherent(&instance->pdev->dev, ioc->sense_len, sense, sense_handle); } for (i = 0; i < ioc->sge_count; i++) { if (kbuff_arr[i]) { dma_free_coherent(&instance->pdev->dev, le32_to_cpu(kern_sge32[i].length), kbuff_arr[i], le32_to_cpu(kern_sge32[i].phys_addr)); kbuff_arr[i] = NULL; } } megasas_return_cmd(instance, cmd); return error; } static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) { struct megasas_iocpacket __user *user_ioc = (struct megasas_iocpacket __user *)arg; struct megasas_iocpacket *ioc; struct megasas_instance *instance; int error; int i; unsigned long flags; u32 wait_time = MEGASAS_RESET_WAIT_TIME; ioc = memdup_user(user_ioc, sizeof(*ioc)); if (IS_ERR(ioc)) return PTR_ERR(ioc); instance = megasas_lookup_instance(ioc->host_no); if (!instance) { error = -ENODEV; goto out_kfree_ioc; } /* Adjust ioctl wait time for VF mode */ if (instance->requestorId) wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; /* Block ioctls in VF mode */ if (instance->requestorId && !allow_vf_ioctls) { error = -ENODEV; goto out_kfree_ioc; } if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Controller in crit error\n"); error = -ENODEV; goto out_kfree_ioc; } if (instance->unload == 1) { error = -ENODEV; goto out_kfree_ioc; } if (down_interruptible(&instance->ioctl_sem)) { error = 
-ERESTARTSYS; goto out_kfree_ioc; } for (i = 0; i < wait_time; i++) { spin_lock_irqsave(&instance->hba_lock, flags); if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); break; } spin_unlock_irqrestore(&instance->hba_lock, flags); if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { dev_notice(&instance->pdev->dev, "waiting" "for controller reset to finish\n"); } msleep(1000); } spin_lock_irqsave(&instance->hba_lock, flags); if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n"); error = -ENODEV; goto out_up; } spin_unlock_irqrestore(&instance->hba_lock, flags); error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); out_up: up(&instance->ioctl_sem); out_kfree_ioc: kfree(ioc); return error; } static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) { struct megasas_instance *instance; struct megasas_aen aen; int error; int i; unsigned long flags; u32 wait_time = MEGASAS_RESET_WAIT_TIME; if (file->private_data != file) { printk(KERN_DEBUG "megasas: fasync_helper was not " "called first\n"); return -EINVAL; } if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) return -EFAULT; instance = megasas_lookup_instance(aen.host_no); if (!instance) return -ENODEV; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { return -ENODEV; } if (instance->unload == 1) { return -ENODEV; } for (i = 0; i < wait_time; i++) { spin_lock_irqsave(&instance->hba_lock, flags); if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); break; } spin_unlock_irqrestore(&instance->hba_lock, flags); if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { dev_notice(&instance->pdev->dev, "waiting for" "controller reset to finish\n"); } msleep(1000); } spin_lock_irqsave(&instance->hba_lock, flags); if 
(atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n"); return -ENODEV; } spin_unlock_irqrestore(&instance->hba_lock, flags); mutex_lock(&instance->reset_mutex); error = megasas_register_aen(instance, aen.seq_num, aen.class_locale_word); mutex_unlock(&instance->reset_mutex); return error; } /** * megasas_mgmt_ioctl - char node ioctl entry point */ static long megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case MEGASAS_IOC_FIRMWARE: return megasas_mgmt_ioctl_fw(file, arg); case MEGASAS_IOC_GET_AEN: return megasas_mgmt_ioctl_aen(file, arg); } return -ENOTTY; } #ifdef CONFIG_COMPAT static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) { struct compat_megasas_iocpacket __user *cioc = (struct compat_megasas_iocpacket __user *)arg; struct megasas_iocpacket __user *ioc = compat_alloc_user_space(sizeof(struct megasas_iocpacket)); int i; int error = 0; compat_uptr_t ptr; u32 local_sense_off; u32 local_sense_len; u32 user_sense_off; if (clear_user(ioc, sizeof(*ioc))) return -EFAULT; if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) || copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) || copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) || copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) || copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) || copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) return -EFAULT; /* * The sense_ptr is used in megasas_mgmt_fw_ioctl only when * sense_len is not null, so prepare the 64bit value under * the same condition. 
*/ if (get_user(local_sense_off, &ioc->sense_off) || get_user(local_sense_len, &ioc->sense_len) || get_user(user_sense_off, &cioc->sense_off)) return -EFAULT; if (local_sense_len) { void __user **sense_ioc_ptr = (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off); compat_uptr_t *sense_cioc_ptr = (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off); if (get_user(ptr, sense_cioc_ptr) || put_user(compat_ptr(ptr), sense_ioc_ptr)) return -EFAULT; } for (i = 0; i < MAX_IOCTL_SGE; i++) { if (get_user(ptr, &cioc->sgl[i].iov_base) || put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || copy_in_user(&ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len, sizeof(compat_size_t))) return -EFAULT; } error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc); if (copy_in_user(&cioc->frame.hdr.cmd_status, &ioc->frame.hdr.cmd_status, sizeof(u8))) { printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n"); return -EFAULT; } return error; } static long megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case MEGASAS_IOC_FIRMWARE32: return megasas_mgmt_compat_ioctl_fw(file, arg); case MEGASAS_IOC_GET_AEN: return megasas_mgmt_ioctl_aen(file, arg); } return -ENOTTY; } #endif /* * File operations structure for management interface */ static const struct file_operations megasas_mgmt_fops = { .owner = THIS_MODULE, .open = megasas_mgmt_open, .fasync = megasas_mgmt_fasync, .unlocked_ioctl = megasas_mgmt_ioctl, .poll = megasas_mgmt_poll, #ifdef CONFIG_COMPAT .compat_ioctl = megasas_mgmt_compat_ioctl, #endif .llseek = noop_llseek, }; /* * PCI hotplug support registration structure */ static struct pci_driver megasas_pci_driver = { .name = "megaraid_sas", .id_table = megasas_pci_table, .probe = megasas_probe_one, .remove = megasas_detach_one, .suspend = megasas_suspend, .resume = megasas_resume, .shutdown = megasas_shutdown, }; /* * Sysfs driver attributes */ static ssize_t megasas_sysfs_show_version(struct 
device_driver *dd, char *buf) { return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", MEGASAS_VERSION); } static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL); static ssize_t megasas_sysfs_show_release_date(struct device_driver *dd, char *buf) { return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", MEGASAS_RELDATE); } static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL); static ssize_t megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", support_poll_for_event); } static DRIVER_ATTR(support_poll_for_event, S_IRUGO, megasas_sysfs_show_support_poll_for_event, NULL); static ssize_t megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", support_device_change); } static DRIVER_ATTR(support_device_change, S_IRUGO, megasas_sysfs_show_support_device_change, NULL); static ssize_t megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", megasas_dbg_lvl); } static ssize_t megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count) { int retval = count; if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) { printk(KERN_ERR "megasas: could not set dbg_lvl\n"); retval = -EINVAL; } return retval; } static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl, megasas_sysfs_set_dbg_lvl); static inline void megasas_remove_scsi_device(struct scsi_device *sdev) { sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); scsi_remove_device(sdev); scsi_device_put(sdev); } static void megasas_aen_polling(struct work_struct *work) { struct megasas_aen_event *ev = container_of(work, struct megasas_aen_event, hotplug_work.work); struct megasas_instance *instance = ev->instance; union megasas_evt_class_locale class_locale; struct Scsi_Host *host; struct scsi_device *sdev1; u16 pd_index = 0; u16 ld_index = 0; int i, j, doscan = 0; u32 seq_num, wait_time = 
MEGASAS_RESET_WAIT_TIME; int error; u8 dcmd_ret = DCMD_SUCCESS; if (!instance) { printk(KERN_ERR "invalid instance!\n"); kfree(ev); return; } /* Adjust event workqueue thread wait time for VF mode */ if (instance->requestorId) wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; /* Don't run the event workqueue thread if OCR is running */ mutex_lock(&instance->reset_mutex); instance->ev = NULL; host = instance->host; if (instance->evt_detail) { megasas_decode_evt(instance); switch (le32_to_cpu(instance->evt_detail->code)) { case MR_EVT_PD_INSERTED: case MR_EVT_PD_REMOVED: dcmd_ret = megasas_get_pd_list(instance); if (dcmd_ret == DCMD_SUCCESS) doscan = SCAN_PD_CHANNEL; break; case MR_EVT_LD_OFFLINE: case MR_EVT_CFG_CLEARED: case MR_EVT_LD_DELETED: case MR_EVT_LD_CREATED: if (!instance->requestorId || (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0))) dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); if (dcmd_ret == DCMD_SUCCESS) doscan = SCAN_VD_CHANNEL; break; case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: case MR_EVT_FOREIGN_CFG_IMPORTED: case MR_EVT_LD_STATE_CHANGE: dcmd_ret = megasas_get_pd_list(instance); if (dcmd_ret != DCMD_SUCCESS) break; if (!instance->requestorId || (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0))) dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); if (dcmd_ret != DCMD_SUCCESS) break; doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL; dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", instance->host->host_no); break; case MR_EVT_CTRL_PROP_CHANGED: dcmd_ret = megasas_get_ctrl_info(instance); break; default: doscan = 0; break; } } else { dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); mutex_unlock(&instance->reset_mutex); kfree(ev); return; } mutex_unlock(&instance->reset_mutex); if (doscan & SCAN_PD_CHANNEL) { for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; 
sdev1 = scsi_device_lookup(host, i, j, 0); if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { if (!sdev1) scsi_add_device(host, i, j, 0); else scsi_device_put(sdev1); } else { if (sdev1) megasas_remove_scsi_device(sdev1); } } } } if (doscan & SCAN_VD_CHANNEL) { for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); if (instance->ld_ids[ld_index] != 0xff) { if (!sdev1) scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); else scsi_device_put(sdev1); } else { if (sdev1) megasas_remove_scsi_device(sdev1); } } } } if (dcmd_ret == DCMD_SUCCESS) seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; else seq_num = instance->last_seq_num; /* Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; if (instance->aen_cmd != NULL) { kfree(ev); return; } mutex_lock(&instance->reset_mutex); error = megasas_register_aen(instance, seq_num, class_locale.word); if (error) dev_err(&instance->pdev->dev, "register aen failed error %x\n", error); mutex_unlock(&instance->reset_mutex); kfree(ev); } /** * megasas_init - Driver load entry point */ static int __init megasas_init(void) { int rval; /* * Booted in kdump kernel, minimize memory footprints by * disabling few features */ if (reset_devices) { msix_vectors = 1; rdpq_enable = 0; dual_qdepth_disable = 1; } /* * Announce driver version and other information */ pr_info("megasas: %s\n", MEGASAS_VERSION); spin_lock_init(&poll_aen_lock); support_poll_for_event = 2; support_device_change = 1; memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); /* * Register character device node */ rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); if (rval < 0) { printk(KERN_DEBUG "megasas: failed to open device 
node\n"); return rval; } megasas_mgmt_majorno = rval; /* * Register ourselves as PCI hotplug module */ rval = pci_register_driver(&megasas_pci_driver); if (rval) { printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n"); goto err_pcidrv; } rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_version); if (rval) goto err_dcf_attr_ver; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_release_date); if (rval) goto err_dcf_rel_date; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); if (rval) goto err_dcf_support_poll_for_event; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); if (rval) goto err_dcf_dbg_lvl; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_support_device_change); if (rval) goto err_dcf_support_device_change; return rval; err_dcf_support_device_change: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); err_dcf_dbg_lvl: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); err_dcf_support_poll_for_event: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_release_date); err_dcf_rel_date: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); err_dcf_attr_ver: pci_unregister_driver(&megasas_pci_driver); err_pcidrv: unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); return rval; } /** * megasas_exit - Driver unload entry point */ static void __exit megasas_exit(void) { driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_device_change); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_release_date); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); pci_unregister_driver(&megasas_pci_driver); unregister_chrdev(megasas_mgmt_majorno, 
"megaraid_sas_ioctl"); } module_init(megasas_init); module_exit(megasas_exit);
null
null
null
null
91,955
63,639
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
63,639
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_UI_BOOKMARKS_BOOKMARK_BAR_H_ #define CHROME_BROWSER_UI_BOOKMARKS_BOOKMARK_BAR_H_ #include "base/macros.h" class BookmarkBar { public: enum State { // The bookmark bar is not visible. HIDDEN, // The bookmark bar is visible and not detached. SHOW, // The bookmark bar is visible and detached from the location bar (as // happens on the new tab page). DETACHED }; // Used when the state changes to indicate if the transition should be // animated. enum AnimateChangeType { ANIMATE_STATE_CHANGE, DONT_ANIMATE_STATE_CHANGE }; private: DISALLOW_IMPLICIT_CONSTRUCTORS(BookmarkBar); }; #endif // CHROME_BROWSER_UI_BOOKMARKS_BOOKMARK_BAR_H_
null
null
null
null
60,502
9,870
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
9,870
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chromecast/media/cma/base/media_task_runner.h" namespace chromecast { namespace media { MediaTaskRunner::MediaTaskRunner() { } MediaTaskRunner::~MediaTaskRunner() { } } // namespace media } // namespace chromecast
null
null
null
null
6,733
13,059
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
13,059
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_WEBDATA_COMMON_WEB_DATABASE_H_ #define COMPONENTS_WEBDATA_COMMON_WEB_DATABASE_H_ #include <map> #include <string> #include "base/macros.h" #include "components/webdata/common/web_database_table.h" #include "components/webdata/common/webdata_export.h" #include "sql/connection.h" #include "sql/init_status.h" #include "sql/meta_table.h" namespace base { class FilePath; } // This class manages a SQLite database that stores various web page meta data. class WEBDATA_EXPORT WebDatabase { public: enum State { COMMIT_NOT_NEEDED, COMMIT_NEEDED }; // Exposed publicly so the keyword table can access it. static const int kCurrentVersionNumber; // The newest version of the database Chrome will NOT try to migrate. static const int kDeprecatedVersionNumber; WebDatabase(); virtual ~WebDatabase(); // Adds a database table. Ownership remains with the caller, which // must ensure that the lifetime of |table| exceeds this object's // lifetime. Must only be called before Init. void AddTable(WebDatabaseTable* table); // Retrieves a table based on its |key|. WebDatabaseTable* GetTable(WebDatabaseTable::TypeKey key); // Call before Init() to set the error callback to be used for the // underlying database connection. void set_error_callback( const sql::Connection::ErrorCallback& error_callback) { db_.set_error_callback(error_callback); } // Initialize the database given a name. The name defines where the SQLite // file is. If this returns an error code, no other method should be called. // // Before calling this method, you must call AddTable for any // WebDatabaseTable objects that are supposed to participate in // managing the database. 
sql::InitStatus Init(const base::FilePath& db_name); // Transactions management void BeginTransaction(); void CommitTransaction(); std::string GetDiagnosticInfo(int extended_error, sql::Statement* statement); // Exposed for testing only. sql::Connection* GetSQLConnection(); private: // Used by |Init()| to migration database schema from older versions to // current version. sql::InitStatus MigrateOldVersionsAsNeeded(); // Migrates this database to |version|. Returns false if there was // migration work to do and it failed, true otherwise. // // Implementations may set |*update_compatible_version| to true if // the compatible version should be changed to |version|. // Implementations should otherwise not modify this parameter. bool MigrateToVersion(int version, bool* update_compatible_version); // Migration method for version 58. bool MigrateToVersion58DropWebAppsAndIntents(); sql::Connection db_; sql::MetaTable meta_table_; // Map of all the different tables that have been added to this // object. Non-owning. typedef std::map<WebDatabaseTable::TypeKey, WebDatabaseTable*> TableMap; TableMap tables_; DISALLOW_COPY_AND_ASSIGN(WebDatabase); }; #endif // COMPONENTS_WEBDATA_COMMON_WEB_DATABASE_H_
null
null
null
null
9,922
11,184
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
176,179
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (C) 2000 Jeff Dike ([email protected]) * Licensed under the GPL */ #ifndef __SSL_H__ #define __SSL_H__ extern int ssl_read(int fd, int line); extern void ssl_receive_char(int line, char ch); #endif
null
null
null
null
84,526
16,847
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
181,842
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * arch/sh/kernel/process.c * * This file handles the architecture-dependent parts of process handling.. * * Copyright (C) 1995 Linus Torvalds * * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima * Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC * Copyright (C) 2002 - 2008 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/sched/debug.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/slab.h> #include <linux/elfcore.h> #include <linux/kallsyms.h> #include <linux/fs.h> #include <linux/ftrace.h> #include <linux/hw_breakpoint.h> #include <linux/prefetch.h> #include <linux/stackprotector.h> #include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/fpu.h> #include <asm/syscalls.h> #include <asm/switch_to.h> void show_regs(struct pt_regs * regs) { printk("\n"); show_regs_print_info(KERN_DEFAULT); print_symbol("PC is at %s\n", instruction_pointer(regs)); print_symbol("PR is at %s\n", regs->pr); printk("PC : %08lx SP : %08lx SR : %08lx ", regs->pc, regs->regs[15], regs->sr); #ifdef CONFIG_MMU printk("TEA : %08x\n", __raw_readl(MMU_TEA)); #else printk("\n"); #endif printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n", regs->regs[0],regs->regs[1], regs->regs[2],regs->regs[3]); printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n", regs->regs[4],regs->regs[5], regs->regs[6],regs->regs[7]); printk("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n", regs->regs[8],regs->regs[9], regs->regs[10],regs->regs[11]); printk("R12 : %08lx R13 : %08lx R14 : %08lx\n", regs->regs[12],regs->regs[13], regs->regs[14]); printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n", regs->mach, regs->macl, regs->gbr, regs->pr); show_trace(NULL, (unsigned long *)regs->regs[15], regs); show_code(regs); } void start_thread(struct 
pt_regs *regs, unsigned long new_pc, unsigned long new_sp) { regs->pr = 0; regs->sr = SR_FD; regs->pc = new_pc; regs->regs[15] = new_sp; free_thread_xstate(current); } EXPORT_SYMBOL(start_thread); void flush_thread(void) { struct task_struct *tsk = current; flush_ptrace_hw_breakpoint(tsk); #if defined(CONFIG_SH_FPU) /* Forget lazy FPU state */ clear_fpu(tsk, task_pt_regs(tsk)); clear_used_math(); #endif } void release_thread(struct task_struct *dead_task) { /* do nothing */ } /* Fill in the fpu structure for a core dump.. */ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) { int fpvalid = 0; #if defined(CONFIG_SH_FPU) struct task_struct *tsk = current; fpvalid = !!tsk_used_math(tsk); if (fpvalid) fpvalid = !fpregs_get(tsk, NULL, 0, sizeof(struct user_fpu_struct), fpu, NULL); #endif return fpvalid; } EXPORT_SYMBOL(dump_fpu); asmlinkage void ret_from_fork(void); asmlinkage void ret_from_kernel_thread(void); int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct task_struct *p) { struct thread_info *ti = task_thread_info(p); struct pt_regs *childregs; #if defined(CONFIG_SH_DSP) struct task_struct *tsk = current; if (is_dsp_enabled(tsk)) { /* We can use the __save_dsp or just copy the struct: * __save_dsp(p); * p->thread.dsp_status.status |= SR_DSP */ p->thread.dsp_status = tsk->thread.dsp_status; } #endif memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); childregs = task_pt_regs(p); p->thread.sp = (unsigned long) childregs; if (unlikely(p->flags & PF_KTHREAD)) { memset(childregs, 0, sizeof(struct pt_regs)); p->thread.pc = (unsigned long) ret_from_kernel_thread; childregs->regs[4] = arg; childregs->regs[5] = usp; childregs->sr = SR_MD; #if defined(CONFIG_SH_FPU) childregs->sr |= SR_FD; #endif ti->addr_limit = KERNEL_DS; ti->status &= ~TS_USEDFPU; p->thread.fpu_counter = 0; return 0; } *childregs = *current_pt_regs(); if (usp) childregs->regs[15] = usp; ti->addr_limit = USER_DS; if (clone_flags & CLONE_SETTLS) 
childregs->gbr = childregs->regs[0]; childregs->regs[0] = 0; /* Set return value for child */ p->thread.pc = (unsigned long) ret_from_fork; return 0; } /* * switch_to(x,y) should switch tasks from x to y. * */ __notrace_funcgraph struct task_struct * __switch_to(struct task_struct *prev, struct task_struct *next) { struct thread_struct *next_t = &next->thread; #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) __stack_chk_guard = next->stack_canary; #endif unlazy_fpu(prev, task_pt_regs(prev)); /* we're going to use this soon, after a few expensive things */ if (next->thread.fpu_counter > 5) prefetch(next_t->xstate); #ifdef CONFIG_MMU /* * Restore the kernel mode register * k7 (r7_bank1) */ asm volatile("ldc %0, r7_bank" : /* no output */ : "r" (task_thread_info(next))); #endif /* * If the task has used fpu the last 5 timeslices, just do a full * restore of the math state immediately to avoid the trap; the * chances of needing FPU soon are obviously high now */ if (next->thread.fpu_counter > 5) __fpu_state_restore(); return prev; } unsigned long get_wchan(struct task_struct *p) { unsigned long pc; if (!p || p == current || p->state == TASK_RUNNING) return 0; /* * The same comment as on the Alpha applies here, too ... */ pc = thread_saved_pc(p); #ifdef CONFIG_FRAME_POINTER if (in_sched_functions(pc)) { unsigned long schedule_frame = (unsigned long)p->thread.sp; return ((unsigned long *)schedule_frame)[21]; } #endif return pc; }
null
null
null
null
90,189
1,941
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
154,998
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/*
 * Android MediaCodec Wrapper
 *
 * Copyright (c) 2015-2016 Matthieu Bouron <matthieu.bouron stupeflix.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <jni.h>

#include "libavutil/avassert.h"
#include "libavutil/mem.h"
#include "libavutil/avstring.h"

#include "avcodec.h"
#include "ffjni.h"
#include "version.h"
#include "mediacodec_wrapper.h"

/*
 * Cached JNI ids for android.media.MediaCodecList / MediaCodecInfo and their
 * nested CodecCapabilities / CodecProfileLevel classes. Filled in by
 * ff_jni_init_jfields() using jni_amediacodeclist_mapping below.
 */
struct JNIAMediaCodecListFields {

    jclass mediacodec_list_class;
    jmethodID init_id;
    jmethodID find_decoder_for_format_id;

    jmethodID get_codec_count_id;
    jmethodID get_codec_info_at_id;

    jclass mediacodec_info_class;
    jmethodID get_name_id;
    jmethodID get_codec_capabilities_id;
    jmethodID get_supported_types_id;
    jmethodID is_encoder_id;

    jclass codec_capabilities_class;
    jfieldID color_formats_id;
    jfieldID profile_levels_id;

    jclass codec_profile_level_class;
    jfieldID profile_id;
    jfieldID level_id;

    /* android.media.MediaCodecInfo.CodecProfileLevel profile constants */
    jfieldID avc_profile_baseline_id;
    jfieldID avc_profile_main_id;
    jfieldID avc_profile_extended_id;
    jfieldID avc_profile_high_id;
    jfieldID avc_profile_high10_id;
    jfieldID avc_profile_high422_id;
    jfieldID avc_profile_high444_id;

    jfieldID hevc_profile_main_id;
    jfieldID hevc_profile_main10_id;
    jfieldID hevc_profile_main10_hdr10_id;

};

/*
 * Java name / signature -> struct offset mapping consumed by
 * ff_jni_init_jfields(). The trailing integer presumably marks whether the
 * entry is mandatory for initialization to succeed — TODO confirm against
 * ffjni.h (HEVC entries use 0, matching their availability only on newer
 * Android API levels).
 */
static const struct FFJniField jni_amediacodeclist_mapping[] = {
    { "android/media/MediaCodecList", NULL, NULL, FF_JNI_CLASS, offsetof(struct JNIAMediaCodecListFields, mediacodec_list_class), 1 },
        { "android/media/MediaCodecList", "<init>", "(I)V", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecListFields, init_id), 0 },
        { "android/media/MediaCodecList", "findDecoderForFormat", "(Landroid/media/MediaFormat;)Ljava/lang/String;", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecListFields, find_decoder_for_format_id), 0 },

        { "android/media/MediaCodecList", "getCodecCount", "()I", FF_JNI_STATIC_METHOD, offsetof(struct JNIAMediaCodecListFields, get_codec_count_id), 1 },
        { "android/media/MediaCodecList", "getCodecInfoAt", "(I)Landroid/media/MediaCodecInfo;", FF_JNI_STATIC_METHOD, offsetof(struct JNIAMediaCodecListFields, get_codec_info_at_id), 1 },

    { "android/media/MediaCodecInfo", NULL, NULL, FF_JNI_CLASS, offsetof(struct JNIAMediaCodecListFields, mediacodec_info_class), 1 },
        { "android/media/MediaCodecInfo", "getName", "()Ljava/lang/String;", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecListFields, get_name_id), 1 },
        { "android/media/MediaCodecInfo", "getCapabilitiesForType", "(Ljava/lang/String;)Landroid/media/MediaCodecInfo$CodecCapabilities;", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecListFields, get_codec_capabilities_id), 1 },
        { "android/media/MediaCodecInfo", "getSupportedTypes", "()[Ljava/lang/String;", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecListFields, get_supported_types_id), 1 },
        { "android/media/MediaCodecInfo", "isEncoder", "()Z", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecListFields, is_encoder_id), 1 },

    { "android/media/MediaCodecInfo$CodecCapabilities", NULL, NULL, FF_JNI_CLASS, offsetof(struct JNIAMediaCodecListFields, codec_capabilities_class), 1 },
        { "android/media/MediaCodecInfo$CodecCapabilities", "colorFormats", "[I", FF_JNI_FIELD, offsetof(struct JNIAMediaCodecListFields, color_formats_id), 1 },
        { "android/media/MediaCodecInfo$CodecCapabilities", "profileLevels", "[Landroid/media/MediaCodecInfo$CodecProfileLevel;", FF_JNI_FIELD, offsetof(struct JNIAMediaCodecListFields, profile_levels_id), 1 },

    { "android/media/MediaCodecInfo$CodecProfileLevel", NULL, NULL, FF_JNI_CLASS, offsetof(struct JNIAMediaCodecListFields, codec_profile_level_class), 1 },
        { "android/media/MediaCodecInfo$CodecProfileLevel", "profile", "I", FF_JNI_FIELD, offsetof(struct JNIAMediaCodecListFields, profile_id), 1 },
        { "android/media/MediaCodecInfo$CodecProfileLevel", "level", "I", FF_JNI_FIELD, offsetof(struct JNIAMediaCodecListFields, level_id), 1 },

        { "android/media/MediaCodecInfo$CodecProfileLevel", "AVCProfileBaseline", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecListFields, avc_profile_baseline_id), 1 },
        { "android/media/MediaCodecInfo$CodecProfileLevel", "AVCProfileMain", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecListFields, avc_profile_main_id), 1 },
        { "android/media/MediaCodecInfo$CodecProfileLevel", "AVCProfileExtended", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecListFields, avc_profile_extended_id), 1 },
        { "android/media/MediaCodecInfo$CodecProfileLevel", "AVCProfileHigh", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecListFields, avc_profile_high_id), 1 },
        { "android/media/MediaCodecInfo$CodecProfileLevel", "AVCProfileHigh10", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecListFields, avc_profile_high10_id), 1 },
        { "android/media/MediaCodecInfo$CodecProfileLevel", "AVCProfileHigh422", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecListFields, avc_profile_high422_id), 1 },
        { "android/media/MediaCodecInfo$CodecProfileLevel", "AVCProfileHigh444", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecListFields, avc_profile_high444_id), 1 },

        { "android/media/MediaCodecInfo$CodecProfileLevel", "HEVCProfileMain", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecListFields, hevc_profile_main_id), 0 },
        { "android/media/MediaCodecInfo$CodecProfileLevel", "HEVCProfileMain10", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecListFields, hevc_profile_main10_id), 0 },
        { "android/media/MediaCodecInfo$CodecProfileLevel", "HEVCProfileMain10HDR10", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecListFields, hevc_profile_main10_hdr10_id), 0 },

    { NULL }
};

/* Cached JNI ids for android.media.MediaFormat. */
struct JNIAMediaFormatFields {

    jclass mediaformat_class;

    jmethodID init_id;

    jmethodID contains_key_id;

    jmethodID get_integer_id;
    jmethodID get_long_id;
    jmethodID get_float_id;
    jmethodID get_bytebuffer_id;
    jmethodID get_string_id;

    jmethodID set_integer_id;
    jmethodID set_long_id;
    jmethodID set_float_id;
    jmethodID set_bytebuffer_id;
    jmethodID set_string_id;

    jmethodID to_string_id;

};

/* MediaFormat getter/setter mapping; all entries are marked 1 (required). */
static const struct FFJniField jni_amediaformat_mapping[] = {
    { "android/media/MediaFormat", NULL, NULL, FF_JNI_CLASS, offsetof(struct JNIAMediaFormatFields, mediaformat_class), 1 },

        { "android/media/MediaFormat", "<init>", "()V", FF_JNI_METHOD, offsetof(struct JNIAMediaFormatFields, init_id), 1 },

        { "android/media/MediaFormat", "containsKey", "(Ljava/lang/String;)Z", FF_JNI_METHOD,offsetof(struct JNIAMediaFormatFields, contains_key_id), 1 },

        { "android/media/MediaFormat", "getInteger", "(Ljava/lang/String;)I", FF_JNI_METHOD, offsetof(struct JNIAMediaFormatFields, get_integer_id), 1 },
        { "android/media/MediaFormat", "getLong", "(Ljava/lang/String;)J", FF_JNI_METHOD, offsetof(struct JNIAMediaFormatFields, get_long_id), 1 },
        { "android/media/MediaFormat", "getFloat", "(Ljava/lang/String;)F", FF_JNI_METHOD, offsetof(struct JNIAMediaFormatFields, get_float_id), 1 },
        { "android/media/MediaFormat", "getByteBuffer", "(Ljava/lang/String;)Ljava/nio/ByteBuffer;", FF_JNI_METHOD, offsetof(struct JNIAMediaFormatFields, get_bytebuffer_id), 1 },
        { "android/media/MediaFormat", "getString", "(Ljava/lang/String;)Ljava/lang/String;", FF_JNI_METHOD, offsetof(struct JNIAMediaFormatFields, get_string_id), 1 },

        { "android/media/MediaFormat", "setInteger", "(Ljava/lang/String;I)V", FF_JNI_METHOD, offsetof(struct JNIAMediaFormatFields, set_integer_id), 1 },
        { "android/media/MediaFormat", "setLong", "(Ljava/lang/String;J)V", FF_JNI_METHOD, offsetof(struct JNIAMediaFormatFields, set_long_id), 1 },
        { "android/media/MediaFormat", "setFloat", "(Ljava/lang/String;F)V", FF_JNI_METHOD, offsetof(struct JNIAMediaFormatFields, set_float_id), 1 },
        { "android/media/MediaFormat", "setByteBuffer", "(Ljava/lang/String;Ljava/nio/ByteBuffer;)V", FF_JNI_METHOD, offsetof(struct JNIAMediaFormatFields, set_bytebuffer_id), 1 },
        { "android/media/MediaFormat", "setString", "(Ljava/lang/String;Ljava/lang/String;)V", FF_JNI_METHOD, offsetof(struct JNIAMediaFormatFields, set_string_id), 1 },

        { "android/media/MediaFormat", "toString", "()Ljava/lang/String;", FF_JNI_METHOD, offsetof(struct JNIAMediaFormatFields, to_string_id), 1 },

    { NULL }
};

/* AVClass used for logging from FFAMediaFormat instances. */
static const AVClass amediaformat_class = {
    .class_name = "amediaformat",
    .item_name  = av_default_item_name,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Opaque wrapper around a java android.media.MediaFormat global reference. */
struct FFAMediaFormat {

    const AVClass *class;
    struct JNIAMediaFormatFields jfields;
    jobject object;      /* java MediaFormat instance */
};

/* Cached JNI ids for android.media.MediaCodec and MediaCodec.BufferInfo. */
struct JNIAMediaCodecFields {

    jclass mediacodec_class;

    /* MediaCodec integer constants, read once at init */
    jfieldID info_try_again_later_id;
    jfieldID info_output_buffers_changed_id;
    jfieldID info_output_format_changed_id;

    jfieldID buffer_flag_codec_config_id;
    jfieldID buffer_flag_end_of_stream_id;
    jfieldID buffer_flag_key_frame_id;

    jfieldID configure_flag_encode_id;

    jmethodID create_by_codec_name_id;
    jmethodID create_decoder_by_type_id;
    jmethodID create_encoder_by_type_id;

    jmethodID get_name_id;

    jmethodID configure_id;
    jmethodID start_id;
    jmethodID flush_id;
    jmethodID stop_id;
    jmethodID release_id;

    jmethodID get_output_format_id;

    jmethodID dequeue_input_buffer_id;
    jmethodID queue_input_buffer_id;
    jmethodID get_input_buffer_id;
    jmethodID get_input_buffers_id;

    jmethodID dequeue_output_buffer_id;
    jmethodID get_output_buffer_id;
    jmethodID get_output_buffers_id;
    jmethodID release_output_buffer_id;
    jmethodID release_output_buffer_at_time_id;

    jclass mediainfo_class;

    jmethodID init_id;

    jfieldID flags_id;
    jfieldID offset_id;
    jfieldID presentation_time_us_id;
    jfieldID size_id;

};

/*
 * MediaCodec mapping. getInputBuffer/getOutputBuffer and the timestamped
 * releaseOutputBuffer overload are marked 0 (non-mandatory) — presumably
 * because they only exist on newer Android API levels; callers check
 * has_get_i_o_buffer before relying on them. TODO confirm against ffjni.h.
 */
static const struct FFJniField jni_amediacodec_mapping[] = {
    { "android/media/MediaCodec", NULL, NULL, FF_JNI_CLASS, offsetof(struct JNIAMediaCodecFields, mediacodec_class), 1 },

        { "android/media/MediaCodec", "INFO_TRY_AGAIN_LATER", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecFields, info_try_again_later_id), 1 },
        { "android/media/MediaCodec", "INFO_OUTPUT_BUFFERS_CHANGED", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecFields, info_output_buffers_changed_id), 1 },
        { "android/media/MediaCodec", "INFO_OUTPUT_FORMAT_CHANGED", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecFields, info_output_format_changed_id), 1 },

        { "android/media/MediaCodec", "BUFFER_FLAG_CODEC_CONFIG", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecFields, buffer_flag_codec_config_id), 1 },
        { "android/media/MediaCodec", "BUFFER_FLAG_END_OF_STREAM", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecFields, buffer_flag_end_of_stream_id), 1 },
        { "android/media/MediaCodec", "BUFFER_FLAG_KEY_FRAME", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecFields, buffer_flag_key_frame_id), 0 },

        { "android/media/MediaCodec", "CONFIGURE_FLAG_ENCODE", "I", FF_JNI_STATIC_FIELD, offsetof(struct JNIAMediaCodecFields, configure_flag_encode_id), 1 },

        { "android/media/MediaCodec", "createByCodecName", "(Ljava/lang/String;)Landroid/media/MediaCodec;", FF_JNI_STATIC_METHOD, offsetof(struct JNIAMediaCodecFields, create_by_codec_name_id), 1 },
        { "android/media/MediaCodec", "createDecoderByType", "(Ljava/lang/String;)Landroid/media/MediaCodec;", FF_JNI_STATIC_METHOD, offsetof(struct JNIAMediaCodecFields, create_decoder_by_type_id), 1 },
        { "android/media/MediaCodec", "createEncoderByType", "(Ljava/lang/String;)Landroid/media/MediaCodec;", FF_JNI_STATIC_METHOD, offsetof(struct JNIAMediaCodecFields, create_encoder_by_type_id), 1 },

        { "android/media/MediaCodec", "getName", "()Ljava/lang/String;", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, get_name_id), 1 },

        { "android/media/MediaCodec", "configure", "(Landroid/media/MediaFormat;Landroid/view/Surface;Landroid/media/MediaCrypto;I)V", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, configure_id), 1 },
        { "android/media/MediaCodec", "start", "()V", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, start_id), 1 },
        { "android/media/MediaCodec", "flush", "()V", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, flush_id), 1 },
        { "android/media/MediaCodec", "stop", "()V", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, stop_id), 1 },
        { "android/media/MediaCodec", "release", "()V", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, release_id), 1 },

        { "android/media/MediaCodec", "getOutputFormat", "()Landroid/media/MediaFormat;", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, get_output_format_id), 1 },

        { "android/media/MediaCodec", "dequeueInputBuffer", "(J)I", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, dequeue_input_buffer_id), 1 },
        { "android/media/MediaCodec", "queueInputBuffer", "(IIIJI)V", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, queue_input_buffer_id), 1 },
        { "android/media/MediaCodec", "getInputBuffer", "(I)Ljava/nio/ByteBuffer;", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, get_input_buffer_id), 0 },
        { "android/media/MediaCodec", "getInputBuffers", "()[Ljava/nio/ByteBuffer;", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, get_input_buffers_id), 1 },

        { "android/media/MediaCodec", "dequeueOutputBuffer", "(Landroid/media/MediaCodec$BufferInfo;J)I", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, dequeue_output_buffer_id), 1 },
        { "android/media/MediaCodec", "getOutputBuffer", "(I)Ljava/nio/ByteBuffer;", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, get_output_buffer_id), 0 },
        { "android/media/MediaCodec", "getOutputBuffers", "()[Ljava/nio/ByteBuffer;", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, get_output_buffers_id), 1 },
        { "android/media/MediaCodec", "releaseOutputBuffer", "(IZ)V", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, release_output_buffer_id), 1 },
        { "android/media/MediaCodec", "releaseOutputBuffer", "(IJ)V", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, release_output_buffer_at_time_id), 0 },

    { "android/media/MediaCodec$BufferInfo", NULL, NULL, FF_JNI_CLASS, offsetof(struct JNIAMediaCodecFields, mediainfo_class), 1 },

        { "android/media/MediaCodec.BufferInfo", "<init>", "()V", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, init_id), 1 },

        { "android/media/MediaCodec.BufferInfo", "flags", "I", FF_JNI_FIELD, offsetof(struct JNIAMediaCodecFields, flags_id), 1 },
        { "android/media/MediaCodec.BufferInfo", "offset", "I", FF_JNI_FIELD, offsetof(struct JNIAMediaCodecFields, offset_id), 1 },
        { "android/media/MediaCodec.BufferInfo", "presentationTimeUs", "J", FF_JNI_FIELD, offsetof(struct JNIAMediaCodecFields, presentation_time_us_id), 1 },
        { "android/media/MediaCodec.BufferInfo", "size", "I", FF_JNI_FIELD, offsetof(struct JNIAMediaCodecFields, size_id), 1 },

    { NULL }
};

/* AVClass used for logging from FFAMediaCodec instances. */
static const AVClass amediacodec_class = {
    .class_name = "amediacodec",
    .item_name  = av_default_item_name,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Opaque wrapper around a java android.media.MediaCodec global reference. */
struct FFAMediaCodec {

    const AVClass *class;

    struct JNIAMediaCodecFields jfields;

    jobject object;           /* java MediaCodec instance */
    jobject buffer_info;      /* reusable MediaCodec.BufferInfo instance */

    jobject input_buffers;    /* cached result of getInputBuffers() */
    jobject output_buffers;   /* cached result of getOutputBuffers() */

    /* values of the MediaCodec constants, resolved at runtime */
    int INFO_TRY_AGAIN_LATER;
    int INFO_OUTPUT_BUFFERS_CHANGED;
    int INFO_OUTPUT_FORMAT_CHANGED;

    int BUFFER_FLAG_CODEC_CONFIG;
    int BUFFER_FLAG_END_OF_STREAM;
    int BUFFER_FLAG_KEY_FRAME;

    int CONFIGURE_FLAG_ENCODE;

    /* nonzero if getInputBuffer()/getOutputBuffer() were resolved */
    int has_get_i_o_buffer;
};

/* Fetch the thread's JNIEnv or bail out of the calling function with `ret`. */
#define JNI_GET_ENV_OR_RETURN(env, log_ctx, ret) do {              \
    (env) = ff_jni_get_env(log_ctx);                               \
    if (!(env)) {                                                  \
        return ret;                                                \
    }                                                              \
} while (0)

/* Same as above for functions returning void. */
#define JNI_GET_ENV_OR_RETURN_VOID(env, log_ctx) do {              \
    (env) = ff_jni_get_env(log_ctx);                               \
    if (!(env)) {                                                  \
        return;                                                    \
    }                                                              \
} while (0)

/*
 * Map avctx->codec_id/avctx->profile to the corresponding
 * android.media.MediaCodecInfo.CodecProfileLevel constant.
 *
 * Only H.264 and HEVC profiles are handled; returns -1 when the profile is
 * unknown, unmapped, or the java fields could not be resolved.
 */
int ff_AMediaCodecProfile_getProfileFromAVCodecContext(AVCodecContext *avctx)
{
    int ret = -1;

    JNIEnv *env = NULL;
    struct JNIAMediaCodecListFields jfields = { 0 };
    jfieldID field_id = 0;

    JNI_GET_ENV_OR_RETURN(env, avctx, -1);

    if (ff_jni_init_jfields(env, &jfields, jni_amediacodeclist_mapping, 0, avctx) < 0) {
        goto done;
    }

    if (avctx->codec_id == AV_CODEC_ID_H264) {
        switch(avctx->profile) {
        case FF_PROFILE_H264_BASELINE:
        case FF_PROFILE_H264_CONSTRAINED_BASELINE:
            field_id = jfields.avc_profile_baseline_id;
            break;
        case FF_PROFILE_H264_MAIN:
            field_id = jfields.avc_profile_main_id;
            break;
        case FF_PROFILE_H264_EXTENDED:
            field_id = jfields.avc_profile_extended_id;
            break;
        case FF_PROFILE_H264_HIGH:
            field_id = jfields.avc_profile_high_id;
            break;
        case FF_PROFILE_H264_HIGH_10:
        case FF_PROFILE_H264_HIGH_10_INTRA:
            field_id = jfields.avc_profile_high10_id;
            break;
        case FF_PROFILE_H264_HIGH_422:
        case FF_PROFILE_H264_HIGH_422_INTRA:
            field_id = jfields.avc_profile_high422_id;
            break;
        case FF_PROFILE_H264_HIGH_444:
        case FF_PROFILE_H264_HIGH_444_INTRA:
        case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
            field_id = jfields.avc_profile_high444_id;
            break;
        }
    } else if (avctx->codec_id == AV_CODEC_ID_HEVC) {
        switch (avctx->profile) {
        case FF_PROFILE_HEVC_MAIN:
        case FF_PROFILE_HEVC_MAIN_STILL_PICTURE:
            field_id = jfields.hevc_profile_main_id;
            break;
        case FF_PROFILE_HEVC_MAIN_10:
            field_id = jfields.hevc_profile_main10_id;
            break;
        }
    }

    if (field_id) {
        /* read the static int constant from CodecProfileLevel */
        ret = (*env)->GetStaticIntField(env, jfields.codec_profile_level_class, field_id);
        if (ff_jni_exception_check(env, 1, avctx) < 0) {
            ret = -1;
            goto done;
        }
    }

done:
    ff_jni_reset_jfields(env, &jfields, jni_amediacodeclist_mapping, 0, avctx);

    return ret;
}

/*
 * Find the name of a hardware MediaCodec codec matching `mime`, `profile`
 * (a CodecProfileLevel constant, or < 0 to accept any profile) and `encoder`
 * (0 = decoder, nonzero = encoder), by walking MediaCodecList.
 *
 * Known software implementations (OMX.google, OMX.ffmpeg, some OMX.SEC
 * variants, the qcom hevc software decoder) are skipped.
 *
 * Returns an av_malloc'ed codec name the caller must free, or NULL.
 * NOTE(review): this function continues beyond this excerpt.
 */
char *ff_AMediaCodecList_getCodecNameByType(const char *mime, int profile, int encoder, void *log_ctx)
{
    int ret;
    int i;
    int codec_count;
    int found_codec = 0;
    char *name = NULL;
    char *supported_type = NULL;

    JNIEnv *env = NULL;
    struct JNIAMediaCodecListFields jfields = { 0 };
    struct JNIAMediaFormatFields mediaformat_jfields = { 0 };

    jobject format = NULL;
    jobject codec = NULL;
    jobject codec_name = NULL;

    jobject info = NULL;
    jobject type = NULL;
    jobjectArray types = NULL;

    jobject capabilities = NULL;
    jobject profile_level = NULL;
    jobjectArray profile_levels = NULL;

    JNI_GET_ENV_OR_RETURN(env, log_ctx, NULL);

    if ((ret = ff_jni_init_jfields(env, &jfields, jni_amediacodeclist_mapping, 0, log_ctx)) < 0) {
        goto done;
    }

    if ((ret = ff_jni_init_jfields(env, &mediaformat_jfields, jni_amediaformat_mapping, 0, log_ctx)) < 0) {
        goto done;
    }

    codec_count = (*env)->CallStaticIntMethod(env, jfields.mediacodec_list_class, jfields.get_codec_count_id);
    if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
        goto done;
    }

    /* iterate over every codec registered in MediaCodecList */
    for(i = 0; i < codec_count; i++) {
        int j;
        int type_count;
        int is_encoder;

        info = (*env)->CallStaticObjectMethod(env, jfields.mediacodec_list_class, jfields.get_codec_info_at_id, i);
        if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
            goto done;
        }

        types = (*env)->CallObjectMethod(env, info, jfields.get_supported_types_id);
        if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
            goto done;
        }

        is_encoder = (*env)->CallBooleanMethod(env, info, jfields.is_encoder_id);
        if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
            goto done;
        }

        if (is_encoder != encoder) {
            goto done_with_info;
        }

        /* check each mime type the codec advertises */
        type_count = (*env)->GetArrayLength(env, types);
        for (j = 0; j < type_count; j++) {
            int k;
            int profile_count;

            type = (*env)->GetObjectArrayElement(env, types, j);
            if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
                goto done;
            }

            supported_type = ff_jni_jstring_to_utf_chars(env, type, log_ctx);
            if (!supported_type) {
                goto done;
            }

            if (!av_strcasecmp(supported_type, mime)) {
                codec_name = (*env)->CallObjectMethod(env, info, jfields.get_name_id);
                if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
                    goto done;
                }

                name = ff_jni_jstring_to_utf_chars(env, codec_name, log_ctx);
                if (!name) {
                    goto done;
                }

                /* Skip software decoders */
                if (
                        strstr(name, "OMX.google")
                        ||
                        strstr(name, "OMX.ffmpeg")
                        ||
                        (strstr(name, "OMX.SEC") && strstr(name, ".sw."))
                        ||
                        !strcmp(name, "OMX.qcom.video.decoder.hevcswvdec")) {
                    av_freep(&name);
                    goto done_with_type;
                }

                capabilities = (*env)->CallObjectMethod(env, info, jfields.get_codec_capabilities_id, type);
                if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
                    goto done;
                }

                profile_levels = (*env)->GetObjectField(env, capabilities, jfields.profile_levels_id);
                if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
                    goto done;
                }

                profile_count = (*env)->GetArrayLength(env, profile_levels);
                /* no advertised profile/level pairs: accept the codec as-is */
                if (!profile_count) {
                    found_codec = 1;
                }
                for (k = 0; k < profile_count; k++) {
                    int supported_profile = 0;

                    /* caller did not constrain the profile */
                    if (profile < 0) {
                        found_codec = 1;
                        break;
                    }

                    profile_level = (*env)->GetObjectArrayElement(env, profile_levels, k);
                    if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
                        goto done;
                    }

                    supported_profile = (*env)->GetIntField(env, profile_level, jfields.profile_id);
                    if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
                        goto done;
                    }

                    found_codec = profile == supported_profile;

                    /* drop the per-iteration local ref promptly */
                    if (profile_level) {
                        (*env)->DeleteLocalRef(env, profile_level);
                        profile_level = NULL;
                    }

                    if (found_codec) {
                        break;
                    }
                }
            }

done_with_type:
            /* release per-type local refs before the next iteration */
            if (profile_levels) {
                (*env)->DeleteLocalRef(env, profile_levels);
                profile_levels = NULL;
            }

            if (capabilities) {
                (*env)->DeleteLocalRef(env, capabilities);
                capabilities = NULL;
            }

            if (type) {
                (*env)->DeleteLocalRef(env, type);
                type = NULL;
            }

            av_freep(&supported_type);

            if (found_codec) {
                break;
            }

            av_freep(&name);
        }

done_with_info:
        /* release per-codec local refs before the next iteration */
        if (info) {
            (*env)->DeleteLocalRef(env, info);
            info = NULL;
        }

        if (types) {
            (*env)->DeleteLocalRef(env, types);
            types = NULL;
        }

        if (found_codec) {
            break;
        }
    }

done:
    /* unified cleanup of every outstanding local reference */
    if (format) {
        (*env)->DeleteLocalRef(env, format);
    }

    if (codec) {
        (*env)->DeleteLocalRef(env, codec);
    }

    if (codec_name) {
        (*env)->DeleteLocalRef(env, codec_name);
    }

    if (info) {
        (*env)->DeleteLocalRef(env, info);
    }

    if (type) {
        (*env)->DeleteLocalRef(env, type);
    }

    if (types) {
        (*env)->DeleteLocalRef(env, types);
    }

    if (capabilities) {
        (*env)->DeleteLocalRef(env, capabilities);
    }

    if (profile_level) {
        (*env)->DeleteLocalRef(env, profile_level);
    }

    if (profile_levels) {
        (*env)->DeleteLocalRef(env, profile_levels);
    }
av_freep(&supported_type); ff_jni_reset_jfields(env, &jfields, jni_amediacodeclist_mapping, 0, log_ctx); ff_jni_reset_jfields(env, &mediaformat_jfields, jni_amediaformat_mapping, 0, log_ctx); if (!found_codec) { av_freep(&name); } return name; } FFAMediaFormat *ff_AMediaFormat_new(void) { JNIEnv *env = NULL; FFAMediaFormat *format = NULL; jobject object = NULL; format = av_mallocz(sizeof(FFAMediaFormat)); if (!format) { return NULL; } format->class = &amediaformat_class; env = ff_jni_get_env(format); if (!env) { av_freep(&format); return NULL; } if (ff_jni_init_jfields(env, &format->jfields, jni_amediaformat_mapping, 1, format) < 0) { goto fail; } object = (*env)->NewObject(env, format->jfields.mediaformat_class, format->jfields.init_id); if (!object) { goto fail; } format->object = (*env)->NewGlobalRef(env, object); if (!format->object) { goto fail; } fail: if (object) { (*env)->DeleteLocalRef(env, object); } if (!format->object) { ff_jni_reset_jfields(env, &format->jfields, jni_amediaformat_mapping, 1, format); av_freep(&format); } return format; } static FFAMediaFormat *ff_AMediaFormat_newFromObject(void *object) { JNIEnv *env = NULL; FFAMediaFormat *format = NULL; format = av_mallocz(sizeof(FFAMediaFormat)); if (!format) { return NULL; } format->class = &amediaformat_class; env = ff_jni_get_env(format); if (!env) { av_freep(&format); return NULL; } if (ff_jni_init_jfields(env, &format->jfields, jni_amediaformat_mapping, 1, format) < 0) { goto fail; } format->object = (*env)->NewGlobalRef(env, object); if (!format->object) { goto fail; } return format; fail: ff_jni_reset_jfields(env, &format->jfields, jni_amediaformat_mapping, 1, format); av_freep(&format); return NULL; } int ff_AMediaFormat_delete(FFAMediaFormat* format) { int ret = 0; JNIEnv *env = NULL; if (!format) { return 0; } JNI_GET_ENV_OR_RETURN(env, format, AVERROR_EXTERNAL); (*env)->DeleteGlobalRef(env, format->object); format->object = NULL; ff_jni_reset_jfields(env, &format->jfields, 
jni_amediaformat_mapping, 1, format); av_freep(&format); return ret; } char* ff_AMediaFormat_toString(FFAMediaFormat* format) { char *ret = NULL; JNIEnv *env = NULL; jstring description = NULL; av_assert0(format != NULL); JNI_GET_ENV_OR_RETURN(env, format, NULL); description = (*env)->CallObjectMethod(env, format->object, format->jfields.to_string_id); if (ff_jni_exception_check(env, 1, NULL) < 0) { goto fail; } ret = ff_jni_jstring_to_utf_chars(env, description, format); fail: if (description) { (*env)->DeleteLocalRef(env, description); } return ret; } int ff_AMediaFormat_getInt32(FFAMediaFormat* format, const char *name, int32_t *out) { int ret = 1; JNIEnv *env = NULL; jstring key = NULL; jboolean contains_key; av_assert0(format != NULL); JNI_GET_ENV_OR_RETURN(env, format, 0); key = ff_jni_utf_chars_to_jstring(env, name, format); if (!key) { ret = 0; goto fail; } contains_key = (*env)->CallBooleanMethod(env, format->object, format->jfields.contains_key_id, key); if (!contains_key || (ret = ff_jni_exception_check(env, 1, format)) < 0) { ret = 0; goto fail; } *out = (*env)->CallIntMethod(env, format->object, format->jfields.get_integer_id, key); if ((ret = ff_jni_exception_check(env, 1, format)) < 0) { ret = 0; goto fail; } ret = 1; fail: if (key) { (*env)->DeleteLocalRef(env, key); } return ret; } int ff_AMediaFormat_getInt64(FFAMediaFormat* format, const char *name, int64_t *out) { int ret = 1; JNIEnv *env = NULL; jstring key = NULL; jboolean contains_key; av_assert0(format != NULL); JNI_GET_ENV_OR_RETURN(env, format, 0); key = ff_jni_utf_chars_to_jstring(env, name, format); if (!key) { ret = 0; goto fail; } contains_key = (*env)->CallBooleanMethod(env, format->object, format->jfields.contains_key_id, key); if (!contains_key || (ret = ff_jni_exception_check(env, 1, format)) < 0) { ret = 0; goto fail; } *out = (*env)->CallLongMethod(env, format->object, format->jfields.get_long_id, key); if ((ret = ff_jni_exception_check(env, 1, format)) < 0) { ret = 0; goto fail; 
} ret = 1; fail: if (key) { (*env)->DeleteLocalRef(env, key); } return ret; } int ff_AMediaFormat_getFloat(FFAMediaFormat* format, const char *name, float *out) { int ret = 1; JNIEnv *env = NULL; jstring key = NULL; jboolean contains_key; av_assert0(format != NULL); JNI_GET_ENV_OR_RETURN(env, format, 0); key = ff_jni_utf_chars_to_jstring(env, name, format); if (!key) { ret = 0; goto fail; } contains_key = (*env)->CallBooleanMethod(env, format->object, format->jfields.contains_key_id, key); if (!contains_key || (ret = ff_jni_exception_check(env, 1, format)) < 0) { ret = 0; goto fail; } *out = (*env)->CallFloatMethod(env, format->object, format->jfields.get_float_id, key); if ((ret = ff_jni_exception_check(env, 1, format)) < 0) { ret = 0; goto fail; } ret = 1; fail: if (key) { (*env)->DeleteLocalRef(env, key); } return ret; } int ff_AMediaFormat_getBuffer(FFAMediaFormat* format, const char *name, void** data, size_t *size) { int ret = 1; JNIEnv *env = NULL; jstring key = NULL; jboolean contains_key; jobject result = NULL; av_assert0(format != NULL); JNI_GET_ENV_OR_RETURN(env, format, 0); key = ff_jni_utf_chars_to_jstring(env, name, format); if (!key) { ret = 0; goto fail; } contains_key = (*env)->CallBooleanMethod(env, format->object, format->jfields.contains_key_id, key); if (!contains_key || (ret = ff_jni_exception_check(env, 1, format)) < 0) { ret = 0; goto fail; } result = (*env)->CallObjectMethod(env, format->object, format->jfields.get_bytebuffer_id, key); if ((ret = ff_jni_exception_check(env, 1, format)) < 0) { ret = 0; goto fail; } *data = (*env)->GetDirectBufferAddress(env, result); *size = (*env)->GetDirectBufferCapacity(env, result); if (*data && *size) { void *src = *data; *data = av_malloc(*size); if (!*data) { ret = 0; goto fail; } memcpy(*data, src, *size); } ret = 1; fail: if (key) { (*env)->DeleteLocalRef(env, key); } if (result) { (*env)->DeleteLocalRef(env, result); } return ret; } int ff_AMediaFormat_getString(FFAMediaFormat* format, const char 
*name, const char **out) { int ret = 1; JNIEnv *env = NULL; jstring key = NULL; jboolean contains_key; jstring result = NULL; av_assert0(format != NULL); JNI_GET_ENV_OR_RETURN(env, format, 0); key = ff_jni_utf_chars_to_jstring(env, name, format); if (!key) { ret = 0; goto fail; } contains_key = (*env)->CallBooleanMethod(env, format->object, format->jfields.contains_key_id, key); if (!contains_key || (ret = ff_jni_exception_check(env, 1, format)) < 0) { ret = 0; goto fail; } result = (*env)->CallObjectMethod(env, format->object, format->jfields.get_string_id, key); if ((ret = ff_jni_exception_check(env, 1, format)) < 0) { ret = 0; goto fail; } *out = ff_jni_jstring_to_utf_chars(env, result, format); if (!*out) { ret = 0; goto fail; } ret = 1; fail: if (key) { (*env)->DeleteLocalRef(env, key); } if (result) { (*env)->DeleteLocalRef(env, result); } return ret; } void ff_AMediaFormat_setInt32(FFAMediaFormat* format, const char* name, int32_t value) { JNIEnv *env = NULL; jstring key = NULL; av_assert0(format != NULL); JNI_GET_ENV_OR_RETURN_VOID(env, format); key = ff_jni_utf_chars_to_jstring(env, name, format); if (!key) { goto fail; } (*env)->CallVoidMethod(env, format->object, format->jfields.set_integer_id, key, value); if (ff_jni_exception_check(env, 1, format) < 0) { goto fail; } fail: if (key) { (*env)->DeleteLocalRef(env, key); } } void ff_AMediaFormat_setInt64(FFAMediaFormat* format, const char* name, int64_t value) { JNIEnv *env = NULL; jstring key = NULL; av_assert0(format != NULL); JNI_GET_ENV_OR_RETURN_VOID(env, format); key = ff_jni_utf_chars_to_jstring(env, name, format); if (!key) { goto fail; } (*env)->CallVoidMethod(env, format->object, format->jfields.set_long_id, key, value); if (ff_jni_exception_check(env, 1, format) < 0) { goto fail; } fail: if (key) { (*env)->DeleteLocalRef(env, key); } } void ff_AMediaFormat_setFloat(FFAMediaFormat* format, const char* name, float value) { JNIEnv *env = NULL; jstring key = NULL; av_assert0(format != NULL); 
JNI_GET_ENV_OR_RETURN_VOID(env, format); key = ff_jni_utf_chars_to_jstring(env, name, format); if (!key) { goto fail; } (*env)->CallVoidMethod(env, format->object, format->jfields.set_float_id, key, value); if (ff_jni_exception_check(env, 1, format) < 0) { goto fail; } fail: if (key) { (*env)->DeleteLocalRef(env, key); } } void ff_AMediaFormat_setString(FFAMediaFormat* format, const char* name, const char* value) { JNIEnv *env = NULL; jstring key = NULL; jstring string = NULL; av_assert0(format != NULL); JNI_GET_ENV_OR_RETURN_VOID(env, format); key = ff_jni_utf_chars_to_jstring(env, name, format); if (!key) { goto fail; } string = ff_jni_utf_chars_to_jstring(env, value, format); if (!string) { goto fail; } (*env)->CallVoidMethod(env, format->object, format->jfields.set_string_id, key, string); if (ff_jni_exception_check(env, 1, format) < 0) { goto fail; } fail: if (key) { (*env)->DeleteLocalRef(env, key); } if (string) { (*env)->DeleteLocalRef(env, string); } } void ff_AMediaFormat_setBuffer(FFAMediaFormat* format, const char* name, void* data, size_t size) { JNIEnv *env = NULL; jstring key = NULL; jobject buffer = NULL; void *buffer_data = NULL; av_assert0(format != NULL); JNI_GET_ENV_OR_RETURN_VOID(env, format); key = ff_jni_utf_chars_to_jstring(env, name, format); if (!key) { goto fail; } if (!data || !size) { goto fail; } buffer_data = av_malloc(size); if (!buffer_data) { goto fail; } memcpy(buffer_data, data, size); buffer = (*env)->NewDirectByteBuffer(env, buffer_data, size); if (!buffer) { goto fail; } (*env)->CallVoidMethod(env, format->object, format->jfields.set_bytebuffer_id, key, buffer); if (ff_jni_exception_check(env, 1, format) < 0) { goto fail; } fail: if (key) { (*env)->DeleteLocalRef(env, key); } if (buffer) { (*env)->DeleteLocalRef(env, buffer); } } static int codec_init_static_fields(FFAMediaCodec *codec) { int ret = 0; JNIEnv *env = NULL; JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL); codec->INFO_TRY_AGAIN_LATER = 
(*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.info_try_again_later_id); if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) { goto fail; } codec->BUFFER_FLAG_CODEC_CONFIG = (*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.buffer_flag_codec_config_id); if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) { goto fail; } codec->BUFFER_FLAG_END_OF_STREAM = (*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.buffer_flag_end_of_stream_id); if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) { goto fail; } if (codec->jfields.buffer_flag_key_frame_id) { codec->BUFFER_FLAG_KEY_FRAME = (*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.buffer_flag_key_frame_id); if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) { goto fail; } } codec->CONFIGURE_FLAG_ENCODE = (*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.configure_flag_encode_id); if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) { goto fail; } codec->INFO_TRY_AGAIN_LATER = (*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.info_try_again_later_id); if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) { goto fail; } codec->INFO_OUTPUT_BUFFERS_CHANGED = (*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.info_output_buffers_changed_id); if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) { goto fail; } codec->INFO_OUTPUT_FORMAT_CHANGED = (*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.info_output_format_changed_id); if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) { goto fail; } fail: return ret; } #define CREATE_CODEC_BY_NAME 0 #define CREATE_DECODER_BY_TYPE 1 #define CREATE_ENCODER_BY_TYPE 2 static inline FFAMediaCodec *codec_create(int method, const char *arg) { int ret = -1; JNIEnv *env = NULL; FFAMediaCodec *codec = NULL; jstring jarg = NULL; jobject object = 
NULL; jobject buffer_info = NULL; jmethodID create_id = NULL; codec = av_mallocz(sizeof(FFAMediaCodec)); if (!codec) { return NULL; } codec->class = &amediacodec_class; env = ff_jni_get_env(codec); if (!env) { av_freep(&codec); return NULL; } if (ff_jni_init_jfields(env, &codec->jfields, jni_amediacodec_mapping, 1, codec) < 0) { goto fail; } jarg = ff_jni_utf_chars_to_jstring(env, arg, codec); if (!jarg) { goto fail; } switch (method) { case CREATE_CODEC_BY_NAME: create_id = codec->jfields.create_by_codec_name_id; break; case CREATE_DECODER_BY_TYPE: create_id = codec->jfields.create_decoder_by_type_id; break; case CREATE_ENCODER_BY_TYPE: create_id = codec->jfields.create_encoder_by_type_id; break; default: av_assert0(0); } object = (*env)->CallStaticObjectMethod(env, codec->jfields.mediacodec_class, create_id, jarg); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } codec->object = (*env)->NewGlobalRef(env, object); if (!codec->object) { goto fail; } if (codec_init_static_fields(codec) < 0) { goto fail; } if (codec->jfields.get_input_buffer_id && codec->jfields.get_output_buffer_id) { codec->has_get_i_o_buffer = 1; } buffer_info = (*env)->NewObject(env, codec->jfields.mediainfo_class, codec->jfields.init_id); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } codec->buffer_info = (*env)->NewGlobalRef(env, buffer_info); if (!codec->buffer_info) { goto fail; } ret = 0; fail: if (jarg) { (*env)->DeleteLocalRef(env, jarg); } if (object) { (*env)->DeleteLocalRef(env, object); } if (buffer_info) { (*env)->DeleteLocalRef(env, buffer_info); } if (ret < 0) { if (codec->object) { (*env)->DeleteGlobalRef(env, codec->object); } if (codec->buffer_info) { (*env)->DeleteGlobalRef(env, codec->buffer_info); } ff_jni_reset_jfields(env, &codec->jfields, jni_amediacodec_mapping, 1, codec); av_freep(&codec); } return codec; } #define DECLARE_FF_AMEDIACODEC_CREATE_FUNC(name, method) \ FFAMediaCodec *ff_AMediaCodec_##name(const char *arg) \ { \ return 
codec_create(method, arg); \ } \ DECLARE_FF_AMEDIACODEC_CREATE_FUNC(createCodecByName, CREATE_CODEC_BY_NAME) DECLARE_FF_AMEDIACODEC_CREATE_FUNC(createDecoderByType, CREATE_DECODER_BY_TYPE) DECLARE_FF_AMEDIACODEC_CREATE_FUNC(createEncoderByType, CREATE_ENCODER_BY_TYPE) int ff_AMediaCodec_delete(FFAMediaCodec* codec) { int ret = 0; JNIEnv *env = NULL; if (!codec) { return 0; } JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL); (*env)->CallVoidMethod(env, codec->object, codec->jfields.release_id); if (ff_jni_exception_check(env, 1, codec) < 0) { ret = AVERROR_EXTERNAL; } (*env)->DeleteGlobalRef(env, codec->object); codec->object = NULL; (*env)->DeleteGlobalRef(env, codec->buffer_info); codec->buffer_info = NULL; ff_jni_reset_jfields(env, &codec->jfields, jni_amediacodec_mapping, 1, codec); av_freep(&codec); return ret; } char *ff_AMediaCodec_getName(FFAMediaCodec *codec) { char *ret = NULL; JNIEnv *env = NULL; jobject *name = NULL; JNI_GET_ENV_OR_RETURN(env, codec, NULL); name = (*env)->CallObjectMethod(env, codec->object, codec->jfields.get_name_id); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } ret = ff_jni_jstring_to_utf_chars(env, name, codec); fail: return ret; } int ff_AMediaCodec_configure(FFAMediaCodec* codec, const FFAMediaFormat* format, void* surface, void *crypto, uint32_t flags) { int ret = 0; JNIEnv *env = NULL; JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL); (*env)->CallVoidMethod(env, codec->object, codec->jfields.configure_id, format->object, surface, NULL, flags); if (ff_jni_exception_check(env, 1, codec) < 0) { ret = AVERROR_EXTERNAL; goto fail; } fail: return ret; } int ff_AMediaCodec_start(FFAMediaCodec* codec) { int ret = 0; JNIEnv *env = NULL; JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL); (*env)->CallVoidMethod(env, codec->object, codec->jfields.start_id); if (ff_jni_exception_check(env, 1, codec) < 0) { ret = AVERROR_EXTERNAL; goto fail; } fail: return ret; } int ff_AMediaCodec_stop(FFAMediaCodec* codec) { int ret 
= 0; JNIEnv *env = NULL; JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL); (*env)->CallVoidMethod(env, codec->object, codec->jfields.stop_id); if (ff_jni_exception_check(env, 1, codec) < 0) { ret = AVERROR_EXTERNAL; goto fail; } fail: return ret; } int ff_AMediaCodec_flush(FFAMediaCodec* codec) { int ret = 0; JNIEnv *env = NULL; JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL); (*env)->CallVoidMethod(env, codec->object, codec->jfields.flush_id); if (ff_jni_exception_check(env, 1, codec) < 0) { ret = AVERROR_EXTERNAL; goto fail; } fail: return ret; } int ff_AMediaCodec_releaseOutputBuffer(FFAMediaCodec* codec, size_t idx, int render) { int ret = 0; JNIEnv *env = NULL; JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL); (*env)->CallVoidMethod(env, codec->object, codec->jfields.release_output_buffer_id, (jint)idx, (jboolean)render); if (ff_jni_exception_check(env, 1, codec) < 0) { ret = AVERROR_EXTERNAL; goto fail; } fail: return ret; } int ff_AMediaCodec_releaseOutputBufferAtTime(FFAMediaCodec *codec, size_t idx, int64_t timestampNs) { int ret = 0; JNIEnv *env = NULL; JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL); (*env)->CallVoidMethod(env, codec->object, codec->jfields.release_output_buffer_at_time_id, (jint)idx, timestampNs); if (ff_jni_exception_check(env, 1, codec) < 0) { ret = AVERROR_EXTERNAL; goto fail; } fail: return ret; } ssize_t ff_AMediaCodec_dequeueInputBuffer(FFAMediaCodec* codec, int64_t timeoutUs) { int ret = 0; JNIEnv *env = NULL; JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL); ret = (*env)->CallIntMethod(env, codec->object, codec->jfields.dequeue_input_buffer_id, timeoutUs); if (ff_jni_exception_check(env, 1, codec) < 0) { ret = AVERROR_EXTERNAL; goto fail; } fail: return ret; } int ff_AMediaCodec_queueInputBuffer(FFAMediaCodec* codec, size_t idx, off_t offset, size_t size, uint64_t time, uint32_t flags) { int ret = 0; JNIEnv *env = NULL; JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL); (*env)->CallVoidMethod(env, 
codec->object, codec->jfields.queue_input_buffer_id, (jint)idx, (jint)offset, (jint)size, time, flags); if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) { ret = AVERROR_EXTERNAL; goto fail; } fail: return ret; } ssize_t ff_AMediaCodec_dequeueOutputBuffer(FFAMediaCodec* codec, FFAMediaCodecBufferInfo *info, int64_t timeoutUs) { int ret = 0; JNIEnv *env = NULL; JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL); ret = (*env)->CallIntMethod(env, codec->object, codec->jfields.dequeue_output_buffer_id, codec->buffer_info, timeoutUs); if (ff_jni_exception_check(env, 1, codec) < 0) { return AVERROR_EXTERNAL; } info->flags = (*env)->GetIntField(env, codec->buffer_info, codec->jfields.flags_id); if (ff_jni_exception_check(env, 1, codec) < 0) { return AVERROR_EXTERNAL; } info->offset = (*env)->GetIntField(env, codec->buffer_info, codec->jfields.offset_id); if (ff_jni_exception_check(env, 1, codec) < 0) { return AVERROR_EXTERNAL; } info->presentationTimeUs = (*env)->GetLongField(env, codec->buffer_info, codec->jfields.presentation_time_us_id); if (ff_jni_exception_check(env, 1, codec) < 0) { return AVERROR_EXTERNAL; } info->size = (*env)->GetIntField(env, codec->buffer_info, codec->jfields.size_id); if (ff_jni_exception_check(env, 1, codec) < 0) { return AVERROR_EXTERNAL; } return ret; } uint8_t* ff_AMediaCodec_getInputBuffer(FFAMediaCodec* codec, size_t idx, size_t *out_size) { uint8_t *ret = NULL; JNIEnv *env = NULL; jobject buffer = NULL; jobject input_buffers = NULL; JNI_GET_ENV_OR_RETURN(env, codec, NULL); if (codec->has_get_i_o_buffer) { buffer = (*env)->CallObjectMethod(env, codec->object, codec->jfields.get_input_buffer_id, (jint)idx); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } } else { if (!codec->input_buffers) { input_buffers = (*env)->CallObjectMethod(env, codec->object, codec->jfields.get_input_buffers_id); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } codec->input_buffers = (*env)->NewGlobalRef(env, input_buffers); if 
(ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } } buffer = (*env)->GetObjectArrayElement(env, codec->input_buffers, idx); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } } ret = (*env)->GetDirectBufferAddress(env, buffer); *out_size = (*env)->GetDirectBufferCapacity(env, buffer); fail: if (buffer) { (*env)->DeleteLocalRef(env, buffer); } if (input_buffers) { (*env)->DeleteLocalRef(env, input_buffers); } return ret; } uint8_t* ff_AMediaCodec_getOutputBuffer(FFAMediaCodec* codec, size_t idx, size_t *out_size) { uint8_t *ret = NULL; JNIEnv *env = NULL; jobject buffer = NULL; jobject output_buffers = NULL; JNI_GET_ENV_OR_RETURN(env, codec, NULL); if (codec->has_get_i_o_buffer) { buffer = (*env)->CallObjectMethod(env, codec->object, codec->jfields.get_output_buffer_id, (jint)idx); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } } else { if (!codec->output_buffers) { output_buffers = (*env)->CallObjectMethod(env, codec->object, codec->jfields.get_output_buffers_id); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } codec->output_buffers = (*env)->NewGlobalRef(env, output_buffers); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } } buffer = (*env)->GetObjectArrayElement(env, codec->output_buffers, idx); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } } ret = (*env)->GetDirectBufferAddress(env, buffer); *out_size = (*env)->GetDirectBufferCapacity(env, buffer); fail: if (buffer) { (*env)->DeleteLocalRef(env, buffer); } if (output_buffers) { (*env)->DeleteLocalRef(env, output_buffers); } return ret; } FFAMediaFormat* ff_AMediaCodec_getOutputFormat(FFAMediaCodec* codec) { FFAMediaFormat *ret = NULL; JNIEnv *env = NULL; jobject mediaformat = NULL; JNI_GET_ENV_OR_RETURN(env, codec, NULL); mediaformat = (*env)->CallObjectMethod(env, codec->object, codec->jfields.get_output_format_id); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } ret = ff_AMediaFormat_newFromObject(mediaformat); fail: 
if (mediaformat) { (*env)->DeleteLocalRef(env, mediaformat); } return ret; } int ff_AMediaCodec_infoTryAgainLater(FFAMediaCodec *codec, ssize_t idx) { return idx == codec->INFO_TRY_AGAIN_LATER; } int ff_AMediaCodec_infoOutputBuffersChanged(FFAMediaCodec *codec, ssize_t idx) { return idx == codec->INFO_OUTPUT_BUFFERS_CHANGED; } int ff_AMediaCodec_infoOutputFormatChanged(FFAMediaCodec *codec, ssize_t idx) { return idx == codec->INFO_OUTPUT_FORMAT_CHANGED; } int ff_AMediaCodec_getBufferFlagCodecConfig(FFAMediaCodec *codec) { return codec->BUFFER_FLAG_CODEC_CONFIG; } int ff_AMediaCodec_getBufferFlagEndOfStream(FFAMediaCodec *codec) { return codec->BUFFER_FLAG_END_OF_STREAM; } int ff_AMediaCodec_getBufferFlagKeyFrame(FFAMediaCodec *codec) { return codec->BUFFER_FLAG_KEY_FRAME; } int ff_AMediaCodec_getConfigureFlagEncode(FFAMediaCodec *codec) { return codec->CONFIGURE_FLAG_ENCODE; } int ff_AMediaCodec_cleanOutputBuffers(FFAMediaCodec *codec) { int ret = 0; if (!codec->has_get_i_o_buffer) { if (codec->output_buffers) { JNIEnv *env = NULL; env = ff_jni_get_env(codec); if (!env) { ret = AVERROR_EXTERNAL; goto fail; } (*env)->DeleteGlobalRef(env, codec->output_buffers); codec->output_buffers = NULL; } } fail: return ret; } int ff_Build_SDK_INT(AVCodecContext *avctx) { int ret = -1; JNIEnv *env = NULL; jclass versionClass; jfieldID sdkIntFieldID; JNI_GET_ENV_OR_RETURN(env, avctx, -1); versionClass = (*env)->FindClass(env, "android/os/Build$VERSION"); sdkIntFieldID = (*env)->GetStaticFieldID(env, versionClass, "SDK_INT", "I"); ret = (*env)->GetStaticIntField(env, versionClass, sdkIntFieldID); (*env)->DeleteLocalRef(env, versionClass); return ret; }
null
null
null
null
71,053
32,502
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
32,502
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_RENDERER_CORE_LOADER_SUBRESOURCE_INTEGRITY_HELPER_H_ #define THIRD_PARTY_BLINK_RENDERER_CORE_LOADER_SUBRESOURCE_INTEGRITY_HELPER_H_ #include "third_party/blink/renderer/core/core_export.h" #include "third_party/blink/renderer/core/inspector/console_message.h" #include "third_party/blink/renderer/platform/loader/subresource_integrity.h" #include "third_party/blink/renderer/platform/wtf/allocator.h" namespace blink { class ExecutionContext; class CORE_EXPORT SubresourceIntegrityHelper final { STATIC_ONLY(SubresourceIntegrityHelper); public: static void DoReport(ExecutionContext&, const SubresourceIntegrity::ReportInfo&); static void GetConsoleMessages(const SubresourceIntegrity::ReportInfo&, HeapVector<Member<ConsoleMessage>>*); static SubresourceIntegrity::IntegrityFeatures GetFeatures(ExecutionContext*); }; } // namespace blink #endif
null
null
null
null
29,365
27,217
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
192,212
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * comedi_pci.c * Comedi PCI driver specific functions. * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 1997-2000 David A. Schleef <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/interrupt.h> #include "comedi_pci.h" /** * comedi_to_pci_dev() - Return PCI device attached to COMEDI device * @dev: COMEDI device. * * Assuming @dev->hw_dev is non-%NULL, it is assumed to be pointing to a * a &struct device embedded in a &struct pci_dev. * * Return: Attached PCI device if @dev->hw_dev is non-%NULL. * Return %NULL if @dev->hw_dev is %NULL. */ struct pci_dev *comedi_to_pci_dev(struct comedi_device *dev) { return dev->hw_dev ? to_pci_dev(dev->hw_dev) : NULL; } EXPORT_SYMBOL_GPL(comedi_to_pci_dev); /** * comedi_pci_enable() - Enable the PCI device and request the regions * @dev: COMEDI device. * * Assuming @dev->hw_dev is non-%NULL, it is assumed to be pointing to a * a &struct device embedded in a &struct pci_dev. Enable the PCI device * and request its regions. Set @dev->ioenabled to %true if successful, * otherwise undo what was done. * * Calls to comedi_pci_enable() and comedi_pci_disable() cannot be nested. * * Return: * 0 on success, * -%ENODEV if @dev->hw_dev is %NULL, * -%EBUSY if regions busy, * or some negative error number if failed to enable PCI device. 
* */ int comedi_pci_enable(struct comedi_device *dev) { struct pci_dev *pcidev = comedi_to_pci_dev(dev); int rc; if (!pcidev) return -ENODEV; rc = pci_enable_device(pcidev); if (rc < 0) return rc; rc = pci_request_regions(pcidev, dev->board_name); if (rc < 0) pci_disable_device(pcidev); else dev->ioenabled = true; return rc; } EXPORT_SYMBOL_GPL(comedi_pci_enable); /** * comedi_pci_disable() - Release the regions and disable the PCI device * @dev: COMEDI device. * * Assuming @dev->hw_dev is non-%NULL, it is assumed to be pointing to a * a &struct device embedded in a &struct pci_dev. If the earlier call * to comedi_pci_enable() was successful, release the PCI device's regions * and disable it. Reset @dev->ioenabled back to %false. */ void comedi_pci_disable(struct comedi_device *dev) { struct pci_dev *pcidev = comedi_to_pci_dev(dev); if (pcidev && dev->ioenabled) { pci_release_regions(pcidev); pci_disable_device(pcidev); } dev->ioenabled = false; } EXPORT_SYMBOL_GPL(comedi_pci_disable); /** * comedi_pci_detach() - A generic "detach" handler for PCI COMEDI drivers * @dev: COMEDI device. * * COMEDI drivers for PCI devices that need no special clean-up of private data * and have no ioremapped regions other than that pointed to by @dev->mmio may * use this function as its "detach" handler called by the COMEDI core when a * COMEDI device is being detached from the low-level driver. It may be also * called from a more specific "detach" handler that does additional clean-up. * * Free the IRQ if @dev->irq is non-zero, iounmap @dev->mmio if it is * non-%NULL, and call comedi_pci_disable() to release the PCI device's regions * and disable it. 
*/ void comedi_pci_detach(struct comedi_device *dev) { struct pci_dev *pcidev = comedi_to_pci_dev(dev); if (!pcidev || !dev->ioenabled) return; if (dev->irq) { free_irq(dev->irq, dev); dev->irq = 0; } if (dev->mmio) { iounmap(dev->mmio); dev->mmio = NULL; } comedi_pci_disable(dev); } EXPORT_SYMBOL_GPL(comedi_pci_detach); /** * comedi_pci_auto_config() - Configure/probe a PCI COMEDI device * @pcidev: PCI device. * @driver: Registered COMEDI driver. * @context: Driver specific data, passed to comedi_auto_config(). * * Typically called from the pci_driver (*probe) function. Auto-configure * a COMEDI device, using the &struct device embedded in *@pcidev as the * hardware device. The @context value gets passed through to @driver's * "auto_attach" handler. The "auto_attach" handler may call * comedi_to_pci_dev() on the passed in COMEDI device to recover @pcidev. * * Return: The result of calling comedi_auto_config() (0 on success, or * a negative error number on failure). */ int comedi_pci_auto_config(struct pci_dev *pcidev, struct comedi_driver *driver, unsigned long context) { return comedi_auto_config(&pcidev->dev, driver, context); } EXPORT_SYMBOL_GPL(comedi_pci_auto_config); /** * comedi_pci_auto_unconfig() - Unconfigure/remove a PCI COMEDI device * @pcidev: PCI device. * * Typically called from the pci_driver (*remove) function. Auto-unconfigure * a COMEDI device attached to this PCI device, using a pointer to the * &struct device embedded in *@pcidev as the hardware device. The COMEDI * driver's "detach" handler will be called during unconfiguration of the * COMEDI device. * * Note that the COMEDI device may have already been unconfigured using the * %COMEDI_DEVCONFIG ioctl, in which case this attempt to unconfigure it * again should be ignored. 
*/ void comedi_pci_auto_unconfig(struct pci_dev *pcidev) { comedi_auto_unconfig(&pcidev->dev); } EXPORT_SYMBOL_GPL(comedi_pci_auto_unconfig); /** * comedi_pci_driver_register() - Register a PCI COMEDI driver * @comedi_driver: COMEDI driver to be registered. * @pci_driver: PCI driver to be registered. * * This function is called from the module_init() of PCI COMEDI driver modules * to register the COMEDI driver and the PCI driver. Do not call it directly, * use the module_comedi_pci_driver() helper macro instead. * * Return: 0 on success, or a negative error number on failure. */ int comedi_pci_driver_register(struct comedi_driver *comedi_driver, struct pci_driver *pci_driver) { int ret; ret = comedi_driver_register(comedi_driver); if (ret < 0) return ret; ret = pci_register_driver(pci_driver); if (ret < 0) { comedi_driver_unregister(comedi_driver); return ret; } return 0; } EXPORT_SYMBOL_GPL(comedi_pci_driver_register); /** * comedi_pci_driver_unregister() - Unregister a PCI COMEDI driver * @comedi_driver: COMEDI driver to be unregistered. * @pci_driver: PCI driver to be unregistered. * * This function is called from the module_exit() of PCI COMEDI driver modules * to unregister the PCI driver and the COMEDI driver. Do not call it * directly, use the module_comedi_pci_driver() helper macro instead. */ void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver, struct pci_driver *pci_driver) { pci_unregister_driver(pci_driver); comedi_driver_unregister(comedi_driver); } EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister); static int __init comedi_pci_init(void) { return 0; } module_init(comedi_pci_init); static void __exit comedi_pci_exit(void) { } module_exit(comedi_pci_exit); MODULE_AUTHOR("http://www.comedi.org"); MODULE_DESCRIPTION("Comedi PCI interface module"); MODULE_LICENSE("GPL");
null
null
null
null
100,559
2,262
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
155,319
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * G.723.1 compatible decoder * Copyright (c) 2006 Benjamin Larsson * Copyright (c) 2010 Mohamed Naufal Basheer * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * G.723.1 compatible decoder */ #include "libavutil/channel_layout.h" #include "libavutil/mem.h" #include "libavutil/opt.h" #define BITSTREAM_READER_LE #include "acelp_vectors.h" #include "avcodec.h" #include "celp_filters.h" #include "celp_math.h" #include "get_bits.h" #include "internal.h" #include "g723_1.h" #define CNG_RANDOM_SEED 12345 static av_cold int g723_1_decode_init(AVCodecContext *avctx) { G723_1_Context *p = avctx->priv_data; avctx->channel_layout = AV_CH_LAYOUT_MONO; avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->channels = 1; p->pf_gain = 1 << 12; memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(*p->prev_lsp)); memcpy(p->sid_lsp, dc_lsp, LPC_ORDER * sizeof(*p->sid_lsp)); p->cng_random_seed = CNG_RANDOM_SEED; p->past_frame_type = SID_FRAME; return 0; } /** * Unpack the frame into parameters. 
* * @param p the context * @param buf pointer to the input buffer * @param buf_size size of the input buffer */ static int unpack_bitstream(G723_1_Context *p, const uint8_t *buf, int buf_size) { GetBitContext gb; int ad_cb_len; int temp, info_bits, i; init_get_bits(&gb, buf, buf_size * 8); /* Extract frame type and rate info */ info_bits = get_bits(&gb, 2); if (info_bits == 3) { p->cur_frame_type = UNTRANSMITTED_FRAME; return 0; } /* Extract 24 bit lsp indices, 8 bit for each band */ p->lsp_index[2] = get_bits(&gb, 8); p->lsp_index[1] = get_bits(&gb, 8); p->lsp_index[0] = get_bits(&gb, 8); if (info_bits == 2) { p->cur_frame_type = SID_FRAME; p->subframe[0].amp_index = get_bits(&gb, 6); return 0; } /* Extract the info common to both rates */ p->cur_rate = info_bits ? RATE_5300 : RATE_6300; p->cur_frame_type = ACTIVE_FRAME; p->pitch_lag[0] = get_bits(&gb, 7); if (p->pitch_lag[0] > 123) /* test if forbidden code */ return -1; p->pitch_lag[0] += PITCH_MIN; p->subframe[1].ad_cb_lag = get_bits(&gb, 2); p->pitch_lag[1] = get_bits(&gb, 7); if (p->pitch_lag[1] > 123) return -1; p->pitch_lag[1] += PITCH_MIN; p->subframe[3].ad_cb_lag = get_bits(&gb, 2); p->subframe[0].ad_cb_lag = 1; p->subframe[2].ad_cb_lag = 1; for (i = 0; i < SUBFRAMES; i++) { /* Extract combined gain */ temp = get_bits(&gb, 12); ad_cb_len = 170; p->subframe[i].dirac_train = 0; if (p->cur_rate == RATE_6300 && p->pitch_lag[i >> 1] < SUBFRAME_LEN - 2) { p->subframe[i].dirac_train = temp >> 11; temp &= 0x7FF; ad_cb_len = 85; } p->subframe[i].ad_cb_gain = FASTDIV(temp, GAIN_LEVELS); if (p->subframe[i].ad_cb_gain < ad_cb_len) { p->subframe[i].amp_index = temp - p->subframe[i].ad_cb_gain * GAIN_LEVELS; } else { return -1; } } p->subframe[0].grid_index = get_bits1(&gb); p->subframe[1].grid_index = get_bits1(&gb); p->subframe[2].grid_index = get_bits1(&gb); p->subframe[3].grid_index = get_bits1(&gb); if (p->cur_rate == RATE_6300) { skip_bits1(&gb); /* skip reserved bit */ /* Compute pulse_pos index using the 13-bit 
combined position index */ temp = get_bits(&gb, 13); p->subframe[0].pulse_pos = temp / 810; temp -= p->subframe[0].pulse_pos * 810; p->subframe[1].pulse_pos = FASTDIV(temp, 90); temp -= p->subframe[1].pulse_pos * 90; p->subframe[2].pulse_pos = FASTDIV(temp, 9); p->subframe[3].pulse_pos = temp - p->subframe[2].pulse_pos * 9; p->subframe[0].pulse_pos = (p->subframe[0].pulse_pos << 16) + get_bits(&gb, 16); p->subframe[1].pulse_pos = (p->subframe[1].pulse_pos << 14) + get_bits(&gb, 14); p->subframe[2].pulse_pos = (p->subframe[2].pulse_pos << 16) + get_bits(&gb, 16); p->subframe[3].pulse_pos = (p->subframe[3].pulse_pos << 14) + get_bits(&gb, 14); p->subframe[0].pulse_sign = get_bits(&gb, 6); p->subframe[1].pulse_sign = get_bits(&gb, 5); p->subframe[2].pulse_sign = get_bits(&gb, 6); p->subframe[3].pulse_sign = get_bits(&gb, 5); } else { /* 5300 bps */ p->subframe[0].pulse_pos = get_bits(&gb, 12); p->subframe[1].pulse_pos = get_bits(&gb, 12); p->subframe[2].pulse_pos = get_bits(&gb, 12); p->subframe[3].pulse_pos = get_bits(&gb, 12); p->subframe[0].pulse_sign = get_bits(&gb, 4); p->subframe[1].pulse_sign = get_bits(&gb, 4); p->subframe[2].pulse_sign = get_bits(&gb, 4); p->subframe[3].pulse_sign = get_bits(&gb, 4); } return 0; } /** * Bitexact implementation of sqrt(val/2). */ static int16_t square_root(unsigned val) { av_assert2(!(val & 0x80000000)); return (ff_sqrt(val << 1) >> 1) & (~1); } /** * Generate fixed codebook excitation vector. 
* * @param vector decoded excitation vector * @param subfrm current subframe * @param cur_rate current bitrate * @param pitch_lag closed loop pitch lag * @param index current subframe index */ static void gen_fcb_excitation(int16_t *vector, G723_1_Subframe *subfrm, enum Rate cur_rate, int pitch_lag, int index) { int temp, i, j; memset(vector, 0, SUBFRAME_LEN * sizeof(*vector)); if (cur_rate == RATE_6300) { if (subfrm->pulse_pos >= max_pos[index]) return; /* Decode amplitudes and positions */ j = PULSE_MAX - pulses[index]; temp = subfrm->pulse_pos; for (i = 0; i < SUBFRAME_LEN / GRID_SIZE; i++) { temp -= combinatorial_table[j][i]; if (temp >= 0) continue; temp += combinatorial_table[j++][i]; if (subfrm->pulse_sign & (1 << (PULSE_MAX - j))) { vector[subfrm->grid_index + GRID_SIZE * i] = -fixed_cb_gain[subfrm->amp_index]; } else { vector[subfrm->grid_index + GRID_SIZE * i] = fixed_cb_gain[subfrm->amp_index]; } if (j == PULSE_MAX) break; } if (subfrm->dirac_train == 1) ff_g723_1_gen_dirac_train(vector, pitch_lag); } else { /* 5300 bps */ int cb_gain = fixed_cb_gain[subfrm->amp_index]; int cb_shift = subfrm->grid_index; int cb_sign = subfrm->pulse_sign; int cb_pos = subfrm->pulse_pos; int offset, beta, lag; for (i = 0; i < 8; i += 2) { offset = ((cb_pos & 7) << 3) + cb_shift + i; vector[offset] = (cb_sign & 1) ? cb_gain : -cb_gain; cb_pos >>= 3; cb_sign >>= 1; } /* Enhance harmonic components */ lag = pitch_contrib[subfrm->ad_cb_gain << 1] + pitch_lag + subfrm->ad_cb_lag - 1; beta = pitch_contrib[(subfrm->ad_cb_gain << 1) + 1]; if (lag < SUBFRAME_LEN - 2) { for (i = lag; i < SUBFRAME_LEN; i++) vector[i] += beta * vector[i - lag] >> 15; } } } /** * Estimate maximum auto-correlation around pitch lag. 
* * @param buf buffer with offset applied * @param offset offset of the excitation vector * @param ccr_max pointer to the maximum auto-correlation * @param pitch_lag decoded pitch lag * @param length length of autocorrelation * @param dir forward lag(1) / backward lag(-1) */ static int autocorr_max(const int16_t *buf, int offset, int *ccr_max, int pitch_lag, int length, int dir) { int limit, ccr, lag = 0; int i; pitch_lag = FFMIN(PITCH_MAX - 3, pitch_lag); if (dir > 0) limit = FFMIN(FRAME_LEN + PITCH_MAX - offset - length, pitch_lag + 3); else limit = pitch_lag + 3; for (i = pitch_lag - 3; i <= limit; i++) { ccr = ff_g723_1_dot_product(buf, buf + dir * i, length); if (ccr > *ccr_max) { *ccr_max = ccr; lag = i; } } return lag; } /** * Calculate pitch postfilter optimal and scaling gains. * * @param lag pitch postfilter forward/backward lag * @param ppf pitch postfilter parameters * @param cur_rate current bitrate * @param tgt_eng target energy * @param ccr cross-correlation * @param res_eng residual energy */ static void comp_ppf_gains(int lag, PPFParam *ppf, enum Rate cur_rate, int tgt_eng, int ccr, int res_eng) { int pf_residual; /* square of postfiltered residual */ int temp1, temp2; ppf->index = lag; temp1 = tgt_eng * res_eng >> 1; temp2 = ccr * ccr << 1; if (temp2 > temp1) { if (ccr >= res_eng) { ppf->opt_gain = ppf_gain_weight[cur_rate]; } else { ppf->opt_gain = (ccr << 15) / res_eng * ppf_gain_weight[cur_rate] >> 15; } /* pf_res^2 = tgt_eng + 2*ccr*gain + res_eng*gain^2 */ temp1 = (tgt_eng << 15) + (ccr * ppf->opt_gain << 1); temp2 = (ppf->opt_gain * ppf->opt_gain >> 15) * res_eng; pf_residual = av_sat_add32(temp1, temp2 + (1 << 15)) >> 16; if (tgt_eng >= pf_residual << 1) { temp1 = 0x7fff; } else { temp1 = (tgt_eng << 14) / pf_residual; } /* scaling_gain = sqrt(tgt_eng/pf_res^2) */ ppf->sc_gain = square_root(temp1 << 16); } else { ppf->opt_gain = 0; ppf->sc_gain = 0x7fff; } ppf->opt_gain = av_clip_int16(ppf->opt_gain * ppf->sc_gain >> 15); } /** * Calculate 
pitch postfilter parameters. * * @param p the context * @param offset offset of the excitation vector * @param pitch_lag decoded pitch lag * @param ppf pitch postfilter parameters * @param cur_rate current bitrate */ static void comp_ppf_coeff(G723_1_Context *p, int offset, int pitch_lag, PPFParam *ppf, enum Rate cur_rate) { int16_t scale; int i; int temp1, temp2; /* * 0 - target energy * 1 - forward cross-correlation * 2 - forward residual energy * 3 - backward cross-correlation * 4 - backward residual energy */ int energy[5] = {0, 0, 0, 0, 0}; int16_t *buf = p->audio + LPC_ORDER + offset; int fwd_lag = autocorr_max(buf, offset, &energy[1], pitch_lag, SUBFRAME_LEN, 1); int back_lag = autocorr_max(buf, offset, &energy[3], pitch_lag, SUBFRAME_LEN, -1); ppf->index = 0; ppf->opt_gain = 0; ppf->sc_gain = 0x7fff; /* Case 0, Section 3.6 */ if (!back_lag && !fwd_lag) return; /* Compute target energy */ energy[0] = ff_g723_1_dot_product(buf, buf, SUBFRAME_LEN); /* Compute forward residual energy */ if (fwd_lag) energy[2] = ff_g723_1_dot_product(buf + fwd_lag, buf + fwd_lag, SUBFRAME_LEN); /* Compute backward residual energy */ if (back_lag) energy[4] = ff_g723_1_dot_product(buf - back_lag, buf - back_lag, SUBFRAME_LEN); /* Normalize and shorten */ temp1 = 0; for (i = 0; i < 5; i++) temp1 = FFMAX(energy[i], temp1); scale = ff_g723_1_normalize_bits(temp1, 31); for (i = 0; i < 5; i++) energy[i] = (energy[i] << scale) >> 16; if (fwd_lag && !back_lag) { /* Case 1 */ comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1], energy[2]); } else if (!fwd_lag) { /* Case 2 */ comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3], energy[4]); } else { /* Case 3 */ /* * Select the largest of energy[1]^2/energy[2] * and energy[3]^2/energy[4] */ temp1 = energy[4] * ((energy[1] * energy[1] + (1 << 14)) >> 15); temp2 = energy[2] * ((energy[3] * energy[3] + (1 << 14)) >> 15); if (temp1 >= temp2) { comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1], energy[2]); } else { 
comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3], energy[4]); } } } /** * Classify frames as voiced/unvoiced. * * @param p the context * @param pitch_lag decoded pitch_lag * @param exc_eng excitation energy estimation * @param scale scaling factor of exc_eng * * @return residual interpolation index if voiced, 0 otherwise */ static int comp_interp_index(G723_1_Context *p, int pitch_lag, int *exc_eng, int *scale) { int offset = PITCH_MAX + 2 * SUBFRAME_LEN; int16_t *buf = p->audio + LPC_ORDER; int index, ccr, tgt_eng, best_eng, temp; *scale = ff_g723_1_scale_vector(buf, p->excitation, FRAME_LEN + PITCH_MAX); buf += offset; /* Compute maximum backward cross-correlation */ ccr = 0; index = autocorr_max(buf, offset, &ccr, pitch_lag, SUBFRAME_LEN * 2, -1); ccr = av_sat_add32(ccr, 1 << 15) >> 16; /* Compute target energy */ tgt_eng = ff_g723_1_dot_product(buf, buf, SUBFRAME_LEN * 2); *exc_eng = av_sat_add32(tgt_eng, 1 << 15) >> 16; if (ccr <= 0) return 0; /* Compute best energy */ best_eng = ff_g723_1_dot_product(buf - index, buf - index, SUBFRAME_LEN * 2); best_eng = av_sat_add32(best_eng, 1 << 15) >> 16; temp = best_eng * *exc_eng >> 3; if (temp < ccr * ccr) { return index; } else return 0; } /** * Perform residual interpolation based on frame classification. 
* * @param buf decoded excitation vector * @param out output vector * @param lag decoded pitch lag * @param gain interpolated gain * @param rseed seed for random number generator */ static void residual_interp(int16_t *buf, int16_t *out, int lag, int gain, int *rseed) { int i; if (lag) { /* Voiced */ int16_t *vector_ptr = buf + PITCH_MAX; /* Attenuate */ for (i = 0; i < lag; i++) out[i] = vector_ptr[i - lag] * 3 >> 2; av_memcpy_backptr((uint8_t*)(out + lag), lag * sizeof(*out), (FRAME_LEN - lag) * sizeof(*out)); } else { /* Unvoiced */ for (i = 0; i < FRAME_LEN; i++) { *rseed = (int16_t)(*rseed * 521 + 259); out[i] = gain * *rseed >> 15; } memset(buf, 0, (FRAME_LEN + PITCH_MAX) * sizeof(*buf)); } } /** * Perform IIR filtering. * * @param fir_coef FIR coefficients * @param iir_coef IIR coefficients * @param src source vector * @param dest destination vector * @param width width of the output, 16 bits(0) / 32 bits(1) */ #define iir_filter(fir_coef, iir_coef, src, dest, width)\ {\ int m, n;\ int res_shift = 16 & ~-(width);\ int in_shift = 16 - res_shift;\ \ for (m = 0; m < SUBFRAME_LEN; m++) {\ int64_t filter = 0;\ for (n = 1; n <= LPC_ORDER; n++) {\ filter -= (fir_coef)[n - 1] * (src)[m - n] -\ (iir_coef)[n - 1] * ((dest)[m - n] >> in_shift);\ }\ \ (dest)[m] = av_clipl_int32(((src)[m] * 65536) + (filter * 8) +\ (1 << 15)) >> res_shift;\ }\ } /** * Adjust gain of postfiltered signal. 
* * @param p the context * @param buf postfiltered output vector * @param energy input energy coefficient */ static void gain_scale(G723_1_Context *p, int16_t * buf, int energy) { int num, denom, gain, bits1, bits2; int i; num = energy; denom = 0; for (i = 0; i < SUBFRAME_LEN; i++) { int temp = buf[i] >> 2; temp *= temp; denom = av_sat_dadd32(denom, temp); } if (num && denom) { bits1 = ff_g723_1_normalize_bits(num, 31); bits2 = ff_g723_1_normalize_bits(denom, 31); num = num << bits1 >> 1; denom <<= bits2; bits2 = 5 + bits1 - bits2; bits2 = av_clip_uintp2(bits2, 5); gain = (num >> 1) / (denom >> 16); gain = square_root(gain << 16 >> bits2); } else { gain = 1 << 12; } for (i = 0; i < SUBFRAME_LEN; i++) { p->pf_gain = (15 * p->pf_gain + gain + (1 << 3)) >> 4; buf[i] = av_clip_int16((buf[i] * (p->pf_gain + (p->pf_gain >> 4)) + (1 << 10)) >> 11); } } /** * Perform formant filtering. * * @param p the context * @param lpc quantized lpc coefficients * @param buf input buffer * @param dst output buffer */ static void formant_postfilter(G723_1_Context *p, int16_t *lpc, int16_t *buf, int16_t *dst) { int16_t filter_coef[2][LPC_ORDER]; int filter_signal[LPC_ORDER + FRAME_LEN], *signal_ptr; int i, j, k; memcpy(buf, p->fir_mem, LPC_ORDER * sizeof(*buf)); memcpy(filter_signal, p->iir_mem, LPC_ORDER * sizeof(*filter_signal)); for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) { for (k = 0; k < LPC_ORDER; k++) { filter_coef[0][k] = (-lpc[k] * postfilter_tbl[0][k] + (1 << 14)) >> 15; filter_coef[1][k] = (-lpc[k] * postfilter_tbl[1][k] + (1 << 14)) >> 15; } iir_filter(filter_coef[0], filter_coef[1], buf + i, filter_signal + i, 1); lpc += LPC_ORDER; } memcpy(p->fir_mem, buf + FRAME_LEN, LPC_ORDER * sizeof(int16_t)); memcpy(p->iir_mem, filter_signal + FRAME_LEN, LPC_ORDER * sizeof(int)); buf += LPC_ORDER; signal_ptr = filter_signal + LPC_ORDER; for (i = 0; i < SUBFRAMES; i++) { int temp; int auto_corr[2]; int scale, energy; /* Normalize */ scale = 
ff_g723_1_scale_vector(dst, buf, SUBFRAME_LEN); /* Compute auto correlation coefficients */ auto_corr[0] = ff_g723_1_dot_product(dst, dst + 1, SUBFRAME_LEN - 1); auto_corr[1] = ff_g723_1_dot_product(dst, dst, SUBFRAME_LEN); /* Compute reflection coefficient */ temp = auto_corr[1] >> 16; if (temp) { temp = (auto_corr[0] >> 2) / temp; } p->reflection_coef = (3 * p->reflection_coef + temp + 2) >> 2; temp = -p->reflection_coef >> 1 & ~3; /* Compensation filter */ for (j = 0; j < SUBFRAME_LEN; j++) { dst[j] = av_sat_dadd32(signal_ptr[j], (signal_ptr[j - 1] >> 16) * temp) >> 16; } /* Compute normalized signal energy */ temp = 2 * scale + 4; if (temp < 0) { energy = av_clipl_int32((int64_t)auto_corr[1] << -temp); } else energy = auto_corr[1] >> temp; gain_scale(p, dst, energy); buf += SUBFRAME_LEN; signal_ptr += SUBFRAME_LEN; dst += SUBFRAME_LEN; } } static int sid_gain_to_lsp_index(int gain) { if (gain < 0x10) return gain << 6; else if (gain < 0x20) return gain - 8 << 7; else return gain - 20 << 8; } static inline int cng_rand(int *state, int base) { *state = (*state * 521 + 259) & 0xFFFF; return (*state & 0x7FFF) * base >> 15; } static int estimate_sid_gain(G723_1_Context *p) { int i, shift, seg, seg2, t, val, val_add, x, y; shift = 16 - p->cur_gain * 2; if (shift > 0) { if (p->sid_gain == 0) { t = 0; } else if (shift >= 31 || (int32_t)((uint32_t)p->sid_gain << shift) >> shift != p->sid_gain) { if (p->sid_gain < 0) t = INT32_MIN; else t = INT32_MAX; } else t = p->sid_gain << shift; }else t = p->sid_gain >> -shift; x = av_clipl_int32(t * (int64_t)cng_filt[0] >> 16); if (x >= cng_bseg[2]) return 0x3F; if (x >= cng_bseg[1]) { shift = 4; seg = 3; } else { shift = 3; seg = (x >= cng_bseg[0]); } seg2 = FFMIN(seg, 3); val = 1 << shift; val_add = val >> 1; for (i = 0; i < shift; i++) { t = seg * 32 + (val << seg2); t *= t; if (x >= t) val += val_add; else val -= val_add; val_add >>= 1; } t = seg * 32 + (val << seg2); y = t * t - x; if (y <= 0) { t = seg * 32 + (val + 1 << 
seg2); t = t * t - x; val = (seg2 - 1) * 16 + val; if (t >= y) val++; } else { t = seg * 32 + (val - 1 << seg2); t = t * t - x; val = (seg2 - 1) * 16 + val; if (t >= y) val--; } return val; } static void generate_noise(G723_1_Context *p) { int i, j, idx, t; int off[SUBFRAMES]; int signs[SUBFRAMES / 2 * 11], pos[SUBFRAMES / 2 * 11]; int tmp[SUBFRAME_LEN * 2]; int16_t *vector_ptr; int64_t sum; int b0, c, delta, x, shift; p->pitch_lag[0] = cng_rand(&p->cng_random_seed, 21) + 123; p->pitch_lag[1] = cng_rand(&p->cng_random_seed, 19) + 123; for (i = 0; i < SUBFRAMES; i++) { p->subframe[i].ad_cb_gain = cng_rand(&p->cng_random_seed, 50) + 1; p->subframe[i].ad_cb_lag = cng_adaptive_cb_lag[i]; } for (i = 0; i < SUBFRAMES / 2; i++) { t = cng_rand(&p->cng_random_seed, 1 << 13); off[i * 2] = t & 1; off[i * 2 + 1] = ((t >> 1) & 1) + SUBFRAME_LEN; t >>= 2; for (j = 0; j < 11; j++) { signs[i * 11 + j] = ((t & 1) * 2 - 1) * (1 << 14); t >>= 1; } } idx = 0; for (i = 0; i < SUBFRAMES; i++) { for (j = 0; j < SUBFRAME_LEN / 2; j++) tmp[j] = j; t = SUBFRAME_LEN / 2; for (j = 0; j < pulses[i]; j++, idx++) { int idx2 = cng_rand(&p->cng_random_seed, t); pos[idx] = tmp[idx2] * 2 + off[i]; tmp[idx2] = tmp[--t]; } } vector_ptr = p->audio + LPC_ORDER; memcpy(vector_ptr, p->prev_excitation, PITCH_MAX * sizeof(*p->excitation)); for (i = 0; i < SUBFRAMES; i += 2) { ff_g723_1_gen_acb_excitation(vector_ptr, vector_ptr, p->pitch_lag[i >> 1], &p->subframe[i], p->cur_rate); ff_g723_1_gen_acb_excitation(vector_ptr + SUBFRAME_LEN, vector_ptr + SUBFRAME_LEN, p->pitch_lag[i >> 1], &p->subframe[i + 1], p->cur_rate); t = 0; for (j = 0; j < SUBFRAME_LEN * 2; j++) t |= FFABS(vector_ptr[j]); t = FFMIN(t, 0x7FFF); if (!t) { shift = 0; } else { shift = -10 + av_log2(t); if (shift < -2) shift = -2; } sum = 0; if (shift < 0) { for (j = 0; j < SUBFRAME_LEN * 2; j++) { t = vector_ptr[j] * (1 << -shift); sum += t * t; tmp[j] = t; } } else { for (j = 0; j < SUBFRAME_LEN * 2; j++) { t = vector_ptr[j] >> shift; sum += t 
* t; tmp[j] = t; } } b0 = 0; for (j = 0; j < 11; j++) b0 += tmp[pos[(i / 2) * 11 + j]] * signs[(i / 2) * 11 + j]; b0 = b0 * 2 * 2979LL + (1 << 29) >> 30; // approximated division by 11 c = p->cur_gain * (p->cur_gain * SUBFRAME_LEN >> 5); if (shift * 2 + 3 >= 0) c >>= shift * 2 + 3; else c <<= -(shift * 2 + 3); c = (av_clipl_int32(sum << 1) - c) * 2979LL >> 15; delta = b0 * b0 * 2 - c; if (delta <= 0) { x = -b0; } else { delta = square_root(delta); x = delta - b0; t = delta + b0; if (FFABS(t) < FFABS(x)) x = -t; } shift++; if (shift < 0) x >>= -shift; else x *= 1 << shift; x = av_clip(x, -10000, 10000); for (j = 0; j < 11; j++) { idx = (i / 2) * 11 + j; vector_ptr[pos[idx]] = av_clip_int16(vector_ptr[pos[idx]] + (x * signs[idx] >> 15)); } /* copy decoded data to serve as a history for the next decoded subframes */ memcpy(vector_ptr + PITCH_MAX, vector_ptr, sizeof(*vector_ptr) * SUBFRAME_LEN * 2); vector_ptr += SUBFRAME_LEN * 2; } /* Save the excitation for the next frame */ memcpy(p->prev_excitation, p->audio + LPC_ORDER + FRAME_LEN, PITCH_MAX * sizeof(*p->excitation)); } static int g723_1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { G723_1_Context *p = avctx->priv_data; AVFrame *frame = data; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; int dec_mode = buf[0] & 3; PPFParam ppf[SUBFRAMES]; int16_t cur_lsp[LPC_ORDER]; int16_t lpc[SUBFRAMES * LPC_ORDER]; int16_t acb_vector[SUBFRAME_LEN]; int16_t *out; int bad_frame = 0, i, j, ret; int16_t *audio = p->audio; if (buf_size < frame_size[dec_mode]) { if (buf_size) av_log(avctx, AV_LOG_WARNING, "Expected %d bytes, got %d - skipping packet\n", frame_size[dec_mode], buf_size); *got_frame_ptr = 0; return buf_size; } if (unpack_bitstream(p, buf, buf_size) < 0) { bad_frame = 1; if (p->past_frame_type == ACTIVE_FRAME) p->cur_frame_type = ACTIVE_FRAME; else p->cur_frame_type = UNTRANSMITTED_FRAME; } frame->nb_samples = FRAME_LEN; if ((ret = ff_get_buffer(avctx, frame, 0)) < 
0) return ret; out = (int16_t *)frame->data[0]; if (p->cur_frame_type == ACTIVE_FRAME) { if (!bad_frame) p->erased_frames = 0; else if (p->erased_frames != 3) p->erased_frames++; ff_g723_1_inverse_quant(cur_lsp, p->prev_lsp, p->lsp_index, bad_frame); ff_g723_1_lsp_interpolate(lpc, cur_lsp, p->prev_lsp); /* Save the lsp_vector for the next frame */ memcpy(p->prev_lsp, cur_lsp, LPC_ORDER * sizeof(*p->prev_lsp)); /* Generate the excitation for the frame */ memcpy(p->excitation, p->prev_excitation, PITCH_MAX * sizeof(*p->excitation)); if (!p->erased_frames) { int16_t *vector_ptr = p->excitation + PITCH_MAX; /* Update interpolation gain memory */ p->interp_gain = fixed_cb_gain[(p->subframe[2].amp_index + p->subframe[3].amp_index) >> 1]; for (i = 0; i < SUBFRAMES; i++) { gen_fcb_excitation(vector_ptr, &p->subframe[i], p->cur_rate, p->pitch_lag[i >> 1], i); ff_g723_1_gen_acb_excitation(acb_vector, &p->excitation[SUBFRAME_LEN * i], p->pitch_lag[i >> 1], &p->subframe[i], p->cur_rate); /* Get the total excitation */ for (j = 0; j < SUBFRAME_LEN; j++) { int v = av_clip_int16(vector_ptr[j] * 2); vector_ptr[j] = av_clip_int16(v + acb_vector[j]); } vector_ptr += SUBFRAME_LEN; } vector_ptr = p->excitation + PITCH_MAX; p->interp_index = comp_interp_index(p, p->pitch_lag[1], &p->sid_gain, &p->cur_gain); /* Perform pitch postfiltering */ if (p->postfilter) { i = PITCH_MAX; for (j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) comp_ppf_coeff(p, i, p->pitch_lag[j >> 1], ppf + j, p->cur_rate); for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) ff_acelp_weighted_vector_sum(p->audio + LPC_ORDER + i, vector_ptr + i, vector_ptr + i + ppf[j].index, ppf[j].sc_gain, ppf[j].opt_gain, 1 << 14, 15, SUBFRAME_LEN); } else { audio = vector_ptr - LPC_ORDER; } /* Save the excitation for the next frame */ memcpy(p->prev_excitation, p->excitation + FRAME_LEN, PITCH_MAX * sizeof(*p->excitation)); } else { p->interp_gain = (p->interp_gain * 3 + 2) >> 2; if (p->erased_frames == 3) { /* Mute output */ 
memset(p->excitation, 0, (FRAME_LEN + PITCH_MAX) * sizeof(*p->excitation)); memset(p->prev_excitation, 0, PITCH_MAX * sizeof(*p->excitation)); memset(frame->data[0], 0, (FRAME_LEN + LPC_ORDER) * sizeof(int16_t)); } else { int16_t *buf = p->audio + LPC_ORDER; /* Regenerate frame */ residual_interp(p->excitation, buf, p->interp_index, p->interp_gain, &p->random_seed); /* Save the excitation for the next frame */ memcpy(p->prev_excitation, buf + (FRAME_LEN - PITCH_MAX), PITCH_MAX * sizeof(*p->excitation)); } } p->cng_random_seed = CNG_RANDOM_SEED; } else { if (p->cur_frame_type == SID_FRAME) { p->sid_gain = sid_gain_to_lsp_index(p->subframe[0].amp_index); ff_g723_1_inverse_quant(p->sid_lsp, p->prev_lsp, p->lsp_index, 0); } else if (p->past_frame_type == ACTIVE_FRAME) { p->sid_gain = estimate_sid_gain(p); } if (p->past_frame_type == ACTIVE_FRAME) p->cur_gain = p->sid_gain; else p->cur_gain = (p->cur_gain * 7 + p->sid_gain) >> 3; generate_noise(p); ff_g723_1_lsp_interpolate(lpc, p->sid_lsp, p->prev_lsp); /* Save the lsp_vector for the next frame */ memcpy(p->prev_lsp, p->sid_lsp, LPC_ORDER * sizeof(*p->prev_lsp)); } p->past_frame_type = p->cur_frame_type; memcpy(p->audio, p->synth_mem, LPC_ORDER * sizeof(*p->audio)); for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) ff_celp_lp_synthesis_filter(p->audio + i, &lpc[j * LPC_ORDER], audio + i, SUBFRAME_LEN, LPC_ORDER, 0, 1, 1 << 12); memcpy(p->synth_mem, p->audio + FRAME_LEN, LPC_ORDER * sizeof(*p->audio)); if (p->postfilter) { formant_postfilter(p, lpc, p->audio, out); } else { // if output is not postfiltered it should be scaled by 2 for (i = 0; i < FRAME_LEN; i++) out[i] = av_clip_int16(p->audio[LPC_ORDER + i] << 1); } *got_frame_ptr = 1; return frame_size[dec_mode]; } #define OFFSET(x) offsetof(G723_1_Context, x) #define AD AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM static const AVOption options[] = { { "postfilter", "enable postfilter", OFFSET(postfilter), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, 
AD }, { NULL } }; static const AVClass g723_1dec_class = { .class_name = "G.723.1 decoder", .item_name = av_default_item_name, .option = options, .version = LIBAVUTIL_VERSION_INT, }; AVCodec ff_g723_1_decoder = { .name = "g723_1", .long_name = NULL_IF_CONFIG_SMALL("G.723.1"), .type = AVMEDIA_TYPE_AUDIO, .id = AV_CODEC_ID_G723_1, .priv_data_size = sizeof(G723_1_Context), .init = g723_1_decode_init, .decode = g723_1_decode_frame, .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1, .priv_class = &g723_1dec_class, };
null
null
null
null
71,374
40,871
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
205,866
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef _LINUX_RESET_CONTROLLER_H_ #define _LINUX_RESET_CONTROLLER_H_ #include <linux/list.h> struct reset_controller_dev; /** * struct reset_control_ops * * @reset: for self-deasserting resets, does all necessary * things to reset the device * @assert: manually assert the reset line, if supported * @deassert: manually deassert the reset line, if supported * @status: return the status of the reset line, if supported */ struct reset_control_ops { int (*reset)(struct reset_controller_dev *rcdev, unsigned long id); int (*assert)(struct reset_controller_dev *rcdev, unsigned long id); int (*deassert)(struct reset_controller_dev *rcdev, unsigned long id); int (*status)(struct reset_controller_dev *rcdev, unsigned long id); }; struct module; struct device_node; struct of_phandle_args; /** * struct reset_controller_dev - reset controller entity that might * provide multiple reset controls * @ops: a pointer to device specific struct reset_control_ops * @owner: kernel module of the reset controller driver * @list: internal list of reset controller devices * @reset_control_head: head of internal list of requested reset controls * @of_node: corresponding device tree node as phandle target * @of_reset_n_cells: number of cells in reset line specifiers * @of_xlate: translation function to translate from specifier as found in the * device tree to id as given to the reset control ops * @nr_resets: number of reset controls in this reset controller device */ struct reset_controller_dev { const struct reset_control_ops *ops; struct module *owner; struct list_head list; struct list_head reset_control_head; struct device_node *of_node; int of_reset_n_cells; int (*of_xlate)(struct reset_controller_dev *rcdev, const struct of_phandle_args *reset_spec); unsigned int nr_resets; }; int reset_controller_register(struct reset_controller_dev *rcdev); void reset_controller_unregister(struct reset_controller_dev *rcdev); struct device; int devm_reset_controller_register(struct device *dev, struct 
reset_controller_dev *rcdev); #endif
null
null
null
null
114,213
59,272
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
59,272
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_EXTENSIONS_API_CONTENT_SETTINGS_CONTENT_SETTINGS_HELPERS_H__ #define CHROME_BROWSER_EXTENSIONS_API_CONTENT_SETTINGS_CONTENT_SETTINGS_HELPERS_H__ #include <string> #include "components/content_settings/core/common/content_settings.h" #include "components/content_settings/core/common/content_settings_pattern.h" #include "components/content_settings/core/common/content_settings_types.h" namespace extensions { namespace content_settings_helpers { // Parses an extension match pattern and returns a corresponding // content settings pattern object. // If |pattern_str| is invalid or can't be converted to a content settings // pattern, |error| is set to the parsing error and an invalid pattern // is returned. ContentSettingsPattern ParseExtensionPattern(const std::string& pattern_str, std::string* error); // Converts a content settings type string to the corresponding // ContentSettingsType. Returns CONTENT_SETTINGS_TYPE_DEFAULT if the string // didn't specify a valid content settings type. ContentSettingsType StringToContentSettingsType( const std::string& content_type); // Returns a string representation of a ContentSettingsType. std::string ContentSettingsTypeToString(ContentSettingsType type); } // namespace content_settings_helpers } // namespace extensions #endif // CHROME_BROWSER_EXTENSIONS_API_CONTENT_SETTINGS_CONTENT_SETTINGS_HELPERS_H__
null
null
null
null
56,135
370
null
train_val
a6802e21d824e786d1e2a8440cf749a6e1a8d95f
160,498
ImageMagick
0
https://github.com/ImageMagick/ImageMagick
2017-07-18 18:28:29-04:00
/* Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization dedicated to making software imaging solutions freely available. You may not use this file except in compliance with the License. obtain a copy of the License at https://www.imagemagick.org/script/license.php Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. MagickCore private graphic gems methods. */ #ifndef MAGICKCORE_GEM_PRIVATE_H #define MAGICKCORE_GEM_PRIVATE_H #include "MagickCore/pixel-accessor.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif #define D65X 0.950456 #define D65Y 1.0 #define D65Z 1.088754 #define CIEEpsilon (216.0/24389.0) #define CIEK (24389.0/27.0) extern MagickPrivate double GenerateDifferentialNoise(RandomInfo *,const Quantum,const NoiseType, const double); extern MagickPrivate size_t GetOptimalKernelWidth(const double,const double), GetOptimalKernelWidth1D(const double,const double), GetOptimalKernelWidth2D(const double,const double); extern MagickPrivate void ConvertHCLToRGB(const double,const double,const double,double *,double *, double *), ConvertHCLpToRGB(const double,const double,const double,double *,double *, double *), ConvertHSBToRGB(const double,const double,const double,double *,double *, double *), ConvertHSIToRGB(const double,const double,const double,double *,double *, double *), ConvertHSVToRGB(const double,const double,const double,double *,double *, double *), ConvertHWBToRGB(const double,const double,const double,double *,double *, double *), ConvertLCHabToRGB(const double,const double,const double,double *,double *, double *), ConvertLCHuvToRGB(const double,const double,const double,double *,double *, double *), ConvertRGBToHCL(const double,const double,const double,double 
*,double *, double *), ConvertRGBToHCLp(const double,const double,const double,double *,double *, double *), ConvertRGBToHSB(const double,const double,const double,double *,double *, double *), ConvertRGBToHSI(const double,const double,const double,double *,double *, double *), ConvertRGBToHSV(const double,const double,const double,double *,double *, double *), ConvertRGBToHWB(const double,const double,const double,double *,double *, double *), ConvertRGBToLCHab(const double,const double,const double,double *,double *, double *), ConvertRGBToLCHuv(const double,const double,const double,double *,double *, double *); static inline void ConvertLabToXYZ(const double L,const double a,const double b, double *X,double *Y,double *Z) { double x, y, z; assert(X != (double *) NULL); assert(Y != (double *) NULL); assert(Z != (double *) NULL); y=(L+16.0)/116.0; x=y+a/500.0; z=y-b/200.0; if ((x*x*x) > CIEEpsilon) x=(x*x*x); else x=(116.0*x-16.0)/CIEK; if ((y*y*y) > CIEEpsilon) y=(y*y*y); else y=L/CIEK; if ((z*z*z) > CIEEpsilon) z=(z*z*z); else z=(116.0*z-16.0)/CIEK; *X=D65X*x; *Y=D65Y*y; *Z=D65Z*z; } static inline void ConvertLuvToXYZ(const double L,const double u,const double v, double *X,double *Y,double *Z) { assert(X != (double *) NULL); assert(Y != (double *) NULL); assert(Z != (double *) NULL); if (L > (CIEK*CIEEpsilon)) *Y=(double) pow((L+16.0)/116.0,3.0); else *Y=L/CIEK; *X=((*Y*((39.0*L/(v+13.0*L*(9.0*D65Y/(D65X+15.0*D65Y+3.0*D65Z))))-5.0))+ 5.0*(*Y))/((((52.0*L/(u+13.0*L*(4.0*D65X/(D65X+15.0*D65Y+3.0*D65Z))))-1.0)/ 3.0)-(-1.0/3.0)); *Z=(*X*(((52.0*L/(u+13.0*L*(4.0*D65X/(D65X+15.0*D65Y+3.0*D65Z))))-1.0)/3.0))- 5.0*(*Y); } static inline void ConvertRGBToXYZ(const double red,const double green, const double blue,double *X,double *Y,double *Z) { double b, g, r; /* Convert RGB to XYZ colorspace. 
*/ assert(X != (double *) NULL); assert(Y != (double *) NULL); assert(Z != (double *) NULL); r=QuantumScale*DecodePixelGamma(red); g=QuantumScale*DecodePixelGamma(green); b=QuantumScale*DecodePixelGamma(blue); *X=0.4124564*r+0.3575761*g+0.1804375*b; *Y=0.2126729*r+0.7151522*g+0.0721750*b; *Z=0.0193339*r+0.1191920*g+0.9503041*b; } static inline void ConvertXYZToLab(const double X,const double Y,const double Z, double *L,double *a,double *b) { double x, y, z; assert(L != (double *) NULL); assert(a != (double *) NULL); assert(b != (double *) NULL); if ((X/D65X) > CIEEpsilon) x=pow(X/D65X,1.0/3.0); else x=(CIEK*X/D65X+16.0)/116.0; if ((Y/D65Y) > CIEEpsilon) y=pow(Y/D65Y,1.0/3.0); else y=(CIEK*Y/D65Y+16.0)/116.0; if ((Z/D65Z) > CIEEpsilon) z=pow(Z/D65Z,1.0/3.0); else z=(CIEK*Z/D65Z+16.0)/116.0; *L=((116.0*y)-16.0)/100.0; *a=(500.0*(x-y))/255.0+0.5; *b=(200.0*(y-z))/255.0+0.5; } static inline void ConvertXYZToLuv(const double X,const double Y,const double Z, double *L,double *u,double *v) { double alpha; assert(L != (double *) NULL); assert(u != (double *) NULL); assert(v != (double *) NULL); if ((Y/D65Y) > CIEEpsilon) *L=(double) (116.0*pow(Y/D65Y,1.0/3.0)-16.0); else *L=CIEK*(Y/D65Y); alpha=PerceptibleReciprocal(X+15.0*Y+3.0*Z); *u=13.0*(*L)*((4.0*alpha*X)-(4.0*D65X/(D65X+15.0*D65Y+3.0*D65Z))); *v=13.0*(*L)*((9.0*alpha*Y)-(9.0*D65Y/(D65X+15.0*D65Y+3.0*D65Z))); *L/=100.0; *u=(*u+134.0)/354.0; *v=(*v+140.0)/262.0; } static inline void ConvertXYZToRGB(const double X,const double Y,const double Z, double *red,double *green,double *blue) { double b, g, r; assert(red != (double *) NULL); assert(green != (double *) NULL); assert(blue != (double *) NULL); r=3.2404542*X-1.5371385*Y-0.4985314*Z; g=(-0.9692660)*X+1.8760108*Y+0.0415560*Z; b=0.0556434*X-0.2040259*Y+1.0572252*Z; *red=EncodePixelGamma(QuantumRange*r); *green=EncodePixelGamma(QuantumRange*g); *blue=EncodePixelGamma(QuantumRange*b); } #if defined(__cplusplus) || defined(c_plusplus) } #endif #endif
null
null
null
null
72,791
69,511
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
69,511
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * pthread_key_create.c * * Description: * POSIX thread functions which implement thread-specific data (TSD). * * -------------------------------------------------------------------------- * * Pthreads-win32 - POSIX Threads Library for Win32 * Copyright(C) 1998 John E. Bossom * Copyright(C) 1999,2005 Pthreads-win32 contributors * * Contact Email: [email protected] * * The current list of contributors is contained * in the file CONTRIBUTORS included with the source * code distribution. The list can also be seen at the * following World Wide Web location: * http://sources.redhat.com/pthreads-win32/contributors.html * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library in the file COPYING.LIB; * if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ #include "pthread.h" #include "implement.h" /* TLS_OUT_OF_INDEXES not defined on WinCE */ #if !defined(TLS_OUT_OF_INDEXES) #define TLS_OUT_OF_INDEXES 0xffffffff #endif int pthread_key_create (pthread_key_t * key, void (PTW32_CDECL *destructor) (void *)) /* * ------------------------------------------------------ * DOCPUBLIC * This function creates a thread-specific data key visible * to all threads. All existing and new threads have a value * NULL for key until set using pthread_setspecific. When any * thread with a non-NULL value for key terminates, 'destructor' * is called with key's current value for that thread. 
* * PARAMETERS * key * pointer to an instance of pthread_key_t * * * DESCRIPTION * This function creates a thread-specific data key visible * to all threads. All existing and new threads have a value * NULL for key until set using pthread_setspecific. When any * thread with a non-NULL value for key terminates, 'destructor' * is called with key's current value for that thread. * * RESULTS * 0 successfully created semaphore, * EAGAIN insufficient resources or PTHREAD_KEYS_MAX * exceeded, * ENOMEM insufficient memory to create the key, * * ------------------------------------------------------ */ { int result = 0; pthread_key_t newkey; if ((newkey = (pthread_key_t) calloc (1, sizeof (*newkey))) == NULL) { result = ENOMEM; } else if ((newkey->key = TlsAlloc ()) == TLS_OUT_OF_INDEXES) { result = EAGAIN; free (newkey); newkey = NULL; } else if (destructor != NULL) { /* * Have to manage associations between thread and key; * Therefore, need a lock that allows competing threads * to gain exclusive access to the key->threads list. * * The mutex will only be created when it is first locked. */ newkey->keyLock = 0; newkey->destructor = destructor; } *key = newkey; return (result); }
null
null
null
null
66,374
12,890
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
12,890
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/spellcheck/renderer/custom_dictionary_engine.h" #include <stddef.h> #include "base/strings/utf_string_conversions.h" CustomDictionaryEngine::CustomDictionaryEngine() { } CustomDictionaryEngine::~CustomDictionaryEngine() { } void CustomDictionaryEngine::Init(const std::set<std::string>& custom_words) { dictionary_.clear(); // SpellingMenuOberver calls UTF16ToUTF8(word) to convert words for storage, // synchronization, and use in the custom dictionary engine. Since // (UTF8ToUTF16(UTF16ToUTF8(word)) == word) holds, the engine does not need to // normalize the strings. for (const std::string& word : custom_words) dictionary_.insert(base::UTF8ToUTF16(word)); } void CustomDictionaryEngine::OnCustomDictionaryChanged( const std::set<std::string>& words_added, const std::set<std::string>& words_removed) { for (const std::string& word : words_added) dictionary_.insert(base::UTF8ToUTF16(word)); for (const std::string& word : words_removed) dictionary_.erase(base::UTF8ToUTF16(word)); } bool CustomDictionaryEngine::SpellCheckWord( const base::string16& text, int misspelling_start, int misspelling_len) { // The text to be checked is empty on OSX(async) right now. // TODO(groby): Fix as part of async hook-up. (http://crbug.com/178241) return misspelling_start >= 0 && misspelling_len > 0 && size_t(misspelling_start + misspelling_len) <= text.length() && dictionary_.count(text.substr(misspelling_start, misspelling_len)) > 0; }
null
null
null
null
9,753
69,475
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
69,475
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * pthread_attr_getstackaddr.c * * Description: * This translation unit implements operations on thread attribute objects. * * -------------------------------------------------------------------------- * * Pthreads-win32 - POSIX Threads Library for Win32 * Copyright(C) 1998 John E. Bossom * Copyright(C) 1999,2005 Pthreads-win32 contributors * * Contact Email: [email protected] * * The current list of contributors is contained * in the file CONTRIBUTORS included with the source * code distribution. The list can also be seen at the * following World Wide Web location: * http://sources.redhat.com/pthreads-win32/contributors.html * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library in the file COPYING.LIB; * if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ #include "pthread.h" #include "implement.h" /* ignore warning "unreferenced formal parameter" */ #if defined(_MSC_VER) #pragma warning( disable : 4100 ) #endif int pthread_attr_getstackaddr (const pthread_attr_t * attr, void **stackaddr) /* * ------------------------------------------------------ * DOCPUBLIC * This function determines the address of the stack * on which threads created with 'attr' will run. * * PARAMETERS * attr * pointer to an instance of pthread_attr_t * * stackaddr * pointer into which is returned the stack address. 
* * * DESCRIPTION * This function determines the address of the stack * on which threads created with 'attr' will run. * * NOTES: * 1) Function supported only if this macro is * defined: * * _POSIX_THREAD_ATTR_STACKADDR * * 2) Create only one thread for each stack * address.. * * RESULTS * 0 successfully retreived stack address, * EINVAL 'attr' is invalid * ENOSYS function not supported * * ------------------------------------------------------ */ { #if defined( _POSIX_THREAD_ATTR_STACKADDR ) if (ptw32_is_attr (attr) != 0) { return EINVAL; } *stackaddr = (*attr)->stackaddr; return 0; #else return ENOSYS; #endif /* _POSIX_THREAD_ATTR_STACKADDR */ }
null
null
null
null
66,338
16,871
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
16,871
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/viz/service/display/scoped_render_pass_texture.h" #include "base/bits.h" #include "base/logging.h" #include "components/viz/common/gpu/context_provider.h" #include "components/viz/common/resources/resource_format_utils.h" #include "gpu/GLES2/gl2extchromium.h" #include "gpu/command_buffer/client/gles2_interface.h" namespace viz { ScopedRenderPassTexture::ScopedRenderPassTexture() = default; ScopedRenderPassTexture::ScopedRenderPassTexture( ContextProvider* context_provider, const gfx::Size& size, ResourceFormat format, const gfx::ColorSpace& color_space, bool mipmap) : context_provider_(context_provider), size_(size), mipmap_(mipmap), color_space_(color_space) { DCHECK(context_provider_); gpu::gles2::GLES2Interface* gl = context_provider_->ContextGL(); const gpu::Capabilities& caps = context_provider_->ContextCapabilities(); gl->GenTextures(1, &gl_id_); gl->BindTexture(GL_TEXTURE_2D, gl_id_); gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); // This texture will be bound as a framebuffer, so optimize for that. 
if (caps.texture_usage) { gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_USAGE_ANGLE, GL_FRAMEBUFFER_ATTACHMENT_ANGLE); } if (caps.texture_storage) { GLint levels = 1; if (caps.texture_npot && mipmap_) levels += base::bits::Log2Floor(std::max(size_.width(), size_.height())); gl->TexStorage2DEXT(GL_TEXTURE_2D, levels, TextureStorageFormat(format), size_.width(), size_.height()); } else { gl->TexImage2D(GL_TEXTURE_2D, 0, GLInternalFormat(format), size_.width(), size_.height(), 0, GLDataFormat(format), GLDataType(format), nullptr); } } ScopedRenderPassTexture::~ScopedRenderPassTexture() { Free(); } ScopedRenderPassTexture::ScopedRenderPassTexture( ScopedRenderPassTexture&& other) { context_provider_ = other.context_provider_; size_ = other.size_; mipmap_ = other.mipmap_; color_space_ = other.color_space_; gl_id_ = other.gl_id_; mipmap_state_ = other.mipmap_state_; // When being moved, other will no longer hold this gl_id_. other.gl_id_ = 0; } ScopedRenderPassTexture& ScopedRenderPassTexture::operator=( ScopedRenderPassTexture&& other) { if (this != &other) { Free(); context_provider_ = other.context_provider_; size_ = other.size_; mipmap_ = other.mipmap_; color_space_ = other.color_space_; gl_id_ = other.gl_id_; mipmap_state_ = other.mipmap_state_; // When being moved, other will no longer hold this gl_id_. other.gl_id_ = 0; } return *this; } void ScopedRenderPassTexture::Free() { if (!gl_id_) return; gpu::gles2::GLES2Interface* gl = context_provider_->ContextGL(); gl->DeleteTextures(1, &gl_id_); gl_id_ = 0; } void ScopedRenderPassTexture::BindForSampling() { gpu::gles2::GLES2Interface* gl = context_provider_->ContextGL(); gl->BindTexture(GL_TEXTURE_2D, gl_id_); switch (mipmap_state_) { case INVALID: break; case GENERATE: // TODO(crbug.com/803286): npot texture always return false on ubuntu // desktop. The npot texture check is probably failing on desktop GL. 
DCHECK(context_provider_->ContextCapabilities().texture_npot); gl->GenerateMipmap(GL_TEXTURE_2D); mipmap_state_ = VALID; FALLTHROUGH; case VALID: gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); break; } } } // namespace viz
null
null
null
null
13,734
7,024
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
172,019
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef _ASM_M32R_SERIAL_H #define _ASM_M32R_SERIAL_H /* include/asm-m32r/serial.h */ #define BASE_BAUD 115200 #endif /* _ASM_M32R_SERIAL_H */
null
null
null
null
80,366