idx (int64, 0 to 63k) | question (string, length 61 to 4.03k) | target (string, length 6 to 1.23k)
---|---|---|
60,100 |
def get_hostname ( self ) : self . oem_init ( ) try : return self . _oem . get_hostname ( ) except exc . UnsupportedFunctionality : return self . get_mci ( )
|
Get the hostname used by the BMC in various contexts
|
60,101 |
def set_hostname ( self , hostname ) : self . oem_init ( ) try : return self . _oem . set_hostname ( hostname ) except exc . UnsupportedFunctionality : return self . set_mci ( hostname )
|
Set the hostname to be used by the BMC in various contexts .
|
60,102 |
def get_channel_access ( self , channel = None , read_mode = 'volatile' ) : if channel is None : channel = self . get_network_channel ( ) data = [ ] data . append ( channel & 0b00001111 ) b = 0 read_modes = { 'non_volatile' : 1 , 'volatile' : 2 , } b |= ( read_modes [ read_mode ] << 6 ) & 0b11000000 data . append ( b ) response = self . raw_command ( netfn = 0x06 , command = 0x41 , data = data ) if 'error' in response : raise Exception ( response [ 'error' ] ) data = response [ 'data' ] if len ( data ) != 2 : raise Exception ( 'expecting 2 data bytes' ) r = { } r [ 'alerting' ] = data [ 0 ] & 0b10000000 > 0 r [ 'per_msg_auth' ] = data [ 0 ] & 0b01000000 > 0 r [ 'user_level_auth' ] = data [ 0 ] & 0b00100000 > 0 access_modes = { 0 : 'disabled' , 1 : 'pre_boot' , 2 : 'always' , 3 : 'shared' } r [ 'access_mode' ] = access_modes [ data [ 0 ] & 0b00000011 ] privilege_levels = { 0 : 'reserved' , 1 : 'callback' , 2 : 'user' , 3 : 'operator' , 4 : 'administrator' , 5 : 'proprietary' , } r [ 'privilege_level' ] = privilege_levels [ data [ 1 ] & 0b00001111 ] return r
|
Get channel access
|
60,103 |
def get_channel_info ( self , channel = None ) : if channel is None : channel = self . get_network_channel ( ) data = [ ] data . append ( channel & 0b00001111 ) response = self . raw_command ( netfn = 0x06 , command = 0x42 , data = data ) if 'error' in response : raise Exception ( response [ 'error' ] ) data = response [ 'data' ] if len ( data ) != 9 : raise Exception ( 'expecting 10 data bytes got: {0}' . format ( data ) ) r = { } r [ 'Actual channel' ] = data [ 0 ] & 0b00000111 channel_medium_types = { 0 : 'reserved' , 1 : 'IPMB' , 2 : 'ICMB v1.0' , 3 : 'ICMB v0.9' , 4 : '802.3 LAN' , 5 : 'Asynch. Serial/Modem (RS-232)' , 6 : 'Other LAN' , 7 : 'PCI SMBus' , 8 : 'SMBus v1.0/1.1' , 9 : 'SMBus v2.0' , 0x0a : 'reserved for USB 1.x' , 0x0b : 'reserved for USB 2.x' , 0x0c : 'System Interface (KCS, SMIC, or BT)' , } t = data [ 1 ] & 0b01111111 if t in channel_medium_types : r [ 'Channel Medium type' ] = channel_medium_types [ t ] else : r [ 'Channel Medium type' ] = 'OEM {:02X}' . format ( t ) r [ '5-bit Channel IPMI Messaging Protocol Type' ] = data [ 2 ] & 0b00001111 session_supports = { 0 : 'no_session' , 1 : 'single' , 2 : 'multi' , 3 : 'auto' } r [ 'session_support' ] = session_supports [ ( data [ 3 ] & 0b11000000 ) >> 6 ] r [ 'active_session_count' ] = data [ 3 ] & 0b00111111 r [ 'Vendor ID' ] = [ data [ 4 ] , data [ 5 ] , data [ 6 ] ] r [ 'Auxiliary Channel Info' ] = [ data [ 7 ] , data [ 8 ] ] return r
|
Get channel info
|
60,104 |
def get_firmware ( self , components = ( ) ) : self . oem_init ( ) mcinfo = self . xraw_command ( netfn = 6 , command = 1 ) bmcver = '{0}.{1}' . format ( ord ( mcinfo [ 'data' ] [ 2 ] ) , hex ( ord ( mcinfo [ 'data' ] [ 3 ] ) ) [ 2 : ] ) return self . _oem . get_oem_firmware ( bmcver , components )
|
Retrieve OEM Firmware information
|
60,105 |
def update_firmware ( self , file , data = None , progress = None , bank = None ) : self . oem_init ( ) if progress is None : progress = lambda x : True return self . _oem . update_firmware ( file , data , progress , bank )
|
Send file to BMC to perform firmware update
|
60,106 |
def attach_remote_media ( self , url , username = None , password = None ) : self . oem_init ( ) return self . _oem . attach_remote_media ( url , username , password )
|
Attach remote media by url
|
60,107 |
def upload_media ( self , filename , progress = None ) : self . oem_init ( ) return self . _oem . upload_media ( filename , progress )
|
Upload a file to be hosted on the target BMC
|
60,108 |
def process_event ( self , event , ipmicmd , seldata ) : event [ 'oem_handler' ] = None evdata = event [ 'event_data_bytes' ] if evdata [ 0 ] & 0b11000000 == 0b10000000 : event [ 'oem_byte2' ] = evdata [ 1 ] if evdata [ 0 ] & 0b110000 == 0b100000 : event [ 'oem_byte3' ] = evdata [ 2 ]
|
Modify an event in accordance with OEM understanding .
|
60,109 |
def _got_session ( self , response ) : if 'error' in response : self . _print_error ( response [ 'error' ] ) return if not self . ipmi_session : self . callgotsession = response return response = self . ipmi_session . raw_command ( netfn = 0x6 , command = 0x48 , data = ( 1 , 1 , 192 , 0 , 0 , 0 ) ) sol_activate_codes = { 0x81 : 'SOL is disabled' , 0x82 : 'Maximum SOL session count reached' , 0x83 : 'Cannot activate payload with encryption' , 0x84 : 'Cannot activate payload without encryption' , } if 'code' in response and response [ 'code' ] : if response [ 'code' ] in constants . ipmi_completion_codes : self . _print_error ( constants . ipmi_completion_codes [ response [ 'code' ] ] ) return elif response [ 'code' ] == 0x80 : if self . force_session and not self . retriedpayload : self . retriedpayload = 1 sessrsp = self . ipmi_session . raw_command ( netfn = 0x6 , command = 0x49 , data = ( 1 , 1 , 0 , 0 , 0 , 0 ) ) self . _got_session ( sessrsp ) return else : self . _print_error ( 'SOL Session active for another client' ) return elif response [ 'code' ] in sol_activate_codes : self . _print_error ( sol_activate_codes [ response [ 'code' ] ] ) return else : self . _print_error ( 'SOL encountered Unrecognized error code %d' % response [ 'code' ] ) return if 'error' in response : self . _print_error ( response [ 'error' ] ) return self . activated = True data = response [ 'data' ] self . maxoutcount = ( data [ 5 ] << 8 ) + data [ 4 ] valid_ports = ( self . port , struct . unpack ( '<H' , struct . pack ( '>H' , self . port ) ) [ 0 ] ) if ( data [ 8 ] + ( data [ 9 ] << 8 ) ) not in valid_ports : raise NotImplementedError ( "Non-standard SOL Port Number" ) if self . ipmi_session . sol_handler is not None : self . ipmi_session . sol_handler ( { 'error' : 'Session Disconnected' } ) self . keepaliveid = self . ipmi_session . register_keepalive ( cmd = { 'netfn' : 6 , 'command' : 0x4b , 'data' : ( 1 , 1 ) } , callback = self . _got_payload_instance_info ) self . ipmi_session . sol_handler = self . _got_sol_payload self . connected = True self . _sendpendingoutput ( )
|
Private function to navigate SOL payload activation
|
60,110 |
def _got_cons_input ( self , handle ) : self . _addpendingdata ( handle . read ( ) ) if not self . awaitingack : self . _sendpendingoutput ( )
|
Callback for handle events detected by ipmi session
|
60,111 |
def close ( self ) : if self . ipmi_session : self . ipmi_session . unregister_keepalive ( self . keepaliveid ) if self . activated : try : self . ipmi_session . raw_command ( netfn = 6 , command = 0x49 , data = ( 1 , 1 , 0 , 0 , 0 , 0 ) ) except exc . IpmiException : pass
|
Shut down an SOL session
|
60,112 |
def _got_sol_payload ( self , payload ) : if type ( payload ) == dict : self . activated = False self . _print_error ( payload ) return newseq = payload [ 0 ] & 0b1111 ackseq = payload [ 1 ] & 0b1111 ackcount = payload [ 2 ] nacked = payload [ 3 ] & 0b1000000 breakdetected = payload [ 3 ] & 0b10000 remdata = "" remdatalen = 0 flag = 0 if not self . poweredon : flag |= 0b1100000 if not self . activated : flag |= 0b1010000 if newseq != 0 : if len ( payload ) > 4 : remdatalen = len ( payload [ 4 : ] ) remdata = bytes ( payload [ 4 : ] ) if newseq == self . remseq : if remdatalen > self . lastsize : remdata = bytes ( remdata [ 4 + self . lastsize : ] ) else : remdata = "" else : self . remseq = newseq self . lastsize = remdatalen ackpayload = bytearray ( ( 0 , self . remseq , remdatalen , flag ) ) try : self . send_payload ( ackpayload , retry = False ) except exc . IpmiException : self . close ( ) if remdata : self . _print_data ( remdata ) if self . myseq != 0 and ackseq == self . myseq : self . awaitingack = False if nacked and not breakdetected : newtext = self . lastpayload [ 4 + ackcount : ] with self . outputlock : if ( self . pendingoutput and not isinstance ( self . pendingoutput [ 0 ] , dict ) ) : self . pendingoutput [ 0 ] = newtext + self . pendingoutput [ 0 ] else : self . pendingoutput = [ newtext ] + self . pendingoutput self . _sendpendingoutput ( ) elif ackseq != 0 and self . awaitingack : self . send_payload ( payload = self . lastpayload )
|
SOL payload callback
|
60,113 |
def is_fpc ( self ) : if self . has_imm or self . has_xcc : return None if self . _fpc_variant is not None : return self . _fpc_variant fpc_ids = ( ( 19046 , 32 , 1063 ) , ( 20301 , 32 , 462 ) ) smm_id = ( 19046 , 32 , 1180 ) currid = ( self . oemid [ 'manufacturer_id' ] , self . oemid [ 'device_id' ] , self . oemid [ 'product_id' ] ) if currid in fpc_ids : self . _fpc_variant = 6 elif currid == smm_id : self . _fpc_variant = 2 return self . _fpc_variant
|
True if the target is a Lenovo nextscale fan power controller
|
60,114 |
def has_tsm ( self ) : if ( self . oemid [ 'manufacturer_id' ] == 19046 and self . oemid [ 'device_id' ] == 32 ) : try : self . ipmicmd . xraw_command ( netfn = 0x3a , command = 0xf ) except pygexc . IpmiException as ie : if ie . ipmicode == 193 : return False raise return True return False
|
True if this particular server has a TSM-based service processor
|
60,115 |
def set_oem_capping_enabled ( self , enable ) : if enable : statecode = 1 else : statecode = 0 if self . has_tsm : self . ipmicmd . xraw_command ( netfn = 0x3a , command = 0x1a , data = ( 3 , statecode ) ) return True
|
Set PSU based power capping
|
60,116 |
def decode_wireformat_uuid ( rawguid ) : if isinstance ( rawguid , list ) : rawguid = bytearray ( rawguid ) lebytes = struct . unpack_from ( '<IHH' , buffer ( rawguid [ : 8 ] ) ) bebytes = struct . unpack_from ( '>HHI' , buffer ( rawguid [ 8 : ] ) ) return '{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:04X}{5:08X}' . format ( lebytes [ 0 ] , lebytes [ 1 ] , lebytes [ 2 ] , bebytes [ 0 ] , bebytes [ 1 ] , bebytes [ 2 ] )
|
Decode a wire format UUID
|
60,117 |
def urlsplit ( url ) : proto , rest = url . split ( ':' , 1 ) host = '' if rest [ : 2 ] == '//' : host , rest = rest [ 2 : ] . split ( '/' , 1 ) rest = '/' + rest return proto , host , rest
|
Split an arbitrary url into protocol , host , rest
|
60,118 |
def get_ipv4 ( hostname ) : addrinfo = socket . getaddrinfo ( hostname , None , socket . AF_INET , socket . SOCK_STREAM ) return [ addrinfo [ x ] [ 4 ] [ 0 ] for x in range ( len ( addrinfo ) ) ]
|
Get list of ipv4 addresses for hostname
|
60,119 |
def _aespad ( data ) : currlen = len ( data ) + 1 neededpad = currlen % 16 if neededpad : neededpad = 16 - neededpad padval = 1 pad = bytearray ( neededpad ) while padval <= neededpad : pad [ padval - 1 ] = padval padval += 1 pad . append ( neededpad ) return pad
|
IPMI demands a certain pad scheme , per Table 13-20 , for AES-CBC encrypted payload fields .
|
60,120 |
def _make_bridge_request_msg ( self , channel , netfn , command ) : head = bytearray ( ( constants . IPMI_BMC_ADDRESS , constants . netfn_codes [ 'application' ] << 2 ) ) check_sum = _checksum ( * head ) boday = bytearray ( ( 0x81 , self . seqlun , constants . IPMI_SEND_MESSAGE_CMD , 0x40 | channel ) ) self . _add_request_entry ( ( constants . netfn_codes [ 'application' ] + 1 , self . seqlun , constants . IPMI_SEND_MESSAGE_CMD ) ) return head + bytearray ( ( check_sum , ) ) + boday
|
This function generates a message for a bridge request . It is a part of the IPMI payload .
|
60,121 |
def wait_for_rsp ( cls , timeout = None , callout = True ) : global iosockets curtime = _monotonic_time ( ) if timeout != 0 : with util . protect ( WAITING_SESSIONS ) : for session , parms in dictitems ( cls . waiting_sessions ) : if parms [ 'timeout' ] <= curtime : timeout = 0 break if ( timeout is not None and timeout < parms [ 'timeout' ] - curtime ) : continue timeout = parms [ 'timeout' ] - curtime with util . protect ( KEEPALIVE_SESSIONS ) : for session , parms in dictitems ( cls . keepalive_sessions ) : if parms [ 'timeout' ] <= curtime : timeout = 0 break if ( timeout is not None and timeout < parms [ 'timeout' ] - curtime ) : continue timeout = parms [ 'timeout' ] - curtime while cls . iterwaiters : waiter = cls . iterwaiters . pop ( ) waiter ( { 'success' : True } ) if timeout is not None : timeout = 0 if timeout is None : return 0 if _poller ( timeout = timeout ) : while sessionqueue : relsession = sessionqueue . popleft ( ) relsession . process_pktqueue ( ) sessionstodel = [ ] sessionstokeepalive = [ ] with util . protect ( KEEPALIVE_SESSIONS ) : for session , parms in dictitems ( cls . keepalive_sessions ) : if parms [ 'timeout' ] < curtime and not session . _isincommand ( ) : cls . keepalive_sessions [ session ] [ 'timeout' ] = _monotonic_time ( ) + MAX_IDLE - ( random . random ( ) * 4.9 ) sessionstokeepalive . append ( session ) for session in sessionstokeepalive : session . _keepalive ( ) with util . protect ( WAITING_SESSIONS ) : for session , parms in dictitems ( cls . waiting_sessions ) : if parms [ 'timeout' ] < curtime : sessionstodel . append ( session ) for session in sessionstodel : cls . waiting_sessions . pop ( session , None ) for session in sessionstodel : session . _timedout ( ) return len ( cls . waiting_sessions )
|
IPMI Session Event loop iteration
|
60,122 |
def register_keepalive ( self , cmd , callback ) : regid = random . random ( ) if self . _customkeepalives is None : self . _customkeepalives = { regid : ( cmd , callback ) } else : while regid in self . _customkeepalives : regid = random . random ( ) self . _customkeepalives [ regid ] = ( cmd , callback ) return regid
|
Register custom keepalive IPMI command
|
60,123 |
def _keepalive ( self ) : try : keptalive = False if self . _customkeepalives : kaids = list ( self . _customkeepalives . keys ( ) ) for keepalive in kaids : try : cmd , callback = self . _customkeepalives [ keepalive ] except TypeError : break except KeyError : continue if callable ( cmd ) : cmd ( ) continue keptalive = True cmd [ 'callback' ] = self . _keepalive_wrapper ( callback ) self . raw_command ( ** cmd ) if not keptalive : if self . incommand : return self . raw_command ( netfn = 6 , command = 1 , callback = self . _keepalive_wrapper ( None ) ) except exc . IpmiException : self . _mark_broken ( )
|
Performs a keepalive to avoid idle disconnect
|
60,124 |
def download ( self , url , file ) : if isinstance ( file , str ) or isinstance ( file , unicode ) : file = open ( file , 'wb' ) webclient = self . dupe ( ) webclient . request ( 'GET' , url ) rsp = webclient . getresponse ( ) self . _currdl = rsp self . _dlfile = file for chunk in iter ( lambda : rsp . read ( 16384 ) , '' ) : file . write ( chunk ) self . _currdl = None file . close ( )
|
Download a file to filename or file object
|
60,125 |
def upload ( self , url , filename , data = None , formname = None , otherfields = ( ) ) : if data is None : data = open ( filename , 'rb' ) self . _upbuffer = StringIO . StringIO ( get_upload_form ( filename , data , formname , otherfields ) ) ulheaders = self . stdheaders . copy ( ) ulheaders [ 'Content-Type' ] = 'multipart/form-data; boundary=' + BND ulheaders [ 'Content-Length' ] = len ( uploadforms [ filename ] ) self . ulsize = len ( uploadforms [ filename ] ) webclient = self . dupe ( ) webclient . request ( 'POST' , url , self . _upbuffer , ulheaders ) rsp = webclient . getresponse ( ) try : del uploadforms [ filename ] except KeyError : pass self . rspstatus = rsp . status if rsp . status != 200 : raise Exception ( 'Unexpected response in file upload: ' + rsp . read ( ) ) return rsp . read ( )
|
Upload a file to the url
|
60,126 |
def parse_inventory_category_entry ( raw , fields ) : r = raw obj = { } bytes_read = 0 discard = False for field in fields : value = struct . unpack_from ( field . fmt , r ) [ 0 ] read = struct . calcsize ( field . fmt ) bytes_read += read r = r [ read : ] if field . presence and not bool ( value ) : discard = True if not field . include : continue if ( field . fmt [ - 1 ] == "s" ) : value = value . rstrip ( "\x00" ) if ( field . mapper and value in field . mapper ) : value = field . mapper [ value ] if ( field . valuefunc ) : value = field . valuefunc ( value ) if not field . multivaluefunc : obj [ field . name ] = value else : for key in value : obj [ key ] = value [ key ] if discard : obj = None return bytes_read , obj
|
Parses one entry in an inventory category .
|
60,127 |
def sessionless_data ( self , data , sockaddr ) : if len ( data ) < 22 : return data = bytearray ( data ) if not ( data [ 0 ] == 6 and data [ 2 : 4 ] == b'\xff\x07' ) : return if data [ 4 ] == 6 : payloadtype = data [ 5 ] if payloadtype not in ( 0 , 16 ) : return if payloadtype == 16 : ServerSession ( self . authdata , self . kg , sockaddr , self . serversocket , data [ 16 : ] , self . uuid , bmc = self ) return data = data [ 2 : ] myaddr , netfnlun = struct . unpack ( '2B' , bytes ( data [ 14 : 16 ] ) ) netfn = ( netfnlun & 0b11111100 ) >> 2 mylun = netfnlun & 0b11 if netfn == 6 : if data [ 19 ] == 0x38 : verchannel , level = struct . unpack ( '2B' , bytes ( data [ 20 : 22 ] ) ) version = verchannel & 0b10000000 if version != 0b10000000 : return channel = verchannel & 0b1111 if channel != 0xe : return ( clientaddr , clientlun ) = struct . unpack ( 'BB' , bytes ( data [ 17 : 19 ] ) ) clientseq = clientlun >> 2 clientlun &= 0b11 level &= 0b1111 self . send_auth_cap ( myaddr , mylun , clientaddr , clientlun , clientseq , sockaddr )
|
Examines an unsolicited packet and decides the appropriate action .
|
60,128 |
def set_kg ( self , kg ) : try : self . kg = kg . encode ( 'utf-8' ) except AttributeError : self . kg = kg
|
Sets the Kg for the BMC to use
|
60,129 |
def source_debianize_name ( name ) : "make name acceptable as a Debian source package name" name = name . replace ( '_' , '-' ) name = name . replace ( '.' , '-' ) name = name . lower ( ) return name
|
make name acceptable as a Debian source package name
|
60,130 |
def get_date_822 ( ) : cmd = '/bin/date' if not os . path . exists ( cmd ) : raise ValueError ( '%s command does not exist.' % cmd ) args = [ cmd , '-R' ] result = get_cmd_stdout ( args ) . strip ( ) result = normstr ( result ) return result
|
return output of the 822-date command
|
60,131 |
def make_tarball ( tarball_fname , directory , cwd = None ) : "create a tarball from a directory" if tarball_fname . endswith ( '.gz' ) : opts = 'czf' else : opts = 'cf' args = [ '/bin/tar' , opts , tarball_fname , directory ] process_command ( args , cwd = cwd )
|
create a tarball from a directory
|
60,132 |
def expand_tarball ( tarball_fname , cwd = None ) : "expand a tarball" if tarball_fname . endswith ( '.gz' ) : opts = 'xzf' elif tarball_fname . endswith ( '.bz2' ) : opts = 'xjf' else : opts = 'xf' args = [ '/bin/tar' , opts , tarball_fname ] process_command ( args , cwd = cwd )
|
expand a tarball
|
60,133 |
def expand_zip ( zip_fname , cwd = None ) : "expand a zip" unzip_path = '/usr/bin/unzip' if not os . path . exists ( unzip_path ) : log . error ( 'ERROR: {} does not exist' . format ( unzip_path ) ) sys . exit ( 1 ) args = [ unzip_path , zip_fname ] res = subprocess . Popen ( [ args [ 0 ] , '-l' , args [ 1 ] ] , cwd = cwd , stdout = subprocess . PIPE , stderr = subprocess . PIPE , ) contents = [ ] for line in res . stdout . readlines ( ) [ 3 : - 2 ] : contents . append ( line . split ( ) [ - 1 ] ) commonprefix = os . path . commonprefix ( contents ) if not commonprefix : extdir = os . path . join ( cwd , os . path . basename ( zip_fname [ : - 4 ] ) ) args . extend ( [ '-d' , os . path . abspath ( extdir ) ] ) process_command ( args , cwd = cwd )
|
expand a zip
|
60,134 |
def parse_vals ( cfg , section , option ) : try : vals = cfg . get ( section , option ) except ConfigParser . NoSectionError as err : if section != 'DEFAULT' : vals = cfg . get ( 'DEFAULT' , option ) else : raise err vals = vals . split ( '#' ) [ 0 ] vals = vals . strip ( ) vals = vals . split ( ',' ) vals = [ v . strip ( ) for v in vals ] vals = [ v for v in vals if len ( v ) ] return vals
|
parse comma-separated values in debian control file style from .cfg
|
60,135 |
def parse_val ( cfg , section , option ) : vals = parse_vals ( cfg , section , option ) if len ( vals ) == 0 : return '' else : assert len ( vals ) == 1 , ( section , option , vals , type ( vals ) ) return vals [ 0 ]
|
extract a single value from .cfg
|
60,136 |
def check_cfg_files ( cfg_files , module_name ) : cfg = ConfigParser . SafeConfigParser ( ) cfg . read ( cfg_files ) if cfg . has_section ( module_name ) : section_items = cfg . items ( module_name ) else : section_items = [ ] default_items = cfg . items ( 'DEFAULT' ) n_items = len ( section_items ) + len ( default_items ) if n_items == 0 : log . warn ( 'configuration files were specified, but no options were ' 'found in "%s" or "DEFAULT" sections.' % ( module_name , ) )
|
check if the configuration files actually specify something
|
60,137 |
def _build_url ( self , host , handler ) : scheme = 'https' if self . use_https else 'http' return '%s://%s/%s' % ( scheme , host , handler )
|
Build a url for our request based on the host , handler and use_https property
|
60,138 |
def setting ( self , opt , val ) : opt = opt . encode ( ) if isinstance ( val , basestring ) : fluid_settings_setstr ( self . settings , opt , val ) elif isinstance ( val , int ) : fluid_settings_setint ( self . settings , opt , val ) elif isinstance ( val , float ) : fluid_settings_setnum ( self . settings , opt , val )
|
change an arbitrary synth setting , type-smart
|
60,139 |
def start ( self , driver = None , device = None , midi_driver = None ) : if driver is not None : assert ( driver in [ 'alsa' , 'oss' , 'jack' , 'portaudio' , 'sndmgr' , 'coreaudio' , 'Direct Sound' , 'pulseaudio' ] ) fluid_settings_setstr ( self . settings , b'audio.driver' , driver . encode ( ) ) if device is not None : fluid_settings_setstr ( self . settings , str ( 'audio.%s.device' % ( driver ) ) . encode ( ) , device . encode ( ) ) self . audio_driver = new_fluid_audio_driver ( self . settings , self . synth ) if midi_driver is not None : assert ( midi_driver in [ 'alsa_seq' , 'alsa_raw' , 'oss' , 'winmidi' , 'midishare' , 'coremidi' ] ) fluid_settings_setstr ( self . settings , b'midi.driver' , midi_driver . encode ( ) ) self . router = new_fluid_midi_router ( self . settings , fluid_synth_handle_midi_event , self . synth ) fluid_synth_set_midi_router ( self . synth , self . router ) self . midi_driver = new_fluid_midi_driver ( self . settings , fluid_midi_router_handle_midi_event , self . router )
|
Start audio output driver in separate background thread
|
60,140 |
def sfload ( self , filename , update_midi_preset = 0 ) : return fluid_synth_sfload ( self . synth , filename . encode ( ) , update_midi_preset )
|
Load SoundFont and return its ID
|
60,141 |
def channel_info ( self , chan ) : info = fluid_synth_channel_info_t ( ) fluid_synth_get_channel_info ( self . synth , chan , byref ( info ) ) return ( info . sfont_id , info . bank , info . program , info . name )
|
get ( soundfont id , bank , program , preset name ) of channel
|
60,142 |
def decompress_messages ( self , partitions_offmsgs ) : for pomsg in partitions_offmsgs : if pomsg [ 'message' ] : pomsg [ 'message' ] = self . decompress_fun ( pomsg [ 'message' ] ) yield pomsg
|
Decompress pre-defined compressed fields for each message .
|
60,143 |
def _init_offsets ( self , batchsize ) : upper_offsets = previous_lower_offsets = self . _lower_offsets if not upper_offsets : upper_offsets = self . latest_offsets self . _upper_offsets = { p : o for p , o in upper_offsets . items ( ) if o > self . _min_lower_offsets [ p ] } if self . _dupes : for p in list ( six . iterkeys ( self . _dupes ) ) : if p not in self . _upper_offsets : db = self . _dupes . pop ( p ) db . close ( ) os . remove ( db . filename ) partition_batchsize = 0 if self . _upper_offsets : partition_batchsize = max ( int ( batchsize * self . __scan_excess ) , batchsize ) self . _lower_offsets = self . _upper_offsets . copy ( ) total_offsets_run = 0 for p in sorted ( self . _upper_offsets . keys ( ) ) : if total_offsets_run > 0 and partition_batchsize > batchsize : partition_batchsize = batchsize if partition_batchsize > 0 : self . _lower_offsets [ p ] = max ( self . _upper_offsets [ p ] - partition_batchsize , self . _min_lower_offsets [ p ] ) offsets_run = self . _upper_offsets [ p ] - self . _lower_offsets [ p ] total_offsets_run += offsets_run partition_batchsize = partition_batchsize - offsets_run else : break log . info ( 'Offset run: %d' , total_offsets_run ) if previous_lower_offsets is not None and set ( previous_lower_offsets . keys ( ) ) != set ( self . _lower_offsets ) : self . _create_scan_consumer ( self . _lower_offsets . keys ( ) ) self . _update_offsets ( self . _lower_offsets ) log . info ( 'Initial offsets for topic %s: %s' , self . _topic , repr ( self . _lower_offsets ) ) log . info ( 'Target offsets for topic %s: %s' , self . _topic , repr ( self . _upper_offsets ) ) return batchsize
|
Compute new initial and target offsets and do other maintenance tasks
|
60,144 |
def _filter_deleted_records ( self , batches ) : for batch in batches : for record in batch : if not self . must_delete_record ( record ) : yield record
|
Filter out deleted records
|
60,145 |
def get_catalog ( mid ) : if isinstance ( mid , _uuid . UUID ) : mid = mid . hex return _get_catalog ( mid )
|
Return catalog entry for the specified ID .
|
60,146 |
def _convert_entry ( self , entry ) : result = { } for key , value in entry . items ( ) : if isinstance ( value , list ) : result [ key ] = [ self . _convert_field ( key , val ) for val in value ] else : result [ key ] = self . _convert_field ( key , value ) return result
|
Convert entire journal entry utilising _convert_field .
|
60,147 |
def add_match ( self , * args , ** kwargs ) : args = list ( args ) args . extend ( _make_line ( key , val ) for key , val in kwargs . items ( ) ) for arg in args : super ( Reader , self ) . add_match ( arg )
|
Add one or more matches to the filter journal log entries .
|
60,148 |
def get_next ( self , skip = 1 ) : r if super ( Reader , self ) . _next ( skip ) : entry = super ( Reader , self ) . _get_all ( ) if entry : entry [ '__REALTIME_TIMESTAMP' ] = self . _get_realtime ( ) entry [ '__MONOTONIC_TIMESTAMP' ] = self . _get_monotonic ( ) entry [ '__CURSOR' ] = self . _get_cursor ( ) return self . _convert_entry ( entry ) return dict ( )
|
Return the next log entry as a dictionary .
|
60,149 |
def query_unique ( self , field ) : return set ( self . _convert_field ( field , value ) for value in super ( Reader , self ) . query_unique ( field ) )
|
Return a set of unique values appearing in the journal for the given field .
|
60,150 |
def wait ( self , timeout = None ) : us = - 1 if timeout is None else int ( timeout * 1000000 ) return super ( Reader , self ) . wait ( us )
|
Wait for a change in the journal .
|
60,151 |
def seek_realtime ( self , realtime ) : if isinstance ( realtime , _datetime . datetime ) : realtime = int ( float ( realtime . strftime ( "%s.%f" ) ) * 1000000 ) elif not isinstance ( realtime , int ) : realtime = int ( realtime * 1000000 ) return super ( Reader , self ) . seek_realtime ( realtime )
|
Seek to a matching journal entry nearest to timestamp time .
|
60,152 |
def seek_monotonic ( self , monotonic , bootid = None ) : if isinstance ( monotonic , _datetime . timedelta ) : monotonic = monotonic . total_seconds ( ) monotonic = int ( monotonic * 1000000 ) if isinstance ( bootid , _uuid . UUID ) : bootid = bootid . hex return super ( Reader , self ) . seek_monotonic ( monotonic , bootid )
|
Seek to a matching journal entry nearest to monotonic time .
|
60,153 |
def log_level ( self , level ) : if 0 <= level <= 7 : for i in range ( level + 1 ) : self . add_match ( PRIORITY = "%d" % i ) else : raise ValueError ( "Log level must be 0 <= level <= 7" )
|
Set maximum log level by setting matches for PRIORITY .
|
60,154 |
def messageid_match ( self , messageid ) : if isinstance ( messageid , _uuid . UUID ) : messageid = messageid . hex self . add_match ( MESSAGE_ID = messageid )
|
Add match for log entries with specified messageid .
|
60,155 |
def this_boot ( self , bootid = None ) : if bootid is None : bootid = _id128 . get_boot ( ) . hex else : bootid = getattr ( bootid , 'hex' , bootid ) self . add_match ( _BOOT_ID = bootid )
|
Add match for _BOOT_ID for current boot or the specified boot ID .
|
60,156 |
def this_machine ( self , machineid = None ) : if machineid is None : machineid = _id128 . get_machine ( ) . hex else : machineid = getattr ( machineid , 'hex' , machineid ) self . add_match ( _MACHINE_ID = machineid )
|
Add match for _MACHINE_ID equal to the ID of this machine .
|
60,157 |
def emit ( self , record ) : try : msg = self . format ( record ) pri = self . map_priority ( record . levelno ) extras = self . _extra . copy ( ) if record . exc_text : extras [ 'EXCEPTION_TEXT' ] = record . exc_text if record . exc_info : extras [ 'EXCEPTION_INFO' ] = record . exc_info if record . args : extras [ 'CODE_ARGS' ] = str ( record . args ) extras . update ( record . __dict__ ) self . send ( msg , PRIORITY = format ( pri ) , LOGGER = record . name , THREAD_NAME = record . threadName , PROCESS_NAME = record . processName , CODE_FILE = record . pathname , CODE_LINE = record . lineno , CODE_FUNC = record . funcName , ** extras ) except Exception : self . handleError ( record )
|
Write record as a journal event .
|
60,158 |
def listen_fds ( unset_environment = True ) : num = _listen_fds ( unset_environment ) return list ( range ( LISTEN_FDS_START , LISTEN_FDS_START + num ) )
|
Return a list of socket activated descriptors
|
60,159 |
def connect ( self ) : self . _socket = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) self . _socket . settimeout ( self . _connect_timeout ) SocketError . wrap ( self . _socket . connect , ( self . host , self . port ) ) self . _socket . settimeout ( None ) self . _socket_file = self . _socket . makefile ( 'rb' )
|
Connect to beanstalkd server .
|
60,160 |
def close ( self ) : try : self . _socket . sendall ( 'quit\r\n' ) except socket . error : pass try : self . _socket . close ( ) except socket . error : pass
|
Close connection to server .
|
60,161 |
def put ( self , body , priority = DEFAULT_PRIORITY , delay = 0 , ttr = DEFAULT_TTR ) : assert isinstance ( body , str ) , 'Job body must be a str instance' jid = self . _interact_value ( 'put %d %d %d %d\r\n%s\r\n' % ( priority , delay , ttr , len ( body ) , body ) , [ 'INSERTED' ] , [ 'JOB_TOO_BIG' , 'BURIED' , 'DRAINING' ] ) return int ( jid )
|
Put a job into the current tube . Returns job id .
|
60,162 |
def reserve ( self , timeout = None ) : if timeout is not None : command = 'reserve-with-timeout %d\r\n' % timeout else : command = 'reserve\r\n' try : return self . _interact_job ( command , [ 'RESERVED' ] , [ 'DEADLINE_SOON' , 'TIMED_OUT' ] ) except CommandFailed : exc = sys . exc_info ( ) [ 1 ] _ , status , results = exc . args if status == 'TIMED_OUT' : return None elif status == 'DEADLINE_SOON' : raise DeadlineSoon ( results )
|
Reserve a job from one of the watched tubes with optional timeout in seconds . Returns a Job object or None if the request times out .
|
60,163 |
def release ( self , jid , priority = DEFAULT_PRIORITY , delay = 0 ) : self . _interact ( 'release %d %d %d\r\n' % ( jid , priority , delay ) , [ 'RELEASED' , 'BURIED' ] , [ 'NOT_FOUND' ] )
|
Release a reserved job back into the ready queue .
|
60,164 |
def delete ( self ) : self . conn . delete ( self . jid ) self . reserved = False
|
Delete this job .
|
60,165 |
def release ( self , priority = None , delay = 0 ) : if self . reserved : self . conn . release ( self . jid , priority or self . _priority ( ) , delay ) self . reserved = False
|
Release this job back into the ready queue .
|
60,166 |
def bury ( self , priority = None ) : if self . reserved : self . conn . bury ( self . jid , priority or self . _priority ( ) ) self . reserved = False
|
Bury this job .
|
60,167 |
def abspath ( self ) : "Absolute path to the local storage" return Path ( os . path . abspath ( os . path . expanduser ( str ( self . path ) ) ) )
|
Absolute path to the local storage
|
60,168 |
def fetch ( self , fname , processor = None ) : self . _assert_file_in_registry ( fname ) if not self . abspath . exists ( ) : os . makedirs ( str ( self . abspath ) ) full_path = self . abspath / fname in_storage = full_path . exists ( ) if not in_storage : action = "download" elif in_storage and file_hash ( str ( full_path ) ) != self . registry [ fname ] : action = "update" else : action = "fetch" if action in ( "download" , "update" ) : action_word = dict ( download = "Downloading" , update = "Updating" ) warn ( "{} data file '{}' from remote data store '{}' to '{}'." . format ( action_word [ action ] , fname , self . get_url ( fname ) , str ( self . path ) ) ) self . _download_file ( fname ) if processor is not None : return processor ( str ( full_path ) , action , self ) return str ( full_path )
|
Get the absolute path to a file in the local storage .
|
60,169 |
def get_url ( self , fname ) : self . _assert_file_in_registry ( fname ) return self . urls . get ( fname , "" . join ( [ self . base_url , fname ] ) )
|
Get the full URL to download a file in the registry .
|
60,170 |
def _download_file ( self , fname ) : destination = self . abspath / fname source = self . get_url ( fname ) fout = tempfile . NamedTemporaryFile ( delete = False , dir = str ( self . abspath ) ) try : with fout : response = requests . get ( source , stream = True ) response . raise_for_status ( ) for chunk in response . iter_content ( chunk_size = 1024 ) : if chunk : fout . write ( chunk ) tmphash = file_hash ( fout . name ) if tmphash != self . registry [ fname ] : raise ValueError ( "Hash of downloaded file '{}' doesn't match the entry in the registry:" " Expected '{}' and got '{}'." . format ( fout . name , self . registry [ fname ] , tmphash ) ) if not os . path . exists ( str ( destination . parent ) ) : os . makedirs ( str ( destination . parent ) ) shutil . move ( fout . name , str ( destination ) ) except Exception : os . remove ( fout . name ) raise
|
Download a file from the remote data storage to the local storage .
|
60,171 |
def load_registry ( self , fname ) : with open ( fname ) as fin : for linenum , line in enumerate ( fin ) : elements = line . strip ( ) . split ( ) if len ( elements ) > 3 or len ( elements ) < 2 : raise IOError ( "Expected 2 or 3 elements in line {} but got {}." . format ( linenum , len ( elements ) ) ) file_name = elements [ 0 ] file_sha256 = elements [ 1 ] if len ( elements ) == 3 : file_url = elements [ 2 ] self . urls [ file_name ] = file_url self . registry [ file_name ] = file_sha256
|
Load entries from a file and add them to the registry .
|
60,172 |
def is_available ( self , fname ) : self . _assert_file_in_registry ( fname ) source = self . get_url ( fname ) response = requests . head ( source , allow_redirects = True ) return bool ( response . status_code == 200 )
|
Check availability of a remote file without downloading it .
|
60,173 |
def file_hash ( fname ) : chunksize = 65536 hasher = hashlib . sha256 ( ) with open ( fname , "rb" ) as fin : buff = fin . read ( chunksize ) while buff : hasher . update ( buff ) buff = fin . read ( chunksize ) return hasher . hexdigest ( )
|
Calculate the SHA256 hash of a given file .
|
60,174 |
def check_version ( version , fallback = "master" ) : parse = Version ( version ) if parse . local is not None : return fallback return version
|
Check that a version string is PEP440 compliant and there are no unreleased changes .
|
60,175 |
def make_registry ( directory , output , recursive = True ) : directory = Path ( directory ) if recursive : pattern = "**/*" else : pattern = "*" files = sorted ( [ str ( path . relative_to ( directory ) ) for path in directory . glob ( pattern ) if path . is_file ( ) ] ) hashes = [ file_hash ( str ( directory / fname ) ) for fname in files ] with open ( output , "w" ) as outfile : for fname , fhash in zip ( files , hashes ) : outfile . write ( "{} {}\n" . format ( fname . replace ( "\\" , "/" ) , fhash ) )
|
Make a registry of files and hashes for the given directory .
|
60,176 |
def loads ( s , ** kwargs ) : try : return _engine [ 0 ] ( s ) except _engine [ 2 ] : why = sys . exc_info ( ) [ 1 ] raise JSONError ( why )
|
Loads JSON object .
|
60,177 |
def dumps ( o , ** kwargs ) : try : return _engine [ 1 ] ( o ) except : ExceptionClass , why = sys . exc_info ( ) [ : 2 ] if any ( [ ( issubclass ( ExceptionClass , e ) ) for e in _engine [ 2 ] ] ) : raise JSONError ( why ) else : raise why
|
Dumps JSON object .
|
60,178 |
def from_table ( table , engine , limit = None ) : sql = select ( [ table ] ) if limit is not None : sql = sql . limit ( limit ) result_proxy = engine . execute ( sql ) return from_db_cursor ( result_proxy . cursor )
|
Select data in a database table and put into prettytable .
|
60,179 |
def from_data ( data ) : if len ( data ) == 0 : return None else : ptable = PrettyTable ( ) ptable . field_names = data [ 0 ] . keys ( ) for row in data : ptable . add_row ( row ) return ptable
|
Construct a Prettytable from list of rows .
|
60,180 |
def generate_table ( self , rows ) : table = PrettyTable ( ** self . kwargs ) for row in self . rows : if len ( row [ 0 ] ) < self . max_row_width : appends = self . max_row_width - len ( row [ 0 ] ) for i in range ( 1 , appends ) : row [ 0 ] . append ( "-" ) if row [ 1 ] is True : self . make_fields_unique ( row [ 0 ] ) table . field_names = row [ 0 ] else : table . add_row ( row [ 0 ] ) return table
|
Generates a PrettyTable object from a list of rows .
|
60,181 |
def sql_to_csv ( sql , engine , filepath , chunksize = 1000 , overwrite = False ) : if overwrite : if os . path . exists ( filepath ) : raise Exception ( "'%s' already exists!" % filepath ) import pandas as pd columns = [ str ( column . name ) for column in sql . columns ] with open ( filepath , "w" ) as f : df = pd . DataFrame ( [ ] , columns = columns ) df . to_csv ( f , header = True , index = False ) result_proxy = engine . execute ( sql ) while True : data = result_proxy . fetchmany ( chunksize ) if len ( data ) == 0 : break else : df = pd . DataFrame ( data , columns = columns ) df . to_csv ( f , header = False , index = False )
|
Export sql result to csv file .
|
60,182 |
def table_to_csv ( table , engine , filepath , chunksize = 1000 , overwrite = False ) : sql = select ( [ table ] ) sql_to_csv ( sql , engine , filepath , chunksize )
|
Export entire table to a csv file .
|
60,183 |
def update_all ( engine , table , data , upsert = False ) : data = ensure_list ( data ) ins = table . insert ( ) upd = table . update ( ) pk_cols = OrderedDict ( ) for column in table . _columns : if column . primary_key : pk_cols [ column . name ] = column data_to_insert = list ( ) if len ( pk_cols ) >= 2 : for row in data : result = engine . execute ( upd . where ( and_ ( * [ col == row [ name ] for name , col in pk_cols . items ( ) ] ) ) . values ( ** row ) ) if result . rowcount == 0 : data_to_insert . append ( row ) elif len ( pk_cols ) == 1 : for row in data : result = engine . execute ( upd . where ( [ col == row [ name ] for name , col in pk_cols . items ( ) ] [ 0 ] ) . values ( ** row ) ) if result . rowcount == 0 : data_to_insert . append ( row ) else : data_to_insert = data if upsert : if len ( data_to_insert ) : engine . execute ( ins , data_to_insert )
|
Update data by its primary_key column .
|
60,184 |
def upsert_all ( engine , table , data ) : update_all ( engine , table , data , upsert = True )
|
Update data by primary key columns . If not able to update , do insert .
|
60,185 |
def pk_names ( cls ) : if cls . _cache_pk_names is None : cls . _cache_pk_names = cls . _get_primary_key_names ( ) return cls . _cache_pk_names
|
Primary key column name list .
|
60,186 |
def id_field_name ( cls ) : if cls . _cache_id_field_name is None : pk_names = cls . pk_names ( ) if len ( pk_names ) == 1 : cls . _cache_id_field_name = pk_names [ 0 ] else : raise ValueError ( "{classname} has more than 1 primary key!" . format ( classname = cls . __name__ ) ) return cls . _cache_id_field_name
|
If only one primary_key then return it . Otherwise raise ValueError .
|
60,187 |
def values ( self ) : return [ getattr ( self , c . name , None ) for c in self . __table__ . _columns ]
|
return list of values of all declared columns .
|
60,188 |
def items ( self ) : return [ ( c . name , getattr ( self , c . name , None ) ) for c in self . __table__ . _columns ]
|
return list of ( name , value ) pairs for all declared columns .
|
60,189 |
def to_dict ( self , include_null = True ) : if include_null : return dict ( self . items ( ) ) else : return { attr : value for attr , value in self . __dict__ . items ( ) if not attr . startswith ( "_sa_" ) }
|
Convert to dict .
|
60,190 |
def to_OrderedDict ( self , include_null = True ) : if include_null : return OrderedDict ( self . items ( ) ) else : items = list ( ) for c in self . __table__ . _columns : try : items . append ( ( c . name , self . __dict__ [ c . name ] ) ) except KeyError : pass return OrderedDict ( items )
|
Convert to OrderedDict .
|
60,191 |
def by_id ( cls , _id , engine_or_session ) : ses , auto_close = ensure_session ( engine_or_session ) obj = ses . query ( cls ) . get ( _id ) if auto_close : ses . close ( ) return obj
|
Get one object by primary_key value .
|
60,192 |
def by_sql ( cls , sql , engine_or_session ) : ses , auto_close = ensure_session ( engine_or_session ) result = ses . query ( cls ) . from_statement ( sql ) . all ( ) if auto_close : ses . close ( ) return result
|
Query with sql statement or texture sql .
|
60,193 |
def fixcode ( ** kwargs ) : repo_dir = Path ( __file__ ) . parent . absolute ( ) source_dir = Path ( repo_dir , package . __name__ ) if source_dir . exists ( ) : print ( "Source code locate at: '%s'." % source_dir ) print ( "Auto pep8 all python file ..." ) source_dir . autopep8 ( ** kwargs ) else : print ( "Source code directory not found!" ) unittest_dir = Path ( repo_dir , "tests" ) if unittest_dir . exists ( ) : print ( "Unittest code locate at: '%s'." % unittest_dir ) print ( "Auto pep8 all python file ..." ) unittest_dir . autopep8 ( ** kwargs ) else : print ( "Unittest code directory not found!" ) print ( "Complete!" )
|
auto pep8 format all python files in source code and tests dir .
|
60,194 |
def _get_rows ( self , options ) : if options [ "oldsortslice" ] : rows = copy . deepcopy ( self . _rows [ options [ "start" ] : options [ "end" ] ] ) else : rows = copy . deepcopy ( self . _rows ) if options [ "sortby" ] : sortindex = self . _field_names . index ( options [ "sortby" ] ) rows = [ [ row [ sortindex ] ] + row for row in rows ] rows . sort ( reverse = options [ "reversesort" ] , key = options [ "sort_key" ] ) rows = [ row [ 1 : ] for row in rows ] if not options [ "oldsortslice" ] : rows = rows [ options [ "start" ] : options [ "end" ] ] return rows
|
Return only those data rows that should be printed based on slicing and sorting .
|
60,195 |
def create_postgresql_pg8000 ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_postgresql_pg8000 ( username , password , host , port , database ) , ** kwargs )
|
create an engine connected to a postgresql database using pg8000 .
|
60,196 |
def create_postgresql_pygresql ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_postgresql_pygresql ( username , password , host , port , database ) , ** kwargs )
|
create an engine connected to a postgresql database using pygresql .
|
60,197 |
def create_postgresql_psycopg2cffi ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_postgresql_psycopg2cffi ( username , password , host , port , database ) , ** kwargs )
|
create an engine connected to a postgresql database using psycopg2cffi .
|
60,198 |
def create_postgresql_pypostgresql ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_postgresql_pypostgresql ( username , password , host , port , database ) , ** kwargs )
|
create an engine connected to a postgresql database using pypostgresql .
|
60,199 |
def create_mysql_mysqlconnector ( username , password , host , port , database , ** kwargs ) : return create_engine ( _create_mysql_mysqlconnector ( username , password , host , port , database ) , ** kwargs )
|
create an engine connected to a mysql database using mysqlconnector .
|
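
The table above pairs whitespace-tokenized Python functions (question) with one-line docstring summaries (target). The sketch below shows one hedged way such rows might be consumed once parsed into (idx, question, target) tuples; the `rows` list and the `detokenize` helper are illustrative assumptions, not part of the dataset.

```python
# Minimal sketch (assumption): rows have been parsed from the table above
# into (idx, question, target) tuples.
def detokenize(code):
    """Roughly collapse the whitespace-separated token stream into readable source.

    This is only a heuristic; a faithful reconstruction would need a
    Python-aware detokenizer.
    """
    return (code.replace(" ( ", "(")
                .replace(" )", ")")
                .replace(" . ", ".")
                .replace(" ,", ",")
                .replace(" :", ":"))

rows = [
    (60100,
     "def get_hostname ( self ) : self . oem_init ( )",
     "Get the hostname used by the BMC in various contexts"),
]

for idx, question, target in rows:
    # Print the docstring summary followed by the reflowed source.
    print(idx, "->", target)
    print(detokenize(question))
```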