idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
62,100
def get_suffix(name):
    """Return the file name's extension if it is a known formatting language.

    :param name: file name to inspect
    :return: the extension string when it is a key of the module-level
        ``LANGS`` mapping, otherwise ``False``
    """
    # Guard clause replaces the original nested if/else; membership is
    # tested on the mapping directly instead of the redundant
    # ``LANGS.keys()`` call.
    if "." not in name:
        return False
    ext = name.rsplit(".", 1)[-1]
    return ext if ext in LANGS else False
Check whether the file name has a valid suffix for formatting. If it has one, return it; otherwise return False.
62,101
def _raise_for_status(response):
    """Translate an HTTP error response into a DB-API style exception.

    Ensures that only the crate exception types defined by the DB-API
    specification are raised.  Returns silently for non-error statuses.

    :param response: urllib3-style response with ``status``, ``reason``,
        ``headers`` and ``data`` attributes
    :raises ConnectionError: for HTTP 503 (service unavailable)
    :raises ProgrammingError: for any other 4xx/5xx response
    """
    message = ''
    if 400 <= response.status < 500:
        message = '%s Client Error: %s' % (response.status, response.reason)
    elif 500 <= response.status < 600:
        message = '%s Server Error: %s' % (response.status, response.reason)
    else:
        # Not an error status; nothing to raise.
        return
    if response.status == 503:
        # Service unavailable maps to a connection-level error.
        raise ConnectionError(message)
    if response.headers.get("content-type", "").startswith("application/json"):
        data = json.loads(response.data.decode('utf-8'))
        error = data.get('error', {})
        error_trace = data.get('error_trace', None)
        if "results" in data:
            # Bulk responses carry one error message per result row.
            errors = [res["error_message"] for res in data["results"]
                      if res.get("error_message")]
            if errors:
                raise ProgrammingError("\n".join(errors))
        if isinstance(error, dict):
            raise ProgrammingError(error.get('message', ''),
                                   error_trace=error_trace)
        raise ProgrammingError(error, error_trace=error_trace)
    # Non-JSON error body: fall back to the generic status message.
    raise ProgrammingError(message)
make sure that only crate . exceptions are raised that are defined in the DB - API specification
62,102
def _server_url(server):
    """Normalize a server string into a ``scheme://netloc`` URL.

    A bare host (no scheme) is assumed to be plain HTTP.
    """
    if not _HTTP_PAT.match(server):
        server = 'http://%s' % server
    parsed = urlparse(server)
    return '%s://%s' % (parsed.scheme, parsed.netloc)
Normalizes a given server string to an url
62,103
def sql(self, stmt, parameters=None, bulk_parameters=None):
    """Execute an SQL statement against the crate server.

    Returns the decoded JSON response, or ``None`` when *stmt* is ``None``.
    """
    if stmt is None:
        return None
    payload = _create_sql_payload(stmt, parameters, bulk_parameters)
    logger.debug('Sending request to %s with payload: %s', self.path, payload)
    content = self._json_request('POST', self.path, data=payload)
    logger.debug("JSON response for stmt(%s): %s", stmt, content)
    return content
Execute SQL stmt against the crate server .
62,104
def blob_put(self, table, digest, data):
    """Store *data* as a blob under *table*/*digest*.

    Returns ``True`` when the blob was newly created (201) and ``False``
    when it already existed (409); raises for 400/404 or other errors.
    """
    response = self._request('PUT', _blob_path(table, digest), data=data)
    status = response.status
    if status in (201, 409):
        # 201 -> created, 409 -> already present
        return status == 201
    if status in (400, 404):
        raise BlobLocationNotFoundException(table, digest)
    _raise_for_status(response)
Stores the contents of the file like
62,105
def blob_get(self, table, digest, chunk_size=1024 * 128):
    """Return a chunked stream over the contents of the blob *digest*."""
    resp = self._request('GET', _blob_path(table, digest), stream=True)
    if resp.status == 404:
        raise DigestNotFoundException(table, digest)
    _raise_for_status(resp)
    return resp.stream(amt=chunk_size)
Returns a file like object representing the contents of the blob with the given digest .
62,106
def blob_exists(self, table, digest):
    """Return ``True`` if the blob with *digest* exists in *table*."""
    resp = self._request('HEAD', _blob_path(table, digest))
    if resp.status in (200, 404):
        return resp.status == 200
    _raise_for_status(resp)
Returns true if the blob with the given digest exists under the given table .
62,107
def _request(self, method, path, server=None, **kwargs):
    """Execute a request against the cluster, retrying across servers.

    When *server* is given, the request is pinned to that server and any
    transport error is raised immediately; otherwise servers are picked
    from the pool and dropped on failure until one responds.

    :raises ConnectionError: pinned server unavailable
    :raises ProgrammingError: any unexpected (non-transport) error
    """
    while True:
        next_server = server or self._get_server()
        try:
            response = self.server_pool[next_server].request(
                method, path,
                username=self.username,
                password=self.password,
                schema=self.schema,
                **kwargs)
            redirect_location = response.get_redirect_location()
            if redirect_location and 300 <= response.status <= 308:
                # Follow the redirect once, remembering the new server.
                redirect_server = _server_url(redirect_location)
                self._add_server(redirect_server)
                return self._request(method, path, server=redirect_server, **kwargs)
            if not server and response.status in SRV_UNAVAILABLE_STATUSES:
                # Server answered but is unavailable: drop it and retry
                # the loop with another pool member.
                with self._lock:
                    self._drop_server(next_server, response.reason)
            else:
                return response
        except (urllib3.exceptions.MaxRetryError,
                urllib3.exceptions.ReadTimeoutError,
                urllib3.exceptions.SSLError,
                urllib3.exceptions.HTTPError,
                urllib3.exceptions.ProxyError,) as ex:
            ex_message = _ex_to_message(ex)
            if server:
                # Pinned request: no failover, surface the error.
                raise ConnectionError("Server not available, exception: %s" % ex_message)
            # For some protocol errors the server should stay in the
            # active pool (see PRESERVE_ACTIVE_SERVER_EXCEPTIONS).
            preserve_server = False
            if isinstance(ex, urllib3.exceptions.ProtocolError):
                preserve_server = any(t in [type(arg) for arg in ex.args]
                                      for t in PRESERVE_ACTIVE_SERVER_EXCEPTIONS)
            if (not preserve_server):
                with self._lock:
                    self._drop_server(next_server, ex_message)
        except Exception as e:
            raise ProgrammingError(_ex_to_message(e))
Execute a request to the cluster
62,108
def _json_request(self, method, path, data):
    """Issue a request against the crate HTTP API and decode the JSON body.

    An empty response body is returned as-is (not JSON-decoded).
    """
    resp = self._request(method, path, data=data)
    _raise_for_status(resp)
    return _json_from_response(resp) if len(resp.data) > 0 else resp.data
Issue request against the crate HTTP API .
62,109
def _get_server(self):
    """Return the server to use for the next request.

    Walks the inactive-server heap and re-activates entries whose retry
    interval has elapsed.  If no server is active afterwards, the
    longest-inactive one is force-restored so a request can still be
    attempted.  Advances the round-robin pointer before returning.
    """
    with self._lock:
        inactive_server_count = len(self._inactive_servers)
        for _ in range(inactive_server_count):
            try:
                ts, server, message = heapq.heappop(self._inactive_servers)
            except IndexError:
                # Heap drained early; nothing left to inspect.
                pass
            else:
                if (ts + self.retry_interval) > time():
                    # Not yet due for retry; push it back unchanged.
                    heapq.heappush(self._inactive_servers, (ts, server, message))
                else:
                    self._active_servers.append(server)
                    # Fix: logger.warn is a deprecated alias; use
                    # logger.warning (consistent with _drop_server).
                    logger.warning("Restored server %s into active pool", server)
        if not self._active_servers:
            ts, server, message = heapq.heappop(self._inactive_servers)
            self._active_servers.append(server)
            logger.info("Restored server %s into active pool", server)
        server = self._active_servers[0]
        self._roundrobin()
        return server
Get server to use for request . Also process inactive server list re - add them after given interval .
62,110
def _drop_server(self, server, message):
    """Move *server* from the active pool onto the inactive heap.

    :raises ConnectionError: when this leaves no active server
    """
    if server in self._active_servers:
        self._active_servers.remove(server)
        heapq.heappush(self._inactive_servers, (time(), server, message))
        logger.warning("Removed server %s from active pool", server)
    if not self._active_servers:
        raise ConnectionError(
            ("No more Servers available, "
             "exception from last server: %s") % message)
Drop server from active list and adds it to the inactive ones .
62,111
def match(column, term, match_type=None, options=None):
    """Generate a match predicate for fulltext search.

    Thin factory around ``Match``; all arguments are forwarded unchanged.
    """
    return Match(column, term, match_type, options)
Generates match predicate for fulltext search
62,112
def put(self, f, digest=None):
    """Upload a blob from the file-like object *f*.

    If *digest* is supplied, returns whether the blob was newly created;
    otherwise the digest is computed from *f* and returned.
    """
    actual_digest = digest if digest else self._compute_digest(f)
    created = self.conn.client.blob_put(self.container_name, actual_digest, f)
    return created if digest else actual_digest
Upload a blob
62,113
def get(self, digest, chunk_size=1024 * 128):
    """Return a chunked iterator over the contents of the blob *digest*."""
    client = self.conn.client
    return client.blob_get(self.container_name, digest, chunk_size)
Return the contents of a blob
62,114
def delete(self, digest):
    """Delete the blob identified by *digest*."""
    client = self.conn.client
    return client.blob_del(self.container_name, digest)
Delete a blob
62,115
def exists(self, digest):
    """Return whether a blob with *digest* exists in this container."""
    client = self.conn.client
    return client.blob_exists(self.container_name, digest)
Check if a blob exists
62,116
def next(self):
    """Return the next row of the current result set.

    :raises ProgrammingError: when no query has been executed yet, or
        when the cursor has been closed
    """
    if self.rows is None:
        raise ProgrammingError("No result available. " +
                               "execute() or executemany() must be called first.")
    if self._closed:
        raise ProgrammingError("Cursor closed")
    return next(self.rows)
Return the next row of a query result set respecting if cursor was closed .
62,117
def duration(self):
    """Server-side duration of the last query in milliseconds.

    Returns -1 when the cursor is closed or no result is available.
    """
    result = self._result
    if self._closed or not result or "duration" not in result:
        return -1
    return result.get("duration", 0)
This read - only attribute specifies the server - side duration of a query in milliseconds .
62,118
def rewrite_update(clauseelement, multiparams, params):
    """Rewrite UPDATE params so MutableDict values become partial updates.

    For each parameter value that is a MutableDict with tracked changes,
    the whole-object assignment is replaced by per-subkey assignments of
    the form ``col['subkey']``; deleted subkeys are set to None.
    Unchanged values pass through untouched.
    """
    newmultiparams = []
    _multiparams = multiparams[0]
    if len(_multiparams) == 0:
        # Nothing to rewrite.
        return clauseelement, multiparams, params
    for _params in _multiparams:
        newparams = {}
        for key, val in _params.items():
            if (not isinstance(val, MutableDict) or
                (not any(val._changed_keys) and not any(val._deleted_keys))):
                # Plain value or pristine MutableDict: keep as-is.
                newparams[key] = val
                continue
            # Emit one bind per changed subkey ...
            for subkey, subval in val.items():
                if subkey in val._changed_keys:
                    newparams["{0}['{1}']".format(key, subkey)] = subval
            # ... and null out deleted subkeys.
            for subkey in val._deleted_keys:
                newparams["{0}['{1}']".format(key, subkey)] = None
        newmultiparams.append(newparams)
    _multiparams = (newmultiparams,)
    clause = clauseelement.values(newmultiparams[0])
    # Marker consumed elsewhere in the dialect to flag the rewrite.
    clause._crate_specific = True
    return clause, _multiparams, params
change the params to enable partial updates
62,119
def _get_crud_params(compiler, stmt, **kw):
    """Extract (column, bind) value pairs from CRUD statement parameters.

    Adapted to SQLAlchemy's private ``crud`` helpers; branches on
    SA_VERSION because ``_key_getters_for_crud_column`` changed its
    signature in SQLAlchemy 1.1.
    """
    compiler.postfetch = []
    compiler.insert_prefetch = []
    compiler.update_prefetch = []
    compiler.returning = []
    if compiler.column_keys is None and stmt.parameters is None:
        # No explicit columns/params: bind every table column as required.
        return [(c, crud._create_bind_param(compiler, c, None, required=True))
                for c in stmt.table.columns]
    if stmt._has_multi_parameters:
        # executemany-style: key structure comes from the first param set.
        stmt_parameters = stmt.parameters[0]
    else:
        stmt_parameters = stmt.parameters
    if SA_VERSION >= SA_1_1:
        _column_as_key, _getattr_col_key, _col_bind_name = crud._key_getters_for_crud_column(compiler, stmt)
    else:
        _column_as_key, _getattr_col_key, _col_bind_name = crud._key_getters_for_crud_column(compiler)
    if compiler.column_keys is None:
        parameters = {}
    else:
        # Columns requested by the compiler but not explicitly set in the
        # statement get a REQUIRED placeholder.
        parameters = dict((_column_as_key(key), crud.REQUIRED)
                          for key in compiler.column_keys
                          if not stmt_parameters or key not in stmt_parameters)
    values = []
    if stmt_parameters is not None:
        crud._get_stmt_parameters_params(compiler, parameters, stmt_parameters, _column_as_key, values, kw)
    check_columns = {}
    crud._scan_cols(compiler, stmt, parameters, _getattr_col_key, _column_as_key, _col_bind_name, check_columns, values, kw)
    if stmt._has_multi_parameters:
        values = crud._extend_values_for_multiparams(compiler, stmt, values, kw)
    return values
extract values from crud parameters
62,120
def get_tgt_for(user):
    """Fetch the ticket granting ticket for *user*.

    :raises CasConfigException: when no proxy callback is configured
    :raises CasTicketException: when no ticket exists for the user
    """
    if not settings.CAS_PROXY_CALLBACK:
        raise CasConfigException("No proxy callback set in settings")
    username = user.username
    try:
        return Tgt.objects.get(username=username)
    except ObjectDoesNotExist:
        logger.warning('No ticket found for user {user}'.format(user=username))
        raise CasTicketException("no ticket found for user " + username)
Fetch a ticket granting ticket for a given user .
62,121
def get_proxy_ticket_for(self, service):
    """Request a proxy ticket for *service* from the CAS server.

    :raises CasConfigException: when no proxy callback is configured
    :raises CasTicketException: when the CAS server reports failure
    """
    if not settings.CAS_PROXY_CALLBACK:
        raise CasConfigException("No proxy callback set in settings")
    query = urlencode({'pgt': self.tgt, 'targetService': service})
    url = urljoin(settings.CAS_SERVER_URL, 'proxy') + '?' + query
    page = urlopen(url)
    try:
        tree = ElementTree.fromstring(page.read())
        if tree[0].tag.endswith('proxySuccess'):
            return tree[0][0].text
        logger.warning('Failed to get proxy ticket')
        raise CasTicketException('Failed to get proxy ticket: %s' % tree[0].text.strip())
    finally:
        page.close()
Verifies CAS 2 . 0 + XML - based authentication ticket .
62,122
def _internal_verify_cas(ticket, service, suffix):
    """Verify a CAS 2.0/3.0 XML-based authentication ticket.

    Validates *ticket* against the CAS endpoint named by *suffix* and, if
    a proxy-granting ticket is present, persists it as a Tgt record.

    :return: the authenticated username, or None on failure
    """
    params = {'ticket': ticket, 'service': service}
    if settings.CAS_PROXY_CALLBACK:
        params['pgtUrl'] = settings.CAS_PROXY_CALLBACK
    url = (urljoin(settings.CAS_SERVER_URL, suffix) + '?' + urlencode(params))
    page = urlopen(url)
    username = None
    try:
        response = page.read()
        # Parsed twice: ElementTree for positional access, minidom for
        # namespaced tag lookup.
        tree = ElementTree.fromstring(response)
        document = minidom.parseString(response)
        if tree[0].tag.endswith('authenticationSuccess'):
            if settings.CAS_RESPONSE_CALLBACKS:
                cas_response_callbacks(tree)
            username = tree[0][0].text
            pgt_el = document.getElementsByTagName('cas:proxyGrantingTicket')
            if pgt_el:
                pgt = pgt_el[0].firstChild.nodeValue
                try:
                    pgtIou = _get_pgtiou(pgt)
                    # Update an existing TGT, or create one on DoesNotExist.
                    tgt = Tgt.objects.get(username=username)
                    tgt.tgt = pgtIou.tgt
                    tgt.save()
                    pgtIou.delete()
                except Tgt.DoesNotExist:
                    Tgt.objects.create(username=username, tgt=pgtIou.tgt)
                    logger.info('Creating TGT ticket for {user}'.format(user=username))
                    pgtIou.delete()
                except Exception as e:
                    # Proxy auth is best-effort; primary auth still succeeds.
                    logger.warning('Failed to do proxy authentication. {message}'.format(message=e))
        else:
            failure = document.getElementsByTagName('cas:authenticationFailure')
            if failure:
                # NOTE(review): logger.warn is a deprecated alias of
                # logger.warning -- consider updating.
                logger.warn('Authentication failed from CAS server: %s', failure[0].firstChild.nodeValue)
    except Exception as e:
        logger.error('Failed to verify CAS authentication: {message}'.format(message=e))
    finally:
        page.close()
    return username
Verifies CAS 2 . 0 and 3 . 0 XML - based authentication ticket .
62,123
def verify_proxy_ticket(ticket, service):
    """Verify a CAS 2.0+ XML-based proxy ticket.

    :return: dict with 'username' and 'proxies' on success, else None
    """
    query = urlencode({'ticket': ticket, 'service': service})
    url = urljoin(settings.CAS_SERVER_URL, 'proxyValidate') + '?' + query
    page = urlopen(url)
    try:
        tree = ElementTree.fromstring(page.read())
        if not tree[0].tag.endswith('authenticationSuccess'):
            return None
        username = tree[0][0].text
        proxies = []
        if len(tree[0]) > 1:
            proxies = [element.text for element in tree[0][1]]
        return {"username": username, "proxies": proxies}
    finally:
        page.close()
Verifies CAS 2 . 0 + XML - based proxy ticket .
62,124
def _get_pgtiou(pgt):
    """Return the PgtIOU object matching *pgt*, retrying if configured.

    Retries up to 5 times (sleeping 1s between attempts) when
    CAS_PGT_FETCH_WAIT is enabled, otherwise tries exactly once.

    :raises CasTicketException: when no PgtIOU can be found
    """
    retries_left = 5 if settings.CAS_PGT_FETCH_WAIT else 1
    while retries_left:
        try:
            return PgtIOU.objects.get(tgt=pgt)
        except PgtIOU.DoesNotExist:
            if settings.CAS_PGT_FETCH_WAIT:
                time.sleep(1)
            retries_left -= 1
            logger.info('Did not fetch ticket, trying again. {tries} tries left.'.format(tries=retries_left))
    raise CasTicketException("Could not find pgtIou for pgt %s" % pgt)
Returns a PgtIOU object given a pgt .
62,125
def gateway():
    """Decorator factory: authenticate an SSO session if a ticket is
    available, without redirecting to the sign-in page otherwise.

    :raises ImproperlyConfigured: when CAS_GATEWAY is not enabled
    """
    if settings.CAS_GATEWAY == False:
        raise ImproperlyConfigured('CAS_GATEWAY must be set to True')

    def wrap(func):
        def wrapped_f(*args):
            # Imported lazily to avoid a circular import with cas.views.
            from cas.views import login
            request = args[0]
            try:
                # Django < 1.10: is_authenticated is a method.
                is_authenticated = request.user.is_authenticated()
            except TypeError:
                # Django >= 1.10: it is a property.
                is_authenticated = request.user.is_authenticated
            if is_authenticated:
                pass
            else:
                path_with_params = request.path + '?' + urlencode(request.GET.copy())
                if request.GET.get('ticket'):
                    # A CAS ticket is present: try to log in with it.
                    response = login(request, path_with_params, False, True)
                    if isinstance(response, HttpResponseRedirect):
                        return response
                else:
                    gatewayed = request.GET.get('gatewayed')
                    if gatewayed == 'true':
                        # Already bounced through the gateway; do not loop.
                        pass
                    else:
                        response = login(request, path_with_params, False, True)
                        if isinstance(response, HttpResponseRedirect):
                            return response
            return func(*args)
        return wrapped_f
    return wrap
Authenticates single sign on session if ticket is available but doesn t redirect to sign in url otherwise .
62,126
def _service_url(request, redirect_to=None, gateway=False):
    """Generate the application service URL for CAS.

    Builds ``scheme://host/path`` and, when *redirect_to* is given,
    appends the redirect target (plus, in gateway mode, the surviving
    query parameters sorted by name).
    """
    if settings.CAS_FORCE_SSL_SERVICE_URL:
        protocol = 'https://'
    else:
        # Tuple-index trick: False -> http, True -> https.
        protocol = ('http://', 'https://')[request.is_secure()]
    host = request.get_host()
    service = protocol + host + request.path
    if redirect_to:
        if '?' in service:
            service += '&'
        else:
            service += '?'
        if gateway:
            gateway_params = [(REDIRECT_FIELD_NAME, redirect_to), ('gatewayed', 'true')]
            query_dict = request.GET.copy()
            try:
                # The CAS ticket itself must not be echoed back.
                del query_dict['ticket']
            except:
                pass
            # NOTE(review): relies on items() returning a list
            # (Python 2 semantics) for the concatenation below.
            query_list = query_dict.items()
            # Drop gateway params that are overridden by the query string.
            for item in query_list:
                for index, item2 in enumerate(gateway_params):
                    if item[0] == item2[0]:
                        gateway_params.pop(index)
            extra_params = gateway_params + query_list
            # Sort by key so the service URL is deterministic.
            sorted_params = sorted(extra_params, key=itemgetter(0))
            service += urlencode(sorted_params)
        else:
            service += urlencode({REDIRECT_FIELD_NAME: redirect_to})
    return service
Generates application service URL for CAS
62,127
def proxy_callback(request):
    """Handle the CAS 2.0+ proxy callback call.

    Stores the proxy granting ticket in the database for future use and
    always returns a plain-text HttpResponse.
    """
    pgtIou = request.GET.get('pgtIou')
    tgt = request.GET.get('pgtId')
    if not (pgtIou and tgt):
        logger.info('No pgtIou or tgt found in request.GET')
        return HttpResponse('No pgtIOO', content_type="text/plain")
    try:
        PgtIOU.objects.create(tgt=tgt, pgtIou=pgtIou, created=datetime.datetime.now())
        request.session['pgt-TICKET'] = pgtIou
        return HttpResponse('PGT ticket is: {ticket}'.format(ticket=pgtIou), content_type="text/plain")
    except Exception as e:
        logger.warning('PGT storage failed. {message}'.format(message=e))
    return HttpResponse('PGT storage failed for {request}'.format(request=str(request.GET)), content_type="text/plain")
Handles CAS 2 . 0 + XML - based proxy callback call . Stores the proxy granting ticket in the database for future use .
62,128
def objectify(func):
    """Decorator: convert the wrapped call's payload to an EventbriteObject.

    Network failures from requests are re-raised as InternetConnectionError.
    """
    @functools.wraps(func)
    def inner(*args, **kwargs):
        try:
            payload = func(*args, **kwargs)
        except requests.exceptions.ConnectionError as e:
            raise InternetConnectionError(e)
        return EventbriteObject.create(payload)
    return inner
Converts the returned value from a models . Payload to a models . EventbriteObject . Used by the access methods of the client . Eventbrite object
62,129
def get_user(self, user_id=None):
    """Return the user for *user_id*, or the current user when omitted."""
    path = '/users/{0}/'.format(user_id) if user_id else '/users/me/'
    return self.get(path)
Returns the user for the specified user ID, or the current user when no ID is given.
62,130
def get_event_attendees(self, event_id, status=None, changed_since=None):
    """Return a paginated response of attendees for *event_id*.

    Optional *status* and *changed_since* filters are only sent when set.
    """
    data = {}
    for key, value in (('status', status), ('changed_since', changed_since)):
        if value:
            data[key] = value
    return self.get("/events/{0}/attendees/".format(event_id), data=data)
Returns a paginated response with a key of attendees containing a list of attendee .
62,131
def webhook_to_object(self, webhook):
    """Convert JSON sent by an Eventbrite webhook to the matching object.

    Accepts a JSON string, an already-parsed dict, or a request object.

    :raises InvalidWebhook: when no 'api_url' key is present
    """
    if isinstance(webhook, string_type):
        # Fix: parse the incoming JSON string into a dict. json.dumps
        # would *serialize* it to a string, so the dict check below could
        # never succeed for string input.
        webhook = json.loads(webhook)
    if not isinstance(webhook, dict):
        webhook = get_webhook_from_request(webhook)
    try:
        webhook['api_url']
    except KeyError:
        raise InvalidWebhook
    payload = self.get(webhook['api_url'])
    return payload
Converts JSON sent by an Eventbrite Webhook to the appropriate Eventbrite object .
62,132
def get_params_from_page(path, file_name, method_count):
    """Scrape parameter definitions from a rendered endpoint HTML page.

    The params are not present in the docs source, so the rendered HTML
    table must be parsed instead.  Required params sort first.
    """
    file_name = file_name.replace(".rst", "")
    file_path = "{0}/../_build/html/endpoints/{1}/index.html".format(path, file_name)
    soup = bs4.BeautifulSoup(open(file_path))
    section = soup.find_all('div', class_='section')[method_count]
    tbody = section.find('tbody')
    params = []
    if tbody is not None:
        for row in tbody.find_all('tr'):
            name, param_type, required, description = row.find_all('td')
            params.append(dict(
                name=name.text,
                type=param_type.text,
                required=required.text == 'Yes',
                description=description.text,
            ))
    return sorted(params, key=lambda k: not k['required'])
This function accesses the rendered content . We must do this because how the params are not defined in the docs but rather the rendered HTML
62,133
def process_request(self, request):
    """Block admin POST requests that do not come from an allowed IP.

    Returns None (allow) when the request is unrestricted, the AllowedIP
    table is empty, a wildcard "*" entry exists, the client IP is listed,
    or it matches a trailing-wildcard pattern; otherwise returns a 403.
    """
    # Fix: the original parenthesization put the "cms-toolbar-login"
    # check *inside* startswith(), i.e.
    #   startswith(reverse('admin:index') or "..." in uri)
    # Since reverse() always returns a truthy string, the `or`
    # short-circuited and the toolbar-login check was dead code.
    restricted_request_uri = (
        request.path.startswith(reverse('admin:index'))
        or "cms-toolbar-login" in request.build_absolute_uri())
    if restricted_request_uri and request.method == 'POST':
        if AllowedIP.objects.count() > 0:
            # A "*" entry disables IP filtering entirely.
            if AllowedIP.objects.filter(ip_address="*").count() == 0:
                request_ip = get_ip_address_from_request(request)
                if AllowedIP.objects.filter(ip_address=request_ip).count() == 0:
                    # Trailing-wildcard entries are treated as regexes.
                    for regex_ip_range in AllowedIP.objects.filter(ip_address__endswith="*"):
                        if re.match(regex_ip_range.ip_address.replace("*", ".*"), request_ip):
                            return None
                    return HttpResponseForbidden("Access to admin is denied.")
Check if the request is made form an allowed IP
62,134
def get_settings(editor_override=None):
    """Retrieve django-wysiwyg settings with defaults.

    *editor_override*, when given, takes precedence over the
    DJANGO_WYSIWYG_FLAVOR setting.
    """
    if editor_override is not None:
        flavor = editor_override
    else:
        flavor = getattr(settings, "DJANGO_WYSIWYG_FLAVOR", "yui")
    default_media_url = urljoin(settings.STATIC_URL, flavor) + '/'
    return {
        "DJANGO_WYSIWYG_MEDIA_URL": getattr(settings, "DJANGO_WYSIWYG_MEDIA_URL", default_media_url),
        "DJANGO_WYSIWYG_FLAVOR": flavor,
    }
Utility function to retrieve settings . py values with defaults
62,135
def get_auth(self):
    """Return the (username, password) tuple from the config file."""
    section = self._section
    cfg = self._cfgparse
    return (cfg.get(section, 'username'), cfg.get(section, 'password'))
Returns the username and password from the config file.
62,136
def connect(config_file=qcs.default_filename, section='info', remember_me=False, remember_me_always=False):
    """Return a QGConnector for the v1 API, pulling settings from a config file."""
    conf = qcconf.QualysConnectConfig(
        filename=config_file,
        section=section,
        remember_me=remember_me,
        remember_me_always=remember_me_always)
    # Local renamed from 'connect' to avoid shadowing this function.
    connector = qcconn.QGConnector(
        conf.get_auth(),
        conf.get_hostname(),
        conf.proxies,
        conf.max_retries)
    logger.info("Finished building connector.")
    return connector
Return a QGAPIConnect object for v1 API pulling settings from config file .
62,137
def format_api_version(self, api_version):
    """Normalize *api_version* to the canonical internal identifier.

    Accepts forms like 'v1', '2', 'tags', 'pc', 'am2' and returns either
    an int (1 or 2) or one of the strings 'am', 'am2', 'was'.
    """
    if type(api_version) == str:
        api_version = api_version.lower()
        if api_version[0] == 'v' and api_version[1].isdigit():
            # Strip a leading 'v' from e.g. 'v2'.
            api_version = api_version[1:]
        if api_version in ('asset management', 'assets', 'tag', 'tagging', 'tags'):
            api_version = 'am'
        # Fix: the original `api_version in ('am2')` tested substring
        # membership of the *string* 'am2' (parentheses without a comma
        # are not a tuple), so inputs like '2', 'a' or 'm' were wrongly
        # mapped to 'am2'.
        elif api_version == 'am2':
            api_version = 'am2'
        elif api_version in ('webapp', 'web application scanning', 'webapp scanning'):
            api_version = 'was'
        elif api_version in ('pol', 'pc'):
            api_version = 2
        else:
            api_version = int(api_version)
    return api_version
Return QualysGuard API version for api_version specified .
62,138
def which_api_version(self, api_call):
    """Infer the QualysGuard API version from an *api_call* path.

    Returns 1, 2, 'am', 'was', or False when undeterminable.
    """
    if api_call.endswith('.php'):
        return 1
    if api_call.startswith('api/2.0/'):
        return 2
    if '/am/' in api_call:
        return 'am'
    if '/was/' in api_call:
        return 'was'
    return False
Return QualysGuard API version for api_call specified .
62,139
def url_api_version(self, api_version):
    """Return the base API URL for *api_version* on this server.

    :raises Exception: for an unknown api_version
    """
    templates = {
        1: "https://%s/msp/",
        2: "https://%s/",
        'was': "https://%s/qps/rest/3.0/",
        'am': "https://%s/qps/rest/1.0/",
        'am2': "https://%s/qps/rest/2.0/",
    }
    try:
        url = templates[api_version] % (self.server,)
    except KeyError:
        raise Exception("Unknown QualysGuard API Version Number (%s)" % (api_version,))
    logger.debug("Base url =\n%s" % (url))
    return url
Return base API url string for the QualysGuard api_version and server .
62,140
def format_http_method(self, api_version, api_call, data):
    """Return the HTTP method ('get' or 'post') for an API call, POST preferred."""
    if api_version == 2:
        return 'post'
    if api_version == 1:
        return 'post' if api_call in self.api_methods['1 post'] else 'get'
    if api_version == 'was':
        # Compare against the endpoint: path up to and including the last '/'.
        endpoint = api_call[:api_call.rfind('/') + 1]
        if endpoint in self.api_methods['was get']:
            return 'get'
        if data is None and endpoint in self.api_methods['was no data get']:
            return 'get'
        return 'post'
    # Asset-management APIs.
    return 'get' if api_call in self.api_methods['am get'] else 'post'
Return the QualysGuard API HTTP method, with POST preferred.
62,141
def preformat_call(self, api_call):
    """Strip leading '/' and trailing '?' characters from an API call."""
    api_call_formatted = api_call.lstrip('/').rstrip('?')
    if api_call != api_call_formatted:
        # Only log when something actually changed.
        logger.debug('api_call post strip =\n%s' % api_call_formatted)
    return api_call_formatted
Return properly formatted QualysGuard API call .
62,142
def format_call(self, api_version, api_call):
    """Format an API call per *api_version* etiquette.

    Strips leading '/' and trailing '?', and appends a trailing '/' for
    v2 calls and for calls registered as requiring one.
    """
    call = api_call.lstrip('/').rstrip('?')
    logger.debug('api_call post strip =\n%s' % call)
    if api_version == 2 and call[-1] != '/':
        logger.debug('Adding "/" to api_call.')
        call += '/'
    if call in self.api_methods_with_trailing_slash[api_version]:
        logger.debug('Adding "/" to api_call.')
        call += '/'
    return call
Return properly formatted QualysGuard API call according to api_version etiquette .
62,143
def format_payload(self, api_version, data):
    """Normalize the request payload for the target API version."""
    if api_version in (1, 2):
        # v1/v2 expect a dict; convert query-string style input.
        if type(data) == str:
            logger.debug('Converting string to dict:\n%s' % data)
            data = parse_qs(data.lstrip('?').rstrip('&'))
            logger.debug('Converted:\n%s' % str(data))
    elif api_version in ('am', 'was', 'am2'):
        # REST APIs expect serialized XML.
        if type(data) == etree._Element:
            logger.debug('Converting lxml.builder.E to string')
            data = etree.tostring(data)
            logger.debug('Converted:\n%s' % data)
    return data
Return appropriate QualysGuard API call .
62,144
def travis_after(ini, envlist):
    """Wait for all other Travis jobs to finish, then exit successfully.

    Exits with a dedicated status code when the GitHub token, polling
    interval, or required Travis environment is missing/invalid, or when
    any sibling job failed.  No-ops for pull requests and when the
    [travis:after] configuration does not match this job.
    """
    if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
        # Never gate on PR builds.
        return
    if not after_config_matches(ini, envlist):
        # This job is not the designated "after" job.
        return
    github_token = os.environ.get('GITHUB_TOKEN')
    if not github_token:
        print('No GitHub token given.', file=sys.stderr)
        sys.exit(NO_GITHUB_TOKEN)
    api_url = os.environ.get('TRAVIS_API_URL', 'https://api.travis-ci.org')
    build_id = os.environ.get('TRAVIS_BUILD_ID')
    job_number = os.environ.get('TRAVIS_JOB_NUMBER')
    try:
        polling_interval = int(os.environ.get('TRAVIS_POLLING_INTERVAL', 5))
    except ValueError:
        print('Invalid polling interval given: {0}'.format(repr(os.environ.get('TRAVIS_POLLING_INTERVAL'))), file=sys.stderr)
        sys.exit(INVALID_POLLING_INTERVAL)
    if not all([api_url, build_id, job_number]):
        print('Required Travis environment not given.', file=sys.stderr)
        sys.exit(INCOMPLETE_TRAVIS_ENVIRONMENT)
    job_statuses = get_job_statuses(github_token, api_url, build_id, polling_interval, job_number)
    if not all(job_statuses):
        print('Some jobs were not successful.')
        sys.exit(JOBS_FAILED)
    print('All required jobs were successful.')
Wait for all jobs to finish then exit successfully .
62,145
def after_config_matches(ini, envlist):
    """Determine whether this job should wait for the other jobs.

    True when a [travis:after] section exists, its required envlist is a
    subset of this job's envlist, and every configured travis/env
    requirement matches the current environment variables.
    """
    section = ini.sections.get('travis:after', {})
    if not section:
        # No [travis:after] section: never the designated job.
        return False
    if 'envlist' in section or 'toxenv' in section:
        if 'toxenv' in section:
            # 'toxenv' still honored, but deprecated.
            print('The "toxenv" key of the [travis:after] section is '
                  'deprecated in favor of the "envlist" key.', file=sys.stderr)
        toxenv = section.get('toxenv')
        required = set(split_env(section.get('envlist', toxenv) or ''))
        actual = set(envlist)
        if required - actual:
            # Some required env is not run by this job.
            return False
    # Requirements on environment variables: factor-mapped ones from the
    # 'travis' key plus literal ones from the 'env' key.
    env_requirements = [
        (TRAVIS_FACTORS[factor], value) for factor, value
        in parse_dict(section.get('travis', '')).items()
        if factor in TRAVIS_FACTORS
    ] + [
        (name, value) for name, value
        in parse_dict(section.get('env', '')).items()
    ]
    return all([os.environ.get(name) == value
                for name, value in env_requirements])
Determine if this job should wait for the others .
62,146
def get_job_statuses(github_token, api_url, build_id, polling_interval, job_number):
    """Poll Travis until all other mandatory jobs finish.

    Returns a list of booleans, one per sibling job, True when passed.
    Stops early as soon as any finished job is not 'passed'.
    """
    auth = get_json('{api_url}/auth/github'.format(api_url=api_url),
                    data={'github_token': github_token})['access_token']
    while True:
        build = get_json('{api_url}/builds/{build_id}'.format(api_url=api_url, build_id=build_id), auth=auth)
        # Exclude this job itself and any allow-failure jobs.
        jobs = [job for job in build['jobs']
                if job['number'] != job_number and not job['allow_failure']]
        unfinished = [job for job in jobs if not job['finished_at']]
        if not unfinished:
            break
        if any(job['state'] != 'passed' for job in jobs if job['finished_at']):
            # A finished job already failed; no point waiting longer.
            break
        print('Waiting for jobs to complete: {job_numbers}'.format(
            job_numbers=[job['number'] for job in unfinished]))
        time.sleep(polling_interval)
    return [job['state'] == 'passed' for job in jobs]
Wait for all the travis jobs to complete .
62,147
def get_json(url, auth=None, data=None):
    """Make a request to *url* and return the response parsed as JSON.

    When *data* is given it is sent as a JSON body (turning the request
    into a POST); *auth* adds a token Authorization header.
    """
    headers = {
        'Accept': 'application/vnd.travis-ci.2+json',
        'User-Agent': 'Travis/Tox-Travis-1.0a',
    }
    if auth:
        headers['Authorization'] = 'token {auth}'.format(auth=auth)
    params = {}
    if data:
        headers['Content-Type'] = 'application/json'
        params['data'] = json.dumps(data).encode('utf-8')
    request = urllib2.Request(url, headers=headers, **params)
    raw = urllib2.urlopen(request).read()
    return json.loads(raw.decode('utf-8'))
Make a GET request and return the response as parsed JSON .
62,148
def detect_envlist(ini):
    """Default the envlist automatically from the Travis environment."""
    declared = get_declared_envs(ini)
    factors = get_desired_factors(ini)
    desired = ['-'.join(parts) for parts in product(*factors)]
    return match_envs(declared, desired, passthru=len(factors) == 1)
Default envlist automatically based on the Travis environment .
62,149
def autogen_envconfigs(config, envs):
    """Create envconfig entries for envs not declared in the tox config.

    Replays the substitutions tox itself would set up, then calls tox's
    internal make_envconfig for each missing env.
    """
    # setup.cfg hosts tox config under a 'tox' prefix.
    prefix = 'tox' if config.toxinipath.basename == 'setup.cfg' else None
    reader = tox.config.SectionReader("tox", config._cfg, prefix=prefix)
    distshare_default = "{homedir}/.tox/distshare"
    reader.addsubstitutions(toxinidir=config.toxinidir, homedir=config.homedir)
    reader.addsubstitutions(toxworkdir=config.toxworkdir)
    config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
    reader.addsubstitutions(distdir=config.distdir)
    config.distshare = reader.getpath("distshare", distshare_default)
    reader.addsubstitutions(distshare=config.distshare)
    try:
        # tox renamed the class across versions: ParseIni vs parseini.
        make_envconfig = tox.config.ParseIni.make_envconfig
    except AttributeError:
        make_envconfig = tox.config.parseini.make_envconfig
    # Unwrap the unbound method on Python 2 (harmless on Python 3).
    make_envconfig = getattr(make_envconfig, '__func__', make_envconfig)
    for env in envs:
        section = tox.config.testenvprefix + env
        config.envconfigs[env] = make_envconfig(config, env, section, reader._subs, config)
Make the envconfigs for undeclared envs .
62,150
def get_declared_envs(ini):
    """Get the full list of envs declared in the tox ini.

    Combines the 'envlist' key with 'testenv:*' section names, keeping
    envlist order first and section order (by line) after, de-duplicated.
    """
    tox_section_name = 'tox:tox' if ini.path.endswith('setup.cfg') else 'tox'
    tox_section = ini.sections.get(tox_section_name, {})
    envlist = split_env(tox_section.get('envlist', []))
    section_envs = [
        section[8:] for section in sorted(ini.sections, key=ini.lineof)
        if section.startswith('testenv:')
    ]
    extras = [env for env in section_envs if env not in envlist]
    return envlist + extras
Get the full list of envs from the tox ini .
62,151
def get_version_info():
    """Get (version string, major, minor) from the sys module.

    Honors the __TOX_TRAVIS_SYS_VERSION override ('version,major,minor')
    used for testing.
    """
    overrides = os.environ.get('__TOX_TRAVIS_SYS_VERSION')
    if overrides:
        version, major, minor = overrides.split(',')[:3]
        return version, int(major), int(minor)
    major, minor = sys.version_info[:2]
    return sys.version, major, minor
Get version info from the sys module .
62,152
def guess_python_env():
    """Guess the default tox python env from the running interpreter."""
    version, major, minor = get_version_info()
    if 'PyPy' in version:
        if major == 3:
            return 'pypy3'
        return 'pypy'
    return 'py{major}{minor}'.format(major=major, minor=minor)
Guess the default python env to use .
62,153
def get_default_envlist(version):
    """Parse a default tox env name from a Travis python version string.

    Falls back to guessing from the running interpreter when *version*
    is missing or not of the form 'X.Y[.Z]'.
    """
    if version in ['pypy', 'pypy3']:
        return version
    match = re.match(r'^(\d)\.(\d)(?:\.\d+)?$', version or '')
    if match is None:
        return guess_python_env()
    major, minor = match.groups()
    return 'py{major}{minor}'.format(major=major, minor=minor)
Parse a default tox env based on the version .
62,154
def get_desired_factors(ini):
    """Get the list of desired envs per declared factor.

    Reads the [travis] section factors (and the deprecated [tox:travis]
    section and [travis:env] mappings), then returns a list of env lists,
    one per factor whose environment variable is set to a mapped value.
    """
    travis_section = ini.sections.get('travis', {})
    found_factors = [
        (factor, parse_dict(travis_section[factor]))
        for factor in TRAVIS_FACTORS
        if factor in travis_section
    ]
    # Legacy configuration section; still honored but deprecated.
    if 'tox:travis' in ini.sections:
        print('The [tox:travis] section is deprecated in favor of'
              ' the "python" key of the [travis] section.', file=sys.stderr)
        found_factors.append(('python', ini.sections['tox:travis']))
    version = os.environ.get('TRAVIS_PYTHON_VERSION')
    if version:
        default_envlist = get_default_envlist(version)
        # Guarantee a python factor exists for the version Travis is running.
        if not any(factor == 'python' for factor, _ in found_factors):
            found_factors.insert(0, ('python', {version: default_envlist}))
        python_factors = [
            (factor, mapping)
            for factor, mapping in found_factors
            if version and factor == 'python'
        ]
        # Make sure every python mapping covers the running version.
        for _, mapping in python_factors:
            mapping.setdefault(version, default_envlist)
    env_factors = [
        (TRAVIS_FACTORS[factor], mapping)
        for factor, mapping in found_factors
    ] + [
        (name, parse_dict(value))
        for name, value in ini.sections.get('travis:env', {}).items()
    ]
    # Only factors whose env var is set to a known value contribute envs.
    return [
        split_env(mapping[os.environ[name]])
        for name, mapping in env_factors
        if name in os.environ and os.environ[name] in mapping
    ]
Get the list of desired envs per declared factor .
62,155
def match_envs(declared_envs, desired_envs, passthru):
    """Determine the envs that match the desired_envs.

    With ``passthru`` enabled, the desired envs are returned verbatim
    when none of the declared envs match.
    """
    matched = []
    for declared in declared_envs:
        if any(env_matches(declared, desired) for desired in desired_envs):
            matched.append(declared)
    if not matched and passthru:
        return desired_envs
    return matched
Determine the envs that match the desired_envs .
62,156
def env_matches(declared, desired):
    """Determine if a declared env matches a desired env.

    A match requires every factor of the desired env to be present
    among the factors of the declared env; order does not matter.
    """
    declared_factors = set(declared.split('-'))
    return all(
        factor in declared_factors
        for factor in desired.split('-')
    )
Determine if a declared env matches a desired env .
62,157
def override_ignore_outcome(ini):
    """Decide whether to override ignore_outcomes.

    Reads the ``unignore_outcomes`` boolean from the [travis] section
    of the config; defaults to False when unset.
    """
    travis_reader = tox.config.SectionReader("travis", ini)
    return travis_reader.getbool('unignore_outcomes', False)
Decide whether to override ignore_outcomes .
62,158
def tox_addoption(parser):
    """Add arguments and needed monkeypatches.

    Registers the (deprecated) ``--travis-after`` flag and, when running
    on Travis CI, applies the tox monkeypatches this plugin relies on.
    """
    parser.add_argument(
        '--travis-after', dest='travis_after', action='store_true',
        help='Exit successfully after all Travis jobs complete successfully.')
    # Only patch tox internals when actually running on Travis CI.
    if 'TRAVIS' in os.environ:
        pypy_version_monkeypatch()
        subcommand_test_monkeypatch(tox_subcommand_test_post)
Add arguments and needed monkeypatches .
62,159
def tox_configure(config):
    """Check for the presence of the added options.

    On Travis, narrows the tox envlist to the detected envs (unless the
    user pinned envs via TOXENV or -e), optionally un-ignores outcomes,
    and warns about deprecated features.
    """
    if 'TRAVIS' not in os.environ:
        return
    ini = config._cfg
    # Only adjust the envlist when the user did not select envs explicitly.
    if 'TOXENV' not in os.environ and not config.option.env:
        envlist = detect_envlist(ini)
        undeclared = set(envlist) - set(config.envconfigs)
        if undeclared:
            print('Matching undeclared envs is deprecated. Be sure all the '
                  'envs that Tox should run are declared in the tox config.',
                  file=sys.stderr)
            autogen_envconfigs(config, undeclared)
        config.envlist = envlist
    if override_ignore_outcome(ini):
        for envconfig in config.envconfigs.values():
            envconfig.ignore_outcome = False
    if config.option.travis_after:
        print('The after all feature has been deprecated. Check out Travis\' '
              'build stages, which are a better solution. '
              'See https://tox-travis.readthedocs.io/en/stable/after.html '
              'for more details.', file=sys.stderr)
Check for the presence of the added options .
62,160
def parse_dict(value):
    """Parse a dict value from the tox config.

    Each non-blank line is a ``key: value`` pair; whitespace around the
    key and the value is stripped.  Only the first colon splits.
    """
    result = {}
    for raw_line in value.strip().splitlines():
        line = raw_line.strip()
        if not line:
            continue
        key, val = line.split(':', 1)
        result[key.strip()] = val.strip()
    return result
Parse a dict value from the tox config .
62,161
def pypy_version_monkeypatch():
    """Patch Tox to work with non-default PyPy 3 versions."""
    # For versions like pypy3.3-5.2-alpha1, map tox's pypy3 factor to the
    # `python` executable -- presumably the interpreter name Travis
    # provides for these builds (TODO confirm against Travis docs).
    version = os.environ.get('TRAVIS_PYTHON_VERSION')
    if version and default_factors and version.startswith('pypy3.3-'):
        default_factors['pypy3'] = 'python'
Patch Tox to work with non - default PyPy 3 versions .
62,162
def direction(self):
    """The direction of the pin, either INPUT or OUTPUT.

    Reads this pin's bit in the MCP chip's IODIR register; a set bit
    means the pin is configured as an input.
    """
    if _get_bit(self._mcp.iodir, self._pin):
        return digitalio.Direction.INPUT
    return digitalio.Direction.OUTPUT
The direction of the pin either True for an input or False for an output .
62,163
def pull(self):
    """The pull configuration of the pin.

    Returns ``digitalio.Pull.UP`` when this pin's bit in the GPPU
    register indicates the internal pull-up resistor is enabled, or
    ``None`` when it is disabled.  Pull-down resistors are NOT
    supported!
    """
    if _get_bit(self._mcp.gppu, self._pin):
        return digitalio.Pull.UP
    return None
The pull configuration of the pin: returns digitalio.Pull.UP when the internal pull-up resistor is enabled, or None when it is disabled. Pull-down resistors are NOT supported!
62,164
def get_throttled_read_event_count(
        table_name, lookback_window_start=15, lookback_period=5):
    """Returns the number of throttled read events during a given time frame.

    :param table_name: name of the DynamoDB table
    :param lookback_window_start: minutes back from now the window starts
    :param lookback_period: length of the window in minutes
    :returns: sum of ReadThrottleEvents in the window (0 if no data)
    """
    try:
        metrics = __get_aws_metric(
            table_name, lookback_window_start, lookback_period,
            'ReadThrottleEvents')
    except BotoServerError:
        raise
    if metrics:
        throttled_read_events = int(metrics[0]['Sum'])
    else:
        # No data points reported means no throttling occurred.
        throttled_read_events = 0
    logger.info('{0} - Read throttle count: {1:d}'.format(
        table_name, throttled_read_events))
    return throttled_read_events
Returns the number of throttled read events during a given time frame
62,165
def get_throttled_by_consumed_read_percent(
        table_name, lookback_window_start=15, lookback_period=5):
    """Returns throttled read events as a percentage of consumed reads.

    :param table_name: name of the DynamoDB table
    :param lookback_window_start: minutes back from now the window starts
    :param lookback_period: length of the window in minutes
    :returns: throttle/consumption ratio * 100 (0 if either metric missing)
    """
    try:
        metrics1 = __get_aws_metric(
            table_name, lookback_window_start, lookback_period,
            'ConsumedReadCapacityUnits')
        metrics2 = __get_aws_metric(
            table_name, lookback_window_start, lookback_period,
            'ReadThrottleEvents')
    except BotoServerError:
        raise
    if metrics1 and metrics2:
        lookback_seconds = lookback_period * 60
        # Both sums are normalized to per-second rates before the ratio
        # (the normalization cancels out, but keeps units explicit).
        throttled_by_consumed_read_percent = (
            (
                (float(metrics2[0]['Sum']) / float(lookback_seconds)) /
                (float(metrics1[0]['Sum']) / float(lookback_seconds))
            ) * 100)
    else:
        throttled_by_consumed_read_percent = 0
    logger.info(
        '{0} - Throttled read percent by consumption: {1:.2f}%'.format(
            table_name, throttled_by_consumed_read_percent))
    return throttled_by_consumed_read_percent
Returns the throttled read events as a percentage of consumed read capacity during a given time frame
62,166
def get_throttled_by_consumed_write_percent(
        table_name, lookback_window_start=15, lookback_period=5):
    """Returns throttled write events as a percentage of consumed writes.

    :param table_name: name of the DynamoDB table
    :param lookback_window_start: minutes back from now the window starts
    :param lookback_period: length of the window in minutes
    :returns: throttle/consumption ratio * 100 (0 if either metric missing)
    """
    try:
        metrics1 = __get_aws_metric(
            table_name, lookback_window_start, lookback_period,
            'ConsumedWriteCapacityUnits')
        metrics2 = __get_aws_metric(
            table_name, lookback_window_start, lookback_period,
            'WriteThrottleEvents')
    except BotoServerError:
        raise
    if metrics1 and metrics2:
        lookback_seconds = lookback_period * 60
        # Both sums are normalized to per-second rates before the ratio
        # (the normalization cancels out, but keeps units explicit).
        throttled_by_consumed_write_percent = (
            (
                (float(metrics2[0]['Sum']) / float(lookback_seconds)) /
                (float(metrics1[0]['Sum']) / float(lookback_seconds))
            ) * 100)
    else:
        throttled_by_consumed_write_percent = 0
    logger.info(
        '{0} - Throttled write percent by consumption: {1:.2f}%'.format(
            table_name, throttled_by_consumed_write_percent))
    return throttled_by_consumed_write_percent
Returns the throttled write events as a percentage of consumed write capacity during a given time frame
62,167
def __get_aws_metric(table_name, lookback_window_start, lookback_period,
                     metric_name):
    """Returns a metric list from the AWS CloudWatch service.

    May return an empty result if no metric exists for the window.

    :param table_name: name of the DynamoDB table
    :param lookback_window_start: minutes back from now the window starts
    :param lookback_period: length of the window in minutes
    :param metric_name: name of the CloudWatch metric to fetch
    :raises BotoServerError: re-raised after logging
    """
    try:
        now = datetime.utcnow()
        start_time = now - timedelta(minutes=lookback_window_start)
        end_time = now - timedelta(
            minutes=lookback_window_start - lookback_period)
        return cloudwatch_connection.get_metric_statistics(
            period=lookback_period * 60,  # CloudWatch expects seconds
            start_time=start_time,
            end_time=end_time,
            metric_name=metric_name,
            namespace='AWS/DynamoDB',
            statistics=['Sum'],
            dimensions={'TableName': table_name},
            unit='Count')
    except BotoServerError as error:
        logger.error(
            'Unknown boto error. Status: "{0}". '
            'Reason: "{1}". Message: {2}'.format(
                error.status, error.reason, error.message))
        raise
Returns a metric list from the AWS CloudWatch service may return None if no metric exists
62,168
def ensure_provisioning(
        table_name, table_key, gsi_name, gsi_key,
        num_consec_read_checks, num_consec_write_checks):
    """Ensure that provisioning is correct for Global Secondary Indexes.

    Checks the circuit breaker, verifies alarms, computes needed read and
    write provisioning changes, and applies them in a single update.
    Returns the updated (num_consec_read_checks, num_consec_write_checks),
    or (0, 0) when the circuit breaker is open.
    """
    if get_global_option('circuit_breaker_url') or get_gsi_option(
            table_key, gsi_key, 'circuit_breaker_url'):
        if circuit_breaker.is_open(table_name, table_key, gsi_name, gsi_key):
            logger.warning('Circuit breaker is OPEN!')
            return (0, 0)
    logger.info(
        '{0} - Will ensure provisioning for global secondary index {1}'.format(
            table_name, gsi_name))
    __ensure_provisioning_alarm(table_name, table_key, gsi_name, gsi_key)
    try:
        read_update_needed, updated_read_units, num_consec_read_checks = \
            __ensure_provisioning_reads(
                table_name, table_key, gsi_name, gsi_key,
                num_consec_read_checks)
        write_update_needed, updated_write_units, num_consec_write_checks = \
            __ensure_provisioning_writes(
                table_name, table_key, gsi_name, gsi_key,
                num_consec_write_checks)
        # Reset the consecutive-check counters whenever an update happens.
        if read_update_needed:
            num_consec_read_checks = 0
        if write_update_needed:
            num_consec_write_checks = 0
        # Reads and writes are changed in one update call.
        if read_update_needed or write_update_needed:
            logger.info(
                '{0} - GSI: {1} - Changing provisioning to {2:d} '
                'read units and {3:d} write units'.format(
                    table_name, gsi_name,
                    int(updated_read_units), int(updated_write_units)))
            __update_throughput(
                table_name, table_key, gsi_name, gsi_key,
                updated_read_units, updated_write_units)
        else:
            logger.info(
                '{0} - GSI: {1} - No need to change provisioning'.format(
                    table_name, gsi_name))
    except JSONResponseError:
        raise
    except BotoServerError:
        raise
    return num_consec_read_checks, num_consec_write_checks
Ensure that provisioning is correct for Global Secondary Indexes
62,169
def __update_throughput(
        table_name, table_key, gsi_name, gsi_key, read_units, write_units):
    """Update throughput on the GSI.

    Skips the update when the GSI is not ACTIVE, and honors the
    always_decrease_rw_together option before applying changes.
    """
    try:
        current_ru = dynamodb.get_provisioned_gsi_read_units(
            table_name, gsi_name)
        current_wu = dynamodb.get_provisioned_gsi_write_units(
            table_name, gsi_name)
    except JSONResponseError:
        raise
    # DynamoDB rejects provisioning updates unless the GSI is ACTIVE.
    try:
        gsi_status = dynamodb.get_gsi_status(table_name, gsi_name)
    except JSONResponseError:
        raise
    logger.debug('{0} - GSI: {1} - GSI status is {2}'.format(
        table_name, gsi_name, gsi_status))
    if gsi_status != 'ACTIVE':
        logger.warning(
            '{0} - GSI: {1} - Not performing throughput changes when GSI '
            'status is {2}'.format(table_name, gsi_name, gsi_status))
        return
    # If enabled, only scale down when both reads and writes can drop.
    if get_gsi_option(table_key, gsi_key, 'always_decrease_rw_together'):
        read_units, write_units = __calculate_always_decrease_rw_values(
            table_name, gsi_name, read_units, current_ru,
            write_units, current_wu)

    if read_units == current_ru and write_units == current_wu:
        logger.info('{0} - GSI: {1} - No changes to perform'.format(
            table_name, gsi_name))
        return

    dynamodb.update_gsi_provisioning(
        table_name, table_key, gsi_name, gsi_key,
        int(read_units), int(write_units))
Update throughput on the GSI
62,170
def is_open(table_name=None, table_key=None, gsi_name=None, gsi_key=None):
    """Checks whether the circuit breaker is open.

    Performs an HTTP GET against the configured circuit breaker URL
    (GSI option, then table option, then global option).  A 2xx
    response means the breaker is closed (returns False); any other
    status or any request failure is treated as open (returns True).
    """
    logger.debug('Checking circuit breaker status')
    # Parse the URL to allow for user:pass@host style basic auth.
    pattern = re.compile(
        r'^(?P<scheme>http(s)?://)'
        r'((?P<username>.+):(?P<password>.+)@){0,1}'
        r'(?P<url>.*)$')
    url = timeout = None
    if gsi_name:
        url = get_gsi_option(table_key, gsi_key, 'circuit_breaker_url')
        timeout = get_gsi_option(table_key, gsi_key, 'circuit_breaker_timeout')
    elif table_name:
        url = get_table_option(table_key, 'circuit_breaker_url')
        timeout = get_table_option(table_key, 'circuit_breaker_timeout')
    if not url:
        url = get_global_option('circuit_breaker_url')
        timeout = get_global_option('circuit_breaker_timeout')
    match = pattern.match(url)
    if not match:
        logger.error('Malformatted URL: {0}'.format(url))
        sys.exit(1)
    use_basic_auth = False
    if match.group('username') and match.group('password'):
        use_basic_auth = True
    auth = ()
    if use_basic_auth:
        # Strip the credentials out of the URL; pass them separately.
        url = '{scheme}{url}'.format(
            scheme=match.group('scheme'),
            url=match.group('url'))
        auth = (match.group('username'), match.group('password'))
    headers = {}
    if table_name:
        headers["x-table-name"] = table_name
    if gsi_name:
        headers["x-gsi-name"] = gsi_name
    try:
        # timeout is configured in milliseconds; requests wants seconds.
        response = requests.get(
            url,
            auth=auth,
            timeout=timeout / 1000.00,
            headers=headers)
        if int(response.status_code) >= 200 and int(response.status_code) < 300:
            logger.info('Circuit breaker is closed')
            return False
        else:
            logger.warning(
                'Circuit breaker returned with status code {0:d}'.format(
                    response.status_code))
    except requests.exceptions.SSLError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.Timeout as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.ConnectionError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.HTTPError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.TooManyRedirects as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except Exception as error:
        logger.error('Unhandled exception: {0}'.format(error))
        logger.error(
            'Please file a bug at '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
    # Any failure above falls through to "breaker is open".
    return True
Checks whether the circuit breaker is open
62,171
def __get_connection_cloudwatch():
    """Ensure connection to CloudWatch.

    Uses explicit credentials from the configuration file when both key
    options are set; otherwise defers to boto's credential resolution.
    """
    region = get_global_option('region')
    try:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to CloudWatch using '
                'credentials in configuration file')
            connection = cloudwatch.connect_to_region(
                region,
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug(
                'Authenticating using boto\'s authentication handler')
            connection = cloudwatch.connect_to_region(region)
    except Exception as err:
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise
    logger.debug('Connected to CloudWatch in {0}'.format(region))
    return connection
Ensure connection to CloudWatch
62,172
def get_tables_and_gsis():
    """Get a set of tables and gsis and their configuration keys.

    Matches every live DynamoDB table name against the configured table
    name regular expressions and returns a sorted list of
    (table name, config key) tuples.  Exits on invalid regexps.
    """
    table_names = set()
    configured_tables = get_configured_tables()
    not_used_tables = set(configured_tables)
    # Match regexp table names against existing tables.
    for table_instance in list_tables():
        for key_name in configured_tables:
            try:
                if re.match(key_name, table_instance.table_name):
                    logger.debug("Table {0} match with config key {1}".format(
                        table_instance.table_name, key_name))
                    # A table may only be claimed by one config pattern.
                    if table_instance.table_name in [x[0] for x in table_names]:
                        logger.warning(
                            'Table {0} matches more than one regexp in config, '
                            'skipping this match: "{1}"'.format(
                                table_instance.table_name, key_name))
                    else:
                        table_names.add(
                            (table_instance.table_name, key_name))
                        not_used_tables.discard(key_name)
                else:
                    logger.debug(
                        "Table {0} did not match with config key {1}".format(
                            table_instance.table_name, key_name))
            except re.error:
                logger.error('Invalid regular expression: "{0}"'.format(
                    key_name))
                sys.exit(1)
    if not_used_tables:
        logger.warning(
            'No tables matching the following configured '
            'tables found: {0}'.format(', '.join(not_used_tables)))
    return sorted(table_names)
Get a set of tables and gsis and their configuration keys
62,173
def list_tables():
    """Return list of DynamoDB tables available from AWS.

    Paginates through ``list_tables`` results and wraps each name with
    ``get_table``.  Known AWS errors are logged; a JSON communication
    error aborts the process.
    """
    tables = []
    try:
        table_list = DYNAMODB_CONNECTION.list_tables()
        while True:
            for table_name in table_list[u'TableNames']:
                tables.append(get_table(table_name))
            # Paginate until AWS stops returning a continuation marker.
            if u'LastEvaluatedTableName' in table_list:
                table_list = DYNAMODB_CONNECTION.list_tables(
                    table_list[u'LastEvaluatedTableName'])
            else:
                break
    except DynamoDBResponseError as error:
        dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
        if dynamodb_error == 'ResourceNotFoundException':
            logger.error('No tables found')
        elif dynamodb_error == 'AccessDeniedException':
            logger.debug(
                'Your AWS API keys lack access to listing tables. '
                'That is an issue if you are trying to use regular '
                'expressions in your table configuration.')
        elif dynamodb_error == 'UnrecognizedClientException':
            logger.error(
                'Invalid security token. Are your AWS API keys correct?')
        else:
            logger.error(
                (
                    'Unhandled exception: {0}: {1}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(dynamodb_error, error.body['message']))
    except JSONResponseError as error:
        logger.error('Communication error: {0}'.format(error))
        sys.exit(1)
    return tables
Return list of DynamoDB tables available from AWS
62,174
def table_gsis(table_name):
    """Returns a list of GSIs for the given table.

    :param table_name: name of the DynamoDB table
    :returns: the table's GlobalSecondaryIndexes descriptions, or an
        empty list when the table has none
    """
    try:
        desc = DYNAMODB_CONNECTION.describe_table(table_name)[u'Table']
    except JSONResponseError:
        raise
    if u'GlobalSecondaryIndexes' in desc:
        return desc[u'GlobalSecondaryIndexes']
    return []
Returns a list of GSIs for the given table
62,175
def __get_connection_dynamodb(retries=3):
    """Ensure connection to DynamoDB.

    Retries up to ``retries`` times (5 seconds apart) when boto returns
    a falsy connection object.
    """
    connected = False
    region = get_global_option('region')
    while not connected:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to DynamoDB using '
                'credentials in configuration file')
            connection = dynamodb2.connect_to_region(
                region,
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug(
                'Authenticating using boto\'s authentication handler')
            connection = dynamodb2.connect_to_region(region)
        if not connection:
            if retries == 0:
                logger.error('Failed to connect to DynamoDB. Giving up.')
                # NOTE(review): bare `raise` outside an except block has no
                # active exception to re-raise -- confirm whether a concrete
                # exception type was intended here.
                raise
            else:
                logger.error(
                    'Failed to connect to DynamoDB. Retrying in 5 seconds')
                retries -= 1
                time.sleep(5)
        else:
            connected = True
    logger.debug('Connected to DynamoDB in {0}'.format(region))
    return connection
Ensure connection to DynamoDB
62,176
def __is_gsi_maintenance_window(table_name, gsi_name, maintenance_windows):
    """Checks that the current time is within the maintenance window.

    :param maintenance_windows: comma separated windows, e.g.
        ``'00:00-01:00,10:00-11:00'``
    :returns: True if the current UTC time falls within any window
    """
    maintenance_window_list = []
    for window in maintenance_windows.split(','):
        try:
            start, end = window.split('-', 1)
        except ValueError:
            logger.error(
                '{0} - GSI: {1} - '
                'Malformatted maintenance window'.format(table_name, gsi_name))
            return False
        maintenance_window_list.append((start, end))
    # Compare zero-padded HHMM strings; lexicographic order matches
    # chronological order for this format.
    now = datetime.datetime.utcnow().strftime('%H%M')
    for maintenance_window in maintenance_window_list:
        start = ''.join(maintenance_window[0].split(':'))
        end = ''.join(maintenance_window[1].split(':'))
        # NOTE(review): a window crossing midnight (start > end) can never
        # match with this comparison -- confirm that is intended.
        if now >= start and now <= end:
            return True
    return False
Checks that the current time is within the maintenance window
62,177
def publish_gsi_notification(
        table_key, gsi_key, message, message_types, subject=None):
    """Publish a notification for a specific GSI.

    Sends at most one SNS message, and only when a topic is configured
    and at least one of the given message types is enabled for the GSI.
    """
    topic = get_gsi_option(table_key, gsi_key, 'sns_topic_arn')
    if not topic:
        return
    for message_type in message_types:
        if (message_type in
                get_gsi_option(table_key, gsi_key, 'sns_message_types')):
            __publish(topic, message, subject)
            return
Publish a notification for a specific GSI
62,178
def publish_table_notification(table_key, message, message_types, subject=None):
    """Publish a notification for a specific table.

    Sends at most one SNS message, and only when a topic is configured
    and at least one of the given message types is enabled for the table.
    """
    topic = get_table_option(table_key, 'sns_topic_arn')
    if not topic:
        return
    for message_type in message_types:
        if message_type in get_table_option(table_key, 'sns_message_types'):
            __publish(topic, message, subject)
            return
Publish a notification for a specific table
62,179
def __publish(topic, message, subject=None):
    """Publish a message to a SNS topic.

    Errors from SNS are logged, not raised -- notifications are
    best-effort.
    """
    try:
        SNS_CONNECTION.publish(topic=topic, message=message, subject=subject)
        logger.info('Sent SNS notification to {0}'.format(topic))
    except BotoServerError as error:
        logger.error('Problem sending SNS notification: {0}'.format(
            error.message))
    return
Publish a message to a SNS topic
62,180
def __get_connection_SNS():
    """Ensure connection to SNS.

    Uses explicit credentials from the configuration file when both key
    options are set; otherwise defers to boto's credential resolution.
    """
    region = get_global_option('region')
    try:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to SNS using '
                'credentials in configuration file')
            connection = sns.connect_to_region(
                region,
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug(
                'Authenticating using boto\'s authentication handler')
            connection = sns.connect_to_region(region)
    except Exception as err:
        logger.error('Failed connecting to SNS: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise
    logger.debug('Connected to SNS in {0}'.format(region))
    return connection
Ensure connection to SNS
62,181
def __calculate_always_decrease_rw_values(
        table_name, read_units, provisioned_reads,
        write_units, provisioned_writes):
    """Calculate values for always-decrease-rw-together.

    A decrease is only allowed when both reads and writes are at or
    below their current provisioning; otherwise the side that could be
    lowered is held at its current value until the other side catches up.

    :returns: (read_units, write_units) tuple to apply
    """
    reads_can_drop = read_units <= provisioned_reads
    writes_can_drop = write_units <= provisioned_writes

    if reads_can_drop and writes_can_drop:
        return (read_units, write_units)

    if read_units < provisioned_reads:
        logger.info(
            '{0} - Reads could be decreased, but we are waiting for '
            'writes to get lower than the threshold before '
            'scaling down'.format(table_name))
        read_units = provisioned_reads
    elif write_units < provisioned_writes:
        logger.info(
            '{0} - Writes could be decreased, but we are waiting for '
            'reads to get lower than the threshold before '
            'scaling down'.format(table_name))
        write_units = provisioned_writes

    return (read_units, write_units)
Calculate values for always - decrease - rw - together
62,182
def __update_throughput(table_name, key_name, read_units, write_units):
    """Update throughput on the DynamoDB table.

    Skips the update when the table is not ACTIVE, and honors the
    always_decrease_rw_together option before applying changes.
    """
    try:
        current_ru = dynamodb.get_provisioned_table_read_units(table_name)
        current_wu = dynamodb.get_provisioned_table_write_units(table_name)
    except JSONResponseError:
        raise
    # DynamoDB rejects provisioning updates unless the table is ACTIVE.
    try:
        table_status = dynamodb.get_table_status(table_name)
    except JSONResponseError:
        raise
    logger.debug('{0} - Table status is {1}'.format(table_name, table_status))
    if table_status != 'ACTIVE':
        logger.warning(
            '{0} - Not performing throughput changes when table '
            'is {1}'.format(table_name, table_status))
        return
    # If enabled, only scale down when both reads and writes can drop.
    if get_table_option(key_name, 'always_decrease_rw_together'):
        read_units, write_units = __calculate_always_decrease_rw_values(
            table_name, read_units, current_ru, write_units, current_wu)

    if read_units == current_ru and write_units == current_wu:
        logger.info('{0} - No changes to perform'.format(table_name))
        return

    dynamodb.update_table_provisioning(
        table_name, key_name, int(read_units), int(write_units))
Update throughput on the DynamoDB table
62,183
def get_configuration():
    """Get the configuration from command line and config files.

    Command line options take precedence over the configuration file.
    Returns a dict with 'global', 'logging' and 'tables' sections,
    validated by the rule checkers.
    """
    configuration = {
        'global': {},
        'logging': {},
        'tables': ordereddict()
    }
    # Read the command line options first.
    cmd_line_options = command_line_parser.parse()
    # If a configuration file was given, read that as well.
    conf_file_options = None
    if 'config' in cmd_line_options:
        conf_file_options = config_file_parser.parse(
            cmd_line_options['config'])
    configuration['global'] = __get_global_options(
        cmd_line_options, conf_file_options)
    configuration['logging'] = __get_logging_options(
        cmd_line_options, conf_file_options)
    # A table named on the command line overrides the config file tables.
    if 'table_name' in cmd_line_options:
        configuration['tables'] = __get_cmd_table_options(cmd_line_options)
    else:
        configuration['tables'] = __get_config_table_options(conf_file_options)
    # Sanity check the assembled configuration (these may sys.exit).
    __check_gsi_rules(configuration)
    __check_logging_rules(configuration)
    __check_table_rules(configuration)
    return configuration
Get the configuration from command line and config files
62,184
def __get_cmd_table_options(cmd_line_options):
    """Get all table options from the command line.

    Starts from the table defaults and overlays any option supplied on
    the command line.  Returns ``{table_name: {option: value}}``.
    """
    table_name = cmd_line_options['table_name']
    table_options = {}
    for option, default in DEFAULT_OPTIONS['table'].items():
        table_options[option] = cmd_line_options.get(option, default)
    return {table_name: table_options}
Get all table options from the command line
62,185
def __get_config_table_options(conf_file_options):
    """Get all table options from the config file.

    Starts from the table (and GSI) defaults and overlays configured
    values.  ``sns_message_types`` is parsed from a comma separated
    string into a list.  Returns an ordereddict keyed by table name.
    """
    options = ordereddict()
    if not conf_file_options:
        return options
    for table_name in conf_file_options['tables']:
        options[table_name] = {}
        # Fill in defaults, then overlay any configured values.
        for option in DEFAULT_OPTIONS['table'].keys():
            options[table_name][option] = DEFAULT_OPTIONS['table'][option]
            if option not in conf_file_options['tables'][table_name]:
                continue
            if option == 'sns_message_types':
                # The option is a comma separated string in the config file.
                # Was a bare `except:`; narrowed so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                try:
                    raw_list = conf_file_options['tables'][table_name][option]
                    options[table_name][option] = [
                        i.strip() for i in raw_list.split(',')]
                except Exception:
                    print(
                        'Error parsing the "sns-message-types" '
                        'option: {0}'.format(
                            conf_file_options['tables'][table_name][option]))
            else:
                options[table_name][option] = \
                    conf_file_options['tables'][table_name][option]
        if 'gsis' in conf_file_options['tables'][table_name]:
            for gsi_name in conf_file_options['tables'][table_name]['gsis']:
                for option in DEFAULT_OPTIONS['gsi'].keys():
                    opt = DEFAULT_OPTIONS['gsi'][option]
                    if 'gsis' not in options[table_name]:
                        options[table_name]['gsis'] = {}
                    if gsi_name not in options[table_name]['gsis']:
                        options[table_name]['gsis'][gsi_name] = {}
                    # Keep the default when the GSI does not configure it.
                    if (option not in conf_file_options[
                            'tables'][table_name]['gsis'][gsi_name]):
                        options[table_name]['gsis'][gsi_name][option] = opt
                        continue
                    if option == 'sns_message_types':
                        try:
                            raw_list = conf_file_options[
                                'tables'][table_name]['gsis'][gsi_name][option]
                            opt = [i.strip() for i in raw_list.split(',')]
                        except Exception:
                            print(
                                'Error parsing the "sns-message-types" '
                                'option: {0}'.format(
                                    conf_file_options['tables'][
                                        table_name]['gsis'][gsi_name][option]))
                    else:
                        opt = conf_file_options[
                            'tables'][table_name]['gsis'][gsi_name][option]
                    options[table_name]['gsis'][gsi_name][option] = opt
    return options
Get all table options from the config file
62,186
def __get_global_options(cmd_line_options, conf_file_options=None):
    """Get all global options.

    Defaults are overridden first by the configuration file and then by
    the command line, so the command line always wins.
    """
    options = {}
    for option, default in DEFAULT_OPTIONS['global'].items():
        value = default
        if conf_file_options and option in conf_file_options:
            value = conf_file_options[option]
        if cmd_line_options and option in cmd_line_options:
            value = cmd_line_options[option]
        options[option] = value
    return options
Get all global options
62,187
def __get_logging_options(cmd_line_options, conf_file_options=None):
    """Get all logging options.

    Defaults are overridden first by the configuration file and then by
    the command line, so the command line always wins.
    """
    options = {}
    for option, default in DEFAULT_OPTIONS['logging'].items():
        value = default
        if conf_file_options and option in conf_file_options:
            value = conf_file_options[option]
        if cmd_line_options and option in cmd_line_options:
            value = cmd_line_options[option]
        options[option] = value
    return options
Get all logging options
62,188
def __check_logging_rules(configuration):
    """Check that the logging values are proper.

    Exits the process when the configured log level is not one of the
    supported levels (case-insensitive).
    """
    valid_log_levels = ['debug', 'info', 'warning', 'error']
    level = configuration['logging']['log_level'].lower()
    if level not in valid_log_levels:
        print('Log level must be one of {0}'.format(
            ', '.join(valid_log_levels)))
        sys.exit(1)
Check that the logging values are proper
62,189
def is_consumed_over_proposed(
        current_provisioning, proposed_provisioning, consumed_units_percent):
    """Determine if consumed capacity is over the proposed capacity.

    Translates the consumed percentage into absolute capacity units
    (rounded up) and compares against the proposed provisioning.
    """
    consumed_units = current_provisioning * (consumed_units_percent / 100)
    return int(math.ceil(consumed_units)) > proposed_provisioning
Determines if the currently consumed capacity is over the proposed capacity for this table
62,190
def __get_min_reads(current_provisioning, min_provisioned_reads, log_tag):
    """Get the minimum number of reads, capped by the max scale-up step.

    :param current_provisioning: currently provisioned read units
    :param min_provisioned_reads: configured minimum (may be falsy)
    :param log_tag: prefix for log messages
    :returns: the effective minimum read units (at least 1)
    """
    reads = 1  # DynamoDB requires at least 1 provisioned read unit
    if min_provisioned_reads:
        reads = int(min_provisioned_reads)
        # A single update may at most double current provisioning, so the
        # configured minimum cannot always be reached in one step.
        if reads > int(current_provisioning * 2):
            reads = int(current_provisioning * 2)
            logger.debug(
                '{0} - '
                'Cannot reach min-provisioned-reads as max scale up '
                'is 100% of current provisioning'.format(log_tag))
    # Log the effective value (previously logged the raw configured
    # minimum, which was misleading when the cap applied).
    logger.debug('{0} - Setting min provisioned reads to {1}'.format(
        log_tag, reads))
    return reads
Get the minimum number of reads to current_provisioning
62,191
def __get_min_writes(current_provisioning, min_provisioned_writes, log_tag):
    """Get the minimum number of writes, capped by the max scale-up step.

    :param current_provisioning: currently provisioned write units
    :param min_provisioned_writes: configured minimum (may be falsy)
    :param log_tag: prefix for log messages
    :returns: the effective minimum write units (at least 1)
    """
    writes = 1  # DynamoDB requires at least 1 provisioned write unit
    if min_provisioned_writes:
        writes = int(min_provisioned_writes)
        # A single update may at most double current provisioning, so the
        # configured minimum cannot always be reached in one step.
        if writes > int(current_provisioning * 2):
            writes = int(current_provisioning * 2)
            logger.debug(
                '{0} - '
                'Cannot reach min-provisioned-writes as max scale up '
                'is 100% of current provisioning'.format(log_tag))
    # Log the effective value (previously logged the raw configured
    # minimum, which was misleading when the cap applied).
    logger.debug('{0} - Setting min provisioned writes to {1}'.format(
        log_tag, writes))
    return writes
Get the minimum number of writes to current_provisioning
62,192
def restart(self, *args, **kwargs):
    """Restart the daemon.

    Stops the daemon, then starts it with the given arguments.  Any
    IOError from start() propagates to the caller unchanged; the
    previous ``try/except IOError: raise`` was a no-op and was removed.
    """
    self.stop()
    self.start(*args, **kwargs)
Restart the daemon
62,193
def __parse_options(config_file, section, options):
    """Parse the section options.

    :param config_file: ConfigParser instance to read from
    :param section: section name to read
    :param options: list of dicts with 'key', 'option', 'type' and
        'required' describing each expected option
    :returns: dict mapping each option 'key' to its parsed value
    """
    configuration = {}
    for option in options:
        try:
            if option.get('type') == 'str':
                configuration[option.get('key')] = \
                    config_file.get(section, option.get('option'))
            elif option.get('type') == 'int':
                try:
                    configuration[option.get('key')] = \
                        config_file.getint(section, option.get('option'))
                except ValueError:
                    print('Error: Expected an integer value for {0}'.format(
                        option.get('option')))
                    sys.exit(1)
            elif option.get('type') == 'float':
                try:
                    configuration[option.get('key')] = \
                        config_file.getfloat(section, option.get('option'))
                except ValueError:
                    print('Error: Expected an float value for {0}'.format(
                        option.get('option')))
                    sys.exit(1)
            elif option.get('type') == 'bool':
                try:
                    configuration[option.get('key')] = \
                        config_file.getboolean(section, option.get('option'))
                except ValueError:
                    print('Error: Expected an boolean value for {0}'.format(
                        option.get('option')))
                    sys.exit(1)
            elif option.get('type') == 'dict':
                # Dict options are stored as Python literals in the file.
                configuration[option.get('key')] = ast.literal_eval(
                    config_file.get(section, option.get('option')))
            else:
                # Unknown types fall back to a plain string read.
                configuration[option.get('key')] = \
                    config_file.get(section, option.get('option'))
        except ConfigParser.NoOptionError:
            # Missing options are only fatal when marked required.
            if option.get('required'):
                print('Missing [{0}] option "{1}" in configuration'.format(
                    section, option.get('option')))
                sys.exit(1)
    return configuration
Parse the section options
62,194
def main():
    """Main function called from dynamic-dynamodb.

    Dispatches on the global options: dump the parsed configuration,
    manage the daemon (start/stop/restart/foreground), or run the
    scaling loop directly (once or forever).
    """
    try:
        if get_global_option('show_config'):
            # Use the print function for consistency with the rest of the
            # code base (the py2-only `print json.dumps(...)` statement
            # broke on Python 3).
            print(json.dumps(config.get_configuration(), indent=2))
        elif get_global_option('daemon'):
            daemon = DynamicDynamoDBDaemon(
                '{0}/dynamic-dynamodb.{1}.pid'.format(
                    get_global_option('pid_file_dir'),
                    get_global_option('instance')))
            if get_global_option('daemon') == 'start':
                logger.debug('Starting daemon')
                try:
                    daemon.start()
                    logger.info('Daemon started')
                except IOError as error:
                    logger.error(
                        'Could not create pid file: {0}'.format(error))
                    logger.error('Daemon not started')
            elif get_global_option('daemon') == 'stop':
                logger.debug('Stopping daemon')
                daemon.stop()
                logger.info('Daemon stopped')
                sys.exit(0)
            elif get_global_option('daemon') == 'restart':
                logger.debug('Restarting daemon')
                daemon.restart()
                logger.info('Daemon restarted')
            elif get_global_option('daemon') in ['foreground', 'fg']:
                logger.debug('Starting daemon in foreground')
                daemon.run()
                logger.info('Daemon started in foreground')
            else:
                print(
                    'Valid options for --daemon are start, '
                    'stop, restart, and foreground')
                sys.exit(1)
        else:
            if get_global_option('run_once'):
                execute()
            else:
                while True:
                    execute()
    except Exception as error:
        logger.exception(error)
Main function called from dynamic - dynamodb
62,195
def decode_tve_parameter(data):
    """Generic byte decoding function for TVE parameters.

    Returns a ``({name: value}, bytes_consumed)`` pair on success, or
    ``(None, 0)`` when the buffer does not start with a decodable
    parameter.
    """
    (header_val,) = struct.unpack(nontve_header, data[:nontve_header_len])
    if header_val == 1023:
        # Custom (non-TVE) parameter: a 16-bit total length follows the
        # header, and the vendor subtype sits 4 bytes before the end.
        (total_len,) = struct.unpack(
            '!H', data[nontve_header_len:nontve_header_len + 2])
        (subtype,) = struct.unpack('!H', data[total_len - 4:total_len - 2])
        name, fmt = ext_param_formats[subtype]
        (value,) = struct.unpack(fmt, data[total_len - 2:total_len])
        return {name: value}, total_len

    # TVE parameter: the high bit of the first byte must be set; the
    # remaining 7 bits are the parameter type.
    (first_byte,) = struct.unpack(tve_header, data[:tve_header_len])
    if not first_byte & 0b10000000:
        return None, 0
    ptype = first_byte & 0x7f
    if ptype not in tve_param_formats:
        # unknown parameter type
        return None, 0
    name, fmt = tve_param_formats[ptype]
    logger.debug('found %s (type=%s)', name, ptype)
    span = tve_header_len + struct.calcsize(fmt)
    try:
        # note: the full unpacked tuple is returned, not a scalar
        values = struct.unpack(fmt, data[tve_header_len:span])
    except struct.error:
        # buffer too short for the declared format
        return None, 0
    return {name: values}, span
Generic byte decoding function for TVE parameters.
62,196
def read(filename):
    """Get the long description from a file.

    The file is resolved relative to the module-level ``here`` directory
    and decoded as UTF-8.
    """
    full_path = os.path.join(here, filename)
    with codecs.open(full_path, encoding='utf-8') as handle:
        contents = handle.read()
    return contents
Get the long description from a file.
62,197
def deserialize(self):
    """Turns a sequence of bytes (self.msgbytes) into a message dictionary.

    The result is stored in ``self.msgdict`` keyed by the message name,
    with 'Ver', 'Type' and 'ID' header fields added. Raises LLRPError
    when there are no bytes to parse or no decoder is registered for the
    message type; always returns '' otherwise (callers read
    ``self.msgdict`` for the result).
    """
    if self.msgbytes is None:
        raise LLRPError('No message bytes to deserialize.')
    data = self.msgbytes
    # Fixed-size header: combined version/type field, total message
    # length, and message ID.
    msgtype, length, msgid = struct.unpack(self.full_hdr_fmt,
                                           data[:self.full_hdr_len])
    # The 3 bits above the 10-bit type field carry the protocol version.
    ver = (msgtype >> 10) & BITMASK(3)
    msgtype = msgtype & BITMASK(10)
    try:
        # Look up the human-readable name and its registered decoder.
        name = Message_Type2Name[msgtype]
        logger.debug('deserializing %s command', name)
        decoder = Message_struct[name]['decode']
    except KeyError:
        raise LLRPError('Cannot find decoder for message type '
                        '{}'.format(msgtype))
    # Body spans from the end of the header to the declared length.
    body = data[self.full_hdr_len:length]
    try:
        self.msgdict = {name: dict(decoder(body))}
        self.msgdict[name]['Ver'] = ver
        self.msgdict[name]['Type'] = msgtype
        self.msgdict[name]['ID'] = msgid
        logger.debug('done deserializing %s command', name)
    except ValueError:
        # NOTE(review): decoder(body) is called a second time here just
        # to build the log message; it may itself raise or be costly --
        # confirm this is intentional.
        logger.exception('Unable to decode body %s, %s', body,
                         decoder(body))
    except LLRPError:
        logger.exception('Problem with %s message format', name)
        return ''
    return ''
Turns a sequence of bytes into a message dictionary.
62,198
def parseReaderConfig(self, confdict):
    """Parse a reader configuration dictionary.

    Walks the 'Parameter...' entries of *confdict* and builds a flat
    dict of configuration values; a custom (type 1023) parameter with
    vendor 25882 / subtype 37 is decoded as a 16-bit temperature.
    """
    logger.debug('parseReaderConfig input: %s', confdict)
    parsed = {}
    for key, entry in confdict.items():
        if not key.startswith('Parameter'):
            continue
        ptype = entry['Type']
        payload = entry['Data']
        # Vendor/Subtype are only present on custom parameters.
        vendor, subtype = None, None
        try:
            vendor, subtype = entry['Vendor'], entry['Subtype']
        except KeyError:
            pass
        if ptype == 1023:
            if vendor == 25882 and subtype == 37:
                # 16-bit big-endian temperature value
                tempc = struct.unpack('!H', payload)[0]
                parsed.update(temperature=tempc)
        else:
            # standard parameter: store raw data keyed by type
            parsed[ptype] = payload
    return parsed
Parse a reader configuration dictionary.
62,199
def parseCapabilities(self, capdict):
    """Parse a capabilities dictionary and adjust instance settings.

    Validates the requested antenna set against the reader's advertised
    maximum, builds the transmit-power table and applies the requested
    power, then selects the reader RF mode (by ``self.mode_identifier``
    when given) and checks the requested Tari against that mode's
    limits.

    :param capdict: capabilities dictionary reported by the reader
    :raises ReaderConfigurationError: on an invalid antenna set, an
        unknown mode_identifier, or an incompatible Tari
    """
    # --- antenna validation ---
    gdc = capdict['GeneralDeviceCapabilities']
    max_ant = gdc['MaxNumberOfAntennaSupported']
    if max(self.antennas) > max_ant:
        reqd = ','.join(map(str, self.antennas))
        avail = ','.join(map(str, range(1, max_ant + 1)))
        errmsg = ('Invalid antenna set specified: requested={},'
                  ' available={}; ignoring invalid antennas'.format(
                      reqd, avail))
        raise ReaderConfigurationError(errmsg)
    logger.debug('set antennas: %s', self.antennas)

    # --- transmit power ---
    bandcap = capdict['RegulatoryCapabilities']['UHFBandCapabilities']
    self.tx_power_table = self.parsePowerTable(bandcap)
    logger.debug('tx_power_table: %s', self.tx_power_table)
    self.setTxPower(self.tx_power)

    # --- reader mode selection ---
    regcap = capdict['RegulatoryCapabilities']
    modes = regcap['UHFBandCapabilities']['UHFRFModeTable']
    # natural sort so e.g. Mode10 follows Mode9
    mode_list = [modes[k] for k in sorted(modes.keys(), key=natural_keys)]
    if self.mode_identifier is not None:
        logger.debug('Setting mode from mode_identifier=%s',
                     self.mode_identifier)
        try:
            mode = [mo for mo in mode_list
                    if mo['ModeIdentifier'] == self.mode_identifier][0]
            self.reader_mode = mode
        except IndexError:
            valid_modes = sorted(mo['ModeIdentifier'] for mo in mode_list)
            errstr = ('Invalid mode_identifier; valid mode_identifiers'
                      ' are {}'.format(valid_modes))
            raise ReaderConfigurationError(errstr)

    # --- Tari compatibility check ---
    if self.reader_mode and self.tari:
        if self.reader_mode['MinTari'] < self.tari < self.reader_mode['MaxTari']:
            logger.debug('Overriding mode Tari %s with requested Tari %s',
                         self.reader_mode['MaxTari'], self.tari)
        else:
            errstr = ('Requested Tari {} is incompatible with selected '
                      'mode {}'.format(self.tari, self.reader_mode))
            # BUG FIX: errstr was built but never raised, so an
            # incompatible Tari was silently accepted; raise like the
            # antenna/mode validation branches above do.
            raise ReaderConfigurationError(errstr)
    logger.info('using reader mode: %s', self.reader_mode)
Parse a capabilities dictionary and adjust instance settings.