Columns: idx (int64, 0–63k) · question (string, lengths 61–4.03k) · target (string, lengths 6–1.23k)
1,800
def by_readings(self, role_names=('', 'Author')):
    if not spectator_apps.is_enabled('reading'):
        raise ImproperlyConfigured(
            "To use the CreatorManager.by_readings() method, "
            "'spectator.reading' must be in INSTALLED_APPS.")
    qs = self.get_queryset()
    qs = (qs.filter(publication_roles__role_name__in=role_names)
            .exclude(publications__reading__isnull=True)
            .annotate(num_readings=Count('publications__reading'))
            .order_by('-num_readings', 'name_sort'))
    return qs
The Creators who have been most-read, ordered by number of readings.
1,801
def by_events(self, kind=None):
    if not spectator_apps.is_enabled('events'):
        raise ImproperlyConfigured(
            "To use the CreatorManager.by_events() method, "
            "'spectator.events' must be in INSTALLED_APPS.")
    qs = self.get_queryset()
    if kind is not None:
        qs = qs.filter(events__kind=kind)
    qs = (qs.annotate(num_events=Count('events', distinct=True))
            .order_by('-num_events', 'name_sort'))
    return qs
Get the Creators involved in the most Events.
1,802
def by_works(self, kind=None, role_name=None):
    if not spectator_apps.is_enabled('events'):
        raise ImproperlyConfigured(
            "To use the CreatorManager.by_works() method, "
            "'spectator.events' must be in INSTALLED_APPS.")
    qs = self.get_queryset()
    filter_kwargs = {}
    if kind is not None:
        filter_kwargs['works__kind'] = kind
    if role_name is not None:
        filter_kwargs['work_roles__role_name'] = role_name
    if filter_kwargs:
        qs = qs.filter(**filter_kwargs)
    qs = (qs.annotate(num_works=Count('works', distinct=True))
            .order_by('-num_works', 'name_sort'))
    return qs
Get the Creators involved in the most Works.
1,803
def index():
    page = request.values.get('page', 1, type=int)
    size = request.values.get('size', 2, type=int)
    search = ExampleSearch()[(page - 1) * size:page * size]
    if 'q' in request.values:
        search = search.query(QueryString(query=request.values.get('q')))
    search = search.sort(request.values.get('sort', 'title'))
    search = ExampleSearch.faceted_search(search=search)
    results = search.execute().to_dict()
    return jsonify({'hits': results.get('hits')})
Query Elasticsearch using Invenio query syntax.
1,804
def clean_options(self, using_keytab=False, principal=None,
                  keytab_file=None, ccache_file=None, password=None):
    cleaned = {}
    if using_keytab:
        if principal is None:
            raise ValueError('Principal is required when using key table.')
        princ_name = gssapi.names.Name(
            principal, gssapi.names.NameType.kerberos_principal)
        if keytab_file is None:
            cleaned['keytab'] = DEFAULT_KEYTAB
        elif not os.path.exists(keytab_file):
            raise ValueError(
                'Keytab file {0} does not exist.'.format(keytab_file))
        else:
            cleaned['keytab'] = keytab_file
    else:
        if principal is None:
            principal = get_login()
        princ_name = gssapi.names.Name(principal, gssapi.names.NameType.user)
    cleaned['using_keytab'] = using_keytab
    cleaned['principal'] = princ_name
    cleaned['ccache'] = ccache_file or DEFAULT_CCACHE
    cleaned['password'] = password
    return cleaned
Clean the supplied arguments into options for the related object.
1,805
def init_with_keytab(self):
    creds_opts = {
        'usage': 'initiate',
        'name': self._cleaned_options['principal'],
    }
    store = {}
    if self._cleaned_options['keytab'] != DEFAULT_KEYTAB:
        store['client_keytab'] = self._cleaned_options['keytab']
    if self._cleaned_options['ccache'] != DEFAULT_CCACHE:
        store['ccache'] = self._cleaned_options['ccache']
    if store:
        creds_opts['store'] = store
    creds = gssapi.creds.Credentials(**creds_opts)
    try:
        creds.lifetime
    except gssapi.exceptions.ExpiredCredentialsError:
        new_creds_opts = copy.deepcopy(creds_opts)
        if 'store' in new_creds_opts:
            new_creds_opts['store']['ccache'] = _get_temp_ccache()
        else:
            new_creds_opts['store'] = {'ccache': _get_temp_ccache()}
        creds = gssapi.creds.Credentials(**new_creds_opts)
        _store = None
        if self._cleaned_options['ccache'] != DEFAULT_CCACHE:
            _store = {'ccache': store['ccache']}
        creds.store(usage='initiate', store=_store, overwrite=True)
Initialize credential cache with keytab.
1,806
def init_with_password(self):
    creds_opts = {
        'usage': 'initiate',
        'name': self._cleaned_options['principal'],
    }
    if self._cleaned_options['ccache'] != DEFAULT_CCACHE:
        creds_opts['store'] = {'ccache': self._cleaned_options['ccache']}
    cred = gssapi.creds.Credentials(**creds_opts)
    try:
        cred.lifetime
    except gssapi.exceptions.ExpiredCredentialsError:
        password = self._cleaned_options['password']
        if not password:
            if not sys.stdin.isatty():
                raise IOError(
                    'krbContext is not running from a terminal. So, you '
                    'need to run kinit with your principal manually before'
                    ' anything goes.')
            password = getpass.getpass()
        cred = gssapi.raw.acquire_cred_with_password(
            self._cleaned_options['principal'], password)
        ccache = self._cleaned_options['ccache']
        if ccache == DEFAULT_CCACHE:
            gssapi.raw.store_cred(
                cred.creds, usage='initiate', overwrite=True)
        else:
            gssapi.raw.store_cred_into(
                {'ccache': ccache}, cred.creds, usage='initiate',
                overwrite=True)
Initialize credential cache with password.
1,807
def templates(self):
    templates = {}
    result = []
    if self.entry_point_group_templates:
        result = self.load_entry_point_group_templates(
            self.entry_point_group_templates) or []
    for template in result:
        for name, path in template.items():
            templates[name] = path
    return templates
Generate a dictionary with template names and file paths.
1,808
def register_mappings(self, alias, package_name):
    if ES_VERSION[0] == 2:
        try:
            resource_listdir(package_name, 'v2')
            package_name += '.v2'
        except (OSError, IOError) as ex:
            if getattr(ex, 'errno', 0) != errno.ENOENT:
                raise
            warnings.warn(
                "Having mappings in a path which doesn't specify the "
                "Elasticsearch version is deprecated. Please move your "
                "mappings to a subfolder named according to the "
                "Elasticsearch version which your mappings are intended "
                "for. (e.g. '{}/v2/{}')".format(package_name, alias),
                PendingDeprecationWarning)
    else:
        package_name = '{}.v{}'.format(package_name, ES_VERSION[0])

    def _walk_dir(aliases, *parts):
        root_name = build_index_name(self.app, *parts)
        resource_name = os.path.join(*parts)
        if root_name not in aliases:
            self.number_of_indexes += 1
        data = aliases.get(root_name, {})
        for filename in resource_listdir(package_name, resource_name):
            index_name = build_index_name(self.app, *(parts + (filename,)))
            file_path = os.path.join(resource_name, filename)
            if resource_isdir(package_name, file_path):
                _walk_dir(data, *(parts + (filename,)))
                continue
            ext = os.path.splitext(filename)[1]
            if ext not in {'.json'}:
                continue
            assert index_name not in data, 'Duplicate index'
            data[index_name] = self.mappings[index_name] = resource_filename(
                package_name, os.path.join(resource_name, filename))
            self.number_of_indexes += 1
        aliases[root_name] = data

    _walk_dir(self.aliases, alias)
Register mappings from a package under given alias.
1,809
def register_templates(self, directory):
    try:
        resource_listdir(directory, 'v{}'.format(ES_VERSION[0]))
        directory = '{}/v{}'.format(directory, ES_VERSION[0])
    except (OSError, IOError) as ex:
        if getattr(ex, 'errno', 0) == errno.ENOENT:
            raise OSError(
                "Please move your templates to a subfolder named "
                "according to the Elasticsearch version "
                "which your templates are intended "
                "for. (e.g. '{}.v{}')".format(directory, ES_VERSION[0]))

    result = {}
    module_name, parts = directory.split('.')[0], directory.split('.')[1:]
    parts = tuple(parts)

    def _walk_dir(parts):
        resource_name = os.path.join(*parts)
        for filename in resource_listdir(module_name, resource_name):
            template_name = build_index_name(
                self.app, *(parts[1:] + (filename,)))
            file_path = os.path.join(resource_name, filename)
            if resource_isdir(module_name, file_path):
                _walk_dir((parts + (filename,)))
                continue
            ext = os.path.splitext(filename)[1]
            if ext not in {'.json'}:
                continue
            result[template_name] = resource_filename(
                module_name, os.path.join(resource_name, filename))

    _walk_dir(parts)
    return result
Register templates from the provided directory.
1,810
def _client_builder(self):
    client_config = self.app.config.get('SEARCH_CLIENT_CONFIG') or {}
    client_config.setdefault(
        'hosts', self.app.config.get('SEARCH_ELASTIC_HOSTS'))
    client_config.setdefault('connection_class', RequestsHttpConnection)
    return Elasticsearch(**client_config)
Build Elasticsearch client.
1,811
def client(self):
    if self._client is None:
        self._client = self._client_builder()
    return self._client
Return client for current application.
1,812
def flush_and_refresh(self, index):
    self.client.indices.flush(wait_if_ongoing=True, index=index)
    self.client.indices.refresh(index=index)
    self.client.cluster.health(wait_for_status='yellow', request_timeout=30)
    return True
Flush and refresh one or more indices.
1,813
def cluster_version(self):
    versionstr = self.client.info()['version']['number']
    return [int(x) for x in versionstr.split('.')]
Get version of Elasticsearch running on the cluster.
1,814
def active_aliases(self):
    whitelisted_aliases = self.app.config.get('SEARCH_MAPPINGS')
    if whitelisted_aliases is None:
        return self.aliases
    else:
        return {k: v for k, v in self.aliases.items()
                if k in whitelisted_aliases}
Get a filtered list of aliases based on configuration.
1,815
def create(self, ignore=None):
    ignore = ignore or []

    def _create(tree_or_filename, alias=None):
        for name, value in tree_or_filename.items():
            if isinstance(value, dict):
                for result in _create(value, alias=name):
                    yield result
            else:
                with open(value, 'r') as body:
                    yield name, self.client.indices.create(
                        index=name,
                        body=json.load(body),
                        ignore=ignore,
                    )
        if alias:
            yield alias, self.client.indices.put_alias(
                index=list(_get_indices(tree_or_filename)),
                name=alias,
                ignore=ignore,
            )

    for result in _create(self.active_aliases):
        yield result
Yield tuple with created index name and responses from a client.
1,816
def put_templates(self, ignore=None):
    ignore = ignore or []

    def _replace_prefix(template_path, body):
        pattern = '__SEARCH_INDEX_PREFIX__'
        prefix = self.app.config['SEARCH_INDEX_PREFIX'] or ''
        if prefix:
            assert pattern in body, (
                "You are using the prefix `{0}`, "
                "but the template `{1}` does not contain the "
                "pattern `{2}`.".format(prefix, template_path, pattern))
        return body.replace(pattern, prefix)

    def _put_template(template):
        with open(self.templates[template], 'r') as fp:
            body = fp.read()
            replaced_body = _replace_prefix(self.templates[template], body)
            return self.templates[template], \
                current_search_client.indices.put_template(
                    name=template,
                    body=json.loads(replaced_body),
                    ignore=ignore,
                )

    for template in self.templates:
        yield _put_template(template)
Yield tuple with registered template and response from client.
1,817
def delete(self, ignore=None):
    ignore = ignore or []

    def _delete(tree_or_filename, alias=None):
        if alias:
            yield alias, self.client.indices.delete_alias(
                index=list(_get_indices(tree_or_filename)),
                name=alias,
                ignore=ignore,
            )
        for name, value in tree_or_filename.items():
            if isinstance(value, dict):
                for result in _delete(value, alias=name):
                    yield result
            else:
                yield name, self.client.indices.delete(
                    index=name,
                    ignore=ignore,
                )

    for result in _delete(self.active_aliases):
        yield result
Yield tuple with deleted index name and responses from a client.
1,818
def main():
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], "hv",
            ["help", "nack=", "servers=", "queues="])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit()
    nack = 0.0
    verbose = False
    servers = "localhost:7712,localhost:7711"
    queues = "test"
    for o, a in opts:
        if o == "-v":
            verbose = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o == "--nack":
            nack = float(a)
        elif o == "--servers":
            servers = a
        elif o == "--queues":
            queues = a
        else:
            assert False, "unhandled option"
    servers = servers.split(",")
    queues = queues.split(",")
    c = Client(servers)
    c.connect()
    while True:
        jobs = c.get_job(queues)
        for queue_name, job_id, job in jobs:
            # nack the job with probability `nack`, otherwise ack it
            rnd = random.random()
            if rnd >= nack:
                print(">>> received job:", job_id)
                c.ack_job(job_id)
            else:
                print(">>> bouncing job:", job_id)
                c.nack_job(job_id)
Start the poor_consumer.
1,819
def connect(self):
    self.connected_node = None
    for i, node in self.nodes.items():
        host, port = i.split(':')
        port = int(port)
        redis_client = redis.Redis(host, port, **self.client_kw_args)
        try:
            ret = redis_client.execute_command('HELLO')
            format_version, node_id = ret[0], ret[1]
            others = ret[2:]
            self.nodes[i] = Node(node_id, host, port, redis_client)
            self.connected_node = self.nodes[i]
        except redis.exceptions.ConnectionError:
            pass
    if not self.connected_node:
        raise ConnectionError("couldn't connect to any nodes")
    logger.info("connected to node %s" % self.connected_node)
Connect to one of the Disque nodes.
1,820
def execute_command(self, *args, **kwargs):
    try:
        return self.get_connection().execute_command(*args, **kwargs)
    except ConnectionError as e:
        logger.warn('trying to reconnect')
        self.connect()
        logger.warn('connected')
        raise
Execute a command on the connected server.
1,821
def add_job(self, queue_name, job, timeout=200, replicate=None, delay=None,
            retry=None, ttl=None, maxlen=None, asynchronous=None):
    command = ['ADDJOB', queue_name, job, timeout]
    if replicate:
        command += ['REPLICATE', replicate]
    if delay:
        command += ['DELAY', delay]
    if retry is not None:
        command += ['RETRY', retry]
    if ttl:
        command += ['TTL', ttl]
    if maxlen:
        command += ['MAXLEN', maxlen]
    if asynchronous:
        command += ['ASYNC']
    logger.debug("sending job - %s", command)
    job_id = self.execute_command(*command)
    logger.debug("sent job - %s", command)
    logger.debug("job_id: %s " % job_id)
    return job_id
Add a job to a queue.
1,822
def get_job(self, queues, timeout=None, count=None, nohang=False,
            withcounters=False):
    assert queues
    command = ['GETJOB']
    if nohang:
        command += ['NOHANG']
    if timeout:
        command += ['TIMEOUT', timeout]
    if count:
        command += ['COUNT', count]
    if withcounters:
        command += ['WITHCOUNTERS']
    command += ['FROM'] + queues
    results = self.execute_command(*command)
    if not results:
        return []
    if withcounters:
        return [(job_id, queue_name, job, nacks, additional_deliveries)
                for job_id, queue_name, job, _, nacks, _,
                additional_deliveries in results]
    else:
        return [(job_id, queue_name, job)
                for job_id, queue_name, job in results]
Return some number of jobs from specified queues.
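A minimal round trip with the client methods above might look like this sketch (the node address and queue name are illustrative, not part of the source):

client = Client(['localhost:7711'])            # hypothetical local Disque node
client.connect()
client.add_job('test', 'payload', retry=0)
for job_id, queue_name, job in client.get_job(['test'], timeout=1000):
    client.ack_job(job_id)                     # acknowledge so the job is not redelivered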
1,823
def show(self, job_id, return_dict=False):
    rtn = self.execute_command('SHOW', job_id)
    if return_dict:
        grouped = self._grouper(rtn, 2)
        rtn = dict((a, b) for a, b in grouped)
    return rtn
Describe the job.
1,824
def pause(self, queue_name, kw_in=None, kw_out=None, kw_all=None,
          kw_none=None, kw_state=None, kw_bcast=None):
    command = ["PAUSE", queue_name]
    if kw_in:
        command += ["in"]
    if kw_out:
        command += ["out"]
    if kw_all:
        command += ["all"]
    if kw_none:
        command += ["none"]
    if kw_state:
        command += ["state"]
    if kw_bcast:
        command += ["bcast"]
    return self.execute_command(*command)
Pause a queue.
1,825
def qscan(self, cursor=0, count=None, busyloop=None, minlen=None,
          maxlen=None, importrate=None):
    command = ["QSCAN", cursor]
    if count:
        command += ["COUNT", count]
    if busyloop:
        command += ["BUSYLOOP"]
    if minlen:
        command += ["MINLEN", minlen]
    if maxlen:
        command += ["MAXLEN", maxlen]
    if importrate:
        command += ["IMPORTRATE", importrate]
    return self.execute_command(*command)
Iterate all the existing queues in the local node.
1,826
def jscan(self, cursor=0, count=None, busyloop=None, queue=None,
          state=None, reply=None):
    command = ["JSCAN", cursor]
    if count:
        command += ["COUNT", count]
    if busyloop:
        command += ["BUSYLOOP"]
    if queue:
        command += ["QUEUE", queue]
    if isinstance(state, list):
        for s in state:
            command += ["STATE", s]
    if reply:
        command += ["REPLY", reply]
    return self.execute_command(*command)
Iterate all the existing jobs in the local node.
1,827
def build_index_name(app, *parts):
    base_index = os.path.splitext(
        '-'.join([part for part in parts if part]))[0]
    return prefix_index(app=app, index=base_index)
Build an index name from parts.
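The join-and-strip step can be checked in isolation; a small sketch of the same logic (prefix_index would then prepend any configured SEARCH_INDEX_PREFIX):

import os

parts = ('records', 'record-v1.0.0.json')
base_index = os.path.splitext('-'.join(p for p in parts if p))[0]
print(base_index)    # -> 'records-record-v1.0.0'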
1,828
def es_version_check(f):
    @wraps(f)
    def inner(*args, **kwargs):
        cluster_ver = current_search.cluster_version[0]
        client_ver = ES_VERSION[0]
        if cluster_ver != client_ver:
            raise click.ClickException(
                'Elasticsearch version mismatch. Invenio was installed with '
                'Elasticsearch v{client_ver}.x support, but the cluster runs '
                'Elasticsearch v{cluster_ver}.x.'.format(
                    client_ver=client_ver,
                    cluster_ver=cluster_ver,
                ))
        return f(*args, **kwargs)
    return inner
Decorator to check Elasticsearch version.
1,829
def init(force):
    click.secho('Creating indexes...', fg='green', bold=True,
                file=sys.stderr)
    with click.progressbar(
            current_search.create(ignore=[400] if force else None),
            length=current_search.number_of_indexes) as bar:
        for name, response in bar:
            bar.label = name
    click.secho('Putting templates...', fg='green', bold=True,
                file=sys.stderr)
    with click.progressbar(
            current_search.put_templates(ignore=[400] if force else None),
            length=len(current_search.templates.keys())) as bar:
        for response in bar:
            bar.label = response
Initialize registered aliases and mappings.
1,830
def destroy(force):
    click.secho('Destroying indexes...', fg='red', bold=True,
                file=sys.stderr)
    with click.progressbar(
            current_search.delete(ignore=[400, 404] if force else None),
            length=current_search.number_of_indexes) as bar:
        for name, response in bar:
            bar.label = name
Destroy all indexes.
1,831
def delete(index_name, force, verbose):
    result = current_search_client.indices.delete(
        index=index_name,
        ignore=[400, 404] if force else None,
    )
    if verbose:
        click.echo(json.dumps(result))
Delete index by its name.
1,832
def put(index_name, doc_type, identifier, body, force, verbose):
    result = current_search_client.index(
        index=index_name,
        doc_type=doc_type or index_name,
        id=identifier,
        body=json.load(body),
        op_type='index' if force or identifier is None else 'create',
    )
    if verbose:
        click.echo(json.dumps(result))
Index input data.
1,833
def get_records(self, ids):
    return self.query(Ids(values=[str(id_) for id_ in ids]))
Return records by their identifiers.
1,834
def faceted_search(cls, query=None, filters=None, search=None):
    search_ = search or cls()

    class RecordsFacetedSearch(FacetedSearch):
        index = prefix_index(app=current_app, index=search_._index[0])
        doc_types = getattr(search_.Meta, 'doc_types', ['_all'])
        fields = getattr(search_.Meta, 'fields', ('*',))
        facets = getattr(search_.Meta, 'facets', {})

        def search(self):
            if ES_VERSION[0] > 2:
                return search_.response_class(FacetedResponse)
            return search_.response_class(partial(FacetedResponse, self))

    return RecordsFacetedSearch(query=query, filters=filters or {})
Return faceted search instance with defaults set.
1,835
def with_preference_param(self):
    user_hash = self._get_user_hash()
    if user_hash:
        return self.params(preference=user_hash)
    return self
Add the preference param to the ES request and return a new Search.
1,836
def _get_user_agent(self):
    user_agent = request.headers.get('User-Agent')
    if user_agent:
        user_agent = user_agent.encode('utf-8')
    return user_agent or ''
Retrieve the request's User-Agent, if available.
1,837
def _get_user_hash(self):
    if request:
        user_hash = '{ip}-{ua}'.format(
            ip=request.remote_addr, ua=self._get_user_agent())
        alg = hashlib.md5()
        alg.update(user_hash.encode('utf8'))
        return alg.hexdigest()
    return None
Calculate a digest based on the request's User-Agent and IP address.
1,838
def beautify(filename=None, json_str=None):
    if filename is not None:
        with open(filename) as json_file:
            json_str = json.load(json_file)
    return json.dumps(json_str, indent=4, sort_keys=True)
Beautify JSON string or file.
1,839
def replace(pretty, old_str, new_str):
    out_str = ''
    line_number = 1
    changes = 0
    for line in pretty.splitlines(keepends=True):
        new_line = line.replace(old_str, new_str)
        if line.find(old_str) != -1:
            logging.debug('%s', line_number)
            logging.debug('< %s', line)
            logging.debug('> %s', new_line)
            changes += 1
        out_str += new_line
        line_number += 1
    logging.info('Total changes(%s): %s', old_str, changes)
    return out_str
Replace strings, giving some info on where the replacement was done.
1,840
def receive_connection():
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(("localhost", 8080))
    server.listen(1)
    client = server.accept()[0]
    server.close()
    return client
Wait for and then return a connected socket.
1,841
def send_message(client, message):
    print(message)
    client.send(
        "HTTP/1.1 200 OK\r\n\r\n{}".format(message).encode("utf-8"))
    client.close()
Send message to client and close the connection.
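These two helpers are typically paired to catch an OAuth redirect on localhost. A rough sketch (the query-string layout of the redirect request is an assumption, not guaranteed by the source):

client = receive_connection()
data = client.recv(1024).decode("utf-8")    # e.g. "GET /?state=...&code=... HTTP/1.1"
param_tokens = data.split(" ", 2)[1].split("?", 1)[1].split("&")
params = dict(token.split("=") for token in param_tokens)
send_message(client, "Received code: {}".format(params.get("code")))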
1,842
def watch(logger_name, level=DEBUG, out=stdout):
    watcher = Watcher(logger_name)
    watcher.watch(level, out)
    return watcher
Quick wrapper for using the Watcher.
1,843
def get_user_agent():
    from sys import platform, version_info
    template = "neobolt/{} Python/{}.{}.{}-{}-{} ({})"
    fields = (version,) + tuple(version_info) + (platform,)
    return template.format(*fields)
Obtain the default user agent string sent to the server after a successful handshake.
1,844
def import_best(c_module, py_module):
    from importlib import import_module
    from os import getenv
    pure_python = getenv("PURE_PYTHON", "")
    if pure_python:
        return import_module(py_module)
    else:
        try:
            return import_module(c_module)
        except ImportError:
            return import_module(py_module)
Import the best available module, with C preferred to pure Python.
1,845
def hydrate(self, values):
    def hydrate_(obj):
        if isinstance(obj, Structure):
            try:
                f = self.hydration_functions[obj.tag]
            except KeyError:
                return obj
            else:
                return f(*map(hydrate_, obj.fields))
        elif isinstance(obj, list):
            return list(map(hydrate_, obj))
        elif isinstance(obj, dict):
            return {key: hydrate_(value) for key, value in obj.items()}
        else:
            return obj

    return tuple(map(hydrate_, values))
Convert PackStream values into native values.
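For a rough sense of the recursion: a hydration function registered for a tag turns tagged structures into native objects at any nesting depth. This sketch uses stand-in types, not the real PackStream Structure or registry:

from collections import namedtuple

Structure = namedtuple("Structure", ["tag", "fields"])    # stand-in type
Point = namedtuple("Point", ["x", "y"])

class DemoHydrator:
    hydration_functions = {b"P": Point}    # tag -> constructor
    hydrate = hydrate                      # reuse the function above as a method

print(DemoHydrator().hydrate([{"loc": Structure(b"P", [1, 2])}]))
# -> ({'loc': Point(x=1, y=2)},)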
1,846
def authorize_url(self, duration, scopes, state, implicit=False):
    if self.redirect_uri is None:
        raise InvalidInvocation("redirect URI not provided")
    if implicit and not isinstance(self, UntrustedAuthenticator):
        raise InvalidInvocation(
            "Only UntrustedAuthenticator instances can "
            "use the implicit grant flow.")
    if implicit and duration != "temporary":
        raise InvalidInvocation(
            "The implicit grant flow only supports "
            "temporary access tokens.")
    params = {
        "client_id": self.client_id,
        "duration": duration,
        "redirect_uri": self.redirect_uri,
        "response_type": "token" if implicit else "code",
        "scope": " ".join(scopes),
        "state": state,
    }
    url = self._requestor.reddit_url + const.AUTHORIZATION_PATH
    request = Request("GET", url, params=params)
    return request.prepare().url
Return the URL used out-of-band to grant access to your application.
1,847
def revoke_token(self, token, token_type=None):
    data = {"token": token}
    if token_type is not None:
        data["token_type_hint"] = token_type
    url = self._requestor.reddit_url + const.REVOKE_TOKEN_PATH
    self._post(url, success_status=codes["no_content"], **data)
Ask Reddit to revoke the provided token.
1,848
def authorize(self, code):
    if self._authenticator.redirect_uri is None:
        raise InvalidInvocation("redirect URI not provided")
    self._request_token(
        code=code,
        grant_type="authorization_code",
        redirect_uri=self._authenticator.redirect_uri,
    )
Obtain and set authorization tokens based on code.
1,849
def refresh(self):
    if self.refresh_token is None:
        raise InvalidInvocation("refresh token not provided")
    self._request_token(
        grant_type="refresh_token", refresh_token=self.refresh_token)
Obtain a new access token from the refresh_token.
1,850
def refresh(self):
    grant_type = "https://oauth.reddit.com/grants/installed_client"
    self._request_token(grant_type=grant_type, device_id=self._device_id)
Obtain a new access token.
1,851
def refresh(self):
    self._request_token(
        grant_type="password",
        username=self._username,
        password=self._password,
    )
Obtain a new personal-use script type access token.
1,852
def request(self, *args, **kwargs):
    try:
        return self._http.request(*args, timeout=TIMEOUT, **kwargs)
    except Exception as exc:
        raise RequestException(exc, args, kwargs)
Issue the HTTP request, capturing any errors that may occur.
1,853
def _jamo_to_hangul_char(lead, vowel, tail=0):
    lead = ord(lead) - _JAMO_LEAD_OFFSET
    vowel = ord(vowel) - _JAMO_VOWEL_OFFSET
    tail = ord(tail) - _JAMO_TAIL_OFFSET if tail else 0
    return chr(tail + (vowel - 1) * 28 + (lead - 1) * 588 + _JAMO_OFFSET)
Return the Hangul character for the given jamo characters.
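A quick arithmetic check of the same formula, using the Unicode Hangul syllable algorithm's offsets (assumed to match the module constants above): lead ᄒ (U+1112), vowel ᅡ (U+1161), and tail ᆫ (U+11AB) compose to 한.

lead = 0x1112 - 0x10FF     # ᄒ -> lead index 19
vowel = 0x1161 - 0x1160    # ᅡ -> vowel index 1
tail = 0x11AB - 0x11A7     # ᆫ -> tail index 4
print(chr(tail + (vowel - 1) * 28 + (lead - 1) * 588 + 0xAC00))    # '한' (U+D55C)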
1,854
def _get_unicode_name(char):
    if char not in _JAMO_TO_NAME.keys() and char not in _HCJ_TO_NAME.keys():
        raise InvalidJamoError("Not jamo or nameless jamo character", char)
    else:
        if is_hcj(char):
            return _HCJ_TO_NAME[char]
        return _JAMO_TO_NAME[char]
Fetch the Unicode name for jamo characters.
1,855
def is_jamo(character):
    code = ord(character)
    return (0x1100 <= code <= 0x11FF or
            0xA960 <= code <= 0xA97C or
            0xD7B0 <= code <= 0xD7C6 or
            0xD7CB <= code <= 0xD7FB or
            is_hcj(character))
Test if a single character is a jamo character. Valid jamo includes all modern and archaic jamo, as well as all HCJ. Non-assigned code points are invalid.
1,856
def is_jamo_compound(character):
    if len(character) != 1:
        return False
    if is_jamo(character):
        return character in JAMO_COMPOUNDS
    return False
Test if a single character is a compound, i.e. a consonant cluster, double consonant, or diphthong.
1,857
def get_jamo_class(jamo):
    if jamo in JAMO_LEADS or jamo == chr(0x115F):
        return "lead"
    if jamo in JAMO_VOWELS or jamo == chr(0x1160) or \
            0x314F <= ord(jamo) <= 0x3163:
        return "vowel"
    if jamo in JAMO_TAILS:
        return "tail"
    else:
        raise InvalidJamoError("Invalid or classless jamo argument.", jamo)
Determine if a jamo character is a lead, vowel, or tail. Integers and U+11xx characters are valid arguments. HCJ consonants are not valid here.
1,858
def hangul_to_jamo(hangul_string):
    return (_ for _ in
            chain.from_iterable(_hangul_char_to_jamo(_)
                                for _ in hangul_string))
Convert a string of Hangul to jamo. Arguments may be iterables of characters.
1,859
def jamo_to_hangul(lead, vowel, tail=''):
    lead = hcj_to_jamo(lead, "lead")
    vowel = hcj_to_jamo(vowel, "vowel")
    if not tail or ord(tail) == 0:
        tail = None
    elif is_hcj(tail):
        tail = hcj_to_jamo(tail, "tail")
    if (is_jamo(lead) and get_jamo_class(lead) == "lead") and \
            (is_jamo(vowel) and get_jamo_class(vowel) == "vowel") and \
            ((not tail) or (is_jamo(tail) and get_jamo_class(tail) == "tail")):
        result = _jamo_to_hangul_char(lead, vowel, tail)
        if is_hangul_char(result):
            return result
    raise InvalidJamoError("Could not synthesize characters to Hangul.",
                           '\x00')
Return the Hangul character for the given jamo input. Integers corresponding to U+11xx jamo codepoints, U+11xx jamo characters, or HCJ are valid inputs.
1,860
def compose_jamo(*parts):
    for p in parts:
        if not (type(p) == str and len(p) == 1 and 2 <= len(parts) <= 3):
            raise TypeError("compose_jamo() expected 2-3 single characters "
                            "but received " + str(parts), '\x00')
    hcparts = [j2hcj(_) for _ in parts]
    hcparts = tuple(hcparts)
    if hcparts in _COMPONENTS_REVERSE_LOOKUP:
        return _COMPONENTS_REVERSE_LOOKUP[hcparts]
    raise InvalidJamoError(
        "Could not synthesize characters to compound: " +
        ", ".join(str(_) + "(U+" + str(hex(ord(_)))[2:] + ")"
                  for _ in parts), '\x00')
Return the compound jamo for the given jamo input. Integers corresponding to U+11xx jamo codepoints, U+11xx jamo characters, or HCJ are valid inputs.
1,861
def synth_hangul(string):
    raise NotImplementedError
    return ''.join([''.join(''.join(jamo_to_hcj(_)) for _ in string)])
Convert jamo characters in a string into HCJ as much as possible.
1,862
def authorization_error_class(response):
    message = response.headers.get("www-authenticate")
    if message:
        error = message.replace('"', "").rsplit("=", 1)[1]
    else:
        error = response.status_code
    return _auth_error_mapping[error](response)
Return an exception instance that maps to the OAuth Error.
1,863
def _last_bookmark(b0, b1):
    n = [None, None]
    _, _, n[0] = b0.rpartition(":")
    _, _, n[1] = b1.rpartition(":")
    for i in range(2):
        try:
            n[i] = int(n[i])
        except ValueError:
            raise ValueError("Invalid bookmark: {}".format(b0))
    return b0 if n[0] > n[1] else b1
Return the latest of two bookmarks, by looking for the maximum integer value following the last colon in the bookmark string.
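For instance, with simplified bookmark strings (the prefix format is illustrative; only the integer after the last colon matters):

print(_last_bookmark("bm:12", "bm:9"))    # -> 'bm:12', since 12 > 9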
1,864
def connect(address, **config):
    ssl_context = make_ssl_context(**config)
    last_error = None
    log_debug("[#0000]  C: <RESOLVE> %s", address)
    resolver = Resolver(custom_resolver=config.get("resolver"))
    resolver.addresses.append(address)
    resolver.custom_resolve()
    resolver.dns_resolve()
    for resolved_address in resolver.addresses:
        try:
            s = _connect(resolved_address, **config)
            s, der_encoded_server_certificate = _secure(
                s, address[0], ssl_context)
            connection = _handshake(
                s, resolved_address, der_encoded_server_certificate, **config)
        except Exception as error:
            last_error = error
        else:
            return connection
    if last_error is None:
        raise ServiceUnavailable(
            "Failed to resolve addresses for %s" % address)
    else:
        raise last_error
Connect, perform a handshake, and return a valid Connection object, assuming a protocol version can be agreed.
1,865
def _append(self, signature, fields=(), response=None):
    self.packer.pack_struct(signature, fields)
    self.output_buffer.chunk()
    self.output_buffer.chunk()
    self.responses.append(response)
Add a message to the outgoing queue.
1,866
def reset(self):
    def fail(metadata):
        raise ProtocolError("RESET failed %r" % metadata)

    log_debug("[#%04X]  C: RESET", self.local_port)
    self._append(b"\x0F", response=Response(self, on_failure=fail))
    self.sync()
Add a RESET message to the outgoing queue, send it, and consume all remaining messages.
1,867
def _send(self):
    data = self.output_buffer.view()
    if not data:
        return
    if self.closed():
        raise self.Error("Failed to write to closed connection "
                         "{!r}".format(self.server.address))
    if self.defunct():
        raise self.Error("Failed to write to defunct connection "
                         "{!r}".format(self.server.address))
    self.socket.sendall(data)
    self.output_buffer.clear()
Send all queued messages to the server.
1,868
def _fetch(self):
    if self.closed():
        raise self.Error("Failed to read from closed connection "
                         "{!r}".format(self.server.address))
    if self.defunct():
        raise self.Error("Failed to read from defunct connection "
                         "{!r}".format(self.server.address))
    if not self.responses:
        return 0, 0
    self._receive()
    details, summary_signature, summary_metadata = self._unpack()
    if details:
        log_debug("[#%04X]  S: RECORD * %d", self.local_port, len(details))
        self.responses[0].on_records(details)
    if summary_signature is None:
        return len(details), 0
    response = self.responses.popleft()
    response.complete = True
    if summary_signature == b"\x70":
        log_debug("[#%04X]  S: SUCCESS %r", self.local_port,
                  summary_metadata)
        response.on_success(summary_metadata or {})
    elif summary_signature == b"\x7E":
        self._last_run_statement = None
        log_debug("[#%04X]  S: IGNORED", self.local_port)
        response.on_ignored(summary_metadata or {})
    elif summary_signature == b"\x7F":
        self._last_run_statement = None
        log_debug("[#%04X]  S: FAILURE %r", self.local_port,
                  summary_metadata)
        response.on_failure(summary_metadata or {})
    else:
        self._last_run_statement = None
        raise ProtocolError("Unexpected response message with signature "
                            "%02X" % summary_signature)
    return len(details), 1
Receive at least one message from the server, if available.
1,869
def sync(self):
    self.send()
    detail_count = summary_count = 0
    while self.responses:
        response = self.responses[0]
        while not response.complete:
            detail_delta, summary_delta = self.fetch()
            detail_count += detail_delta
            summary_count += summary_delta
    return detail_count, summary_count
Send and fetch all outstanding messages.
1,870
def acquire_direct(self, address):
    if self.closed():
        raise ServiceUnavailable("Connection pool closed")
    with self.lock:
        try:
            connections = self.connections[address]
        except KeyError:
            connections = self.connections[address] = deque()
        connection_acquisition_start_timestamp = perf_counter()
        while True:
            for connection in list(connections):
                if (connection.closed() or connection.defunct()
                        or connection.timedout()):
                    connections.remove(connection)
                    continue
                if not connection.in_use:
                    connection.in_use = True
                    return connection
            infinite_connection_pool = (
                self._max_connection_pool_size < 0
                or self._max_connection_pool_size == float("inf"))
            can_create_new_connection = (
                infinite_connection_pool
                or len(connections) < self._max_connection_pool_size)
            if can_create_new_connection:
                try:
                    connection = self.connector(address)
                except ServiceUnavailable:
                    self.remove(address)
                    raise
                else:
                    connection.pool = self
                    connection.in_use = True
                    connections.append(connection)
                    return connection
            span_timeout = self._connection_acquisition_timeout - (
                perf_counter() - connection_acquisition_start_timestamp)
            if span_timeout > 0:
                self.cond.wait(span_timeout)
                if self._connection_acquisition_timeout <= (
                        perf_counter()
                        - connection_acquisition_start_timestamp):
                    raise ClientError(
                        "Failed to obtain a connection from pool within "
                        "{!r}s".format(self._connection_acquisition_timeout))
            else:
                raise ClientError(
                    "Failed to obtain a connection from pool within "
                    "{!r}s".format(self._connection_acquisition_timeout))
Acquire a connection to a given address from the pool. The address supplied should always be an IP address, not a host name.
1,871
def release(self, connection):
    with self.lock:
        connection.in_use = False
        self.cond.notify_all()
Release a connection back into the pool. This method is thread safe.
1,872
def in_use_connection_count(self, address):
    try:
        connections = self.connections[address]
    except KeyError:
        return 0
    else:
        return sum(1 if connection.in_use else 0
                   for connection in connections)
Count the number of connections currently in use to a given address.
1,873
def deactivate(self, address):
    with self.lock:
        try:
            connections = self.connections[address]
        except KeyError:
            return
        for conn in list(connections):
            if not conn.in_use:
                connections.remove(conn)
                try:
                    conn.close()
                except IOError:
                    pass
        if not connections:
            self.remove(address)
Deactivate an address from the connection pool, if present, closing all idle connections to that address.
1,874
def remove(self, address):
    with self.lock:
        for connection in self.connections.pop(address, ()):
            try:
                connection.close()
            except IOError:
                pass
Remove an address from the connection pool, if present, closing all connections to that address.
1,875
def close(self):
    if self._closed:
        return
    try:
        with self.lock:
            if not self._closed:
                self._closed = True
                for address in list(self.connections):
                    self.remove(address)
    except TypeError:
        pass
Close all connections and empty the pool. This method is thread safe.
1,876
def on_records(self, records):
    handler = self.handlers.get("on_records")
    if callable(handler):
        handler(records)
Called when one or more RECORD messages have been received.
1,877
def on_success(self, metadata):
    handler = self.handlers.get("on_success")
    if callable(handler):
        handler(metadata)
    handler = self.handlers.get("on_summary")
    if callable(handler):
        handler()
Called when a SUCCESS message has been received.
1,878
def on_failure(self, metadata):
    self.connection.reset()
    handler = self.handlers.get("on_failure")
    if callable(handler):
        handler(metadata)
    handler = self.handlers.get("on_summary")
    if callable(handler):
        handler()
    raise CypherError.hydrate(**metadata)
Called when a FAILURE message has been received.
1,879
def on_ignored(self, metadata=None):
    handler = self.handlers.get("on_ignored")
    if callable(handler):
        handler(metadata)
    handler = self.handlers.get("on_summary")
    if callable(handler):
        handler()
Called when an IGNORED message has been received.
1,880
def cached_property(prop):
    def cache_wrapper(self):
        if not hasattr(self, "_cache"):
            self._cache = {}
        if prop.__name__ not in self._cache:
            return_value = prop(self)
            if isgenerator(return_value):
                return_value = tuple(return_value)
            self._cache[prop.__name__] = return_value
        return self._cache[prop.__name__]
    return property(cache_wrapper)
A replacement for the property decorator that will only compute the attribute's value on the first call and serve a cached copy from then on.
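A quick usage sketch (the class and workload are illustrative; the decorator itself needs isgenerator from inspect in scope):

class Report:
    @cached_property
    def totals(self):
        print("computing...")        # runs only on the first access
        return sum(range(1000))

r = Report()
r.totals    # prints "computing..." and stores the result in r._cache
r.totals    # served from the cache, no recomputation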
1,881
def _convert_value_to_native(value):
    if isinstance(value, Counter32):
        return int(value.prettyPrint())
    if isinstance(value, Counter64):
        return int(value.prettyPrint())
    if isinstance(value, Gauge32):
        return int(value.prettyPrint())
    if isinstance(value, Integer):
        return int(value.prettyPrint())
    if isinstance(value, Integer32):
        return int(value.prettyPrint())
    if isinstance(value, Unsigned32):
        return int(value.prettyPrint())
    if isinstance(value, IpAddress):
        return str(value.prettyPrint())
    if isinstance(value, OctetString):
        try:
            return value.asOctets().decode(value.encoding)
        except UnicodeDecodeError:
            return value.asOctets()
    if isinstance(value, TimeTicks):
        return timedelta(seconds=int(value.prettyPrint()) / 100.0)
    return value
Converts pysnmp objects into native Python objects.
1,882
def get(self, oid):
    snmpsecurity = self._get_snmp_security()
    try:
        engine_error, pdu_error, pdu_error_index, objects = \
            self._cmdgen.getCmd(
                snmpsecurity,
                cmdgen.UdpTransportTarget(
                    (self.host, self.port), timeout=self.timeout,
                    retries=self.retries),
                oid,
            )
    except Exception as e:
        raise SNMPError(e)
    if engine_error:
        raise SNMPError(engine_error)
    if pdu_error:
        raise SNMPError(pdu_error.prettyPrint())
    _, value = objects[0]
    value = _convert_value_to_native(value)
    return value
Get a single OID value.
1,883
def table(self, oid, columns=None, column_value_mapping=None,
          non_repeaters=0, max_repetitions=20, fetch_all_columns=True):
    snmpsecurity = self._get_snmp_security()
    base_oid = oid.strip(".")

    if not fetch_all_columns and not columns:
        raise ValueError("please use the columns argument to "
                         "indicate which columns to fetch")

    if fetch_all_columns:
        columns_to_fetch = [""]
    else:
        columns_to_fetch = ["." + str(col_id) for col_id in columns.keys()]

    full_obj_table = []
    for col in columns_to_fetch:
        try:
            engine_error, pdu_error, pdu_error_index, obj_table = \
                self._cmdgen.bulkCmd(
                    snmpsecurity,
                    cmdgen.UdpTransportTarget(
                        (self.host, self.port), timeout=self.timeout,
                        retries=self.retries),
                    non_repeaters,
                    max_repetitions,
                    oid + col,
                )
        except Exception as e:
            raise SNMPError(e)
        if engine_error:
            raise SNMPError(engine_error)
        if pdu_error:
            raise SNMPError(pdu_error.prettyPrint())
        # drop trailing rows that fell outside the requested column
        try:
            while not str(obj_table[-1][0][0].getOid()).lstrip(
                    ".").startswith(base_oid + col + "."):
                obj_table.pop()
        except IndexError:
            pass
        full_obj_table += obj_table

    t = Table(columns=columns, column_value_mapping=column_value_mapping)
    for row in full_obj_table:
        for name, value in row:
            oid = str(name.getOid()).strip(".")
            value = _convert_value_to_native(value)
            column, row_id = oid[len(base_oid) + 1:].split(".", 1)
            t._add_value(int(column), row_id, value)
    return t
Get a table of values with the given OID prefix.
1,884
def get_parser():
    desc = Colors.LIGHTBLUE + textwrap.dedent() + Colors.ENDC
    usage_info = Colors.LGREEN + textwrap.dedent() + Colors.ENDC
    epi = Colors.LIGHTPURPLE + textwrap.dedent() + Colors.ENDC
    parser = argparse.ArgumentParser(
        add_help=True,
        formatter_class=argparse.RawTextHelpFormatter,
        usage=usage_info, description=desc, epilog=epi)
    parser.add_argument('mode', action='store',
                        choices=range(len(MODES)), type=int,
                        help='Select mode of file download.\n'
                             ' e.g: 0(rated) or 1(list).')
    parser.add_argument('torr_page', action='store',
                        choices=range(len(TORRENTS)), type=int,
                        help='Select tracking page to download from.\n'
                             ' e.g: 0 to .. ' + str(len(TORRENTS) - 1) + '.')
    parser.add_argument('str_search', action='store', type=str,
                        help='Input torrent string to search.\n'
                             ' e.g: "String search"')
    return parser
Load parser for command line arguments.
1,885
def insert(args):
    string_search = args.str_search
    mode_search = MODES[args.mode]
    page = list(TORRENTS[args.torr_page].keys())[0]
    key_search = TORRENTS[args.torr_page][page]['key_search']
    torrent_page = TORRENTS[args.torr_page][page]['page']
    domain = TORRENTS[args.torr_page][page]['domain']
    return [args, string_search, mode_search, page,
            key_search, torrent_page, domain]
Insert args values into instance variables.
1,886
def run_it():
    initialize()
    parser = get_parser()
    args = None
    first_parse = True
    while True:
        if first_parse is True:
            first_parse = False
            args = parser.parse_args()
        else:
            print(textwrap.dedent())
            print('Or.. if you want to exit just write "' +
                  Colors.LRED + 'Q' + Colors.ENDC + '" or "' +
                  Colors.LRED + 'q' + Colors.ENDC + '".')
            input_parse = input('>> ').replace("'", "").replace('"', '')
            if input_parse in ['Q', 'q']:
                sys.exit(1)
            args = parser.parse_args(input_parse.split(' ', 2))
        if args.str_search.strip() == "":
            print('Please insert an appropriate non-empty string.')
        else:
            auto = AutoPy(*insert(args))
            auto.get_content()
            auto.select_torrent()
            auto.download_torrent()
Search and download torrents until the user says so.
1,887
def open_magnet(self):
    if sys.platform.startswith('linux'):
        subprocess.Popen(['xdg-open', self.magnet],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    elif sys.platform.startswith('win32'):
        os.startfile(self.magnet)
    elif sys.platform.startswith('cygwin'):
        os.startfile(self.magnet)
    elif sys.platform.startswith('darwin'):
        subprocess.Popen(['open', self.magnet],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    else:
        subprocess.Popen(['xdg-open', self.magnet],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
Open the magnet link according to the OS.
1,888
def get_magnet(self, url):
    content_most_rated = requests.get(url)
    rated_soup = BeautifulSoup(content_most_rated.content, 'lxml')
    if self.page == 'torrent_project':
        self.magnet = rated_soup.find(
            'a', href=True, text=re.compile('Download'))['href']
    elif self.page == 'the_pirate_bay':
        self.magnet = rated_soup.find(
            'a', href=True, text=re.compile('Get this torrent'))['href']
    elif self.page == '1337x':
        div1337 = rated_soup.find(
            'div', {'class': 'torrent-category-detail'})
        self.magnet = div1337.find('a', href=re.compile('magnet'))['href']
    elif self.page == 'isohunt':
        self.magnet = rated_soup.find(
            'a', href=re.compile('magnet'))['href']
    else:
        print('Wrong page to get magnet!')
        sys.exit(1)
Get the magnet from the torrent page. The URL already contains the domain.
1,889
def download_torrent(self):
    try:
        if self.back_to_menu is True:
            return
        if self.found_torrents is False:
            print('Nothing found.')
            return
        if self.mode_search == 'best_rated':
            print('Downloading..')
            self.open_magnet()
        elif self.mode_search == 'list':
            if self.selected is not None:
                if self.page in ['eztv', 'limetorrents']:
                    self.magnet = self.hrefs[int(self.selected)]
                    print('Downloading..')
                    self.open_magnet()
                elif self.page in ['the_pirate_bay', 'torrent_project',
                                   '1337x', 'isohunt']:
                    url = self.hrefs[int(self.selected)]
                    self.get_magnet(url)
                    print('Downloading..')
                    self.open_magnet()
                else:
                    print('Bad selected page.')
            else:
                print('Nothing selected.')
                sys.exit(1)
    except Exception:
        print(traceback.format_exc())
        sys.exit(0)
Download torrent.
1,890
def handle_select(self):
    self.selected = input('>> ')
    if self.selected in ['Q', 'q']:
        sys.exit(1)
    elif self.selected in ['B', 'b']:
        self.back_to_menu = True
        return True
    elif is_num(self.selected):
        if 0 <= int(self.selected) <= len(self.hrefs) - 1:
            self.back_to_menu = False
            return True
        else:
            print(Colors.FAIL + 'Wrong index. ' +
                  'Please select an appropriate one or other option.' +
                  Colors.ENDC)
            return False
    else:
        print(Colors.FAIL + 'Invalid input. ' +
              'Please select an appropriate one or other option.' +
              Colors.ENDC)
        return False
Handle the user's input in list mode.
1,891
def select_torrent(self):
    try:
        self.found_torrents = not bool(
            self.key_search in self.content_page.text)
        if not self.found_torrents:
            print('No torrents found.')
            sys.exit(1)
        self.soupify()
        if self.mode_search == 'list':
            self.build_table()
            if len(self.hrefs) == 1:
                print('Press "0" to download it.')
            elif len(self.hrefs) >= 2:
                print('\nSelect one of the following torrents. ' +
                      'Enter a number between: 0 and ' +
                      str(len(self.hrefs) - 1))
            print('If you want to exit write "' +
                  Colors.LRED + 'Q' + Colors.ENDC + '" or "' +
                  Colors.LRED + 'q' + Colors.ENDC + '".')
            print('If you want to go back to menu and search again write "' +
                  Colors.LGREEN + 'B' + Colors.ENDC + '" or "' +
                  Colors.LGREEN + 'b' + Colors.ENDC + '".')
            while not self.picked_choice:
                self.picked_choice = self.handle_select()
    except Exception:
        print('ERROR select_torrent: ')
        logging.error(traceback.format_exc())
        sys.exit(0)
Select torrent.
1,892
def build_url(self):
    url = requests.utils.requote_uri(self.torrent_page + self.string_search)
    if self.page == '1337x':
        return url + '/1/'
    elif self.page == 'limetorrents':
        return url + '/'
    else:
        return url
Build an appropriately encoded URL.
1,893
def get_content(self):
    url = self.build_url()
    try:
        self.content_page = requests.get(url)
        if not (self.content_page.status_code == requests.codes.ok):
            self.content_page.raise_for_status()
    except requests.exceptions.RequestException as ex:
        logging.info('A requests exception has occurred: ' + str(ex))
        logging.error(traceback.format_exc())
        sys.exit(0)
Get the content of the page through the URL.
1,894
def _recycle(self):
    origin = self._origin
    if origin == 0:
        return False
    available = self._extent - origin
    self._data[:available] = self._data[origin:self._extent]
    self._extent = available
    self._origin = 0
    return True
Reclaim buffer space before the origin.
1,895
def frame_message(self):
    if self._frame is not None:
        self.discard_message()
    panes = []
    p = origin = self._origin
    extent = self._extent
    while p < extent:
        available = extent - p
        if available < 2:
            break
        chunk_size, = struct_unpack(">H", self._view[p:(p + 2)])
        p += 2
        if chunk_size == 0:
            self._limit = p
            self._frame = MessageFrame(
                memoryview(self._view[origin:self._limit]), panes)
            return True
        q = p + chunk_size
        panes.append((p - origin, q - origin))
        p = q
    return False
Construct a frame around the first complete message in the buffer.
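The chunking scheme being scanned here is Bolt's: each chunk is a two-byte big-endian size followed by that many payload bytes, and a zero-size chunk terminates the message. A standalone sketch of the same scan:

import struct

buf = b"\x00\x03abc\x00\x02de\x00\x00"    # two chunks ("abc", "de") + end marker
p, panes = 0, []
while p + 2 <= len(buf):
    size, = struct.unpack(">H", buf[p:p + 2])
    p += 2
    if size == 0:                          # zero-size chunk: message complete
        break
    panes.append(buf[p:p + size])
    p += size
print(b"".join(panes))                     # -> b'abcde'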
1,896
def call(self, request_function, set_header_callback, *args, **kwargs):
    self.delay()
    kwargs["headers"] = set_header_callback()
    response = request_function(*args, **kwargs)
    self.update(response.headers)
    return response
Rate limit the call to request_function.
1,897
def delay(self):
    if self.next_request_timestamp is None:
        return
    sleep_seconds = self.next_request_timestamp - time.time()
    if sleep_seconds <= 0:
        return
    message = "Sleeping: {:0.2f} seconds prior to call".format(
        sleep_seconds)
    log.debug(message)
    time.sleep(sleep_seconds)
Sleep for an amount of time to remain under the rate limit.
1,898
def update(self, response_headers):
    if "x-ratelimit-remaining" not in response_headers:
        if self.remaining is not None:
            self.remaining -= 1
            self.used += 1
        return

    now = time.time()
    prev_remaining = self.remaining

    seconds_to_reset = int(response_headers["x-ratelimit-reset"])
    self.remaining = float(response_headers["x-ratelimit-remaining"])
    self.used = int(response_headers["x-ratelimit-used"])
    self.reset_timestamp = now + seconds_to_reset

    if self.remaining <= 0:
        self.next_request_timestamp = self.reset_timestamp
        return

    if prev_remaining is not None and prev_remaining > self.remaining:
        estimated_clients = prev_remaining - self.remaining
    else:
        estimated_clients = 1.0

    self.next_request_timestamp = min(
        self.reset_timestamp,
        now + (estimated_clients * seconds_to_reset / self.remaining),
    )
Update the state of the rate limiter based on the response headers.
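To see the pacing arithmetic, suppose a response reports 60 requests remaining and 120 seconds until reset, and the previous call saw 61 remaining (numbers are illustrative):

now = 0.0                                    # pretend timestamp for the walk-through
prev_remaining, remaining = 61.0, 60.0
seconds_to_reset = 120
estimated_clients = prev_remaining - remaining          # 1.0 client observed
next_request = min(now + seconds_to_reset,              # never wait past the reset
                   now + estimated_clients * seconds_to_reset / remaining)
print(next_request)    # 2.0 -> one request every ~2s spreads the remaining budget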
1,899
def custom_resolve(self):
    if not callable(self.custom_resolver):
        return
    new_addresses = []
    for address in self.addresses:
        for new_address in self.custom_resolver(address):
            new_addresses.append(new_address)
    self.addresses = new_addresses
If a custom resolver is defined, perform custom resolution on the contained addresses.