2,100
def _send_merge_commands(self, config, file_config):
    if self.loaded is False:
        if self._save_backup() is False:
            raise MergeConfigException('Error while storing backup config.')
    if self.ssh_connection is False:
        self._open_ssh()
    if file_config:
        if isinstance(config, str):
            config = config.splitlines()
    else:
        if isinstance(config, str):
            config = str(config).split()
    self.ssh_device.send_config_set(config)
    self.loaded = True
    self.merge_config = True
Netmiko is being used to push set commands.
2,101
def compare_config(self):
    if self.ssh_connection is False:
        self._open_ssh()
    self.ssh_device.exit_config_mode()
    diff = self.ssh_device.send_command("show config diff")
    return diff.strip()
Netmiko is being used to obtain config diffs because pan-python doesn't support the needed command.
2,102
def commit_config(self):
    if self.loaded:
        if self.ssh_connection is False:
            self._open_ssh()
        try:
            self.ssh_device.commit()
            time.sleep(3)
            self.loaded = False
            self.changed = True
        except Exception:
            if self.merge_config:
                raise MergeConfigException('Error while committing config')
            else:
                raise ReplaceConfigException('Error while committing config')
    else:
        raise ReplaceConfigException('No config loaded.')
Netmiko is being used to commit the configuration because it takes better care of results compared to pan-python.
2,103
def rollback(self):
    if self.changed:
        rollback_cmd = ('<load><config><from>{0}</from></config></load>'
                        .format(self.backup_file))
        self.device.op(cmd=rollback_cmd)
        time.sleep(5)
        if self.ssh_connection is False:
            self._open_ssh()
        try:
            self.ssh_device.commit()
            self.loaded = False
            self.changed = False
            self.merge_config = False
        except Exception:
            # The original created this exception without raising it.
            raise ReplaceConfigException("Error while loading backup config")
Netmiko is being used to commit the rollback configuration because it takes better care of results compared to pan-python.
2,104
def get_interfaces_ip(self):
    def extract_ip_info(parsed_intf_dict):
        intf = parsed_intf_dict['name']
        _ip_info = {intf: {}}

        v4_ip = parsed_intf_dict.get('ip')
        secondary_v4_ip = parsed_intf_dict.get('addr')
        v6_ip = parsed_intf_dict.get('addr6')

        if v4_ip != 'N/A':
            address, pref = v4_ip.split('/')
            _ip_info[intf].setdefault('ipv4', {})[address] = {
                'prefix_length': int(pref)}

        if secondary_v4_ip is not None:
            members = secondary_v4_ip['member']
            if not isinstance(members, list):
                members = [members]
            for address in members:
                address, pref = address.split('/')
                _ip_info[intf].setdefault('ipv4', {})[address] = {
                    'prefix_length': int(pref)}

        if v6_ip is not None:
            members = v6_ip['member']
            if not isinstance(members, list):
                members = [members]
            for address in members:
                address, pref = address.split('/')
                _ip_info[intf].setdefault('ipv6', {})[address] = {
                    'prefix_length': int(pref)}

        if _ip_info == {intf: {}}:
            _ip_info = {}

        return _ip_info

    ip_interfaces = {}
    cmd = "<show><interface>all</interface></show>"
    self.device.op(cmd=cmd)
    interface_info_xml = xmltodict.parse(self.device.xml_root())
    interface_info_json = json.dumps(
        interface_info_xml['response']['result']['ifnet']['entry'])
    interface_info = json.loads(interface_info_json)
    if isinstance(interface_info, dict):
        interface_info = [interface_info]
    for interface_dict in interface_info:
        ip_info = extract_ip_info(interface_dict)
        if ip_info:
            ip_interfaces.update(ip_info)
    return ip_interfaces
Return IP interface data.
2,105
def refine(self):
    newCSG = CSG()
    for poly in self.polygons:
        verts = poly.vertices
        numVerts = len(verts)
        if numVerts == 0:
            continue
        midPos = reduce(operator.add, [v.pos for v in verts]) / float(numVerts)
        midNormal = None
        if verts[0].normal is not None:
            midNormal = poly.plane.normal
        midVert = Vertex(midPos, midNormal)
        newVerts = verts + \
            [verts[i].interpolate(verts[(i + 1) % numVerts], 0.5)
             for i in range(numVerts)] + \
            [midVert]
        i = 0
        vs = [newVerts[i], newVerts[i + numVerts],
              newVerts[2 * numVerts], newVerts[2 * numVerts - 1]]
        newPoly = Polygon(vs, poly.shared)
        newPoly.shared = poly.shared
        newPoly.plane = poly.plane
        newCSG.polygons.append(newPoly)
        for i in range(1, numVerts):
            vs = [newVerts[i], newVerts[numVerts + i],
                  newVerts[2 * numVerts], newVerts[numVerts + i - 1]]
            newPoly = Polygon(vs, poly.shared)
            newCSG.polygons.append(newPoly)
    return newCSG
Return a refined CSG. For each polygon, a midpoint is added on each edge and at the center of the polygon.
2,106
def saveVTK(self, filename):
    with open(filename, 'w') as f:
        f.write('# vtk DataFile Version 3.0\n')
        f.write('pycsg output\n')
        f.write('ASCII\n')
        f.write('DATASET POLYDATA\n')

        verts, cells, count = self.toVerticesAndPolygons()

        f.write('POINTS {0} float\n'.format(len(verts)))
        for v in verts:
            f.write('{0} {1} {2}\n'.format(v[0], v[1], v[2]))
        numCells = len(cells)
        f.write('POLYGONS {0} {1}\n'.format(numCells, count + numCells))
        for cell in cells:
            f.write('{0} '.format(len(cell)))
            for index in cell:
                f.write('{0} '.format(index))
            f.write('\n')
Save polygons to a VTK file.
2,107
def inverse(self):
    csg = self.clone()
    # Use an explicit loop instead of map(): under Python 3, map() is
    # lazy, so map(lambda p: p.flip(), csg.polygons) would never run.
    for poly in csg.polygons:
        poly.flip()
    return csg
Return a new CSG solid with solid and empty space switched. This solid is not modified.
2,108
def load(filename, *, gzipped=None, byteorder='big'):
    if gzipped is not None:
        return File.load(filename, gzipped, byteorder)

    # Sniff the gzip magic number to detect compression automatically.
    with open(filename, 'rb') as buff:
        magic_number = buff.read(2)
        buff.seek(0)
        if magic_number == b'\x1f\x8b':
            buff = gzip.GzipFile(fileobj=buff)
        return File.from_buffer(buff, byteorder)
Load the nbt file at the specified location. By default, the function will figure out by itself whether the file is gzipped before loading it. You can pass a boolean to the gzipped keyword-only argument to specify explicitly whether the file is compressed. You can also use the byteorder keyword-only argument to specify whether the file is little-endian or big-endian.
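A minimal usage sketch of the loader above; the filenames are illustrative and load is assumed to be importable from the surrounding nbt library:

nbt_file = load('level.dat')                      # compression auto-detected
raw_file = load('servers.dat', gzipped=False)     # force an uncompressed read
le_file = load('pocket.dat', byteorder='little')  # little-endian variant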
2,109
def from_buffer(cls, buff, byteorder='big'):
    self = cls.parse(buff, byteorder)
    self.filename = getattr(buff, 'name', self.filename)
    self.gzipped = isinstance(buff, gzip.GzipFile)
    self.byteorder = byteorder
    return self
Load an nbt file from a file-like object. The buff argument can be either a standard io.BufferedReader for uncompressed nbt or a gzip.GzipFile for gzipped nbt data.
2,110
def load(cls, filename, gzipped, byteorder='big'):
    open_file = gzip.open if gzipped else open
    with open_file(filename, 'rb') as buff:
        return cls.from_buffer(buff, byteorder)
Read, parse, and return the file at the specified location. The gzipped argument is used to indicate if the specified file is gzipped. The byteorder argument lets you specify whether the file is big-endian or little-endian.
2,111
def save(self, filename=None, *, gzipped=None, byteorder=None):
    if gzipped is None:
        gzipped = self.gzipped
    if filename is None:
        filename = self.filename
    if filename is None:
        raise ValueError('No filename specified')
    open_file = gzip.open if gzipped else open
    with open_file(filename, 'wb') as buff:
        self.write(buff, byteorder or self.byteorder)
Write the file at the specified location. The gzipped keyword-only argument indicates whether the file should be gzipped. The byteorder keyword-only argument lets you specify whether the file should be big-endian or little-endian. If the method is called without any argument, it defaults to the instance attributes and uses the file's filename, gzipped, and byteorder attributes. Calling the method without a filename will raise a ValueError if the filename of the file is None.
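Continuing the hypothetical example from load() above; the filenames are illustrative:

nbt_file = load('level.dat')
nbt_file.save()                                # reuses filename, gzipped, byteorder
nbt_file.save('level_copy.dat', gzipped=True)  # explicit target and compression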
2,112
def return_collection(collection_type):
    def outer_func(func):
        @functools.wraps(func)
        def inner_func(self, *pargs, **kwargs):
            result = func(self, *pargs, **kwargs)
            return list(map(collection_type, result))
        return inner_func
    return outer_func
Change a method's return value from raw API output to a collection of models.
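A hypothetical use of the decorator; the Series model class and the raw queue call are illustrative, not part of the source:

class QueueApi(object):
    @return_collection(Series)  # Series is a hypothetical model class
    def list_queue(self):
        # Raw API output (e.g. a list of dicts); the decorator maps each
        # element through Series before returning.
        return self._api.get('queue')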
2,113
def post_save_stop(sender, instance, **kwargs):
    from multigtfs.models.trip import Trip
    trip_ids = instance.stoptime_set.filter(
        trip__shape=None).values_list('trip_id', flat=True).distinct()
    for trip in Trip.objects.filter(id__in=trip_ids):
        trip.update_geometry()
Update related objects when the Stop is updated.
2,114
def _do_post_request_tasks(self, response_data):
    try:
        sess_ops = response_data.get('ops', [])
    except AttributeError:
        pass
    else:
        self._session_ops.extend(sess_ops)
Handle actions that need to be done with every response.
2,115
def _build_request(self, method, url, params=None):
    full_params = self._get_base_params()
    if params is not None:
        full_params.update(params)
    try:
        request_func = lambda u, d: getattr(self._connector, method.lower())(
            u, params=d, headers=self._request_headers)
    except AttributeError:
        raise ApiException('Invalid request method')

    def do_request():
        logger.debug('Sending %s request "%s" with params: %r',
                     method, url, full_params)
        try:
            resp = request_func(url, full_params)
            logger.debug('Received response code: %d', resp.status_code)
        except requests.RequestException as err:
            raise ApiNetworkException(err)
        try:
            resp_json = resp.json()
        except TypeError:
            resp_json = resp.json
        method_returns_list = False
        try:
            resp_json['error']
        except TypeError:
            logger.warn('Api method did not return map: %s', method)
            method_returns_list = True
        except KeyError:
            logger.warn('Api method did not return map with error key: %s',
                        method)
        # Note: method_returns_list is initialized to False above, so this
        # branch is unreachable as written.
        if method_returns_list is None:
            raise ApiBadResponseException(resp.content)
        elif method_returns_list:
            data = resp_json
        else:
            try:
                if resp_json['error']:
                    raise ApiError('%s: %s' % (resp_json['code'],
                                               resp_json['message']))
            except KeyError:
                data = resp_json
            else:
                data = resp_json['data']
        self._do_post_request_tasks(data)
        self._last_response = resp
        return data
    return do_request
Build a function to do an API request.
2,116
def from_db_value(self, value, expression, connection, context):
    if value is None:
        return value
    return self.parse_seconds(value)
Handle data loaded from the database.
2,117
def parse_seconds(value):
    svalue = str(value)
    colons = svalue.count(':')
    if colons == 2:
        hours, minutes, seconds = [int(v) for v in svalue.split(':')]
    elif colons == 1:
        hours, minutes = [int(v) for v in svalue.split(':')]
        seconds = 0
    elif colons == 0:
        hours = 0
        minutes = 0
        seconds = int(svalue)
    else:
        raise ValueError('Must be in seconds or HH:MM:SS format')
    return Seconds.from_hms(hours, minutes, seconds)
Parse a string into a Seconds instance.
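A quick sketch of the accepted formats; parse_seconds and Seconds are assumed from the surrounding Django field module:

parse_seconds('01:30:15')  # HH:MM:SS -> Seconds.from_hms(1, 30, 15)
parse_seconds('01:30')     # HH:MM    -> Seconds.from_hms(1, 30, 0)
parse_seconds('5415')      # seconds  -> Seconds.from_hms(0, 0, 5415)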
2,118
def get_prep_value(self, value):
    if isinstance(value, Seconds):
        return value.seconds
    elif value:
        return self.parse_seconds(value).seconds
    else:
        return None
Prepare the value for database storage.
2,119
def calculate_colorbar(self):
    self._base._process_values()
    self._base._find_range()
    X, Y = self._base._mesh()
    C = self._base._values[:, np.newaxis]
    return X, Y, C
Return the positions and colors of all intervals inside the colorbar.
2,120
def get_media_formats(self, media_id):
    url = (SCRAPER.API_URL + 'media-' + media_id).format(
        protocol=SCRAPER.PROTOCOL_INSECURE)
    format_pattern = re.compile(SCRAPER.VIDEO.FORMAT_PATTERN)
    formats = {}
    for format, param in iteritems(SCRAPER.VIDEO.FORMAT_PARAMS):
        resp = self._connector.get(url, params={param: '1'})
        if not resp.ok:
            continue
        try:
            match = format_pattern.search(resp.content)
        except TypeError:
            match = format_pattern.search(resp.text)
        if match:
            formats[format] = (int(match.group(1)), int(match.group(2)))
    return formats
CR doesn't seem to provide the video_format and video_quality params through any of the APIs, so we have to scrape the video page.
2,121
def parse_nbt(literal):
    parser = Parser(tokenize(literal))
    tag = parser.parse()
    cursor = parser.token_span[1]
    leftover = literal[cursor:]
    if leftover.strip():
        parser.token_span = cursor, cursor + len(leftover)
        raise parser.error(f'Expected end of string but got {leftover!r}')
    return tag
Parse a literal nbt string and return the resulting tag.
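A hypothetical round trip through the parser; the snbt literal is illustrative:

tag = parse_nbt('{name: "example", scores: [1, 2, 3]}')
tag['name']    # a String tag
tag['scores']  # a List of Int tags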
2,122
def tokenize(string):
    for match in TOKENS_REGEX.finditer(string):
        yield Token(match.lastgroup, match.group().strip(), match.span())
Match and yield all the tokens of the input string.
2,123
def next(self):
    self.current_token = next(self.token_stream, None)
    if self.current_token is None:
        self.token_span = self.token_span[1], self.token_span[1]
        raise self.error('Unexpected end of input')
    self.token_span = self.current_token.span
    return self
Move to the next token in the token stream.
2,124
def parse(self):
    token_type = self.current_token.type.lower()
    handler = getattr(self, f'parse_{token_type}', None)
    if handler is None:
        raise self.error(f'Invalid literal {self.current_token.value!r}')
    return handler()
Parse and return an nbt literal from the token stream.
2,125
def parse_number(self):
    value = self.current_token.value
    suffix = value[-1].lower()
    try:
        if suffix in NUMBER_SUFFIXES:
            return NUMBER_SUFFIXES[suffix](value[:-1])
        return Double(value) if '.' in value else Int(value)
    except (OutOfRange, ValueError):
        return String(value)
Parse a number from the token stream.
2,126
def parse_string(self):
    aliased_value = LITERAL_ALIASES.get(self.current_token.value.lower())
    if aliased_value is not None:
        return aliased_value
    return String(self.current_token.value)
Parse a regular unquoted string from the token stream.
2,127
def collect_tokens_until(self, token_type):
    self.next()
    if self.current_token.type == token_type:
        return
    while True:
        yield self.current_token
        self.next()
        if self.current_token.type == token_type:
            return
        if self.current_token.type != 'COMMA':
            raise self.error(f'Expected comma but got '
                             f'{self.current_token.value!r}')
        self.next()
Yield the item tokens in a comma-separated tag collection.
2,128
def parse_compound(self):
    compound_tag = Compound()
    for token in self.collect_tokens_until('CLOSE_COMPOUND'):
        item_key = token.value
        if token.type not in ('NUMBER', 'STRING', 'QUOTED_STRING'):
            raise self.error(f'Expected compound key but got {item_key!r}')
        if token.type == 'QUOTED_STRING':
            item_key = self.unquote_string(item_key)
        if self.next().current_token.type != 'COLON':
            raise self.error(f'Expected colon but got '
                             f'{self.current_token.value!r}')
        self.next()
        compound_tag[item_key] = self.parse()
    return compound_tag
Parse a compound from the token stream.
2,129
def array_items(self, number_type, *, number_suffix=''):
    for token in self.collect_tokens_until('CLOSE_BRACKET'):
        is_number = token.type == 'NUMBER'
        value = token.value.lower()
        if not (is_number and value.endswith(number_suffix)):
            raise self.error(f'Invalid {number_type} array element '
                             f'{token.value!r}')
        yield int(value.replace(number_suffix, ''))
Parse and yield array items from the token stream.
2,130
def parse_list(self):
    try:
        return List([self.parse()
                     for _ in self.collect_tokens_until('CLOSE_BRACKET')])
    except IncompatibleItemType as exc:
        raise self.error(f'Item {str(exc.item)!r} is not a '
                         f'{exc.subtype.__name__} tag') from None
Parse a list from the token stream.
2,131
def unquote_string(self, string):
    value = string[1:-1]
    forbidden_sequences = {ESCAPE_SUBS[STRING_QUOTES[string[0]]]}
    valid_sequences = set(ESCAPE_SEQUENCES) - forbidden_sequences
    for seq in ESCAPE_REGEX.findall(value):
        if seq not in valid_sequences:
            raise self.error(f'Invalid escape sequence "{seq}"')
    for seq, sub in ESCAPE_SEQUENCES.items():
        value = value.replace(seq, sub)
    return value
Return the unquoted value of a quoted string.
2,132
def opener_from_zipfile(zipfile):
    def opener(filename):
        inner_file = zipfile.open(filename)
        if PY3:
            from io import TextIOWrapper
            return TextIOWrapper(inner_file)
        else:
            return inner_file
    return opener
Return a function that will open a file in a zipfile by name.
2,133
def write_text_rows(writer, rows):
    for row in rows:
        try:
            writer.writerow(row)
        except UnicodeEncodeError:
            # Python 2: encode text values as UTF-8 and retry.
            new_row = []
            for item in row:
                if isinstance(item, text_type):
                    new_row.append(item.encode('utf-8'))
                else:
                    new_row.append(item)
            writer.writerow(new_row)
Write CSV row data which may include text.
2,134
def serialize_tag(tag, *, indent=None, compact=False, quote=None):
    serializer = Serializer(indent=indent, compact=compact, quote=quote)
    return serializer.serialize(tag)
Serialize an nbt tag to its literal representation.
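A hedged example call; the Compound, List, and Int tag classes are assumed from the same library:

tag = Compound({'pos': List[Int]([1, 2, 3])})
serialize_tag(tag)            # one-line literal
serialize_tag(tag, indent=4)  # expanded, indented literal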
2,135
def depth(self):
    if self.indentation is None:
        yield
    else:
        previous = self.previous_indent
        self.previous_indent = self.indent
        self.indent += self.indentation
        yield
        self.indent = self.previous_indent
        self.previous_indent = previous
Increase the level of indentation by one.
2,136
def should_expand(self, tag):
    return self.indentation is not None and tag and (
        not self.previous_indent
        or (tag.serializer == 'list'
            and tag.subtype.serializer in ('array', 'list', 'compound'))
        or (tag.serializer == 'compound')
    )
Return whether the specified tag should be expanded.
2,137
def escape_string(self, string):
    if self.quote:
        quote = self.quote
    else:
        found = QUOTE_REGEX.search(string)
        quote = (STRING_QUOTES[found.group()] if found
                 else next(iter(STRING_QUOTES)))
    for match, seq in ESCAPE_SUBS.items():
        if match == quote or match not in STRING_QUOTES:
            string = string.replace(match, seq)
    return f'{quote}{string}{quote}'
Return the escaped literal representation of an nbt string.
2,138
def stringify_compound_key(self, key):
    if UNQUOTED_COMPOUND_KEY.match(key):
        return key
    return self.escape_string(key)
Escape the compound key if it can't be represented unquoted.
2,139
def serialize(self, tag):
    handler = getattr(self, f'serialize_{tag.serializer}', None)
    if handler is None:
        raise TypeError(f'Can\'t serialize {type(tag)!r} instance')
    return handler(tag)
Return the literal representation of a tag.
2,140
def serialize_numeric(self, tag):
    str_func = int.__str__ if isinstance(tag, int) else float.__str__
    return str_func(tag) + tag.suffix
Return the literal representation of a numeric tag.
2,141
def serialize_array(self, tag):
    elements = self.comma.join(f'{el}{tag.item_suffix}' for el in tag)
    return f'[{tag.array_prefix}{self.semicolon}{elements}]'
Return the literal representation of an array tag.
2,142
def serialize_list(self, tag):
    separator, fmt = self.comma, '[{}]'
    with self.depth():
        if self.should_expand(tag):
            separator, fmt = self.expand(separator, fmt)
        return fmt.format(separator.join(map(self.serialize, tag)))
Return the literal representation of a list tag.
2,143
def serialize_compound(self, tag):
    separator, fmt = self.comma, '{{{}}}'
    with self.depth():
        if self.should_expand(tag):
            separator, fmt = self.expand(separator, fmt)
        return fmt.format(separator.join(
            f'{self.stringify_compound_key(key)}'
            f'{self.colon}{self.serialize(value)}'
            for key, value in tag.items()))
Return the literal representation of a compound tag.
2,144
def populated_column_map(self):
    column_map = []
    cls = self.model
    for csv_name, field_pattern in cls._column_map:
        # Separate the local field name from any related-field lookup.
        if '__' in field_pattern:
            field_name = field_pattern.split('__', 1)[0]
        else:
            field_name = field_pattern
        point_match = re_point.match(field_name)
        if point_match:
            field = None
        else:
            field = cls._meta.get_field(field_name)
        if field and field.blank and not field.has_default():
            # Optional field: include it only if some row has a value.
            kwargs = {field_name: get_blank_value(field)}
            if self.exclude(**kwargs).exists():
                column_map.append((csv_name, field_pattern))
        else:
            column_map.append((csv_name, field_pattern))
    return column_map
Return the _column_map without unused optional fields.
2,145
def in_feed(self, feed):
    kwargs = {self.model._rel_to_feed: feed}
    return self.filter(**kwargs)
Return the objects in the target feed.
2,146
def make_android_api_method(req_method, secure=True, version=0):
    def outer_func(func):
        def inner_func(self, **kwargs):
            req_url = self._build_request_url(secure, func.__name__, version)
            req_func = self._build_request(req_method, req_url, params=kwargs)
            response = req_func()
            func(self, response)
            return response
        return inner_func
    return outer_func
Turn an AndroidApi method into a function that builds the request, sends it, then passes the response to the actual method. Should be used as a decorator.
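A sketch of how the decorator might be applied, assuming an AndroidApi-style class; the endpoint name and parameters are illustrative:

class AndroidApi(object):
    @make_android_api_method('POST', secure=True, version=0)
    def list_series(self, response):
        # The request is built from the method name and sent before this
        # body runs; use it to inspect or post-process the response.
        pass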
2,147
def _get_base_params(self):
    base_params = {
        'locale': self._get_locale(),
        'device_id': ANDROID.DEVICE_ID,
        'device_type': ANDROID.APP_PACKAGE,
        'access_token': ANDROID.ACCESS_TOKEN,
        'version': ANDROID.APP_CODE,
    }
    base_params.update(dict((k, v)
                            for k, v in iteritems(self._state_params)
                            if v is not None))
    return base_params
Get the params that will be included with every request.
2,148
def is_premium(self, media_type):
    if self.logged_in:
        if media_type in self._user_data['premium']:
            return True
    return False
Return whether the session is premium for a given media type.
2,149
def read_numeric(fmt, buff, byteorder='big'):
    try:
        fmt = fmt[byteorder]
        return fmt.unpack(buff.read(fmt.size))[0]
    except StructError:
        return 0
    except KeyError as exc:
        raise ValueError('Invalid byte order') from exc
Read a numeric value from a file-like object.
2,150
def write_numeric(fmt, value, buff, byteorder='big'):
    try:
        buff.write(fmt[byteorder].pack(value))
    except KeyError as exc:
        raise ValueError('Invalid byte order') from exc
Write a numeric value to a file-like object.
2,151
def read_string(buff, byteorder='big'):
    length = read_numeric(USHORT, buff, byteorder)
    return buff.read(length).decode('utf-8')
Read a string from a file-like object.
2,152
def write_string(value, buff, byteorder='big'):
    data = value.encode('utf-8')
    write_numeric(USHORT, len(data), buff, byteorder)
    buff.write(data)
Write a string to a file-like object.
2,153
def infer_list_subtype(items):
    subtype = End
    for item in items:
        item_type = type(item)
        if not issubclass(item_type, Base):
            continue
        if subtype is End:
            subtype = item_type
            if not issubclass(subtype, List):
                return subtype
        elif subtype is not item_type:
            stype, itype = subtype, item_type
            generic = List
            while issubclass(stype, List) and issubclass(itype, List):
                stype, itype = stype.subtype, itype.subtype
                generic = List[generic]
            if stype is End:
                subtype = item_type
            elif itype is not End:
                return generic.subtype
    return subtype
Infer a list subtype from a collection of items.
2,154
def cast_item(cls, item):
    if not isinstance(item, cls.subtype):
        incompatible = isinstance(item, Base) and not any(
            issubclass(cls.subtype, tag_type) and isinstance(item, tag_type)
            for tag_type in cls.all_tags.values()
        )
        if incompatible:
            raise IncompatibleItemType(item, cls.subtype)
        try:
            return cls.subtype(item)
        except EndInstantiation:
            raise ValueError('List tags without an explicit subtype must '
                             'either be empty or instantiated with '
                             'elements from which a subtype can be '
                             'inferred') from None
        except (IncompatibleItemType, CastError):
            raise
        except Exception as exc:
            raise CastError(item, cls.subtype) from exc
    return item
Cast a list item to the appropriate tag type.
2,155
def merge(self, other):
    for key, value in other.items():
        if key in self and (isinstance(self[key], Compound)
                            and isinstance(value, dict)):
            self[key].merge(value)
        else:
            self[key] = value
Recursively merge tags from another compound.
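A hypothetical merge, assuming nbtlib-style Compound, Int, and String tags:

base = Compound({'pos': Compound({'x': Int(1)}), 'name': String('a')})
base.merge({'pos': {'y': Int(2)}, 'name': String('b')})
# base['pos'] now holds both x and y; base['name'] was overwritten.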
2,156
def decrypt_subtitle(self, subtitle):
    return self.decrypt(self._build_encryption_key(int(subtitle.id)),
                        subtitle['iv'][0].text.decode('base64'),
                        subtitle['data'][0].text.decode('base64'))
Decrypt encrypted subtitle data in a high-level model object.
2,157
def decrypt(self, encryption_key, iv, encrypted_data):
    logger.info('Decrypting subtitles with length (%d bytes), key=%r',
                len(encrypted_data), encryption_key)
    return zlib.decompress(aes_decrypt(encryption_key, iv, encrypted_data))
Decrypt encrypted subtitle data.
2,158
def _build_encryption_key(self, subtitle_id, key_size=ENCRYPTION_KEY_SIZE):
    # SHA-1 of the Fibonacci-based secret plus the id-derived magic value,
    # padded with null bytes up to the requested key size.
    sha1_hash = hashlib.new('sha1', self._build_hash_secret((1, 2)) +
                            self._build_hash_magic(subtitle_id)).digest()
    sha1_hash += '\x00' * max(key_size - len(sha1_hash), 0)
    return sha1_hash[:key_size]
Generate the encryption key for a given media item.
2,159
def _build_hash_magic(self, subtitle_id):
    media_magic = self.HASH_MAGIC_CONST ^ subtitle_id
    hash_magic = media_magic ^ media_magic >> 3 ^ media_magic * 32
    return str(hash_magic)
Build the other half of the encryption key hash.
2,160
def _build_hash_secret(self, seq_seed, seq_len=HASH_SECRET_LENGTH,
                       mod_value=HASH_SECRET_MOD_CONST):
    # Extend the seed like a Fibonacci sequence, then map each number
    # to a printable character.
    fbn_seq = list(seq_seed)
    for i in range(seq_len):
        fbn_seq.append(fbn_seq[-1] + fbn_seq[-2])
    hash_secret = list(map(
        lambda c: chr(c % mod_value + self.HASH_SECRET_CHAR_OFFSET),
        fbn_seq[2:]))
    return ''.join(hash_secret)
Build a seed for the hash based on the Fibonacci sequence.
2,161
def format(self, subtitles):
    logger.debug('Formatting subtitles (id=%s) with %s',
                 subtitles.id, self.__class__.__name__)
    return self._format(subtitles).encode('utf-8')
Turn a string containing the subs XML document into the formatted subtitle string.
2,162
def require_session_started(func):
    @functools.wraps(func)
    def inner_func(self, *pargs, **kwargs):
        if not self.session_started:
            logger.info('Starting session for required meta method')
            self.start_session()
        return func(self, *pargs, **kwargs)
    return inner_func
Check if API sessions are started and start them if not.
2,163
def require_android_logged_in(func):
    @functools.wraps(func)
    @require_session_started
    def inner_func(self, *pargs, **kwargs):
        if not self._android_api.logged_in:
            logger.info('Logging into android API for required meta method')
            if not self.has_credentials:
                raise ApiLoginFailure(
                    'Login is required but no credentials were provided')
            self._android_api.login(account=self._state['username'],
                                    password=self._state['password'])
        return func(self, *pargs, **kwargs)
    return inner_func
Check if the android API is logged in and log in if not; implies require_session_started.
2,164
def optional_manga_logged_in(func):
    @functools.wraps(func)
    @require_session_started
    def inner_func(self, *pargs, **kwargs):
        if not self._manga_api.logged_in and self.has_credentials:
            logger.info('Logging into android manga API for optional '
                        'meta method')
            self._manga_api.cr_login(account=self._state['username'],
                                     password=self._state['password'])
        return func(self, *pargs, **kwargs)
    return inner_func
Check if the android manga API is logged in and log in if credentials were provided; implies require_session_started.
2,165
def require_ajax_logged_in(func):
    @functools.wraps(func)
    def inner_func(self, *pargs, **kwargs):
        if not self._ajax_api.logged_in:
            logger.info('Logging into AJAX API for required meta method')
            if not self.has_credentials:
                raise ApiLoginFailure(
                    'Login is required but no credentials were provided')
            self._ajax_api.User_Login(name=self._state['username'],
                                      password=self._state['password'])
        return func(self, *pargs, **kwargs)
    return inner_func
Check if the AJAX API is logged in and log in if not.
2,166
def start_session(self):
    self._android_api.start_session()
    self._manga_api.cr_start_session()
    return self.session_started
Start the underlying APIs' sessions.
2,167
def list_anime_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES,
                      offset=0):
    result = self._android_api.list_series(
        media_type=ANDROID.MEDIA_TYPE_ANIME,
        filter=sort,
        limit=limit,
        offset=offset)
    return result
Get a list of anime series.
2,168
def list_drama_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES,
                      offset=0):
    result = self._android_api.list_series(
        media_type=ANDROID.MEDIA_TYPE_DRAMA,
        filter=sort,
        limit=limit,
        offset=offset)
    return result
Get a list of drama series.
2,169
def list_manga_series(self, filter=None, content_type='jp_manga'):
    result = self._manga_api.list_series(filter, content_type)
    return result
Get a list of manga series.
2,170
def search_anime_series(self, query_string):
    result = self._android_api.list_series(
        media_type=ANDROID.MEDIA_TYPE_ANIME,
        filter=ANDROID.FILTER_PREFIX + query_string)
    return result
Search the anime series list by series name (case-sensitive).
2,171
def search_drama_series(self, query_string):
    result = self._android_api.list_series(
        media_type=ANDROID.MEDIA_TYPE_DRAMA,
        filter=ANDROID.FILTER_PREFIX + query_string)
    return result
Search the drama series list by series name (case-sensitive).
2,172
def search_manga_series(self, query_string):
    result = self._manga_api.list_series()
    return [series for series in result
            if series['locale']['enUS']['name'].lower().startswith(
                query_string.lower())]
Search the manga series list by name (case-insensitive).
2,173
def list_media(self, series, sort=META.SORT_DESC, limit=META.MAX_MEDIA,
               offset=0):
    params = {
        'sort': sort,
        'offset': offset,
        'limit': limit,
    }
    params.update(self._get_series_query_dict(series))
    result = self._android_api.list_media(**params)
    return result
List media for a given series or collection.
2,174
def search_media(self, series, query_string):
    params = {
        'sort': ANDROID.FILTER_PREFIX + query_string,
    }
    params.update(self._get_series_query_dict(series))
    result = self._android_api.list_media(**params)
    return result
Search for media from a series starting with query_string (case-sensitive).
2,175
def get_media_stream(self, media_item, format, quality):
    result = self._ajax_api.VideoPlayer_GetStandardConfig(
        media_id=media_item.media_id,
        video_format=format,
        video_quality=quality)
    return MediaStream(result)
Get the stream data for a given media item.
2,176
def unfold_subtitle_stub(self, subtitle_stub):
    return Subtitle(self._ajax_api.Subtitle_GetXml(
        subtitle_script_id=int(subtitle_stub.id)))
Turn a SubtitleStub into a full Subtitle object.
2,177
def get_stream_formats(self, media_item):
    scraper = ScraperApi(self._ajax_api._connector)
    formats = scraper.get_media_formats(media_item.media_id)
    return formats
Get the available media formats for a given media item.
2,178
def list_queue(self, media_types=[META.TYPE_ANIME, META.TYPE_DRAMA]):
    result = self._android_api.queue(media_types='|'.join(media_types))
    return [queue_item['series'] for queue_item in result]
List the series in the queue, optionally filtering by type of media.
2,179
def add_to_queue(self, series):
    result = self._android_api.add_to_queue(series_id=series.series_id)
    return result
Add a series to the queue.
2,180
def remove_from_queue(self, series):
    result = self._android_api.remove_from_queue(series_id=series.series_id)
    return result
Remove a series from the queue.
2,181
def schema(name, dct, *, strict=False):
    return type(name, (CompoundSchema,), {
        '__slots__': (),
        'schema': dct,
        'strict': strict,
    })
Create a compound tag schema. This function is a short convenience function that makes it easy to subclass the base CompoundSchema class. The name argument is the name of the class and dct should be a dictionary containing the actual schema. The schema should map keys to tag types or other compound schemas. If the strict keyword-only argument is set to True, interacting with keys that are not defined in the schema will raise a TypeError.
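A short sketch of defining and using a schema, assuming nbtlib-style Int and String tag classes are in scope:

Position = schema('Position', {'x': Int, 'y': Int, 'z': Int})
Player = schema('Player', {'name': String, 'position': Position}, strict=True)

player = Player({'name': 'Steve', 'position': {'x': 1, 'y': 64, 'z': 1}})
player['position']['y'] = 65  # cast to Int by the schema
# player['health'] = 20       # strict=True would raise TypeError here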
2,182
def cast_item(cls, key, value):
    schema_type = cls.schema.get(key)
    if schema_type is None:
        if cls.strict:
            raise TypeError(f'Invalid key {key!r}')
    elif not isinstance(value, schema_type):
        try:
            return schema_type(value)
        except CastError:
            raise
        except Exception as exc:
            raise CastError(value, schema_type) from exc
    return value
Cast a schema item to the appropriate tag type.
2,183
def tarbz(source_directory_path, output_file_full_path, silent=False):
    output_directory_path = output_file_full_path.rsplit("/", 1)[0]
    create_folders(output_directory_path)
    full_tar_file_path = output_file_full_path + ".tbz"
    if path.exists(full_tar_file_path):
        raise Exception("%s already exists, aborting." % (full_tar_file_path))
    tar_command = ("tar jpcfvC %s %s %s"
                   % (full_tar_file_path, source_directory_path, "./"))
    call(tar_command, silent=silent)
    return full_tar_file_path
Tars and bzips a directory, preserving as much metadata as possible. Adds .tbz to the provided output file name.
2,184
def untarbz(source_file_path, output_directory_path, silent=False):
    if not path.exists(source_file_path):
        raise Exception("the provided tar file %s does not exist."
                        % (source_file_path))
    # Note: the original compared a one-character slice [0:1] against "./",
    # which could never match; [0:2] is the intended check.
    if output_directory_path[0:2] == "./":
        output_directory_path = path.abspath(output_directory_path)
    if output_directory_path[0] != "/":
        raise Exception("your output directory path must start with '/' or "
                        "'./'; you used: %s" % (output_directory_path))
    create_folders(output_directory_path)
    if listdir(output_directory_path):
        raise Exception("Your output directory isn't empty. Aborting as "
                        "existing files are not overwritten by tar.")
    untar_command = ("tar jxfvkCp %s %s --atime-preserve "
                     % (source_file_path, output_directory_path))
    call(untar_command, silent=silent)
Restores your mongo database backup from a .tbz created using this library. This function will ensure that a directory is created at the file path if one does not exist already. If used in conjunction with this library's mongodump operation, the backup data will be extracted directly into the provided directory path. This command will fail if the output directory is not empty, as existing files with identical names are not overwritten by tar.
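A hypothetical round trip using the two helpers above; the paths are illustrative:

archive = tarbz('/var/backups/mongo_dump', '/var/backups/mongo-2024-01-01')
# archive == '/var/backups/mongo-2024-01-01.tbz'
untarbz(archive, '/var/restore/mongo_dump')  # target directory must be empty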
2,185
def value_contains(self, value, attribute):
    for item in self[attribute]:
        if value in item:
            return True
    return False
Determine if any of the items in the value list for the given attribute contain value.
2,186
def clear_search_defaults(self, args=None):
    if args is None:
        self._search_defaults.clear()
    else:
        for arg in args:
            if arg in self._search_defaults:
                del self._search_defaults[arg]
Clear the search defaults specified by the list of parameter names given as args. If args is not given, clear all existing search defaults.
2,187
def search(self, filter, base_dn=None, attrs=None, scope=None,
           timeout=None, limit=None):
    if base_dn is None:
        base_dn = self._search_defaults.get('base_dn', '')
    if attrs is None:
        attrs = self._search_defaults.get('attrs', None)
    if scope is None:
        scope = self._search_defaults.get('scope', ldap.SCOPE_SUBTREE)
    if timeout is None:
        timeout = self._search_defaults.get('timeout', -1)
    if limit is None:
        limit = self._search_defaults.get('limit', 0)
    results = self.connection.search_ext_s(
        base_dn, scope, filter, attrs, timeout=timeout, sizelimit=limit)
    return self.to_items(results)
Search the directory.
2,188
def get(self, *args, **kwargs):
    results = self.search(*args, **kwargs)
    num_results = len(results)
    if num_results == 1:
        return results[0]
    if num_results > 1:
        raise MultipleObjectsFound()
    raise ObjectNotFound()
Get a single object.
2,189
def authenticate(self, dn='', password=''):
    try:
        self.connection.simple_bind_s(dn, password)
    except tuple(self.failed_authentication_exceptions):
        return False
    else:
        return True
Attempt to authenticate the given dn and password using a bind operation. Return True if the bind is successful, and False if an exception contained in self.failed_authentication_exceptions was raised.
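A minimal usage sketch; the directory object and DN are illustrative:

if directory.authenticate('uid=jdoe,ou=people,dc=example,dc=com', 'secret'):
    print('bind succeeded')
else:
    print('bind failed')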
2,190
def compare(self, dn, attr, value):
    return self.connection.compare_s(dn, attr, value) == 1
Compare the attr of the entry dn with the given value.
2,191
def get_property_func(key):
    def get_it(obj):
        try:
            return getattr(obj, key)
        except AttributeError:
            return obj.tags.get(key)
    return get_it
Get the accessor function for an instance to look for key.
2,192
def list_billing(region, filter_by_kwargs):
    conn = boto.ec2.cloudwatch.connect_to_region(region)
    metrics = conn.list_metrics(metric_name='EstimatedCharges')
    if filter_by_kwargs:
        filter_key = filter_by_kwargs.keys()[0]
        filter_value = filter_by_kwargs.values()[0]
        if filter_value:
            filtered_metrics = [
                x for x in metrics
                if x.dimensions.get(filter_key)
                and x.dimensions.get(filter_key)[0] == filter_value]
        else:
            filtered_metrics = [x for x in metrics
                                if not x.dimensions.get(filter_key)]
    else:
        filtered_metrics = metrics
    return filtered_metrics
List available billing metrics.
2,193
def list_ebs(region, filter_by_kwargs):
    conn = boto.ec2.connect_to_region(region)
    instances = conn.get_all_volumes()
    return lookup(instances, filter_by=filter_by_kwargs)
List running EBS volumes.
2,194
def list_elb(region, filter_by_kwargs):
    conn = boto.ec2.elb.connect_to_region(region)
    instances = conn.get_all_load_balancers()
    return lookup(instances, filter_by=filter_by_kwargs)
List all load balancers.
2,195
def list_rds(region, filter_by_kwargs):
    conn = boto.rds.connect_to_region(region)
    instances = conn.get_all_dbinstances()
    return lookup(instances, filter_by=filter_by_kwargs)
List all RDS instances.
2,196
def list_elasticache(region, filter_by_kwargs):
    conn = boto.elasticache.connect_to_region(region)
    req = conn.describe_cache_clusters()
    data = req["DescribeCacheClustersResponse"][
        "DescribeCacheClustersResult"]["CacheClusters"]
    if filter_by_kwargs:
        clusters = [x['CacheClusterId'] for x in data
                    if x[filter_by_kwargs.keys()[0]]
                    == filter_by_kwargs.values()[0]]
    else:
        clusters = [x['CacheClusterId'] for x in data]
    return clusters
List all ElastiCache clusters.
2,197
def list_autoscaling_group(region, filter_by_kwargs):
    conn = boto.ec2.autoscale.connect_to_region(region)
    groups = conn.get_all_groups()
    return lookup(groups, filter_by=filter_by_kwargs)
List all Auto Scaling Groups.
2,198
def list_sqs(region, filter_by_kwargs):
    conn = boto.sqs.connect_to_region(region)
    queues = conn.get_all_queues()
    return lookup(queues, filter_by=filter_by_kwargs)
List all SQS queues.
2,199
def list_kinesis_applications(region, filter_by_kwargs):
    conn = boto.kinesis.connect_to_region(region)
    streams = conn.list_streams()['StreamNames']
    kinesis_streams = {}
    for stream_name in streams:
        shard_ids = []
        shards = conn.describe_stream(stream_name)[
            'StreamDescription']['Shards']
        for shard in shards:
            shard_ids.append(shard['ShardId'])
        kinesis_streams[stream_name] = shard_ids
    return kinesis_streams
List all the Kinesis applications along with the shards for each stream.