Dataset columns: idx (int64, 0 to 63k), question (string, 61 to 4.03k characters), target (string, 6 to 1.23k characters).
61,600
def set_fallback_resolution(self, x_pixels_per_inch, y_pixels_per_inch):
    cairo.cairo_surface_set_fallback_resolution(
        self._pointer, x_pixels_per_inch, y_pixels_per_inch)
    self._check_status()
Set the horizontal and vertical resolution for image fallbacks .
61,601
def get_font_options(self):
    font_options = FontOptions()
    cairo.cairo_surface_get_font_options(
        self._pointer, font_options._pointer)
    return font_options
Retrieves the default font rendering options for the surface .
61,602
def set_device_scale(self, x_scale, y_scale):
    cairo.cairo_surface_set_device_scale(self._pointer, x_scale, y_scale)
    self._check_status()
Sets a scale that is multiplied to the device coordinates determined by the CTM when drawing to surface .
61,603
def get_mime_data ( self , mime_type ) : buffer_address = ffi . new ( 'unsigned char **' ) buffer_length = ffi . new ( 'unsigned long *' ) mime_type = ffi . new ( 'char[]' , mime_type . encode ( 'utf8' ) ) cairo . cairo_surface_get_mime_data ( self . _pointer , mime_type , buffer_address , buffer_length ) return ( ffi . buffer ( buffer_address [ 0 ] , buffer_length [ 0 ] ) if buffer_address [ 0 ] != ffi . NULL else None )
Return mime data previously attached to surface using the specified mime type .
61,604
def write_to_png ( self , target = None ) : return_bytes = target is None if return_bytes : target = io . BytesIO ( ) if hasattr ( target , 'write' ) : write_func = _make_write_func ( target ) _check_status ( cairo . cairo_surface_write_to_png_stream ( self . _pointer , write_func , ffi . NULL ) ) else : _check_status ( cairo . cairo_surface_write_to_png ( self . _pointer , _encode_filename ( target ) ) ) if return_bytes : return target . getvalue ( )
Writes the contents of surface as a PNG image .
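As a usage illustration of the write_to_png method above (a minimal sketch, assuming the cairocffi package this code appears to come from; the file name is a placeholder): when no target is given the PNG bytes are returned, otherwise the image is written to the given file or file-like object.

    import cairocffi as cairo

    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
    # ... draw on the surface with a cairo.Context here ...
    png_bytes = surface.write_to_png()   # no target: PNG data returned as bytes
    surface.write_to_png('output.png')   # filename: written to disk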
61,605
def create_from_png ( cls , source ) : if hasattr ( source , 'read' ) : read_func = _make_read_func ( source ) pointer = cairo . cairo_image_surface_create_from_png_stream ( read_func , ffi . NULL ) else : pointer = cairo . cairo_image_surface_create_from_png ( _encode_filename ( source ) ) self = object . __new__ ( cls ) Surface . __init__ ( self , pointer ) return self
Decode a PNG file into a new image surface .
61,606
def add_outline ( self , parent_id , utf8 , link_attribs , flags = None ) : if flags is None : flags = 0 value = cairo . cairo_pdf_surface_add_outline ( self . _pointer , parent_id , _encode_string ( utf8 ) , _encode_string ( link_attribs ) , flags ) self . _check_status ( ) return value
Add an item to the document outline hierarchy .
61,607
def set_metadata(self, metadata, utf8):
    cairo.cairo_pdf_surface_set_metadata(
        self._pointer, metadata, _encode_string(utf8))
    self._check_status()
Sets document metadata .
61,608
def set_thumbnail_size(self, width, height):
    cairo.cairo_pdf_surface_set_thumbnail_size(self._pointer, width, height)
Set thumbnail image size for the current and all subsequent pages .
61,609
def dsc_comment(self, comment):
    cairo.cairo_ps_surface_dsc_comment(self._pointer, _encode_string(comment))
    self._check_status()
Emit a comment into the PostScript output for the given surface .
61,610
def set_document_unit(self, unit):
    cairo.cairo_svg_surface_set_document_unit(self._pointer, unit)
    self._check_status()
Use specified unit for width and height of generated SVG file .
61,611
def get_document_unit(self):
    unit = cairo.cairo_svg_surface_get_document_unit(self._pointer)
    self._check_status()
    return unit
Get the unit of the SVG surface .
61,612
def get_extents(self):
    extents = ffi.new('cairo_rectangle_t *')
    if cairo.cairo_recording_surface_get_extents(self._pointer, extents):
        return (extents.x, extents.y, extents.width, extents.height)
Return the extents of the recording surface.
61,613
def add_color_stop_rgba(self, offset, red, green, blue, alpha=1):
    cairo.cairo_pattern_add_color_stop_rgba(
        self._pointer, offset, red, green, blue, alpha)
    self._check_status()
Adds a translucent color stop to a gradient pattern .
61,614
def _encode_string(string):
    if not isinstance(string, bytes):
        string = string.encode('utf8')
    return ffi.new('char[]', string)
Return a byte string encoding Unicode with UTF-8.
61,615
def text_to_glyphs ( self , x , y , text , with_clusters ) : glyphs = ffi . new ( 'cairo_glyph_t **' , ffi . NULL ) num_glyphs = ffi . new ( 'int *' ) if with_clusters : clusters = ffi . new ( 'cairo_text_cluster_t **' , ffi . NULL ) num_clusters = ffi . new ( 'int *' ) cluster_flags = ffi . new ( 'cairo_text_cluster_flags_t *' ) else : clusters = ffi . NULL num_clusters = ffi . NULL cluster_flags = ffi . NULL status = cairo . cairo_scaled_font_text_to_glyphs ( self . _pointer , x , y , _encode_string ( text ) , - 1 , glyphs , num_glyphs , clusters , num_clusters , cluster_flags ) glyphs = ffi . gc ( glyphs [ 0 ] , _keepref ( cairo , cairo . cairo_glyph_free ) ) if with_clusters : clusters = ffi . gc ( clusters [ 0 ] , _keepref ( cairo , cairo . cairo_text_cluster_free ) ) _check_status ( status ) glyphs = [ ( glyph . index , glyph . x , glyph . y ) for i in range ( num_glyphs [ 0 ] ) for glyph in [ glyphs [ i ] ] ] if with_clusters : clusters = [ ( cluster . num_bytes , cluster . num_glyphs ) for i in range ( num_clusters [ 0 ] ) for cluster in [ clusters [ i ] ] ] return glyphs , clusters , cluster_flags [ 0 ] else : return glyphs
Convert a string of text to a list of glyphs, optionally with cluster mapping, that can later be rendered using this scaled font.
61,616
def set_variations(self, variations):
    if variations is None:
        variations = ffi.NULL
    else:
        variations = _encode_string(variations)
    cairo.cairo_font_options_set_variations(self._pointer, variations)
    self._check_status()
Sets the OpenType font variations for the font options object .
61,617
def get_variations(self):
    variations = cairo.cairo_font_options_get_variations(self._pointer)
    if variations != ffi.NULL:
        return ffi.string(variations).decode('utf8', 'replace')
Gets the OpenType font variations for the font options object .
61,618
def decode_to_pixbuf ( image_data , width = None , height = None ) : loader = ffi . gc ( gdk_pixbuf . gdk_pixbuf_loader_new ( ) , gobject . g_object_unref ) error = ffi . new ( 'GError **' ) if width and height : gdk_pixbuf . gdk_pixbuf_loader_set_size ( loader , width , height ) handle_g_error ( error , gdk_pixbuf . gdk_pixbuf_loader_write ( loader , ffi . new ( 'guchar[]' , image_data ) , len ( image_data ) , error ) ) handle_g_error ( error , gdk_pixbuf . gdk_pixbuf_loader_close ( loader , error ) ) format_ = gdk_pixbuf . gdk_pixbuf_loader_get_format ( loader ) format_name = ( ffi . string ( gdk_pixbuf . gdk_pixbuf_format_get_name ( format_ ) ) . decode ( 'ascii' ) if format_ != ffi . NULL else None ) pixbuf = gdk_pixbuf . gdk_pixbuf_loader_get_pixbuf ( loader ) if pixbuf == ffi . NULL : raise ImageLoadingError ( 'Not enough image data (got a NULL pixbuf.)' ) return Pixbuf ( pixbuf ) , format_name
Decode an image from memory with GDK-PixBuf. The file format is detected automatically.
61,619
def decode_to_image_surface ( image_data , width = None , height = None ) : pixbuf , format_name = decode_to_pixbuf ( image_data , width , height ) surface = ( pixbuf_to_cairo_gdk ( pixbuf ) if gdk is not None else pixbuf_to_cairo_slices ( pixbuf ) if not pixbuf . get_has_alpha ( ) else pixbuf_to_cairo_png ( pixbuf ) ) return surface , format_name
Decode an image from memory into a cairo surface . The file format is detected automatically .
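A minimal usage sketch for the two decoding helpers above, assuming they are exposed as in cairocffi's pixbuf module; the input file name is a placeholder.

    from cairocffi import pixbuf

    with open('photo.jpg', 'rb') as f:
        image_data = f.read()
    # the format is detected automatically by GDK-PixBuf
    surface, format_name = pixbuf.decode_to_image_surface(image_data)
    print(format_name)  # e.g. 'jpeg'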
61,620
def pixbuf_to_cairo_gdk(pixbuf):
    dummy_context = Context(ImageSurface(constants.FORMAT_ARGB32, 1, 1))
    gdk.gdk_cairo_set_source_pixbuf(
        dummy_context._pointer, pixbuf._pointer, 0, 0)
    return dummy_context.get_source().get_surface()
Convert from PixBuf to ImageSurface using GDK .
61,621
def pixbuf_to_cairo_slices ( pixbuf ) : assert pixbuf . get_colorspace ( ) == gdk_pixbuf . GDK_COLORSPACE_RGB assert pixbuf . get_n_channels ( ) == 3 assert pixbuf . get_bits_per_sample ( ) == 8 width = pixbuf . get_width ( ) height = pixbuf . get_height ( ) rowstride = pixbuf . get_rowstride ( ) pixels = ffi . buffer ( pixbuf . get_pixels ( ) , pixbuf . get_byte_length ( ) ) pixels = pixels [ : ] cairo_stride = ImageSurface . format_stride_for_width ( constants . FORMAT_RGB24 , width ) data = bytearray ( cairo_stride * height ) big_endian = sys . byteorder == 'big' pixbuf_row_length = width * 3 cairo_row_length = width * 4 alpha = b'\xff' * width for y in range ( height ) : offset = rowstride * y end = offset + pixbuf_row_length red = pixels [ offset : end : 3 ] green = pixels [ offset + 1 : end : 3 ] blue = pixels [ offset + 2 : end : 3 ] offset = cairo_stride * y end = offset + cairo_row_length if big_endian : data [ offset : end : 4 ] = alpha data [ offset + 1 : end : 4 ] = red data [ offset + 2 : end : 4 ] = green data [ offset + 3 : end : 4 ] = blue else : data [ offset + 3 : end : 4 ] = alpha data [ offset + 2 : end : 4 ] = red data [ offset + 1 : end : 4 ] = green data [ offset : end : 4 ] = blue data = array ( 'B' , data ) return ImageSurface ( constants . FORMAT_RGB24 , width , height , data , cairo_stride )
Convert from PixBuf to ImageSurface using slice-based byte swapping.
61,622
def pixbuf_to_cairo_png ( pixbuf ) : buffer_pointer = ffi . new ( 'gchar **' ) buffer_size = ffi . new ( 'gsize *' ) error = ffi . new ( 'GError **' ) handle_g_error ( error , pixbuf . save_to_buffer ( buffer_pointer , buffer_size , ffi . new ( 'char[]' , b'png' ) , error , ffi . new ( 'char[]' , b'compression' ) , ffi . new ( 'char[]' , b'0' ) , ffi . NULL ) ) png_bytes = ffi . buffer ( buffer_pointer [ 0 ] , buffer_size [ 0 ] ) return ImageSurface . create_from_png ( BytesIO ( png_bytes ) )
Convert from PixBuf to ImageSurface by going through the PNG format .
61,623
def probePlayer ( requested_player = '' ) : ret_player = None if logger . isEnabledFor ( logging . INFO ) : logger . info ( "Probing available multimedia players..." ) implementedPlayers = Player . __subclasses__ ( ) if logger . isEnabledFor ( logging . INFO ) : logger . info ( "Implemented players: " + ", " . join ( [ player . PLAYER_CMD for player in implementedPlayers ] ) ) if requested_player : req = requested_player . split ( ',' ) for r_player in req : if r_player == 'vlc' : r_player = 'cvlc' for player in implementedPlayers : if player . PLAYER_CMD == r_player : ret_player = check_player ( player ) if ret_player is not None : return ret_player if ret_player is None : if logger . isEnabledFor ( logging . INFO ) : logger . info ( 'Requested player "{}" not supported' . format ( r_player ) ) else : for player in implementedPlayers : ret_player = check_player ( player ) if ret_player is not None : break return ret_player
Probes the multimedia players which are available on the host system .
61,624
def play ( self , name , streamUrl , encoding = '' ) : self . close ( ) self . name = name self . oldUserInput = { 'Input' : '' , 'Volume' : '' , 'Title' : '' } self . muted = False self . show_volume = True self . title_prefix = '' self . playback_is_on = False self . outputStream . write ( 'Station: "{}"' . format ( name ) , self . status_update_lock ) if logger . isEnabledFor ( logging . INFO ) : logger . info ( 'Selected Station: "{}"' . format ( name ) ) if encoding : self . _station_encoding = encoding else : self . _station_encoding = 'utf-8' opts = [ ] isPlayList = streamUrl . split ( "?" ) [ 0 ] [ - 3 : ] in [ 'm3u' , 'pls' ] opts = self . _buildStartOpts ( streamUrl , isPlayList ) self . process = subprocess . Popen ( opts , shell = False , stdout = subprocess . PIPE , stdin = subprocess . PIPE , stderr = subprocess . STDOUT ) t = threading . Thread ( target = self . updateStatus , args = ( self . status_update_lock , ) ) t . start ( ) try : self . connection_timeout_thread = threading . Timer ( self . playback_timeout , self . playback_timeout_handler ) self . connection_timeout_thread . start ( ) except : self . connection_timeout_thread = None if ( logger . isEnabledFor ( logging . ERROR ) ) : logger . error ( "playback detection thread start failed" ) if logger . isEnabledFor ( logging . INFO ) : logger . info ( "Player started" )
Use a multimedia player to play a stream.
61,625
def _sendCommand ( self , command ) : if ( self . process is not None ) : try : if logger . isEnabledFor ( logging . DEBUG ) : logger . debug ( "Command: {}" . format ( command ) . strip ( ) ) self . process . stdin . write ( command . encode ( 'utf-8' , 'replace' ) ) self . process . stdin . flush ( ) except : msg = "Error when sending: {}" if logger . isEnabledFor ( logging . ERROR ) : logger . error ( msg . format ( command ) . strip ( ) , exc_info = True )
Send a keystroke command to the player.
61,626
def _format_title_string(self, title_string):
    return self._title_string_format_text_tag(
        title_string.replace(self.icy_tokkens[0], self.icy_title_prefix))
Format mpv's title.
61,627
def _format_title_string ( self , title_string ) : if "StreamTitle='" in title_string : tmp = title_string [ title_string . find ( "StreamTitle='" ) : ] . replace ( "StreamTitle='" , self . icy_title_prefix ) ret_string = tmp [ : tmp . find ( "';" ) ] else : ret_string = title_string if '"artist":"' in ret_string : ret_string = self . icy_title_prefix + ret_string [ ret_string . find ( '"artist":' ) + 10 : ] . replace ( '","title":"' , ' - ' ) . replace ( '"}\';' , '' ) return self . _title_string_format_text_tag ( ret_string )
Format mplayer's title.
61,628
def _format_volume_string(self, volume_string):
    vol = volume_string[volume_string.find(self.volume_string):]
    return '[' + vol.replace(' %', '%').replace('ume', '') + '] '
Format mplayer's volume.
61,629
def _format_volume_string(self, volume_string):
    self.actual_volume = int(
        volume_string.split(self.volume_string)[1].split(',')[0].split()[0])
    return '[Vol: {}%] '.format(
        int(100 * self.actual_volume / self.max_volume))
Format vlc's volume.
61,630
def _format_title_string(self, title_string):
    sp = title_string.split(self.icy_tokkens[0])
    if sp[0] == title_string:
        ret_string = title_string
    else:
        ret_string = self.icy_title_prefix + sp[1]
    return self._title_string_format_text_tag(ret_string)
Format vlc's title.
61,631
def _is_accepted_input ( self , input_string ) : ret = False accept_filter = ( self . volume_string , "http stream debug: " ) reject_filter = ( ) for n in accept_filter : if n in input_string : ret = True break if ret : for n in reject_filter : if n in input_string : ret = False break return ret
Filter vlc input.
61,632
def _no_mute_on_stop_playback ( self ) : if self . ctrl_c_pressed : return if self . isPlaying ( ) : if self . actual_volume == - 1 : self . _get_volume ( ) while self . actual_volume == - 1 : pass if self . actual_volume == 0 : self . actual_volume = int ( self . max_volume * 0.25 ) self . _sendCommand ( 'volume {}\n' . format ( self . actual_volume ) ) if logger . isEnabledFor ( logging . DEBUG ) : logger . debug ( 'Unmuting VLC on exit: {} (25%)' . format ( self . actual_volume ) ) elif self . muted : if self . actual_volume > 0 : self . _sendCommand ( 'volume {}\n' . format ( self . actual_volume ) ) if logger . isEnabledFor ( logging . DEBUG ) : logger . debug ( 'VLC volume restored on exit: {0} ({1}%)' . format ( self . actual_volume , int ( 100 * self . actual_volume / self . max_volume ) ) ) self . show_volume = True
Make sure vlc does not stop while muted.
61,633
def _check_stations_csv(self, usr, root):
    if path.exists(path.join(usr, 'stations.csv')):
        return
    else:
        copyfile(root, path.join(usr, 'stations.csv'))
Relocate a stations.csv copy to the user's home directory for easier management, e.g. so that sudo is not needed when adding a new station.
61,634
def _is_playlist_in_config_dir ( self ) : if path . dirname ( self . stations_file ) == self . stations_dir : self . foreign_file = False self . foreign_filename_only_no_extension = '' else : self . foreign_file = True self . foreign_filename_only_no_extension = self . stations_filename_only_no_extension self . foreign_copy_asked = False
Check if a csv file is in the config dir
61,635
def _playlist_format_changed ( self ) : new_format = False for n in self . stations : if n [ 2 ] != '' : new_format = True break if self . new_format == new_format : return False else : return True
Check whether the playlist uses the new or the old format and report whether the format has changed.
61,636
def save_playlist_file ( self , stationFile = '' ) : if self . _playlist_format_changed ( ) : self . dirty_playlist = True self . new_format = not self . new_format if stationFile : st_file = stationFile else : st_file = self . stations_file if not self . dirty_playlist : if logger . isEnabledFor ( logging . DEBUG ) : logger . debug ( 'Playlist not modified...' ) return 0 st_new_file = st_file . replace ( '.csv' , '.txt' ) tmp_stations = self . stations [ : ] tmp_stations . reverse ( ) if self . new_format : tmp_stations . append ( [ '# Find lots more stations at http://www.iheart.com' , '' , '' ] ) else : tmp_stations . append ( [ '# Find lots more stations at http://www.iheart.com' , '' ] ) tmp_stations . reverse ( ) try : with open ( st_new_file , 'w' ) as cfgfile : writter = csv . writer ( cfgfile ) for a_station in tmp_stations : writter . writerow ( self . _format_playlist_row ( a_station ) ) except : if logger . isEnabledFor ( logging . DEBUG ) : logger . debug ( 'Cannot open playlist file for writing,,,' ) return - 1 try : move ( st_new_file , st_file ) except : if logger . isEnabledFor ( logging . DEBUG ) : logger . debug ( 'Cannot rename playlist file...' ) return - 2 self . dirty_playlist = False return 0
Save a playlist: create a txt file, write the stations to it, then rename it to the final target.
61,637
def _bytes_to_human ( self , B ) : KB = float ( 1024 ) MB = float ( KB ** 2 ) GB = float ( KB ** 3 ) TB = float ( KB ** 4 ) if B < KB : return '{0} B' . format ( B ) B = float ( B ) if KB <= B < MB : return '{0:.2f} KB' . format ( B / KB ) elif MB <= B < GB : return '{0:.2f} MB' . format ( B / MB ) elif GB <= B < TB : return '{0:.2f} GB' . format ( B / GB ) elif TB <= B : return '{0:.2f} TB' . format ( B / TB )
Return the given number of bytes as a human-friendly KB, MB, GB or TB string.
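A few worked values for the helper above, following its 1 KB = 1024 B thresholds (self stands for the owning object):

    self._bytes_to_human(512)             # '512 B'
    self._bytes_to_human(2048)            # '2.00 KB'
    self._bytes_to_human(5 * 1024 ** 3)   # '5.00 GB'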
61,638
def append_station ( self , params , stationFile = '' ) : if self . new_format : if stationFile : st_file = stationFile else : st_file = self . stations_file st_file , ret = self . _get_playlist_abspath_from_data ( st_file ) if ret < - 1 : return ret try : with open ( st_file , 'a' ) as cfgfile : writter = csv . writer ( cfgfile ) writter . writerow ( params ) return 0 except : return - 5 else : self . stations . append ( [ params [ 0 ] , params [ 1 ] , params [ 2 ] ] ) self . dirty_playlist = True st_file , ret = self . _get_playlist_abspath_from_data ( stationFile ) if ret < - 1 : return ret ret = self . save_playlist_file ( st_file ) if ret < 0 : ret -= 4 return ret
Append a station to csv file
61,639
def _check_config_file ( self , usr ) : package_config_file = path . join ( path . dirname ( __file__ ) , 'config' ) user_config_file = path . join ( usr , 'config' ) if path . exists ( user_config_file + '.restore' ) : try : copyfile ( user_config_file + '.restore' , user_config_file ) remove ( self . user_config_file + '.restore' ) except : pass if not path . exists ( user_config_file ) : copyfile ( package_config_file , user_config_file )
Make sure a config file exists in the config dir
61,640
def save_config ( self ) : if not self . opts [ 'dirty_config' ] [ 1 ] : if logger . isEnabledFor ( logging . INFO ) : logger . info ( 'Config not saved (not modified)' ) return 1 txt = copyfile ( self . config_file , self . config_file + '.restore' ) if self . opts [ 'default_station' ] [ 1 ] is None : self . opts [ 'default_station' ] [ 1 ] = '-1' try : with open ( self . config_file , 'w' ) as cfgfile : cfgfile . write ( txt . format ( self . opts [ 'player' ] [ 1 ] , self . opts [ 'default_playlist' ] [ 1 ] , self . opts [ 'default_station' ] [ 1 ] , self . opts [ 'default_encoding' ] [ 1 ] , self . opts [ 'connection_timeout' ] [ 1 ] , self . opts [ 'theme' ] [ 1 ] , self . opts [ 'use_transparency' ] [ 1 ] , self . opts [ 'confirm_station_deletion' ] [ 1 ] , self . opts [ 'confirm_playlist_reload' ] [ 1 ] , self . opts [ 'auto_save_playlist' ] [ 1 ] ) ) except : if logger . isEnabledFor ( logging . ERROR ) : logger . error ( 'Error saving config' ) return - 1 try : remove ( self . config_file + '.restore' ) except : pass if logger . isEnabledFor ( logging . INFO ) : logger . info ( 'Config saved' ) self . opts [ 'dirty_config' ] [ 1 ] = False return 0
Save config file
61,641
def ctrl_c_handler(self, signum, frame):
    self.ctrl_c_pressed = True
    if self._cnf.dirty_playlist:
        self.saveCurrentPlaylist()
    self._cnf.save_config()
Try to auto-save the config on exit. Do not check the result!
61,642
def _goto_playing_station ( self , changing_playlist = False ) : if ( self . player . isPlaying ( ) or self . operation_mode == PLAYLIST_MODE ) and ( self . selection != self . playing or changing_playlist ) : if changing_playlist : self . startPos = 0 max_lines = self . bodyMaxY - 2 if logger . isEnabledFor ( logging . INFO ) : logger . info ( 'max_lines = {0}, self.playing = {1}' . format ( max_lines , self . playing ) ) if self . number_of_items < max_lines : self . startPos = 0 elif self . playing < self . startPos or self . playing >= self . startPos + max_lines : if logger . isEnabledFor ( logging . INFO ) : logger . info ( '=== _goto:adjusting startPos' ) if self . playing < max_lines : self . startPos = 0 if self . playing - int ( max_lines / 2 ) > 0 : self . startPos = self . playing - int ( max_lines / 2 ) elif self . playing > self . number_of_items - max_lines : self . startPos = self . number_of_items - max_lines else : self . startPos = int ( self . playing + 1 / max_lines ) - int ( max_lines / 2 ) if logger . isEnabledFor ( logging . INFO ) : logger . info ( '===== _goto:startPos = {0}, changing_playlist = {1}' . format ( self . startPos , changing_playlist ) ) self . selection = self . playing self . refreshBody ( )
Make sure the playing station is visible.
61,643
def setStation ( self , number ) : if number < 0 : number = len ( self . stations ) - 1 elif number >= len ( self . stations ) : number = 0 self . selection = number maxDisplayedItems = self . bodyMaxY - 2 if self . selection - self . startPos >= maxDisplayedItems : self . startPos = self . selection - maxDisplayedItems + 1 elif self . selection < self . startPos : self . startPos = self . selection
Select the given station number
61,644
def _format_playlist_line ( self , lineNum , pad , station ) : line = "{0}. {1}" . format ( str ( lineNum + self . startPos + 1 ) . rjust ( pad ) , station [ 0 ] ) f_data = ' [{0}, {1}]' . format ( station [ 2 ] , station [ 1 ] ) if version_info < ( 3 , 0 ) : if len ( line . decode ( 'utf-8' , 'replace' ) ) + len ( f_data . decode ( 'utf-8' , 'replace' ) ) > self . bodyMaxX - 2 : f_data = ' [{0}]' . format ( station [ 1 ] ) if len ( line . decode ( 'utf-8' , 'replace' ) ) + len ( f_data . decode ( 'utf-8' , 'replace' ) ) > self . bodyMaxX - 2 : while len ( line . decode ( 'utf-8' , 'replace' ) ) + len ( f_data . decode ( 'utf-8' , 'replace' ) ) > self . bodyMaxX - 3 : f_data = f_data [ : - 1 ] f_data += ']' if len ( line . decode ( 'utf-8' , 'replace' ) ) + len ( f_data . decode ( 'utf-8' , 'replace' ) ) < self . maxX - 2 : while len ( line . decode ( 'utf-8' , 'replace' ) ) + len ( f_data . decode ( 'utf-8' , 'replace' ) ) < self . maxX - 2 : line += ' ' else : if len ( line ) + len ( f_data ) > self . bodyMaxX - 2 : f_data = ' [{0}]' . format ( station [ 1 ] ) if len ( line ) + len ( f_data ) > self . bodyMaxX - 2 : while len ( line ) + len ( f_data ) > self . bodyMaxX - 3 : f_data = f_data [ : - 1 ] f_data += ']' if len ( line ) + len ( f_data ) < self . maxX - 2 : while len ( line ) + len ( f_data ) < self . maxX - 2 : line += ' ' line += f_data return line
Format a playlist line so that it fills self.maxX.
61,645
def _resize ( self , init = False ) : col , row = self . _selection_to_col_row ( self . selection ) if not ( self . startPos <= row <= self . startPos + self . list_maxY - 1 ) : while row > self . startPos : self . startPos += 1 while row < self . startPos + self . list_maxY - 1 : self . startPos -= 1 if init and row > self . list_maxY : new_startPos = self . _num_of_rows - self . list_maxY + 1 if row > new_startPos : if logger . isEnabledFor ( logging . DEBUG ) : logger . debug ( 'setting startPos at {}' . format ( new_startPos ) ) self . startPos = new_startPos self . refresh_selection ( )
If the selection is at the end of the list, try to scroll down.
61,646
def _get_char ( self , win , char ) : def get_check_next_byte ( ) : char = win . getch ( ) if 128 <= char <= 191 : return char else : raise UnicodeError bytes = [ ] if char <= 127 : bytes . append ( char ) elif 192 <= char <= 223 : bytes . append ( char ) bytes . append ( get_check_next_byte ( ) ) elif 224 <= char <= 239 : bytes . append ( char ) bytes . append ( get_check_next_byte ( ) ) bytes . append ( get_check_next_byte ( ) ) elif 240 <= char <= 244 : bytes . append ( char ) bytes . append ( get_check_next_byte ( ) ) bytes . append ( get_check_next_byte ( ) ) bytes . append ( get_check_next_byte ( ) ) while 0 in bytes : bytes . remove ( 0 ) if version_info < ( 3 , 0 ) : out = '' . join ( [ chr ( b ) for b in bytes ] ) else : buf = bytearray ( bytes ) out = self . _decode_string ( buf ) return out
Read a possibly multi-byte (UTF-8) character from a curses window; no zero byte allowed.
61,647
def _get_history_next ( self ) : if self . _has_history : ret = self . _input_history . return_history ( 1 ) self . string = ret self . _curs_pos = len ( ret )
callback function for key down
61,648
def apply_transformations ( collection , transformations , select = None ) : for t in transformations : kwargs = dict ( t ) func = kwargs . pop ( 'name' ) cols = kwargs . pop ( 'input' , None ) if isinstance ( func , string_types ) : if func in ( 'and' , 'or' ) : func += '_' if not hasattr ( transform , func ) : raise ValueError ( "No transformation '%s' found!" % func ) func = getattr ( transform , func ) func ( collection , cols , ** kwargs ) if select is not None : transform . Select ( collection , select ) return collection
Apply all transformations to the variables in the collection .
61,649
def setup ( self , steps = None , drop_na = False , ** kwargs ) : input_nodes = None selectors = self . model . get ( 'input' , { } ) . copy ( ) selectors . update ( kwargs ) for i , b in enumerate ( self . steps ) : if steps is not None and i not in steps and b . name not in steps : continue b . setup ( input_nodes , drop_na = drop_na , ** selectors ) input_nodes = b . output_nodes
Set up the sequence of steps for analysis .
61,650
def setup ( self , input_nodes = None , drop_na = False , ** kwargs ) : self . output_nodes = [ ] input_nodes = input_nodes or self . input_nodes or [ ] if self . level != 'run' : kwargs = kwargs . copy ( ) kwargs . pop ( 'scan_length' , None ) collections = self . layout . get_collections ( self . level , drop_na = drop_na , ** kwargs ) objects = collections + input_nodes objects , kwargs = self . _filter_objects ( objects , kwargs ) groups = self . _group_objects ( objects ) model = self . model or { } X = model . get ( 'x' , [ ] ) for grp in groups : input_nodes = [ o for o in grp if isinstance ( o , AnalysisNode ) ] colls = list ( set ( grp ) - set ( input_nodes ) ) if input_nodes : node_coll = self . _concatenate_input_nodes ( input_nodes ) colls . append ( node_coll ) coll = merge_collections ( colls ) if len ( colls ) > 1 else colls [ 0 ] coll = apply_transformations ( coll , self . transformations ) if X : transform . Select ( coll , X ) node = AnalysisNode ( self . level , coll , self . contrasts , input_nodes , self . auto_contrasts ) self . output_nodes . append ( node )
Set up the Step and construct the design matrix .
61,651
def get_slice_info ( slice_times ) : slice_times = remove_duplicates ( slice_times ) slice_order = sorted ( range ( len ( slice_times ) ) , key = lambda k : slice_times [ k ] ) if slice_order == range ( len ( slice_order ) ) : slice_order_name = 'sequential ascending' elif slice_order == reversed ( range ( len ( slice_order ) ) ) : slice_order_name = 'sequential descending' elif slice_order [ 0 ] < slice_order [ 1 ] : slice_order_name = 'interleaved ascending' elif slice_order [ 0 ] > slice_order [ 1 ] : slice_order_name = 'interleaved descending' else : slice_order = [ str ( s ) for s in slice_order ] raise Exception ( 'Unknown slice order: [{0}]' . format ( ', ' . join ( slice_order ) ) ) return slice_order_name
Extract slice order from slice timing info .
61,652
def get_sizestr ( img ) : n_x , n_y , n_slices = img . shape [ : 3 ] import numpy as np voxel_dims = np . array ( img . header . get_zooms ( ) [ : 3 ] ) matrix_size = '{0}x{1}' . format ( num_to_str ( n_x ) , num_to_str ( n_y ) ) voxel_size = 'x' . join ( [ num_to_str ( s ) for s in voxel_dims ] ) fov = [ n_x , n_y ] * voxel_dims [ : 2 ] fov = 'x' . join ( [ num_to_str ( s ) for s in fov ] ) return n_slices , voxel_size , matrix_size , fov
Extract and reformat voxel size, matrix size, field of view, and number of slices into pretty strings.
61,653
def add_config_paths ( ** kwargs ) : for k , path in kwargs . items ( ) : if not os . path . exists ( path ) : raise ValueError ( 'Configuration file "{}" does not exist' . format ( k ) ) if k in cf . get_option ( 'config_paths' ) : raise ValueError ( 'Configuration {!r} already exists' . format ( k ) ) kwargs . update ( ** cf . get_option ( 'config_paths' ) ) cf . set_option ( 'config_paths' , kwargs )
Add to the pool of available configuration files for BIDSLayout .
61,654
def add_derivatives ( self , path , ** kwargs ) : paths = listify ( path ) deriv_dirs = [ ] def check_for_description ( dir ) : dd = os . path . join ( dir , 'dataset_description.json' ) return os . path . exists ( dd ) for p in paths : p = os . path . abspath ( p ) if os . path . exists ( p ) : if check_for_description ( p ) : deriv_dirs . append ( p ) else : subdirs = [ d for d in os . listdir ( p ) if os . path . isdir ( os . path . join ( p , d ) ) ] for sd in subdirs : sd = os . path . join ( p , sd ) if check_for_description ( sd ) : deriv_dirs . append ( sd ) if not deriv_dirs : warnings . warn ( "Derivative indexing was enabled, but no valid " "derivatives datasets were found in any of the " "provided or default locations. Please make sure " "all derivatives datasets you intend to index " "contain a 'dataset_description.json' file, as " "described in the BIDS-derivatives specification." ) for deriv in deriv_dirs : dd = os . path . join ( deriv , 'dataset_description.json' ) with open ( dd , 'r' , encoding = 'utf-8' ) as ddfd : description = json . load ( ddfd ) pipeline_name = description . get ( 'PipelineDescription' , { } ) . get ( 'Name' ) if pipeline_name is None : raise ValueError ( "Every valid BIDS-derivatives dataset must " "have a PipelineDescription.Name field set " "inside dataset_description.json." ) if pipeline_name in self . derivatives : raise ValueError ( "Pipeline name '%s' has already been added " "to this BIDSLayout. Every added pipeline " "must have a unique name!" ) kwargs [ 'config' ] = kwargs . get ( 'config' ) or [ 'bids' , 'derivatives' ] kwargs [ 'sources' ] = kwargs . get ( 'sources' ) or self self . derivatives [ pipeline_name ] = BIDSLayout ( deriv , ** kwargs ) for deriv in self . derivatives . values ( ) : self . entities . update ( deriv . entities )
Add BIDS-Derivatives datasets to tracking.
61,655
def get_file ( self , filename , scope = 'all' ) : filename = os . path . abspath ( os . path . join ( self . root , filename ) ) layouts = self . _get_layouts_in_scope ( scope ) for ly in layouts : if filename in ly . files : return ly . files [ filename ] return None
Returns the BIDSFile object with the specified path .
61,656
def get_collections ( self , level , types = None , variables = None , merge = False , sampling_rate = None , skip_empty = False , ** kwargs ) : from bids . variables import load_variables index = load_variables ( self , types = types , levels = level , skip_empty = skip_empty , ** kwargs ) return index . get_collections ( level , variables , merge , sampling_rate = sampling_rate )
Return one or more variable Collections in the BIDS project .
61,657
def get_metadata ( self , path , include_entities = False , ** kwargs ) : f = self . get_file ( path ) self . metadata_index . index_file ( f . path ) if include_entities : entities = f . entities results = entities else : results = { } results . update ( self . metadata_index . file_index [ path ] ) return results
Return metadata found in JSON sidecars for the specified file .
61,658
def get_bval(self, path, **kwargs):
    result = self.get_nearest(path, extensions='bval', suffix='dwi',
                              all_=True, **kwargs)
    return listify(result)[0]
Get bval file for passed path .
61,659
def copy_files ( self , files = None , path_patterns = None , symbolic_links = True , root = None , conflicts = 'fail' , ** kwargs ) : _files = self . get ( return_type = 'objects' , ** kwargs ) if files : _files = list ( set ( files ) . intersection ( _files ) ) for f in _files : f . copy ( path_patterns , symbolic_link = symbolic_links , root = self . root , conflicts = conflicts )
Copies one or more BIDSFiles to new locations defined by each BIDSFile's entities and the specified path_patterns.
61,660
def index_file ( self , f , overwrite = False ) : if isinstance ( f , six . string_types ) : f = self . layout . get_file ( f ) if f . path in self . file_index and not overwrite : return if 'suffix' not in f . entities : return md = self . _get_metadata ( f . path ) for md_key , md_val in md . items ( ) : if md_key not in self . key_index : self . key_index [ md_key ] = { } self . key_index [ md_key ] [ f . path ] = md_val self . file_index [ f . path ] [ md_key ] = md_val
Index metadata for the specified file .
61,661
def search ( self , files = None , defined_fields = None , ** kwargs ) : if defined_fields is None : defined_fields = [ ] all_keys = set ( defined_fields ) | set ( kwargs . keys ( ) ) if not all_keys : raise ValueError ( "At least one field to search on must be passed." ) if files is None : files = set ( self . layout . files . keys ( ) ) for f in files : self . index_file ( f ) filesets = [ set ( self . key_index . get ( k , [ ] ) ) for k in all_keys ] matches = reduce ( lambda x , y : x & y , filesets ) if files is not None : matches &= set ( files ) if not matches : return [ ] def check_matches ( f , key , val ) : if isinstance ( val , six . string_types ) and '*' in val : val = ( '^%s$' % val ) . replace ( '*' , ".*" ) return re . search ( str ( self . file_index [ f ] [ key ] ) , val ) is not None else : return val == self . file_index [ f ] [ key ] for k , val in kwargs . items ( ) : matches = list ( filter ( lambda x : check_matches ( x , k , val ) , matches ) ) if not matches : return [ ] return matches
Search files in the layout by metadata fields .
61,662
def auto_model ( layout , scan_length = None , one_vs_rest = False ) : base_name = split ( layout . root ) [ - 1 ] tasks = layout . entities [ 'task' ] . unique ( ) task_models = [ ] for task_name in tasks : model = OrderedDict ( ) model [ "Name" ] = "_" . join ( [ base_name , task_name ] ) model [ "Description" ] = ( "Autogenerated model for the %s task from %s" % ( task_name , base_name ) ) model [ "Input" ] = { "Task" : task_name } steps = [ ] transformations = OrderedDict ( Name = 'Factor' , Input = [ 'trial_type' ] ) run = OrderedDict ( Level = 'Run' , Name = 'Run' , Transformations = [ transformations ] ) run_nodes = load_variables ( layout , task = task_name , levels = [ 'run' ] , scan_length = scan_length ) evs = [ ] for n in run_nodes . nodes : evs . extend ( n . variables [ 'trial_type' ] . values . values ) trial_types = np . unique ( evs ) trial_type_factors = [ "trial_type." + tt for tt in trial_types ] run [ 'Transformations' ] . append ( OrderedDict ( Name = 'Convolve' , Input = trial_type_factors ) ) run_model = OrderedDict ( X = trial_type_factors ) run [ "Model" ] = run_model if one_vs_rest : contrasts = [ ] for i , tt in enumerate ( trial_types ) : cdict = OrderedDict ( ) if len ( trial_types ) > 1 : cdict [ "Name" ] = "run_" + tt + "_vs_others" else : cdict [ "Name" ] = "run_" + tt cdict [ "ConditionList" ] = trial_type_factors weights = np . ones ( len ( trial_types ) ) try : weights [ trial_types != tt ] = - 1.0 / ( len ( trial_types ) - 1 ) except ZeroDivisionError : pass cdict [ "Weights" ] = list ( weights ) cdict [ "Type" ] = "t" contrasts . append ( cdict ) run [ "Contrasts" ] = contrasts steps . append ( run ) if one_vs_rest : sessions = layout . get_sessions ( ) if len ( sessions ) > 1 : contrast_names = [ cc [ "Name" ] for cc in steps [ - 1 ] [ "Contrasts" ] ] steps . append ( _make_passthrough_contrast ( "Session" , contrast_names ) ) subjects = layout . get_subjects ( ) if len ( subjects ) > 1 : contrast_names = [ cc [ "Name" ] for cc in steps [ - 1 ] [ "Contrasts" ] ] steps . append ( _make_passthrough_contrast ( "Subject" , contrast_names ) ) contrast_names = [ cc [ "Name" ] for cc in steps [ - 1 ] [ "Contrasts" ] ] steps . append ( _make_passthrough_contrast ( "Dataset" , contrast_names ) ) model [ "Steps" ] = steps task_models . append ( model ) return task_models
Create a simple default model for each of the tasks in a BIDSLayout. At the run level, contrast each trial type against all other trial types; then use t-tests at each higher level present to aggregate these results.
61,663
def split ( self , grouper ) : data = self . to_df ( condition = True , entities = True ) data = data . drop ( 'condition' , axis = 1 ) subsets = [ ] for i , ( name , g ) in enumerate ( data . groupby ( grouper ) ) : name = '%s.%s' % ( self . name , name ) col = self . __class__ ( name = name , data = g , source = self . source , run_info = getattr ( self , 'run_info' , None ) ) subsets . append ( col ) return subsets
Split the current SparseRunVariable into multiple columns .
61,664
def select_rows ( self , rows ) : self . values = self . values . iloc [ rows ] self . index = self . index . iloc [ rows , : ] for prop in self . _property_columns : vals = getattr ( self , prop ) [ rows ] setattr ( self , prop , vals )
Truncate internal arrays to keep only the specified rows .
61,665
def split ( self , grouper ) : values = grouper . values * self . values . values df = pd . DataFrame ( values , columns = grouper . columns ) return [ DenseRunVariable ( name = '%s.%s' % ( self . name , name ) , values = df [ name ] . values , run_info = self . run_info , source = self . source , sampling_rate = self . sampling_rate ) for i , name in enumerate ( df . columns ) ]
Split the current DenseRunVariable into multiple columns .
61,666
def _build_entity_index ( self , run_info , sampling_rate ) : index = [ ] interval = int ( round ( 1000. / sampling_rate ) ) _timestamps = [ ] for run in run_info : reps = int ( math . ceil ( run . duration * sampling_rate ) ) ent_vals = list ( run . entities . values ( ) ) df = pd . DataFrame ( [ ent_vals ] * reps , columns = list ( run . entities . keys ( ) ) ) ts = pd . date_range ( 0 , periods = len ( df ) , freq = '%sms' % interval ) _timestamps . append ( ts . to_series ( ) ) index . append ( df ) self . timestamps = pd . concat ( _timestamps , axis = 0 , sort = True ) return pd . concat ( index , axis = 0 , sort = True ) . reset_index ( drop = True )
Build the entity index from run information .
61,667
def resample ( self , sampling_rate , inplace = False , kind = 'linear' ) : if not inplace : var = self . clone ( ) var . resample ( sampling_rate , True , kind ) return var if sampling_rate == self . sampling_rate : return old_sr = self . sampling_rate n = len ( self . index ) self . index = self . _build_entity_index ( self . run_info , sampling_rate ) x = np . arange ( n ) num = len ( self . index ) from scipy . interpolate import interp1d f = interp1d ( x , self . values . values . ravel ( ) , kind = kind ) x_new = np . linspace ( 0 , n - 1 , num = num ) self . values = pd . DataFrame ( f ( x_new ) ) assert len ( self . values ) == len ( self . index ) self . sampling_rate = sampling_rate
Resample the Variable to the specified sampling rate .
61,668
def to_df ( self , condition = True , entities = True , timing = True , sampling_rate = None ) : if sampling_rate not in ( None , self . sampling_rate ) : return self . resample ( sampling_rate ) . to_df ( condition , entities ) df = super ( DenseRunVariable , self ) . to_df ( condition , entities ) if timing : df [ 'onset' ] = self . timestamps . values . astype ( float ) / 1e+9 df [ 'duration' ] = 1. / self . sampling_rate return df
Convert to a DataFrame with columns for name and entities .
61,669
def get_collections ( self , unit , names = None , merge = False , sampling_rate = None , ** entities ) : nodes = self . get_nodes ( unit , entities ) var_sets = [ ] for n in nodes : var_set = list ( n . variables . values ( ) ) var_set = [ v for v in var_set if v . matches_entities ( entities ) ] if names is not None : var_set = [ v for v in var_set if v . name in names ] if unit != 'run' : var_set = [ v . filter ( entities ) for v in var_set ] var_sets . append ( var_set ) if merge : var_sets = [ list ( chain ( * var_sets ) ) ] results = [ ] for vs in var_sets : if not vs : continue if unit == 'run' : vs = clc . BIDSRunVariableCollection ( vs , sampling_rate ) else : vs = clc . BIDSVariableCollection ( vs ) results . append ( vs ) if merge : return results [ 0 ] if results else None return results
Retrieve variable data for a specified level in the Dataset .
61,670
def get_or_create_node ( self , level , entities , * args , ** kwargs ) : result = self . get_nodes ( level , entities ) if result : if len ( result ) > 1 : raise ValueError ( "More than one matching Node found! If you're" " expecting more than one Node, use " "get_nodes() instead of get_or_create_node()." ) return result [ 0 ] if level == 'run' : node = RunNode ( entities , * args , ** kwargs ) else : node = Node ( level , entities ) entities = dict ( entities , node_index = len ( self . nodes ) , level = level ) self . nodes . append ( node ) node_row = pd . Series ( entities ) self . index = self . index . append ( node_row , ignore_index = True ) return node
Retrieves a child Node based on the specified criteria creating a new Node if necessary .
61,671
def merge_collections ( collections , force_dense = False , sampling_rate = 'auto' ) : if len ( listify ( collections ) ) == 1 : return collections levels = set ( [ c . level for c in collections ] ) if len ( levels ) > 1 : raise ValueError ( "At the moment, it's only possible to merge " "Collections at the same level of analysis. You " "passed collections at levels: %s." % levels ) variables = list ( chain ( * [ c . variables . values ( ) for c in collections ] ) ) cls = collections [ 0 ] . __class__ variables = cls . merge_variables ( variables , sampling_rate = sampling_rate ) if isinstance ( collections [ 0 ] , BIDSRunVariableCollection ) : if sampling_rate == 'auto' : rates = [ var . sampling_rate for var in variables if isinstance ( var , DenseRunVariable ) ] sampling_rate = rates [ 0 ] if rates else None return cls ( variables , sampling_rate ) return cls ( variables )
Merge two or more collections at the same level of analysis .
61,672
def merge_variables ( variables , ** kwargs ) : var_dict = OrderedDict ( ) for v in variables : if v . name not in var_dict : var_dict [ v . name ] = [ ] var_dict [ v . name ] . append ( v ) return [ merge_variables ( vars_ , ** kwargs ) for vars_ in list ( var_dict . values ( ) ) ]
Concatenates Variables along row axis .
61,673
def to_df ( self , variables = None , format = 'wide' , fillna = np . nan , ** kwargs ) : if variables is None : variables = list ( self . variables . keys ( ) ) if not isinstance ( variables [ 0 ] , BIDSVariable ) : variables = [ v for v in self . variables . values ( ) if v . name in variables ] dfs = [ v . to_df ( ** kwargs ) for v in variables ] df = pd . concat ( dfs , axis = 0 , sort = True ) if format == 'long' : return df . reset_index ( drop = True ) . fillna ( fillna ) ind_cols = list ( set ( df . columns ) - { 'condition' , 'amplitude' } ) df [ 'amplitude' ] = df [ 'amplitude' ] . fillna ( 'n/a' ) df = df . pivot_table ( index = ind_cols , columns = 'condition' , values = 'amplitude' , aggfunc = 'first' ) df = df . reset_index ( ) . replace ( 'n/a' , fillna ) df . columns . name = None return df
Merge variables into a single pandas DataFrame .
61,674
def from_df ( cls , data , entities = None , source = 'contrast' ) : variables = [ ] for col in data . columns : _data = pd . DataFrame ( data [ col ] . values , columns = [ 'amplitude' ] ) if entities is not None : _data = pd . concat ( [ _data , entities ] , axis = 1 , sort = True ) variables . append ( SimpleVariable ( name = col , data = _data , source = source ) ) return BIDSVariableCollection ( variables )
Create a Collection from a pandas DataFrame .
61,675
def clone(self):
    clone = copy(self)
    clone.variables = {k: v.clone() for (k, v) in self.variables.items()}
    return clone
Returns a shallow copy of the current instance, except that all variables are deep-cloned.
61,676
def _index_entities ( self ) : all_ents = pd . DataFrame . from_records ( [ v . entities for v in self . variables . values ( ) ] ) constant = all_ents . apply ( lambda x : x . nunique ( ) == 1 ) if constant . empty : self . entities = { } else : keep = all_ents . columns [ constant ] ents = { k : all_ents [ k ] . dropna ( ) . iloc [ 0 ] for k in keep } self . entities = { k : v for k , v in ents . items ( ) if pd . notnull ( v ) }
Sets the current instance's entities based on the existing index.
61,677
def match_variables(self, pattern, return_type='name'):
    pattern = re.compile(pattern)
    vars_ = [v for v in self.variables.values() if pattern.search(v.name)]
    return vars_ if return_type.startswith('var') else [v.name for v in vars_]
Return columns whose names match the provided regex pattern .
61,678
def to_df ( self , variables = None , format = 'wide' , sparse = True , sampling_rate = None , include_sparse = True , include_dense = True , ** kwargs ) : if not include_sparse and not include_dense : raise ValueError ( "You can't exclude both dense and sparse " "variables! That leaves nothing!" ) if variables is None : variables = list ( self . variables . keys ( ) ) if not include_sparse : variables = [ v for v in variables if isinstance ( self . variables [ v ] , DenseRunVariable ) ] if not include_dense : variables = [ v for v in variables if not isinstance ( self . variables [ v ] , DenseRunVariable ) ] if not variables : return None _vars = [ self . variables [ v ] for v in variables ] if sparse and all ( isinstance ( v , SimpleVariable ) for v in _vars ) : variables = _vars else : sampling_rate = sampling_rate or self . sampling_rate variables = list ( self . resample ( sampling_rate , variables , force_dense = True , in_place = False ) . values ( ) ) return super ( BIDSRunVariableCollection , self ) . to_df ( variables , format , ** kwargs )
Merge columns into a single pandas DataFrame .
61,679
def _transform(self, var):
    self.collection.variables.pop(var.name)
    return var.values
Rename happens automatically in the base class so all we need to do is unset the original variable in the collection .
61,680
def replace_entities ( entities , pattern ) : ents = re . findall ( r'\{(.*?)\}' , pattern ) new_path = pattern for ent in ents : match = re . search ( r'([^|<]+)(<.*?>)?(\|.*)?' , ent ) if match is None : return None name , valid , default = match . groups ( ) default = default [ 1 : ] if default is not None else default if name in entities and valid is not None : ent_val = str ( entities [ name ] ) if not re . match ( valid [ 1 : - 1 ] , ent_val ) : if default is None : return None entities [ name ] = default ent_val = entities . get ( name , default ) if ent_val is None : return None new_path = new_path . replace ( '{%s}' % ent , str ( ent_val ) ) return new_path
Replaces all entity names in a given pattern with the corresponding values provided by entities .
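To make the pattern syntax handled above concrete, here is a small worked example; the entities and pattern are hypothetical, but the {name<valid>|default} form follows the regex in the function.

    entities = {'subject': '01', 'task': 'rest'}
    pattern = 'sub-{subject}/func/sub-{subject}_task-{task}_bold{extension|.nii.gz}'
    replace_entities(entities, pattern)
    # -> 'sub-01/func/sub-01_task-rest_bold.nii.gz'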
61,681
def write_contents_to_file ( path , contents = None , link_to = None , content_mode = 'text' , root = None , conflicts = 'fail' ) : if root is None and not isabs ( path ) : root = os . getcwd ( ) if root : path = join ( root , path ) if exists ( path ) or islink ( path ) : if conflicts == 'fail' : msg = 'A file at path {} already exists.' raise ValueError ( msg . format ( path ) ) elif conflicts == 'skip' : msg = 'A file at path {} already exists, skipping writing file.' logging . warn ( msg . format ( path ) ) return elif conflicts == 'overwrite' : if isdir ( path ) : logging . warn ( 'New path is a directory, not going to ' 'overwrite it, skipping instead.' ) return os . remove ( path ) elif conflicts == 'append' : i = 1 while i < sys . maxsize : path_splits = splitext ( path ) path_splits [ 0 ] = path_splits [ 0 ] + '_%d' % i appended_filename = os . extsep . join ( path_splits ) if not exists ( appended_filename ) and not islink ( appended_filename ) : path = appended_filename break i += 1 else : raise ValueError ( 'Did not provide a valid conflicts parameter' ) if not exists ( dirname ( path ) ) : os . makedirs ( dirname ( path ) ) if link_to : os . symlink ( link_to , path ) elif contents : mode = 'wb' if content_mode == 'binary' else 'w' with open ( path , mode ) as f : f . write ( contents ) else : raise ValueError ( 'One of contents or link_to must be provided.' )
Uses provided filename patterns to write contents to a new path given a corresponding entity map .
61,682
def generate ( self , ** kwargs ) : descriptions = [ ] subjs = self . layout . get_subjects ( ** kwargs ) kwargs = { k : v for k , v in kwargs . items ( ) if k != 'subject' } for sid in subjs : descriptions . append ( self . _report_subject ( subject = sid , ** kwargs ) ) counter = Counter ( descriptions ) print ( 'Number of patterns detected: {0}' . format ( len ( counter . keys ( ) ) ) ) print ( utils . reminder ( ) ) return counter
Generate the methods section .
61,683
def _report_subject ( self , subject , ** kwargs ) : description_list = [ ] sessions = kwargs . pop ( 'session' , self . layout . get_sessions ( subject = subject , ** kwargs ) ) if not sessions : sessions = [ None ] elif not isinstance ( sessions , list ) : sessions = [ sessions ] for ses in sessions : niftis = self . layout . get ( subject = subject , extensions = 'nii.gz' , ** kwargs ) if niftis : description_list . append ( 'For session {0}:' . format ( ses ) ) description_list += parsing . parse_niftis ( self . layout , niftis , subject , self . config , session = ses ) metadata = self . layout . get_metadata ( niftis [ 0 ] . path ) else : raise Exception ( 'No niftis for subject {0}' . format ( subject ) ) if 'metadata' not in vars ( ) : raise Exception ( 'No valid jsons found. Cannot generate final ' 'paragraph.' ) description = '\n\t' . join ( description_list ) description = description . replace ( '\tFor session' , '\nFor session' ) description += '\n\n{0}' . format ( parsing . final_paragraph ( metadata ) ) return description
Write a report for a single subject .
61,684
def _gamma_difference_hrf ( tr , oversampling = 50 , time_length = 32. , onset = 0. , delay = 6 , undershoot = 16. , dispersion = 1. , u_dispersion = 1. , ratio = 0.167 ) : from scipy . stats import gamma dt = tr / oversampling time_stamps = np . linspace ( 0 , time_length , np . rint ( float ( time_length ) / dt ) . astype ( np . int ) ) time_stamps -= onset hrf = gamma . pdf ( time_stamps , delay / dispersion , dt / dispersion ) - ratio * gamma . pdf ( time_stamps , undershoot / u_dispersion , dt / u_dispersion ) hrf /= hrf . sum ( ) return hrf
Compute an HRF as the difference of two gamma functions.
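With the default parameters above (delay=6, undershoot=16, dispersion=1, ratio=0.167) this is the classic double-gamma shape. A minimal sketch of the same computation, assuming numpy and scipy and ignoring the small loc offset used in the original:

    import numpy as np
    from scipy.stats import gamma

    t = np.linspace(0, 32, 1600)                      # 32 s at oversampled resolution
    hrf = gamma.pdf(t, 6) - 0.167 * gamma.pdf(t, 16)  # peak minus undershoot
    hrf /= hrf.sum()                                  # normalize to unit sum, as above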
61,685
def spm_hrf(tr, oversampling=50, time_length=32., onset=0.):
    return _gamma_difference_hrf(tr, oversampling, time_length, onset)
Implementation of the SPM HRF model.
61,686
def glover_hrf(tr, oversampling=50, time_length=32., onset=0.):
    return _gamma_difference_hrf(tr, oversampling, time_length, onset,
                                 delay=6, undershoot=12., dispersion=.9,
                                 u_dispersion=.9, ratio=.35)
Implementation of the Glover HRF model.
61,687
def spm_dispersion_derivative ( tr , oversampling = 50 , time_length = 32. , onset = 0. ) : dd = .01 dhrf = 1. / dd * ( - _gamma_difference_hrf ( tr , oversampling , time_length , onset , dispersion = 1. + dd ) + _gamma_difference_hrf ( tr , oversampling , time_length , onset ) ) return dhrf
Implementation of the SPM dispersion derivative HRF model.
61,688
def glover_dispersion_derivative ( tr , oversampling = 50 , time_length = 32. , onset = 0. ) : dd = .01 dhrf = 1. / dd * ( - _gamma_difference_hrf ( tr , oversampling , time_length , onset , delay = 6 , undershoot = 12. , dispersion = .9 + dd , ratio = .35 ) + _gamma_difference_hrf ( tr , oversampling , time_length , onset , delay = 6 , undershoot = 12. , dispersion = .9 , ratio = .35 ) ) return dhrf
Implementation of the Glover dispersion derivative HRF model.
61,689
def _sample_condition ( exp_condition , frame_times , oversampling = 50 , min_onset = - 24 ) : n = frame_times . size min_onset = float ( min_onset ) n_hr = ( ( n - 1 ) * 1. / ( frame_times . max ( ) - frame_times . min ( ) ) * ( frame_times . max ( ) * ( 1 + 1. / ( n - 1 ) ) - frame_times . min ( ) - min_onset ) * oversampling ) + 1 hr_frame_times = np . linspace ( frame_times . min ( ) + min_onset , frame_times . max ( ) * ( 1 + 1. / ( n - 1 ) ) , np . rint ( n_hr ) . astype ( np . int ) ) onsets , durations , values = tuple ( map ( np . asanyarray , exp_condition ) ) if ( onsets < frame_times [ 0 ] + min_onset ) . any ( ) : warnings . warn ( ( 'Some stimulus onsets are earlier than %s in the' ' experiment and are thus not considered in the model' % ( frame_times [ 0 ] + min_onset ) ) , UserWarning ) tmax = len ( hr_frame_times ) regressor = np . zeros_like ( hr_frame_times ) . astype ( np . float ) t_onset = np . minimum ( np . searchsorted ( hr_frame_times , onsets ) , tmax - 1 ) regressor [ t_onset ] += values t_offset = np . minimum ( np . searchsorted ( hr_frame_times , onsets + durations ) , tmax - 1 ) for i , t in enumerate ( t_offset ) : if t < ( tmax - 1 ) and t == t_onset [ i ] : t_offset [ i ] += 1 regressor [ t_offset ] -= values regressor = np . cumsum ( regressor ) return regressor , hr_frame_times
Make a possibly oversampled event regressor from condition information .
61,690
def _resample_regressor(hr_regressor, hr_frame_times, frame_times):
    from scipy.interpolate import interp1d
    f = interp1d(hr_frame_times, hr_regressor)
    return f(frame_times).T
This function sub-samples the regressors at frame times.
61,691
def _orthogonalize(X):
    if X.size == X.shape[0]:
        return X
    from scipy.linalg import pinv, norm
    for i in range(1, X.shape[1]):
        X[:, i] -= np.dot(np.dot(X[:, i], X[:, :i]), pinv(X[:, :i]))
    return X
Orthogonalize every column of design matrix X w.r.t. the preceding columns.
61,692
def _regressor_names ( con_name , hrf_model , fir_delays = None ) : if hrf_model in [ 'glover' , 'spm' , None ] : return [ con_name ] elif hrf_model in [ "glover + derivative" , 'spm + derivative' ] : return [ con_name , con_name + "_derivative" ] elif hrf_model in [ 'spm + derivative + dispersion' , 'glover + derivative + dispersion' ] : return [ con_name , con_name + "_derivative" , con_name + "_dispersion" ] elif hrf_model == 'fir' : return [ con_name + "_delay_%d" % i for i in fir_delays ]
Returns a list of regressor names computed from con_name and the HRF type.
61,693
def _hrf_kernel ( hrf_model , tr , oversampling = 50 , fir_delays = None ) : acceptable_hrfs = [ 'spm' , 'spm + derivative' , 'spm + derivative + dispersion' , 'fir' , 'glover' , 'glover + derivative' , 'glover + derivative + dispersion' , None ] if hrf_model == 'spm' : hkernel = [ spm_hrf ( tr , oversampling ) ] elif hrf_model == 'spm + derivative' : hkernel = [ spm_hrf ( tr , oversampling ) , spm_time_derivative ( tr , oversampling ) ] elif hrf_model == 'spm + derivative + dispersion' : hkernel = [ spm_hrf ( tr , oversampling ) , spm_time_derivative ( tr , oversampling ) , spm_dispersion_derivative ( tr , oversampling ) ] elif hrf_model == 'glover' : hkernel = [ glover_hrf ( tr , oversampling ) ] elif hrf_model == 'glover + derivative' : hkernel = [ glover_hrf ( tr , oversampling ) , glover_time_derivative ( tr , oversampling ) ] elif hrf_model == 'glover + derivative + dispersion' : hkernel = [ glover_hrf ( tr , oversampling ) , glover_time_derivative ( tr , oversampling ) , glover_dispersion_derivative ( tr , oversampling ) ] elif hrf_model == 'fir' : hkernel = [ np . hstack ( ( np . zeros ( f * oversampling ) , np . ones ( oversampling ) ) ) for f in fir_delays ] elif hrf_model is None : hkernel = [ np . hstack ( ( 1 , np . zeros ( oversampling - 1 ) ) ) ] else : raise ValueError ( '"{0}" is not a known hrf model. Use one of {1}' . format ( hrf_model , acceptable_hrfs ) ) return hkernel
Given the specification of the hemodynamic model and time parameters, return the list of matching kernels.
61,694
def compute_regressor ( exp_condition , hrf_model , frame_times , con_id = 'cond' , oversampling = 50 , fir_delays = None , min_onset = - 24 ) : tr = float ( frame_times . max ( ) ) / ( np . size ( frame_times ) - 1 ) hr_regressor , hr_frame_times = _sample_condition ( exp_condition , frame_times , oversampling , min_onset ) hkernel = _hrf_kernel ( hrf_model , tr , oversampling , fir_delays ) conv_reg = np . array ( [ np . convolve ( hr_regressor , h ) [ : hr_regressor . size ] for h in hkernel ] ) computed_regressors = _resample_regressor ( conv_reg , hr_frame_times , frame_times ) if hrf_model != 'fir' : computed_regressors = _orthogonalize ( computed_regressors ) reg_names = _regressor_names ( con_id , hrf_model , fir_delays = fir_delays ) return computed_regressors , reg_names
This is the main function to convolve regressors with the HRF model.
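A hedged usage sketch for the driver function above; the condition arrays and TR are made up for illustration.

    import numpy as np

    frame_times = np.arange(200) * 2.0        # 200 scans, TR = 2 s
    onsets = np.array([10., 50., 90.])
    durations = np.array([1., 1., 1.])
    amplitudes = np.array([1., 1., 1.])
    exp_condition = (onsets, durations, amplitudes)

    regressors, names = compute_regressor(exp_condition, 'glover', frame_times,
                                          con_id='speech')
    # names == ['speech']; regressors holds one column per returned name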
61,695
def matches_entities ( obj , entities , strict = False ) : if strict and set ( obj . entities . keys ( ) ) != set ( entities . keys ( ) ) : return False comm_ents = list ( set ( obj . entities . keys ( ) ) & set ( entities . keys ( ) ) ) for k in comm_ents : current = obj . entities [ k ] target = entities [ k ] if isinstance ( target , ( list , tuple ) ) : if current not in target : return False elif current != target : return False return True
Checks whether an object s entities match the input .
61,696
def check_path_matches_patterns ( path , patterns ) : path = os . path . abspath ( path ) for patt in patterns : if isinstance ( patt , six . string_types ) : if path == patt : return True elif patt . search ( path ) : return True return False
Check if the path matches at least one of the provided patterns .
61,697
def count(self, files=False):
    return len(self.files) if files else len(self.unique())
Returns a count of unique values or files .
61,698
def general_acquisition_info(metadata):
    out_str = ('MR data were acquired using a {tesla}-Tesla {manu} {model} '
               'MRI scanner.')
    out_str = out_str.format(
        tesla=metadata.get('MagneticFieldStrength', 'UNKNOWN'),
        manu=metadata.get('Manufacturer', 'MANUFACTURER'),
        model=metadata.get('ManufacturersModelName', 'MODEL'))
    return out_str
General sentence on data acquisition. Should be the first sentence in the MRI data acquisition section.
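For example, with the three metadata keys the function above reads (hypothetical values):

    metadata = {'MagneticFieldStrength': 3,
                'Manufacturer': 'Siemens',
                'ManufacturersModelName': 'Prisma'}
    general_acquisition_info(metadata)
    # -> 'MR data were acquired using a 3-Tesla Siemens Prisma MRI scanner.'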
61,699
def parse_niftis ( layout , niftis , subj , config , ** kwargs ) : kwargs = { k : v for k , v in kwargs . items ( ) if v is not None } description_list = [ ] skip_task = { } for nifti_struct in niftis : nii_file = nifti_struct . path metadata = layout . get_metadata ( nii_file ) if not metadata : LOGGER . warning ( 'No json file found for %s' , nii_file ) else : import nibabel as nib img = nib . load ( nii_file ) if not description_list : description_list . append ( general_acquisition_info ( metadata ) ) if nifti_struct . entities [ 'datatype' ] == 'func' : if not skip_task . get ( nifti_struct . entities [ 'task' ] , False ) : echos = layout . get_echoes ( subject = subj , extensions = 'nii.gz' , task = nifti_struct . entities [ 'task' ] , ** kwargs ) n_echos = len ( echos ) if n_echos > 0 : metadata [ 'EchoTime' ] = [ ] for echo in sorted ( echos ) : echo_struct = layout . get ( subject = subj , echo = echo , extensions = 'nii.gz' , task = nifti_struct . entities [ 'task' ] , ** kwargs ) [ 0 ] echo_file = echo_struct . path echo_meta = layout . get_metadata ( echo_file ) metadata [ 'EchoTime' ] . append ( echo_meta [ 'EchoTime' ] ) n_runs = len ( layout . get_runs ( subject = subj , task = nifti_struct . entities [ 'task' ] , ** kwargs ) ) description_list . append ( func_info ( nifti_struct . entities [ 'task' ] , n_runs , metadata , img , config ) ) skip_task [ nifti_struct . entities [ 'task' ] ] = True elif nifti_struct . entities [ 'datatype' ] == 'anat' : suffix = nifti_struct . entities [ 'suffix' ] if suffix . endswith ( 'w' ) : suffix = suffix [ : - 1 ] + '-weighted' description_list . append ( anat_info ( suffix , metadata , img , config ) ) elif nifti_struct . entities [ 'datatype' ] == 'dwi' : bval_file = nii_file . replace ( '.nii.gz' , '.bval' ) description_list . append ( dwi_info ( bval_file , metadata , img , config ) ) elif nifti_struct . entities [ 'datatype' ] == 'fmap' : description_list . append ( fmap_info ( metadata , img , config , layout ) ) return description_list
Loop through niftis in a BIDSLayout and generate the appropriate description type for each scan . Compile all of the descriptions into a list .