Dataset columns (a minimal loading sketch follows the table):

idx (int64, 0-63k) | question (string, 61-4.03k chars) | target (string, 6-1.23k chars) |
---|---|---|
62,000 |
def _is_requirement ( line ) : line = line . strip ( ) return line and not ( line . startswith ( "-r" ) or line . startswith ( "#" ) )
|
Returns whether the line is a valid package requirement .
|
62,001 |
def render_to_response ( self , context ) : if self . redirect_if_one_result : if self . object_list . count ( ) == 1 and self . form . is_bound : return redirect ( self . object_list . get ( ) . get_absolute_url ( ) ) return super ( SearchMixin , self ) . render_to_response ( context )
|
When the user makes a search and there is only one result, redirect to the result's detail page rather than rendering the list.
|
62,002 |
def clean_start_time ( self ) : start = self . cleaned_data . get ( 'start_time' ) if not start : return start active_entries = self . user . timepiece_entries . filter ( start_time__gte = start , end_time__isnull = True ) for entry in active_entries : output = ( 'The start time is on or before the current entry: ' '%s - %s starting at %s' % ( entry . project , entry . activity , entry . start_time . strftime ( '%H:%M:%S' ) ) ) raise forms . ValidationError ( output ) return start
|
Make sure that the start time doesn't come before the active entry.
|
62,003 |
def clean ( self ) : active = utils . get_active_entry ( self . user ) start_time = self . cleaned_data . get ( 'start_time' , None ) end_time = self . cleaned_data . get ( 'end_time' , None ) if active and active . pk != self . instance . pk : if ( start_time and start_time > active . start_time ) or ( end_time and end_time > active . start_time ) : raise forms . ValidationError ( 'The start time or end time conflict with the active ' 'entry: {activity} on {project} starting at ' '{start_time}.' . format ( project = active . project , activity = active . activity , start_time = active . start_time . strftime ( '%H:%M:%S' ) , ) ) month_start = utils . get_month_start ( start_time ) next_month = month_start + relativedelta ( months = 1 ) entries = self . instance . user . timepiece_entries . filter ( Q ( status = Entry . APPROVED ) | Q ( status = Entry . INVOICED ) , start_time__gte = month_start , end_time__lt = next_month ) entry = self . instance if not self . acting_user . is_superuser : if ( entries . exists ( ) and not entry . id or entry . id and entry . status == Entry . INVOICED ) : message = 'You cannot add/edit entries after a timesheet has been ' 'approved or invoiced. Please correct the start and end times.' raise forms . ValidationError ( message ) return self . cleaned_data
|
If we're not editing the active entry, ensure that this entry doesn't conflict with or come after the active entry.
|
62,004 |
def clock_in ( request ) : user = request . user active_entry = utils . get_active_entry ( user , select_for_update = True ) initial = dict ( [ ( k , v ) for k , v in request . GET . items ( ) ] ) data = request . POST or None form = ClockInForm ( data , initial = initial , user = user , active = active_entry ) if form . is_valid ( ) : entry = form . save ( ) message = 'You have clocked into {0} on {1}.' . format ( entry . activity . name , entry . project ) messages . info ( request , message ) return HttpResponseRedirect ( reverse ( 'dashboard' ) ) return render ( request , 'timepiece/entry/clock_in.html' , { 'form' : form , 'active' : active_entry , } )
|
For clocking the user into a project .
|
62,005 |
def toggle_pause ( request ) : entry = utils . get_active_entry ( request . user ) if not entry : raise Http404 entry . toggle_paused ( ) entry . save ( ) action = 'paused' if entry . is_paused else 'resumed' message = 'Your entry, {0} on {1}, has been {2}.' . format ( entry . activity . name , entry . project , action ) messages . info ( request , message ) return HttpResponseRedirect ( reverse ( 'dashboard' ) )
|
Allow the user to pause and unpause the active entry .
|
62,006 |
def reject_entry ( request , entry_id ) : return_url = request . GET . get ( 'next' , reverse ( 'dashboard' ) ) try : entry = Entry . no_join . get ( pk = entry_id ) except : message = 'No such log entry.' messages . error ( request , message ) return redirect ( return_url ) if entry . status == Entry . UNVERIFIED or entry . status == Entry . INVOICED : msg_text = 'This entry is unverified or is already invoiced.' messages . error ( request , msg_text ) return redirect ( return_url ) if request . POST . get ( 'Yes' ) : entry . status = Entry . UNVERIFIED entry . save ( ) msg_text = 'The entry\'s status was set to unverified.' messages . info ( request , msg_text ) return redirect ( return_url ) return render ( request , 'timepiece/entry/reject.html' , { 'entry' : entry , 'next' : request . GET . get ( 'next' ) , } )
|
Admins can reject an entry that has been verified or approved (but not invoiced), setting its status to unverified for the user to fix.
|
62,007 |
def delete_entry ( request , entry_id ) : try : entry = Entry . no_join . get ( pk = entry_id , user = request . user ) except Entry . DoesNotExist : message = 'No such entry found.' messages . info ( request , message ) url = request . GET . get ( 'next' , reverse ( 'dashboard' ) ) return HttpResponseRedirect ( url ) if request . method == 'POST' : key = request . POST . get ( 'key' , None ) if key and key == entry . delete_key : entry . delete ( ) message = 'Deleted {0} for {1}.' . format ( entry . activity . name , entry . project ) messages . info ( request , message ) url = request . GET . get ( 'next' , reverse ( 'dashboard' ) ) return HttpResponseRedirect ( url ) else : message = 'You are not authorized to delete this entry!' messages . error ( request , message ) return render ( request , 'timepiece/entry/delete.html' , { 'entry' : entry , } )
|
Give the user the ability to delete a log entry, with a confirmation beforehand. If this method is invoked via a GET request, a form asking for confirmation of intent will be presented to the user. If this method is invoked via a POST request, the entry will be deleted.
|
62,008 |
def get_hours_per_week ( self , user = None ) : try : profile = UserProfile . objects . get ( user = user or self . user ) except UserProfile . DoesNotExist : profile = None return profile . hours_per_week if profile else Decimal ( '40.00' )
|
Retrieves the number of hours the user should work per week .
|
62,009 |
def get_hours_for_week ( self , week_start = None ) : week_start = week_start if week_start else self . week_start week_end = week_start + relativedelta ( days = 7 ) return ProjectHours . objects . filter ( week_start__gte = week_start , week_start__lt = week_end )
|
Gets all ProjectHours entries in the 7-day period beginning on week_start.
|
62,010 |
def get_users_from_project_hours ( self , project_hours ) : name = ( 'user__first_name' , 'user__last_name' ) users = project_hours . values_list ( 'user__id' , * name ) . distinct ( ) . order_by ( * name ) return users
|
Gets a list of the distinct users included in the project hours entries ordered by name .
|
62,011 |
def check_all ( self , all_entries , * args , ** kwargs ) : all_overlaps = 0 while True : try : user_entries = all_entries . next ( ) except StopIteration : return all_overlaps else : user_total_overlaps = self . check_entry ( user_entries , * args , ** kwargs ) all_overlaps += user_total_overlaps
|
Go through lists of entries, find overlaps among each, and return the total.
|
62,012 |
def check_entry ( self , entries , * args , ** kwargs ) : verbosity = kwargs . get ( 'verbosity' , 1 ) user_total_overlaps = 0 user = '' for index_a , entry_a in enumerate ( entries ) : if index_a == 0 : if args and verbosity >= 1 or verbosity >= 2 : self . show_name ( entry_a . user ) user = entry_a . user for index_b in range ( index_a , len ( entries ) ) : entry_b = entries [ index_b ] if entry_a . check_overlap ( entry_b ) : user_total_overlaps += 1 self . show_overlap ( entry_a , entry_b , verbosity = verbosity ) if user_total_overlaps and user and verbosity >= 1 : overlap_data = { 'first' : user . first_name , 'last' : user . last_name , 'total' : user_total_overlaps , } self . stdout . write ( 'Total overlapping entries for user ' + '%(first)s %(last)s: %(total)d' % overlap_data ) return user_total_overlaps
|
With a list of entries, check each entry against every other.
|
62,013 |
def find_start ( self , ** kwargs ) : week = kwargs . get ( 'week' , False ) month = kwargs . get ( 'month' , False ) year = kwargs . get ( 'year' , False ) days = kwargs . get ( 'days' , 0 ) start = timezone . now ( ) - relativedelta ( months = 1 , day = 1 ) if week : start = utils . get_week_start ( ) if month : start = timezone . now ( ) - relativedelta ( day = 1 ) if year : start = timezone . now ( ) - relativedelta ( day = 1 , month = 1 ) if days : start = timezone . now ( ) - relativedelta ( days = days ) start -= relativedelta ( hour = 0 , minute = 0 , second = 0 , microsecond = 0 ) return start
|
Determine the starting point of the query using CLI keyword arguments
|
62,014 |
def find_users ( self , * args ) : if args : names = reduce ( lambda query , arg : query | ( Q ( first_name__icontains = arg ) | Q ( last_name__icontains = arg ) ) , args , Q ( ) ) users = User . objects . filter ( names ) else : users = User . objects . all ( ) if not users . count ( ) and args : if len ( args ) == 1 : raise CommandError ( 'No user was found with the name %s' % args [ 0 ] ) else : arg_list = ', ' . join ( args ) raise CommandError ( 'No users found with the names: %s' % arg_list ) return users
|
Returns the users to search for, given names as args. Returns all users if no args are provided.
|
62,015 |
def find_entries ( self , users , start , * args , ** kwargs ) : forever = kwargs . get ( 'all' , False ) for user in users : if forever : entries = Entry . objects . filter ( user = user ) . order_by ( 'start_time' ) else : entries = Entry . objects . filter ( user = user , start_time__gte = start ) . order_by ( 'start_time' ) yield entries
|
Find all entries for all users from a given starting point. If no starting point is provided, all entries are returned.
|
62,016 |
def cbv_decorator ( function_decorator ) : def class_decorator ( View ) : View . dispatch = method_decorator ( function_decorator ) ( View . dispatch ) return View return class_decorator
|
Allows a function-based decorator to be used on a class-based view (CBV).
|
62,017 |
def date_totals ( entries , by ) : date_dict = { } for date , date_entries in groupby ( entries , lambda x : x [ 'date' ] ) : if isinstance ( date , datetime . datetime ) : date = date . date ( ) d_entries = list ( date_entries ) if by == 'user' : name = ' ' . join ( ( d_entries [ 0 ] [ 'user__first_name' ] , d_entries [ 0 ] [ 'user__last_name' ] ) ) elif by == 'project' : name = d_entries [ 0 ] [ 'project__name' ] else : name = d_entries [ 0 ] [ by ] pk = d_entries [ 0 ] [ by ] hours = get_hours_summary ( d_entries ) date_dict [ date ] = hours return name , pk , date_dict
|
Yield a user's name and a dictionary of their hours.
|
62,018 |
def get_project_totals ( entries , date_headers , hour_type = None , overtime = False , total_column = False , by = 'user' ) : totals = [ 0 for date in date_headers ] rows = [ ] for thing , thing_entries in groupby ( entries , lambda x : x [ by ] ) : name , thing_id , date_dict = date_totals ( thing_entries , by ) dates = [ ] for index , day in enumerate ( date_headers ) : if isinstance ( day , datetime . datetime ) : day = day . date ( ) if hour_type : total = date_dict . get ( day , { } ) . get ( hour_type , 0 ) dates . append ( total ) else : billable = date_dict . get ( day , { } ) . get ( 'billable' , 0 ) nonbillable = date_dict . get ( day , { } ) . get ( 'non_billable' , 0 ) total = billable + nonbillable dates . append ( { 'day' : day , 'billable' : billable , 'nonbillable' : nonbillable , 'total' : total } ) totals [ index ] += total if total_column : dates . append ( sum ( dates ) ) if overtime : dates . append ( find_overtime ( dates ) ) dates = [ date or '' for date in dates ] rows . append ( ( name , thing_id , dates ) ) if total_column : totals . append ( sum ( totals ) ) totals = [ t or '' for t in totals ] yield ( rows , totals )
|
Yield hour totals grouped by user and date, optionally including overtime.
|
62,019 |
def validate ( self , validation_instances , metrics , iteration = None ) : if not validation_instances or not metrics : return { } split_id = 'val%s' % iteration if iteration is not None else 'val' train_results = evaluate . evaluate ( self , validation_instances , metrics = metrics , split_id = split_id ) output . output_results ( train_results , split_id ) return train_results
|
Evaluate this model on validation_instances during training and output a report .
|
62,020 |
def predict_and_score ( self , eval_instances , random = False , verbosity = 0 ) : if hasattr ( self , '_using_default_separate' ) and self . _using_default_separate : raise NotImplementedError self . _using_default_combined = True return ( self . predict ( eval_instances , random = random , verbosity = verbosity ) , self . score ( eval_instances , verbosity = verbosity ) )
|
Return most likely outputs and scores for the particular set of outputs given in eval_instances as a tuple . Return value should be equivalent to the default implementation of
|
62,021 |
def load ( self , infile ) : model = pickle . load ( infile ) self . __dict__ . update ( model . __dict__ )
|
Deserialize a model from a stored file .
|
62,022 |
def iter_batches ( iterable , batch_size ) : sourceiter = iter ( iterable ) while True : batchiter = islice ( sourceiter , batch_size ) yield chain ( [ batchiter . next ( ) ] , batchiter )
|
Given a sequence or iterable, yield batches from that iterable until it runs out. Note that this function returns a generator, and each batch will also be a generator.
|
62,023 |
def gen_batches ( iterable , batch_size ) : def batches_thunk ( ) : return iter_batches ( iterable , batch_size ) try : length = len ( iterable ) except TypeError : return batches_thunk ( ) num_batches = ( length - 1 ) // batch_size + 1 return SizedGenerator ( batches_thunk , length = num_batches )
|
Returns a generator object that yields batches from iterable . See iter_batches for more details and caveats .
|
62,024 |
def inverted ( self ) : return Instance ( input = self . output , output = self . input , annotated_input = self . annotated_output , annotated_output = self . annotated_input , alt_inputs = self . alt_outputs , alt_outputs = self . alt_inputs , source = self . source )
|
Return a version of this instance with inputs replaced by outputs and vice versa .
|
62,025 |
def get_data_or_download ( dir_name , file_name , url = '' , size = 'unknown' ) : dname = os . path . join ( stanza . DATA_DIR , dir_name ) fname = os . path . join ( dname , file_name ) if not os . path . isdir ( dname ) : assert url , 'Could not locate data {}, and url was not specified. Cannot retrieve data.' . format ( dname ) os . makedirs ( dname ) if not os . path . isfile ( fname ) : assert url , 'Could not locate data {}, and url was not specified. Cannot retrieve data.' . format ( fname ) logging . warn ( 'downloading from {}. This file could potentially be *very* large! Actual size ({})' . format ( url , size ) ) with open ( fname , 'wb' ) as f : f . write ( get_from_url ( url ) ) return fname
|
Returns the data. If the data hasn't been downloaded, then first download the data.
|
62,026 |
def add ( self , word , count = 1 ) : if word not in self : super ( Vocab , self ) . __setitem__ ( word , len ( self ) ) self . _counts [ word ] += count return self [ word ]
|
Add a word to the vocabulary and return its index .
|
62,027 |
def subset ( self , words ) : v = self . __class__ ( unk = self . _unk ) unique = lambda seq : len ( set ( seq ) ) == len ( seq ) assert unique ( words ) for w in words : if w in self : v . add ( w , count = self . count ( w ) ) return v
|
Get a new Vocab containing only the specified subset of words .
|
62,028 |
def _index2word ( self ) : compute_index2word = lambda : self . keys ( ) try : self . _index2word_cache except AttributeError : self . _index2word_cache = compute_index2word ( ) if len ( self . _index2word_cache ) != len ( self ) : self . _index2word_cache = compute_index2word ( ) return self . _index2word_cache
|
Mapping from indices to words .
|
62,029 |
def from_dict ( cls , word2index , unk , counts = None ) : try : if word2index [ unk ] != 0 : raise ValueError ( 'unk must be assigned index 0' ) except KeyError : raise ValueError ( 'word2index must have an entry for unk.' ) vals = set ( word2index . values ( ) ) n = len ( vals ) bijection = ( len ( word2index ) == n ) and ( vals == set ( range ( n ) ) ) if not bijection : raise ValueError ( 'word2index is not a bijection between N words and the integers 0 through N-1.' ) index2word = { idx : word for word , idx in word2index . iteritems ( ) } vocab = cls ( unk = unk ) for i in xrange ( n ) : vocab . add ( index2word [ i ] ) if counts : matching_entries = set ( word2index . keys ( ) ) == set ( counts . keys ( ) ) if not matching_entries : raise ValueError ( 'entries of word2index do not match counts (did you include UNK?)' ) vocab . _counts = counts return vocab
|
Create a Vocab from an existing string-to-integer dictionary.
|
62,030 |
def to_file ( self , f ) : for word in self . _index2word : count = self . _counts [ word ] f . write ( u'{}\t{}\n' . format ( word , count ) . encode ( 'utf-8' ) )
|
Write vocab to a file .
|
62,031 |
def backfill_unk_emb ( self , E , filled_words ) : unk_emb = E [ self [ self . _unk ] ] for i , word in enumerate ( self ) : if word not in filled_words : E [ i ] = unk_emb
|
Backfills an embedding matrix with the embedding for the unknown token .
|
62,032 |
def best_gpu ( max_usage = USAGE_THRESHOLD , verbose = False ) : try : proc = subprocess . Popen ( "nvidia-smi" , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) output , error = proc . communicate ( ) if error : raise Exception ( error ) except Exception , e : sys . stderr . write ( "Couldn't run nvidia-smi to find best GPU, using CPU: %s\n" % str ( e ) ) sys . stderr . write ( "(This is normal if you have no GPU or haven't configured CUDA.)\n" ) return "cpu" usages = parse_output ( output ) pct_usage = [ max ( u . mem , cpu_backoff ( u ) ) for u in usages ] max_usage = min ( max_usage , min ( pct_usage ) ) open_gpus = [ index for index , usage in enumerate ( usages ) if max ( usage . mem , cpu_backoff ( usage ) ) <= max_usage ] if verbose : print ( 'Best GPUs:' ) for index in open_gpus : print ( '%d: %s fan, %s mem, %s cpu' % ( index , format_percent ( usages [ index ] . fan ) , format_percent ( usages [ index ] . mem ) , format_percent ( usages [ index ] . cpu ) ) ) if open_gpus : result = "gpu" + str ( random . choice ( open_gpus ) ) else : result = "cpu" if verbose : print ( 'Chosen: ' + result ) return result
|
Return the name of a device to use: either cpu or gpu0, gpu1, ... The least-used GPU with usage under the constant threshold will be chosen; ties are broken randomly.
|
62,033 |
def evaluate ( learner , eval_data , metrics , metric_names = None , split_id = None , write_data = False ) : if metric_names is None : metric_names = [ ( metric . __name__ if hasattr ( metric , '__name__' ) else ( 'm%d' % i ) ) for i , metric in enumerate ( metrics ) ] split_prefix = split_id + '.' if split_id else '' if write_data : config . dump ( [ inst . __dict__ for inst in eval_data ] , 'data.%sjsons' % split_prefix , default = json_default , lines = True ) results = { split_prefix + 'num_params' : learner . num_params } predictions , scores = learner . predict_and_score ( eval_data ) config . dump ( predictions , 'predictions.%sjsons' % split_prefix , lines = True ) config . dump ( scores , 'scores.%sjsons' % split_prefix , lines = True ) for metric , metric_name in zip ( metrics , metric_names ) : prefix = split_prefix + ( metric_name + '.' if metric_name else '' ) inst_outputs = metric ( eval_data , predictions , scores , learner ) if metric_name in [ 'data' , 'predictions' , 'scores' ] : warnings . warn ( 'not outputting metric scores for metric "%s" because it would shadow ' 'another results file' ) else : config . dump ( inst_outputs , '%s.%sjsons' % ( metric_name , split_prefix ) , lines = True ) mean = np . mean ( inst_outputs ) gmean = np . exp ( np . log ( inst_outputs ) . mean ( ) ) sum = np . sum ( inst_outputs ) std = np . std ( inst_outputs ) results . update ( { prefix + 'mean' : mean , prefix + 'gmean' : gmean , prefix + 'sum' : sum , prefix + 'std' : std , } ) config . dump_pretty ( results , 'results.%sjson' % split_prefix ) return results
|
Evaluate learner on the instances in eval_data according to each metric in metrics, and return a dictionary summarizing the values of the metrics.
|
62,034 |
def json2pb ( pb , js , useFieldNumber = False ) : for field in pb . DESCRIPTOR . fields : if useFieldNumber : key = field . number else : key = field . name if key not in js : continue if field . type == FD . TYPE_MESSAGE : pass elif field . type in _js2ftype : ftype = _js2ftype [ field . type ] else : raise ParseError ( "Field %s.%s of type '%d' is not supported" % ( pb . __class__ . __name__ , field . name , field . type , ) ) value = js [ key ] if field . label == FD . LABEL_REPEATED : pb_value = getattr ( pb , field . name , None ) for v in value : if field . type == FD . TYPE_MESSAGE : json2pb ( pb_value . add ( ) , v , useFieldNumber = useFieldNumber ) else : pb_value . append ( ftype ( v ) ) else : if field . type == FD . TYPE_MESSAGE : json2pb ( getattr ( pb , field . name , None ) , value , useFieldNumber = useFieldNumber ) else : setattr ( pb , field . name , ftype ( value ) ) return pb
|
Convert a JSON string to a google.protobuf.descriptor instance.
|
62,035 |
def annotate_json ( self , text , annotators = None ) : doc = self . annotate ( text , annotators ) return doc . json
|
Return a JSON dict from the CoreNLP server containing annotations of the text .
|
62,036 |
def annotate_proto ( self , text , annotators = None ) : properties = { 'annotators' : ',' . join ( annotators or self . default_annotators ) , 'outputFormat' : 'serialized' , 'serializer' : 'edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer' } r = self . _request ( text , properties ) buffer = r . content size , pos = _DecodeVarint ( buffer , 0 ) buffer = buffer [ pos : ( pos + size ) ] doc = CoreNLP_pb2 . Document ( ) doc . ParseFromString ( buffer ) return doc
|
Return a Document protocol buffer from the CoreNLP server containing annotations of the text .
|
62,037 |
def annotate ( self , text , annotators = None ) : doc_pb = self . annotate_proto ( text , annotators ) return AnnotatedDocument . from_pb ( doc_pb )
|
Return an AnnotatedDocument from the CoreNLP server .
|
62,038 |
def from_pb ( cls , pb ) : obj = cls . _from_pb ( pb ) obj . _pb = pb return obj
|
Instantiate the object from a protocol buffer .
|
62,039 |
def character_span ( self ) : begin , end = self . token_span return ( self . sentence [ begin ] . character_span [ 0 ] , self . sentence [ end - 1 ] . character_span [ - 1 ] )
|
Returns the character span of the token
|
62,040 |
def log_proto ( self , proto , step_num ) : self . summ_writer . add_summary ( proto , step_num ) return proto
|
Log a Summary protobuf to the event file .
|
62,041 |
def log ( self , key , val , step_num ) : try : ph , summ = self . summaries [ key ] except KeyError : with self . g . as_default ( ) : ph = tf . placeholder ( tf . float32 , ( ) , name = key ) summ = tf . scalar_summary ( key , ph ) self . summaries [ key ] = ( ph , summ ) summary_str = self . sess . run ( summ , { ph : val } ) self . summ_writer . add_summary ( summary_str , step_num ) return val
|
Directly log a scalar value to the event file .
|
62,042 |
def read_events ( stream ) : header_size = struct . calcsize ( '<QI' ) len_size = struct . calcsize ( '<Q' ) footer_size = struct . calcsize ( '<I' ) while True : header = stream . read ( header_size ) if len ( header ) == 0 : break elif len ( header ) < header_size : raise SummaryReaderException ( 'unexpected EOF (expected a %d-byte header, ' 'got %d bytes)' % ( header_size , len ( header ) ) ) data_len , len_crc = struct . unpack ( '<QI' , header ) len_crc_actual = masked_crc ( header [ : len_size ] ) if len_crc_actual != len_crc : raise SummaryReaderException ( 'incorrect length CRC (%d != %d)' % ( len_crc_actual , len_crc ) ) data = stream . read ( data_len ) if len ( data ) < data_len : raise SummaryReaderException ( 'unexpected EOF (expected %d bytes, got %d)' % ( data_len , len ( data ) ) ) yield Event . FromString ( data ) footer = stream . read ( footer_size ) if len ( footer ) < footer_size : raise SummaryReaderException ( 'unexpected EOF (expected a %d-byte footer, ' 'got %d bytes)' % ( footer_size , len ( footer ) ) ) data_crc , = struct . unpack ( '<I' , footer ) data_crc_actual = masked_crc ( data ) if data_crc_actual != data_crc : raise SummaryReaderException ( 'incorrect data CRC (%d != %d)' % ( data_crc_actual , data_crc ) )
|
Read and return, as a generator, a sequence of Event protos from the file-like object stream.
|
62,043 |
def write_events ( stream , events ) : for event in events : data = event . SerializeToString ( ) len_field = struct . pack ( '<Q' , len ( data ) ) len_crc = struct . pack ( '<I' , masked_crc ( len_field ) ) data_crc = struct . pack ( '<I' , masked_crc ( data ) ) stream . write ( len_field ) stream . write ( len_crc ) stream . write ( data ) stream . write ( data_crc )
|
Write a sequence of Event protos to the file-like object stream.
|
62,044 |
def log_image ( self , step , tag , val ) : if len ( val . shape ) != 3 : raise ValueError ( '`log_image` value should be a 3-D tensor, instead got shape %s' % ( val . shape , ) ) if val . shape [ 2 ] != 3 : raise ValueError ( 'Last dimension of `log_image` value should be 3 (RGB), ' 'instead got shape %s' % ( val . shape , ) ) fakefile = StringIO ( ) png . Writer ( size = ( val . shape [ 1 ] , val . shape [ 0 ] ) ) . write ( fakefile , val . reshape ( val . shape [ 0 ] , val . shape [ 1 ] * val . shape [ 2 ] ) ) encoded = fakefile . getvalue ( ) RGB = 3 image = Summary . Image ( height = val . shape [ 0 ] , width = val . shape [ 1 ] , colorspace = RGB , encoded_image_string = encoded ) summary = Summary ( value = [ Summary . Value ( tag = tag , image = image ) ] ) self . _add_event ( step , summary )
|
Write an image event .
|
62,045 |
def log_scalar ( self , step , tag , val ) : summary = Summary ( value = [ Summary . Value ( tag = tag , simple_value = float ( np . float32 ( val ) ) ) ] ) self . _add_event ( step , summary )
|
Write a scalar event .
|
62,046 |
def log_histogram ( self , step , tag , val ) : hist = Histogram ( ) hist . add ( val ) summary = Summary ( value = [ Summary . Value ( tag = tag , histo = hist . encode_to_proto ( ) ) ] ) self . _add_event ( step , summary )
|
Write a histogram event .
|
62,047 |
def options ( allow_partial = False , read = False ) : global _options if allow_partial : opts , extras = _options_parser . parse_known_args ( ) if opts . run_dir : mkdirp ( opts . run_dir ) return opts if _options is None : _options_parser . add_argument ( '-h' , '--help' , action = 'help' , default = argparse . SUPPRESS , help = 'show this help message and exit' ) _options = _options_parser . parse_args ( ) if _options . run_dir : mkdirp ( _options . run_dir , overwrite = _options . overwrite or read ) if not read : options_dump = vars ( _options ) del options_dump [ 'overwrite' ] del options_dump [ 'config' ] dump_pretty ( options_dump , 'config.json' ) return _options
|
Get the object containing the values of the parsed command line options .
|
62,048 |
def inner_products ( self , vec ) : products = self . array . dot ( vec ) return self . _word_to_score ( np . arange ( len ( products ) ) , products )
|
Get the inner product of a vector with every embedding .
|
62,049 |
def _word_to_score ( self , ids , scores ) : assert len ( ids . shape ) == 1 assert ids . shape == scores . shape w2s = { } for i in range ( len ( ids ) ) : w2s [ self . vocab . index2word ( ids [ i ] ) ] = scores [ i ] return w2s
|
Return a map from each word to its score .
|
62,050 |
def _init_lsh_forest ( self ) : import sklearn . neighbors lshf = sklearn . neighbors . LSHForest ( ) lshf . fit ( self . array ) return lshf
|
Construct an LSH forest for nearest neighbor search .
|
62,051 |
def to_dict ( self ) : d = { } for word , idx in self . vocab . iteritems ( ) : d [ word ] = self . array [ idx ] . tolist ( ) return d
|
Convert to dictionary .
|
62,052 |
def to_files ( self , array_file , vocab_file ) : logging . info ( 'Writing array...' ) np . save ( array_file , self . array ) logging . info ( 'Writing vocab...' ) self . vocab . to_file ( vocab_file )
|
Write the embedding matrix and the vocab to files .
|
62,053 |
def from_files ( cls , array_file , vocab_file ) : logging . info ( 'Loading array...' ) array = np . load ( array_file ) logging . info ( 'Loading vocab...' ) vocab = Vocab . from_file ( vocab_file ) return cls ( array , vocab )
|
Load the embedding matrix and the vocab from files .
|
62,054 |
def get_uuids ( ) : result = shell ( 'cl ls -w {} -u' . format ( worksheet ) ) uuids = result . split ( '\n' ) uuids = uuids [ 1 : - 1 ] return uuids
|
List all bundle UUIDs in the worksheet .
|
62,055 |
def open_file ( uuid , path ) : f = tempfile . NamedTemporaryFile ( ) f . close ( ) fname = f . name cmd = 'cl down -o {} -w {} {}/{}' . format ( fname , worksheet , uuid , path ) try : shell ( cmd ) except RuntimeError : try : os . remove ( fname ) except OSError : pass raise IOError ( 'Failed to open file {}/{}' . format ( uuid , path ) ) f = open ( fname ) yield f f . close ( ) os . remove ( fname )
|
Get the raw file content within a particular bundle at a particular path .
|
62,056 |
def load_img ( self , img_path ) : with open_file ( self . uuid , img_path ) as f : return mpimg . imread ( f )
|
Return an image object that can be immediately plotted with matplotlib
|
62,057 |
def output_results ( results , split_id = 'results' , output_stream = None ) : if output_stream is None : output_stream = sys . stdout output_stream . write ( '----- %s -----\n' % split_id ) for name in sorted ( results . keys ( ) ) : output_stream . write ( '%s: %s\n' % ( name , repr ( results [ name ] ) ) ) output_stream . flush ( )
|
Log results readably to output_stream with a header containing split_id .
|
62,058 |
def labels_to_onehots ( labels , num_classes ) : batch_size = labels . get_shape ( ) . as_list ( ) [ 0 ] with tf . name_scope ( "one_hot" ) : labels = tf . expand_dims ( labels , 1 ) indices = tf . expand_dims ( tf . range ( 0 , batch_size , 1 ) , 1 ) sparse_ptrs = tf . concat ( 1 , [ indices , labels ] , name = "ptrs" ) onehots = tf . sparse_to_dense ( sparse_ptrs , [ batch_size , num_classes ] , 1.0 , 0.0 ) return onehots
|
Convert a vector of integer class labels to a matrix of one-hot target vectors.
|
62,059 |
def start_task ( self , name , size ) : if len ( self . task_stack ) == 0 : self . start_time = datetime . datetime . now ( ) self . task_stack . append ( Task ( name , size , 0 ) )
|
Add a task to the stack . If for example name is Iteration and size is 10 progress on that task will be shown as
|
62,060 |
def progress ( self , p ) : self . task_stack [ - 1 ] = self . task_stack [ - 1 ] . _replace ( progress = p ) self . progress_report ( )
|
Update the current progress on the task at the top of the stack .
|
62,061 |
def end_task ( self ) : self . progress ( self . task_stack [ - 1 ] . size ) self . task_stack . pop ( )
|
Remove the current task from the stack .
|
62,062 |
def progress_report ( self , force = False ) : now = datetime . datetime . now ( ) if ( len ( self . task_stack ) > 1 or self . task_stack [ 0 ] > 0 ) and now - self . last_report < self . resolution and not force : return stack_printout = ', ' . join ( '%s %s of %s' % ( t . name , t . progress , t . size ) for t in self . task_stack ) frac_done = self . fraction_done ( ) if frac_done == 0.0 : now_str = now . strftime ( '%c' ) eta_str = 'unknown on %s' % now_str else : elapsed = ( now - self . start_time ) estimated_length = elapsed . total_seconds ( ) / frac_done eta = self . start_time + datetime . timedelta ( seconds = estimated_length ) eta_str = eta . strftime ( '%c' ) print '%s (~%d%% done, ETA %s)' % ( stack_printout , round ( frac_done * 100.0 ) , eta_str ) self . last_report = datetime . datetime . now ( )
|
Print the current progress .
|
62,063 |
def write_conll ( self , fname ) : if 'label' not in self . fields : raise InvalidFieldsException ( "dataset is not in CONLL format: missing label field" ) def instance_to_conll ( inst ) : tab = [ v for k , v in inst . items ( ) if k != 'label' ] return '{}\n{}' . format ( inst [ 'label' ] , '\n' . join ( [ '\t' . join ( [ '-' if e is None else str ( e ) for e in row ] ) for row in zip ( * tab ) ] ) ) with open ( fname , 'wb' ) as f : f . write ( '# {}' . format ( '\t' . join ( [ k for k in self . fields if k != 'label' ] ) ) ) for i , d in enumerate ( self ) : f . write ( '\n{}' . format ( instance_to_conll ( d ) ) ) if i != len ( self ) - 1 : f . write ( '\n' )
|
Serializes the dataset in CoNLL format to fname.
|
62,064 |
def convert ( self , converters , in_place = False ) : dataset = self if in_place else self . __class__ ( OrderedDict ( [ ( name , data [ : ] ) for name , data in self . fields . items ( ) ] ) ) for name , convert in converters . items ( ) : if name not in self . fields . keys ( ) : raise InvalidFieldsException ( 'Converter specified for non-existent field {}' . format ( name ) ) for i , d in enumerate ( dataset . fields [ name ] ) : dataset . fields [ name ] [ i ] = convert ( d ) return dataset
|
Applies transformations to the dataset .
|
62,065 |
def shuffle ( self ) : order = range ( len ( self ) ) random . shuffle ( order ) for name , data in self . fields . items ( ) : reindexed = [ ] for _ , i in enumerate ( order ) : reindexed . append ( data [ i ] ) self . fields [ name ] = reindexed return self
|
Re-indexes the dataset in random order.
|
62,066 |
def pad ( cls , sequences , padding , pad_len = None ) : max_len = max ( [ len ( s ) for s in sequences ] ) pad_len = pad_len or max_len assert pad_len >= max_len , 'pad_len {} must be greater or equal to the longest sequence {}' . format ( pad_len , max_len ) for i , s in enumerate ( sequences ) : sequences [ i ] = [ padding ] * ( pad_len - len ( s ) ) + s return np . array ( sequences )
|
Pads a list of sequences such that they form a matrix .
|
62,067 |
def bleu ( eval_data , predictions , scores = 'ignored' , learner = 'ignored' ) : ref_groups = ( [ inst . output . split ( ) ] if isinstance ( inst . output , basestring ) else [ _maybe_tokenize ( r ) for r in inst . output ] for inst in eval_data ) return [ corpus_bleu ( ref_groups , [ p . split ( ) for p in predictions ] ) ]
|
Return the corpus-level BLEU score of predictions, using the output field of the instances in eval_data as references. This is returned as a length-1 list of floats.
|
62,068 |
def squared_error ( eval_data , predictions , scores = 'ignored' , learner = 'ignored' ) : return [ np . sum ( ( np . array ( pred ) - np . array ( inst . output ) ) ** 2 ) for inst , pred in zip ( eval_data , predictions ) ]
|
Return the squared error of each prediction in predictions with respect to the correct output in eval_data .
|
62,069 |
def encrypt_variable ( variable , build_repo , * , tld = '.org' , public_key = None , travis_token = None , ** login_kwargs ) : if not isinstance ( variable , bytes ) : raise TypeError ( "variable should be bytes" ) if not b"=" in variable : raise ValueError ( "variable should be of the form 'VARIABLE=value'" ) if not public_key : _headers = { 'Content-Type' : 'application/json' , 'User-Agent' : 'MyClient/1.0.0' , } headersv2 = { ** _headers , ** Travis_APIv2 } headersv3 = { ** _headers , ** Travis_APIv3 } if travis_token : headersv3 [ 'Authorization' ] = 'token {}' . format ( travis_token ) res = requests . get ( 'https://api.travis-ci.com/repo/{build_repo}/key_pair/generated' . format ( build_repo = urllib . parse . quote ( build_repo , safe = '' ) ) , headers = headersv3 ) if res . json ( ) . get ( 'file' ) == 'not found' : raise RuntimeError ( "Could not find the Travis public key for %s" % build_repo ) public_key = res . json ( ) [ 'public_key' ] else : res = requests . get ( 'https://api.travis-ci{tld}/repos/{build_repo}/key' . format ( build_repo = build_repo , tld = tld ) , headers = headersv2 ) public_key = res . json ( ) [ 'key' ] if res . status_code == requests . codes . not_found : raise RuntimeError ( 'Could not find requested repo on Travis. Is Travis enabled?' ) res . raise_for_status ( ) public_key = public_key . replace ( "RSA PUBLIC KEY" , "PUBLIC KEY" ) . encode ( 'utf-8' ) key = serialization . load_pem_public_key ( public_key , backend = default_backend ( ) ) pad = padding . PKCS1v15 ( ) return base64 . b64encode ( key . encrypt ( variable , pad ) )
|
Encrypt an environment variable for build_repo for Travis
|
62,070 |
def encrypt_to_file ( contents , filename ) : if not filename . endswith ( '.enc' ) : raise ValueError ( "%s does not end with .enc" % filename ) key = Fernet . generate_key ( ) fer = Fernet ( key ) encrypted_file = fer . encrypt ( contents ) with open ( filename , 'wb' ) as f : f . write ( encrypted_file ) return key
|
Encrypts contents and writes it to filename .
|
62,071 |
def GitHub_login ( * , username = None , password = None , OTP = None , headers = None ) : if not username : username = input ( "What is your GitHub username? " ) if not password : password = getpass ( "Enter the GitHub password for {username}: " . format ( username = username ) ) headers = headers or { } if OTP : headers [ 'X-GitHub-OTP' ] = OTP auth = HTTPBasicAuth ( username , password ) r = requests . get ( 'https://api.github.com/' , auth = auth , headers = headers ) if r . status_code == 401 : two_factor = r . headers . get ( 'X-GitHub-OTP' ) if two_factor : if OTP : print ( red ( "Invalid authentication code" ) ) auth_header = base64 . urlsafe_b64encode ( bytes ( username + ':' + password , 'utf8' ) ) . decode ( ) login_kwargs = { 'auth' : None , 'headers' : { 'Authorization' : 'Basic {}' . format ( auth_header ) } } try : generate_GitHub_token ( ** login_kwargs ) except ( requests . exceptions . HTTPError , GitHubError ) : pass print ( "A two-factor authentication code is required:" , two_factor . split ( ';' ) [ 1 ] . strip ( ) ) OTP = input ( "Authentication code: " ) return GitHub_login ( username = username , password = password , OTP = OTP , headers = headers ) raise AuthenticationFailed ( "invalid username or password" ) GitHub_raise_for_status ( r ) return { 'auth' : auth , 'headers' : headers }
|
Login to GitHub .
|
62,072 |
def GitHub_post ( data , url , * , auth , headers ) : r = requests . post ( url , auth = auth , headers = headers , data = json . dumps ( data ) ) GitHub_raise_for_status ( r ) return r . json ( )
|
POST the data `data` to GitHub.
|
62,073 |
def get_travis_token ( * , GitHub_token = None , ** login_kwargs ) : _headers = { 'Content-Type' : 'application/json' , 'User-Agent' : 'MyClient/1.0.0' , } headersv2 = { ** _headers , ** Travis_APIv2 } token_id = None try : if not GitHub_token : print ( green ( "I need to generate a temporary token with GitHub to authenticate with Travis. You may get a warning email from GitHub about this." ) ) print ( green ( "It will be deleted immediately. If you still see it after this at https://github.com/settings/tokens after please delete it manually." ) ) tok_dict = generate_GitHub_token ( scopes = [ "read:org" , "user:email" , "repo" ] , note = "temporary token for doctr to auth against travis (delete me)" , ** login_kwargs ) GitHub_token = tok_dict [ 'token' ] token_id = tok_dict [ 'id' ] data = { 'github_token' : GitHub_token } res = requests . post ( 'https://api.travis-ci.com/auth/github' , data = json . dumps ( data ) , headers = headersv2 ) return res . json ( ) [ 'access_token' ] finally : if token_id : delete_GitHub_token ( token_id , ** login_kwargs )
|
Generate a temporary token for authenticating with Travis
|
62,074 |
def generate_GitHub_token ( * , note = "Doctr token for pushing to gh-pages from Travis" , scopes = None , ** login_kwargs ) : if scopes is None : scopes = [ 'public_repo' ] AUTH_URL = "https://api.github.com/authorizations" data = { "scopes" : scopes , "note" : note , "note_url" : "https://github.com/drdoctr/doctr" , "fingerprint" : str ( uuid . uuid4 ( ) ) , } return GitHub_post ( data , AUTH_URL , ** login_kwargs )
|
Generate a GitHub token for pushing from Travis
|
62,075 |
def delete_GitHub_token ( token_id , * , auth , headers ) : r = requests . delete ( 'https://api.github.com/authorizations/{id}' . format ( id = token_id ) , auth = auth , headers = headers ) GitHub_raise_for_status ( r )
|
Delete a temporary GitHub token
|
62,076 |
def upload_GitHub_deploy_key ( deploy_repo , ssh_key , * , read_only = False , title = "Doctr deploy key for pushing to gh-pages from Travis" , ** login_kwargs ) : DEPLOY_KEY_URL = "https://api.github.com/repos/{deploy_repo}/keys" . format ( deploy_repo = deploy_repo ) data = { "title" : title , "key" : ssh_key , "read_only" : read_only , } return GitHub_post ( data , DEPLOY_KEY_URL , ** login_kwargs )
|
Uploads a GitHub deploy key to deploy_repo .
|
62,077 |
def generate_ssh_key ( ) : key = rsa . generate_private_key ( backend = default_backend ( ) , public_exponent = 65537 , key_size = 4096 ) private_key = key . private_bytes ( serialization . Encoding . PEM , serialization . PrivateFormat . PKCS8 , serialization . NoEncryption ( ) ) public_key = key . public_key ( ) . public_bytes ( serialization . Encoding . OpenSSH , serialization . PublicFormat . OpenSSH ) return private_key , public_key
|
Generates an SSH deploy public and private key .
|
62,078 |
def guess_github_repo ( ) : p = subprocess . run ( [ 'git' , 'ls-remote' , '--get-url' , 'origin' ] , stdout = subprocess . PIPE , stderr = subprocess . PIPE , check = False ) if p . stderr or p . returncode : return False url = p . stdout . decode ( 'utf-8' ) . strip ( ) m = GIT_URL . fullmatch ( url ) if not m : return False return m . group ( 1 )
|
Guesses the GitHub repo for the current directory.
|
62,079 |
def get_config ( ) : p = Path ( '.travis.yml' ) if not p . exists ( ) : return { } with p . open ( ) as f : travis_config = yaml . safe_load ( f . read ( ) ) config = travis_config . get ( 'doctr' , { } ) if not isinstance ( config , dict ) : raise ValueError ( 'config is not a dict: {}' . format ( config ) ) return config
|
This loads some configuration from the .travis.yml file (if present), specifically the doctr key (if present).
|
62,080 |
def decrypt_file ( file , key ) : if not file . endswith ( '.enc' ) : raise ValueError ( "%s does not end with .enc" % file ) fer = Fernet ( key ) with open ( file , 'rb' ) as f : decrypted_file = fer . decrypt ( f . read ( ) ) with open ( file [ : - 4 ] , 'wb' ) as f : f . write ( decrypted_file ) os . chmod ( file [ : - 4 ] , 0o600 )
|
Decrypts the file `file`.
|
62,081 |
def setup_deploy_key ( keypath = 'github_deploy_key' , key_ext = '.enc' , env_name = 'DOCTR_DEPLOY_ENCRYPTION_KEY' ) : key = os . environ . get ( env_name , os . environ . get ( "DOCTR_DEPLOY_ENCRYPTION_KEY" , None ) ) if not key : raise RuntimeError ( "{env_name} or DOCTR_DEPLOY_ENCRYPTION_KEY environment variable is not set. Make sure you followed the instructions from 'doctr configure' properly. You may need to re-run 'doctr configure' to fix this error." . format ( env_name = env_name ) ) if ( not os . path . isfile ( keypath + key_ext ) and os . path . isfile ( 'github_deploy_key' + key_ext ) ) : keypath = 'github_deploy_key' key_filename = os . path . basename ( keypath ) key = key . encode ( 'utf-8' ) decrypt_file ( keypath + key_ext , key ) key_path = os . path . expanduser ( "~/.ssh/" + key_filename ) os . makedirs ( os . path . expanduser ( "~/.ssh" ) , exist_ok = True ) os . rename ( keypath , key_path ) with open ( os . path . expanduser ( "~/.ssh/config" ) , 'a' ) as f : f . write ( "Host github.com" ' IdentityFile "%s"' " LogLevel ERROR\n" % key_path ) agent_info = subprocess . check_output ( [ 'ssh-agent' , '-s' ] ) agent_info = agent_info . decode ( 'utf-8' ) agent_info = agent_info . split ( ) AUTH_SOCK = agent_info [ 0 ] . split ( '=' ) [ 1 ] [ : - 1 ] AGENT_PID = agent_info [ 3 ] . split ( '=' ) [ 1 ] [ : - 1 ] os . putenv ( 'SSH_AUTH_SOCK' , AUTH_SOCK ) os . putenv ( 'SSH_AGENT_PID' , AGENT_PID ) run ( [ 'ssh-add' , os . path . expanduser ( '~/.ssh/' + key_filename ) ] )
|
Decrypts the deploy key and configures it with ssh
|
62,082 |
def get_token ( ) : token = os . environ . get ( "GH_TOKEN" , None ) if not token : token = "GH_TOKEN environment variable not set" token = token . encode ( 'utf-8' ) return token
|
Get the encrypted GitHub token in Travis .
|
62,083 |
def run ( args , shell = False , exit = True ) : if "GH_TOKEN" in os . environ : token = get_token ( ) else : token = b'' if not shell : command = ' ' . join ( map ( shlex . quote , args ) ) else : command = args command = command . replace ( token . decode ( 'utf-8' ) , '~' * len ( token ) ) print ( blue ( command ) ) sys . stdout . flush ( ) returncode = run_command_hiding_token ( args , token , shell = shell ) if exit and returncode != 0 : sys . exit ( red ( "%s failed: %s" % ( command , returncode ) ) ) return returncode
|
Run the command args .
|
62,084 |
def get_current_repo ( ) : remote_url = subprocess . check_output ( [ 'git' , 'config' , '--get' , 'remote.origin.url' ] ) . decode ( 'utf-8' ) _ , org , git_repo = remote_url . rsplit ( '.git' , 1 ) [ 0 ] . rsplit ( '/' , 2 ) return ( org + '/' + git_repo )
|
Get the GitHub repo name for the current directory .
|
62,085 |
def get_travis_branch ( ) : if os . environ . get ( "TRAVIS_PULL_REQUEST" , "" ) == "true" : return os . environ . get ( "TRAVIS_PULL_REQUEST_BRANCH" , "" ) else : return os . environ . get ( "TRAVIS_BRANCH" , "" )
|
Get the name of the branch that the PR is from .
|
62,086 |
def set_git_user_email ( ) : username = subprocess . run ( shlex . split ( 'git config user.name' ) , stdout = subprocess . PIPE ) . stdout . strip ( ) . decode ( 'utf-8' ) if not username or username == "Travis CI User" : run ( [ 'git' , 'config' , '--global' , 'user.name' , "Doctr (Travis CI)" ] ) else : print ( "Not setting git user name, as it's already set to %r" % username ) email = subprocess . run ( shlex . split ( 'git config user.email' ) , stdout = subprocess . PIPE ) . stdout . strip ( ) . decode ( 'utf-8' ) if not email or email == "[email protected]" : run ( [ 'git' , 'config' , '--global' , 'user.email' , '[email protected]' ] ) else : print ( "Not setting git user email, as it's already set to %r" % email )
|
Set the global user name and email for the git user, if not already present on the system.
|
62,087 |
def checkout_deploy_branch ( deploy_branch , canpush = True ) : create_deploy_branch ( deploy_branch , push = canpush ) remote_branch = "doctr_remote/{}" . format ( deploy_branch ) print ( "Checking out doctr working branch tracking" , remote_branch ) clear_working_branch ( ) if run ( [ 'git' , 'rev-parse' , '--verify' , remote_branch ] , exit = False ) == 0 : extra_args = [ '--track' , remote_branch ] else : extra_args = [ ] run ( [ 'git' , 'checkout' , '-b' , DOCTR_WORKING_BRANCH ] + extra_args ) print ( "Done" ) return canpush
|
Checkout the deploy branch, creating it if it doesn't exist.
|
62,088 |
def deploy_branch_exists ( deploy_branch ) : remote_name = 'doctr_remote' branch_names = subprocess . check_output ( [ 'git' , 'branch' , '-r' ] ) . decode ( 'utf-8' ) . split ( ) return '{}/{}' . format ( remote_name , deploy_branch ) in branch_names
|
Check if there is a remote branch with name specified in deploy_branch .
|
62,089 |
def create_deploy_branch ( deploy_branch , push = True ) : if not deploy_branch_exists ( deploy_branch ) : print ( "Creating {} branch on doctr_remote" . format ( deploy_branch ) ) clear_working_branch ( ) run ( [ 'git' , 'checkout' , '--orphan' , DOCTR_WORKING_BRANCH ] ) run ( [ 'git' , 'rm' , '-rf' , '.' ] ) print ( "Adding .nojekyll file to working branch" ) run ( [ 'touch' , '.nojekyll' ] ) run ( [ 'git' , 'add' , '.nojekyll' ] ) run ( [ 'git' , 'commit' , '-m' , 'Create new {} branch with .nojekyll' . format ( deploy_branch ) ] ) if push : print ( "Pushing working branch to remote {} branch" . format ( deploy_branch ) ) run ( [ 'git' , 'push' , '-u' , 'doctr_remote' , '{}:{}' . format ( DOCTR_WORKING_BRANCH , deploy_branch ) ] ) run ( [ 'git' , 'checkout' , 'master' ] ) run ( [ 'git' , 'branch' , '-D' , DOCTR_WORKING_BRANCH ] ) run ( [ 'git' , 'fetch' , 'doctr_remote' ] ) return True return False
|
If there is no remote branch with the name specified in deploy_branch, create one.
|
62,090 |
def find_sphinx_build_dir ( ) : build = glob . glob ( '**/*build/html' , recursive = True ) if not build : raise RuntimeError ( "Could not find Sphinx build directory automatically" ) build_folder = build [ 0 ] return build_folder
|
Find the build subfolder within the Sphinx docs directory.
|
62,091 |
def copy_to_tmp ( source ) : tmp_dir = tempfile . mkdtemp ( ) p = pathlib . Path ( source ) dirname = p . name or 'temp' new_dir = os . path . join ( tmp_dir , dirname ) if os . path . isdir ( source ) : shutil . copytree ( source , new_dir ) else : shutil . copy2 ( source , new_dir ) return new_dir
|
Copies source to a temporary directory and returns the copied location .
|
62,092 |
def is_subdir ( a , b ) : a , b = map ( os . path . abspath , [ a , b ] ) return os . path . commonpath ( [ a , b ] ) == b
|
Return True if a is a subdirectory of b.
|
62,093 |
def sync_from_log ( src , dst , log_file , exclude = ( ) ) : from os . path import join , exists , isdir exclude = [ os . path . normpath ( i ) for i in exclude ] added , removed = [ ] , [ ] if not exists ( log_file ) : print ( "%s doesn't exist. Not removing any files." % log_file ) else : with open ( log_file ) as f : files = f . read ( ) . strip ( ) . split ( '\n' ) for new_f in files : new_f = new_f . strip ( ) if any ( is_subdir ( new_f , os . path . join ( dst , i ) ) for i in exclude ) : pass elif exists ( new_f ) : os . remove ( new_f ) removed . append ( new_f ) else : print ( "Warning: File %s doesn't exist." % new_f , file = sys . stderr ) if os . path . isdir ( src ) : if not src . endswith ( os . sep ) : src += os . sep files = glob . iglob ( join ( src , '**' ) , recursive = True ) else : files = [ src ] src = os . path . dirname ( src ) + os . sep if os . sep in src else '' os . makedirs ( dst , exist_ok = True ) for f in sorted ( files ) : if any ( is_subdir ( f , os . path . join ( src , i ) ) for i in exclude ) : continue new_f = join ( dst , f [ len ( src ) : ] ) if isdir ( f ) or f . endswith ( os . sep ) : os . makedirs ( new_f , exist_ok = True ) else : shutil . copy2 ( f , new_f ) added . append ( new_f ) if new_f in removed : removed . remove ( new_f ) with open ( log_file , 'w' ) as f : f . write ( '\n' . join ( added ) ) added . append ( log_file ) return added , removed
|
Sync the files in src to dst .
|
62,094 |
def push_docs ( deploy_branch = 'gh-pages' , retries = 5 ) : code = 1 while code and retries : print ( "Pulling" ) code = run ( [ 'git' , 'pull' , '-s' , 'recursive' , '-X' , 'ours' , 'doctr_remote' , deploy_branch ] , exit = False ) print ( "Pushing commit" ) code = run ( [ 'git' , 'push' , '-q' , 'doctr_remote' , '{}:{}' . format ( DOCTR_WORKING_BRANCH , deploy_branch ) ] , exit = False ) if code : retries -= 1 print ( "Push failed, retrying" ) time . sleep ( 1 ) else : return sys . exit ( "Giving up..." )
|
Push the changes to the branch named deploy_branch .
|
62,095 |
def clean_path ( p ) : p = os . path . expanduser ( p ) p = os . path . expandvars ( p ) p = os . path . abspath ( p ) return p
|
Clean a path by expanding user and environment variables and ensuring absolute path .
|
62,096 |
def load_file_template ( path ) : template = StringIO ( ) if not os . path . exists ( path ) : raise ValueError ( "path does not exist: %s" % path ) with open ( clean_path ( path ) , "rb" ) as infile : for line in infile : template . write ( line . decode ( "utf-8" ) ) return template
|
Load template from the specified filesystem path .
|
62,097 |
def load_package_template ( license , header = False ) : content = StringIO ( ) filename = 'template-%s-header.txt' if header else 'template-%s.txt' with resource_stream ( __name__ , filename % license ) as licfile : for line in licfile : content . write ( line . decode ( "utf-8" ) ) return content
|
Load license template distributed with package .
|
62,098 |
def extract_vars ( template ) : keys = set ( ) for match in re . finditer ( r"\{\{ (?P<key>\w+) \}\}" , template . getvalue ( ) ) : keys . add ( match . groups ( ) [ 0 ] ) return sorted ( list ( keys ) )
|
Extract variables from template . Variables are enclosed in double curly braces .
|
62,099 |
def generate_license ( template , context ) : out = StringIO ( ) content = template . getvalue ( ) for key in extract_vars ( template ) : if key not in context : raise ValueError ( "%s is missing from the template context" % key ) content = content . replace ( "{{ %s }}" % key , context [ key ] ) template . close ( ) out . write ( content ) return out
|
Generate a license by extracting variables from the template and replacing them with the corresponding values in the given context .
|
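Each row above pairs a tokenized Python function (question) with its reference docstring (target). Below is a minimal sketch of iterating such a split with the Hugging Face `datasets` library; the dataset identifier is a hypothetical placeholder, and it assumes both text columns are whitespace-tokenized strings as shown in the rows above.

```python
# Minimal sketch: iterating a code/docstring split with columns idx, question, target.
# "your-org/code-docstring-pairs" is a hypothetical placeholder, not the real dataset id.
from datasets import load_dataset

ds = load_dataset("your-org/code-docstring-pairs", split="train")

for row in ds.select(range(3)):
    code_tokens = row["question"].split()     # whitespace-tokenized function source
    target_tokens = row["target"].split()     # whitespace-tokenized reference docstring
    print(row["idx"], len(code_tokens), " ".join(target_tokens[:8]))
```

Splitting on whitespace keeps the example aligned with the tokenization already visible in the dump; a different tokenizer would be needed if the stored text were raw source code.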