idx (int64, 0-63k) | question (stringlengths 61-4.03k) | target (stringlengths 6-1.23k) |
---|---|---|
2,300 |
def plot_boolean ( self , on , boolean_col , plot_col = None , boolean_label = None , boolean_value_map = { } , order = None , ax = None , alternative = "two-sided" , ** kwargs ) : cols , df = self . as_dataframe ( on , return_cols = True , ** kwargs ) plot_col = self . plot_col_from_cols ( cols = cols , plot_col = plot_col ) df = filter_not_null ( df , boolean_col ) df = filter_not_null ( df , plot_col ) if boolean_label : df [ boolean_label ] = df [ boolean_col ] boolean_col = boolean_label condition_value = None if boolean_value_map : assert set ( boolean_value_map . keys ( ) ) == set ( [ True , False ] ) , "Improper mapping of boolean column provided" df [ boolean_col ] = df [ boolean_col ] . map ( lambda v : boolean_value_map [ v ] ) condition_value = boolean_value_map [ True ] if df [ plot_col ] . dtype == "bool" : results = fishers_exact_plot ( data = df , condition1 = boolean_col , condition2 = plot_col , condition1_value = condition_value , alternative = alternative , order = order , ax = ax ) else : results = mann_whitney_plot ( data = df , condition = boolean_col , distribution = plot_col , condition_value = condition_value , alternative = alternative , order = order , ax = ax ) return results
|
Plot a comparison of boolean_col in the cohort on a given variable via on or col.
|
2,301 |
def plot_correlation ( self , on , x_col = None , plot_type = "jointplot" , stat_func = pearsonr , show_stat_func = True , plot_kwargs = { } , ** kwargs ) : if plot_type not in [ "boxplot" , "barplot" , "jointplot" , "regplot" ] : raise ValueError ( "Invalid plot_type %s" % plot_type ) plot_cols , df = self . as_dataframe ( on , return_cols = True , ** kwargs ) if len ( plot_cols ) != 2 : raise ValueError ( "Must be comparing two columns, but there are %d columns" % len ( plot_cols ) ) for plot_col in plot_cols : df = filter_not_null ( df , plot_col ) if x_col is None : x_col = plot_cols [ 0 ] y_col = plot_cols [ 1 ] else : if x_col == plot_cols [ 0 ] : y_col = plot_cols [ 1 ] else : y_col = plot_cols [ 0 ] series_x = df [ x_col ] series_y = df [ y_col ] coeff , p_value = stat_func ( series_x , series_y ) if plot_type == "jointplot" : plot = sb . jointplot ( data = df , x = x_col , y = y_col , stat_func = stat_func if show_stat_func else None , ** plot_kwargs ) elif plot_type == "regplot" : plot = sb . regplot ( data = df , x = x_col , y = y_col , ** plot_kwargs ) elif plot_type == "boxplot" : plot = stripboxplot ( data = df , x = x_col , y = y_col , ** plot_kwargs ) else : plot = sb . barplot ( data = df , x = x_col , y = y_col , ** plot_kwargs ) return CorrelationResults ( coeff = coeff , p_value = p_value , stat_func = stat_func , series_x = series_x , series_y = series_y , plot = plot )
|
Plot the correlation between two variables.
|
2,302 |
def _list_patient_ids ( self ) : results = [ ] for patient in self : results . append ( patient . id ) return ( results )
|
Utility function to return a list of patient ids in the Cohort
|
2,303 |
def summarize_provenance_per_cache ( self ) : provenance_summary = { } df = self . as_dataframe ( ) for cache in self . cache_names : cache_name = self . cache_names [ cache ] cache_provenance = None num_discrepant = 0 this_cache_dir = path . join ( self . cache_dir , cache_name ) if path . exists ( this_cache_dir ) : for patient_id in self . _list_patient_ids ( ) : patient_cache_dir = path . join ( this_cache_dir , patient_id ) try : this_provenance = self . load_provenance ( patient_cache_dir = patient_cache_dir ) except : this_provenance = None if this_provenance : if not ( cache_provenance ) : cache_provenance = this_provenance else : num_discrepant += compare_provenance ( this_provenance , cache_provenance ) if num_discrepant == 0 : provenance_summary [ cache_name ] = cache_provenance else : provenance_summary [ cache_name ] = None return ( provenance_summary )
|
Utility function to summarize provenance files for cached items used by a Cohort for each cache_dir that exists. Only existing cache_dirs are summarized.
|
2,304 |
def summarize_provenance ( self ) : provenance_per_cache = self . summarize_provenance_per_cache ( ) summary_provenance = None num_discrepant = 0 for cache in provenance_per_cache : if not ( summary_provenance ) : summary_provenance = provenance_per_cache [ cache ] summary_provenance_name = cache num_discrepant += compare_provenance ( provenance_per_cache [ cache ] , summary_provenance , left_outer_diff = "In %s but not in %s" % ( cache , summary_provenance_name ) , right_outer_diff = "In %s but not in %s" % ( summary_provenance_name , cache ) ) if num_discrepant == 0 : prov = summary_provenance else : prov = provenance_per_cache return ( prov )
|
Utility function to summarize provenance files for cached items used by a Cohort.
|
2,305 |
def summarize_data_sources ( self ) : provenance_file_summary = self . summarize_provenance ( ) dataframe_hash = self . summarize_dataframe ( ) results = { "provenance_file_summary" : provenance_file_summary , "dataframe_hash" : dataframe_hash } return ( results )
|
Utility function to summarize data source status for this Cohort, useful for confirming the state of data used for an analysis.
|
2,306 |
def strelka_somatic_variant_stats ( variant , variant_metadata ) : sample_info = variant_metadata [ "sample_info" ] assert len ( sample_info ) == 2 , "More than two samples found in the somatic VCF" tumor_stats = _strelka_variant_stats ( variant , sample_info [ "TUMOR" ] ) normal_stats = _strelka_variant_stats ( variant , sample_info [ "NORMAL" ] ) return SomaticVariantStats ( tumor_stats = tumor_stats , normal_stats = normal_stats )
|
Parse out the variant calling statistics for a given variant from a Strelka VCF
|
2,307 |
def _strelka_variant_stats ( variant , sample_info ) : if variant . is_deletion or variant . is_insertion : ref_depth = int ( sample_info [ 'TAR' ] [ 0 ] ) alt_depth = int ( sample_info [ 'TIR' ] [ 0 ] ) depth = ref_depth + alt_depth else : ref_depth = int ( sample_info [ variant . ref + "U" ] [ 0 ] ) alt_depth = int ( sample_info [ variant . alt + "U" ] [ 0 ] ) depth = alt_depth + ref_depth if depth > 0 : vaf = float ( alt_depth ) / depth else : vaf = None return VariantStats ( depth = depth , alt_depth = alt_depth , variant_allele_frequency = vaf )
|
Parse a single sample's variant calling statistics based on Strelka VCF output.
|
2,308 |
def mutect_somatic_variant_stats ( variant , variant_metadata ) : sample_info = variant_metadata [ "sample_info" ] assert len ( sample_info ) == 2 , "More than two samples found in the somatic VCF" tumor_sample_infos = [ info for info in sample_info . values ( ) if info [ "GT" ] == "0/1" ] assert len ( tumor_sample_infos ) == 1 , "More than one tumor sample found in the VCF file" tumor_sample_info = tumor_sample_infos [ 0 ] normal_sample_info = [ info for info in sample_info . values ( ) if info [ "GT" ] != "0/1" ] [ 0 ] tumor_stats = _mutect_variant_stats ( variant , tumor_sample_info ) normal_stats = _mutect_variant_stats ( variant , normal_sample_info ) return SomaticVariantStats ( tumor_stats = tumor_stats , normal_stats = normal_stats )
|
Parse out the variant calling statistics for a given variant from a Mutect VCF
|
2,309 |
def maf_somatic_variant_stats ( variant , variant_metadata ) : tumor_stats = None normal_stats = None if "t_ref_count" in variant_metadata : tumor_stats = _maf_variant_stats ( variant , variant_metadata , prefix = "t" ) if "n_ref_count" in variant_metadata : normal_stats = _maf_variant_stats ( variant , variant_metadata , prefix = "n" ) return SomaticVariantStats ( tumor_stats = tumor_stats , normal_stats = normal_stats )
|
Parse out the variant calling statistics for a given variant from a MAF file
|
2,310 |
def _vcf_is_strelka ( variant_file , variant_metadata ) : if "strelka" in variant_file . lower ( ) : return True elif "NORMAL" in variant_metadata [ "sample_info" ] . keys ( ) : return True else : vcf_reader = vcf . Reader ( open ( variant_file , "r" ) ) try : vcf_type = vcf_reader . metadata [ "content" ] except KeyError : vcf_type = "" if "strelka" in vcf_type . lower ( ) : return True return False
|
Return True if the given variant_file is in strelka format.
|
2,311 |
def variant_stats_from_variant ( variant , metadata , merge_fn = ( lambda all_stats : max ( all_stats , key = ( lambda stats : stats . tumor_stats . depth ) ) ) ) : all_stats = [ ] for ( variant_file , variant_metadata ) in metadata . items ( ) : if _vcf_is_maf ( variant_file = variant_file ) : stats = maf_somatic_variant_stats ( variant , variant_metadata ) elif _vcf_is_strelka ( variant_file = variant_file , variant_metadata = variant_metadata ) : stats = strelka_somatic_variant_stats ( variant , variant_metadata ) elif _vcf_is_mutect ( variant_file = variant_file , variant_metadata = variant_metadata ) : stats = mutect_somatic_variant_stats ( variant , variant_metadata ) else : raise ValueError ( "Cannot parse sample fields, variant file {} is from an unsupported caller." . format ( variant_file ) ) all_stats . append ( stats ) return merge_fn ( all_stats )
|
Parse the variant calling stats from a variant called from multiple variant files. The stats are merged based on merge_fn.
|
2,312 |
def load_ensembl_coverage ( cohort , coverage_path , min_tumor_depth , min_normal_depth = 0 , pageant_dir_fn = None ) : if pageant_dir_fn is None : pageant_dir_fn = lambda patient : patient . id columns_both = [ "depth1" , "depth2" , "onBP1" , "onBP2" , "numOnLoci" , "fracBPOn1" , "fracBPOn2" , "fracLociOn" , "offBP1" , "offBP2" , "numOffLoci" , "fracBPOff1" , "fracBPOff2" , "fracLociOff" , ] columns_single = [ "depth" , "onBP" , "numOnLoci" , "fracBPOn" , "fracLociOn" , "offBP" , "numOffLoci" , "fracBPOff" , "fracLociOff" ] if min_normal_depth < 0 : raise ValueError ( "min_normal_depth must be >= 0" ) use_tumor_only = ( min_normal_depth == 0 ) columns = columns_single if use_tumor_only else columns_both ensembl_loci_dfs = [ ] for patient in cohort : patient_ensembl_loci_df = pd . read_csv ( path . join ( coverage_path , pageant_dir_fn ( patient ) , "cdf.csv" ) , names = columns , header = 1 ) if use_tumor_only : depth_mask = ( patient_ensembl_loci_df . depth == min_tumor_depth ) else : depth_mask = ( ( patient_ensembl_loci_df . depth1 == min_normal_depth ) & ( patient_ensembl_loci_df . depth2 == min_tumor_depth ) ) patient_ensembl_loci_df = patient_ensembl_loci_df [ depth_mask ] assert len ( patient_ensembl_loci_df ) == 1 , ( "Incorrect number of tumor={}, normal={} depth loci results: {} for patient {}" . format ( min_tumor_depth , min_normal_depth , len ( patient_ensembl_loci_df ) , patient ) ) patient_ensembl_loci_df [ "patient_id" ] = patient . id ensembl_loci_dfs . append ( patient_ensembl_loci_df ) ensembl_loci_df = pd . concat ( ensembl_loci_dfs ) ensembl_loci_df [ "MB" ] = ensembl_loci_df . numOnLoci / 1000000.0 return ensembl_loci_df [ [ "patient_id" , "numOnLoci" , "MB" ] ]
|
Load in Pageant CoverageDepth results with Ensembl loci.
|
2,313 |
def vertical_percent ( plot , percent = 0.1 ) : plot_bottom , plot_top = plot . get_ylim ( ) return percent * ( plot_top - plot_bottom )
|
Using the size of the y-axis, return a fraction of that size.
|
2,314 |
def add_significance_indicator ( plot , col_a = 0 , col_b = 1 , significant = False ) : plot_bottom , plot_top = plot . get_ylim ( ) line_height = vertical_percent ( plot , 0.1 ) plot_top = plot_top + line_height plot . set_ylim ( top = plot_top + line_height * 2 ) color = "black" line_top = plot_top + line_height plot . plot ( [ col_a , col_a , col_b , col_b ] , [ plot_top , line_top , line_top , plot_top ] , lw = 1.5 , color = color ) indicator = "*" if significant else "ns" plot . text ( ( col_a + col_b ) * 0.5 , line_top , indicator , ha = "center" , va = "bottom" , color = color )
|
Add a p-value significance indicator.
|
2,315 |
def stripboxplot ( x , y , data , ax = None , significant = None , ** kwargs ) : ax = sb . boxplot ( x = x , y = y , data = data , ax = ax , fliersize = 0 , ** kwargs ) plot = sb . stripplot ( x = x , y = y , data = data , ax = ax , jitter = kwargs . pop ( "jitter" , 0.05 ) , color = kwargs . pop ( "color" , "0.3" ) , ** kwargs ) if data [ y ] . min ( ) >= 0 : hide_negative_y_ticks ( plot ) if significant is not None : add_significance_indicator ( plot = plot , significant = significant ) return plot
|
Overlay a stripplot on top of a boxplot.
|
2,316 |
def fishers_exact_plot ( data , condition1 , condition2 , ax = None , condition1_value = None , alternative = "two-sided" , ** kwargs ) : plot = sb . barplot ( x = condition1 , y = condition2 , ax = ax , data = data , ** kwargs ) plot . set_ylabel ( "Percent %s" % condition2 ) condition1_mask = get_condition_mask ( data , condition1 , condition1_value ) count_table = pd . crosstab ( data [ condition1 ] , data [ condition2 ] ) print ( count_table ) oddsratio , p_value = fisher_exact ( count_table , alternative = alternative ) add_significance_indicator ( plot = plot , significant = p_value <= 0.05 ) only_percentage_ticks ( plot ) if alternative != "two-sided" : raise ValueError ( "We need to better understand the one-sided Fisher's Exact test" ) sided_str = "two-sided" print ( "Fisher's Exact Test: OR: {}, p-value={} ({})" . format ( oddsratio , p_value , sided_str ) ) return FishersExactResults ( oddsratio = oddsratio , p_value = p_value , sided_str = sided_str , with_condition1_series = data [ condition1_mask ] [ condition2 ] , without_condition1_series = data [ ~ condition1_mask ] [ condition2 ] , plot = plot )
|
Perform a Fisher's exact test to compare two binary columns.
|
2,317 |
def mann_whitney_plot ( data , condition , distribution , ax = None , condition_value = None , alternative = "two-sided" , skip_plot = False , ** kwargs ) : condition_mask = get_condition_mask ( data , condition , condition_value ) U , p_value = mannwhitneyu ( data [ condition_mask ] [ distribution ] , data [ ~ condition_mask ] [ distribution ] , alternative = alternative ) plot = None if not skip_plot : plot = stripboxplot ( x = condition , y = distribution , data = data , ax = ax , significant = p_value <= 0.05 , ** kwargs ) sided_str = sided_str_from_alternative ( alternative , condition ) print ( "Mann-Whitney test: U={}, p-value={} ({})" . format ( U , p_value , sided_str ) ) return MannWhitneyResults ( U = U , p_value = p_value , sided_str = sided_str , with_condition_series = data [ condition_mask ] [ distribution ] , without_condition_series = data [ ~ condition_mask ] [ distribution ] , plot = plot )
|
Create a box plot comparing a condition and perform a Mann-Whitney test to compare the distribution in condition A vs. B.
|
2,318 |
def roc_curve_plot ( data , value_column , outcome_column , bootstrap_samples = 100 , ax = None ) : scores = bootstrap_auc ( df = data , col = value_column , pred_col = outcome_column , n_bootstrap = bootstrap_samples ) mean_bootstrap_auc = scores . mean ( ) print ( "{}, Bootstrap (samples = {}) AUC:{}, std={}" . format ( value_column , bootstrap_samples , mean_bootstrap_auc , scores . std ( ) ) ) outcome = data [ outcome_column ] . astype ( int ) values = data [ value_column ] fpr , tpr , thresholds = roc_curve ( outcome , values ) if ax is None : ax = plt . gca ( ) roc_plot = ax . plot ( fpr , tpr , lw = 1 , label = value_column ) ax . set_xlim ( [ - 0.05 , 1.05 ] ) ax . set_ylim ( [ - 0.05 , 1.05 ] ) ax . set_xlabel ( 'False Positive Rate' ) ax . set_ylabel ( 'True Positive Rate' ) ax . legend ( loc = 2 , borderaxespad = 0. ) ax . set_title ( '{} ROC Curve (n={})' . format ( value_column , len ( values ) ) ) return ( mean_bootstrap_auc , roc_plot )
|
Create a ROC curve and compute the bootstrap AUC for the given variable and outcome
|
2,319 |
def _strip_column_name ( col_name , keep_paren_contents = True ) : new_col_name = col_name punctuation_to_text = { '<=' : 'le' , '>=' : 'ge' , '=<' : 'le' , '=>' : 'ge' , '<' : 'lt' , '>' : 'gt' , '#' : 'num' } for punctuation , punctuation_text in punctuation_to_text . items ( ) : new_col_name = new_col_name . replace ( punctuation , punctuation_text ) if not ( keep_paren_contents ) : new_col_name = re . sub ( '\([^)]*\)' , '' , new_col_name ) punct_pattern = '[\W_]+' punct_replacement = '_' new_col_name = re . sub ( punct_pattern , punct_replacement , new_col_name ) new_col_name = new_col_name . strip ( "_" ) return new_col_name . lower ( )
|
Utility script applying several regexes to a string. Intended to be used by strip_column_names.
|
2,320 |
def strip_column_names ( cols , keep_paren_contents = True ) : new_cols = [ _strip_column_name ( col , keep_paren_contents = keep_paren_contents ) for col in cols ] if len ( new_cols ) != len ( set ( new_cols ) ) : warn_str = 'Warning: strip_column_names (if run) would introduce duplicate names.' warn_str += ' Reverting column names to the original.' warnings . warn ( warn_str , Warning ) print ( 'Warning: strip_column_names would introduce duplicate names. Please fix & try again.' ) return dict ( zip ( cols , cols ) ) return dict ( zip ( cols , new_cols ) )
|
Utility script for renaming pandas columns to patsy-friendly names.
|
2,321 |
def set_attributes ( obj , additional_data ) : for key , value in additional_data . items ( ) : if hasattr ( obj , key ) : raise ValueError ( "Key %s in additional_data already exists in this object" % key ) setattr ( obj , _strip_column_name ( key ) , value )
|
Given an object and a dictionary, give the object new attributes from that dictionary.
|
2,322 |
def return_obj ( cols , df , return_cols = False ) : df_holder = DataFrameHolder ( cols = cols , df = df ) return df_holder . return_self ( return_cols = return_cols )
|
Construct a DataFrameHolder and then return either that or the DataFrame.
|
2,323 |
def compare_provenance ( this_provenance , other_provenance , left_outer_diff = "In current but not comparison" , right_outer_diff = "In comparison but not current" ) : if ( not this_provenance or not other_provenance ) : return 0 this_items = set ( this_provenance . items ( ) ) other_items = set ( other_provenance . items ( ) ) new_diff = this_items . difference ( other_items ) old_diff = other_items . difference ( this_items ) warn_str = "" if len ( new_diff ) > 0 : warn_str += "%s: %s" % ( left_outer_diff , _provenance_str ( new_diff ) ) if len ( old_diff ) > 0 : warn_str += "%s: %s" % ( right_outer_diff , _provenance_str ( old_diff ) ) if len ( warn_str ) > 0 : warnings . warn ( warn_str , Warning ) return ( len ( new_diff ) + len ( old_diff ) )
|
Utility function to compare two arbitrary provenance dicts; returns the number of discrepancies.
|
2,324 |
def generate_random_missense_variants ( num_variants = 10 , max_search = 100000 , reference = "GRCh37" ) : variants = [ ] for i in range ( max_search ) : bases = [ "A" , "C" , "T" , "G" ] random_ref = choice ( bases ) bases . remove ( random_ref ) random_alt = choice ( bases ) random_contig = choice ( [ "1" , "2" , "3" , "4" , "5" ] ) random_variant = Variant ( contig = random_contig , start = randint ( 1 , 1000000 ) , ref = random_ref , alt = random_alt , ensembl = reference ) try : effects = random_variant . effects ( ) for effect in effects : if isinstance ( effect , Substitution ) : variants . append ( random_variant ) break except : continue if len ( variants ) == num_variants : break return VariantCollection ( variants )
|
Generate a random collection of missense variants by trying random variants repeatedly.
|
2,325 |
def generate_simple_vcf ( filename , variant_collection ) : contigs = [ ] positions = [ ] refs = [ ] alts = [ ] for variant in variant_collection : contigs . append ( "chr" + variant . contig ) positions . append ( variant . start ) refs . append ( variant . ref ) alts . append ( variant . alt ) df = pd . DataFrame ( ) df [ "contig" ] = contigs df [ "position" ] = positions df [ "id" ] = [ "." ] * len ( variant_collection ) df [ "ref" ] = refs df [ "alt" ] = alts df [ "qual" ] = [ "." ] * len ( variant_collection ) df [ "filter" ] = [ "." ] * len ( variant_collection ) df [ "info" ] = [ "." ] * len ( variant_collection ) df [ "format" ] = [ "GT:AD:DP" ] * len ( variant_collection ) normal_ref_depths = [ randint ( 1 , 10 ) for v in variant_collection ] normal_alt_depths = [ randint ( 1 , 10 ) for v in variant_collection ] df [ "n1" ] = [ "0:%d,%d:%d" % ( normal_ref_depths [ i ] , normal_alt_depths [ i ] , normal_ref_depths [ i ] + normal_alt_depths [ i ] ) for i in range ( len ( variant_collection ) ) ] tumor_ref_depths = [ randint ( 1 , 10 ) for v in variant_collection ] tumor_alt_depths = [ randint ( 1 , 10 ) for v in variant_collection ] df [ "t1" ] = [ "0/1:%d,%d:%d" % ( tumor_ref_depths [ i ] , tumor_alt_depths [ i ] , tumor_ref_depths [ i ] + tumor_alt_depths [ i ] ) for i in range ( len ( variant_collection ) ) ] with open ( filename , "w" ) as f : f . write ( "##fileformat=VCFv4.1\n" ) f . write ( "##reference=file:///projects/ngs/resources/gatk/2.3/ucsc.hg19.fasta\n" ) with open ( filename , "a" ) as f : df . to_csv ( f , sep = "\t" , index = None , header = None )
|
Output a very simple metadata-free VCF for each variant in a variant_collection.
|
2,326 |
def list_folder ( self , path ) : try : folder_contents = [ ] for f in os . listdir ( path ) : attr = paramiko . SFTPAttributes . from_stat ( os . stat ( os . path . join ( path , f ) ) ) attr . filename = f folder_contents . append ( attr ) return folder_contents except OSError as e : return SFTPServer . convert_errno ( e . errno )
|
Looks up folder contents of path.
|
2,327 |
def filter_variants ( variant_collection , patient , filter_fn , ** kwargs ) : if filter_fn : return variant_collection . clone_with_new_elements ( [ variant for variant in variant_collection if filter_fn ( FilterableVariant ( variant = variant , variant_collection = variant_collection , patient = patient , ) , ** kwargs ) ] ) else : return variant_collection
|
Filter variants from the Variant Collection
|
2,328 |
def filter_effects ( effect_collection , variant_collection , patient , filter_fn , all_effects , ** kwargs ) : def top_priority_maybe ( effect_collection ) : if all_effects : return effect_collection return EffectCollection ( list ( effect_collection . top_priority_effect_per_variant ( ) . values ( ) ) ) def apply_filter_fn ( filter_fn , effect ) : applied = filter_fn ( FilterableEffect ( effect = effect , variant_collection = variant_collection , patient = patient ) , ** kwargs ) if hasattr ( effect , "alternate_effect" ) : applied_alternate = filter_fn ( FilterableEffect ( effect = effect . alternate_effect , variant_collection = variant_collection , patient = patient ) , ** kwargs ) return applied or applied_alternate return applied if filter_fn : return top_priority_maybe ( EffectCollection ( [ effect for effect in effect_collection if apply_filter_fn ( filter_fn , effect ) ] ) ) else : return top_priority_maybe ( effect_collection )
|
Filter variants from the Effect Collection
|
2,329 |
def count_lines_in ( filename ) : "Count lines in a file" f = open ( filename ) lines = 0 buf_size = 1024 * 1024 read_f = f . read buf = read_f ( buf_size ) while buf : lines += buf . count ( '\n' ) buf = read_f ( buf_size ) return lines
|
Count lines in a file
|
2,330 |
def view_name_from ( path ) : "Resolve a path to the full python module name of the related view function" try : return CACHED_VIEWS [ path ] except KeyError : view = resolve ( path ) module = path name = '' if hasattr ( view . func , '__module__' ) : module = resolve ( path ) . func . __module__ if hasattr ( view . func , '__name__' ) : name = resolve ( path ) . func . __name__ view = "%s.%s" % ( module , name ) CACHED_VIEWS [ path ] = view return view
|
Resolve a path to the full python module name of the related view function
|
2,331 |
def generate_table_from ( data ) : "Output a nicely formatted ascii table" table = Texttable ( max_width = 120 ) table . add_row ( [ "view" , "method" , "status" , "count" , "minimum" , "maximum" , "mean" , "stdev" , "queries" , "querytime" ] ) table . set_cols_align ( [ "l" , "l" , "l" , "r" , "r" , "r" , "r" , "r" , "r" , "r" ] ) for item in sorted ( data ) : mean = round ( sum ( data [ item ] [ 'times' ] ) / data [ item ] [ 'count' ] , 3 ) mean_sql = round ( sum ( data [ item ] [ 'sql' ] ) / data [ item ] [ 'count' ] , 3 ) mean_sqltime = round ( sum ( data [ item ] [ 'sqltime' ] ) / data [ item ] [ 'count' ] , 3 ) sdsq = sum ( [ ( i - mean ) ** 2 for i in data [ item ] [ 'times' ] ] ) try : stdev = '%.2f' % ( ( sdsq / ( len ( data [ item ] [ 'times' ] ) - 1 ) ) ** .5 ) except ZeroDivisionError : stdev = '0.00' minimum = "%.2f" % min ( data [ item ] [ 'times' ] ) maximum = "%.2f" % max ( data [ item ] [ 'times' ] ) table . add_row ( [ data [ item ] [ 'view' ] , data [ item ] [ 'method' ] , data [ item ] [ 'status' ] , data [ item ] [ 'count' ] , minimum , maximum , '%.3f' % mean , stdev , mean_sql , mean_sqltime ] ) return table . draw ( )
|
Output a nicely formatted ascii table
|
2,332 |
def analyze_log_file ( logfile , pattern , reverse_paths = True , progress = True ) : "Given a log file and regex group and extract the performance data" if progress : lines = count_lines_in ( logfile ) pbar = ProgressBar ( widgets = [ Percentage ( ) , Bar ( ) ] , maxval = lines + 1 ) . start ( ) counter = 0 data = { } compiled_pattern = compile ( pattern ) for line in fileinput . input ( [ logfile ] ) : if progress : counter = counter + 1 parsed = compiled_pattern . findall ( line ) [ 0 ] date = parsed [ 0 ] method = parsed [ 1 ] path = parsed [ 2 ] status = parsed [ 3 ] time = parsed [ 4 ] sql = parsed [ 5 ] sqltime = parsed [ 6 ] try : ignore = False for ignored_path in IGNORE_PATHS : compiled_path = compile ( ignored_path ) if compiled_path . match ( path ) : ignore = True if not ignore : if reverse_paths : view = view_name_from ( path ) else : view = path key = "%s-%s-%s" % ( view , status , method ) try : data [ key ] [ 'count' ] = data [ key ] [ 'count' ] + 1 data [ key ] [ 'times' ] . append ( float ( time ) ) data [ key ] [ 'sql' ] . append ( int ( sql ) ) data [ key ] [ 'sqltime' ] . append ( float ( sqltime ) ) except KeyError : data [ key ] = { 'count' : 1 , 'status' : status , 'view' : view , 'method' : method , 'times' : [ float ( time ) ] , 'sql' : [ int ( sql ) ] , 'sqltime' : [ float ( sqltime ) ] , } except Resolver404 : pass if progress : pbar . update ( counter ) if progress : pbar . finish ( ) return data
|
Given a log file and a regex, group and extract the performance data.
|
2,333 |
def to_string ( self , limit = None ) : header = self . short_string ( ) if len ( self ) == 0 : return header contents = "" element_lines = [ " -- %s" % ( element , ) for element in self . elements [ : limit ] ] contents = "\n" . join ( element_lines ) if limit is not None and len ( self . elements ) > limit : contents += "\n ... and %d more" % ( len ( self ) - limit ) return "%s\n%s" % ( header , contents )
|
Create a string representation of this collection showing up to limit items.
|
2,334 |
def safe_log_error ( self , error : Exception , * info : str ) : self . __do_safe ( lambda : self . logger . error ( error , * info ) )
|
Log an error, failing silently on error.
|
2,335 |
def safe_log_info ( self , * info : str ) : self . __do_safe ( lambda : self . logger . info ( * info ) )
|
Log info, failing silently on error.
|
2,336 |
def _default_client ( jws_client , reactor , key , alg ) : if jws_client is None : pool = HTTPConnectionPool ( reactor ) agent = Agent ( reactor , pool = pool ) jws_client = JWSClient ( HTTPClient ( agent = agent ) , key , alg ) return jws_client
|
Make a client if we didn't get one.
|
2,337 |
def _find_supported_challenge ( authzr , responders ) : matches = [ ( responder , challbs [ 0 ] ) for challbs in authzr . body . resolved_combinations for responder in responders if [ challb . typ for challb in challbs ] == [ responder . challenge_type ] ] if len ( matches ) == 0 : raise NoSupportedChallenges ( authzr ) else : return matches [ 0 ]
|
Find a challenge combination that consists of a single challenge that the responder can satisfy.
|
2,338 |
def answer_challenge ( authzr , client , responders ) : responder , challb = _find_supported_challenge ( authzr , responders ) response = challb . response ( client . key ) def _stop_responding ( ) : return maybeDeferred ( responder . stop_responding , authzr . body . identifier . value , challb . chall , response ) return ( maybeDeferred ( responder . start_responding , authzr . body . identifier . value , challb . chall , response ) . addCallback ( lambda _ : client . answer_challenge ( challb , response ) ) . addCallback ( lambda _ : _stop_responding ) )
|
Complete an authorization using a responder.
|
2,339 |
def poll_until_valid ( authzr , clock , client , timeout = 300.0 ) : def repoll ( result ) : authzr , retry_after = result if authzr . body . status in { STATUS_PENDING , STATUS_PROCESSING } : return ( deferLater ( clock , retry_after , lambda : None ) . addCallback ( lambda _ : client . poll ( authzr ) ) . addCallback ( repoll ) ) if authzr . body . status != STATUS_VALID : raise AuthorizationFailed ( authzr ) return authzr def cancel_timeout ( result ) : if timeout_call . active ( ) : timeout_call . cancel ( ) return result d = client . poll ( authzr ) . addCallback ( repoll ) timeout_call = clock . callLater ( timeout , d . cancel ) d . addBoth ( cancel_timeout ) return d
|
Poll an authorization until it is in a state other than pending or processing.
|
2,340 |
def from_url ( cls , reactor , url , key , alg = RS256 , jws_client = None ) : action = LOG_ACME_CONSUME_DIRECTORY ( url = url , key_type = key . typ , alg = alg . name ) with action . context ( ) : check_directory_url_type ( url ) jws_client = _default_client ( jws_client , reactor , key , alg ) return ( DeferredContext ( jws_client . get ( url . asText ( ) ) ) . addCallback ( json_content ) . addCallback ( messages . Directory . from_json ) . addCallback ( tap ( lambda d : action . add_success_fields ( directory = d ) ) ) . addCallback ( cls , reactor , key , jws_client ) . addActionFinish ( ) )
|
Construct a client from an ACME directory at a given URL.
|
2,341 |
def register ( self , new_reg = None ) : if new_reg is None : new_reg = messages . NewRegistration ( ) action = LOG_ACME_REGISTER ( registration = new_reg ) with action . context ( ) : return ( DeferredContext ( self . update_registration ( new_reg , uri = self . directory [ new_reg ] ) ) . addErrback ( self . _maybe_registered , new_reg ) . addCallback ( tap ( lambda r : action . add_success_fields ( registration = r ) ) ) . addActionFinish ( ) )
|
Create a new registration with the ACME server.
|
2,342 |
def _maybe_registered ( self , failure , new_reg ) : failure . trap ( ServerError ) response = failure . value . response if response . code == http . CONFLICT : reg = new_reg . update ( resource = messages . UpdateRegistration . resource_type ) uri = self . _maybe_location ( response ) return self . update_registration ( reg , uri = uri ) return failure
|
If the registration already exists, we should just load it.
|
2,343 |
def agree_to_tos ( self , regr ) : return self . update_registration ( regr . update ( body = regr . body . update ( agreement = regr . terms_of_service ) ) )
|
Accept the terms-of-service for a registration.
|
2,344 |
def update_registration ( self , regr , uri = None ) : if uri is None : uri = regr . uri if isinstance ( regr , messages . RegistrationResource ) : message = messages . UpdateRegistration ( ** dict ( regr . body ) ) else : message = regr action = LOG_ACME_UPDATE_REGISTRATION ( uri = uri , registration = message ) with action . context ( ) : return ( DeferredContext ( self . _client . post ( uri , message ) ) . addCallback ( self . _parse_regr_response , uri = uri ) . addCallback ( self . _check_regr , regr ) . addCallback ( tap ( lambda r : action . add_success_fields ( registration = r ) ) ) . addActionFinish ( ) )
|
Submit a registration to the server to update it.
|
2,345 |
def _parse_regr_response ( self , response , uri = None , new_authzr_uri = None , terms_of_service = None ) : links = _parse_header_links ( response ) if u'terms-of-service' in links : terms_of_service = links [ u'terms-of-service' ] [ u'url' ] if u'next' in links : new_authzr_uri = links [ u'next' ] [ u'url' ] if new_authzr_uri is None : raise errors . ClientError ( '"next" link missing' ) return ( response . json ( ) . addCallback ( lambda body : messages . RegistrationResource ( body = messages . Registration . from_json ( body ) , uri = self . _maybe_location ( response , uri = uri ) , new_authzr_uri = new_authzr_uri , terms_of_service = terms_of_service ) ) )
|
Parse a registration response from the server.
|
2,346 |
def _check_regr ( self , regr , new_reg ) : body = getattr ( new_reg , 'body' , new_reg ) for k , v in body . items ( ) : if k == 'resource' or not v : continue if regr . body [ k ] != v : raise errors . UnexpectedUpdate ( regr ) if regr . body . key != self . key . public_key ( ) : raise errors . UnexpectedUpdate ( regr ) return regr
|
Check that a registration response contains the registration we were expecting.
|
2,347 |
def request_challenges ( self , identifier ) : action = LOG_ACME_CREATE_AUTHORIZATION ( identifier = identifier ) with action . context ( ) : message = messages . NewAuthorization ( identifier = identifier ) return ( DeferredContext ( self . _client . post ( self . directory [ message ] , message ) ) . addCallback ( self . _expect_response , http . CREATED ) . addCallback ( self . _parse_authorization ) . addCallback ( self . _check_authorization , identifier ) . addCallback ( tap ( lambda a : action . add_success_fields ( authorization = a ) ) ) . addActionFinish ( ) )
|
Create a new authorization.
|
2,348 |
def _expect_response ( cls , response , code ) : if response . code != code : raise errors . ClientError ( 'Expected {!r} response but got {!r}' . format ( code , response . code ) ) return response
|
Ensure we got the expected response code.
|
2,349 |
def _parse_authorization ( cls , response , uri = None ) : links = _parse_header_links ( response ) try : new_cert_uri = links [ u'next' ] [ u'url' ] except KeyError : raise errors . ClientError ( '"next" link missing' ) return ( response . json ( ) . addCallback ( lambda body : messages . AuthorizationResource ( body = messages . Authorization . from_json ( body ) , uri = cls . _maybe_location ( response , uri = uri ) , new_cert_uri = new_cert_uri ) ) )
|
Parse an authorization resource.
|
2,350 |
def _check_authorization ( cls , authzr , identifier ) : if authzr . body . identifier != identifier : raise errors . UnexpectedUpdate ( authzr ) return authzr
|
Check that the authorization we got is the one we expected.
|
2,351 |
def answer_challenge ( self , challenge_body , response ) : action = LOG_ACME_ANSWER_CHALLENGE ( challenge_body = challenge_body , response = response ) with action . context ( ) : return ( DeferredContext ( self . _client . post ( challenge_body . uri , response ) ) . addCallback ( self . _parse_challenge ) . addCallback ( self . _check_challenge , challenge_body ) . addCallback ( tap ( lambda c : action . add_success_fields ( challenge_resource = c ) ) ) . addActionFinish ( ) )
|
Respond to an authorization challenge.
|
2,352 |
def _parse_challenge ( cls , response ) : links = _parse_header_links ( response ) try : authzr_uri = links [ 'up' ] [ 'url' ] except KeyError : raise errors . ClientError ( '"up" link missing' ) return ( response . json ( ) . addCallback ( lambda body : messages . ChallengeResource ( authzr_uri = authzr_uri , body = messages . ChallengeBody . from_json ( body ) ) ) )
|
Parse a challenge resource.
|
2,353 |
def _check_challenge ( cls , challenge , challenge_body ) : if challenge . uri != challenge_body . uri : raise errors . UnexpectedUpdate ( challenge . uri ) return challenge
|
Check that the challenge resource we got is the one we expected.
|
2,354 |
def retry_after ( cls , response , default = 5 , _now = time . time ) : val = response . headers . getRawHeaders ( b'retry-after' , [ default ] ) [ 0 ] try : return int ( val ) except ValueError : return http . stringToDatetime ( val ) - _now ( )
|
Parse the Retry-After value from a response.
|
2,355 |
def request_issuance ( self , csr ) : action = LOG_ACME_REQUEST_CERTIFICATE ( ) with action . context ( ) : return ( DeferredContext ( self . _client . post ( self . directory [ csr ] , csr , content_type = DER_CONTENT_TYPE , headers = Headers ( { b'Accept' : [ DER_CONTENT_TYPE ] } ) ) ) . addCallback ( self . _expect_response , http . CREATED ) . addCallback ( self . _parse_certificate ) . addActionFinish ( ) )
|
Request a certificate.
|
2,356 |
def _parse_certificate ( cls , response ) : links = _parse_header_links ( response ) try : cert_chain_uri = links [ u'up' ] [ u'url' ] except KeyError : cert_chain_uri = None return ( response . content ( ) . addCallback ( lambda body : messages . CertificateResource ( uri = cls . _maybe_location ( response ) , cert_chain_uri = cert_chain_uri , body = body ) ) )
|
Parse a response containing a certificate resource.
|
2,357 |
def fetch_chain ( self , certr , max_length = 10 ) : action = LOG_ACME_FETCH_CHAIN ( ) with action . context ( ) : if certr . cert_chain_uri is None : return succeed ( [ ] ) elif max_length < 1 : raise errors . ClientError ( 'chain too long' ) return ( DeferredContext ( self . _client . get ( certr . cert_chain_uri , content_type = DER_CONTENT_TYPE , headers = Headers ( { b'Accept' : [ DER_CONTENT_TYPE ] } ) ) ) . addCallback ( self . _parse_certificate ) . addCallback ( lambda issuer : self . fetch_chain ( issuer , max_length = max_length - 1 ) . addCallback ( lambda chain : [ issuer ] + chain ) ) . addActionFinish ( ) )
|
Fetch the intermediary chain for a certificate.
|
2,358 |
def _wrap_in_jws ( self , nonce , obj ) : with LOG_JWS_SIGN ( key_type = self . _key . typ , alg = self . _alg . name , nonce = nonce ) : jobj = obj . json_dumps ( ) . encode ( ) return ( JWS . sign ( payload = jobj , key = self . _key , alg = self . _alg , nonce = nonce ) . json_dumps ( ) . encode ( ) )
|
Wrap a JSONDeSerializable object in JWS.
|
2,359 |
def _check_response ( cls , response , content_type = JSON_CONTENT_TYPE ) : def _got_failure ( f ) : f . trap ( ValueError ) return None def _got_json ( jobj ) : if 400 <= response . code < 600 : if response_ct == JSON_ERROR_CONTENT_TYPE and jobj is not None : raise ServerError ( messages . Error . from_json ( jobj ) , response ) else : raise errors . ClientError ( response ) elif response_ct != content_type : raise errors . ClientError ( 'Unexpected response Content-Type: {0!r}' . format ( response_ct ) ) elif content_type == JSON_CONTENT_TYPE and jobj is None : raise errors . ClientError ( response ) return response response_ct = response . headers . getRawHeaders ( b'Content-Type' , [ None ] ) [ 0 ] action = LOG_JWS_CHECK_RESPONSE ( expected_content_type = content_type , response_content_type = response_ct ) with action . context ( ) : return ( DeferredContext ( response . json ( ) ) . addErrback ( _got_failure ) . addCallback ( _got_json ) . addActionFinish ( ) )
|
Check response content and its type.
|
2,360 |
def head ( self , url , * args , ** kwargs ) : with LOG_JWS_HEAD ( ) . context ( ) : return DeferredContext ( self . _send_request ( u'HEAD' , url , * args , ** kwargs ) ) . addActionFinish ( )
|
Send a HEAD request without checking the response.
|
2,361 |
def get ( self , url , content_type = JSON_CONTENT_TYPE , ** kwargs ) : with LOG_JWS_GET ( ) . context ( ) : return ( DeferredContext ( self . _send_request ( u'GET' , url , ** kwargs ) ) . addCallback ( self . _check_response , content_type = content_type ) . addActionFinish ( ) )
|
Send a GET request and check the response.
|
2,362 |
def _add_nonce ( self , response ) : nonce = response . headers . getRawHeaders ( REPLAY_NONCE_HEADER , [ None ] ) [ 0 ] with LOG_JWS_ADD_NONCE ( raw_nonce = nonce ) as action : if nonce is None : raise errors . MissingNonce ( response ) else : try : decoded_nonce = Header . _fields [ 'nonce' ] . decode ( nonce . decode ( 'ascii' ) ) action . add_success_fields ( nonce = decoded_nonce ) except DeserializationError as error : raise errors . BadNonce ( nonce , error ) self . _nonces . add ( decoded_nonce ) return response
|
Store a nonce from a response we received.
|
2,363 |
def _get_nonce ( self , url ) : action = LOG_JWS_GET_NONCE ( ) if len ( self . _nonces ) > 0 : with action : nonce = self . _nonces . pop ( ) action . add_success_fields ( nonce = nonce ) return succeed ( nonce ) else : with action . context ( ) : return ( DeferredContext ( self . head ( url ) ) . addCallback ( self . _add_nonce ) . addCallback ( lambda _ : self . _nonces . pop ( ) ) . addCallback ( tap ( lambda nonce : action . add_success_fields ( nonce = nonce ) ) ) . addActionFinish ( ) )
|
Get a nonce to use in a request, removing it from the nonces on hand.
|
2,364 |
def _post ( self , url , obj , content_type , ** kwargs ) : with LOG_JWS_POST ( ) . context ( ) : headers = kwargs . setdefault ( 'headers' , Headers ( ) ) headers . setRawHeaders ( b'content-type' , [ JSON_CONTENT_TYPE ] ) return ( DeferredContext ( self . _get_nonce ( url ) ) . addCallback ( self . _wrap_in_jws , obj ) . addCallback ( lambda data : self . _send_request ( u'POST' , url , data = data , ** kwargs ) ) . addCallback ( self . _add_nonce ) . addCallback ( self . _check_response , content_type = content_type ) . addActionFinish ( ) )
|
POST an object and check the response.
|
2,365 |
def post ( self , url , obj , content_type = JSON_CONTENT_TYPE , ** kwargs ) : def retry_bad_nonce ( f ) : f . trap ( ServerError ) if f . value . message . typ . split ( ':' ) [ - 1 ] == 'badNonce' : self . _nonces . clear ( ) self . _add_nonce ( f . value . response ) return self . _post ( url , obj , content_type , ** kwargs ) return f return ( self . _post ( url , obj , content_type , ** kwargs ) . addErrback ( retry_bad_nonce ) )
|
POST an object and check the response. Retry once if a badNonce error is received.
|
2,366 |
def _daemon_thread ( * a , ** kw ) : thread = Thread ( * a , ** kw ) thread . daemon = True return thread
|
Create a threading.Thread but always set daemon.
|
2,367 |
def _defer_to_worker ( deliver , worker , work , * args , ** kwargs ) : deferred = Deferred ( ) def wrapped_work ( ) : try : result = work ( * args , ** kwargs ) except BaseException : f = Failure ( ) deliver ( lambda : deferred . errback ( f ) ) else : deliver ( lambda : deferred . callback ( result ) ) worker . do ( wrapped_work ) return deferred
|
Run a task in a worker, delivering the result as a Deferred in the reactor thread.
|
2,368 |
def _split_zone ( server_name , zone_name ) : server_name = server_name . rstrip ( u'.' ) zone_name = zone_name . rstrip ( u'.' ) if not ( server_name == zone_name or server_name . endswith ( u'.' + zone_name ) ) : raise NotInZone ( server_name = server_name , zone_name = zone_name ) return server_name [ : - len ( zone_name ) ] . rstrip ( u'.' )
|
Split the zone portion off from a DNS label.
|
2,369 |
def _get_existing ( driver , zone_name , server_name , validation ) : if zone_name is None : zones = sorted ( ( z for z in driver . list_zones ( ) if server_name . rstrip ( u'.' ) . endswith ( u'.' + z . domain . rstrip ( u'.' ) ) ) , key = lambda z : len ( z . domain ) , reverse = True ) if len ( zones ) == 0 : raise NotInZone ( server_name = server_name , zone_name = None ) else : zones = [ z for z in driver . list_zones ( ) if z . domain == zone_name ] if len ( zones ) == 0 : raise ZoneNotFound ( zone_name = zone_name ) zone = zones [ 0 ] subdomain = _split_zone ( server_name , zone . domain ) existing = [ record for record in zone . list_records ( ) if record . name == subdomain and record . type == 'TXT' and record . data == validation ] return zone , existing , subdomain
|
Get existing validation records.
|
2,370 |
def _validation ( response ) : h = hashlib . sha256 ( response . key_authorization . encode ( "utf-8" ) ) return b64encode ( h . digest ( ) ) . decode ( )
|
Get the validation value for a challenge response.
|
2,371 |
def load_or_create_client_key ( pem_path ) : acme_key_file = pem_path . asTextMode ( ) . child ( u'client.key' ) if acme_key_file . exists ( ) : key = serialization . load_pem_private_key ( acme_key_file . getContent ( ) , password = None , backend = default_backend ( ) ) else : key = generate_private_key ( u'rsa' ) acme_key_file . setContent ( key . private_bytes ( encoding = serialization . Encoding . PEM , format = serialization . PrivateFormat . TraditionalOpenSSL , encryption_algorithm = serialization . NoEncryption ( ) ) ) return JWKRSA ( key = key )
|
Load the client key from a directory, creating it if it does not exist.
|
2,372 |
def _parse ( reactor , directory , pemdir , * args , ** kwargs ) : def colon_join ( items ) : return ':' . join ( [ item . replace ( ':' , '\\:' ) for item in items ] ) sub = colon_join ( list ( args ) + [ '=' . join ( item ) for item in kwargs . items ( ) ] ) pem_path = FilePath ( pemdir ) . asTextMode ( ) acme_key = load_or_create_client_key ( pem_path ) return AutoTLSEndpoint ( reactor = reactor , directory = directory , client_creator = partial ( Client . from_url , key = acme_key , alg = RS256 ) , cert_store = DirectoryStore ( pem_path ) , cert_mapping = HostDirectoryMap ( pem_path ) , sub_endpoint = serverFromString ( reactor , sub ) )
|
Parse a txacme endpoint description.
|
2,373 |
def lazyread ( f , delimiter ) : try : running = f . read ( 0 ) except Exception as e : if e . __class__ . __name__ == 'IncompleteReadError' : running = b'' else : raise while True : new_data = f . read ( 1024 ) if not new_data : yield running return running += new_data while delimiter in running : curr , running = running . split ( delimiter , 1 ) yield curr + delimiter
|
Generator which continually reads f to the next instance of delimiter.
|
2,374 |
def generate_private_key ( key_type ) : if key_type == u'rsa' : return rsa . generate_private_key ( public_exponent = 65537 , key_size = 2048 , backend = default_backend ( ) ) raise ValueError ( key_type )
|
Generate a random private key using sensible parameters.
|
2,375 |
def tap ( f ) : @ wraps ( f ) def _cb ( res , * a , ** kw ) : d = maybeDeferred ( f , res , * a , ** kw ) d . addCallback ( lambda ignored : res ) return d return _cb
|
Tap a Deferred callback chain with a function whose return value is ignored.
|
2,376 |
def decode_csr ( b64der ) : try : return x509 . load_der_x509_csr ( decode_b64jose ( b64der ) , default_backend ( ) ) except ValueError as error : raise DeserializationError ( error )
|
Decode a JOSE Base-64 DER-encoded CSR.
|
2,377 |
def csr_for_names ( names , key ) : if len ( names ) == 0 : raise ValueError ( 'Must have at least one name' ) if len ( names [ 0 ] ) > 64 : common_name = u'san.too.long.invalid' else : common_name = names [ 0 ] return ( x509 . CertificateSigningRequestBuilder ( ) . subject_name ( x509 . Name ( [ x509 . NameAttribute ( NameOID . COMMON_NAME , common_name ) ] ) ) . add_extension ( x509 . SubjectAlternativeName ( list ( map ( x509 . DNSName , names ) ) ) , critical = False ) . sign ( key , hashes . SHA256 ( ) , default_backend ( ) ) )
|
Generate a certificate signing request for the given names and private key.
|
2,378 |
def _wrap_parse ( code , filename ) : code = 'async def wrapper():\n' + indent ( code , ' ' ) return ast . parse ( code , filename = filename ) . body [ 0 ] . body [ 0 ] . value
|
An async wrapper is required to avoid await calls raising a SyntaxError.
|
2,379 |
def layers_to_solr ( self , layers ) : layers_dict_list = [ ] layers_success_ids = [ ] layers_errors_ids = [ ] for layer in layers : layer_dict , message = layer2dict ( layer ) if not layer_dict : layers_errors_ids . append ( [ layer . id , message ] ) LOGGER . error ( message ) else : layers_dict_list . append ( layer_dict ) layers_success_ids . append ( layer . id ) layers_json = json . dumps ( layers_dict_list ) try : url_solr_update = '%s/solr/hypermap/update/json/docs' % SEARCH_URL headers = { "content-type" : "application/json" } params = { "commitWithin" : 1500 } requests . post ( url_solr_update , data = layers_json , params = params , headers = headers ) LOGGER . info ( 'Solr synced for the given layers' ) except Exception : message = "Error saving solr records: %s" % sys . exc_info ( ) [ 1 ] layers_errors_ids . append ( [ - 1 , message ] ) LOGGER . error ( message ) return False , layers_errors_ids return True , layers_errors_ids
|
Sync n layers in Solr.
|
2,380 |
def layer_to_solr ( self , layer ) : success = True message = 'Synced layer id %s to Solr' % layer . id layer_dict , message = layer2dict ( layer ) if not layer_dict : success = False else : layer_json = json . dumps ( layer_dict ) try : url_solr_update = '%s/solr/hypermap/update/json/docs' % SEARCH_URL headers = { "content-type" : "application/json" } params = { "commitWithin" : 1500 } res = requests . post ( url_solr_update , data = layer_json , params = params , headers = headers ) res = res . json ( ) if 'error' in res : success = False message = "Error syncing layer id %s to Solr: %s" % ( layer . id , res [ "error" ] . get ( "msg" ) ) except Exception , e : success = False message = "Error syncing layer id %s to Solr: %s" % ( layer . id , sys . exc_info ( ) [ 1 ] ) LOGGER . error ( e , exc_info = True ) if success : LOGGER . info ( message ) else : LOGGER . error ( message ) return success , message
|
Sync a layer in Solr.
|
2,381 |
def clear_solr ( self , catalog = "hypermap" ) : solr_url = "{0}/solr/{1}" . format ( SEARCH_URL , catalog ) solr = pysolr . Solr ( solr_url , timeout = 60 ) solr . delete ( q = '*:*' ) LOGGER . debug ( 'Solr core cleared' )
|
Clear all indexes in the Solr core.
|
2,382 |
def create_service_from_endpoint ( endpoint , service_type , title = None , abstract = None , catalog = None ) : from models import Service if Service . objects . filter ( url = endpoint , catalog = catalog ) . count ( ) == 0 : request = requests . get ( endpoint ) if request . status_code == 200 : LOGGER . debug ( 'Creating a %s service for endpoint=%s catalog=%s' % ( service_type , endpoint , catalog ) ) service = Service ( type = service_type , url = endpoint , title = title , abstract = abstract , csw_type = 'service' , catalog = catalog ) service . save ( ) return service else : LOGGER . warning ( 'This endpoint is invalid, status code is %s' % request . status_code ) else : LOGGER . warning ( 'A service for this endpoint %s in catalog %s already exists' % ( endpoint , catalog ) ) return None
|
Create a service from an endpoint if it does not already exist.
|
2,383 |
def service_url_parse ( url ) : endpoint = get_sanitized_endpoint ( url ) url_split_list = url . split ( endpoint + '/' ) if len ( url_split_list ) != 0 : url_split_list = url_split_list [ 1 ] . split ( '/' ) else : raise Exception ( 'Wrong url parsed' ) parsed_url = [ s for s in url_split_list if '?' not in s if 'Server' not in s ] return parsed_url
|
Function that parses the service and folder of services from a url.
|
2,384 |
def inverse_mercator ( xy ) : lon = ( xy [ 0 ] / 20037508.34 ) * 180 lat = ( xy [ 1 ] / 20037508.34 ) * 180 lat = 180 / math . pi * ( 2 * math . atan ( math . exp ( lat * math . pi / 180 ) ) - math . pi / 2 ) return ( lon , lat )
|
Given coordinates in spherical mercator, return a (lon, lat) tuple.
|
2,385 |
def get_wms_version_negotiate ( url , timeout = 10 ) : try : LOGGER . debug ( 'Trying a WMS 1.3.0 GetCapabilities request' ) return WebMapService ( url , version = '1.3.0' , timeout = timeout ) except Exception as err : LOGGER . warning ( 'WMS 1.3.0 support not found: %s' , err ) LOGGER . debug ( 'Trying a WMS 1.1.1 GetCapabilities request instead' ) return WebMapService ( url , version = '1.1.1' , timeout = timeout )
|
OWSLib wrapper function to perform version negotiation against owslib.wms.WebMapService.
|
2,386 |
def get_sanitized_endpoint ( url ) : sanitized_url = url . rstrip ( ) esri_string = '/rest/services' if esri_string in url : match = re . search ( esri_string , sanitized_url ) sanitized_url = url [ 0 : ( match . start ( 0 ) + len ( esri_string ) ) ] return sanitized_url
|
Sanitize an endpoint by removing unneeded parameters.
|
2,387 |
def get_esri_extent ( esriobj ) : extent = None srs = None if 'fullExtent' in esriobj . _json_struct : extent = esriobj . _json_struct [ 'fullExtent' ] if 'extent' in esriobj . _json_struct : extent = esriobj . _json_struct [ 'extent' ] try : srs = extent [ 'spatialReference' ] [ 'wkid' ] except KeyError , err : LOGGER . error ( err , exc_info = True ) return [ extent , srs ]
|
Get the extent of an ESRI resource
|
2,388 |
def bbox2wktpolygon ( bbox ) : minx = float ( bbox [ 0 ] ) miny = float ( bbox [ 1 ] ) maxx = float ( bbox [ 2 ] ) maxy = float ( bbox [ 3 ] ) return 'POLYGON((%.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f))' % ( minx , miny , minx , maxy , maxx , maxy , maxx , miny , minx , miny )
|
Return OGC WKT Polygon of a simple bbox list of strings
|
2,389 |
def get_solr_date ( pydate , is_negative ) : try : if isinstance ( pydate , datetime . datetime ) : solr_date = '%sZ' % pydate . isoformat ( ) [ 0 : 19 ] if is_negative : LOGGER . debug ( '%s This layer has a negative date' % solr_date ) solr_date = '-%s' % solr_date return solr_date else : return None except Exception , e : LOGGER . error ( e , exc_info = True ) return None
|
Returns a date in a valid Solr format from a string.
|
2,390 |
def get_date ( layer ) : date = None sign = '+' date_type = 1 layer_dates = layer . get_layer_dates ( ) if layer_dates : sign = layer_dates [ 0 ] [ 0 ] date = layer_dates [ 0 ] [ 1 ] date_type = layer_dates [ 0 ] [ 2 ] if date is None : date = layer . created if date . year > 2300 : date = None if date_type == 0 : date_type = "Detected" if date_type == 1 : date_type = "From Metadata" return get_solr_date ( date , ( sign == '-' ) ) , date_type
|
Returns a custom date representation. A date can be detected or from metadata. It can be a range or a simple date in isoformat.
|
2,391 |
def detect_metadata_url_scheme ( url ) : scheme = None url_lower = url . lower ( ) if any ( x in url_lower for x in [ 'wms' , 'service=wms' ] ) : scheme = 'OGC:WMS' if any ( x in url_lower for x in [ 'wmts' , 'service=wmts' ] ) : scheme = 'OGC:WMTS' elif all ( x in url for x in [ '/MapServer' , 'f=json' ] ) : scheme = 'ESRI:ArcGIS:MapServer' elif all ( x in url for x in [ '/ImageServer' , 'f=json' ] ) : scheme = 'ESRI:ArcGIS:ImageServer' return scheme
|
Detect whether a url is a Service type that HHypermap supports.
|
2,392 |
def serialize_checks ( check_set ) : check_set_list = [ ] for check in check_set . all ( ) [ : 25 ] : check_set_list . append ( { 'datetime' : check . checked_datetime . isoformat ( ) , 'value' : check . response_time , 'success' : 1 if check . success else 0 } ) return check_set_list
|
Serialize a check_set for raphael
|
2,393 |
def domains ( request ) : url = '' query = '*:*&facet=true&facet.limit=-1&facet.pivot=domain_name,service_id&wt=json&indent=true&rows=0' if settings . SEARCH_TYPE == 'elasticsearch' : url = '%s/select?q=%s' % ( settings . SEARCH_URL , query ) if settings . SEARCH_TYPE == 'solr' : url = '%s/solr/hypermap/select?q=%s' % ( settings . SEARCH_URL , query ) LOGGER . debug ( url ) response = urllib2 . urlopen ( url ) data = response . read ( ) . replace ( '\n' , '' ) layers_count = Layer . objects . all ( ) . count ( ) services_count = Service . objects . all ( ) . count ( ) template = loader . get_template ( 'aggregator/index.html' ) context = RequestContext ( request , { 'data' : data , 'layers_count' : layers_count , 'services_count' : services_count , } ) return HttpResponse ( template . render ( context ) )
|
A page with the number of services and layers faceted on domains.
|
2,394 |
def tasks_runner ( request ) : cached_layers_number = 0 cached_layers = cache . get ( 'layers' ) if cached_layers : cached_layers_number = len ( cached_layers ) cached_deleted_layers_number = 0 cached_deleted_layers = cache . get ( 'deleted_layers' ) if cached_deleted_layers : cached_deleted_layers_number = len ( cached_deleted_layers ) if request . method == 'POST' : if 'check_all' in request . POST : if settings . REGISTRY_SKIP_CELERY : check_all_services ( ) else : check_all_services . delay ( ) if 'index_all' in request . POST : if settings . REGISTRY_SKIP_CELERY : index_all_layers ( ) else : index_all_layers . delay ( ) if 'index_cached' in request . POST : if settings . REGISTRY_SKIP_CELERY : index_cached_layers ( ) else : index_cached_layers . delay ( ) if 'drop_cached' in request . POST : cache . set ( 'layers' , None ) cache . set ( 'deleted_layers' , None ) if 'clear_index' in request . POST : if settings . REGISTRY_SKIP_CELERY : clear_index ( ) else : clear_index . delay ( ) if 'remove_index' in request . POST : if settings . REGISTRY_SKIP_CELERY : unindex_layers_with_issues ( ) else : unindex_layers_with_issues . delay ( ) return render ( request , 'aggregator/tasks_runner.html' , { 'cached_layers_number' : cached_layers_number , 'cached_deleted_layers_number' : cached_deleted_layers_number , } )
|
A page that lets the admin run global tasks.
|
2,395 |
def layer_mapproxy ( request , catalog_slug , layer_uuid , path_info ) : layer = get_object_or_404 ( Layer , uuid = layer_uuid , catalog__slug = catalog_slug ) if layer . service . type == 'Hypermap:WorldMap' : layer . service . url = layer . url mp , yaml_config = get_mapproxy ( layer ) query = request . META [ 'QUERY_STRING' ] if len ( query ) > 0 : path_info = path_info + '?' + query params = { } headers = { 'X-Script-Name' : '/registry/{0}/layer/{1}/map/' . format ( catalog_slug , layer . id ) , 'X-Forwarded-Host' : request . META [ 'HTTP_HOST' ] , 'HTTP_HOST' : request . META [ 'HTTP_HOST' ] , 'SERVER_NAME' : request . META [ 'SERVER_NAME' ] , } if path_info == '/config' : response = HttpResponse ( yaml_config , content_type = 'text/plain' ) return response mp_response = mp . get ( path_info , params , headers ) response = HttpResponse ( mp_response . body , status = mp_response . status_int ) for header , value in mp_response . headers . iteritems ( ) : response [ header ] = value return response
|
Get Layer with matching catalog and uuid
|
2,396 |
def parse_datetime ( date_str ) : is_common_era = True date_str_parts = date_str . split ( "-" ) if date_str_parts and date_str_parts [ 0 ] == '' : is_common_era = False if len ( date_str_parts ) == 2 : date_str = date_str + "-01-01T00:00:00Z" parsed_datetime = { 'is_common_era' : is_common_era , 'parsed_datetime' : None } if is_common_era : if date_str == '*' : return parsed_datetime default = datetime . datetime . now ( ) . replace ( hour = 0 , minute = 0 , second = 0 , microsecond = 0 , day = 1 , month = 1 ) parsed_datetime [ 'parsed_datetime' ] = parse ( date_str , default = default ) return parsed_datetime parsed_datetime [ 'parsed_datetime' ] = date_str return parsed_datetime
|
Parses a date string to a date object. For BCE dates, only the year part is supported.
|
2,397 |
def query_ids ( self , ids ) : results = self . _get_repo_filter ( Layer . objects ) . filter ( uuid__in = ids ) . all ( ) if len ( results ) == 0 : results = self . _get_repo_filter ( Service . objects ) . filter ( uuid__in = ids ) . all ( ) return results
|
Query by list of identifiers
|
2,398 |
def query_domain ( self , domain , typenames , domainquerytype = 'list' , count = False ) : objects = self . _get_repo_filter ( Layer . objects ) if domainquerytype == 'range' : return [ tuple ( objects . aggregate ( Min ( domain ) , Max ( domain ) ) . values ( ) ) ] else : if count : return [ ( d [ domain ] , d [ '%s__count' % domain ] ) for d in objects . values ( domain ) . annotate ( Count ( domain ) ) ] else : return objects . values_list ( domain ) . distinct ( )
|
Query by property domain values
|
2,399 |
def query_source ( self , source ) : return self . _get_repo_filter ( Layer . objects ) . filter ( url = source )
|
Query by source
|
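The rows above pair space-tokenized Python source (question) with a one-line natural-language summary (target). As a minimal sketch only, assuming the rows have been exported as JSON Lines with the three fields shown in the header (the filename rows.jsonl and the helper iter_rows are hypothetical), they could be iterated like this:

```python
import json

def iter_rows(path="rows.jsonl"):
    """Yield (idx, question, target) tuples from a hypothetical JSON Lines export."""
    with open(path) as f:
        for line in f:
            row = json.loads(line)
            # "question" holds space-tokenized Python source;
            # "target" holds its one-line natural-language summary.
            yield row["idx"], row["question"], row["target"]

if __name__ == "__main__":
    for idx, question, target in iter_rows():
        print(idx, target)
        break  # show only the first row
```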