id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 class: python) | code (string, 75-19.8k chars) | code_tokens (sequence) | docstring (string, 3-17.3k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 87-242 chars)
---|---|---|---|---|---|---|---|---|---|---|---
2,100 | SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Wrapper.py | Wrapper.search | def search(self, file_name, imported_data=None):
""" Run models on different data configurations.
Note
----
The input json file should include ALL parameters.
Parameters
----------
file_name : str
Optional json file to read parameters.
imported_data : pd.DataFrame()
Pandas Dataframe containing data.
"""
resample_freq=['15T', 'h', 'd']
time_freq = {
'year' : [True, False, False, False, False],
'month' : [False, True, False, False, False],
'week' : [False, False, True, False, False],
'tod' : [False, False, False, True, False],
'dow' : [False, False, False, False, True],
}
optimal_score = float('-inf')
optimal_model = None
# CSV Files
if not imported_data:
with open(file_name) as f:
input_json = json.load(f)
import_json = input_json['Import']
imported_data = self.import_data(file_name=import_json['File Name'], folder_name=import_json['Folder Name'],
head_row=import_json['Head Row'], index_col=import_json['Index Col'],
convert_col=import_json['Convert Col'], concat_files=import_json['Concat Files'],
save_file=import_json['Save File'])
with open(file_name) as f:
input_json = json.load(f)
for x in resample_freq: # Resample data interval
input_json['Clean']['Frequency'] = x
for i in range(len(time_freq.items())): # Add time features
input_json['Preprocess']['Year'] = time_freq['year'][i]
input_json['Preprocess']['Month'] = time_freq['month'][i]
input_json['Preprocess']['Week'] = time_freq['week'][i]
input_json['Preprocess']['Time of Day'] = time_freq['tod'][i]
input_json['Preprocess']['Day of Week'] = time_freq['dow'][i]
# Putting comment in json file to indicate which parameters have been changed
time_feature = None
for key in time_freq:
if time_freq[key][i]:
time_feature = key
self.result['Comment'] = 'Freq: ' + x + ', ' + 'Time Feature: ' + time_feature
# Read parameters in input_json
self.read_json(file_name=None, input_json=input_json, imported_data=imported_data)
# Keep track of highest adj_r2 score
if self.result['Model']['Optimal Model\'s Metrics']['adj_r2'] > optimal_score:
optimal_score = self.result['Model']['Optimal Model\'s Metrics']['adj_r2']
optimal_model_file_name = self.results_folder_name + '/results-' + str(self.get_global_count()) + '.json'
# Wrapper.global_count += 1
print('Most optimal model: ', optimal_model_file_name)
freq = self.result['Comment'].split(' ')[1][:-1]
time_feat = self.result['Comment'].split(' ')[-1]
print('Freq: ', freq, 'Time Feature: ', time_feat) | python | def search(self, file_name, imported_data=None):
""" Run models on different data configurations.
Note
----
The input json file should include ALL parameters.
Parameters
----------
file_name : str
Optional json file to read parameters.
imported_data : pd.DataFrame()
Pandas Dataframe containing data.
"""
resample_freq=['15T', 'h', 'd']
time_freq = {
'year' : [True, False, False, False, False],
'month' : [False, True, False, False, False],
'week' : [False, False, True, False, False],
'tod' : [False, False, False, True, False],
'dow' : [False, False, False, False, True],
}
optimal_score = float('-inf')
optimal_model = None
# CSV Files
if not imported_data:
with open(file_name) as f:
input_json = json.load(f)
import_json = input_json['Import']
imported_data = self.import_data(file_name=import_json['File Name'], folder_name=import_json['Folder Name'],
head_row=import_json['Head Row'], index_col=import_json['Index Col'],
convert_col=import_json['Convert Col'], concat_files=import_json['Concat Files'],
save_file=import_json['Save File'])
with open(file_name) as f:
input_json = json.load(f)
for x in resample_freq: # Resample data interval
input_json['Clean']['Frequency'] = x
for i in range(len(time_freq.items())): # Add time features
input_json['Preprocess']['Year'] = time_freq['year'][i]
input_json['Preprocess']['Month'] = time_freq['month'][i]
input_json['Preprocess']['Week'] = time_freq['week'][i]
input_json['Preprocess']['Time of Day'] = time_freq['tod'][i]
input_json['Preprocess']['Day of Week'] = time_freq['dow'][i]
# Putting comment in json file to indicate which parameters have been changed
time_feature = None
for key in time_freq:
if time_freq[key][i]:
time_feature = key
self.result['Comment'] = 'Freq: ' + x + ', ' + 'Time Feature: ' + time_feature
# Read parameters in input_json
self.read_json(file_name=None, input_json=input_json, imported_data=imported_data)
# Keep track of highest adj_r2 score
if self.result['Model']['Optimal Model\'s Metrics']['adj_r2'] > optimal_score:
optimal_score = self.result['Model']['Optimal Model\'s Metrics']['adj_r2']
optimal_model_file_name = self.results_folder_name + '/results-' + str(self.get_global_count()) + '.json'
# Wrapper.global_count += 1
print('Most optimal model: ', optimal_model_file_name)
freq = self.result['Comment'].split(' ')[1][:-1]
time_feat = self.result['Comment'].split(' ')[-1]
print('Freq: ', freq, 'Time Feature: ', time_feat) | [
"def",
"search",
"(",
"self",
",",
"file_name",
",",
"imported_data",
"=",
"None",
")",
":",
"resample_freq",
"=",
"[",
"'15T'",
",",
"'h'",
",",
"'d'",
"]",
"time_freq",
"=",
"{",
"'year'",
":",
"[",
"True",
",",
"False",
",",
"False",
",",
"False",
",",
"False",
"]",
",",
"'month'",
":",
"[",
"False",
",",
"True",
",",
"False",
",",
"False",
",",
"False",
"]",
",",
"'week'",
":",
"[",
"False",
",",
"False",
",",
"True",
",",
"False",
",",
"False",
"]",
",",
"'tod'",
":",
"[",
"False",
",",
"False",
",",
"False",
",",
"True",
",",
"False",
"]",
",",
"'dow'",
":",
"[",
"False",
",",
"False",
",",
"False",
",",
"False",
",",
"True",
"]",
",",
"}",
"optimal_score",
"=",
"float",
"(",
"'-inf'",
")",
"optimal_model",
"=",
"None",
"# CSV Files",
"if",
"not",
"imported_data",
":",
"with",
"open",
"(",
"file_name",
")",
"as",
"f",
":",
"input_json",
"=",
"json",
".",
"load",
"(",
"f",
")",
"import_json",
"=",
"input_json",
"[",
"'Import'",
"]",
"imported_data",
"=",
"self",
".",
"import_data",
"(",
"file_name",
"=",
"import_json",
"[",
"'File Name'",
"]",
",",
"folder_name",
"=",
"import_json",
"[",
"'Folder Name'",
"]",
",",
"head_row",
"=",
"import_json",
"[",
"'Head Row'",
"]",
",",
"index_col",
"=",
"import_json",
"[",
"'Index Col'",
"]",
",",
"convert_col",
"=",
"import_json",
"[",
"'Convert Col'",
"]",
",",
"concat_files",
"=",
"import_json",
"[",
"'Concat Files'",
"]",
",",
"save_file",
"=",
"import_json",
"[",
"'Save File'",
"]",
")",
"with",
"open",
"(",
"file_name",
")",
"as",
"f",
":",
"input_json",
"=",
"json",
".",
"load",
"(",
"f",
")",
"for",
"x",
"in",
"resample_freq",
":",
"# Resample data interval",
"input_json",
"[",
"'Clean'",
"]",
"[",
"'Frequency'",
"]",
"=",
"x",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"time_freq",
".",
"items",
"(",
")",
")",
")",
":",
"# Add time features",
"input_json",
"[",
"'Preprocess'",
"]",
"[",
"'Year'",
"]",
"=",
"time_freq",
"[",
"'year'",
"]",
"[",
"i",
"]",
"input_json",
"[",
"'Preprocess'",
"]",
"[",
"'Month'",
"]",
"=",
"time_freq",
"[",
"'month'",
"]",
"[",
"i",
"]",
"input_json",
"[",
"'Preprocess'",
"]",
"[",
"'Week'",
"]",
"=",
"time_freq",
"[",
"'week'",
"]",
"[",
"i",
"]",
"input_json",
"[",
"'Preprocess'",
"]",
"[",
"'Time of Day'",
"]",
"=",
"time_freq",
"[",
"'tod'",
"]",
"[",
"i",
"]",
"input_json",
"[",
"'Preprocess'",
"]",
"[",
"'Day of Week'",
"]",
"=",
"time_freq",
"[",
"'dow'",
"]",
"[",
"i",
"]",
"# Putting comment in json file to indicate which parameters have been changed",
"time_feature",
"=",
"None",
"for",
"key",
"in",
"time_freq",
":",
"if",
"time_freq",
"[",
"key",
"]",
"[",
"i",
"]",
":",
"time_feature",
"=",
"key",
"self",
".",
"result",
"[",
"'Comment'",
"]",
"=",
"'Freq: '",
"+",
"x",
"+",
"', '",
"+",
"'Time Feature: '",
"+",
"time_feature",
"# Read parameters in input_json",
"self",
".",
"read_json",
"(",
"file_name",
"=",
"None",
",",
"input_json",
"=",
"input_json",
",",
"imported_data",
"=",
"imported_data",
")",
"# Keep track of highest adj_r2 score",
"if",
"self",
".",
"result",
"[",
"'Model'",
"]",
"[",
"'Optimal Model\\'s Metrics'",
"]",
"[",
"'adj_r2'",
"]",
">",
"optimal_score",
":",
"optimal_score",
"=",
"self",
".",
"result",
"[",
"'Model'",
"]",
"[",
"'Optimal Model\\'s Metrics'",
"]",
"[",
"'adj_r2'",
"]",
"optimal_model_file_name",
"=",
"self",
".",
"results_folder_name",
"+",
"'/results-'",
"+",
"str",
"(",
"self",
".",
"get_global_count",
"(",
")",
")",
"+",
"'.json'",
"# Wrapper.global_count += 1",
"print",
"(",
"'Most optimal model: '",
",",
"optimal_model_file_name",
")",
"freq",
"=",
"self",
".",
"result",
"[",
"'Comment'",
"]",
".",
"split",
"(",
"' '",
")",
"[",
"1",
"]",
"[",
":",
"-",
"1",
"]",
"time_feat",
"=",
"self",
".",
"result",
"[",
"'Comment'",
"]",
".",
"split",
"(",
"' '",
")",
"[",
"-",
"1",
"]",
"print",
"(",
"'Freq: '",
",",
"freq",
",",
"'Time Feature: '",
",",
"time_feat",
")"
] | Run models on different data configurations.
Note
----
The input json file should include ALL parameters.
Parameters
----------
file_name : str
Optional json file to read parameters.
imported_data : pd.DataFrame()
Pandas Dataframe containing data. | [
"Run",
"models",
"on",
"different",
"data",
"configurations",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Wrapper.py#L302-L373 |
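A minimal usage sketch for the `Wrapper.search` entry above. The JSON layout mirrors only the keys the method itself reads ('Import', 'Clean', 'Preprocess'); the file and folder names are made-up placeholders, and any further sections `read_json` needs are omitted.

```python
import json

# Skeleton of the parameter file search() expects; the values are assumptions.
config = {
    "Import": {
        "File Name": "meter.csv", "Folder Name": "data",
        "Head Row": 0, "Index Col": 0,
        "Convert Col": True, "Concat Files": False, "Save File": True,
    },
    "Clean": {"Frequency": "h"},   # overwritten with '15T'/'h'/'d' per iteration
    "Preprocess": {},              # Year/Month/Week/Time of Day/Day of Week flags
}                                  # are filled in for each of the 5 time features
with open("search_params.json", "w") as f:
    json.dump(config, f)

# wrapper = Wrapper()                    # construction not shown in this row
# wrapper.search("search_params.json")   # tries 3 frequencies x 5 time features
```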
2,101 | SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Wrapper.py | Wrapper.clean_data | def clean_data(self, data, rename_col=None, drop_col=None,
resample=True, freq='h', resampler='mean',
interpolate=True, limit=1, method='linear',
remove_na=True, remove_na_how='any',
remove_outliers=True, sd_val=3,
remove_out_of_bounds=True, low_bound=0, high_bound=float('inf'),
save_file=True):
""" Cleans dataframe according to user specifications and stores result in self.cleaned_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be cleaned.
rename_col : list(str)
List of new column names.
drop_col : list(str)
Columns to be dropped.
resample : bool
Indicates whether to resample data or not.
freq : str
Resampling frequency i.e. d, h, 15T...
resampler : str
Resampling type i.e. mean, max.
interpolate : bool
Indicates whether to interpolate data or not.
limit : int
Interpolation limit.
method : str
Interpolation method.
remove_na : bool
Indicates whether to remove NAs or not.
remove_na_how : str
Specifies how to remove NA i.e. all, any...
remove_outliers : bool
Indicates whether to remove outliers or not.
sd_val : int
Standard Deviation Value (specifies how many SDs away is a point considered an outlier)
remove_out_of_bounds : bool
Indicates whether to remove out of bounds datapoints or not.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing cleaned data.
"""
# Check to ensure data is a pandas dataframe
if not isinstance(data, pd.DataFrame):
raise TypeError('data has to be a pandas dataframe.')
# Create instance and clean the data
clean_data_obj = Clean_Data(data)
clean_data_obj.clean_data(resample=resample, freq=freq, resampler=resampler,
interpolate=interpolate, limit=limit, method=method,
remove_na=remove_na, remove_na_how=remove_na_how,
remove_outliers=remove_outliers, sd_val=sd_val,
remove_out_of_bounds=remove_out_of_bounds, low_bound=low_bound, high_bound=high_bound)
# Correlation plot
# fig = self.plot_data_obj.correlation_plot(clean_data_obj.cleaned_data)
# fig.savefig(self.results_folder_name + '/correlation_plot-' + str(Wrapper.global_count) + '.png')
if rename_col: # Rename columns of dataframe
clean_data_obj.rename_columns(rename_col)
if drop_col: # Drop columns of dataframe
clean_data_obj.drop_columns(drop_col)
# Store cleaned data in wrapper class
self.cleaned_data = clean_data_obj.cleaned_data
# Logging
self.result['Clean'] = {
'Rename Col': rename_col,
'Drop Col': drop_col,
'Resample': resample,
'Frequency': freq,
'Resampler': resampler,
'Interpolate': interpolate,
'Limit': limit,
'Method': method,
'Remove NA': remove_na,
'Remove NA How': remove_na_how,
'Remove Outliers': remove_outliers,
'SD Val': sd_val,
'Remove Out of Bounds': remove_out_of_bounds,
'Low Bound': low_bound,
'High Bound': str(high_bound) if high_bound == float('inf') else high_bound,
'Save File': save_file
}
if save_file:
f = self.results_folder_name + '/cleaned_data-' + str(self.get_global_count()) + '.csv'
self.cleaned_data.to_csv(f)
self.result['Clean']['Saved File'] = f
else:
self.result['Clean']['Saved File'] = ''
return self.cleaned_data | python | def clean_data(self, data, rename_col=None, drop_col=None,
resample=True, freq='h', resampler='mean',
interpolate=True, limit=1, method='linear',
remove_na=True, remove_na_how='any',
remove_outliers=True, sd_val=3,
remove_out_of_bounds=True, low_bound=0, high_bound=float('inf'),
save_file=True):
""" Cleans dataframe according to user specifications and stores result in self.cleaned_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be cleaned.
rename_col : list(str)
List of new column names.
drop_col : list(str)
Columns to be dropped.
resample : bool
Indicates whether to resample data or not.
freq : str
Resampling frequency i.e. d, h, 15T...
resampler : str
Resampling type i.e. mean, max.
interpolate : bool
Indicates whether to interpolate data or not.
limit : int
Interpolation limit.
method : str
Interpolation method.
remove_na : bool
Indicates whether to remove NAs or not.
remove_na_how : str
Specifies how to remove NA i.e. all, any...
remove_outliers : bool
Indicates whether to remove outliers or not.
sd_val : int
Standard Deviation Value (specifies how many SDs away is a point considered an outlier)
remove_out_of_bounds : bool
Indicates whether to remove out of bounds datapoints or not.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing cleaned data.
"""
# Check to ensure data is a pandas dataframe
if not isinstance(data, pd.DataFrame):
raise TypeError('data has to be a pandas dataframe.')
# Create instance and clean the data
clean_data_obj = Clean_Data(data)
clean_data_obj.clean_data(resample=resample, freq=freq, resampler=resampler,
interpolate=interpolate, limit=limit, method=method,
remove_na=remove_na, remove_na_how=remove_na_how,
remove_outliers=remove_outliers, sd_val=sd_val,
remove_out_of_bounds=remove_out_of_bounds, low_bound=low_bound, high_bound=high_bound)
# Correlation plot
# fig = self.plot_data_obj.correlation_plot(clean_data_obj.cleaned_data)
# fig.savefig(self.results_folder_name + '/correlation_plot-' + str(Wrapper.global_count) + '.png')
if rename_col: # Rename columns of dataframe
clean_data_obj.rename_columns(rename_col)
if drop_col: # Drop columns of dataframe
clean_data_obj.drop_columns(drop_col)
# Store cleaned data in wrapper class
self.cleaned_data = clean_data_obj.cleaned_data
# Logging
self.result['Clean'] = {
'Rename Col': rename_col,
'Drop Col': drop_col,
'Resample': resample,
'Frequency': freq,
'Resampler': resampler,
'Interpolate': interpolate,
'Limit': limit,
'Method': method,
'Remove NA': remove_na,
'Remove NA How': remove_na_how,
'Remove Outliers': remove_outliers,
'SD Val': sd_val,
'Remove Out of Bounds': remove_out_of_bounds,
'Low Bound': low_bound,
'High Bound': str(high_bound) if high_bound == float('inf') else high_bound,
'Save File': save_file
}
if save_file:
f = self.results_folder_name + '/cleaned_data-' + str(self.get_global_count()) + '.csv'
self.cleaned_data.to_csv(f)
self.result['Clean']['Saved File'] = f
else:
self.result['Clean']['Saved File'] = ''
return self.cleaned_data | [
"def",
"clean_data",
"(",
"self",
",",
"data",
",",
"rename_col",
"=",
"None",
",",
"drop_col",
"=",
"None",
",",
"resample",
"=",
"True",
",",
"freq",
"=",
"'h'",
",",
"resampler",
"=",
"'mean'",
",",
"interpolate",
"=",
"True",
",",
"limit",
"=",
"1",
",",
"method",
"=",
"'linear'",
",",
"remove_na",
"=",
"True",
",",
"remove_na_how",
"=",
"'any'",
",",
"remove_outliers",
"=",
"True",
",",
"sd_val",
"=",
"3",
",",
"remove_out_of_bounds",
"=",
"True",
",",
"low_bound",
"=",
"0",
",",
"high_bound",
"=",
"float",
"(",
"'inf'",
")",
",",
"save_file",
"=",
"True",
")",
":",
"# Check to ensure data is a pandas dataframe",
"if",
"not",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"raise",
"TypeError",
"(",
"'data has to be a pandas dataframe.'",
")",
"# Create instance and clean the data",
"clean_data_obj",
"=",
"Clean_Data",
"(",
"data",
")",
"clean_data_obj",
".",
"clean_data",
"(",
"resample",
"=",
"resample",
",",
"freq",
"=",
"freq",
",",
"resampler",
"=",
"resampler",
",",
"interpolate",
"=",
"interpolate",
",",
"limit",
"=",
"limit",
",",
"method",
"=",
"method",
",",
"remove_na",
"=",
"remove_na",
",",
"remove_na_how",
"=",
"remove_na_how",
",",
"remove_outliers",
"=",
"remove_outliers",
",",
"sd_val",
"=",
"sd_val",
",",
"remove_out_of_bounds",
"=",
"remove_out_of_bounds",
",",
"low_bound",
"=",
"low_bound",
",",
"high_bound",
"=",
"high_bound",
")",
"# Correlation plot",
"# fig = self.plot_data_obj.correlation_plot(clean_data_obj.cleaned_data)",
"# fig.savefig(self.results_folder_name + '/correlation_plot-' + str(Wrapper.global_count) + '.png')",
"if",
"rename_col",
":",
"# Rename columns of dataframe",
"clean_data_obj",
".",
"rename_columns",
"(",
"rename_col",
")",
"if",
"drop_col",
":",
"# Drop columns of dataframe",
"clean_data_obj",
".",
"drop_columns",
"(",
"drop_col",
")",
"# Store cleaned data in wrapper class",
"self",
".",
"cleaned_data",
"=",
"clean_data_obj",
".",
"cleaned_data",
"# Logging",
"self",
".",
"result",
"[",
"'Clean'",
"]",
"=",
"{",
"'Rename Col'",
":",
"rename_col",
",",
"'Drop Col'",
":",
"drop_col",
",",
"'Resample'",
":",
"resample",
",",
"'Frequency'",
":",
"freq",
",",
"'Resampler'",
":",
"resampler",
",",
"'Interpolate'",
":",
"interpolate",
",",
"'Limit'",
":",
"limit",
",",
"'Method'",
":",
"method",
",",
"'Remove NA'",
":",
"remove_na",
",",
"'Remove NA How'",
":",
"remove_na_how",
",",
"'Remove Outliers'",
":",
"remove_outliers",
",",
"'SD Val'",
":",
"sd_val",
",",
"'Remove Out of Bounds'",
":",
"remove_out_of_bounds",
",",
"'Low Bound'",
":",
"low_bound",
",",
"'High Bound'",
":",
"str",
"(",
"high_bound",
")",
"if",
"high_bound",
"==",
"float",
"(",
"'inf'",
")",
"else",
"high_bound",
",",
"'Save File'",
":",
"save_file",
"}",
"if",
"save_file",
":",
"f",
"=",
"self",
".",
"results_folder_name",
"+",
"'/cleaned_data-'",
"+",
"str",
"(",
"self",
".",
"get_global_count",
"(",
")",
")",
"+",
"'.csv'",
"self",
".",
"cleaned_data",
".",
"to_csv",
"(",
"f",
")",
"self",
".",
"result",
"[",
"'Clean'",
"]",
"[",
"'Saved File'",
"]",
"=",
"f",
"else",
":",
"self",
".",
"result",
"[",
"'Clean'",
"]",
"[",
"'Saved File'",
"]",
"=",
"''",
"return",
"self",
".",
"cleaned_data"
] | Cleans dataframe according to user specifications and stores result in self.cleaned_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be cleaned.
rename_col : list(str)
List of new column names.
drop_col : list(str)
Columns to be dropped.
resample : bool
Indicates whether to resample data or not.
freq : str
Resampling frequency i.e. d, h, 15T...
resampler : str
Resampling type i.e. mean, max.
interpolate : bool
Indicates whether to interpolate data or not.
limit : int
Interpolation limit.
method : str
Interpolation method.
remove_na : bool
Indicates whether to remove NAs or not.
remove_na_how : str
Specifies how to remove NA i.e. all, any...
remove_outliers : bool
Indicates whether to remove outliers or not.
sd_val : int
Standard Deviation Value (specifies how many SDs away is a point considered an outlier)
remove_out_of_bounds : bool
Indicates whether to remove out of bounds datapoints or not.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing cleaned data. | [
"Cleans",
"dataframe",
"according",
"to",
"user",
"specifications",
"and",
"stores",
"result",
"in",
"self",
".",
"cleaned_data",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Wrapper.py#L439-L543 |
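A hedged sketch of calling `clean_data` from the row above on synthetic 15-minute data; `wrapper` is an assumed `Wrapper` instance, and a DatetimeIndex is needed for the resample and interpolate steps, so the call itself is left commented.

```python
import numpy as np
import pandas as pd

idx = pd.date_range("2018-01-01", periods=96, freq="15T")
raw = pd.DataFrame({"OAT": np.random.normal(60, 5, 96),
                    "Power": np.random.normal(40, 8, 96)}, index=idx)

# cleaned = wrapper.clean_data(
#     raw,
#     resample=True, freq="h", resampler="mean",  # 15-minute data -> hourly means
#     interpolate=True, limit=1, method="linear", # fill at most 1 gap linearly
#     remove_outliers=True, sd_val=3,             # drop points >3 SDs from mean
#     remove_out_of_bounds=True, low_bound=0,     # power can't be negative
#     save_file=False,                            # skip the CSV side effect
# )
```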
2,102 | SoftwareDefinedBuildings/XBOS | dashboards/sitedash/app.py | prevmonday | def prevmonday(num):
"""
Return unix SECOND timestamp of "num" mondays ago
"""
today = get_today()
lastmonday = today - timedelta(days=today.weekday(), weeks=num)
return lastmonday | python | def prevmonday(num):
"""
Return unix SECOND timestamp of "num" mondays ago
"""
today = get_today()
lastmonday = today - timedelta(days=today.weekday(), weeks=num)
return lastmonday | [
"def",
"prevmonday",
"(",
"num",
")",
":",
"today",
"=",
"get_today",
"(",
")",
"lastmonday",
"=",
"today",
"-",
"timedelta",
"(",
"days",
"=",
"today",
".",
"weekday",
"(",
")",
",",
"weeks",
"=",
"num",
")",
"return",
"lastmonday"
] | Return unix SECOND timestamp of "num" mondays ago | [
"Return",
"unix",
"SECOND",
"timestamp",
"of",
"num",
"mondays",
"ago"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/dashboards/sitedash/app.py#L74-L80 |
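The arithmetic in `prevmonday` is easy to check standalone; note that, despite the docstring, the return value is a date-like object (whatever `get_today` yields minus a timedelta), not a unix-second timestamp.

```python
from datetime import date, timedelta

def get_today():               # stand-in for the module's get_today helper
    return date.today()

num = 2
today = get_today()
lastmonday = today - timedelta(days=today.weekday(), weeks=num)
print(lastmonday)              # Monday of the week two weeks before this one
```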
2,103 | SoftwareDefinedBuildings/XBOS | apps/consumption/iec.py | med_filt | def med_filt(x, k=201):
"""Apply a length-k median filter to a 1D array x.
Boundaries are extended by repeating endpoints.
"""
if x.ndim > 1:
x = np.squeeze(x)
med = np.median(x)
assert k % 2 == 1, "Median filter length must be odd."
assert x.ndim == 1, "Input must be one-dimensional."
k2 = (k - 1) // 2
y = np.zeros((len(x), k), dtype=x.dtype)
y[:, k2] = x
for i in range(k2):
j = k2 - i
y[j:, i] = x[:-j]
y[:j, i] = x[0]
y[:-j, -(i + 1)] = x[j:]
y[-j:, -(i + 1)] = med
return np.median(y, axis=1) | python | def med_filt(x, k=201):
"""Apply a length-k median filter to a 1D array x.
Boundaries are extended by repeating endpoints.
"""
if x.ndim > 1:
x = np.squeeze(x)
med = np.median(x)
assert k % 2 == 1, "Median filter length must be odd."
assert x.ndim == 1, "Input must be one-dimensional."
k2 = (k - 1) // 2
y = np.zeros((len(x), k), dtype=x.dtype)
y[:, k2] = x
for i in range(k2):
j = k2 - i
y[j:, i] = x[:-j]
y[:j, i] = x[0]
y[:-j, -(i + 1)] = x[j:]
y[-j:, -(i + 1)] = med
return np.median(y, axis=1) | [
"def",
"med_filt",
"(",
"x",
",",
"k",
"=",
"201",
")",
":",
"if",
"x",
".",
"ndim",
">",
"1",
":",
"x",
"=",
"np",
".",
"squeeze",
"(",
"x",
")",
"med",
"=",
"np",
".",
"median",
"(",
"x",
")",
"assert",
"k",
"%",
"2",
"==",
"1",
",",
"\"Median filter length must be odd.\"",
"assert",
"x",
".",
"ndim",
"==",
"1",
",",
"\"Input must be one-dimensional.\"",
"k2",
"=",
"(",
"k",
"-",
"1",
")",
"//",
"2",
"y",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"x",
")",
",",
"k",
")",
",",
"dtype",
"=",
"x",
".",
"dtype",
")",
"y",
"[",
":",
",",
"k2",
"]",
"=",
"x",
"for",
"i",
"in",
"range",
"(",
"k2",
")",
":",
"j",
"=",
"k2",
"-",
"i",
"y",
"[",
"j",
":",
",",
"i",
"]",
"=",
"x",
"[",
":",
"-",
"j",
"]",
"y",
"[",
":",
"j",
",",
"i",
"]",
"=",
"x",
"[",
"0",
"]",
"y",
"[",
":",
"-",
"j",
",",
"-",
"(",
"i",
"+",
"1",
")",
"]",
"=",
"x",
"[",
"j",
":",
"]",
"y",
"[",
"-",
"j",
":",
",",
"-",
"(",
"i",
"+",
"1",
")",
"]",
"=",
"med",
"return",
"np",
".",
"median",
"(",
"y",
",",
"axis",
"=",
"1",
")"
] | Apply a length-k median filter to a 1D array x.
Boundaries are extended by repeating endpoints. | [
"Apply",
"a",
"length",
"-",
"k",
"median",
"filter",
"to",
"a",
"1D",
"array",
"x",
".",
"Boundaries",
"are",
"extended",
"by",
"repeating",
"endpoints",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/consumption/iec.py#L114-L132 |
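A quick self-check of `med_filt` on a spiky step signal, assuming the function above and numpy are in scope: a lone spike should vanish, while the boundaries keep their plateau values.

```python
import numpy as np

x = np.concatenate([np.zeros(300), np.ones(300)])
x[150] = 25.0                  # inject a single spike
y = med_filt(x, k=201)         # k must be odd or the assert inside fires
assert y[150] == 0.0           # 200 of the 201 window values are 0, so median is 0
assert y[0] == 0.0 and y[-1] == 1.0
```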
2,104 | SoftwareDefinedBuildings/XBOS | apps/data_analysis/XBOS_data_analytics/Wrapper.py | Wrapper.preprocess_data | def preprocess_data(self, data,
hdh_cpoint=65, cdh_cpoint=65, col_hdh_cdh=None,
col_degree=None, degree=None,
standardize=False, normalize=False,
year=False, month=False, week=False, tod=False, dow=False,
save_file=True):
""" Preprocesses dataframe according to user specifications and stores result in self.preprocessed_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be preprocessed.
hdh_cpoint : int
Heating degree hours. Defaults to 65.
cdh_cpoint : int
Cooling degree hours. Defaults to 65.
col_hdh_cdh : str
Column name which contains the outdoor air temperature.
col_degree : list(str)
Column to exponentiate.
degree : list(str)
Exponentiation degree.
standardize : bool
Standardize data.
normalize : bool
Normalize data.
year : bool
Year.
month : bool
Month.
week : bool
Week.
tod : bool
Time of Day.
dow : bool
Day of Week.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing preprocessed data.
"""
# Check to ensure data is a pandas dataframe
if not isinstance(data, pd.DataFrame):
raise SystemError('data has to be a pandas dataframe.')
# Create instance
preprocess_data_obj = Preprocess_Data(data)
if col_hdh_cdh:
preprocess_data_obj.add_degree_days(col=col_hdh_cdh, hdh_cpoint=hdh_cpoint, cdh_cpoint=cdh_cpoint)
preprocess_data_obj.add_col_features(col=col_degree, degree=degree)
if standardize:
preprocess_data_obj.standardize()
if normalize:
preprocess_data_obj.normalize()
preprocess_data_obj.add_time_features(year=year, month=month, week=week, tod=tod, dow=dow)
# Store preprocessed data in wrapper class
self.preprocessed_data = preprocess_data_obj.preprocessed_data
# Logging
self.result['Preprocess'] = {
'HDH CPoint': hdh_cpoint,
'CDH CPoint': cdh_cpoint,
'HDH CDH Calc Col': col_hdh_cdh,
'Col Degree': col_degree,
'Degree': degree,
'Standardize': standardize,
'Normalize': normalize,
'Year': year,
'Month': month,
'Week': week,
'Time of Day': tod,
'Day of Week': dow,
'Save File': save_file
}
if save_file:
f = self.results_folder_name + '/preprocessed_data-' + str(self.get_global_count()) + '.csv'
self.preprocessed_data.to_csv(f)
self.result['Preprocess']['Saved File'] = f
else:
self.result['Preprocess']['Saved File'] = ''
return self.preprocessed_data | python | def preprocess_data(self, data,
hdh_cpoint=65, cdh_cpoint=65, col_hdh_cdh=None,
col_degree=None, degree=None,
standardize=False, normalize=False,
year=False, month=False, week=False, tod=False, dow=False,
save_file=True):
""" Preprocesses dataframe according to user specifications and stores result in self.preprocessed_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be preprocessed.
hdh_cpoint : int
Heating degree hours. Defaults to 65.
cdh_cpoint : int
Cooling degree hours. Defaults to 65.
col_hdh_cdh : str
Column name which contains the outdoor air temperature.
col_degree : list(str)
Column to exponentiate.
degree : list(str)
Exponentiation degree.
standardize : bool
Standardize data.
normalize : bool
Normalize data.
year : bool
Year.
month : bool
Month.
week : bool
Week.
tod : bool
Time of Day.
dow : bool
Day of Week.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing preprocessed data.
"""
# Check to ensure data is a pandas dataframe
if not isinstance(data, pd.DataFrame):
raise SystemError('data has to be a pandas dataframe.')
# Create instance
preprocess_data_obj = Preprocess_Data(data)
if col_hdh_cdh:
preprocess_data_obj.add_degree_days(col=col_hdh_cdh, hdh_cpoint=hdh_cpoint, cdh_cpoint=cdh_cpoint)
preprocess_data_obj.add_col_features(col=col_degree, degree=degree)
if standardize:
preprocess_data_obj.standardize()
if normalize:
preprocess_data_obj.normalize()
preprocess_data_obj.add_time_features(year=year, month=month, week=week, tod=tod, dow=dow)
# Store preprocessed data in wrapper class
self.preprocessed_data = preprocess_data_obj.preprocessed_data
# Logging
self.result['Preprocess'] = {
'HDH CPoint': hdh_cpoint,
'CDH CPoint': cdh_cpoint,
'HDH CDH Calc Col': col_hdh_cdh,
'Col Degree': col_degree,
'Degree': degree,
'Standardize': standardize,
'Normalize': normalize,
'Year': year,
'Month': month,
'Week': week,
'Time of Day': tod,
'Day of Week': dow,
'Save File': save_file
}
if save_file:
f = self.results_folder_name + '/preprocessed_data-' + str(self.get_global_count()) + '.csv'
self.preprocessed_data.to_csv(f)
self.result['Preprocess']['Saved File'] = f
else:
self.result['Preprocess']['Saved File'] = ''
return self.preprocessed_data | [
"def",
"preprocess_data",
"(",
"self",
",",
"data",
",",
"hdh_cpoint",
"=",
"65",
",",
"cdh_cpoint",
"=",
"65",
",",
"col_hdh_cdh",
"=",
"None",
",",
"col_degree",
"=",
"None",
",",
"degree",
"=",
"None",
",",
"standardize",
"=",
"False",
",",
"normalize",
"=",
"False",
",",
"year",
"=",
"False",
",",
"month",
"=",
"False",
",",
"week",
"=",
"False",
",",
"tod",
"=",
"False",
",",
"dow",
"=",
"False",
",",
"save_file",
"=",
"True",
")",
":",
"# Check to ensure data is a pandas dataframe",
"if",
"not",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"raise",
"SystemError",
"(",
"'data has to be a pandas dataframe.'",
")",
"# Create instance",
"preprocess_data_obj",
"=",
"Preprocess_Data",
"(",
"data",
")",
"if",
"col_hdh_cdh",
":",
"preprocess_data_obj",
".",
"add_degree_days",
"(",
"col",
"=",
"col_hdh_cdh",
",",
"hdh_cpoint",
"=",
"hdh_cpoint",
",",
"cdh_cpoint",
"=",
"cdh_cpoint",
")",
"preprocess_data_obj",
".",
"add_col_features",
"(",
"col",
"=",
"col_degree",
",",
"degree",
"=",
"degree",
")",
"if",
"standardize",
":",
"preprocess_data_obj",
".",
"standardize",
"(",
")",
"if",
"normalize",
":",
"preprocess_data_obj",
".",
"normalize",
"(",
")",
"preprocess_data_obj",
".",
"add_time_features",
"(",
"year",
"=",
"year",
",",
"month",
"=",
"month",
",",
"week",
"=",
"week",
",",
"tod",
"=",
"tod",
",",
"dow",
"=",
"dow",
")",
"# Store preprocessed data in wrapper class",
"self",
".",
"preprocessed_data",
"=",
"preprocess_data_obj",
".",
"preprocessed_data",
"# Logging",
"self",
".",
"result",
"[",
"'Preprocess'",
"]",
"=",
"{",
"'HDH CPoint'",
":",
"hdh_cpoint",
",",
"'CDH CPoint'",
":",
"cdh_cpoint",
",",
"'HDH CDH Calc Col'",
":",
"col_hdh_cdh",
",",
"'Col Degree'",
":",
"col_degree",
",",
"'Degree'",
":",
"degree",
",",
"'Standardize'",
":",
"standardize",
",",
"'Normalize'",
":",
"normalize",
",",
"'Year'",
":",
"year",
",",
"'Month'",
":",
"month",
",",
"'Week'",
":",
"week",
",",
"'Time of Day'",
":",
"tod",
",",
"'Day of Week'",
":",
"dow",
",",
"'Save File'",
":",
"save_file",
"}",
"if",
"save_file",
":",
"f",
"=",
"self",
".",
"results_folder_name",
"+",
"'/preprocessed_data-'",
"+",
"str",
"(",
"self",
".",
"get_global_count",
"(",
")",
")",
"+",
"'.csv'",
"self",
".",
"preprocessed_data",
".",
"to_csv",
"(",
"f",
")",
"self",
".",
"result",
"[",
"'Preprocess'",
"]",
"[",
"'Saved File'",
"]",
"=",
"f",
"else",
":",
"self",
".",
"result",
"[",
"'Preprocess'",
"]",
"[",
"'Saved File'",
"]",
"=",
"''",
"return",
"self",
".",
"preprocessed_data"
] | Preprocesses dataframe according to user specifications and stores result in self.preprocessed_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be preprocessed.
hdh_cpoint : int
Heating degree hours. Defaults to 65.
cdh_cpoint : int
Cooling degree hours. Defaults to 65.
col_hdh_cdh : str
Column name which contains the outdoor air temperature.
col_degree : list(str)
Column to exponentiate.
degree : list(str)
Exponentiation degree.
standardize : bool
Standardize data.
normalize : bool
Normalize data.
year : bool
Year.
month : bool
Month.
week : bool
Week.
tod : bool
Time of Day.
dow : bool
Day of Week.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing preprocessed data. | [
"Preprocesses",
"dataframe",
"according",
"to",
"user",
"specifications",
"and",
"stores",
"result",
"in",
"self",
".",
"preprocessed_data",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/data_analysis/XBOS_data_analytics/Wrapper.py#L544-L634 |
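A hedged continuation of the pipeline for the `preprocess_data` row above; `wrapper` and `cleaned` are assumed to come from the `clean_data` sketch earlier, with 'OAT' as the outdoor-air temperature column, so the call stays commented.

```python
# features = wrapper.preprocess_data(
#     cleaned,
#     col_hdh_cdh="OAT",             # derive HDH/CDH from outdoor air temperature
#     hdh_cpoint=65, cdh_cpoint=65,  # 65 degF balance points (the defaults)
#     tod=True, dow=True,            # add time-of-day and day-of-week features
#     save_file=False,
# )
```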
2,105 | SoftwareDefinedBuildings/XBOS | apps/data_analysis/XBOS_data_analytics/Wrapper.py | Wrapper.model | def model(self, data,
ind_col=None, dep_col=None,
project_ind_col=None,
baseline_period=[None, None], projection_period=None, exclude_time_period=None,
alphas=np.logspace(-4,1,30),
cv=3, plot=True, figsize=None,
custom_model_func=None):
""" Split data into baseline and projection periods, run models on them and display metrics & plots.
Parameters
----------
data : pd.DataFrame()
Dataframe to model.
ind_col : list(str)
Independent column(s) of dataframe. Defaults to all columns except the last.
dep_col : str
Dependent column of dataframe.
project_ind_col : list(str)
Independent column(s) to use for projection. If none, use ind_col.
baseline_period : list(str)
List of time periods to split the data into baseline periods. It needs to have a start and an end date.
projection_period : list(str)
List of time periods to split the data into projection periods. It needs to have a start and an end date.
exclude_time_period : list(str)
List of time periods to exclude for modeling.
alphas : list(float)
List of alphas to run regression on.
cv : int
Number of folds for cross-validation.
plot : bool
Specifies whether to save plots or not.
figsize : tuple
Size of the plots.
custom_model_func : function
Model with specific hyper-parameters provided by user.
Returns
-------
dict
Metrics of the optimal/best model.
"""
# Check to ensure data is a pandas dataframe
if not isinstance(data, pd.DataFrame):
raise SystemError('data has to be a pandas dataframe.')
# Create instance
model_data_obj = Model_Data(data, ind_col, dep_col, alphas, cv, exclude_time_period, baseline_period, projection_period)
# Split data into baseline and projection
model_data_obj.split_data()
# Logging
self.result['Model'] = {
'Independent Col': ind_col,
'Dependent Col': dep_col,
'Projection Independent Col': project_ind_col,
'Baseline Period': baseline_period,
'Projection Period': projection_period,
'Exclude Time Period': exclude_time_period,
'Alphas': list(alphas),
'CV': cv,
'Plot': plot,
'Fig Size': figsize
}
# Runs all models on the data and returns optimal model
all_metrics = model_data_obj.run_models()
self.result['Model']['All Model\'s Metrics'] = all_metrics
# CHECK: Define custom model's parameter and return types in documentation.
if custom_model_func:
self.result['Model']['Custom Model\'s Metrics'] = model_data_obj.custom_model(custom_model_func)
# Fit optimal model to data
self.result['Model']['Optimal Model\'s Metrics'] = model_data_obj.best_model_fit()
if plot:
# Use project_ind_col if projecting into the future (no input data other than weather data)
input_col = model_data_obj.input_col if not project_ind_col else project_ind_col
fig, y_true, y_pred = self.plot_data_obj.baseline_projection_plot(model_data_obj.y_true, model_data_obj.y_pred,
model_data_obj.baseline_period, model_data_obj.projection_period,
model_data_obj.best_model_name, model_data_obj.best_metrics['adj_r2'],
model_data_obj.original_data,
input_col, model_data_obj.output_col,
model_data_obj.best_model,
self.result['Site'])
fig.savefig(self.results_folder_name + '/baseline_projection_plot-' + str(self.get_global_count()) + '.png')
if not y_true.empty and not y_pred.empty:
saving_absolute = (y_pred - y_true).sum()
saving_perc = (saving_absolute / y_pred.sum()) * 100
self.result['Energy Savings (%)'] = float(saving_perc)
self.result['Energy Savings (absolute)'] = saving_absolute
# Temporary
self.project_df['true'] = y_true
self.project_df['pred'] = y_pred
# Calculate uncertainty of savings
self.result['Uncertainity'] = self.uncertainity_equation(model_data_obj, y_true, y_pred, 0.9)
else:
print('y_true: ', y_true)
print('y_pred: ', y_pred)
print('Error: y_true and y_pred are empty. Default to -1.0 savings.')
self.result['Energy Savings (%)'] = float(-1.0)
self.result['Energy Savings (absolute)'] = float(-1.0)
return self.best_metrics | python | def model(self, data,
ind_col=None, dep_col=None,
project_ind_col=None,
baseline_period=[None, None], projection_period=None, exclude_time_period=None,
alphas=np.logspace(-4,1,30),
cv=3, plot=True, figsize=None,
custom_model_func=None):
""" Split data into baseline and projection periods, run models on them and display metrics & plots.
Parameters
----------
data : pd.DataFrame()
Dataframe to model.
ind_col : list(str)
Independent column(s) of dataframe. Defaults to all columns except the last.
dep_col : str
Dependent column of dataframe.
project_ind_col : list(str)
Independent column(s) to use for projection. If none, use ind_col.
baseline_period : list(str)
List of time periods to split the data into baseline periods. It needs to have a start and an end date.
projection_period : list(str)
List of time periods to split the data into projection periods. It needs to have a start and an end date.
exclude_time_period : list(str)
List of time periods to exclude for modeling.
alphas : list(float)
List of alphas to run regression on.
cv : int
Number of folds for cross-validation.
plot : bool
Specifies whether to save plots or not.
figsize : tuple
Size of the plots.
custom_model_func : function
Model with specific hyper-parameters provided by user.
Returns
-------
dict
Metrics of the optimal/best model.
"""
# Check to ensure data is a pandas dataframe
if not isinstance(data, pd.DataFrame):
raise SystemError('data has to be a pandas dataframe.')
# Create instance
model_data_obj = Model_Data(data, ind_col, dep_col, alphas, cv, exclude_time_period, baseline_period, projection_period)
# Split data into baseline and projection
model_data_obj.split_data()
# Logging
self.result['Model'] = {
'Independent Col': ind_col,
'Dependent Col': dep_col,
'Projection Independent Col': project_ind_col,
'Baseline Period': baseline_period,
'Projection Period': projection_period,
'Exclude Time Period': exclude_time_period,
'Alphas': list(alphas),
'CV': cv,
'Plot': plot,
'Fig Size': figsize
}
# Runs all models on the data and returns optimal model
all_metrics = model_data_obj.run_models()
self.result['Model']['All Model\'s Metrics'] = all_metrics
# CHECK: Define custom model's parameter and return types in documentation.
if custom_model_func:
self.result['Model']['Custom Model\'s Metrics'] = model_data_obj.custom_model(custom_model_func)
# Fit optimal model to data
self.result['Model']['Optimal Model\'s Metrics'] = model_data_obj.best_model_fit()
if plot:
# Use project_ind_col if projecting into the future (no input data other than weather data)
input_col = model_data_obj.input_col if not project_ind_col else project_ind_col
fig, y_true, y_pred = self.plot_data_obj.baseline_projection_plot(model_data_obj.y_true, model_data_obj.y_pred,
model_data_obj.baseline_period, model_data_obj.projection_period,
model_data_obj.best_model_name, model_data_obj.best_metrics['adj_r2'],
model_data_obj.original_data,
input_col, model_data_obj.output_col,
model_data_obj.best_model,
self.result['Site'])
fig.savefig(self.results_folder_name + '/baseline_projection_plot-' + str(self.get_global_count()) + '.png')
if not y_true.empty and not y_pred.empty:
saving_absolute = (y_pred - y_true).sum()
saving_perc = (saving_absolute / y_pred.sum()) * 100
self.result['Energy Savings (%)'] = float(saving_perc)
self.result['Energy Savings (absolute)'] = saving_absolute
# Temporary
self.project_df['true'] = y_true
self.project_df['pred'] = y_pred
# Calculate uncertainty of savings
self.result['Uncertainity'] = self.uncertainity_equation(model_data_obj, y_true, y_pred, 0.9)
else:
print('y_true: ', y_true)
print('y_pred: ', y_pred)
print('Error: y_true and y_pred are empty. Default to -1.0 savings.')
self.result['Energy Savings (%)'] = float(-1.0)
self.result['Energy Savings (absolute)'] = float(-1.0)
return self.best_metrics | [
"def",
"model",
"(",
"self",
",",
"data",
",",
"ind_col",
"=",
"None",
",",
"dep_col",
"=",
"None",
",",
"project_ind_col",
"=",
"None",
",",
"baseline_period",
"=",
"[",
"None",
",",
"None",
"]",
",",
"projection_period",
"=",
"None",
",",
"exclude_time_period",
"=",
"None",
",",
"alphas",
"=",
"np",
".",
"logspace",
"(",
"-",
"4",
",",
"1",
",",
"30",
")",
",",
"cv",
"=",
"3",
",",
"plot",
"=",
"True",
",",
"figsize",
"=",
"None",
",",
"custom_model_func",
"=",
"None",
")",
":",
"# Check to ensure data is a pandas dataframe",
"if",
"not",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"raise",
"SystemError",
"(",
"'data has to be a pandas dataframe.'",
")",
"# Create instance",
"model_data_obj",
"=",
"Model_Data",
"(",
"data",
",",
"ind_col",
",",
"dep_col",
",",
"alphas",
",",
"cv",
",",
"exclude_time_period",
",",
"baseline_period",
",",
"projection_period",
")",
"# Split data into baseline and projection",
"model_data_obj",
".",
"split_data",
"(",
")",
"# Logging",
"self",
".",
"result",
"[",
"'Model'",
"]",
"=",
"{",
"'Independent Col'",
":",
"ind_col",
",",
"'Dependent Col'",
":",
"dep_col",
",",
"'Projection Independent Col'",
":",
"project_ind_col",
",",
"'Baseline Period'",
":",
"baseline_period",
",",
"'Projection Period'",
":",
"projection_period",
",",
"'Exclude Time Period'",
":",
"exclude_time_period",
",",
"'Alphas'",
":",
"list",
"(",
"alphas",
")",
",",
"'CV'",
":",
"cv",
",",
"'Plot'",
":",
"plot",
",",
"'Fig Size'",
":",
"figsize",
"}",
"# Runs all models on the data and returns optimal model",
"all_metrics",
"=",
"model_data_obj",
".",
"run_models",
"(",
")",
"self",
".",
"result",
"[",
"'Model'",
"]",
"[",
"'All Model\\'s Metrics'",
"]",
"=",
"all_metrics",
"# CHECK: Define custom model's parameter and return types in documentation.",
"if",
"custom_model_func",
":",
"self",
".",
"result",
"[",
"'Model'",
"]",
"[",
"'Custom Model\\'s Metrics'",
"]",
"=",
"model_data_obj",
".",
"custom_model",
"(",
"custom_model_func",
")",
"# Fit optimal model to data",
"self",
".",
"result",
"[",
"'Model'",
"]",
"[",
"'Optimal Model\\'s Metrics'",
"]",
"=",
"model_data_obj",
".",
"best_model_fit",
"(",
")",
"if",
"plot",
":",
"# Use project_ind_col if projecting into the future (no input data other than weather data)",
"input_col",
"=",
"model_data_obj",
".",
"input_col",
"if",
"not",
"project_ind_col",
"else",
"project_ind_col",
"fig",
",",
"y_true",
",",
"y_pred",
"=",
"self",
".",
"plot_data_obj",
".",
"baseline_projection_plot",
"(",
"model_data_obj",
".",
"y_true",
",",
"model_data_obj",
".",
"y_pred",
",",
"model_data_obj",
".",
"baseline_period",
",",
"model_data_obj",
".",
"projection_period",
",",
"model_data_obj",
".",
"best_model_name",
",",
"model_data_obj",
".",
"best_metrics",
"[",
"'adj_r2'",
"]",
",",
"model_data_obj",
".",
"original_data",
",",
"input_col",
",",
"model_data_obj",
".",
"output_col",
",",
"model_data_obj",
".",
"best_model",
",",
"self",
".",
"result",
"[",
"'Site'",
"]",
")",
"fig",
".",
"savefig",
"(",
"self",
".",
"results_folder_name",
"+",
"'/baseline_projection_plot-'",
"+",
"str",
"(",
"self",
".",
"get_global_count",
"(",
")",
")",
"+",
"'.png'",
")",
"if",
"not",
"y_true",
".",
"empty",
"and",
"not",
"y_pred",
".",
"empty",
":",
"saving_absolute",
"=",
"(",
"y_pred",
"-",
"y_true",
")",
".",
"sum",
"(",
")",
"saving_perc",
"=",
"(",
"saving_absolute",
"/",
"y_pred",
".",
"sum",
"(",
")",
")",
"*",
"100",
"self",
".",
"result",
"[",
"'Energy Savings (%)'",
"]",
"=",
"float",
"(",
"saving_perc",
")",
"self",
".",
"result",
"[",
"'Energy Savings (absolute)'",
"]",
"=",
"saving_absolute",
"# Temporary",
"self",
".",
"project_df",
"[",
"'true'",
"]",
"=",
"y_true",
"self",
".",
"project_df",
"[",
"'pred'",
"]",
"=",
"y_pred",
"# Calculate uncertainity of savings",
"self",
".",
"result",
"[",
"'Uncertainity'",
"]",
"=",
"self",
".",
"uncertainity_equation",
"(",
"model_data_obj",
",",
"y_true",
",",
"y_pred",
",",
"0.9",
")",
"else",
":",
"print",
"(",
"'y_true: '",
",",
"y_true",
")",
"print",
"(",
"'y_pred: '",
",",
"y_pred",
")",
"print",
"(",
"'Error: y_true and y_pred are empty. Default to -1.0 savings.'",
")",
"self",
".",
"result",
"[",
"'Energy Savings (%)'",
"]",
"=",
"float",
"(",
"-",
"1.0",
")",
"self",
".",
"result",
"[",
"'Energy Savings (absolute)'",
"]",
"=",
"float",
"(",
"-",
"1.0",
")",
"return",
"self",
".",
"best_metrics"
] | Split data into baseline and projection periods, run models on them and display metrics & plots.
Parameters
----------
data : pd.DataFrame()
Dataframe to model.
ind_col : list(str)
Independent column(s) of dataframe. Defaults to all columns except the last.
dep_col : str
Dependent column of dataframe.
project_ind_col : list(str)
Independent column(s) to use for projection. If none, use ind_col.
baseline_period : list(str)
List of time periods to split the data into baseline periods. It needs to have a start and an end date.
projection_period : list(str)
List of time periods to split the data into projection periods. It needs to have a start and an end date.
exclude_time_period : list(str)
List of time periods to exclude for modeling.
alphas : list(int)
List of alphas to run regression on.
cv : int
Number of folds for cross-validation.
plot : bool
Specifies whether to save plots or not.
figsize : tuple
Size of the plots.
custom_model_func : function
Model with specific hyper-parameters provided by user.
Returns
-------
dict
Metrics of the optimal/best model. | [
"Split",
"data",
"into",
"baseline",
"and",
"projection",
"periods",
"run",
"models",
"on",
"them",
"and",
"display",
"metrics",
"&",
"plots",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/data_analysis/XBOS_data_analytics/Wrapper.py#L637-L749 |
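A hedged sketch for the `model` row above: split the preprocessed features into baseline and projection windows, then read the savings back out of `wrapper.result`. The dates are placeholder examples; each period list needs a start and an end.

```python
# metrics = wrapper.model(
#     features,
#     dep_col="Power",                                # column to predict
#     baseline_period=["2018-01-01", "2018-06-30"],
#     projection_period=["2018-07-01", "2018-12-31"],
#     cv=3, plot=False,                               # skip the projection figure
# )
# print(wrapper.result["Energy Savings (%)"],
#       wrapper.result["Energy Savings (absolute)"])
```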
2,106 | SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | make_dataframe | def make_dataframe(result):
"""
Turns the results of one of the data API calls into a pandas dataframe
"""
import pandas as pd
ret = {}
if isinstance(result,dict):
if 'timeseries' in result:
result = result['timeseries']
for uuid, data in result.items():
df = pd.DataFrame(data)
if len(df.columns) == 5: # statistical data
df.columns = ['time','min','mean','max','count']
else:
df.columns = ['time','value']
df['time'] = pd.to_datetime(df['time'],unit='ns')
df = df.set_index(df.pop('time'))
ret[uuid] = df
return ret | python | def make_dataframe(result):
"""
Turns the results of one of the data API calls into a pandas dataframe
"""
import pandas as pd
ret = {}
if isinstance(result,dict):
if 'timeseries' in result:
result = result['timeseries']
for uuid, data in result.items():
df = pd.DataFrame(data)
if len(df.columns) == 5: # statistical data
df.columns = ['time','min','mean','max','count']
else:
df.columns = ['time','value']
df['time'] = pd.to_datetime(df['time'],unit='ns')
df = df.set_index(df.pop('time'))
ret[uuid] = df
return ret | [
"def",
"make_dataframe",
"(",
"result",
")",
":",
"import",
"pandas",
"as",
"pd",
"ret",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"result",
",",
"dict",
")",
":",
"if",
"'timeseries'",
"in",
"result",
":",
"result",
"=",
"result",
"[",
"'timeseries'",
"]",
"for",
"uuid",
",",
"data",
"in",
"result",
".",
"items",
"(",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
")",
"if",
"len",
"(",
"df",
".",
"columns",
")",
"==",
"5",
":",
"# statistical data",
"df",
".",
"columns",
"=",
"[",
"'time'",
",",
"'min'",
",",
"'mean'",
",",
"'max'",
",",
"'count'",
"]",
"else",
":",
"df",
".",
"columns",
"=",
"[",
"'time'",
",",
"'value'",
"]",
"df",
"[",
"'time'",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"'time'",
"]",
",",
"unit",
"=",
"'ns'",
")",
"df",
"=",
"df",
".",
"set_index",
"(",
"df",
".",
"pop",
"(",
"'time'",
")",
")",
"ret",
"[",
"uuid",
"]",
"=",
"df",
"return",
"ret"
] | Turns the results of one of the data API calls into a pandas dataframe | [
"Turns",
"the",
"results",
"of",
"one",
"of",
"the",
"data",
"API",
"calls",
"into",
"a",
"pandas",
"dataframe"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L247-L265 |
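`make_dataframe` accepts either `{uuid: rows}` directly or the same mapping under a 'timeseries' key; a tiny synthetic payload (made-up uuid, nanosecond timestamps for 2018-01-01) exercises the two-column branch, assuming the function above and pandas are importable.

```python
result = {"timeseries": {
    "11d3eb9f-aaaa-bbbb-cccc-000000000000": [
        [1514764800000000000, 40.5],   # 2018-01-01T00:00:00 in ns
        [1514768400000000000, 41.2],   # one hour later
    ]
}}
frames = make_dataframe(result)        # function from the row above
df = frames["11d3eb9f-aaaa-bbbb-cccc-000000000000"]
print(df)                              # 'value' column on a DatetimeIndex
```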
2,107 | SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.query | def query(self, query, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Runs the given pundat query and returns the results as a Python object.
Arguments:
[query]: the query string
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if archiver == "":
archiver = self.archivers[0]
nonce = random.randint(0, 2**32)
ev = threading.Event()
response = {}
def _handleresult(msg):
# decode, throw away if not correct nonce
got_response = False
error = getError(nonce, msg)
if error is not None:
got_response = True
response["error"] = error
metadata = getMetadata(nonce, msg)
if metadata is not None:
got_response = True
response["metadata"] = metadata
timeseries = getTimeseries(nonce, msg)
if timeseries is not None:
got_response = True
response["timeseries"] = timeseries
if got_response:
ev.set()
vk = self.vk[:-1] # remove last part of VK because archiver doesn't expect it
# set up receiving
self.c.subscribe("{0}/s.giles/_/i.archiver/signal/{1},queries".format(archiver, vk), _handleresult)
# execute query
q_struct = msgpack.packb({"Query": query, "Nonce": nonce})
po = PayloadObject((2,0,8,1), None, q_struct)
self.c.publish("{0}/s.giles/_/i.archiver/slot/query".format(archiver), payload_objects=(po,))
ev.wait(timeout)
if len(response) == 0: # no results
raise TimeoutException("Query of {0} timed out".format(query))
return response | python | def query(self, query, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Runs the given pundat query and returns the results as a Python object.
Arguments:
[query]: the query string
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if archiver == "":
archiver = self.archivers[0]
nonce = random.randint(0, 2**32)
ev = threading.Event()
response = {}
def _handleresult(msg):
# decode, throw away if not correct nonce
got_response = False
error = getError(nonce, msg)
if error is not None:
got_response = True
response["error"] = error
metadata = getMetadata(nonce, msg)
if metadata is not None:
got_response = True
response["metadata"] = metadata
timeseries = getTimeseries(nonce, msg)
if timeseries is not None:
got_response = True
response["timeseries"] = timeseries
if got_response:
ev.set()
vk = self.vk[:-1] # remove last part of VK because archiver doesn't expect it
# set up receiving
self.c.subscribe("{0}/s.giles/_/i.archiver/signal/{1},queries".format(archiver, vk), _handleresult)
# execute query
q_struct = msgpack.packb({"Query": query, "Nonce": nonce})
po = PayloadObject((2,0,8,1), None, q_struct)
self.c.publish("{0}/s.giles/_/i.archiver/slot/query".format(archiver), payload_objects=(po,))
ev.wait(timeout)
if len(response) == 0: # no results
raise TimeoutException("Query of {0} timed out".format(query))
return response | [
"def",
"query",
"(",
"self",
",",
"query",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"if",
"archiver",
"==",
"\"\"",
":",
"archiver",
"=",
"self",
".",
"archivers",
"[",
"0",
"]",
"nonce",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"2",
"**",
"32",
")",
"ev",
"=",
"threading",
".",
"Event",
"(",
")",
"response",
"=",
"{",
"}",
"def",
"_handleresult",
"(",
"msg",
")",
":",
"# decode, throw away if not correct nonce",
"got_response",
"=",
"False",
"error",
"=",
"getError",
"(",
"nonce",
",",
"msg",
")",
"if",
"error",
"is",
"not",
"None",
":",
"got_response",
"=",
"True",
"response",
"[",
"\"error\"",
"]",
"=",
"error",
"metadata",
"=",
"getMetadata",
"(",
"nonce",
",",
"msg",
")",
"if",
"metadata",
"is",
"not",
"None",
":",
"got_response",
"=",
"True",
"response",
"[",
"\"metadata\"",
"]",
"=",
"metadata",
"timeseries",
"=",
"getTimeseries",
"(",
"nonce",
",",
"msg",
")",
"if",
"timeseries",
"is",
"not",
"None",
":",
"got_response",
"=",
"True",
"response",
"[",
"\"timeseries\"",
"]",
"=",
"timeseries",
"if",
"got_response",
":",
"ev",
".",
"set",
"(",
")",
"vk",
"=",
"self",
".",
"vk",
"[",
":",
"-",
"1",
"]",
"# remove last part of VK because archiver doesn't expect it",
"# set up receiving",
"self",
".",
"c",
".",
"subscribe",
"(",
"\"{0}/s.giles/_/i.archiver/signal/{1},queries\"",
".",
"format",
"(",
"archiver",
",",
"vk",
")",
",",
"_handleresult",
")",
"# execute query",
"q_struct",
"=",
"msgpack",
".",
"packb",
"(",
"{",
"\"Query\"",
":",
"query",
",",
"\"Nonce\"",
":",
"nonce",
"}",
")",
"po",
"=",
"PayloadObject",
"(",
"(",
"2",
",",
"0",
",",
"8",
",",
"1",
")",
",",
"None",
",",
"q_struct",
")",
"self",
".",
"c",
".",
"publish",
"(",
"\"{0}/s.giles/_/i.archiver/slot/query\"",
".",
"format",
"(",
"archiver",
")",
",",
"payload_objects",
"=",
"(",
"po",
",",
")",
")",
"ev",
".",
"wait",
"(",
"timeout",
")",
"if",
"len",
"(",
"response",
")",
"==",
"0",
":",
"# no results",
"raise",
"TimeoutException",
"(",
"\"Query of {0} timed out\"",
".",
"format",
"(",
"query",
")",
")",
"return",
"response"
] | Runs the given pundat query and returns the results as a Python object.
Arguments:
[query]: the query string
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"Runs",
"the",
"given",
"pundat",
"query",
"and",
"returns",
"the",
"results",
"as",
"a",
"Python",
"object",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L61-L111 |
2,108 | SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.uuids | def uuids(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Using the given where-clause, finds all UUIDs that match
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
resp = self.query("select uuid where {0}".format(where), archiver, timeout)
uuids = []
for r in resp["metadata"]:
uuids.append(r["uuid"])
return uuids | python | def uuids(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Using the given where-clause, finds all UUIDs that match
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
resp = self.query("select uuid where {0}".format(where), archiver, timeout)
uuids = []
for r in resp["metadata"]:
uuids.append(r["uuid"])
return uuids | [
"def",
"uuids",
"(",
"self",
",",
"where",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"resp",
"=",
"self",
".",
"query",
"(",
"\"select uuid where {0}\"",
".",
"format",
"(",
"where",
")",
",",
"archiver",
",",
"timeout",
")",
"uuids",
"=",
"[",
"]",
"for",
"r",
"in",
"resp",
"[",
"\"metadata\"",
"]",
":",
"uuids",
".",
"append",
"(",
"r",
"[",
"\"uuid\"",
"]",
")",
"return",
"uuids"
] | Using the given where-clause, finds all UUIDs that match
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"Using",
"the",
"given",
"where",
"-",
"clause",
"finds",
"all",
"UUIDs",
"that",
"match"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L113-L127 |
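`uuids` is just `query('select uuid where ...')` plus unwrapping the metadata records; reusing the where clause from its own docstring (and the hypothetical `dc` from the sketch above):

```python
# ids = dc.uuids('SourceName = "TED Main"', timeout=30)
# print(len(ids), "matching streams")
```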
2,109 | SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.tags | def tags(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Retrieves tags for all streams matching the given WHERE clause
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select * where {0}".format(where), archiver, timeout).get('metadata',{}) | python | def tags(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Retrieves tags for all streams matching the given WHERE clause
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select * where {0}".format(where), archiver, timeout).get('metadata',{}) | [
"def",
"tags",
"(",
"self",
",",
"where",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"return",
"self",
".",
"query",
"(",
"\"select * where {0}\"",
".",
"format",
"(",
"where",
")",
",",
"archiver",
",",
"timeout",
")",
".",
"get",
"(",
"'metadata'",
",",
"{",
"}",
")"
] | Retrieves tags for all streams matching the given WHERE clause
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"Retrieves",
"tags",
"for",
"all",
"streams",
"matching",
"the",
"given",
"WHERE",
"clause"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L129-L139 |
2,110 | SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.tags_uuids | def tags_uuids(self, uuids, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Retrieves tags for all streams with the provided UUIDs
Arguments:
[uuids]: list of UUIDs
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select * where {0}".format(where), archiver, timeout).get('metadata',{}) | python | def tags_uuids(self, uuids, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Retrieves tags for all streams with the provided UUIDs
Arguments:
[uuids]: list of UUIDs
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select * where {0}".format(where), archiver, timeout).get('metadata',{}) | [
"def",
"tags_uuids",
"(",
"self",
",",
"uuids",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"if",
"not",
"isinstance",
"(",
"uuids",
",",
"list",
")",
":",
"uuids",
"=",
"[",
"uuids",
"]",
"where",
"=",
"\" or \"",
".",
"join",
"(",
"[",
"'uuid = \"{0}\"'",
".",
"format",
"(",
"uuid",
")",
"for",
"uuid",
"in",
"uuids",
"]",
")",
"return",
"self",
".",
"query",
"(",
"\"select * where {0}\"",
".",
"format",
"(",
"where",
")",
",",
"archiver",
",",
"timeout",
")",
".",
"get",
"(",
"'metadata'",
",",
"{",
"}",
")"
] | Retrieves tags for all streams with the provided UUIDs
Arguments:
[uuids]: list of UUIDs
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"Retrieves",
"tags",
"for",
"all",
"streams",
"with",
"the",
"provided",
"UUIDs"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L141-L154 |
2,111 | SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.data | def data(self, where, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given WHERE clause, retrieves all RAW data between the 2 given timestamps
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select data in ({0}, {1}) where {2}".format(start, end, where), archiver, timeout).get('timeseries',{}) | python | def data(self, where, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given WHERE clause, retrieves all RAW data between the 2 given timestamps
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select data in ({0}, {1}) where {2}".format(start, end, where), archiver, timeout).get('timeseries',{}) | [
"def",
"data",
"(",
"self",
",",
"where",
",",
"start",
",",
"end",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"return",
"self",
".",
"query",
"(",
"\"select data in ({0}, {1}) where {2}\"",
".",
"format",
"(",
"start",
",",
"end",
",",
"where",
")",
",",
"archiver",
",",
"timeout",
")",
".",
"get",
"(",
"'timeseries'",
",",
"{",
"}",
")"
] | With the given WHERE clause, retrieves all RAW data between the 2 given timestamps
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"With",
"the",
"given",
"WHERE",
"clause",
"retrieves",
"all",
"RAW",
"data",
"between",
"the",
"2",
"given",
"timestamps"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L156-L167 |
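A hypothetical data() call, reusing the dc client from the uuids sketch above; the relative time strings are assumed-valid pundat time references, and the exact layout of the returned 'timeseries' payload depends on the archiver:

# half an hour of raw readings for every stream under "keti"
raw = dc.data('path like "keti"', "now -30min", "now")
print(raw)   # contents of the query response's 'timeseries' key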
2,112 | SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.data_uuids | def data_uuids(self, uuids, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps
Arguments:
[uuids]: list of UUIDs
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select data in ({0}, {1}) where {2}".format(start, end, where), archiver, timeout).get('timeseries',{}) | python | def data_uuids(self, uuids, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps
Arguments:
[uuids]: list of UUIDs
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select data in ({0}, {1}) where {2}".format(start, end, where), archiver, timeout).get('timeseries',{}) | [
"def",
"data_uuids",
"(",
"self",
",",
"uuids",
",",
"start",
",",
"end",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"if",
"not",
"isinstance",
"(",
"uuids",
",",
"list",
")",
":",
"uuids",
"=",
"[",
"uuids",
"]",
"where",
"=",
"\" or \"",
".",
"join",
"(",
"[",
"'uuid = \"{0}\"'",
".",
"format",
"(",
"uuid",
")",
"for",
"uuid",
"in",
"uuids",
"]",
")",
"return",
"self",
".",
"query",
"(",
"\"select data in ({0}, {1}) where {2}\"",
".",
"format",
"(",
"start",
",",
"end",
",",
"where",
")",
",",
"archiver",
",",
"timeout",
")",
".",
"get",
"(",
"'timeseries'",
",",
"{",
"}",
")"
] | With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps
Arguments:
[uuids]: list of UUIDs
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"With",
"the",
"given",
"list",
"of",
"UUIDs",
"retrieves",
"all",
"RAW",
"data",
"between",
"the",
"2",
"given",
"timestamps"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L169-L183 |
2,113 | SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.stats | def stats(self, where, start, end, pw, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given WHERE clause, retrieves all statistical data between the 2 given timestamps, using the given pointwidth
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[pw]: pointwidth (window size of 2^pw nanoseconds)
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select statistical({3}) data in ({0}, {1}) where {2}".format(start, end, where, pw), archiver, timeout).get('timeseries',{}) | python | def stats(self, where, start, end, pw, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given WHERE clause, retrieves all statistical data between the 2 given timestamps, using the given pointwidth
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[pw]: pointwidth (window size of 2^pw nanoseconds)
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select statistical({3}) data in ({0}, {1}) where {2}".format(start, end, where, pw), archiver, timeout).get('timeseries',{}) | [
"def",
"stats",
"(",
"self",
",",
"where",
",",
"start",
",",
"end",
",",
"pw",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"return",
"self",
".",
"query",
"(",
"\"select statistical({3}) data in ({0}, {1}) where {2}\"",
".",
"format",
"(",
"start",
",",
"end",
",",
"where",
",",
"pw",
")",
",",
"archiver",
",",
"timeout",
")",
".",
"get",
"(",
"'timeseries'",
",",
"{",
"}",
")"
] | With the given WHERE clause, retrieves all statistical data between the 2 given timestamps, using the given pointwidth
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[pw]: pointwidth (window size of 2^pw nanoseconds)
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"With",
"the",
"given",
"WHERE",
"clause",
"retrieves",
"all",
"statistical",
"data",
"between",
"the",
"2",
"given",
"timestamps",
"using",
"the",
"given",
"pointwidth"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L185-L197 |
2,114 | SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.window | def window(self, where, start, end, width, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given WHERE clause, retrieves all statistical data between the 2 given timestamps, using the given window size
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[width]: a time expression for the window size, e.g. "5s", "365d"
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select window({3}) data in ({0}, {1}) where {2}".format(start, end, where, width), archiver, timeout).get('timeseries',{}) | python | def window(self, where, start, end, width, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given WHERE clause, retrieves all statistical data between the 2 given timestamps, using the given window size
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[width]: a time expression for the window size, e.g. "5s", "365d"
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select window({3}) data in ({0}, {1}) where {2}".format(start, end, where, width), archiver, timeout).get('timeseries',{}) | [
"def",
"window",
"(",
"self",
",",
"where",
",",
"start",
",",
"end",
",",
"width",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"return",
"self",
".",
"query",
"(",
"\"select window({3}) data in ({0}, {1}) where {2}\"",
".",
"format",
"(",
"start",
",",
"end",
",",
"where",
",",
"width",
")",
",",
"archiver",
",",
"timeout",
")",
".",
"get",
"(",
"'timeseries'",
",",
"{",
"}",
")"
] | With the given WHERE clause, retrieves all statistical data between the 2 given timestamps, using the given window size
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[width]: a time expression for the window size, e.g. "5s", "365d"
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"With",
"the",
"given",
"WHERE",
"clause",
"retrieves",
"all",
"statistical",
"data",
"between",
"the",
"2",
"given",
"timestamps",
"using",
"the",
"given",
"window",
"size"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L216-L228 |
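window() trades the power-of-two pointwidth for a literal width expression, which is usually easier to read; "1h" below is an assumed-valid expression in the same family as the "5s" and "365d" examples from the docstring:

# hourly bins over the last day, reusing the dc handle from above
hourly = dc.window('path like "keti"', "now -1d", "now", "1h")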
2,115 | Danielhiversen/flux_led | flux_led/__main__.py | WifiLedBulb.brightness | def brightness(self):
"""Return current brightness 0-255.
For warm white return current led level. For RGB
calculate the HSV and return the 'value'.
"""
if self.mode == "ww":
return int(self.raw_state[9])
else:
_, _, v = colorsys.rgb_to_hsv(*self.getRgb())
return v | python | def brightness(self):
"""Return current brightness 0-255.
For warm white return current led level. For RGB
calculate the HSV and return the 'value'.
"""
if self.mode == "ww":
return int(self.raw_state[9])
else:
_, _, v = colorsys.rgb_to_hsv(*self.getRgb())
return v | [
"def",
"brightness",
"(",
"self",
")",
":",
"if",
"self",
".",
"mode",
"==",
"\"ww\"",
":",
"return",
"int",
"(",
"self",
".",
"raw_state",
"[",
"9",
"]",
")",
"else",
":",
"_",
",",
"_",
",",
"v",
"=",
"colorsys",
".",
"rgb_to_hsv",
"(",
"*",
"self",
".",
"getRgb",
"(",
")",
")",
"return",
"v"
] | Return current brightness 0-255.
For warm white return current led level. For RGB
calculate the HSV and return the 'value'. | [
"Return",
"current",
"brightness",
"0",
"-",
"255",
"."
] | 13e87e06ff7589356c83e084a6be768ad1290557 | https://github.com/Danielhiversen/flux_led/blob/13e87e06ff7589356c83e084a6be768ad1290557/flux_led/__main__.py#L544-L554 |
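A note on the RGB branch above: colorsys nominally works on 0-1 floats, but rgb_to_hsv() computes v as max(r, g, b) whatever the input scale, so feeding it raw 0-255 channel values returns the 'value' already on the 0-255 scale. A quick check:

import colorsys

# v is simply the largest channel, regardless of scale
_, _, v = colorsys.rgb_to_hsv(255, 128, 0)
print(v)   # 255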
2,116 | kyrus/python-junit-xml | junit_xml/__init__.py | decode | def decode(var, encoding):
"""
If not already unicode, decode it.
"""
if PY2:
if isinstance(var, unicode):
ret = var
elif isinstance(var, str):
if encoding:
ret = var.decode(encoding)
else:
ret = unicode(var)
else:
ret = unicode(var)
else:
ret = str(var)
return ret | python | def decode(var, encoding):
"""
If not already unicode, decode it.
"""
if PY2:
if isinstance(var, unicode):
ret = var
elif isinstance(var, str):
if encoding:
ret = var.decode(encoding)
else:
ret = unicode(var)
else:
ret = unicode(var)
else:
ret = str(var)
return ret | [
"def",
"decode",
"(",
"var",
",",
"encoding",
")",
":",
"if",
"PY2",
":",
"if",
"isinstance",
"(",
"var",
",",
"unicode",
")",
":",
"ret",
"=",
"var",
"elif",
"isinstance",
"(",
"var",
",",
"str",
")",
":",
"if",
"encoding",
":",
"ret",
"=",
"var",
".",
"decode",
"(",
"encoding",
")",
"else",
":",
"ret",
"=",
"unicode",
"(",
"var",
")",
"else",
":",
"ret",
"=",
"unicode",
"(",
"var",
")",
"else",
":",
"ret",
"=",
"str",
"(",
"var",
")",
"return",
"ret"
] | If not already unicode, decode it. | [
"If",
"not",
"already",
"unicode",
"decode",
"it",
"."
] | 9bb2675bf0058742da04285dcdcf8781eee03db0 | https://github.com/kyrus/python-junit-xml/blob/9bb2675bf0058742da04285dcdcf8781eee03db0/junit_xml/__init__.py#L57-L73 |
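On Python 3 every branch of decode() collapses to str(), so the encoding argument only matters under Python 2; a sketch of the observable behaviour on either interpreter:

from junit_xml import decode

print(decode(u"caf\xe9", None))   # already text: returned unchanged
print(decode(123, "utf-8"))       # not a string: coerced to u'123' / '123'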
2,117 | esheldon/fitsio | fitsio/util.py | cfitsio_version | def cfitsio_version(asfloat=False):
"""
Return the cfitsio version as a string.
"""
# use string version to avoid roundoffs
ver = '%0.3f' % _fitsio_wrap.cfitsio_version()
if asfloat:
return float(ver)
else:
return ver | python | def cfitsio_version(asfloat=False):
"""
Return the cfitsio version as a string.
"""
# use string version to avoid roundoffs
ver = '%0.3f' % _fitsio_wrap.cfitsio_version()
if asfloat:
return float(ver)
else:
return ver | [
"def",
"cfitsio_version",
"(",
"asfloat",
"=",
"False",
")",
":",
"# use string version to avoid roundoffs",
"ver",
"=",
"'%0.3f'",
"%",
"_fitsio_wrap",
".",
"cfitsio_version",
"(",
")",
"if",
"asfloat",
":",
"return",
"float",
"(",
"ver",
")",
"else",
":",
"return",
"ver"
] | Return the cfitsio version as a string. | [
"Return",
"the",
"cfitsio",
"version",
"as",
"a",
"string",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/util.py#L19-L28 |
2,118 | esheldon/fitsio | fitsio/util.py | is_little_endian | def is_little_endian(array):
"""
Return True if array is little endian, False otherwise.
Parameters
----------
array: numpy array
A numerical python array.
Returns
-------
Truth value:
True for little-endian
Notes
-----
Strings are neither big nor little endian. The input must be a simple numpy
array, not an array with fields.
"""
if numpy.little_endian:
machine_little = True
else:
machine_little = False
byteorder = array.dtype.base.byteorder
return (byteorder == '<') or (machine_little and byteorder == '=') | python | def is_little_endian(array):
"""
Return True if array is little endian, False otherwise.
Parameters
----------
array: numpy array
A numerical python array.
Returns
-------
Truth value:
True for little-endian
Notes
-----
Strings are neither big nor little endian. The input must be a simple numpy
array, not an array with fields.
"""
if numpy.little_endian:
machine_little = True
else:
machine_little = False
byteorder = array.dtype.base.byteorder
return (byteorder == '<') or (machine_little and byteorder == '=') | [
"def",
"is_little_endian",
"(",
"array",
")",
":",
"if",
"numpy",
".",
"little_endian",
":",
"machine_little",
"=",
"True",
"else",
":",
"machine_little",
"=",
"False",
"byteorder",
"=",
"array",
".",
"dtype",
".",
"base",
".",
"byteorder",
"return",
"(",
"byteorder",
"==",
"'<'",
")",
"or",
"(",
"machine_little",
"and",
"byteorder",
"==",
"'='",
")"
] | Return True if array is little endian, False otherwise.
Parameters
----------
array: numpy array
A numerical python array.
Returns
-------
Truth value:
True for little-endian
Notes
-----
Strings are neither big nor little endian. The input must be a simple numpy
array, not an array with fields. | [
"Return",
"True",
"if",
"array",
"is",
"little",
"endian",
"False",
"otherwise",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/util.py#L73-L98 |
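The check reduces to comparing dtype.base.byteorder against '<', '>', and the native marker '='; a small demonstration with explicitly ordered dtypes:

import numpy
from fitsio.util import is_little_endian

little = numpy.zeros(3, dtype="<f8")   # explicit little-endian
big = little.astype(">f8")             # explicit big-endian copy
print(is_little_endian(little), is_little_endian(big))   # True False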
2,119 | esheldon/fitsio | fitsio/util.py | array_to_native | def array_to_native(array, inplace=False):
"""
Convert an array to the native byte order.
NOTE: the inplace keyword argument is not currently used.
"""
if numpy.little_endian:
machine_little = True
else:
machine_little = False
data_little = False
if array.dtype.names is None:
if array.dtype.base.byteorder == '|':
# strings and 1 byte integers
return array
data_little = is_little_endian(array)
else:
# assume all are same byte order: we only need to find one with
# little endian
for fname in array.dtype.names:
if is_little_endian(array[fname]):
data_little = True
break
if ((machine_little and not data_little)
or (not machine_little and data_little)):
output = array.byteswap(inplace)
else:
output = array
return output | python | def array_to_native(array, inplace=False):
"""
Convert an array to the native byte order.
NOTE: the inplace keyword argument is not currently used.
"""
if numpy.little_endian:
machine_little = True
else:
machine_little = False
data_little = False
if array.dtype.names is None:
if array.dtype.base.byteorder == '|':
# strings and 1 byte integers
return array
data_little = is_little_endian(array)
else:
# assume all are same byte order: we only need to find one with
# little endian
for fname in array.dtype.names:
if is_little_endian(array[fname]):
data_little = True
break
if ((machine_little and not data_little)
or (not machine_little and data_little)):
output = array.byteswap(inplace)
else:
output = array
return output | [
"def",
"array_to_native",
"(",
"array",
",",
"inplace",
"=",
"False",
")",
":",
"if",
"numpy",
".",
"little_endian",
":",
"machine_little",
"=",
"True",
"else",
":",
"machine_little",
"=",
"False",
"data_little",
"=",
"False",
"if",
"array",
".",
"dtype",
".",
"names",
"is",
"None",
":",
"if",
"array",
".",
"dtype",
".",
"base",
".",
"byteorder",
"==",
"'|'",
":",
"# strings and 1 byte integers",
"return",
"array",
"data_little",
"=",
"is_little_endian",
"(",
"array",
")",
"else",
":",
"# assume all are same byte order: we only need to find one with",
"# little endian",
"for",
"fname",
"in",
"array",
".",
"dtype",
".",
"names",
":",
"if",
"is_little_endian",
"(",
"array",
"[",
"fname",
"]",
")",
":",
"data_little",
"=",
"True",
"break",
"if",
"(",
"(",
"machine_little",
"and",
"not",
"data_little",
")",
"or",
"(",
"not",
"machine_little",
"and",
"data_little",
")",
")",
":",
"output",
"=",
"array",
".",
"byteswap",
"(",
"inplace",
")",
"else",
":",
"output",
"=",
"array",
"return",
"output"
] | Convert an array to the native byte order.
NOTE: the inplace keyword argument is not currently used. | [
"Convert",
"an",
"array",
"to",
"the",
"native",
"byte",
"order",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/util.py#L101-L134 |
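One subtlety worth spelling out: numpy's byteswap() reorders the raw bytes but leaves the dtype's byteorder flag alone, which suits the cfitsio layer, since it consumes the buffer and ignores numpy's metadata. A demonstration:

import numpy

big = numpy.arange(4, dtype=">i4")
swapped = big.byteswap()                 # copy; bytes are now little-endian
print(swapped.dtype.byteorder)           # still '>': only the bytes moved
print(swapped.view(swapped.dtype.newbyteorder()).tolist())   # [0, 1, 2, 3]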
2,120 | esheldon/fitsio | fitsio/util.py | mks | def mks(val):
"""
make sure the value is a string, paying mind to python3 vs 2
"""
if sys.version_info > (3, 0, 0):
if isinstance(val, bytes):
sval = str(val, 'utf-8')
else:
sval = str(val)
else:
sval = str(val)
return sval | python | def mks(val):
"""
make sure the value is a string, paying mind to python3 vs 2
"""
if sys.version_info > (3, 0, 0):
if isinstance(val, bytes):
sval = str(val, 'utf-8')
else:
sval = str(val)
else:
sval = str(val)
return sval | [
"def",
"mks",
"(",
"val",
")",
":",
"if",
"sys",
".",
"version_info",
">",
"(",
"3",
",",
"0",
",",
"0",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"bytes",
")",
":",
"sval",
"=",
"str",
"(",
"val",
",",
"'utf-8'",
")",
"else",
":",
"sval",
"=",
"str",
"(",
"val",
")",
"else",
":",
"sval",
"=",
"str",
"(",
"val",
")",
"return",
"sval"
] | make sure the value is a string, paying mind to python3 vs 2 | [
"make",
"sure",
"the",
"value",
"is",
"a",
"string",
"paying",
"mind",
"to",
"python3",
"vs",
"2"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/util.py#L143-L155 |
2,121 | esheldon/fitsio | fitsio/hdu/table.py | _get_col_dimstr | def _get_col_dimstr(tdim, is_string=False):
"""
not for variable length
"""
dimstr = ''
if tdim is None:
dimstr = 'array[bad TDIM]'
else:
if is_string:
if len(tdim) > 1:
dimstr = [str(d) for d in tdim[1:]]
else:
if len(tdim) > 1 or tdim[0] > 1:
dimstr = [str(d) for d in tdim]
if dimstr != '':
dimstr = ','.join(dimstr)
dimstr = 'array[%s]' % dimstr
return dimstr | python | def _get_col_dimstr(tdim, is_string=False):
"""
not for variable length
"""
dimstr = ''
if tdim is None:
dimstr = 'array[bad TDIM]'
else:
if is_string:
if len(tdim) > 1:
dimstr = [str(d) for d in tdim[1:]]
else:
if len(tdim) > 1 or tdim[0] > 1:
dimstr = [str(d) for d in tdim]
if dimstr != '':
dimstr = ','.join(dimstr)
dimstr = 'array[%s]' % dimstr
return dimstr | [
"def",
"_get_col_dimstr",
"(",
"tdim",
",",
"is_string",
"=",
"False",
")",
":",
"dimstr",
"=",
"''",
"if",
"tdim",
"is",
"None",
":",
"dimstr",
"=",
"'array[bad TDIM]'",
"else",
":",
"if",
"is_string",
":",
"if",
"len",
"(",
"tdim",
")",
">",
"1",
":",
"dimstr",
"=",
"[",
"str",
"(",
"d",
")",
"for",
"d",
"in",
"tdim",
"[",
"1",
":",
"]",
"]",
"else",
":",
"if",
"len",
"(",
"tdim",
")",
">",
"1",
"or",
"tdim",
"[",
"0",
"]",
">",
"1",
":",
"dimstr",
"=",
"[",
"str",
"(",
"d",
")",
"for",
"d",
"in",
"tdim",
"]",
"if",
"dimstr",
"!=",
"''",
":",
"dimstr",
"=",
"','",
".",
"join",
"(",
"dimstr",
")",
"dimstr",
"=",
"'array[%s]'",
"%",
"dimstr",
"return",
"dimstr"
] | not for variable length | [
"not",
"for",
"variable",
"length"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L2019-L2037 |
2,122 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU.get_colname | def get_colname(self, colnum):
"""
Get the name associated with the given column number
parameters
----------
colnum: integer
The number for the column, zero offset
"""
if colnum < 0 or colnum > (len(self._colnames)-1):
raise ValueError(
"colnum out of range [0,%s-1]" % (0, len(self._colnames)))
return self._colnames[colnum] | python | def get_colname(self, colnum):
"""
Get the name associated with the given column number
parameters
----------
colnum: integer
The number for the column, zero offset
"""
if colnum < 0 or colnum > (len(self._colnames)-1):
raise ValueError(
"colnum out of range [0,%s-1]" % (0, len(self._colnames)))
return self._colnames[colnum] | [
"def",
"get_colname",
"(",
"self",
",",
"colnum",
")",
":",
"if",
"colnum",
"<",
"0",
"or",
"colnum",
">",
"(",
"len",
"(",
"self",
".",
"_colnames",
")",
"-",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"colnum out of range [0,%s-1]\"",
"%",
"(",
"0",
",",
"len",
"(",
"self",
".",
"_colnames",
")",
")",
")",
"return",
"self",
".",
"_colnames",
"[",
"colnum",
"]"
] | Get the name associated with the given column number
parameters
----------
colnum: integer
The number for the column, zero offset | [
"Get",
"the",
"name",
"associated",
"with",
"the",
"given",
"column",
"number"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L84-L96 |
2,123 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU.write_column | def write_column(self, column, data, **keys):
"""
Write data to a column in this HDU
This HDU must be a table HDU.
parameters
----------
column: scalar string/integer
The column in which to write. Can be the name or number (0 offset)
data: ndarray
Numerical python array to write. This should match the
shape of the column. You are probably better using
fits.write_table() to be sure.
firstrow: integer, optional
At which row you should begin writing. Be sure you know what you
are doing! For appending see the append() method. Default 0.
"""
firstrow = keys.get('firstrow', 0)
colnum = self._extract_colnum(column)
# need it to be contiguous and native byte order. For now, make a
# copy. but we may be able to avoid this with some care.
if not data.flags['C_CONTIGUOUS']:
# this always makes a copy
data_send = numpy.ascontiguousarray(data)
# this is a copy, we can make sure it is native
# and modify in place if needed
array_to_native(data_send, inplace=True)
else:
# we can avoid the copy with a try-finally block and
# some logic
data_send = array_to_native(data, inplace=False)
if IS_PY3 and data_send.dtype.char == 'U':
# for python3, we convert unicode to ascii
# this will error if the character is not in ascii
data_send = data_send.astype('S', copy=False)
self._verify_column_data(colnum, data_send)
self._FITS.write_column(
self._ext+1, colnum+1, data_send,
firstrow=firstrow+1, write_bitcols=self.write_bitcols)
del data_send
self._update_info() | python | def write_column(self, column, data, **keys):
"""
Write data to a column in this HDU
This HDU must be a table HDU.
parameters
----------
column: scalar string/integer
The column in which to write. Can be the name or number (0 offset)
data: ndarray
Numerical python array to write. This should match the
shape of the column. You are probably better using
fits.write_table() to be sure.
firstrow: integer, optional
At which row you should begin writing. Be sure you know what you
are doing! For appending see the append() method. Default 0.
"""
firstrow = keys.get('firstrow', 0)
colnum = self._extract_colnum(column)
# need it to be contiguous and native byte order. For now, make a
# copy. but we may be able to avoid this with some care.
if not data.flags['C_CONTIGUOUS']:
# this always makes a copy
data_send = numpy.ascontiguousarray(data)
# this is a copy, we can make sure it is native
# and modify in place if needed
array_to_native(data_send, inplace=True)
else:
# we can avoid the copy with a try-finally block and
# some logic
data_send = array_to_native(data, inplace=False)
if IS_PY3 and data_send.dtype.char == 'U':
# for python3, we convert unicode to ascii
# this will error if the character is not in ascii
data_send = data_send.astype('S', copy=False)
self._verify_column_data(colnum, data_send)
self._FITS.write_column(
self._ext+1, colnum+1, data_send,
firstrow=firstrow+1, write_bitcols=self.write_bitcols)
del data_send
self._update_info() | [
"def",
"write_column",
"(",
"self",
",",
"column",
",",
"data",
",",
"*",
"*",
"keys",
")",
":",
"firstrow",
"=",
"keys",
".",
"get",
"(",
"'firstrow'",
",",
"0",
")",
"colnum",
"=",
"self",
".",
"_extract_colnum",
"(",
"column",
")",
"# need it to be contiguous and native byte order. For now, make a",
"# copy. but we may be able to avoid this with some care.",
"if",
"not",
"data",
".",
"flags",
"[",
"'C_CONTIGUOUS'",
"]",
":",
"# this always makes a copy",
"data_send",
"=",
"numpy",
".",
"ascontiguousarray",
"(",
"data",
")",
"# this is a copy, we can make sure it is native",
"# and modify in place if needed",
"array_to_native",
"(",
"data_send",
",",
"inplace",
"=",
"True",
")",
"else",
":",
"# we can avoid the copy with a try-finally block and",
"# some logic",
"data_send",
"=",
"array_to_native",
"(",
"data",
",",
"inplace",
"=",
"False",
")",
"if",
"IS_PY3",
"and",
"data_send",
".",
"dtype",
".",
"char",
"==",
"'U'",
":",
"# for python3, we convert unicode to ascii",
"# this will error if the character is not in ascii",
"data_send",
"=",
"data_send",
".",
"astype",
"(",
"'S'",
",",
"copy",
"=",
"False",
")",
"self",
".",
"_verify_column_data",
"(",
"colnum",
",",
"data_send",
")",
"self",
".",
"_FITS",
".",
"write_column",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"colnum",
"+",
"1",
",",
"data_send",
",",
"firstrow",
"=",
"firstrow",
"+",
"1",
",",
"write_bitcols",
"=",
"self",
".",
"write_bitcols",
")",
"del",
"data_send",
"self",
".",
"_update_info",
"(",
")"
] | Write data to a column in this HDU
This HDU must be a table HDU.
parameters
----------
column: scalar string/integer
The column in which to write. Can be the name or number (0 offset)
data: ndarray
Numerical python array to write. This should match the
shape of the column. You are probably better using
fits.write_table() to be sure.
firstrow: integer, optional
At which row you should begin writing. Be sure you know what you
are doing! For appending see the append() method. Default 0. | [
"Write",
"data",
"to",
"a",
"column",
"in",
"this",
"HDU"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L242-L290 |
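A hypothetical round trip for write_column(); the file name and record layout are made up for illustration:

import numpy
import fitsio

with fitsio.FITS("demo.fits", "rw", clobber=True) as fits:
    recs = numpy.zeros(4, dtype=[("x", "f8"), ("flag", "i4")])
    fits.write(recs)                              # extension 1: the table
    fits[1].write_column("flag", numpy.arange(4, dtype="i4"))
    print(fits[1].read()["flag"])                 # [0 1 2 3]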
2,124 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._verify_column_data | def _verify_column_data(self, colnum, data):
"""
verify the input data is of the correct type and shape
"""
this_dt = data.dtype.descr[0]
if len(data.shape) > 2:
this_shape = data.shape[1:]
elif len(data.shape) == 2 and data.shape[1] > 1:
this_shape = data.shape[1:]
else:
this_shape = ()
this_npy_type = this_dt[1][1:]
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
info = self._info['colinfo'][colnum]
if npy_type[0] in ['>', '<', '|']:
npy_type = npy_type[1:]
col_name = info['name']
col_tdim = info['tdim']
col_shape = _tdim2shape(
col_tdim, col_name, is_string=(npy_type[0] == 'S'))
if col_shape is None:
if this_shape == ():
this_shape = None
if col_shape is not None and not isinstance(col_shape, tuple):
col_shape = (col_shape,)
"""
print('column name:',col_name)
print(data.shape)
print('col tdim', info['tdim'])
print('column dtype:',npy_type)
print('input dtype:',this_npy_type)
print('column shape:',col_shape)
print('input shape:',this_shape)
print()
"""
# this mismatch is OK
if npy_type == 'i1' and this_npy_type == 'b1':
this_npy_type = 'i1'
if isinstance(self, AsciiTableHDU):
# we don't enforce types exact for ascii
if npy_type == 'i8' and this_npy_type in ['i2', 'i4']:
this_npy_type = 'i8'
elif npy_type == 'f8' and this_npy_type == 'f4':
this_npy_type = 'f8'
if this_npy_type != npy_type:
raise ValueError(
"bad input data for column '%s': "
"expected '%s', got '%s'" % (
col_name, npy_type, this_npy_type))
if this_shape != col_shape:
raise ValueError(
"bad input shape for column '%s': "
"expected '%s', got '%s'" % (col_name, col_shape, this_shape)) | python | def _verify_column_data(self, colnum, data):
"""
verify the input data is of the correct type and shape
"""
this_dt = data.dtype.descr[0]
if len(data.shape) > 2:
this_shape = data.shape[1:]
elif len(data.shape) == 2 and data.shape[1] > 1:
this_shape = data.shape[1:]
else:
this_shape = ()
this_npy_type = this_dt[1][1:]
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
info = self._info['colinfo'][colnum]
if npy_type[0] in ['>', '<', '|']:
npy_type = npy_type[1:]
col_name = info['name']
col_tdim = info['tdim']
col_shape = _tdim2shape(
col_tdim, col_name, is_string=(npy_type[0] == 'S'))
if col_shape is None:
if this_shape == ():
this_shape = None
if col_shape is not None and not isinstance(col_shape, tuple):
col_shape = (col_shape,)
"""
print('column name:',col_name)
print(data.shape)
print('col tdim', info['tdim'])
print('column dtype:',npy_type)
print('input dtype:',this_npy_type)
print('column shape:',col_shape)
print('input shape:',this_shape)
print()
"""
# this mismatch is OK
if npy_type == 'i1' and this_npy_type == 'b1':
this_npy_type = 'i1'
if isinstance(self, AsciiTableHDU):
# we don't enforce types exact for ascii
if npy_type == 'i8' and this_npy_type in ['i2', 'i4']:
this_npy_type = 'i8'
elif npy_type == 'f8' and this_npy_type == 'f4':
this_npy_type = 'f8'
if this_npy_type != npy_type:
raise ValueError(
"bad input data for column '%s': "
"expected '%s', got '%s'" % (
col_name, npy_type, this_npy_type))
if this_shape != col_shape:
raise ValueError(
"bad input shape for column '%s': "
"expected '%s', got '%s'" % (col_name, col_shape, this_shape)) | [
"def",
"_verify_column_data",
"(",
"self",
",",
"colnum",
",",
"data",
")",
":",
"this_dt",
"=",
"data",
".",
"dtype",
".",
"descr",
"[",
"0",
"]",
"if",
"len",
"(",
"data",
".",
"shape",
")",
">",
"2",
":",
"this_shape",
"=",
"data",
".",
"shape",
"[",
"1",
":",
"]",
"elif",
"len",
"(",
"data",
".",
"shape",
")",
"==",
"2",
"and",
"data",
".",
"shape",
"[",
"1",
"]",
">",
"1",
":",
"this_shape",
"=",
"data",
".",
"shape",
"[",
"1",
":",
"]",
"else",
":",
"this_shape",
"=",
"(",
")",
"this_npy_type",
"=",
"this_dt",
"[",
"1",
"]",
"[",
"1",
":",
"]",
"npy_type",
",",
"isvar",
",",
"istbit",
"=",
"self",
".",
"_get_tbl_numpy_dtype",
"(",
"colnum",
")",
"info",
"=",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"if",
"npy_type",
"[",
"0",
"]",
"in",
"[",
"'>'",
",",
"'<'",
",",
"'|'",
"]",
":",
"npy_type",
"=",
"npy_type",
"[",
"1",
":",
"]",
"col_name",
"=",
"info",
"[",
"'name'",
"]",
"col_tdim",
"=",
"info",
"[",
"'tdim'",
"]",
"col_shape",
"=",
"_tdim2shape",
"(",
"col_tdim",
",",
"col_name",
",",
"is_string",
"=",
"(",
"npy_type",
"[",
"0",
"]",
"==",
"'S'",
")",
")",
"if",
"col_shape",
"is",
"None",
":",
"if",
"this_shape",
"==",
"(",
")",
":",
"this_shape",
"=",
"None",
"if",
"col_shape",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"col_shape",
",",
"tuple",
")",
":",
"col_shape",
"=",
"(",
"col_shape",
",",
")",
"\"\"\"\n print('column name:',col_name)\n print(data.shape)\n print('col tdim', info['tdim'])\n print('column dtype:',npy_type)\n print('input dtype:',this_npy_type)\n print('column shape:',col_shape)\n print('input shape:',this_shape)\n print()\n \"\"\"",
"# this mismatch is OK",
"if",
"npy_type",
"==",
"'i1'",
"and",
"this_npy_type",
"==",
"'b1'",
":",
"this_npy_type",
"=",
"'i1'",
"if",
"isinstance",
"(",
"self",
",",
"AsciiTableHDU",
")",
":",
"# we don't enforce types exact for ascii",
"if",
"npy_type",
"==",
"'i8'",
"and",
"this_npy_type",
"in",
"[",
"'i2'",
",",
"'i4'",
"]",
":",
"this_npy_type",
"=",
"'i8'",
"elif",
"npy_type",
"==",
"'f8'",
"and",
"this_npy_type",
"==",
"'f4'",
":",
"this_npy_type",
"=",
"'f8'",
"if",
"this_npy_type",
"!=",
"npy_type",
":",
"raise",
"ValueError",
"(",
"\"bad input data for column '%s': \"",
"\"expected '%s', got '%s'\"",
"%",
"(",
"col_name",
",",
"npy_type",
",",
"this_npy_type",
")",
")",
"if",
"this_shape",
"!=",
"col_shape",
":",
"raise",
"ValueError",
"(",
"\"bad input shape for column '%s': \"",
"\"expected '%s', got '%s'\"",
"%",
"(",
"col_name",
",",
"col_shape",
",",
"this_shape",
")",
")"
] | verify the input data is of the correct type and shape | [
"verify",
"the",
"input",
"data",
"is",
"of",
"the",
"correct",
"type",
"and",
"shape"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L292-L356 |
2,125 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU.write_var_column | def write_var_column(self, column, data, firstrow=0, **keys):
"""
Write data to a variable-length column in this HDU
This HDU must be a table HDU.
parameters
----------
column: scalar string/integer
The column in which to write. Can be the name or number (0 offset)
data: ndarray
Numerical python array to write. This must be an object array.
firstrow: integer, optional
At which row you should begin writing. Be sure you know what you
are doing! For appending see the append() method. Default 0.
"""
if not is_object(data):
raise ValueError("Only object fields can be written to "
"variable-length arrays")
colnum = self._extract_colnum(column)
self._FITS.write_var_column(self._ext+1, colnum+1, data,
firstrow=firstrow+1)
self._update_info() | python | def write_var_column(self, column, data, firstrow=0, **keys):
"""
Write data to a variable-length column in this HDU
This HDU must be a table HDU.
parameters
----------
column: scalar string/integer
The column in which to write. Can be the name or number (0 offset)
data: ndarray
Numerical python array to write. This must be an object array.
firstrow: integer, optional
At which row you should begin writing. Be sure you know what you
are doing! For appending see the append() method. Default 0.
"""
if not is_object(data):
raise ValueError("Only object fields can be written to "
"variable-length arrays")
colnum = self._extract_colnum(column)
self._FITS.write_var_column(self._ext+1, colnum+1, data,
firstrow=firstrow+1)
self._update_info() | [
"def",
"write_var_column",
"(",
"self",
",",
"column",
",",
"data",
",",
"firstrow",
"=",
"0",
",",
"*",
"*",
"keys",
")",
":",
"if",
"not",
"is_object",
"(",
"data",
")",
":",
"raise",
"ValueError",
"(",
"\"Only object fields can be written to \"",
"\"variable-length arrays\"",
")",
"colnum",
"=",
"self",
".",
"_extract_colnum",
"(",
"column",
")",
"self",
".",
"_FITS",
".",
"write_var_column",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"colnum",
"+",
"1",
",",
"data",
",",
"firstrow",
"=",
"firstrow",
"+",
"1",
")",
"self",
".",
"_update_info",
"(",
")"
] | Write data to a variable-length column in this HDU
This HDU must be a table HDU.
parameters
----------
column: scalar string/integer
The column in which to write. Can be the name or number (0 offset)
data: ndarray
Numerical python array to write. This must be an object array.
firstrow: integer, optional
At which row you should begin writing. Be sure you know what you
are doing! For appending see the append() method. Default 0. | [
"Write",
"data",
"to",
"a",
"variable",
"-",
"length",
"column",
"in",
"this",
"HDU"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L358-L382 |
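For the variable-length path the target column must already be a variable-length column and the payload must be an object array; a sketch that relies on fitsio turning an object-dtype field into such a column at table-creation time:

import numpy
import fitsio

rows = numpy.zeros(2, dtype=[("ragged", "O")])
rows["ragged"][0] = [1.0]
rows["ragged"][1] = [1.0, 2.0, 3.0]

with fitsio.FITS("varlen.fits", "rw", clobber=True) as fits:
    fits.write(rows)                                   # creates the var-len column
    fits[1].write_var_column("ragged", rows["ragged"])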
2,126 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU.insert_column | def insert_column(self, name, data, colnum=None):
"""
Insert a new column.
parameters
----------
name: string
The column name
data:
The data to write into the new column.
colnum: int, optional
The column number for the new column, zero-offset. Default
is to add the new column after the existing ones.
Notes
-----
This method is used unmodified by ascii tables as well.
"""
if name in self._colnames:
raise ValueError("column '%s' already exists" % name)
if IS_PY3 and data.dtype.char == 'U':
# fast dtype conversion using an empty array
# we could hack at the actual text description, but using
# the numpy API is probably safer
# this also avoids doing a dtype conversion on every array
# element which could be expensive
descr = numpy.empty(1).astype(data.dtype).astype('S').dtype.descr
else:
descr = data.dtype.descr
if len(descr) > 1:
raise ValueError("you can only insert a single column, "
"requested: %s" % descr)
this_descr = descr[0]
this_descr = [name, this_descr[1]]
if len(data.shape) > 1:
this_descr += [data.shape[1:]]
this_descr = tuple(this_descr)
name, fmt, dims = _npy2fits(
this_descr,
table_type=self._table_type_str)
if dims is not None:
dims = [dims]
if colnum is None:
new_colnum = len(self._info['colinfo']) + 1
else:
new_colnum = colnum+1
self._FITS.insert_col(self._ext+1, new_colnum, name, fmt, tdim=dims)
self._update_info()
self.write_column(name, data) | python | def insert_column(self, name, data, colnum=None):
"""
Insert a new column.
parameters
----------
name: string
The column name
data:
The data to write into the new column.
colnum: int, optional
The column number for the new column, zero-offset. Default
is to add the new column after the existing ones.
Notes
-----
This method is used unmodified by ascii tables as well.
"""
if name in self._colnames:
raise ValueError("column '%s' already exists" % name)
if IS_PY3 and data.dtype.char == 'U':
# fast dtype conversion using an empty array
# we could hack at the actual text description, but using
# the numpy API is probably safer
# this also avoids doing a dtype conversion on every array
# element which could be expensive
descr = numpy.empty(1).astype(data.dtype).astype('S').dtype.descr
else:
descr = data.dtype.descr
if len(descr) > 1:
raise ValueError("you can only insert a single column, "
"requested: %s" % descr)
this_descr = descr[0]
this_descr = [name, this_descr[1]]
if len(data.shape) > 1:
this_descr += [data.shape[1:]]
this_descr = tuple(this_descr)
name, fmt, dims = _npy2fits(
this_descr,
table_type=self._table_type_str)
if dims is not None:
dims = [dims]
if colnum is None:
new_colnum = len(self._info['colinfo']) + 1
else:
new_colnum = colnum+1
self._FITS.insert_col(self._ext+1, new_colnum, name, fmt, tdim=dims)
self._update_info()
self.write_column(name, data) | [
"def",
"insert_column",
"(",
"self",
",",
"name",
",",
"data",
",",
"colnum",
"=",
"None",
")",
":",
"if",
"name",
"in",
"self",
".",
"_colnames",
":",
"raise",
"ValueError",
"(",
"\"column '%s' already exists\"",
"%",
"name",
")",
"if",
"IS_PY3",
"and",
"data",
".",
"dtype",
".",
"char",
"==",
"'U'",
":",
"# fast dtype conversion using an empty array",
"# we could hack at the actual text description, but using",
"# the numpy API is probably safer",
"# this also avoids doing a dtype conversion on every array",
"# element which could b expensive",
"descr",
"=",
"numpy",
".",
"empty",
"(",
"1",
")",
".",
"astype",
"(",
"data",
".",
"dtype",
")",
".",
"astype",
"(",
"'S'",
")",
".",
"dtype",
".",
"descr",
"else",
":",
"descr",
"=",
"data",
".",
"dtype",
".",
"descr",
"if",
"len",
"(",
"descr",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"you can only insert a single column, \"",
"\"requested: %s\"",
"%",
"descr",
")",
"this_descr",
"=",
"descr",
"[",
"0",
"]",
"this_descr",
"=",
"[",
"name",
",",
"this_descr",
"[",
"1",
"]",
"]",
"if",
"len",
"(",
"data",
".",
"shape",
")",
">",
"1",
":",
"this_descr",
"+=",
"[",
"data",
".",
"shape",
"[",
"1",
":",
"]",
"]",
"this_descr",
"=",
"tuple",
"(",
"this_descr",
")",
"name",
",",
"fmt",
",",
"dims",
"=",
"_npy2fits",
"(",
"this_descr",
",",
"table_type",
"=",
"self",
".",
"_table_type_str",
")",
"if",
"dims",
"is",
"not",
"None",
":",
"dims",
"=",
"[",
"dims",
"]",
"if",
"colnum",
"is",
"None",
":",
"new_colnum",
"=",
"len",
"(",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
")",
"+",
"1",
"else",
":",
"new_colnum",
"=",
"colnum",
"+",
"1",
"self",
".",
"_FITS",
".",
"insert_col",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"new_colnum",
",",
"name",
",",
"fmt",
",",
"tdim",
"=",
"dims",
")",
"self",
".",
"_update_info",
"(",
")",
"self",
".",
"write_column",
"(",
"name",
",",
"data",
")"
] | Insert a new column.
parameters
----------
name: string
The column name
data:
The data to write into the new column.
colnum: int, optional
The column number for the new column, zero-offset. Default
is to add the new column after the existing ones.
Notes
-----
This method is used unmodified by ascii tables as well. | [
"Insert",
"a",
"new",
"column",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L384-L439 |
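insert_column() in use, assuming the demo table from the write_column sketch above exists:

import numpy
import fitsio

with fitsio.FITS("demo.fits", "rw") as fits:
    nrows = fits[1].get_nrows()
    fits[1].insert_column("snr", numpy.ones(nrows, dtype="f4"))
    print(fits[1].get_colnames())   # [..., 'snr']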
2,127 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU.append | def append(self, data, **keys):
"""
Append new rows to a table HDU
parameters
----------
data: ndarray or list of arrays
A numerical python array with fields (recarray) or a list of
arrays. Should have the same fields as the existing table. If only
a subset of the table columns are present, the other columns are
filled with zeros.
columns: list, optional
if a list of arrays is sent, also send the columns
of names or column numbers
"""
firstrow = self._info['nrows']
keys['firstrow'] = firstrow
self.write(data, **keys) | python | def append(self, data, **keys):
"""
Append new rows to a table HDU
parameters
----------
data: ndarray or list of arrays
A numerical python array with fields (recarray) or a list of
arrays. Should have the same fields as the existing table. If only
a subset of the table columns are present, the other columns are
filled with zeros.
columns: list, optional
if a list of arrays is sent, also send the columns
of names or column numbers
"""
firstrow = self._info['nrows']
keys['firstrow'] = firstrow
self.write(data, **keys) | [
"def",
"append",
"(",
"self",
",",
"data",
",",
"*",
"*",
"keys",
")",
":",
"firstrow",
"=",
"self",
".",
"_info",
"[",
"'nrows'",
"]",
"keys",
"[",
"'firstrow'",
"]",
"=",
"firstrow",
"self",
".",
"write",
"(",
"data",
",",
"*",
"*",
"keys",
")"
] | Append new rows to a table HDU
parameters
----------
data: ndarray or list of arrays
A numerical python array with fields (recarray) or a list of
arrays. Should have the same fields as the existing table. If only
a subset of the table columns are present, the other columns are
filled with zeros.
columns: list, optional
if a list of arrays is sent, also send the columns
of names or column numbers | [
"Append",
"new",
"rows",
"to",
"a",
"table",
"HDU"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L441-L462 |
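Since append() is write() pinned at firstrow equal to the current row count, growing a table only needs records of a compatible layout; continuing the hypothetical demo file:

import numpy
import fitsio

more = numpy.zeros(2, dtype=[("x", "f8"), ("flag", "i4")])

with fitsio.FITS("demo.fits", "rw") as fits:
    before = fits[1].get_nrows()
    fits[1].append(more)
    print(before, "->", fits[1].get_nrows())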
2,128 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU.delete_rows | def delete_rows(self, rows):
"""
Delete rows from the table
parameters
----------
rows: sequence or slice
The exact rows to delete as a sequence, or a slice.
examples
--------
# delete a range of rows
with fitsio.FITS(fname,'rw') as fits:
fits['mytable'].delete_rows(slice(3,20))
# delete specific rows
with fitsio.FITS(fname,'rw') as fits:
rows2delete = [3,88,76]
fits['mytable'].delete_rows(rows2delete)
"""
if rows is None:
return
# extract and convert to 1-offset for C routine
if isinstance(rows, slice):
rows = self._process_slice(rows)
if rows.step is not None and rows.step != 1:
rows = numpy.arange(
rows.start+1,
rows.stop+1,
rows.step,
)
else:
# rows must be 1-offset
rows = slice(rows.start+1, rows.stop+1)
else:
rows = self._extract_rows(rows)
# rows must be 1-offset
rows += 1
if isinstance(rows, slice):
self._FITS.delete_row_range(self._ext+1, rows.start, rows.stop)
else:
if rows.size == 0:
return
self._FITS.delete_rows(self._ext+1, rows)
self._update_info() | python | def delete_rows(self, rows):
"""
Delete rows from the table
parameters
----------
rows: sequence or slice
The exact rows to delete as a sequence, or a slice.
examples
--------
# delete a range of rows
with fitsio.FITS(fname,'rw') as fits:
fits['mytable'].delete_rows(slice(3,20))
# delete specific rows
with fitsio.FITS(fname,'rw') as fits:
rows2delete = [3,88,76]
fits['mytable'].delete_rows(rows2delete)
"""
if rows is None:
return
# extract and convert to 1-offset for C routine
if isinstance(rows, slice):
rows = self._process_slice(rows)
if rows.step is not None and rows.step != 1:
rows = numpy.arange(
rows.start+1,
rows.stop+1,
rows.step,
)
else:
# rows must be 1-offset
rows = slice(rows.start+1, rows.stop+1)
else:
rows = self._extract_rows(rows)
# rows must be 1-offset
rows += 1
if isinstance(rows, slice):
self._FITS.delete_row_range(self._ext+1, rows.start, rows.stop)
else:
if rows.size == 0:
return
self._FITS.delete_rows(self._ext+1, rows)
self._update_info() | [
"def",
"delete_rows",
"(",
"self",
",",
"rows",
")",
":",
"if",
"rows",
"is",
"None",
":",
"return",
"# extract and convert to 1-offset for C routine",
"if",
"isinstance",
"(",
"rows",
",",
"slice",
")",
":",
"rows",
"=",
"self",
".",
"_process_slice",
"(",
"rows",
")",
"if",
"rows",
".",
"step",
"is",
"not",
"None",
"and",
"rows",
".",
"step",
"!=",
"1",
":",
"rows",
"=",
"numpy",
".",
"arange",
"(",
"rows",
".",
"start",
"+",
"1",
",",
"rows",
".",
"stop",
"+",
"1",
",",
"rows",
".",
"step",
",",
")",
"else",
":",
"# rows must be 1-offset",
"rows",
"=",
"slice",
"(",
"rows",
".",
"start",
"+",
"1",
",",
"rows",
".",
"stop",
"+",
"1",
")",
"else",
":",
"rows",
"=",
"self",
".",
"_extract_rows",
"(",
"rows",
")",
"# rows must be 1-offset",
"rows",
"+=",
"1",
"if",
"isinstance",
"(",
"rows",
",",
"slice",
")",
":",
"self",
".",
"_FITS",
".",
"delete_row_range",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"rows",
".",
"start",
",",
"rows",
".",
"stop",
")",
"else",
":",
"if",
"rows",
".",
"size",
"==",
"0",
":",
"return",
"self",
".",
"_FITS",
".",
"delete_rows",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"rows",
")",
"self",
".",
"_update_info",
"(",
")"
] | Delete rows from the table
parameters
----------
rows: sequence or slice
The exact rows to delete as a sequence, or a slice.
examples
--------
# delete a range of rows
with fitsio.FITS(fname,'rw') as fits:
fits['mytable'].delete_rows(slice(3,20))
# delete specific rows
with fitsio.FITS(fname,'rw') as fits:
rows2delete = [3,88,76]
fits['mytable'].delete_rows(rows2delete) | [
"Delete",
"rows",
"from",
"the",
"table"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L464-L513 |
2,129 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU.resize | def resize(self, nrows, front=False):
"""
Resize the table to the given size, removing or adding rows as
necessary. Note if expanding the table at the end, it is more
efficient to use the append function than resizing and then
writing.
Newly added rows are zeroed, except for 'i1', 'u2' and 'u4' data types
which get -128, 32768, 2147483648 respectively
parameters
----------
nrows: int
new size of table
front: bool, optional
If True, add or remove rows from the front. Default
is False
"""
nrows_current = self.get_nrows()
if nrows == nrows_current:
return
if nrows < nrows_current:
rowdiff = nrows_current - nrows
if front:
# delete from the front
start = 0
stop = rowdiff
else:
# delete from the back
start = nrows
stop = nrows_current
self.delete_rows(slice(start, stop))
else:
rowdiff = nrows - nrows_current
if front:
# in this case zero is what we want, since the code inserts
firstrow = 0
else:
firstrow = nrows_current
self._FITS.insert_rows(self._ext+1, firstrow, rowdiff)
self._update_info() | python | def resize(self, nrows, front=False):
"""
Resize the table to the given size, removing or adding rows as
necessary. Note if expanding the table at the end, it is more
efficient to use the append function than resizing and then
writing.
Newly added rows are zeroed, except for 'i1', 'u2' and 'u4' data types
which get -128, 32768, 2147483648 respectively
parameters
----------
nrows: int
new size of table
front: bool, optional
If True, add or remove rows from the front. Default
is False
"""
nrows_current = self.get_nrows()
if nrows == nrows_current:
return
if nrows < nrows_current:
rowdiff = nrows_current - nrows
if front:
# delete from the front
start = 0
stop = rowdiff
else:
# delete from the back
start = nrows
stop = nrows_current
self.delete_rows(slice(start, stop))
else:
rowdiff = nrows - nrows_current
if front:
# in this case zero is what we want, since the code inserts
firstrow = 0
else:
firstrow = nrows_current
self._FITS.insert_rows(self._ext+1, firstrow, rowdiff)
self._update_info() | [
"def",
"resize",
"(",
"self",
",",
"nrows",
",",
"front",
"=",
"False",
")",
":",
"nrows_current",
"=",
"self",
".",
"get_nrows",
"(",
")",
"if",
"nrows",
"==",
"nrows_current",
":",
"return",
"if",
"nrows",
"<",
"nrows_current",
":",
"rowdiff",
"=",
"nrows_current",
"-",
"nrows",
"if",
"front",
":",
"# delete from the front",
"start",
"=",
"0",
"stop",
"=",
"rowdiff",
"else",
":",
"# delete from the back",
"start",
"=",
"nrows",
"stop",
"=",
"nrows_current",
"self",
".",
"delete_rows",
"(",
"slice",
"(",
"start",
",",
"stop",
")",
")",
"else",
":",
"rowdiff",
"=",
"nrows",
"-",
"nrows_current",
"if",
"front",
":",
"# in this case zero is what we want, since the code inserts",
"firstrow",
"=",
"0",
"else",
":",
"firstrow",
"=",
"nrows_current",
"self",
".",
"_FITS",
".",
"insert_rows",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"firstrow",
",",
"rowdiff",
")",
"self",
".",
"_update_info",
"(",
")"
] | Resize the table to the given size, removing or adding rows as
necessary. Note if expanding the table at the end, it is more
efficient to use the append function than resizing and then
writing.
Newly added rows are zeroed, except for 'i1', 'u2' and 'u4' data types
which get -128, 32768, 2147483648 respectively
parameters
----------
nrows: int
new size of table
front: bool, optional
If True, add or remove rows from the front. Default
is False | [
"Resize",
"the",
"table",
"to",
"the",
"given",
"size",
"removing",
"or",
"adding",
"rows",
"as",
"necessary",
".",
"Note",
"if",
"expanding",
"the",
"table",
"at",
"the",
"end",
"it",
"is",
"more",
"efficient",
"to",
"use",
"the",
"append",
"function",
"than",
"resizing",
"and",
"then",
"writing",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L515-L560 |
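A minimal usage sketch for the resize method above; the file name 'data.fits', the 'rw' mode, and extension 1 are assumptions for illustration, not part of the record.

import fitsio

with fitsio.FITS('data.fits', 'rw') as fits:   # hypothetical existing file
    hdu = fits[1]                    # first table extension
    n = hdu.get_nrows()
    hdu.resize(n + 100)              # grow: 100 zeroed rows appended at the back
    hdu.resize(n, front=True)        # shrink: drop those 100 rows from the front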
2,130 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU.read | def read(self, **keys):
"""
read data from this HDU
By default, all data are read.
send columns= and rows= to select subsets of the data.
Table data are read into a recarray; use read_column() to get a single
column as an ordinary array. You can alternatively use slice notation
fits=fitsio.FITS(filename)
fits[ext][:]
fits[ext][2:5]
fits[ext][200:235:2]
fits[ext][rows]
fits[ext][cols][rows]
parameters
----------
columns: optional
An optional set of columns to read from table HDUs. Default is to
read all. Can be string or number. If a sequence, a recarray
is always returned. If a scalar, an ordinary array is returned.
rows: optional
An optional list of rows to read from table HDUs. Default is to
read all.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
"""
columns = keys.get('columns', None)
rows = keys.get('rows', None)
if columns is not None:
if 'columns' in keys:
del keys['columns']
data = self.read_columns(columns, **keys)
elif rows is not None:
if 'rows' in keys:
del keys['rows']
data = self.read_rows(rows, **keys)
else:
data = self._read_all(**keys)
return data | python | def read(self, **keys):
"""
read data from this HDU
By default, all data are read.
send columns= and rows= to select subsets of the data.
Table data are read into a recarray; use read_column() to get a single
column as an ordinary array. You can alternatively use slice notation
fits=fitsio.FITS(filename)
fits[ext][:]
fits[ext][2:5]
fits[ext][200:235:2]
fits[ext][rows]
fits[ext][cols][rows]
parameters
----------
columns: optional
An optional set of columns to read from table HDUs. Default is to
read all. Can be string or number. If a sequence, a recarray
is always returned. If a scalar, an ordinary array is returned.
rows: optional
An optional list of rows to read from table HDUs. Default is to
read all.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
"""
columns = keys.get('columns', None)
rows = keys.get('rows', None)
if columns is not None:
if 'columns' in keys:
del keys['columns']
data = self.read_columns(columns, **keys)
elif rows is not None:
if 'rows' in keys:
del keys['rows']
data = self.read_rows(rows, **keys)
else:
data = self._read_all(**keys)
return data | [
"def",
"read",
"(",
"self",
",",
"*",
"*",
"keys",
")",
":",
"columns",
"=",
"keys",
".",
"get",
"(",
"'columns'",
",",
"None",
")",
"rows",
"=",
"keys",
".",
"get",
"(",
"'rows'",
",",
"None",
")",
"if",
"columns",
"is",
"not",
"None",
":",
"if",
"'columns'",
"in",
"keys",
":",
"del",
"keys",
"[",
"'columns'",
"]",
"data",
"=",
"self",
".",
"read_columns",
"(",
"columns",
",",
"*",
"*",
"keys",
")",
"elif",
"rows",
"is",
"not",
"None",
":",
"if",
"'rows'",
"in",
"keys",
":",
"del",
"keys",
"[",
"'rows'",
"]",
"data",
"=",
"self",
".",
"read_rows",
"(",
"rows",
",",
"*",
"*",
"keys",
")",
"else",
":",
"data",
"=",
"self",
".",
"_read_all",
"(",
"*",
"*",
"keys",
")",
"return",
"data"
] | read data from this HDU
By default, all data are read.
send columns= and rows= to select subsets of the data.
Table data are read into a recarray; use read_column() to get a single
column as an ordinary array. You can alternatively use slice notation
fits=fitsio.FITS(filename)
fits[ext][:]
fits[ext][2:5]
fits[ext][200:235:2]
fits[ext][rows]
fits[ext][cols][rows]
parameters
----------
columns: optional
An optional set of columns to read from table HDUs. Default is to
read all. Can be string or number. If a sequence, a recarray
is always returned. If a scalar, an ordinary array is returned.
rows: optional
An optional list of rows to read from table HDUs. Default is to
read all.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details. | [
"read",
"data",
"from",
"this",
"HDU"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L562-L606 |
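A short sketch of the read interface documented above, assuming a hypothetical file 'data.fits' with columns 'x' and 'y' in extension 1.

import fitsio

with fitsio.FITS('data.fits') as fits:
    full = fits[1].read()                                    # whole table as a recarray
    sub = fits[1].read(columns=['x', 'y'], rows=[0, 5, 9])   # recarray subset
    col = fits[1]['x'][10:20]                                # slice notation, single column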
2,131 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._read_all | def _read_all(self, **keys):
"""
Read all data in the HDU.
parameters
----------
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the upper= keyword from construction.
"""
dtype, offsets, isvar = self.get_rec_dtype(**keys)
w, = numpy.where(isvar == True) # noqa
has_tbit = self._check_tbit()
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
colnums = self._extract_colnums()
rows = None
array = self._read_rec_with_var(colnums, rows, dtype,
offsets, isvar, vstorage)
elif has_tbit:
# drop down to read_columns since we can't stuff into a
# contiguous array
colnums = self._extract_colnums()
array = self.read_columns(colnums, **keys)
else:
firstrow = 1 # noqa - not used?
nrows = self._info['nrows']
array = numpy.zeros(nrows, dtype=dtype)
self._FITS.read_as_rec(self._ext+1, 1, nrows, array)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
for colnum, name in enumerate(array.dtype.names):
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | python | def _read_all(self, **keys):
"""
Read all data in the HDU.
parameters
----------
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the upper= keyword from construction.
"""
dtype, offsets, isvar = self.get_rec_dtype(**keys)
w, = numpy.where(isvar == True) # noqa
has_tbit = self._check_tbit()
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
colnums = self._extract_colnums()
rows = None
array = self._read_rec_with_var(colnums, rows, dtype,
offsets, isvar, vstorage)
elif has_tbit:
# drop down to read_columns since we can't stuff into a
# contiguous array
colnums = self._extract_colnums()
array = self.read_columns(colnums, **keys)
else:
firstrow = 1 # noqa - not used?
nrows = self._info['nrows']
array = numpy.zeros(nrows, dtype=dtype)
self._FITS.read_as_rec(self._ext+1, 1, nrows, array)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
for colnum, name in enumerate(array.dtype.names):
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | [
"def",
"_read_all",
"(",
"self",
",",
"*",
"*",
"keys",
")",
":",
"dtype",
",",
"offsets",
",",
"isvar",
"=",
"self",
".",
"get_rec_dtype",
"(",
"*",
"*",
"keys",
")",
"w",
",",
"=",
"numpy",
".",
"where",
"(",
"isvar",
"==",
"True",
")",
"# noqa",
"has_tbit",
"=",
"self",
".",
"_check_tbit",
"(",
")",
"if",
"w",
".",
"size",
">",
"0",
":",
"vstorage",
"=",
"keys",
".",
"get",
"(",
"'vstorage'",
",",
"self",
".",
"_vstorage",
")",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
")",
"rows",
"=",
"None",
"array",
"=",
"self",
".",
"_read_rec_with_var",
"(",
"colnums",
",",
"rows",
",",
"dtype",
",",
"offsets",
",",
"isvar",
",",
"vstorage",
")",
"elif",
"has_tbit",
":",
"# drop down to read_columns since we can't stuff into a",
"# contiguous array",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
")",
"array",
"=",
"self",
".",
"read_columns",
"(",
"colnums",
",",
"*",
"*",
"keys",
")",
"else",
":",
"firstrow",
"=",
"1",
"# noqa - not used?",
"nrows",
"=",
"self",
".",
"_info",
"[",
"'nrows'",
"]",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"nrows",
",",
"dtype",
"=",
"dtype",
")",
"self",
".",
"_FITS",
".",
"read_as_rec",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"1",
",",
"nrows",
",",
"array",
")",
"array",
"=",
"self",
".",
"_maybe_decode_fits_ascii_strings_to_unicode_py3",
"(",
"array",
")",
"for",
"colnum",
",",
"name",
"in",
"enumerate",
"(",
"array",
".",
"dtype",
".",
"names",
")",
":",
"self",
".",
"_rescale_and_convert_field_inplace",
"(",
"array",
",",
"name",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tscale'",
"]",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tzero'",
"]",
")",
"lower",
"=",
"keys",
".",
"get",
"(",
"'lower'",
",",
"False",
")",
"upper",
"=",
"keys",
".",
"get",
"(",
"'upper'",
",",
"False",
")",
"if",
"self",
".",
"lower",
"or",
"lower",
":",
"_names_to_lower_if_recarray",
"(",
"array",
")",
"elif",
"self",
".",
"upper",
"or",
"upper",
":",
"_names_to_upper_if_recarray",
"(",
"array",
")",
"self",
".",
"_maybe_trim_strings",
"(",
"array",
",",
"*",
"*",
"keys",
")",
"return",
"array"
] | Read all data in the HDU.
parameters
----------
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the upper= keyword from construction. | [
"Read",
"all",
"data",
"in",
"the",
"HDU",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L608-L665 |
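_read_all is internal, but its lower=/upper= handling is reachable through read(); a minimal sketch, assuming the same hypothetical file.

import fitsio

with fitsio.FITS('data.fits') as fits:
    arr = fits[1].read(lower=True)   # e.g. 'FLUX' comes back as 'flux'
    print(arr.dtype.names)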
2,132 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU.read_column | def read_column(self, col, **keys):
"""
Read the specified column
Alternatively, you can use slice notation
fits=fitsio.FITS(filename)
fits[ext][colname][:]
fits[ext][colname][2:5]
fits[ext][colname][200:235:2]
fits[ext][colname][rows]
Note, if reading multiple columns, it is more efficient to use
read(columns=) or slice notation with a list of column names.
parameters
----------
col: string/int, required
The column name or number.
rows: optional
An optional set of row numbers to read.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
"""
res = self.read_columns([col], **keys)
colname = res.dtype.names[0]
data = res[colname]
self._maybe_trim_strings(data, **keys)
return data | python | def read_column(self, col, **keys):
"""
Read the specified column
Alternatively, you can use slice notation
fits=fitsio.FITS(filename)
fits[ext][colname][:]
fits[ext][colname][2:5]
fits[ext][colname][200:235:2]
fits[ext][colname][rows]
Note, if reading multiple columns, it is more efficient to use
read(columns=) or slice notation with a list of column names.
parameters
----------
col: string/int, required
The column name or number.
rows: optional
An optional set of row numbers to read.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
"""
res = self.read_columns([col], **keys)
colname = res.dtype.names[0]
data = res[colname]
self._maybe_trim_strings(data, **keys)
return data | [
"def",
"read_column",
"(",
"self",
",",
"col",
",",
"*",
"*",
"keys",
")",
":",
"res",
"=",
"self",
".",
"read_columns",
"(",
"[",
"col",
"]",
",",
"*",
"*",
"keys",
")",
"colname",
"=",
"res",
".",
"dtype",
".",
"names",
"[",
"0",
"]",
"data",
"=",
"res",
"[",
"colname",
"]",
"self",
".",
"_maybe_trim_strings",
"(",
"data",
",",
"*",
"*",
"keys",
")",
"return",
"data"
] | Read the specified column
Alternatively, you can use slice notation
fits=fitsio.FITS(filename)
fits[ext][colname][:]
fits[ext][colname][2:5]
fits[ext][colname][200:235:2]
fits[ext][colname][rows]
Note, if reading multiple columns, it is more efficient to use
read(columns=) or slice notation with a list of column names.
parameters
----------
col: string/int, required
The column name or number.
rows: optional
An optional set of row numbers to read.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details. | [
"Read",
"the",
"specified",
"column"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L667-L697 |
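A sketch of read_column, assuming a column named 'x' exists; note the result is a plain ndarray rather than a recarray.

import fitsio

with fitsio.FITS('data.fits') as fits:
    x = fits[1].read_column('x')                      # all rows
    x_sub = fits[1].read_column('x', rows=[2, 4, 6])  # selected rows only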
2,133 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU.read_rows | def read_rows(self, rows, **keys):
"""
Read the specified rows.
parameters
----------
rows: list,array
A list or array of row indices.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the upper= keyword from construction.
"""
if rows is None:
# we actually want all rows!
return self._read_all()
if self._info['hdutype'] == ASCII_TBL:
keys['rows'] = rows
return self.read(**keys)
rows = self._extract_rows(rows)
dtype, offsets, isvar = self.get_rec_dtype(**keys)
w, = numpy.where(isvar == True) # noqa
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
colnums = self._extract_colnums()
return self._read_rec_with_var(
colnums, rows, dtype, offsets, isvar, vstorage)
else:
array = numpy.zeros(rows.size, dtype=dtype)
self._FITS.read_rows_as_rec(self._ext+1, array, rows)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
for colnum, name in enumerate(array.dtype.names):
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | python | def read_rows(self, rows, **keys):
"""
Read the specified rows.
parameters
----------
rows: list,array
A list or array of row indices.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the upper= keyword from construction.
"""
if rows is None:
# we actually want all rows!
return self._read_all()
if self._info['hdutype'] == ASCII_TBL:
keys['rows'] = rows
return self.read(**keys)
rows = self._extract_rows(rows)
dtype, offsets, isvar = self.get_rec_dtype(**keys)
w, = numpy.where(isvar == True) # noqa
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
colnums = self._extract_colnums()
return self._read_rec_with_var(
colnums, rows, dtype, offsets, isvar, vstorage)
else:
array = numpy.zeros(rows.size, dtype=dtype)
self._FITS.read_rows_as_rec(self._ext+1, array, rows)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
for colnum, name in enumerate(array.dtype.names):
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | [
"def",
"read_rows",
"(",
"self",
",",
"rows",
",",
"*",
"*",
"keys",
")",
":",
"if",
"rows",
"is",
"None",
":",
"# we actually want all rows!",
"return",
"self",
".",
"_read_all",
"(",
")",
"if",
"self",
".",
"_info",
"[",
"'hdutype'",
"]",
"==",
"ASCII_TBL",
":",
"keys",
"[",
"'rows'",
"]",
"=",
"rows",
"return",
"self",
".",
"read",
"(",
"*",
"*",
"keys",
")",
"rows",
"=",
"self",
".",
"_extract_rows",
"(",
"rows",
")",
"dtype",
",",
"offsets",
",",
"isvar",
"=",
"self",
".",
"get_rec_dtype",
"(",
"*",
"*",
"keys",
")",
"w",
",",
"=",
"numpy",
".",
"where",
"(",
"isvar",
"==",
"True",
")",
"# noqa",
"if",
"w",
".",
"size",
">",
"0",
":",
"vstorage",
"=",
"keys",
".",
"get",
"(",
"'vstorage'",
",",
"self",
".",
"_vstorage",
")",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
")",
"return",
"self",
".",
"_read_rec_with_var",
"(",
"colnums",
",",
"rows",
",",
"dtype",
",",
"offsets",
",",
"isvar",
",",
"vstorage",
")",
"else",
":",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"rows",
".",
"size",
",",
"dtype",
"=",
"dtype",
")",
"self",
".",
"_FITS",
".",
"read_rows_as_rec",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"array",
",",
"rows",
")",
"array",
"=",
"self",
".",
"_maybe_decode_fits_ascii_strings_to_unicode_py3",
"(",
"array",
")",
"for",
"colnum",
",",
"name",
"in",
"enumerate",
"(",
"array",
".",
"dtype",
".",
"names",
")",
":",
"self",
".",
"_rescale_and_convert_field_inplace",
"(",
"array",
",",
"name",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tscale'",
"]",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tzero'",
"]",
")",
"lower",
"=",
"keys",
".",
"get",
"(",
"'lower'",
",",
"False",
")",
"upper",
"=",
"keys",
".",
"get",
"(",
"'upper'",
",",
"False",
")",
"if",
"self",
".",
"lower",
"or",
"lower",
":",
"_names_to_lower_if_recarray",
"(",
"array",
")",
"elif",
"self",
".",
"upper",
"or",
"upper",
":",
"_names_to_upper_if_recarray",
"(",
"array",
")",
"self",
".",
"_maybe_trim_strings",
"(",
"array",
",",
"*",
"*",
"keys",
")",
"return",
"array"
] | Read the specified rows.
parameters
----------
rows: list,array
A list or array of row indices.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the upper= keyword from construction. | [
"Read",
"the",
"specified",
"rows",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L699-L756 |
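A sketch of read_rows with an explicit row array; file and extension are assumptions.

import fitsio
import numpy as np

with fitsio.FITS('data.fits') as fits:
    rows = np.array([1, 3, 5])
    subset = fits[1].read_rows(rows)   # recarray holding just those rows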
2,134 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU.read_columns | def read_columns(self, columns, **keys):
"""
read a subset of columns from this binary table HDU
By default, all rows are read. Send rows= to select subsets of the
data. Table data are read into a recarray for multiple columns,
plain array for a single column.
parameters
----------
columns: list/array
An optional set of columns to read from table HDUs. Can be string
or number. If a sequence, a recarray is always returned. If a
scalar, an ordinary array is returned.
rows: list/array, optional
An optional list of rows to read from table HDUs. Default is to
read all.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the upper= keyword from construction.
"""
if self._info['hdutype'] == ASCII_TBL:
keys['columns'] = columns
return self.read(**keys)
rows = keys.get('rows', None)
# if columns is None, returns all. Guaranteed to be unique and sorted
colnums = self._extract_colnums(columns)
if isinstance(colnums, int):
# scalar sent, don't read as a recarray
return self.read_column(columns, **keys)
# if rows is None still returns None, and is correctly interpreted
# by the reader to mean all
rows = self._extract_rows(rows)
# this is the full dtype for all columns
dtype, offsets, isvar = self.get_rec_dtype(colnums=colnums, **keys)
w, = numpy.where(isvar == True) # noqa
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
array = self._read_rec_with_var(
colnums, rows, dtype, offsets, isvar, vstorage)
else:
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
array = numpy.zeros(nrows, dtype=dtype)
colnumsp = colnums[:].copy()
colnumsp[:] += 1
self._FITS.read_columns_as_rec(self._ext+1, colnumsp, array, rows)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
for i in xrange(colnums.size):
colnum = int(colnums[i])
name = array.dtype.names[i]
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
if (self._check_tbit(colnums=colnums)):
array = self._fix_tbit_dtype(array, colnums)
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | python | def read_columns(self, columns, **keys):
"""
read a subset of columns from this binary table HDU
By default, all rows are read. Send rows= to select subsets of the
data. Table data are read into a recarray for multiple columns,
plain array for a single column.
parameters
----------
columns: list/array
An optional set of columns to read from table HDUs. Can be string
or number. If a sequence, a recarray is always returned. If a
scalar, an ordinary array is returned.
rows: list/array, optional
An optional list of rows to read from table HDUs. Default is to
read all.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the upper= keyword from construction.
"""
if self._info['hdutype'] == ASCII_TBL:
keys['columns'] = columns
return self.read(**keys)
rows = keys.get('rows', None)
# if columns is None, returns all. Guaranteed to be unique and sorted
colnums = self._extract_colnums(columns)
if isinstance(colnums, int):
# scalar sent, don't read as a recarray
return self.read_column(columns, **keys)
# if rows is None still returns None, and is correctly interpreted
# by the reader to mean all
rows = self._extract_rows(rows)
# this is the full dtype for all columns
dtype, offsets, isvar = self.get_rec_dtype(colnums=colnums, **keys)
w, = numpy.where(isvar == True) # noqa
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
array = self._read_rec_with_var(
colnums, rows, dtype, offsets, isvar, vstorage)
else:
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
array = numpy.zeros(nrows, dtype=dtype)
colnumsp = colnums[:].copy()
colnumsp[:] += 1
self._FITS.read_columns_as_rec(self._ext+1, colnumsp, array, rows)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
for i in xrange(colnums.size):
colnum = int(colnums[i])
name = array.dtype.names[i]
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
if (self._check_tbit(colnums=colnums)):
array = self._fix_tbit_dtype(array, colnums)
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | [
"def",
"read_columns",
"(",
"self",
",",
"columns",
",",
"*",
"*",
"keys",
")",
":",
"if",
"self",
".",
"_info",
"[",
"'hdutype'",
"]",
"==",
"ASCII_TBL",
":",
"keys",
"[",
"'columns'",
"]",
"=",
"columns",
"return",
"self",
".",
"read",
"(",
"*",
"*",
"keys",
")",
"rows",
"=",
"keys",
".",
"get",
"(",
"'rows'",
",",
"None",
")",
"# if columns is None, returns all. Guaranteed to be unique and sorted",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
"columns",
")",
"if",
"isinstance",
"(",
"colnums",
",",
"int",
")",
":",
"# scalar sent, don't read as a recarray",
"return",
"self",
".",
"read_column",
"(",
"columns",
",",
"*",
"*",
"keys",
")",
"# if rows is None still returns None, and is correctly interpreted",
"# by the reader to mean all",
"rows",
"=",
"self",
".",
"_extract_rows",
"(",
"rows",
")",
"# this is the full dtype for all columns",
"dtype",
",",
"offsets",
",",
"isvar",
"=",
"self",
".",
"get_rec_dtype",
"(",
"colnums",
"=",
"colnums",
",",
"*",
"*",
"keys",
")",
"w",
",",
"=",
"numpy",
".",
"where",
"(",
"isvar",
"==",
"True",
")",
"# noqa",
"if",
"w",
".",
"size",
">",
"0",
":",
"vstorage",
"=",
"keys",
".",
"get",
"(",
"'vstorage'",
",",
"self",
".",
"_vstorage",
")",
"array",
"=",
"self",
".",
"_read_rec_with_var",
"(",
"colnums",
",",
"rows",
",",
"dtype",
",",
"offsets",
",",
"isvar",
",",
"vstorage",
")",
"else",
":",
"if",
"rows",
"is",
"None",
":",
"nrows",
"=",
"self",
".",
"_info",
"[",
"'nrows'",
"]",
"else",
":",
"nrows",
"=",
"rows",
".",
"size",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"nrows",
",",
"dtype",
"=",
"dtype",
")",
"colnumsp",
"=",
"colnums",
"[",
":",
"]",
".",
"copy",
"(",
")",
"colnumsp",
"[",
":",
"]",
"+=",
"1",
"self",
".",
"_FITS",
".",
"read_columns_as_rec",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"colnumsp",
",",
"array",
",",
"rows",
")",
"array",
"=",
"self",
".",
"_maybe_decode_fits_ascii_strings_to_unicode_py3",
"(",
"array",
")",
"for",
"i",
"in",
"xrange",
"(",
"colnums",
".",
"size",
")",
":",
"colnum",
"=",
"int",
"(",
"colnums",
"[",
"i",
"]",
")",
"name",
"=",
"array",
".",
"dtype",
".",
"names",
"[",
"i",
"]",
"self",
".",
"_rescale_and_convert_field_inplace",
"(",
"array",
",",
"name",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tscale'",
"]",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tzero'",
"]",
")",
"if",
"(",
"self",
".",
"_check_tbit",
"(",
"colnums",
"=",
"colnums",
")",
")",
":",
"array",
"=",
"self",
".",
"_fix_tbit_dtype",
"(",
"array",
",",
"colnums",
")",
"lower",
"=",
"keys",
".",
"get",
"(",
"'lower'",
",",
"False",
")",
"upper",
"=",
"keys",
".",
"get",
"(",
"'upper'",
",",
"False",
")",
"if",
"self",
".",
"lower",
"or",
"lower",
":",
"_names_to_lower_if_recarray",
"(",
"array",
")",
"elif",
"self",
".",
"upper",
"or",
"upper",
":",
"_names_to_upper_if_recarray",
"(",
"array",
")",
"self",
".",
"_maybe_trim_strings",
"(",
"array",
",",
"*",
"*",
"keys",
")",
"return",
"array"
] | read a subset of columns from this binary table HDU
By default, all rows are read. Send rows= to select subsets of the
data. Table data are read into a recarray for multiple columns,
plain array for a single column.
parameters
----------
columns: list/array
An optional set of columns to read from table HDUs. Can be string
or number. If a sequence, a recarray is always returned. If a
scalar, an ordinary array is returned.
rows: list/array, optional
An optional list of rows to read from table HDUs. Default is to
read all.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the upper= keyword from construction. | [
"read",
"a",
"subset",
"of",
"columns",
"from",
"this",
"binary",
"table",
"HDU"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L758-L845 |
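A sketch of read_columns; the column names are hypothetical. A scalar column argument falls back to read_column and returns a plain array.

import fitsio

with fitsio.FITS('data.fits') as fits:
    rec = fits[1].read_columns(['x', 'flux'], rows=[0, 1, 2])  # recarray
    x = fits[1].read_columns('x')                              # plain array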
2,135 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU.read_slice | def read_slice(self, firstrow, lastrow, step=1, **keys):
"""
Read the specified row slice from a table.
Read all rows between firstrow and lastrow (non-inclusive, as per
python slice notation). Note you must use slice notation for
images, e.g. f[ext][20:30, 40:50]
parameters
----------
firstrow: integer
The first row to read
lastrow: integer
The last row to read, non-inclusive. This follows the python list
slice convention that one does not include the last element.
step: integer, optional
Step between rows, default 1. e.g., if step is 2, skip every other
row.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the upper= keyword from construction.
"""
if self._info['hdutype'] == ASCII_TBL:
rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
keys['rows'] = rows
return self.read_ascii(**keys)
step = keys.get('step', step)
if self._info['hdutype'] == IMAGE_HDU:
raise ValueError("slices currently only supported for tables")
maxrow = self._info['nrows']
if firstrow < 0 or lastrow > maxrow:
raise ValueError(
"slice must specify a sub-range of [%d,%d]" % (0, maxrow))
dtype, offsets, isvar = self.get_rec_dtype(**keys)
w, = numpy.where(isvar == True) # noqa
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
colnums = self._extract_colnums()
array = self._read_rec_with_var(
colnums, rows, dtype, offsets, isvar, vstorage)
else:
if step != 1:
rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
array = self.read(rows=rows)
else:
# no +1 because lastrow is non-inclusive
nrows = lastrow - firstrow
array = numpy.zeros(nrows, dtype=dtype)
# only first needs to be +1. This is because the c code is
# inclusive
self._FITS.read_as_rec(self._ext+1, firstrow+1, lastrow, array)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(
array)
for colnum, name in enumerate(array.dtype.names):
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | python | def read_slice(self, firstrow, lastrow, step=1, **keys):
"""
Read the specified row slice from a table.
Read all rows between firstrow and lastrow (non-inclusive, as per
python slice notation). Note you must use slice notation for
images, e.g. f[ext][20:30, 40:50]
parameters
----------
firstrow: integer
The first row to read
lastrow: integer
The last row to read, non-inclusive. This follows the python list
slice convention that one does not include the last element.
step: integer, optional
Step between rows, default 1. e.g., if step is 2, skip every other
row.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the upper= keyword from construction.
"""
if self._info['hdutype'] == ASCII_TBL:
rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
keys['rows'] = rows
return self.read_ascii(**keys)
step = keys.get('step', step)
if self._info['hdutype'] == IMAGE_HDU:
raise ValueError("slices currently only supported for tables")
maxrow = self._info['nrows']
if firstrow < 0 or lastrow > maxrow:
raise ValueError(
"slice must specify a sub-range of [%d,%d]" % (0, maxrow))
dtype, offsets, isvar = self.get_rec_dtype(**keys)
w, = numpy.where(isvar == True) # noqa
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
colnums = self._extract_colnums()
array = self._read_rec_with_var(
colnums, rows, dtype, offsets, isvar, vstorage)
else:
if step != 1:
rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
array = self.read(rows=rows)
else:
# no +1 because lastrow is non-inclusive
nrows = lastrow - firstrow
array = numpy.zeros(nrows, dtype=dtype)
# only first needs to be +1. This is because the c code is
# inclusive
self._FITS.read_as_rec(self._ext+1, firstrow+1, lastrow, array)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(
array)
for colnum, name in enumerate(array.dtype.names):
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | [
"def",
"read_slice",
"(",
"self",
",",
"firstrow",
",",
"lastrow",
",",
"step",
"=",
"1",
",",
"*",
"*",
"keys",
")",
":",
"if",
"self",
".",
"_info",
"[",
"'hdutype'",
"]",
"==",
"ASCII_TBL",
":",
"rows",
"=",
"numpy",
".",
"arange",
"(",
"firstrow",
",",
"lastrow",
",",
"step",
",",
"dtype",
"=",
"'i8'",
")",
"keys",
"[",
"'rows'",
"]",
"=",
"rows",
"return",
"self",
".",
"read_ascii",
"(",
"*",
"*",
"keys",
")",
"step",
"=",
"keys",
".",
"get",
"(",
"'step'",
",",
"1",
")",
"if",
"self",
".",
"_info",
"[",
"'hdutype'",
"]",
"==",
"IMAGE_HDU",
":",
"raise",
"ValueError",
"(",
"\"slices currently only supported for tables\"",
")",
"maxrow",
"=",
"self",
".",
"_info",
"[",
"'nrows'",
"]",
"if",
"firstrow",
"<",
"0",
"or",
"lastrow",
">",
"maxrow",
":",
"raise",
"ValueError",
"(",
"\"slice must specify a sub-range of [%d,%d]\"",
"%",
"(",
"0",
",",
"maxrow",
")",
")",
"dtype",
",",
"offsets",
",",
"isvar",
"=",
"self",
".",
"get_rec_dtype",
"(",
"*",
"*",
"keys",
")",
"w",
",",
"=",
"numpy",
".",
"where",
"(",
"isvar",
"==",
"True",
")",
"# noqa",
"if",
"w",
".",
"size",
">",
"0",
":",
"vstorage",
"=",
"keys",
".",
"get",
"(",
"'vstorage'",
",",
"self",
".",
"_vstorage",
")",
"rows",
"=",
"numpy",
".",
"arange",
"(",
"firstrow",
",",
"lastrow",
",",
"step",
",",
"dtype",
"=",
"'i8'",
")",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
")",
"array",
"=",
"self",
".",
"_read_rec_with_var",
"(",
"colnums",
",",
"rows",
",",
"dtype",
",",
"offsets",
",",
"isvar",
",",
"vstorage",
")",
"else",
":",
"if",
"step",
"!=",
"1",
":",
"rows",
"=",
"numpy",
".",
"arange",
"(",
"firstrow",
",",
"lastrow",
",",
"step",
",",
"dtype",
"=",
"'i8'",
")",
"array",
"=",
"self",
".",
"read",
"(",
"rows",
"=",
"rows",
")",
"else",
":",
"# no +1 because lastrow is non-inclusive",
"nrows",
"=",
"lastrow",
"-",
"firstrow",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"nrows",
",",
"dtype",
"=",
"dtype",
")",
"# only first needs to be +1. This is becuase the c code is",
"# inclusive",
"self",
".",
"_FITS",
".",
"read_as_rec",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"firstrow",
"+",
"1",
",",
"lastrow",
",",
"array",
")",
"array",
"=",
"self",
".",
"_maybe_decode_fits_ascii_strings_to_unicode_py3",
"(",
"array",
")",
"for",
"colnum",
",",
"name",
"in",
"enumerate",
"(",
"array",
".",
"dtype",
".",
"names",
")",
":",
"self",
".",
"_rescale_and_convert_field_inplace",
"(",
"array",
",",
"name",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tscale'",
"]",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tzero'",
"]",
")",
"lower",
"=",
"keys",
".",
"get",
"(",
"'lower'",
",",
"False",
")",
"upper",
"=",
"keys",
".",
"get",
"(",
"'upper'",
",",
"False",
")",
"if",
"self",
".",
"lower",
"or",
"lower",
":",
"_names_to_lower_if_recarray",
"(",
"array",
")",
"elif",
"self",
".",
"upper",
"or",
"upper",
":",
"_names_to_upper_if_recarray",
"(",
"array",
")",
"self",
".",
"_maybe_trim_strings",
"(",
"array",
",",
"*",
"*",
"keys",
")",
"return",
"array"
] | Read the specified row slice from a table.
Read all rows between firstrow and lastrow (non-inclusive, as per
python slice notation). Note you must use slice notation for
images, e.g. f[ext][20:30, 40:50]
parameters
----------
firstrow: integer
The first row to read
lastrow: integer
The last row to read, non-inclusive. This follows the python list
slice convention that one does not include the last element.
step: integer, optional
Step between rows, default 1. e.g., if step is 2, skip every other
row.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the upper= keyword from construction. | [
"Read",
"the",
"specified",
"row",
"slice",
"from",
"a",
"table",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L847-L931 |
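A sketch of read_slice, equivalent to the bracket notation shown in the docstring; the row bounds are illustrative.

import fitsio

with fitsio.FITS('data.fits') as fits:
    chunk = fits[1].read_slice(200, 235)            # rows 200..234, like fits[1][200:235]
    thinned = fits[1].read_slice(200, 235, step=2)  # like fits[1][200:235:2]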
2,136 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU.get_rec_dtype | def get_rec_dtype(self, **keys):
"""
Get the dtype for the specified columns
parameters
----------
colnums: integer array
The column numbers, 0 offset
vstorage: string, optional
See docs in read_columns
"""
colnums = keys.get('colnums', None)
vstorage = keys.get('vstorage', self._vstorage)
if colnums is None:
colnums = self._extract_colnums()
descr = []
isvararray = numpy.zeros(len(colnums), dtype=bool)
for i, colnum in enumerate(colnums):
dt, isvar = self.get_rec_column_descr(colnum, vstorage)
descr.append(dt)
isvararray[i] = isvar
dtype = numpy.dtype(descr)
offsets = numpy.zeros(len(colnums), dtype='i8')
for i, n in enumerate(dtype.names):
offsets[i] = dtype.fields[n][1]
return dtype, offsets, isvararray | python | def get_rec_dtype(self, **keys):
"""
Get the dtype for the specified columns
parameters
----------
colnums: integer array
The column numbers, 0 offset
vstorage: string, optional
See docs in read_columns
"""
colnums = keys.get('colnums', None)
vstorage = keys.get('vstorage', self._vstorage)
if colnums is None:
colnums = self._extract_colnums()
descr = []
isvararray = numpy.zeros(len(colnums), dtype=bool)
for i, colnum in enumerate(colnums):
dt, isvar = self.get_rec_column_descr(colnum, vstorage)
descr.append(dt)
isvararray[i] = isvar
dtype = numpy.dtype(descr)
offsets = numpy.zeros(len(colnums), dtype='i8')
for i, n in enumerate(dtype.names):
offsets[i] = dtype.fields[n][1]
return dtype, offsets, isvararray | [
"def",
"get_rec_dtype",
"(",
"self",
",",
"*",
"*",
"keys",
")",
":",
"colnums",
"=",
"keys",
".",
"get",
"(",
"'colnums'",
",",
"None",
")",
"vstorage",
"=",
"keys",
".",
"get",
"(",
"'vstorage'",
",",
"self",
".",
"_vstorage",
")",
"if",
"colnums",
"is",
"None",
":",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
")",
"descr",
"=",
"[",
"]",
"isvararray",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"colnums",
")",
",",
"dtype",
"=",
"numpy",
".",
"bool",
")",
"for",
"i",
",",
"colnum",
"in",
"enumerate",
"(",
"colnums",
")",
":",
"dt",
",",
"isvar",
"=",
"self",
".",
"get_rec_column_descr",
"(",
"colnum",
",",
"vstorage",
")",
"descr",
".",
"append",
"(",
"dt",
")",
"isvararray",
"[",
"i",
"]",
"=",
"isvar",
"dtype",
"=",
"numpy",
".",
"dtype",
"(",
"descr",
")",
"offsets",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"colnums",
")",
",",
"dtype",
"=",
"'i8'",
")",
"for",
"i",
",",
"n",
"in",
"enumerate",
"(",
"dtype",
".",
"names",
")",
":",
"offsets",
"[",
"i",
"]",
"=",
"dtype",
".",
"fields",
"[",
"n",
"]",
"[",
"1",
"]",
"return",
"dtype",
",",
"offsets",
",",
"isvararray"
] | Get the dtype for the specified columns
parameters
----------
colnums: integer array
The column numbers, 0 offset
vstorage: string, optional
See docs in read_columns | [
"Get",
"the",
"dtype",
"for",
"the",
"specified",
"columns"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L933-L961 |
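A sketch of inspecting the record dtype a table would be read into; the printed shapes depend entirely on the file.

import fitsio

with fitsio.FITS('data.fits') as fits:
    dtype, offsets, isvar = fits[1].get_rec_dtype()
    print(dtype)          # numpy record dtype covering all columns
    print(offsets)        # byte offset of each field within a record
    print(isvar.any())    # True if any column is variable-length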
2,137 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._check_tbit | def _check_tbit(self, **keys):
"""
Check if one of the columns is a TBIT column
parameters
----------
colnums: integer array, optional
"""
colnums = keys.get('colnums', None)
if colnums is None:
colnums = self._extract_colnums()
has_tbit = False
for i, colnum in enumerate(colnums):
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
if (istbit):
has_tbit = True
break
return has_tbit | python | def _check_tbit(self, **keys):
"""
Check if one of the columns is a TBIT column
parameters
----------
colnums: integer array, optional
"""
colnums = keys.get('colnums', None)
if colnums is None:
colnums = self._extract_colnums()
has_tbit = False
for i, colnum in enumerate(colnums):
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
if (istbit):
has_tbit = True
break
return has_tbit | [
"def",
"_check_tbit",
"(",
"self",
",",
"*",
"*",
"keys",
")",
":",
"colnums",
"=",
"keys",
".",
"get",
"(",
"'colnums'",
",",
"None",
")",
"if",
"colnums",
"is",
"None",
":",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
")",
"has_tbit",
"=",
"False",
"for",
"i",
",",
"colnum",
"in",
"enumerate",
"(",
"colnums",
")",
":",
"npy_type",
",",
"isvar",
",",
"istbit",
"=",
"self",
".",
"_get_tbl_numpy_dtype",
"(",
"colnum",
")",
"if",
"(",
"istbit",
")",
":",
"has_tbit",
"=",
"True",
"break",
"return",
"has_tbit"
] | Check if one of the columns is a TBIT column
parameters
----------
colnums: integer array, optional | [
"Check",
"if",
"one",
"of",
"the",
"columns",
"is",
"a",
"TBIT",
"column"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L963-L983 |
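_check_tbit is private; the sketch below only illustrates the call pattern on a hypothetical HDU.

import fitsio

with fitsio.FITS('data.fits') as fits:
    if fits[1]._check_tbit():   # any TBIT ('X') columns in this table?
        print('bit columns present; full reads are routed through read_columns')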
2,138 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._fix_tbit_dtype | def _fix_tbit_dtype(self, array, colnums):
"""
If necessary, patch up the TBIT to convert to bool array
parameters
----------
array: record array
colnums: column numbers for lookup
"""
descr = array.dtype.descr
for i, colnum in enumerate(colnums):
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
if (istbit):
coldescr = list(descr[i])
coldescr[1] = '?'
descr[i] = tuple(coldescr)
return array.view(descr) | python | def _fix_tbit_dtype(self, array, colnums):
"""
If necessary, patch up the TBIT to convert to bool array
parameters
----------
array: record array
colnums: column numbers for lookup
"""
descr = array.dtype.descr
for i, colnum in enumerate(colnums):
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
if (istbit):
coldescr = list(descr[i])
coldescr[1] = '?'
descr[i] = tuple(coldescr)
return array.view(descr) | [
"def",
"_fix_tbit_dtype",
"(",
"self",
",",
"array",
",",
"colnums",
")",
":",
"descr",
"=",
"array",
".",
"dtype",
".",
"descr",
"for",
"i",
",",
"colnum",
"in",
"enumerate",
"(",
"colnums",
")",
":",
"npy_type",
",",
"isvar",
",",
"istbit",
"=",
"self",
".",
"_get_tbl_numpy_dtype",
"(",
"colnum",
")",
"if",
"(",
"istbit",
")",
":",
"coldescr",
"=",
"list",
"(",
"descr",
"[",
"i",
"]",
")",
"coldescr",
"[",
"1",
"]",
"=",
"'?'",
"descr",
"[",
"i",
"]",
"=",
"tuple",
"(",
"coldescr",
")",
"return",
"array",
".",
"view",
"(",
"descr",
")"
] | If necessary, patch up the TBIT to convert to bool array
parameters
----------
array: record array
colnums: column numbers for lookup | [
"If",
"necessary",
"patch",
"up",
"the",
"TBIT",
"to",
"convert",
"to",
"bool",
"array"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L985-L1002 |
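The dtype-view trick used by _fix_tbit_dtype, shown standalone with plain numpy; the field name is made up.

import numpy as np

arr = np.zeros(3, dtype=[('flags', 'u1')])
descr = arr.dtype.descr
descr[0] = (descr[0][0], '?')   # retype the 1-byte field as bool
bool_view = arr.view(descr)     # same buffer reinterpreted, no copy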
2,139 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._get_simple_dtype_and_shape | def _get_simple_dtype_and_shape(self, colnum, rows=None):
"""
When reading a single column, we want the basic data
type and the shape of the array.
for scalar columns, shape is just nrows, otherwise
it is (nrows, dim1, dim2)
Note if rows= is sent and only a single row is requested,
the shape will be (dim1,dim2)
"""
# basic datatype
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
info = self._info['colinfo'][colnum]
name = info['name']
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
shape = None
tdim = info['tdim']
shape = _tdim2shape(tdim, name, is_string=(npy_type[0] == 'S'))
if shape is not None:
if nrows > 1:
if not isinstance(shape, tuple):
# vector
shape = (nrows, shape)
else:
# multi-dimensional
shape = tuple([nrows] + list(shape))
else:
# scalar
shape = nrows
return npy_type, shape | python | def _get_simple_dtype_and_shape(self, colnum, rows=None):
"""
When reading a single column, we want the basic data
type and the shape of the array.
for scalar columns, shape is just nrows, otherwise
it is (nrows, dim1, dim2)
Note if rows= is sent and only a single row is requested,
the shape will be (dim1,dim2)
"""
# basic datatype
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
info = self._info['colinfo'][colnum]
name = info['name']
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
shape = None
tdim = info['tdim']
shape = _tdim2shape(tdim, name, is_string=(npy_type[0] == 'S'))
if shape is not None:
if nrows > 1:
if not isinstance(shape, tuple):
# vector
shape = (nrows, shape)
else:
# multi-dimensional
shape = tuple([nrows] + list(shape))
else:
# scalar
shape = nrows
return npy_type, shape | [
"def",
"_get_simple_dtype_and_shape",
"(",
"self",
",",
"colnum",
",",
"rows",
"=",
"None",
")",
":",
"# basic datatype",
"npy_type",
",",
"isvar",
",",
"istbit",
"=",
"self",
".",
"_get_tbl_numpy_dtype",
"(",
"colnum",
")",
"info",
"=",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"name",
"=",
"info",
"[",
"'name'",
"]",
"if",
"rows",
"is",
"None",
":",
"nrows",
"=",
"self",
".",
"_info",
"[",
"'nrows'",
"]",
"else",
":",
"nrows",
"=",
"rows",
".",
"size",
"shape",
"=",
"None",
"tdim",
"=",
"info",
"[",
"'tdim'",
"]",
"shape",
"=",
"_tdim2shape",
"(",
"tdim",
",",
"name",
",",
"is_string",
"=",
"(",
"npy_type",
"[",
"0",
"]",
"==",
"'S'",
")",
")",
"if",
"shape",
"is",
"not",
"None",
":",
"if",
"nrows",
">",
"1",
":",
"if",
"not",
"isinstance",
"(",
"shape",
",",
"tuple",
")",
":",
"# vector",
"shape",
"=",
"(",
"nrows",
",",
"shape",
")",
"else",
":",
"# multi-dimensional",
"shape",
"=",
"tuple",
"(",
"[",
"nrows",
"]",
"+",
"list",
"(",
"shape",
")",
")",
"else",
":",
"# scalar",
"shape",
"=",
"nrows",
"return",
"npy_type",
",",
"shape"
] | When reading a single column, we want the basic data
type and the shape of the array.
for scalar columns, shape is just nrows, otherwise
it is (nrows, dim1, dim2)
Note if rows= is sent and only a single row is requested,
the shape will be (dim1,dim2) | [
"When",
"reading",
"a",
"single",
"column",
"we",
"want",
"the",
"basic",
"data",
"type",
"and",
"the",
"shape",
"of",
"the",
"array",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1004-L1041 |
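The shape rules from the docstring above, spelled out with plain tuples; all values are illustrative.

nrows = 10
tdim_shape = (3, 4)                         # per-row dims decoded from TDIM
multi = tuple([nrows] + list(tdim_shape))   # (10, 3, 4): the multi-dimensional branch
vector = (nrows, 5)                         # a length-5 vector column
scalar = nrows                              # scalar columns: shape is just nrows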
2,140 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU.get_rec_column_descr | def get_rec_column_descr(self, colnum, vstorage):
"""
Get a descriptor entry for the specified column.
parameters
----------
colnum: integer
The column number, 0 offset
vstorage: string
See docs in read_columns
"""
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
name = self._info['colinfo'][colnum]['name']
if isvar:
if vstorage == 'object':
descr = (name, 'O')
else:
tform = self._info['colinfo'][colnum]['tform']
max_size = _extract_vararray_max(tform)
if max_size <= 0:
name = self._info['colinfo'][colnum]['name']
mess = 'Will read as an object field'
if max_size < 0:
mess = "Column '%s': No maximum size: '%s'. %s" % (name, tform, mess)
warnings.warn(mess, FITSRuntimeWarning)
else:
mess = "Column '%s': Max size is zero: '%s'. %s" % (name, tform, mess)
warnings.warn(mess, FITSRuntimeWarning)
# we are forced to read this as an object array
return self.get_rec_column_descr(colnum, 'object')
if npy_type[0] == 'S':
# variable length string columns cannot
# themselves be arrays I don't think
npy_type = 'S%d' % max_size
descr = (name, npy_type)
elif npy_type[0] == 'U':
# variable length string columns cannot
# themselves be arrays I don't think
npy_type = 'U%d' % max_size
descr = (name, npy_type)
else:
descr = (name, npy_type, max_size)
else:
tdim = self._info['colinfo'][colnum]['tdim']
shape = _tdim2shape(
tdim, name,
is_string=(npy_type[0] == 'S' or npy_type[0] == 'U'))
if shape is not None:
descr = (name, npy_type, shape)
else:
descr = (name, npy_type)
return descr, isvar | python | def get_rec_column_descr(self, colnum, vstorage):
"""
Get a descriptor entry for the specified column.
parameters
----------
colnum: integer
The column number, 0 offset
vstorage: string
See docs in read_columns
"""
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
name = self._info['colinfo'][colnum]['name']
if isvar:
if vstorage == 'object':
descr = (name, 'O')
else:
tform = self._info['colinfo'][colnum]['tform']
max_size = _extract_vararray_max(tform)
if max_size <= 0:
name = self._info['colinfo'][colnum]['name']
mess = 'Will read as an object field'
if max_size < 0:
mess = "Column '%s': No maximum size: '%s'. %s" % (name, tform, mess)
warnings.warn(mess, FITSRuntimeWarning)
else:
mess = "Column '%s': Max size is zero: '%s'. %s" % (name, tform, mess)
warnings.warn(mess, FITSRuntimeWarning)
# we are forced to read this as an object array
return self.get_rec_column_descr(colnum, 'object')
if npy_type[0] == 'S':
# variable length string columns cannot
# themselves be arrays I don't think
npy_type = 'S%d' % max_size
descr = (name, npy_type)
elif npy_type[0] == 'U':
# variable length string columns cannot
# themselves be arrays I don't think
npy_type = 'U%d' % max_size
descr = (name, npy_type)
else:
descr = (name, npy_type, max_size)
else:
tdim = self._info['colinfo'][colnum]['tdim']
shape = _tdim2shape(
tdim, name,
is_string=(npy_type[0] == 'S' or npy_type[0] == 'U'))
if shape is not None:
descr = (name, npy_type, shape)
else:
descr = (name, npy_type)
return descr, isvar | [
"def",
"get_rec_column_descr",
"(",
"self",
",",
"colnum",
",",
"vstorage",
")",
":",
"npy_type",
",",
"isvar",
",",
"istbit",
"=",
"self",
".",
"_get_tbl_numpy_dtype",
"(",
"colnum",
")",
"name",
"=",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'name'",
"]",
"if",
"isvar",
":",
"if",
"vstorage",
"==",
"'object'",
":",
"descr",
"=",
"(",
"name",
",",
"'O'",
")",
"else",
":",
"tform",
"=",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tform'",
"]",
"max_size",
"=",
"_extract_vararray_max",
"(",
"tform",
")",
"if",
"max_size",
"<=",
"0",
":",
"name",
"=",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'name'",
"]",
"mess",
"=",
"'Will read as an object field'",
"if",
"max_size",
"<",
"0",
":",
"mess",
"=",
"\"Column '%s': No maximum size: '%s'. %s\"",
"mess",
"=",
"mess",
"%",
"(",
"name",
",",
"tform",
",",
"mess",
")",
"warnings",
".",
"warn",
"(",
"mess",
",",
"FITSRuntimeWarning",
")",
"else",
":",
"mess",
"=",
"\"Column '%s': Max size is zero: '%s'. %s\"",
"mess",
"=",
"mess",
"%",
"(",
"name",
",",
"tform",
",",
"mess",
")",
"warnings",
".",
"warn",
"(",
"mess",
",",
"FITSRuntimeWarning",
")",
"# we are forced to read this as an object array",
"return",
"self",
".",
"get_rec_column_descr",
"(",
"colnum",
",",
"'object'",
")",
"if",
"npy_type",
"[",
"0",
"]",
"==",
"'S'",
":",
"# variable length string columns cannot",
"# themselves be arrays I don't think",
"npy_type",
"=",
"'S%d'",
"%",
"max_size",
"descr",
"=",
"(",
"name",
",",
"npy_type",
")",
"elif",
"npy_type",
"[",
"0",
"]",
"==",
"'U'",
":",
"# variable length string columns cannot",
"# themselves be arrays I don't think",
"npy_type",
"=",
"'U%d'",
"%",
"max_size",
"descr",
"=",
"(",
"name",
",",
"npy_type",
")",
"else",
":",
"descr",
"=",
"(",
"name",
",",
"npy_type",
",",
"max_size",
")",
"else",
":",
"tdim",
"=",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tdim'",
"]",
"shape",
"=",
"_tdim2shape",
"(",
"tdim",
",",
"name",
",",
"is_string",
"=",
"(",
"npy_type",
"[",
"0",
"]",
"==",
"'S'",
"or",
"npy_type",
"[",
"0",
"]",
"==",
"'U'",
")",
")",
"if",
"shape",
"is",
"not",
"None",
":",
"descr",
"=",
"(",
"name",
",",
"npy_type",
",",
"shape",
")",
"else",
":",
"descr",
"=",
"(",
"name",
",",
"npy_type",
")",
"return",
"descr",
",",
"isvar"
] | Get a descriptor entry for the specified column.
parameters
----------
colnum: integer
The column number, 0 offset
vstorage: string
See docs in read_columns | [
"Get",
"a",
"descriptor",
"entry",
"for",
"the",
"specified",
"column",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1043-L1100 |
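A sketch of asking for a single column descriptor; the column number and the sample output are assumptions.

import fitsio

with fitsio.FITS('data.fits') as fits:
    descr, isvar = fits[1].get_rec_column_descr(0, 'fixed')
    print(descr, isvar)   # e.g. ('x', 'f8') False for a fixed-length scalar column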
2,141 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._read_rec_with_var | def _read_rec_with_var(
self, colnums, rows, dtype, offsets, isvar, vstorage):
"""
Read columns from a table into a rec array, including variable length
columns. This is special because, for efficiency, it involves reading
from the main table as normal but skipping the columns in the array
that are variable, then reading the variable length columns,
accounting for strides appropriately.
row and column numbers should be checked before calling this function
"""
colnumsp = colnums+1
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
array = numpy.zeros(nrows, dtype=dtype)
# read from the main table first
wnotvar, = numpy.where(isvar == False) # noqa
if wnotvar.size > 0:
# this will be contiguous (not true for slices)
thesecol = colnumsp[wnotvar]
theseoff = offsets[wnotvar]
self._FITS.read_columns_as_rec_byoffset(self._ext+1,
thesecol,
theseoff,
array,
rows)
for i in xrange(thesecol.size):
name = array.dtype.names[wnotvar[i]]
colnum = thesecol[i]-1
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
# now read the variable length arrays; we may be able to speed this up
# by storing directly instead of reading first into a list
wvar, = numpy.where(isvar == True) # noqa
if wvar.size > 0:
# this will be contiguous (not true for slices)
thesecol = colnumsp[wvar]
for i in xrange(thesecol.size):
colnump = thesecol[i]
name = array.dtype.names[wvar[i]]
dlist = self._FITS.read_var_column_as_list(
self._ext+1, colnump, rows)
if (isinstance(dlist[0], str) or
(IS_PY3 and isinstance(dlist[0], bytes))):
is_string = True
else:
is_string = False
if array[name].dtype.descr[0][1][1] == 'O':
# storing in object array
# get references to each, no copy made
for irow, item in enumerate(dlist):
if IS_PY3 and isinstance(item, bytes):
item = item.decode('ascii')
array[name][irow] = item
else:
for irow, item in enumerate(dlist):
if IS_PY3 and isinstance(item, bytes):
item = item.decode('ascii')
if is_string:
array[name][irow] = item
else:
ncopy = len(item)
if IS_PY3:
ts = array[name].dtype.descr[0][1][1]
if ts != 'S' and ts != 'U':
array[name][irow][0:ncopy] = item[:]
else:
array[name][irow] = item
else:
array[name][irow][0:ncopy] = item[:]
        return array | python | Read columns from a table into a rec array, including variable length
columns. This is special because, for efficiency, it involves reading
from the main table as normal but skipping the columns in the array
that are variable. Then reading the variable length columns, with
accounting for strides appropriately.
row and column numbers should be checked before calling this function | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1102-L1188 |
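This helper is what ultimately services reads of tables containing variable-length columns. A usage sketch through the public interface; the file name is a stand-in, and the table is assumed to contain at least one variable-length column:

import fitsio

with fitsio.FITS('data.fits') as fits:  # 'data.fits' is hypothetical
    fixed = fits[1].read()                    # vstorage='fixed': padded arrays
    ragged = fits[1].read(vstorage='object')  # object columns, one entry per row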
2,142 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._extract_rows | def _extract_rows(self, rows):
"""
Extract an array of rows from an input scalar or sequence
"""
if rows is not None:
rows = numpy.array(rows, ndmin=1, copy=False, dtype='i8')
# returns unique, sorted
rows = numpy.unique(rows)
maxrow = self._info['nrows']-1
if rows[0] < 0 or rows[-1] > maxrow:
raise ValueError("rows must be in [%d,%d]" % (0, maxrow))
        return rows | python | Extract an array of rows from an input scalar or sequence | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1190-L1202 |
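The normalization performed above can be reproduced standalone; duplicate and unsorted row requests collapse to a sorted, unique, range-checked set:

import numpy

nrows = 100
rows = numpy.array([7, 3, 7, 0], ndmin=1, copy=False, dtype='i8')
rows = numpy.unique(rows)  # array([0, 3, 7]): unique and sorted, as above
assert rows[0] >= 0 and rows[-1] <= nrows - 1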
2,143 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._process_slice | def _process_slice(self, arg):
"""
        process the input slice for use in calling the C code
"""
start = arg.start
stop = arg.stop
step = arg.step
nrows = self._info['nrows']
if step is None:
step = 1
if start is None:
start = 0
if stop is None:
stop = nrows
if start < 0:
start = nrows + start
if start < 0:
raise IndexError("Index out of bounds")
if stop < 0:
stop = nrows + start + 1
if stop < start:
# will return an empty struct
stop = start
if stop > nrows:
stop = nrows
        return slice(start, stop, step) | python | process the input slice for use in calling the C code | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1204-L1234 |
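A simplified standalone re-implementation of the same clamping rules (the negative-stop branch above is omitted here); nrows stands in for self._info['nrows']:

def normalize(sl, nrows):
    # fill slice defaults, wrap a negative start, clamp stop into range
    start = 0 if sl.start is None else sl.start
    stop = nrows if sl.stop is None else sl.stop
    step = 1 if sl.step is None else sl.step
    if start < 0:
        start = nrows + start
        if start < 0:
            raise IndexError("Index out of bounds")
    stop = min(max(stop, start), nrows)
    return slice(start, stop, step)

print(normalize(slice(-3, None), 10))  # slice(7, 10, 1)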
2,144 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._slice2rows | def _slice2rows(self, start, stop, step=None):
"""
Convert a slice to an explicit array of rows
"""
nrows = self._info['nrows']
if start is None:
start = 0
if stop is None:
stop = nrows
if step is None:
step = 1
tstart = self._fix_range(start)
tstop = self._fix_range(stop)
if tstart == 0 and tstop == nrows:
# this is faster: if all fields are also requested, then a
# single fread will be done
return None
if stop < start:
raise ValueError("start is greater than stop in slice")
        return numpy.arange(tstart, tstop, step, dtype='i8') | python | Convert a slice to an explicit array of rows | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1236-L1256 |
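Expanding a slice to explicit rows is a single arange; the None return for the full range is an optimization that lets the reader issue one contiguous fread. For example:

import numpy

nrows = 1000
rows = numpy.arange(200, 300, 2, dtype='i8')  # what a [200:300:2] request expands to
full_range = None  # a [:] request stays None, signalling a single full read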
2,145 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._fix_range | def _fix_range(self, num, isslice=True):
"""
Ensure the input is within range.
If el=True, then don't treat as a slice element
"""
nrows = self._info['nrows']
if isslice:
# include the end
if num < 0:
num = nrows + (1+num)
elif num > nrows:
num = nrows
else:
# single element
if num < 0:
num = nrows + num
elif num > (nrows-1):
num = nrows-1
        return num | python | Ensure the input is within range.
If isslice=True, treat num as a slice endpoint; otherwise treat it as a single element index | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1258-L1279 |
2,146 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._rescale_and_convert_field_inplace | def _rescale_and_convert_field_inplace(self, array, name, scale, zero):
"""
Apply fits scalings. Also, convert bool to proper
numpy boolean values
"""
self._rescale_array(array[name], scale, zero)
        if array[name].dtype == numpy.bool_:  # the bare numpy.bool alias was removed in numpy 1.24
array[name] = self._convert_bool_array(array[name])
        return array | python | Apply fits scalings. Also, convert bool to proper
numpy boolean values | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1281-L1289 |
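The TSCALn/TZEROn handling above is plain in-place affine scaling; a standalone sketch with invented scale and zero values:

import numpy

raw = numpy.array([100.0, 200.0, 300.0])
tscale, tzero = 0.01, 273.15  # hypothetical header values
raw *= numpy.array(tscale, dtype=raw.dtype)  # scale in place, as above
raw += numpy.array(tzero, dtype=raw.dtype)
print(raw)  # [274.15 275.15 276.15]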
2,147 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._rescale_array | def _rescale_array(self, array, scale, zero):
"""
Scale the input array
"""
if scale != 1.0:
sval = numpy.array(scale, dtype=array.dtype)
array *= sval
if zero != 0.0:
zval = numpy.array(zero, dtype=array.dtype)
            array += zval | python | Scale the input array | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1302-L1311 |
2,148 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._maybe_trim_strings | def _maybe_trim_strings(self, array, **keys):
"""
if requested, trim trailing white space from
all string fields in the input array
"""
trim_strings = keys.get('trim_strings', False)
if self.trim_strings or trim_strings:
            _trim_strings(array) | python | if requested, trim trailing white space from
all string fields in the input array | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1313-L1320 |
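Trimming can be requested per call via trim_strings=, or for the whole file at construction; a sketch against a hypothetical file:

import fitsio

# 'cat.fits' is a stand-in name; trailing blanks in string columns are
# stripped from the output when trim_strings is set
data = fitsio.read('cat.fits', trim_strings=True)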
2,149 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._get_tbl_numpy_dtype | def _get_tbl_numpy_dtype(self, colnum, include_endianness=True):
"""
Get numpy type for the input column
"""
table_type = self._info['hdutype']
table_type_string = _hdu_type_map[table_type]
try:
ftype = self._info['colinfo'][colnum]['eqtype']
if table_type == ASCII_TBL:
npy_type = _table_fits2npy_ascii[abs(ftype)]
else:
npy_type = _table_fits2npy[abs(ftype)]
except KeyError:
raise KeyError("unsupported %s fits data "
"type: %d" % (table_type_string, ftype))
istbit = False
if (ftype == 1):
istbit = True
isvar = False
if ftype < 0:
isvar = True
if include_endianness:
# if binary we will read the big endian bytes directly,
# if ascii we read into native byte order
if table_type == ASCII_TBL:
addstr = ''
else:
addstr = '>'
if npy_type not in ['u1', 'i1', 'S', 'U']:
npy_type = addstr+npy_type
if npy_type == 'S':
width = self._info['colinfo'][colnum]['width']
npy_type = 'S%d' % width
elif npy_type == 'U':
width = self._info['colinfo'][colnum]['width']
npy_type = 'U%d' % width
        return npy_type, isvar, istbit | python | Get numpy type for the input column | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1353-L1393 |
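The strings assembled above are standard numpy dtype codes: binary-table columns come back big-endian, and string columns get explicit widths. For instance:

import numpy

print(numpy.dtype('>f8').str)       # '>f8': big-endian double
print(numpy.dtype('S12').itemsize)  # 12: fixed-width byte string
print(numpy.dtype('U12').itemsize)  # 48: unicode uses 4 bytes per character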
2,150 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._process_args_as_rows_or_columns | def _process_args_as_rows_or_columns(self, arg, unpack=False):
"""
        We must be able to interpret the args as either a column name or
row number, or sequences thereof. Numpy arrays and slices are also
fine.
Examples:
'field'
35
[35,55,86]
['f1',f2',...]
Can also be tuples or arrays.
"""
flags = set()
#
if isinstance(arg, (tuple, list, numpy.ndarray)):
# a sequence was entered
if isstring(arg[0]):
result = arg
else:
result = arg
flags.add('isrows')
elif isstring(arg):
# a single string was entered
result = arg
elif isinstance(arg, slice):
if unpack:
flags.add('isrows')
result = self._slice2rows(arg.start, arg.stop, arg.step)
else:
flags.add('isrows')
flags.add('isslice')
result = self._process_slice(arg)
else:
# a single object was entered.
# Probably should apply some more checking on this
result = arg
flags.add('isrows')
if numpy.ndim(arg) == 0:
flags.add('isscalar')
        return result, flags | python | We must be able to interpret the args as either a column name or
row number, or sequences thereof. Numpy arrays and slices are also
fine.
Examples:
'field'
35
[35,55,86]
['f1','f2',...]
Can also be tuples or arrays. | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1395-L1437 |
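This dispatch is what backs bracket access on a table HDU; a usage sketch (the file and column names are assumptions):

import fitsio

with fitsio.FITS('data.fits') as fits:
    hdu = fits[1]
    col = hdu['x'][:]             # a string selects a column subset
    sub = hdu[['x', 'y']][10:20]  # a list of names, then a row slice
    rows = hdu[[1, 5, 8]]         # a sequence of numbers selects rows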
2,151 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._extract_colnums | def _extract_colnums(self, columns=None):
"""
Extract an array of columns from the input
"""
if columns is None:
return numpy.arange(self._ncol, dtype='i8')
if not isinstance(columns, (tuple, list, numpy.ndarray)):
# is a scalar
return self._extract_colnum(columns)
colnums = numpy.zeros(len(columns), dtype='i8')
for i in xrange(colnums.size):
colnums[i] = self._extract_colnum(columns[i])
# returns unique sorted
colnums = numpy.unique(colnums)
        return colnums | python | Extract an array of columns from the input | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1494-L1511 |
2,152 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._extract_colnum | def _extract_colnum(self, col):
"""
Get the column number for the input column
"""
if isinteger(col):
colnum = col
if (colnum < 0) or (colnum > (self._ncol-1)):
raise ValueError(
"column number should be in [0,%d]" % (0, self._ncol-1))
else:
colstr = mks(col)
try:
if self.case_sensitive:
mess = "column name '%s' not found (case sensitive)" % col
colnum = self._colnames.index(colstr)
else:
mess \
= "column name '%s' not found (case insensitive)" % col
colnum = self._colnames_lower.index(colstr.lower())
except ValueError:
raise ValueError(mess)
        return int(colnum) | python | Get the column number for the input column | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1513-L1535 |
2,153 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._update_info | def _update_info(self):
"""
Call parent method and make sure this is in fact a
table HDU. Set some convenience data.
"""
super(TableHDU, self)._update_info()
if self._info['hdutype'] == IMAGE_HDU:
mess = "Extension %s is not a Table HDU" % self.ext
raise ValueError(mess)
if 'colinfo' in self._info:
self._colnames = [i['name'] for i in self._info['colinfo']]
self._colnames_lower = [
i['name'].lower() for i in self._info['colinfo']]
            self._ncol = len(self._colnames) | python | Call parent method and make sure this is in fact a
table HDU. Set some convenience data. | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1537-L1550 |
2,154 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._get_next_buffered_row | def _get_next_buffered_row(self):
"""
Get the next row for iteration.
"""
if self._iter_row == self._iter_nrows:
raise StopIteration
if self._row_buffer_index >= self._iter_row_buffer:
self._buffer_iter_rows(self._iter_row)
data = self._row_buffer[self._row_buffer_index]
self._iter_row += 1
self._row_buffer_index += 1
        return data | python | Get the next row for iteration. | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1640-L1653 |
2,155 | esheldon/fitsio | fitsio/hdu/table.py | TableHDU._buffer_iter_rows | def _buffer_iter_rows(self, start):
"""
Read in the buffer for iteration
"""
self._row_buffer = self[start:start+self._iter_row_buffer]
# start back at the front of the buffer
        self._row_buffer_index = 0 | python | Read in the buffer for iteration | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1655-L1662 |
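These two helpers together implement buffered row iteration over a table HDU; a usage sketch, with a hypothetical file name and buffer size:

import fitsio

with fitsio.FITS('data.fits', iter_row_buffer=1000) as fits:
    for row in fits[1]:  # rows arrive from an internal 1000-row buffer
        print(row)
        break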
2,156 | esheldon/fitsio | fitsio/hdu/table.py | AsciiTableHDU.read | def read(self, **keys):
"""
        read data from an ascii table HDU
By default, all rows are read. Send rows= to select subsets of the
data. Table data are read into a recarray for multiple columns,
plain array for a single column.
parameters
----------
columns: list/array
An optional set of columns to read from table HDUs. Can be string
or number. If a sequence, a recarray is always returned. If a
scalar, an ordinary array is returned.
rows: list/array, optional
            An optional list of rows to read from table HDUs. Default is to
read all.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
            If True, force all column names to lower case in output. Will
            override the lower= keyword from construction.
        upper: bool, optional
            If True, force all column names to upper case in output. Will
            override the lower= keyword from construction.
"""
rows = keys.get('rows', None)
columns = keys.get('columns', None)
# if columns is None, returns all. Guaranteed to be unique and sorted
colnums = self._extract_colnums(columns)
if isinstance(colnums, int):
# scalar sent, don't read as a recarray
return self.read_column(columns, **keys)
rows = self._extract_rows(rows)
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
        # a rows value of None was passed through above and is correctly
        # interpreted by the reader to mean all rows
# this is the full dtype for all columns
dtype, offsets, isvar = self.get_rec_dtype(colnums=colnums, **keys)
array = numpy.zeros(nrows, dtype=dtype)
# note reading into existing data
wnotvar, = numpy.where(isvar == False) # noqa
if wnotvar.size > 0:
for i in wnotvar:
colnum = colnums[i]
name = array.dtype.names[i]
a = array[name].copy()
self._FITS.read_column(self._ext+1, colnum+1, a, rows)
array[name] = a
del a
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
wvar, = numpy.where(isvar == True) # noqa
if wvar.size > 0:
for i in wvar:
colnum = colnums[i]
name = array.dtype.names[i]
dlist = self._FITS.read_var_column_as_list(
self._ext+1, colnum+1, rows)
if (isinstance(dlist[0], str) or
(IS_PY3 and isinstance(dlist[0], bytes))):
is_string = True
else:
is_string = False
if array[name].dtype.descr[0][1][1] == 'O':
# storing in object array
# get references to each, no copy made
for irow, item in enumerate(dlist):
if IS_PY3 and isinstance(item, bytes):
item = item.decode('ascii')
array[name][irow] = item
else:
for irow, item in enumerate(dlist):
if IS_PY3 and isinstance(item, bytes):
item = item.decode('ascii')
if is_string:
array[name][irow] = item
else:
ncopy = len(item)
array[name][irow][0:ncopy] = item[:]
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
        return array | python | read data from an ascii table HDU
By default, all rows are read. Send rows= to select subsets of the
data. Table data are read into a recarray for multiple columns,
plain array for a single column.
parameters
----------
columns: list/array
    An optional set of columns to read from table HDUs. Can be string
    or number. If a sequence, a recarray is always returned. If a
    scalar, an ordinary array is returned.
rows: list/array, optional
    An optional list of rows to read from table HDUs. Default is to
    read all.
vstorage: string, optional
    Over-ride the default method to store variable length columns. Can
    be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
    If True, force all column names to lower case in output. Will
    override the lower= keyword from construction.
upper: bool, optional
    If True, force all column names to upper case in output. Will
    override the lower= keyword from construction. | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1713-L1816 |
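A usage sketch for an ASCII table extension; the file, extension, and column names are invented for illustration:

import fitsio

with fitsio.FITS('ascii_table.fits') as fits:
    data = fits[1].read(columns=['RA', 'DEC'], rows=[0, 5, 9], lower=True)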
2,157 | esheldon/fitsio | fitsio/hdu/table.py | TableColumnSubset.read | def read(self, **keys):
"""
Read the data from disk and return as a numpy array
"""
if self.is_scalar:
data = self.fitshdu.read_column(self.columns, **keys)
else:
c = keys.get('columns', None)
if c is None:
keys['columns'] = self.columns
data = self.fitshdu.read(**keys)
        return data | python | Read the data from disk and return as a numpy array | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1868-L1881 |
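The column-subset object defers all I/O until read() is called or an index is applied; a sketch with a hypothetical file:

import fitsio

with fitsio.FITS('data.fits') as fits:
    subset = fits[1]['x']       # no data read yet
    head = subset[0:10]         # reads just those rows of that column
    everything = subset.read()  # reads the full column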
2,158 | esheldon/fitsio | fitsio/fitslib.py | read | def read(filename, ext=None, extver=None, **keys):
"""
Convenience function to read data from the specified FITS HDU
By default, all data are read. For tables, send columns= and rows= to
select subsets of the data. Table data are read into a recarray; use a
FITS object and read_column() to get a single column as an ordinary array.
For images, create a FITS object and use slice notation to read subsets.
Under the hood, a FITS object is constructed and data are read using
an associated FITSHDU object.
parameters
----------
filename: string
A filename.
ext: number or string, optional
The extension. Either the numerical extension from zero
or a string extension name. If not sent, data is read from
the first HDU that has data.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname). These
extensions can optionally specify an EXTVER version number in the
header. Send extver= to select a particular version. If extver is not
sent, the first one will be selected. If ext is an integer, the extver
is ignored.
columns: list or array, optional
An optional set of columns to read from table HDUs. Default is to
read all. Can be string or number.
rows: optional
An optional list of rows to read from table HDUS. Default is to
read all.
header: bool, optional
If True, read the FITS header and return a tuple (data,header)
Default is False.
case_sensitive: bool, optional
Match column names and extension names with case-sensitivity. Default
is False.
lower: bool, optional
        If True, force all column names to lower case in output
    upper: bool, optional
        If True, force all column names to upper case in output
vstorage: string, optional
Set the default method to store variable length columns. Can be
'fixed' or 'object'. See docs on fitsio.FITS for details.
"""
with FITS(filename, **keys) as fits:
header = keys.pop('header', False)
if ext is None:
for i in xrange(len(fits)):
if fits[i].has_data():
ext = i
break
if ext is None:
raise IOError("No extensions have data")
item = _make_item(ext, extver=extver)
data = fits[item].read(**keys)
if header:
h = fits[item].read_header()
return data, h
else:
            return data | python | Convenience function to read data from the specified FITS HDU
By default, all data are read. For tables, send columns= and rows= to
select subsets of the data. Table data are read into a recarray; use a
FITS object and read_column() to get a single column as an ordinary array.
For images, create a FITS object and use slice notation to read subsets.
Under the hood, a FITS object is constructed and data are read using
an associated FITSHDU object.
parameters
----------
filename: string
    A filename.
ext: number or string, optional
    The extension. Either the numerical extension from zero
    or a string extension name. If not sent, data is read from
    the first HDU that has data.
extver: integer, optional
    FITS allows multiple extensions to have the same name (extname). These
    extensions can optionally specify an EXTVER version number in the
    header. Send extver= to select a particular version. If extver is not
    sent, the first one will be selected. If ext is an integer, the extver
    is ignored.
columns: list or array, optional
    An optional set of columns to read from table HDUs. Default is to
    read all. Can be string or number.
rows: optional
    An optional list of rows to read from table HDUs. Default is to
    read all.
header: bool, optional
    If True, read the FITS header and return a tuple (data,header)
    Default is False.
case_sensitive: bool, optional
    Match column names and extension names with case-sensitivity. Default
    is False.
lower: bool, optional
    If True, force all column names to lower case in output
upper: bool, optional
    If True, force all column names to upper case in output
vstorage: string, optional
    Set the default method to store variable length columns. Can be
    'fixed' or 'object'. See docs on fitsio.FITS for details. | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L51-L117 |
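The convenience wrapper in action; header=True returns a (data, header) tuple. The file name is a stand-in, and extension 1 is assumed to hold a table:

import fitsio

data, hdr = fitsio.read('data.fits', ext=1, header=True)
print(hdr['NAXIS2'], data.dtype.names)  # row count from the header, column names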
2,159 | esheldon/fitsio | fitsio/fitslib.py | read_header | def read_header(filename, ext=0, extver=None, case_sensitive=False, **keys):
"""
Convenience function to read the header from the specified FITS HDU
The FITSHDR allows access to the values and comments by name and
number.
parameters
----------
filename: string
A filename.
ext: number or string, optional
The extension. Either the numerical extension from zero
or a string extension name. Default read primary header.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname). These
extensions can optionally specify an EXTVER version number in the
header. Send extver= to select a particular version. If extver is not
sent, the first one will be selected. If ext is an integer, the extver
is ignored.
case_sensitive: bool, optional
Match extension names with case-sensitivity. Default is False.
"""
dont_create = 0
try:
hdunum = ext+1
except TypeError:
hdunum = None
_fits = _fitsio_wrap.FITS(filename, READONLY, dont_create)
if hdunum is None:
extname = mks(ext)
if extver is None:
extver_num = 0
else:
extver_num = extver
if not case_sensitive:
# the builtin movnam_hdu is not case sensitive
hdunum = _fits.movnam_hdu(ANY_HDU, extname, extver_num)
else:
# for case sensitivity we'll need to run through
# all the hdus
found = False
current_ext = 0
while True:
hdunum = current_ext+1
try:
hdu_type = _fits.movabs_hdu(hdunum) # noqa - not used
name, vers = _fits.get_hdu_name_version(hdunum)
if name == extname:
if extver is None:
# take the first match
found = True
break
else:
if extver_num == vers:
found = True
break
except OSError:
break
current_ext += 1
if not found:
raise IOError(
'hdu not found: %s (extver %s)' % (extname, extver))
return FITSHDR(_fits.read_header(hdunum)) | python | def read_header(filename, ext=0, extver=None, case_sensitive=False, **keys):
"""
Convenience function to read the header from the specified FITS HDU
The FITSHDR allows access to the values and comments by name and
number.
parameters
----------
filename: string
A filename.
ext: number or string, optional
The extension. Either the numerical extension from zero
or a string extension name. Default is to read the primary header.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname). These
extensions can optionally specify an EXTVER version number in the
header. Send extver= to select a particular version. If extver is not
sent, the first one will be selected. If ext is an integer, the extver
is ignored.
case_sensitive: bool, optional
Match extension names with case-sensitivity. Default is False.
"""
dont_create = 0
try:
hdunum = ext+1
except TypeError:
hdunum = None
_fits = _fitsio_wrap.FITS(filename, READONLY, dont_create)
if hdunum is None:
extname = mks(ext)
if extver is None:
extver_num = 0
else:
extver_num = extver
if not case_sensitive:
# the builtin movnam_hdu is not case sensitive
hdunum = _fits.movnam_hdu(ANY_HDU, extname, extver_num)
else:
# for case sensitivity we'll need to run through
# all the hdus
found = False
current_ext = 0
while True:
hdunum = current_ext+1
try:
hdu_type = _fits.movabs_hdu(hdunum) # noqa - not used
name, vers = _fits.get_hdu_name_version(hdunum)
if name == extname:
if extver is None:
# take the first match
found = True
break
else:
if extver_num == vers:
found = True
break
except OSError:
break
current_ext += 1
if not found:
raise IOError(
'hdu not found: %s (extver %s)' % (extname, extver))
return FITSHDR(_fits.read_header(hdunum)) | [
"def",
"read_header",
"(",
"filename",
",",
"ext",
"=",
"0",
",",
"extver",
"=",
"None",
",",
"case_sensitive",
"=",
"False",
",",
"*",
"*",
"keys",
")",
":",
"dont_create",
"=",
"0",
"try",
":",
"hdunum",
"=",
"ext",
"+",
"1",
"except",
"TypeError",
":",
"hdunum",
"=",
"None",
"_fits",
"=",
"_fitsio_wrap",
".",
"FITS",
"(",
"filename",
",",
"READONLY",
",",
"dont_create",
")",
"if",
"hdunum",
"is",
"None",
":",
"extname",
"=",
"mks",
"(",
"ext",
")",
"if",
"extver",
"is",
"None",
":",
"extver_num",
"=",
"0",
"else",
":",
"extver_num",
"=",
"extver",
"if",
"not",
"case_sensitive",
":",
"# the builtin movnam_hdu is not case sensitive",
"hdunum",
"=",
"_fits",
".",
"movnam_hdu",
"(",
"ANY_HDU",
",",
"extname",
",",
"extver_num",
")",
"else",
":",
"# for case sensitivity we'll need to run through",
"# all the hdus",
"found",
"=",
"False",
"current_ext",
"=",
"0",
"while",
"True",
":",
"hdunum",
"=",
"current_ext",
"+",
"1",
"try",
":",
"hdu_type",
"=",
"_fits",
".",
"movabs_hdu",
"(",
"hdunum",
")",
"# noqa - not used",
"name",
",",
"vers",
"=",
"_fits",
".",
"get_hdu_name_version",
"(",
"hdunum",
")",
"if",
"name",
"==",
"extname",
":",
"if",
"extver",
"is",
"None",
":",
"# take the first match",
"found",
"=",
"True",
"break",
"else",
":",
"if",
"extver_num",
"==",
"vers",
":",
"found",
"=",
"True",
"break",
"except",
"OSError",
":",
"break",
"current_ext",
"+=",
"1",
"if",
"not",
"found",
":",
"raise",
"IOError",
"(",
"'hdu not found: %s (extver %s)'",
"%",
"(",
"extname",
",",
"extver",
")",
")",
"return",
"FITSHDR",
"(",
"_fits",
".",
"read_header",
"(",
"hdunum",
")",
")"
] | Convenience function to read the header from the specified FITS HDU
The FITSHDR allows access to the values and comments by name and
number.
parameters
----------
filename: string
A filename.
ext: number or string, optional
The extension. Either the numerical extension from zero
or a string extension name. Default is to read the primary header.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname). These
extensions can optionally specify an EXTVER version number in the
header. Send extver= to select a particular version. If extver is not
sent, the first one will be selected. If ext is an integer, the extver
is ignored.
case_sensitive: bool, optional
Match extension names with case-sensitivity. Default is False. | [
"Convenience",
"function",
"to",
"read",
"the",
"header",
"from",
"the",
"specified",
"FITS",
"HDU"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L120-L190 |
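A short sketch of read_header() above; 'image.fits' and the extension name 'SCI' are hypothetical:

import fitsio

hdr = fitsio.read_header('image.fits')        # primary header by default
hdr = fitsio.read_header('image.fits', ext='SCI')
print(hdr['NAXIS'])                           # FITSHDR supports access by keyword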
2,160 | esheldon/fitsio | fitsio/fitslib.py | read_scamp_head | def read_scamp_head(fname, header=None):
"""
read a SCAMP .head file as a fits header FITSHDR object
parameters
----------
fname: string
The path to the SCAMP .head file
header: FITSHDR, optional
Optionally combine the header with the input one. The input can
be any object convertible to a FITSHDR object
returns
-------
header: FITSHDR
A fits header object of type FITSHDR
"""
with open(fname) as fobj:
lines = fobj.readlines()
lines = [l.strip() for l in lines if l[0:3] != 'END']
# if header is None an empty FITSHDR is created
hdr = FITSHDR(header)
for l in lines:
hdr.add_record(l)
return hdr | python | def read_scamp_head(fname, header=None):
"""
read a SCAMP .head file as a fits header FITSHDR object
parameters
----------
fname: string
The path to the SCAMP .head file
header: FITSHDR, optional
Optionally combine the header with the input one. The input can
be any object convertible to a FITSHDR object
returns
-------
header: FITSHDR
A fits header object of type FITSHDR
"""
with open(fname) as fobj:
lines = fobj.readlines()
lines = [l.strip() for l in lines if l[0:3] != 'END']
# if header is None an empty FITSHDR is created
hdr = FITSHDR(header)
for l in lines:
hdr.add_record(l)
return hdr | [
"def",
"read_scamp_head",
"(",
"fname",
",",
"header",
"=",
"None",
")",
":",
"with",
"open",
"(",
"fname",
")",
"as",
"fobj",
":",
"lines",
"=",
"fobj",
".",
"readlines",
"(",
")",
"lines",
"=",
"[",
"l",
".",
"strip",
"(",
")",
"for",
"l",
"in",
"lines",
"if",
"l",
"[",
"0",
":",
"3",
"]",
"!=",
"'END'",
"]",
"# if header is None an empty FITSHDR is created",
"hdr",
"=",
"FITSHDR",
"(",
"header",
")",
"for",
"l",
"in",
"lines",
":",
"hdr",
".",
"add_record",
"(",
"l",
")",
"return",
"hdr"
] | read a SCAMP .head file as a fits header FITSHDR object
parameters
----------
fname: string
The path to the SCAMP .head file
header: FITSHDR, optional
Optionally combine the header with the input one. The input can
be any object convertible to a FITSHDR object
returns
-------
header: FITSHDR
A fits header object of type FITSHDR | [
"read",
"a",
"SCAMP",
".",
"head",
"file",
"as",
"a",
"fits",
"header",
"FITSHDR",
"object"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L193-L223 |
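A sketch of read_scamp_head() above; the file names are hypothetical:

import fitsio

hdr = fitsio.read_scamp_head('astrom.head')

# optionally seed the result with an existing header's records
base = fitsio.read_header('image.fits')
combined = fitsio.read_scamp_head('astrom.head', header=base)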
2,161 | esheldon/fitsio | fitsio/fitslib.py | write | def write(filename, data, extname=None, extver=None, units=None,
compress=None, table_type='binary', header=None,
clobber=False, **keys):
"""
Convenience function to create a new HDU and write the data.
Under the hood, a FITS object is constructed. If you want to append rows
to an existing HDU, or modify data in an HDU, please construct a FITS
object.
parameters
----------
filename: string
A filename.
data:
Either a normal n-dimensional array or a recarray. Images are written
to a new IMAGE_HDU and recarrays are written to BINARY_TBL or
ASCII_TBL hdus.
extname: string, optional
An optional name for the new header unit.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
compress: string, optional
A string representing the compression algorithm for images,
default None.
Can be one of
'RICE'
'GZIP'
'GZIP_2'
'PLIO' (no unsigned or negative integers)
'HCOMPRESS'
(case-insensitive) See the cfitsio manual for details.
header: FITSHDR, list, dict, optional
A set of header keys to write. The keys are written before the data
is written to the table, preventing a resizing of the table area.
Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
Note required keywords such as NAXIS, XTENSION, etc. are cleaned out.
clobber: bool, optional
If True, overwrite any existing file. Default is to append
a new extension on existing files.
ignore_empty: bool, optional
Default False. Unless set to True, only allow
empty HDUs in the zero extension.
table keywords
--------------
These keywords are only active when writing tables.
units: list
A list of strings representing units for each column.
table_type: string, optional
Either 'binary' or 'ascii', default 'binary'
Matching is case-insensitive
write_bitcols: bool, optional
Write boolean arrays in the FITS bitcols format, default False
"""
with FITS(filename, 'rw', clobber=clobber, **keys) as fits:
fits.write(data,
table_type=table_type,
units=units,
extname=extname,
extver=extver,
compress=compress,
header=header,
**keys) | python | def write(filename, data, extname=None, extver=None, units=None,
compress=None, table_type='binary', header=None,
clobber=False, **keys):
"""
Convenience function to create a new HDU and write the data.
Under the hood, a FITS object is constructed. If you want to append rows
to an existing HDU, or modify data in an HDU, please construct a FITS
object.
parameters
----------
filename: string
A filename.
data:
Either a normal n-dimensional array or a recarray. Images are written
to a new IMAGE_HDU and recarrays are written to BINARY_TBL or
ASCII_TBL hdus.
extname: string, optional
An optional name for the new header unit.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
compress: string, optional
A string representing the compression algorithm for images,
default None.
Can be one of
'RICE'
'GZIP'
'GZIP_2'
'PLIO' (no unsigned or negative integers)
'HCOMPRESS'
(case-insensitive) See the cfitsio manual for details.
header: FITSHDR, list, dict, optional
A set of header keys to write. The keys are written before the data
is written to the table, preventing a resizing of the table area.
Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
Note required keywords such as NAXIS, XTENSION, etc. are cleaned out.
clobber: bool, optional
If True, overwrite any existing file. Default is to append
a new extension on existing files.
ignore_empty: bool, optional
Default False. Unless set to True, only allow
empty HDUs in the zero extension.
table keywords
--------------
These keywords are only active when writing tables.
units: list
A list of strings representing units for each column.
table_type: string, optional
Either 'binary' or 'ascii', default 'binary'
Matching is case-insensitive
write_bitcols: bool, optional
Write boolean arrays in the FITS bitcols format, default False
"""
with FITS(filename, 'rw', clobber=clobber, **keys) as fits:
fits.write(data,
table_type=table_type,
units=units,
extname=extname,
extver=extver,
compress=compress,
header=header,
**keys) | [
"def",
"write",
"(",
"filename",
",",
"data",
",",
"extname",
"=",
"None",
",",
"extver",
"=",
"None",
",",
"units",
"=",
"None",
",",
"compress",
"=",
"None",
",",
"table_type",
"=",
"'binary'",
",",
"header",
"=",
"None",
",",
"clobber",
"=",
"False",
",",
"*",
"*",
"keys",
")",
":",
"with",
"FITS",
"(",
"filename",
",",
"'rw'",
",",
"clobber",
"=",
"clobber",
",",
"*",
"*",
"keys",
")",
"as",
"fits",
":",
"fits",
".",
"write",
"(",
"data",
",",
"table_type",
"=",
"table_type",
",",
"units",
"=",
"units",
",",
"extname",
"=",
"extname",
",",
"extver",
"=",
"extver",
",",
"compress",
"=",
"compress",
",",
"header",
"=",
"header",
",",
"*",
"*",
"keys",
")"
] | Convenience function to create a new HDU and write the data.
Under the hood, a FITS object is constructed. If you want to append rows
to an existing HDU, or modify data in an HDU, please construct a FITS
object.
parameters
----------
filename: string
A filename.
data:
Either a normal n-dimensional array or a recarray. Images are written
to a new IMAGE_HDU and recarrays are written to BINARY_TBL or
ASCII_TBL hdus.
extname: string, optional
An optional name for the new header unit.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
compress: string, optional
A string representing the compression algorithm for images,
default None.
Can be one of
'RICE'
'GZIP'
'GZIP_2'
'PLIO' (no unsigned or negative integers)
'HCOMPRESS'
(case-insensitive) See the cfitsio manual for details.
header: FITSHDR, list, dict, optional
A set of header keys to write. The keys are written before the data
is written to the table, preventing a resizing of the table area.
Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
Note required keywords such as NAXIS, XTENSION, etc. are cleaned out.
clobber: bool, optional
If True, overwrite any existing file. Default is to append
a new extension on existing files.
ignore_empty: bool, optional
Default False. Unless set to True, only allow
empty HDUs in the zero extension.
table keywords
--------------
These keywords are only active when writing tables.
units: list
A list of strings representing units for each column.
table_type: string, optional
Either 'binary' or 'ascii', default 'binary'
Matching is case-insensitive
write_bitcols: bool, optional
Write boolean arrays in the FITS bitcols format, default False | [
"Convenience",
"function",
"to",
"create",
"a",
"new",
"HDU",
"and",
"write",
"the",
"data",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L236-L317 |
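A sketch of the write() convenience function above; 'out.fits' and the column layout are hypothetical:

import numpy
import fitsio

# an ndarray goes to an IMAGE_HDU; clobber=True overwrites the file
img = numpy.zeros((64, 64), dtype='f4')
fitsio.write('out.fits', img, clobber=True)

# a recarray goes to a BINARY_TBL extension, appended to the same file
rec = numpy.zeros(3, dtype=[('x', 'f8'), ('name', 'S10')])
fitsio.write('out.fits', rec, extname='CAT', units=['m', ''])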
2,162 | esheldon/fitsio | fitsio/fitslib.py | array2tabledef | def array2tabledef(data, table_type='binary', write_bitcols=False):
"""
Similar to descr2tabledef but if there are object columns a type
and max length will be extracted and used for the tabledef
"""
is_ascii = (table_type == 'ascii')
if data.dtype.fields is None:
raise ValueError("data must have fields")
names = []
names_nocase = {}
formats = []
dims = []
descr = data.dtype.descr
for d in descr:
# these have the form '<f4' or '|S25', etc. Extract the pure type
npy_dtype = d[1][1:]
if is_ascii:
if npy_dtype in ['u1', 'i1']:
raise ValueError(
"1-byte integers are not supported for "
"ascii tables: '%s'" % npy_dtype)
if npy_dtype in ['u2']:
raise ValueError(
"unsigned 2-byte integers are not supported for "
"ascii tables: '%s'" % npy_dtype)
if npy_dtype[0] == 'O':
# this will be a variable length column 1Pt(len) where t is the
# type and len is max length. Each element must be convertible to
# the same type as the first
name = d[0]
form, dim = npy_obj2fits(data, name)
elif npy_dtype[0] == "V":
continue
else:
name, form, dim = _npy2fits(
d, table_type=table_type, write_bitcols=write_bitcols)
if name == '':
raise ValueError("field name is an empty string")
"""
if is_ascii:
if dim is not None:
raise ValueError("array columns are not supported for "
"ascii tables")
"""
name_nocase = name.upper()
if name_nocase in names_nocase:
raise ValueError(
"duplicate column name found: '%s'. Note "
"FITS column names are not case sensitive" % name_nocase)
names.append(name)
names_nocase[name_nocase] = name_nocase
formats.append(form)
dims.append(dim)
return names, formats, dims | python | def array2tabledef(data, table_type='binary', write_bitcols=False):
"""
Similar to descr2tabledef but if there are object columns a type
and max length will be extracted and used for the tabledef
"""
is_ascii = (table_type == 'ascii')
if data.dtype.fields is None:
raise ValueError("data must have fields")
names = []
names_nocase = {}
formats = []
dims = []
descr = data.dtype.descr
for d in descr:
# these have the form '<f4' or '|S25', etc. Extract the pure type
npy_dtype = d[1][1:]
if is_ascii:
if npy_dtype in ['u1', 'i1']:
raise ValueError(
"1-byte integers are not supported for "
"ascii tables: '%s'" % npy_dtype)
if npy_dtype in ['u2']:
raise ValueError(
"unsigned 2-byte integers are not supported for "
"ascii tables: '%s'" % npy_dtype)
if npy_dtype[0] == 'O':
# this will be a variable length column 1Pt(len) where t is the
# type and len is max length. Each element must be convertible to
# the same type as the first
name = d[0]
form, dim = npy_obj2fits(data, name)
elif npy_dtype[0] == "V":
continue
else:
name, form, dim = _npy2fits(
d, table_type=table_type, write_bitcols=write_bitcols)
if name == '':
raise ValueError("field name is an empty string")
"""
if is_ascii:
if dim is not None:
raise ValueError("array columns are not supported for "
"ascii tables")
"""
name_nocase = name.upper()
if name_nocase in names_nocase:
raise ValueError(
"duplicate column name found: '%s'. Note "
"FITS column names are not case sensitive" % name_nocase)
names.append(name)
names_nocase[name_nocase] = name_nocase
formats.append(form)
dims.append(dim)
return names, formats, dims | [
"def",
"array2tabledef",
"(",
"data",
",",
"table_type",
"=",
"'binary'",
",",
"write_bitcols",
"=",
"False",
")",
":",
"is_ascii",
"=",
"(",
"table_type",
"==",
"'ascii'",
")",
"if",
"data",
".",
"dtype",
".",
"fields",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"data must have fields\"",
")",
"names",
"=",
"[",
"]",
"names_nocase",
"=",
"{",
"}",
"formats",
"=",
"[",
"]",
"dims",
"=",
"[",
"]",
"descr",
"=",
"data",
".",
"dtype",
".",
"descr",
"for",
"d",
"in",
"descr",
":",
"# these have the form '<f4' or '|S25', etc. Extract the pure type",
"npy_dtype",
"=",
"d",
"[",
"1",
"]",
"[",
"1",
":",
"]",
"if",
"is_ascii",
":",
"if",
"npy_dtype",
"in",
"[",
"'u1'",
",",
"'i1'",
"]",
":",
"raise",
"ValueError",
"(",
"\"1-byte integers are not supported for \"",
"\"ascii tables: '%s'\"",
"%",
"npy_dtype",
")",
"if",
"npy_dtype",
"in",
"[",
"'u2'",
"]",
":",
"raise",
"ValueError",
"(",
"\"unsigned 2-byte integers are not supported for \"",
"\"ascii tables: '%s'\"",
"%",
"npy_dtype",
")",
"if",
"npy_dtype",
"[",
"0",
"]",
"==",
"'O'",
":",
"# this will be a variable length column 1Pt(len) where t is the",
"# type and len is max length. Each element must be convertible to",
"# the same type as the first",
"name",
"=",
"d",
"[",
"0",
"]",
"form",
",",
"dim",
"=",
"npy_obj2fits",
"(",
"data",
",",
"name",
")",
"elif",
"npy_dtype",
"[",
"0",
"]",
"==",
"\"V\"",
":",
"continue",
"else",
":",
"name",
",",
"form",
",",
"dim",
"=",
"_npy2fits",
"(",
"d",
",",
"table_type",
"=",
"table_type",
",",
"write_bitcols",
"=",
"write_bitcols",
")",
"if",
"name",
"==",
"''",
":",
"raise",
"ValueError",
"(",
"\"field name is an empty string\"",
")",
"\"\"\"\n if is_ascii:\n if dim is not None:\n raise ValueError(\"array columns are not supported for \"\n \"ascii tables\")\n \"\"\"",
"name_nocase",
"=",
"name",
".",
"upper",
"(",
")",
"if",
"name_nocase",
"in",
"names_nocase",
":",
"raise",
"ValueError",
"(",
"\"duplicate column name found: '%s'. Note \"",
"\"FITS column names are not case sensitive\"",
"%",
"name_nocase",
")",
"names",
".",
"append",
"(",
"name",
")",
"names_nocase",
"[",
"name_nocase",
"]",
"=",
"name_nocase",
"formats",
".",
"append",
"(",
"form",
")",
"dims",
".",
"append",
"(",
"dim",
")",
"return",
"names",
",",
"formats",
",",
"dims"
] | Similar to descr2tabledef but if there are object columns a type
and max length will be extracted and used for the tabledef | [
"Similar",
"to",
"descr2tabledef",
"but",
"if",
"there",
"are",
"object",
"columns",
"a",
"type",
"and",
"max",
"length",
"will",
"be",
"extracted",
"and",
"used",
"for",
"the",
"tabledef"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1237-L1298 |
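array2tabledef is an internal helper; assuming the import path implied by this file, a sketch of what it derives from a structured array:

import numpy
from fitsio.fitslib import array2tabledef  # internal helper

rec = numpy.zeros(3, dtype=[('x', 'f8'), ('flags', 'i4'), ('name', 'S8')])
names, formats, dims = array2tabledef(rec)
print(names)    # ttype entries: ['x', 'flags', 'name']
print(formats)  # tform strings derived from the numpy dtype
print(dims)     # tdim entries; None for scalar columns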
2,163 | esheldon/fitsio | fitsio/fitslib.py | descr2tabledef | def descr2tabledef(descr, table_type='binary', write_bitcols=False):
"""
Create a FITS table def from the input numpy descriptor.
parameters
----------
descr: list
A numpy recarray type descriptor array.dtype.descr
returns
-------
names, formats, dims: tuple of lists
These are the ttype, tform and tdim header entries
for each field. dim entries may be None
"""
names = []
formats = []
dims = []
for d in descr:
"""
npy_dtype = d[1][1:]
if is_ascii and npy_dtype in ['u1','i1']:
raise ValueError("1-byte integers are not supported for "
"ascii tables")
"""
if d[1][1] == 'O':
raise ValueError(
'cannot automatically declare a var column without '
'some data to determine max len')
name, form, dim = _npy2fits(
d, table_type=table_type, write_bitcols=write_bitcols)
if name == '':
raise ValueError("field name is an empty string")
"""
if is_ascii:
if dim is not None:
raise ValueError("array columns are not supported "
"for ascii tables")
"""
names.append(name)
formats.append(form)
dims.append(dim)
return names, formats, dims | python | def descr2tabledef(descr, table_type='binary', write_bitcols=False):
"""
Create a FITS table def from the input numpy descriptor.
parameters
----------
descr: list
A numpy recarray type descriptor array.dtype.descr
returns
-------
names, formats, dims: tuple of lists
These are the ttype, tform and tdim header entries
for each field. dim entries may be None
"""
names = []
formats = []
dims = []
for d in descr:
"""
npy_dtype = d[1][1:]
if is_ascii and npy_dtype in ['u1','i1']:
raise ValueError("1-byte integers are not supported for "
"ascii tables")
"""
if d[1][1] == 'O':
raise ValueError(
'cannot automatically declare a var column without '
'some data to determine max len')
name, form, dim = _npy2fits(
d, table_type=table_type, write_bitcols=write_bitcols)
if name == '':
raise ValueError("field name is an empty string")
"""
if is_ascii:
if dim is not None:
raise ValueError("array columns are not supported "
"for ascii tables")
"""
names.append(name)
formats.append(form)
dims.append(dim)
return names, formats, dims | [
"def",
"descr2tabledef",
"(",
"descr",
",",
"table_type",
"=",
"'binary'",
",",
"write_bitcols",
"=",
"False",
")",
":",
"names",
"=",
"[",
"]",
"formats",
"=",
"[",
"]",
"dims",
"=",
"[",
"]",
"for",
"d",
"in",
"descr",
":",
"\"\"\"\n npy_dtype = d[1][1:]\n if is_ascii and npy_dtype in ['u1','i1']:\n raise ValueError(\"1-byte integers are not supported for \"\n \"ascii tables\")\n \"\"\"",
"if",
"d",
"[",
"1",
"]",
"[",
"1",
"]",
"==",
"'O'",
":",
"raise",
"ValueError",
"(",
"'cannot automatically declare a var column without '",
"'some data to determine max len'",
")",
"name",
",",
"form",
",",
"dim",
"=",
"_npy2fits",
"(",
"d",
",",
"table_type",
"=",
"table_type",
",",
"write_bitcols",
"=",
"write_bitcols",
")",
"if",
"name",
"==",
"''",
":",
"raise",
"ValueError",
"(",
"\"field name is an empty string\"",
")",
"\"\"\"\n if is_ascii:\n if dim is not None:\n raise ValueError(\"array columns are not supported \"\n \"for ascii tables\")\n \"\"\"",
"names",
".",
"append",
"(",
"name",
")",
"formats",
".",
"append",
"(",
"form",
")",
"dims",
".",
"append",
"(",
"dim",
")",
"return",
"names",
",",
"formats",
",",
"dims"
] | Create a FITS table def from the input numpy descriptor.
parameters
----------
descr: list
A numpy recarray type descriptor array.dtype.descr
returns
-------
names, formats, dims: tuple of lists
These are the ttype, tform and tdim header entries
for each field. dim entries may be None | [
"Create",
"a",
"FITS",
"table",
"def",
"from",
"the",
"input",
"numpy",
"descriptor",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1356-L1406 |
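A sketch of descr2tabledef above (also internal); note it rejects object columns, unlike array2tabledef:

import numpy
from fitsio.fitslib import descr2tabledef  # internal helper

dt = numpy.dtype([('ra', 'f8'), ('mag', 'f4', 3)])
names, formats, dims = descr2tabledef(dt.descr)
# array columns such as 'mag' receive a non-None tdim entry
print(list(zip(names, formats, dims)))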
2,164 | esheldon/fitsio | fitsio/fitslib.py | get_tile_dims | def get_tile_dims(tile_dims, imshape):
"""
Just make sure the tile dims have the appropriate number of dimensions
"""
if tile_dims is None:
td = None
else:
td = numpy.array(tile_dims, dtype='i8')
nd = len(imshape)
if td.size != nd:
msg = "expected tile_dims to have %d dims, got %d" % (td.size, nd)
raise ValueError(msg)
return td | python | def get_tile_dims(tile_dims, imshape):
"""
Just make sure the tile dims have the appropriate number of dimensions
"""
if tile_dims is None:
td = None
else:
td = numpy.array(tile_dims, dtype='i8')
nd = len(imshape)
if td.size != nd:
msg = "expected tile_dims to have %d dims, got %d" % (td.size, nd)
raise ValueError(msg)
return td | [
"def",
"get_tile_dims",
"(",
"tile_dims",
",",
"imshape",
")",
":",
"if",
"tile_dims",
"is",
"None",
":",
"td",
"=",
"None",
"else",
":",
"td",
"=",
"numpy",
".",
"array",
"(",
"tile_dims",
",",
"dtype",
"=",
"'i8'",
")",
"nd",
"=",
"len",
"(",
"imshape",
")",
"if",
"td",
".",
"size",
"!=",
"nd",
":",
"msg",
"=",
"\"expected tile_dims to have %d dims, got %d\"",
"%",
"(",
"td",
".",
"size",
",",
"nd",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"return",
"td"
] | Just make sure the tile dims have the appropriate number of dimensions | [
"Just",
"make",
"sure",
"the",
"tile",
"dims",
"has",
"the",
"appropriate",
"number",
"of",
"dimensions"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1452-L1466 |
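A sketch of the validation done by get_tile_dims above (internal helper):

from fitsio.fitslib import get_tile_dims  # internal helper

imshape = (1024, 2048)
td = get_tile_dims([100, 100], imshape)  # -> int64 array([100, 100])
td = get_tile_dims(None, imshape)        # -> None; cfitsio picks the tiling
# get_tile_dims([100], imshape)          # would raise ValueError: 1 dim vs 2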
2,165 | esheldon/fitsio | fitsio/fitslib.py | _extract_table_type | def _extract_table_type(type):
"""
Get the numerical table type
"""
if isinstance(type, str):
type = type.lower()
if type[0:7] == 'binary':
table_type = BINARY_TBL
elif type[0:6] == 'ascii':
table_type = ASCII_TBL
else:
raise ValueError(
"table type string should begin with 'binary' or 'ascii' "
"(case insensitive)")
else:
type = int(type)
if type not in [BINARY_TBL, ASCII_TBL]:
raise ValueError(
"table type num should be BINARY_TBL (%d) or "
"ASCII_TBL (%d)" % (BINARY_TBL, ASCII_TBL))
table_type = type
return table_type | python | def _extract_table_type(type):
"""
Get the numerical table type
"""
if isinstance(type, str):
type = type.lower()
if type[0:7] == 'binary':
table_type = BINARY_TBL
elif type[0:6] == 'ascii':
table_type = ASCII_TBL
else:
raise ValueError(
"table type string should begin with 'binary' or 'ascii' "
"(case insensitive)")
else:
type = int(type)
if type not in [BINARY_TBL, ASCII_TBL]:
raise ValueError(
"table type num should be BINARY_TBL (%d) or "
"ASCII_TBL (%d)" % (BINARY_TBL, ASCII_TBL))
table_type = type
return table_type | [
"def",
"_extract_table_type",
"(",
"type",
")",
":",
"if",
"isinstance",
"(",
"type",
",",
"str",
")",
":",
"type",
"=",
"type",
".",
"lower",
"(",
")",
"if",
"type",
"[",
"0",
":",
"7",
"]",
"==",
"'binary'",
":",
"table_type",
"=",
"BINARY_TBL",
"elif",
"type",
"[",
"0",
":",
"6",
"]",
"==",
"'ascii'",
":",
"table_type",
"=",
"ASCII_TBL",
"else",
":",
"raise",
"ValueError",
"(",
"\"table type string should begin with 'binary' or 'ascii' \"",
"\"(case insensitive)\"",
")",
"else",
":",
"type",
"=",
"int",
"(",
"type",
")",
"if",
"type",
"not",
"in",
"[",
"BINARY_TBL",
",",
"ASCII_TBL",
"]",
":",
"raise",
"ValueError",
"(",
"\"table type num should be BINARY_TBL (%d) or \"",
"\"ASCII_TBL (%d)\"",
"%",
"(",
"BINARY_TBL",
",",
"ASCII_TBL",
")",
")",
"table_type",
"=",
"type",
"return",
"table_type"
] | Get the numerical table type | [
"Get",
"the",
"numerical",
"table",
"type"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1496-L1518 |
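A sketch of _extract_table_type above; BINARY_TBL and ASCII_TBL are the module-level constants used throughout this file:

from fitsio.fitslib import _extract_table_type, BINARY_TBL, ASCII_TBL

assert _extract_table_type('binary') == BINARY_TBL
assert _extract_table_type('ASCII') == ASCII_TBL    # matching is case-insensitive
assert _extract_table_type(ASCII_TBL) == ASCII_TBL  # numeric input passes through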
2,166 | esheldon/fitsio | fitsio/fitslib.py | FITS.close | def close(self):
"""
Close the fits file and set relevant metadata to None
"""
if hasattr(self, '_FITS'):
if self._FITS is not None:
self._FITS.close()
self._FITS = None
self._filename = None
self.mode = None
self.charmode = None
self.intmode = None
self.hdu_list = None
self.hdu_map = None | python | def close(self):
"""
Close the fits file and set relevant metadata to None
"""
if hasattr(self, '_FITS'):
if self._FITS is not None:
self._FITS.close()
self._FITS = None
self._filename = None
self.mode = None
self.charmode = None
self.intmode = None
self.hdu_list = None
self.hdu_map = None | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_FITS'",
")",
":",
"if",
"self",
".",
"_FITS",
"is",
"not",
"None",
":",
"self",
".",
"_FITS",
".",
"close",
"(",
")",
"self",
".",
"_FITS",
"=",
"None",
"self",
".",
"_filename",
"=",
"None",
"self",
".",
"mode",
"=",
"None",
"self",
".",
"charmode",
"=",
"None",
"self",
".",
"intmode",
"=",
"None",
"self",
".",
"hdu_list",
"=",
"None",
"self",
".",
"hdu_map",
"=",
"None"
] | Close the fits file and set relevant metadata to None | [
"Close",
"the",
"fits",
"file",
"and",
"set",
"relevant",
"metadata",
"to",
"None"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L409-L422 |
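A sketch of FITS.close above; 'data.fits' is hypothetical. A with-block is usually preferable since it closes the file even on error:

import fitsio

fits = fitsio.FITS('data.fits')
try:
    data = fits[1].read()
finally:
    fits.close()   # releases the handle and clears the HDU list

with fitsio.FITS('data.fits') as fits:
    data = fits[1].read()   # close() is called on exit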
2,167 | esheldon/fitsio | fitsio/fitslib.py | FITS.movnam_hdu | def movnam_hdu(self, extname, hdutype=ANY_HDU, extver=0):
"""
Move to the indicated HDU by name
In general, it is not necessary to use this method explicitly.
returns the one-offset extension number
"""
extname = mks(extname)
hdu = self._FITS.movnam_hdu(hdutype, extname, extver)
return hdu | python | def movnam_hdu(self, extname, hdutype=ANY_HDU, extver=0):
"""
Move to the indicated HDU by name
In general, it is not necessary to use this method explicitly.
returns the one-offset extension number
"""
extname = mks(extname)
hdu = self._FITS.movnam_hdu(hdutype, extname, extver)
return hdu | [
"def",
"movnam_hdu",
"(",
"self",
",",
"extname",
",",
"hdutype",
"=",
"ANY_HDU",
",",
"extver",
"=",
"0",
")",
":",
"extname",
"=",
"mks",
"(",
"extname",
")",
"hdu",
"=",
"self",
".",
"_FITS",
".",
"movnam_hdu",
"(",
"hdutype",
",",
"extname",
",",
"extver",
")",
"return",
"hdu"
] | Move to the indicated HDU by name
In general, it is not necessary to use this method explicitly.
returns the one-offset extension number | [
"Move",
"to",
"the",
"indicated",
"HDU",
"by",
"name"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L452-L462 |
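A sketch of movnam_hdu above; as the docstring notes, indexing by name is normally sufficient ('CAT' and the file are hypothetical):

import fitsio

with fitsio.FITS('data.fits') as fits:
    hdunum = fits.movnam_hdu('CAT')   # one-offset extension number
    data = fits['CAT'].read()         # the usual, higher-level route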
2,168 | esheldon/fitsio | fitsio/fitslib.py | FITS.reopen | def reopen(self):
"""
close and reopen the fits file with the same mode
"""
self._FITS.close()
del self._FITS
self._FITS = _fitsio_wrap.FITS(self._filename, self.intmode, 0)
self.update_hdu_list() | python | def reopen(self):
"""
close and reopen the fits file with the same mode
"""
self._FITS.close()
del self._FITS
self._FITS = _fitsio_wrap.FITS(self._filename, self.intmode, 0)
self.update_hdu_list() | [
"def",
"reopen",
"(",
"self",
")",
":",
"self",
".",
"_FITS",
".",
"close",
"(",
")",
"del",
"self",
".",
"_FITS",
"self",
".",
"_FITS",
"=",
"_fitsio_wrap",
".",
"FITS",
"(",
"self",
".",
"_filename",
",",
"self",
".",
"intmode",
",",
"0",
")",
"self",
".",
"update_hdu_list",
"(",
")"
] | close and reopen the fits file with the same mode | [
"close",
"and",
"reopen",
"the",
"fits",
"file",
"with",
"the",
"same",
"mode"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L464-L471 |
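A sketch of reopen above; the scenario (the file changed on disk outside this handle) is hypothetical:

import fitsio

fits = fitsio.FITS('data.fits', 'rw')
# ... file is modified elsewhere ...
fits.reopen()        # same mode; rebuilds the HDU list from disk
print(len(fits))
fits.close()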
2,169 | esheldon/fitsio | fitsio/fitslib.py | FITS.write | def write(self, data, units=None, extname=None, extver=None,
compress=None, tile_dims=None,
header=None,
names=None,
table_type='binary', write_bitcols=False, **keys):
"""
Write the data to a new HDU.
This method is a wrapper. If this is an IMAGE_HDU, write_image is
called, otherwise write_table is called.
parameters
----------
data: ndarray
An n-dimensional image or an array with fields.
extname: string, optional
An optional extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
header: FITSHDR, list, dict, optional
A set of header keys to write. Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
Note required keywords such as NAXIS, XTENSION, etc. are cleaned out.
Image-only keywords:
compress: string, optional
A string representing the compression algorithm for images,
default None.
Can be one of
'RICE'
'GZIP'
'GZIP_2'
'PLIO' (no unsigned or negative integers)
'HCOMPRESS'
(case-insensitive) See the cfitsio manual for details.
Table-only keywords:
units: list, optional:
A list of strings with units for each column.
table_type: string, optional
Either 'binary' or 'ascii', default 'binary'
Matching is case-insensitive
write_bitcols: bool, optional
Write boolean arrays in the FITS bitcols format, default False
restrictions
------------
The file must be opened READWRITE
"""
isimage = False
if data is None:
isimage = True
elif isinstance(data, numpy.ndarray):
if data.dtype.fields is None:
isimage = True
if isimage:
self.write_image(data, extname=extname, extver=extver,
compress=compress, tile_dims=tile_dims,
header=header)
else:
self.write_table(data, units=units,
extname=extname, extver=extver, header=header,
names=names,
table_type=table_type,
write_bitcols=write_bitcols) | python | def write(self, data, units=None, extname=None, extver=None,
compress=None, tile_dims=None,
header=None,
names=None,
table_type='binary', write_bitcols=False, **keys):
"""
Write the data to a new HDU.
This method is a wrapper. If this is an IMAGE_HDU, write_image is
called, otherwise write_table is called.
parameters
----------
data: ndarray
An n-dimensional image or an array with fields.
extname: string, optional
An optional extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
header: FITSHDR, list, dict, optional
A set of header keys to write. Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
Note required keywords such as NAXIS, XTENSION, etc. are cleaned out.
Image-only keywords:
compress: string, optional
A string representing the compression algorithm for images,
default None.
Can be one of
'RICE'
'GZIP'
'GZIP_2'
'PLIO' (no unsigned or negative integers)
'HCOMPRESS'
(case-insensitive) See the cfitsio manual for details.
Table-only keywords:
units: list, optional:
A list of strings with units for each column.
table_type: string, optional
Either 'binary' or 'ascii', default 'binary'
Matching is case-insensitive
write_bitcols: bool, optional
Write boolean arrays in the FITS bitcols format, default False
restrictions
------------
The file must be opened READWRITE
"""
isimage = False
if data is None:
isimage = True
elif isinstance(data, numpy.ndarray):
if data.dtype.fields is None:
isimage = True
if isimage:
self.write_image(data, extname=extname, extver=extver,
compress=compress, tile_dims=tile_dims,
header=header)
else:
self.write_table(data, units=units,
extname=extname, extver=extver, header=header,
names=names,
table_type=table_type,
write_bitcols=write_bitcols) | [
"def",
"write",
"(",
"self",
",",
"data",
",",
"units",
"=",
"None",
",",
"extname",
"=",
"None",
",",
"extver",
"=",
"None",
",",
"compress",
"=",
"None",
",",
"tile_dims",
"=",
"None",
",",
"header",
"=",
"None",
",",
"names",
"=",
"None",
",",
"table_type",
"=",
"'binary'",
",",
"write_bitcols",
"=",
"False",
",",
"*",
"*",
"keys",
")",
":",
"isimage",
"=",
"False",
"if",
"data",
"is",
"None",
":",
"isimage",
"=",
"True",
"elif",
"isinstance",
"(",
"data",
",",
"numpy",
".",
"ndarray",
")",
":",
"if",
"data",
".",
"dtype",
".",
"fields",
"==",
"None",
":",
"# noqa - probably should be is None",
"isimage",
"=",
"True",
"if",
"isimage",
":",
"self",
".",
"write_image",
"(",
"data",
",",
"extname",
"=",
"extname",
",",
"extver",
"=",
"extver",
",",
"compress",
"=",
"compress",
",",
"tile_dims",
"=",
"tile_dims",
",",
"header",
"=",
"header",
")",
"else",
":",
"self",
".",
"write_table",
"(",
"data",
",",
"units",
"=",
"units",
",",
"extname",
"=",
"extname",
",",
"extver",
"=",
"extver",
",",
"header",
"=",
"header",
",",
"names",
"=",
"names",
",",
"table_type",
"=",
"table_type",
",",
"write_bitcols",
"=",
"write_bitcols",
")"
] | Write the data to a new HDU.
This method is a wrapper. If this is an IMAGE_HDU, write_image is
called, otherwise write_table is called.
parameters
----------
data: ndarray
An n-dimensional image or an array with fields.
extname: string, optional
An optional extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
header: FITSHDR, list, dict, optional
A set of header keys to write. Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
Note required keywords such as NAXIS, XTENSION, etc. are cleaned out.
Image-only keywords:
compress: string, optional
A string representing the compression algorithm for images,
default None.
Can be one of
'RICE'
'GZIP'
'GZIP_2'
'PLIO' (no unsigned or negative integers)
'HCOMPRESS'
(case-insensitive) See the cfitsio manual for details.
Table-only keywords:
units: list, optional:
A list of strings with units for each column.
table_type: string, optional
Either 'binary' or 'ascii', default 'binary'
Matching is case-insensitive
write_bitcols: bool, optional
Write boolean arrays in the FITS bitcols format, default False
restrictions
------------
The file must be opened READWRITE | [
"Write",
"the",
"data",
"to",
"a",
"new",
"HDU",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L473-L549 |
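A sketch of the dispatch in FITS.write above; 'out.fits' is hypothetical:

import numpy
import fitsio

with fitsio.FITS('out.fits', 'rw', clobber=True) as fits:
    img = numpy.arange(12, dtype='i4').reshape(3, 4)
    fits.write(img)                  # no fields -> write_image is called
    rec = numpy.zeros(2, dtype=[('x', 'f8')])
    fits.write(rec, extname='CAT')   # fields present -> write_table is called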
2,170 | esheldon/fitsio | fitsio/fitslib.py | FITS.write_image | def write_image(self, img, extname=None, extver=None,
compress=None, tile_dims=None, header=None):
"""
Create a new image extension and write the data.
parameters
----------
img: ndarray
An n-dimensional image.
extname: string, optional
An optional extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
compress: string, optional
A string representing the compression algorithm for images,
default None.
Can be one of
'RICE'
'GZIP'
'GZIP_2'
'PLIO' (no unsigned or negative integers)
'HCOMPRESS'
(case-insensitive) See the cfitsio manual for details.
header: FITSHDR, list, dict, optional
A set of header keys to write. Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
Note required keywords such as NAXIS, XTENSION, etc. are cleaned out.
restrictions
------------
The file must be opened READWRITE
"""
self.create_image_hdu(img,
header=header,
extname=extname, extver=extver,
compress=compress, tile_dims=tile_dims)
if header is not None:
self[-1].write_keys(header)
self[-1]._update_info() | python | def write_image(self, img, extname=None, extver=None,
compress=None, tile_dims=None, header=None):
"""
Create a new image extension and write the data.
parameters
----------
img: ndarray
An n-dimensional image.
extname: string, optional
An optional extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
compress: string, optional
A string representing the compression algorithm for images,
default None.
Can be one of
'RICE'
'GZIP'
'GZIP_2'
'PLIO' (no unsigned or negative integers)
'HCOMPRESS'
(case-insensitive) See the cfitsio manual for details.
header: FITSHDR, list, dict, optional
A set of header keys to write. Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
Note required keywords such as NAXIS, XTENSION, etc. are cleaned out.
restrictions
------------
The file must be opened READWRITE
"""
self.create_image_hdu(img,
header=header,
extname=extname, extver=extver,
compress=compress, tile_dims=tile_dims)
if header is not None:
self[-1].write_keys(header)
self[-1]._update_info() | [
"def",
"write_image",
"(",
"self",
",",
"img",
",",
"extname",
"=",
"None",
",",
"extver",
"=",
"None",
",",
"compress",
"=",
"None",
",",
"tile_dims",
"=",
"None",
",",
"header",
"=",
"None",
")",
":",
"self",
".",
"create_image_hdu",
"(",
"img",
",",
"header",
"=",
"header",
",",
"extname",
"=",
"extname",
",",
"extver",
"=",
"extver",
",",
"compress",
"=",
"compress",
",",
"tile_dims",
"=",
"tile_dims",
")",
"if",
"header",
"is",
"not",
"None",
":",
"self",
"[",
"-",
"1",
"]",
".",
"write_keys",
"(",
"header",
")",
"self",
"[",
"-",
"1",
"]",
".",
"_update_info",
"(",
")"
] | Create a new image extension and write the data.
parameters
----------
img: ndarray
An n-dimensional image.
extname: string, optional
An optional extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
compress: string, optional
A string representing the compression algorithm for images,
default None.
Can be one of
'RICE'
'GZIP'
'GZIP_2'
'PLIO' (no unsigned or negative integers)
'HCOMPRESS'
(case-insensitive) See the cfitsio manual for details.
header: FITSHDR, list, dict, optional
A set of header keys to write. Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
Note required keywords such as NAXIS, XTENSION, etc. are cleaned out.
restrictions
------------
The file must be opened READWRITE | [
"Create",
"a",
"new",
"image",
"extension",
"and",
"write",
"the",
"data",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L551-L601 |
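A sketch of write_image above with compression and a header dict; file and keyword names are hypothetical:

import numpy
import fitsio

img = numpy.zeros((256, 256), dtype='f4')
with fitsio.FITS('img.fits', 'rw', clobber=True) as fits:
    fits.write_image(img, extname='SCI', compress='rice',
                     header={'ORIGIN': 'sim'})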
2,171 | esheldon/fitsio | fitsio/fitslib.py | FITS.create_image_hdu | def create_image_hdu(self,
img=None,
dims=None,
dtype=None,
extname=None,
extver=None,
compress=None,
tile_dims=None,
header=None):
"""
Create a new, empty image HDU and reload the hdu list. Either
create from an input image or from input dims and dtype
fits.create_image_hdu(image, ...)
fits.create_image_hdu(dims=dims, dtype=dtype)
If an image is sent, the data are also written.
You can write data into the new extension using
fits[extension].write(image)
Alternatively you can skip calling this function and instead just use
fits.write(image)
or
fits.write_image(image)
which will create the new image extension for you with the appropriate
structure, and write the data.
parameters
----------
img: ndarray, optional
An image with which to determine the properties of the HDU. The
data will be written.
dims: sequence, optional
A sequence describing the dimensions of the image to be created
on disk. You must also send a dtype=
dtype: numpy data type
When sending dims= also send the data type. Can be of the
various numpy data type declaration styles, e.g. 'f8',
numpy.float64.
extname: string, optional
An optional extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
compress: string, optional
A string representing the compression algorithm for images,
default None.
Can be one of
'RICE'
'GZIP'
'GZIP_2'
'PLIO' (no unsigned or negative integers)
'HCOMPRESS'
(case-insensitive) See the cfitsio manual for details.
header: FITSHDR, list, dict, optional
This is only used to determine how many slots to reserve for
header keywords
restrictions
------------
The file must be opened READWRITE
"""
if (img is not None) or (img is None and dims is None):
from_image = True
elif dims is not None:
from_image = False
if from_image:
img2send = img
if img is not None:
dims = img.shape
dtstr = img.dtype.descr[0][1][1:]
if img.size == 0:
raise ValueError("data must have at least 1 row")
# data must be c-contiguous and native byte order
if not img.flags['C_CONTIGUOUS']:
# this always makes a copy
img2send = numpy.ascontiguousarray(img)
array_to_native(img2send, inplace=True)
else:
img2send = array_to_native(img, inplace=False)
if IS_PY3 and img2send.dtype.char == 'U':
# for python3, we convert unicode to ascii
# this will error if the character is not in ascii
img2send = img2send.astype('S', copy=False)
else:
self._ensure_empty_image_ok()
compress = None
tile_dims = None
# we get dims from the input image
dims2send = None
else:
# img was None and dims was sent
if dtype is None:
raise ValueError("send dtype= with dims=")
# this must work!
dtype = numpy.dtype(dtype)
dtstr = dtype.descr[0][1][1:]
# use the example image to build the type in C
img2send = numpy.zeros(1, dtype=dtype)
# sending an array simplifies access
dims2send = numpy.array(dims, dtype='i8', ndmin=1)
if img2send is not None:
if img2send.dtype.fields is not None:
raise ValueError(
"got record data type, expected regular ndarray")
if extname is None:
# will be ignored
extname = ""
else:
if not isstring(extname):
raise ValueError("extension name must be a string")
extname = mks(extname)
if extname is not None and extver is not None:
extver = check_extver(extver)
if extver is None:
# will be ignored
extver = 0
comptype = get_compress_type(compress)
tile_dims = get_tile_dims(tile_dims, dims)
if img2send is not None:
check_comptype_img(comptype, dtstr)
if header is not None:
nkeys = len(header)
else:
nkeys = 0
self._FITS.create_image_hdu(img2send,
nkeys,
dims=dims2send,
comptype=comptype,
tile_dims=tile_dims,
extname=extname,
extver=extver)
# don't rebuild the whole list unless this is the first hdu
# to be created
self.update_hdu_list(rebuild=False) | python | def create_image_hdu(self,
img=None,
dims=None,
dtype=None,
extname=None,
extver=None,
compress=None,
tile_dims=None,
header=None):
"""
Create a new, empty image HDU and reload the hdu list. Either
create from an input image or from input dims and dtype
fits.create_image_hdu(image, ...)
fits.create_image_hdu(dims=dims, dtype=dtype)
If an image is sent, the data are also written.
You can write data into the new extension using
fits[extension].write(image)
Alternatively you can skip calling this function and instead just use
fits.write(image)
or
fits.write_image(image)
which will create the new image extension for you with the appropriate
structure, and write the data.
parameters
----------
img: ndarray, optional
An image with which to determine the properties of the HDU. The
data will be written.
dims: sequence, optional
A sequence describing the dimensions of the image to be created
on disk. You must also send a dtype=
dtype: numpy data type
When sending dims= also send the data type. Can be of the
various numpy data type declaration styles, e.g. 'f8',
numpy.float64.
extname: string, optional
An optional extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
compress: string, optional
A string representing the compression algorithm for images,
default None.
Can be one of
'RICE'
'GZIP'
'GZIP_2'
'PLIO' (no unsigned or negative integers)
'HCOMPRESS'
(case-insensitive) See the cfitsio manual for details.
header: FITSHDR, list, dict, optional
This is only used to determine how many slots to reserve for
header keywords
restrictions
------------
The file must be opened READWRITE
"""
if (img is not None) or (img is None and dims is None):
from_image = True
elif dims is not None:
from_image = False
if from_image:
img2send = img
if img is not None:
dims = img.shape
dtstr = img.dtype.descr[0][1][1:]
if img.size == 0:
raise ValueError("data must have at least 1 row")
# data must be c-contiguous and native byte order
if not img.flags['C_CONTIGUOUS']:
# this always makes a copy
img2send = numpy.ascontiguousarray(img)
array_to_native(img2send, inplace=True)
else:
img2send = array_to_native(img, inplace=False)
if IS_PY3 and img2send.dtype.char == 'U':
# for python3, we convert unicode to ascii
# this will error if the character is not in ascii
img2send = img2send.astype('S', copy=False)
else:
self._ensure_empty_image_ok()
compress = None
tile_dims = None
# we get dims from the input image
dims2send = None
else:
# img was None and dims was sent
if dtype is None:
raise ValueError("send dtype= with dims=")
# this must work!
dtype = numpy.dtype(dtype)
dtstr = dtype.descr[0][1][1:]
# use the example image to build the type in C
img2send = numpy.zeros(1, dtype=dtype)
# sending an array simplifies access
dims2send = numpy.array(dims, dtype='i8', ndmin=1)
if img2send is not None:
if img2send.dtype.fields is not None:
raise ValueError(
"got record data type, expected regular ndarray")
if extname is None:
# will be ignored
extname = ""
else:
if not isstring(extname):
raise ValueError("extension name must be a string")
extname = mks(extname)
if extname is not None and extver is not None:
extver = check_extver(extver)
if extver is None:
# will be ignored
extver = 0
comptype = get_compress_type(compress)
tile_dims = get_tile_dims(tile_dims, dims)
if img2send is not None:
check_comptype_img(comptype, dtstr)
if header is not None:
nkeys = len(header)
else:
nkeys = 0
self._FITS.create_image_hdu(img2send,
nkeys,
dims=dims2send,
comptype=comptype,
tile_dims=tile_dims,
extname=extname,
extver=extver)
# don't rebuild the whole list unless this is the first hdu
# to be created
self.update_hdu_list(rebuild=False) | [
"def",
"create_image_hdu",
"(",
"self",
",",
"img",
"=",
"None",
",",
"dims",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"extname",
"=",
"None",
",",
"extver",
"=",
"None",
",",
"compress",
"=",
"None",
",",
"tile_dims",
"=",
"None",
",",
"header",
"=",
"None",
")",
":",
"if",
"(",
"img",
"is",
"not",
"None",
")",
"or",
"(",
"img",
"is",
"None",
"and",
"dims",
"is",
"None",
")",
":",
"from_image",
"=",
"True",
"elif",
"dims",
"is",
"not",
"None",
":",
"from_image",
"=",
"False",
"if",
"from_image",
":",
"img2send",
"=",
"img",
"if",
"img",
"is",
"not",
"None",
":",
"dims",
"=",
"img",
".",
"shape",
"dtstr",
"=",
"img",
".",
"dtype",
".",
"descr",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"1",
":",
"]",
"if",
"img",
".",
"size",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"data must have at least 1 row\"",
")",
"# data must be c-contiguous and native byte order",
"if",
"not",
"img",
".",
"flags",
"[",
"'C_CONTIGUOUS'",
"]",
":",
"# this always makes a copy",
"img2send",
"=",
"numpy",
".",
"ascontiguousarray",
"(",
"img",
")",
"array_to_native",
"(",
"img2send",
",",
"inplace",
"=",
"True",
")",
"else",
":",
"img2send",
"=",
"array_to_native",
"(",
"img",
",",
"inplace",
"=",
"False",
")",
"if",
"IS_PY3",
"and",
"img2send",
".",
"dtype",
".",
"char",
"==",
"'U'",
":",
"# for python3, we convert unicode to ascii",
"# this will error if the character is not in ascii",
"img2send",
"=",
"img2send",
".",
"astype",
"(",
"'S'",
",",
"copy",
"=",
"False",
")",
"else",
":",
"self",
".",
"_ensure_empty_image_ok",
"(",
")",
"compress",
"=",
"None",
"tile_dims",
"=",
"None",
"# we get dims from the input image",
"dims2send",
"=",
"None",
"else",
":",
"# img was None and dims was sent",
"if",
"dtype",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"send dtype= with dims=\"",
")",
"# this must work!",
"dtype",
"=",
"numpy",
".",
"dtype",
"(",
"dtype",
")",
"dtstr",
"=",
"dtype",
".",
"descr",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"1",
":",
"]",
"# use the example image to build the type in C",
"img2send",
"=",
"numpy",
".",
"zeros",
"(",
"1",
",",
"dtype",
"=",
"dtype",
")",
"# sending an array simplifies access",
"dims2send",
"=",
"numpy",
".",
"array",
"(",
"dims",
",",
"dtype",
"=",
"'i8'",
",",
"ndmin",
"=",
"1",
")",
"if",
"img2send",
"is",
"not",
"None",
":",
"if",
"img2send",
".",
"dtype",
".",
"fields",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"got record data type, expected regular ndarray\"",
")",
"if",
"extname",
"is",
"None",
":",
"# will be ignored",
"extname",
"=",
"\"\"",
"else",
":",
"if",
"not",
"isstring",
"(",
"extname",
")",
":",
"raise",
"ValueError",
"(",
"\"extension name must be a string\"",
")",
"extname",
"=",
"mks",
"(",
"extname",
")",
"if",
"extname",
"is",
"not",
"None",
"and",
"extver",
"is",
"not",
"None",
":",
"extver",
"=",
"check_extver",
"(",
"extver",
")",
"if",
"extver",
"is",
"None",
":",
"# will be ignored",
"extver",
"=",
"0",
"comptype",
"=",
"get_compress_type",
"(",
"compress",
")",
"tile_dims",
"=",
"get_tile_dims",
"(",
"tile_dims",
",",
"dims",
")",
"if",
"img2send",
"is",
"not",
"None",
":",
"check_comptype_img",
"(",
"comptype",
",",
"dtstr",
")",
"if",
"header",
"is",
"not",
"None",
":",
"nkeys",
"=",
"len",
"(",
"header",
")",
"else",
":",
"nkeys",
"=",
"0",
"self",
".",
"_FITS",
".",
"create_image_hdu",
"(",
"img2send",
",",
"nkeys",
",",
"dims",
"=",
"dims2send",
",",
"comptype",
"=",
"comptype",
",",
"tile_dims",
"=",
"tile_dims",
",",
"extname",
"=",
"extname",
",",
"extver",
"=",
"extver",
")",
"# don't rebuild the whole list unless this is the first hdu",
"# to be created",
"self",
".",
"update_hdu_list",
"(",
"rebuild",
"=",
"False",
")"
] | Create a new, empty image HDU and reload the hdu list. Either
create from an input image or from input dims and dtype
fits.create_image_hdu(image, ...)
fits.create_image_hdu(dims=dims, dtype=dtype)
If an image is sent, the data are also written.
You can write data into the new extension using
fits[extension].write(image)
Alternatively you can skip calling this function and instead just use
fits.write(image)
or
fits.write_image(image)
which will create the new image extension for you with the appropriate
structure, and write the data.
parameters
----------
img: ndarray, optional
An image with which to determine the properties of the HDU. The
data will be written.
dims: sequence, optional
A sequence describing the dimensions of the image to be created
on disk. You must also send a dtype=
dtype: numpy data type
When sending dims= also send the data type. Can be of the
various numpy data type declaration styles, e.g. 'f8',
numpy.float64.
extname: string, optional
An optional extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
compress: string, optional
A string representing the compression algorithm for images,
default None.
Can be one of
'RICE'
'GZIP'
'GZIP_2'
'PLIO' (no unsigned or negative integers)
'HCOMPRESS'
(case-insensitive) See the cfitsio manual for details.
header: FITSHDR, list, dict, optional
This is only used to determine how many slots to reserve for
header keywords
restrictions
------------
The File must be opened READWRITE | [
"Create",
"a",
"new",
"empty",
"image",
"HDU",
"and",
"reload",
"the",
"hdu",
"list",
".",
"Either",
"create",
"from",
"an",
"input",
"image",
"or",
"from",
"input",
"dims",
"and",
"dtype"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L606-L765 |
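A minimal usage sketch of the two creation modes described above, assuming the fitsio API as documented in this record; the file name and extension names are illustrative only.

    import numpy
    import fitsio

    img = numpy.arange(12, dtype='f4').reshape(3, 4)
    with fitsio.FITS('demo.fits', 'rw', clobber=True) as fits:
        # mode 1: create from an existing array; the data are written too
        fits.create_image_hdu(img, extname='SCI')
        # mode 2: create an empty image from dims= and dtype=, fill it later
        fits.create_image_hdu(dims=[5, 5], dtype='f8', extname='VAR')
        fits['VAR'].write(numpy.zeros((5, 5)))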
2,172 | esheldon/fitsio | fitsio/fitslib.py | FITS._ensure_empty_image_ok | def _ensure_empty_image_ok(self):
"""
If ignore_empty was not set to True, we only allow an empty HDU for the
first HDU, and only if there is no data there already
"""
if self.ignore_empty:
return
if len(self) > 1:
raise RuntimeError(
"Cannot write None image at extension %d" % len(self))
if 'ndims' in self[0]._info:
raise RuntimeError("Can only write None images to extension zero, "
"which already exists") | python | def _ensure_empty_image_ok(self):
"""
If ignore_empty was not set to True, we only allow an empty HDU for the
first HDU, and only if there is no data there already
"""
if self.ignore_empty:
return
if len(self) > 1:
raise RuntimeError(
"Cannot write None image at extension %d" % len(self))
if 'ndims' in self[0]._info:
raise RuntimeError("Can only write None images to extension zero, "
"which already exists") | [
"def",
"_ensure_empty_image_ok",
"(",
"self",
")",
":",
"if",
"self",
".",
"ignore_empty",
":",
"return",
"if",
"len",
"(",
"self",
")",
">",
"1",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot write None image at extension %d\"",
"%",
"len",
"(",
"self",
")",
")",
"if",
"'ndims'",
"in",
"self",
"[",
"0",
"]",
".",
"_info",
":",
"raise",
"RuntimeError",
"(",
"\"Can only write None images to extension zero, \"",
"\"which already exists\"",
")"
] | If ignore_empty was not set to True, we only allow an empty HDU for the
first HDU, and only if there is no data there already | [
"If",
"ignore_empty",
"was",
"not",
"set",
"to",
"True",
"we",
"only",
"allow",
"empty",
"HDU",
"for",
"first",
"HDU",
"and",
"if",
"there",
"is",
"no",
"data",
"there",
"already"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L767-L780 |
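A sketch of the guard above, assuming ignore_empty is the FITS constructor keyword this attribute refers to; the file name is hypothetical.

    import fitsio

    # with ignore_empty=True the check returns early, so empty HDUs are
    # allowed anywhere; otherwise only at extension zero with no data
    fits = fitsio.FITS('demo.fits', 'rw', clobber=True, ignore_empty=True)
    fits.create_image_hdu(img=None)  # empty image HDU, no error raised
    fits.close()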
2,173 | esheldon/fitsio | fitsio/fitslib.py | FITS.write_table | def write_table(self, data, table_type='binary',
names=None, formats=None, units=None,
extname=None, extver=None, header=None,
write_bitcols=False):
"""
Create a new table extension and write the data.
The table definition is taken from the fields in the input array. If
you want to append new rows to the table, access the HDU directly and
use the write() function, e.g.
fits[extension].append(data)
parameters
----------
data: recarray
A numpy array with fields. The table definition will be
determined from this array.
table_type: string, optional
Either 'binary' or 'ascii', default 'binary'
Matching is case-insensitive
extname: string, optional
An optional string for the extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
units: list of strings, optional
A list of strings with units for each column.
header: FITSHDR, list, dict, optional
A set of header keys to write. The keys are written before the data
is written to the table, preventing a resizing of the table area.
Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
Note: required keywords such as NAXIS, XTENSION, etc. are cleaned out.
write_bitcols: boolean, optional
Write boolean arrays in the FITS bitcols format, default False
restrictions
------------
The File must be opened READWRITE
"""
"""
if data.dtype.fields == None:
raise ValueError("data must have fields")
if data.size == 0:
raise ValueError("data must have at least 1 row")
"""
self.create_table_hdu(data=data,
header=header,
names=names,
units=units,
extname=extname,
extver=extver,
table_type=table_type,
write_bitcols=write_bitcols)
if header is not None:
self[-1].write_keys(header)
self[-1]._update_info()
self[-1].write(data, names=names) | python | def write_table(self, data, table_type='binary',
names=None, formats=None, units=None,
extname=None, extver=None, header=None,
write_bitcols=False):
"""
Create a new table extension and write the data.
The table definition is taken from the fields in the input array. If
you want to append new rows to the table, access the HDU directly and
use the write() function, e.g.
fits[extension].append(data)
parameters
----------
data: recarray
A numpy array with fields. The table definition will be
determined from this array.
table_type: string, optional
Either 'binary' or 'ascii', default 'binary'
Matching is case-insensitive
extname: string, optional
An optional string for the extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
units: list of strings, optional
A list of strings with units for each column.
header: FITSHDR, list, dict, optional
A set of header keys to write. The keys are written before the data
is written to the table, preventing a resizing of the table area.
Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
Note: required keywords such as NAXIS, XTENSION, etc. are cleaned out.
write_bitcols: boolean, optional
Write boolean arrays in the FITS bitcols format, default False
restrictions
------------
The File must be opened READWRITE
"""
"""
if data.dtype.fields == None:
raise ValueError("data must have fields")
if data.size == 0:
raise ValueError("data must have at least 1 row")
"""
self.create_table_hdu(data=data,
header=header,
names=names,
units=units,
extname=extname,
extver=extver,
table_type=table_type,
write_bitcols=write_bitcols)
if header is not None:
self[-1].write_keys(header)
self[-1]._update_info()
self[-1].write(data, names=names) | [
"def",
"write_table",
"(",
"self",
",",
"data",
",",
"table_type",
"=",
"'binary'",
",",
"names",
"=",
"None",
",",
"formats",
"=",
"None",
",",
"units",
"=",
"None",
",",
"extname",
"=",
"None",
",",
"extver",
"=",
"None",
",",
"header",
"=",
"None",
",",
"write_bitcols",
"=",
"False",
")",
":",
"\"\"\"\n if data.dtype.fields == None:\n raise ValueError(\"data must have fields\")\n if data.size == 0:\n raise ValueError(\"data must have at least 1 row\")\n \"\"\"",
"self",
".",
"create_table_hdu",
"(",
"data",
"=",
"data",
",",
"header",
"=",
"header",
",",
"names",
"=",
"names",
",",
"units",
"=",
"units",
",",
"extname",
"=",
"extname",
",",
"extver",
"=",
"extver",
",",
"table_type",
"=",
"table_type",
",",
"write_bitcols",
"=",
"write_bitcols",
")",
"if",
"header",
"is",
"not",
"None",
":",
"self",
"[",
"-",
"1",
"]",
".",
"write_keys",
"(",
"header",
")",
"self",
"[",
"-",
"1",
"]",
".",
"_update_info",
"(",
")",
"self",
"[",
"-",
"1",
"]",
".",
"write",
"(",
"data",
",",
"names",
"=",
"names",
")"
] | Create a new table extension and write the data.
The table definition is taken from the fields in the input array. If
you want to append new rows to the table, access the HDU directly and
use the write() function, e.g.
fits[extension].append(data)
parameters
----------
data: recarray
A numpy array with fields. The table definition will be
determined from this array.
table_type: string, optional
Either 'binary' or 'ascii', default 'binary'
Matching is case-insensitive
extname: string, optional
An optional string for the extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
units: list of strings, optional
A list of strings with units for each column.
header: FITSHDR, list, dict, optional
A set of header keys to write. The keys are written before the data
is written to the table, preventing a resizing of the table area.
Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
Note: required keywords such as NAXIS, XTENSION, etc. are cleaned out.
write_bitcols: boolean, optional
Write boolean arrays in the FITS bitcols format, default False
restrictions
------------
The File must be opened READWRITE | [
"Create",
"a",
"new",
"table",
"extension",
"and",
"write",
"the",
"data",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L782-L853 |
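A usage sketch for write_table, assuming the API as documented above; the file name, extension name, and units are illustrative.

    import numpy
    import fitsio

    data = numpy.zeros(4, dtype=[('x', 'f8'), ('flux', 'f4'), ('name', 'S10')])
    with fitsio.FITS('cat.fits', 'rw', clobber=True) as fits:
        # the table definition is taken from the fields of the input array
        fits.write_table(data, extname='CAT', units=['deg', 'nJy', ''])
        # append more rows later via the HDU object, as the docstring notes
        fits['CAT'].append(data)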
2,174 | esheldon/fitsio | fitsio/fitslib.py | FITS.create_table_hdu | def create_table_hdu(self, data=None, dtype=None,
header=None,
names=None, formats=None,
units=None, dims=None, extname=None, extver=None,
table_type='binary', write_bitcols=False):
"""
Create a new, empty table extension and reload the hdu list.
There are three ways to do it:
1) send a numpy dtype, from which the formats in the fits file will
be determined.
2) Send an array in data= keyword. this is required if you have
object fields for writing to variable length columns.
3) send the names,formats and dims yourself
You can then write data into the new extension using
fits[extension].write(array)
If you want to write to a single column
fits[extension].write_column(array)
But be careful as the other columns will be left zeroed.
Often you will instead just use write_table to do this all
atomically.
fits.write_table(recarray)
write_table will create the new table extension for you with the
appropriate fields.
parameters
----------
dtype: numpy dtype or descriptor, optional
If you have an array with fields, you can just send arr.dtype. You
can also use a list of tuples, e.g. [('x','f8'),('index','i4')] or
a dictionary representation.
data: a numpy array with fields, optional
or a dictionary
An array or dict from which to determine the table definition. You
must use this instead of sending a descriptor if you have object
array fields, as this is the only way to determine the type and max
size.
names: list of strings, optional
The list of field names
formats: list of strings, optional
The TFORM format strings for each field.
dims: list of strings, optional
An optional list of dimension strings for each field. Should
match the repeat count for the formats fields. Be careful of
the order since FITS is more like fortran. See the descr2tabledef
function.
table_type: string, optional
Either 'binary' or 'ascii', default 'binary'
Matching is case-insensitive
units: list of strings, optional
An optional list of unit strings for each field.
extname: string, optional
An optional extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
write_bitcols: bool, optional
Write boolean arrays in the FITS bitcols format, default False
header: FITSHDR, list, dict, optional
This is only used to determine how many slots to reserve for
header keywords
restrictions
------------
The File must be opened READWRITE
"""
# record this for the TableHDU object
self.keys['write_bitcols'] = write_bitcols
# convert the string table type to its integer code
table_type_int = _extract_table_type(table_type)
if data is not None:
if isinstance(data, numpy.ndarray):
names, formats, dims = array2tabledef(
data, table_type=table_type, write_bitcols=write_bitcols)
elif isinstance(data, (list, dict)):
names, formats, dims = collection2tabledef(
data, names=names, table_type=table_type,
write_bitcols=write_bitcols)
else:
raise ValueError(
"data must be an ndarray with fields or a dict")
elif dtype is not None:
dtype = numpy.dtype(dtype)
names, formats, dims = descr2tabledef(
dtype.descr,
write_bitcols=write_bitcols,
table_type=table_type,
)
else:
if names is None or formats is None:
raise ValueError(
"send either dtype=, data=, or names= and formats=")
if not isinstance(names, list) or not isinstance(formats, list):
raise ValueError("names and formats should be lists")
if len(names) != len(formats):
raise ValueError("names and formats must be same length")
if dims is not None:
if not isinstance(dims, list):
raise ValueError("dims should be a list")
if len(dims) != len(names):
raise ValueError("names and dims must be same length")
if units is not None:
if not isinstance(units, list):
raise ValueError("units should be a list")
if len(units) != len(names):
raise ValueError("names and units must be same length")
if extname is None:
# will be ignored
extname = ""
else:
if not isstring(extname):
raise ValueError("extension name must be a string")
extname = mks(extname)
if extname is not None and extver is not None:
extver = check_extver(extver)
if extver is None:
# will be ignored
extver = 0
if extname is None:
# will be ignored
extname = ""
if header is not None:
nkeys = len(header)
else:
nkeys = 0
# note we can create extname in the c code for tables, but not images
self._FITS.create_table_hdu(table_type_int, nkeys,
names, formats, tunit=units, tdim=dims,
extname=extname, extver=extver)
# don't rebuild the whole list unless this is the first hdu
# to be created
self.update_hdu_list(rebuild=False) | python | def create_table_hdu(self, data=None, dtype=None,
header=None,
names=None, formats=None,
units=None, dims=None, extname=None, extver=None,
table_type='binary', write_bitcols=False):
"""
Create a new, empty table extension and reload the hdu list.
There are three ways to do it:
1) send a numpy dtype, from which the formats in the fits file will
be determined.
2) Send an array in data= keyword. this is required if you have
object fields for writing to variable length columns.
3) send the names,formats and dims yourself
You can then write data into the new extension using
fits[extension].write(array)
If you want to write to a single column
fits[extension].write_column(array)
But be careful as the other columns will be left zeroed.
Often you will instead just use write_table to do this all
atomically.
fits.write_table(recarray)
write_table will create the new table extension for you with the
appropriate fields.
parameters
----------
dtype: numpy dtype or descriptor, optional
If you have an array with fields, you can just send arr.dtype. You
can also use a list of tuples, e.g. [('x','f8'),('index','i4')] or
a dictionary representation.
data: a numpy array with fields, optional
or a dictionary
An array or dict from which to determine the table definition. You
must use this instead of sending a descriptor if you have object
array fields, as this is the only way to determine the type and max
size.
names: list of strings, optional
The list of field names
formats: list of strings, optional
The TFORM format strings for each field.
dims: list of strings, optional
An optional list of dimension strings for each field. Should
match the repeat count for the formats fields. Be careful of
the order since FITS is more like fortran. See the descr2tabledef
function.
table_type: string, optional
Either 'binary' or 'ascii', default 'binary'
Matching is case-insensitive
units: list of strings, optional
An optional list of unit strings for each field.
extname: string, optional
An optional extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
write_bitcols: bool, optional
Write boolean arrays in the FITS bitcols format, default False
header: FITSHDR, list, dict, optional
This is only used to determine how many slots to reserve for
header keywords
restrictions
------------
The File must be opened READWRITE
"""
# record this for the TableHDU object
self.keys['write_bitcols'] = write_bitcols
# convert the string table type to its integer code
table_type_int = _extract_table_type(table_type)
if data is not None:
if isinstance(data, numpy.ndarray):
names, formats, dims = array2tabledef(
data, table_type=table_type, write_bitcols=write_bitcols)
elif isinstance(data, (list, dict)):
names, formats, dims = collection2tabledef(
data, names=names, table_type=table_type,
write_bitcols=write_bitcols)
else:
raise ValueError(
"data must be an ndarray with fields or a dict")
elif dtype is not None:
dtype = numpy.dtype(dtype)
names, formats, dims = descr2tabledef(
dtype.descr,
write_bitcols=write_bitcols,
table_type=table_type,
)
else:
if names is None or formats is None:
raise ValueError(
"send either dtype=, data=, or names= and formats=")
if not isinstance(names, list) or not isinstance(formats, list):
raise ValueError("names and formats should be lists")
if len(names) != len(formats):
raise ValueError("names and formats must be same length")
if dims is not None:
if not isinstance(dims, list):
raise ValueError("dims should be a list")
if len(dims) != len(names):
raise ValueError("names and dims must be same length")
if units is not None:
if not isinstance(units, list):
raise ValueError("units should be a list")
if len(units) != len(names):
raise ValueError("names and units must be same length")
if extname is None:
# will be ignored
extname = ""
else:
if not isstring(extname):
raise ValueError("extension name must be a string")
extname = mks(extname)
if extname is not None and extver is not None:
extver = check_extver(extver)
if extver is None:
# will be ignored
extver = 0
if extname is None:
# will be ignored
extname = ""
if header is not None:
nkeys = len(header)
else:
nkeys = 0
# note we can create extname in the c code for tables, but not images
self._FITS.create_table_hdu(table_type_int, nkeys,
names, formats, tunit=units, tdim=dims,
extname=extname, extver=extver)
# don't rebuild the whole list unless this is the first hdu
# to be created
self.update_hdu_list(rebuild=False) | [
"def",
"create_table_hdu",
"(",
"self",
",",
"data",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"header",
"=",
"None",
",",
"names",
"=",
"None",
",",
"formats",
"=",
"None",
",",
"units",
"=",
"None",
",",
"dims",
"=",
"None",
",",
"extname",
"=",
"None",
",",
"extver",
"=",
"None",
",",
"table_type",
"=",
"'binary'",
",",
"write_bitcols",
"=",
"False",
")",
":",
"# record this for the TableHDU object",
"self",
".",
"keys",
"[",
"'write_bitcols'",
"]",
"=",
"write_bitcols",
"# can leave as turn",
"table_type_int",
"=",
"_extract_table_type",
"(",
"table_type",
")",
"if",
"data",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"data",
",",
"numpy",
".",
"ndarray",
")",
":",
"names",
",",
"formats",
",",
"dims",
"=",
"array2tabledef",
"(",
"data",
",",
"table_type",
"=",
"table_type",
",",
"write_bitcols",
"=",
"write_bitcols",
")",
"elif",
"isinstance",
"(",
"data",
",",
"(",
"list",
",",
"dict",
")",
")",
":",
"names",
",",
"formats",
",",
"dims",
"=",
"collection2tabledef",
"(",
"data",
",",
"names",
"=",
"names",
",",
"table_type",
"=",
"table_type",
",",
"write_bitcols",
"=",
"write_bitcols",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"data must be an ndarray with fields or a dict\"",
")",
"elif",
"dtype",
"is",
"not",
"None",
":",
"dtype",
"=",
"numpy",
".",
"dtype",
"(",
"dtype",
")",
"names",
",",
"formats",
",",
"dims",
"=",
"descr2tabledef",
"(",
"dtype",
".",
"descr",
",",
"write_bitcols",
"=",
"write_bitcols",
",",
"table_type",
"=",
"table_type",
",",
")",
"else",
":",
"if",
"names",
"is",
"None",
"or",
"formats",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"send either dtype=, data=, or names= and formats=\"",
")",
"if",
"not",
"isinstance",
"(",
"names",
",",
"list",
")",
"or",
"not",
"isinstance",
"(",
"formats",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"names and formats should be lists\"",
")",
"if",
"len",
"(",
"names",
")",
"!=",
"len",
"(",
"formats",
")",
":",
"raise",
"ValueError",
"(",
"\"names and formats must be same length\"",
")",
"if",
"dims",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"dims",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"dims should be a list\"",
")",
"if",
"len",
"(",
"dims",
")",
"!=",
"len",
"(",
"names",
")",
":",
"raise",
"ValueError",
"(",
"\"names and dims must be same length\"",
")",
"if",
"units",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"units",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"units should be a list\"",
")",
"if",
"len",
"(",
"units",
")",
"!=",
"len",
"(",
"names",
")",
":",
"raise",
"ValueError",
"(",
"\"names and units must be same length\"",
")",
"if",
"extname",
"is",
"None",
":",
"# will be ignored",
"extname",
"=",
"\"\"",
"else",
":",
"if",
"not",
"isstring",
"(",
"extname",
")",
":",
"raise",
"ValueError",
"(",
"\"extension name must be a string\"",
")",
"extname",
"=",
"mks",
"(",
"extname",
")",
"if",
"extname",
"is",
"not",
"None",
"and",
"extver",
"is",
"not",
"None",
":",
"extver",
"=",
"check_extver",
"(",
"extver",
")",
"if",
"extver",
"is",
"None",
":",
"# will be ignored",
"extver",
"=",
"0",
"if",
"extname",
"is",
"None",
":",
"# will be ignored",
"extname",
"=",
"\"\"",
"if",
"header",
"is",
"not",
"None",
":",
"nkeys",
"=",
"len",
"(",
"header",
")",
"else",
":",
"nkeys",
"=",
"0",
"# note we can create extname in the c code for tables, but not images",
"self",
".",
"_FITS",
".",
"create_table_hdu",
"(",
"table_type_int",
",",
"nkeys",
",",
"names",
",",
"formats",
",",
"tunit",
"=",
"units",
",",
"tdim",
"=",
"dims",
",",
"extname",
"=",
"extname",
",",
"extver",
"=",
"extver",
")",
"# don't rebuild the whole list unless this is the first hdu",
"# to be created",
"self",
".",
"update_hdu_list",
"(",
"rebuild",
"=",
"False",
")"
] | Create a new, empty table extension and reload the hdu list.
There are three ways to do it:
1) send a numpy dtype, from which the formats in the fits file will
be determined.
2) Send an array in data= keyword. this is required if you have
object fields for writing to variable length columns.
3) send the names,formats and dims yourself
You can then write data into the new extension using
fits[extension].write(array)
If you want to write to a single column
fits[extension].write_column(array)
But be careful as the other columns will be left zeroed.
Often you will instead just use write_table to do this all
atomically.
fits.write_table(recarray)
write_table will create the new table extension for you with the
appropriate fields.
parameters
----------
dtype: numpy dtype or descriptor, optional
If you have an array with fields, you can just send arr.dtype. You
can also use a list of tuples, e.g. [('x','f8'),('index','i4')] or
a dictionary representation.
data: a numpy array with fields, optional
or a dictionary
An array or dict from which to determine the table definition. You
must use this instead of sending a descriptor if you have object
array fields, as this is the only way to determine the type and max
size.
names: list of strings, optional
The list of field names
formats: list of strings, optional
The TFORM format strings for each field.
dims: list of strings, optional
An optional list of dimension strings for each field. Should
match the repeat count for the formats fields. Be careful of
the order since FITS is more like fortran. See the descr2tabledef
function.
table_type: string, optional
Either 'binary' or 'ascii', default 'binary'
Matching is case-insensitive
units: list of strings, optional
An optional list of unit strings for each field.
extname: string, optional
An optional extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
write_bitcols: bool, optional
Write boolean arrays in the FITS bitcols format, default False
header: FITSHDR, list, dict, optional
This is only used to determine how many slots to reserve for
header keywords
restrictions
------------
The File must be opened READWRITE | [
"Create",
"a",
"new",
"empty",
"table",
"extension",
"and",
"reload",
"the",
"hdu",
"list",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L861-L1017 |
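A sketch of creation mode 1 above (a numpy descriptor), assuming the documented API; the file and extension names are hypothetical.

    import numpy
    import fitsio

    dtype = [('id', 'i8'), ('vals', 'f4', 3)]
    with fitsio.FITS('empty.fits', 'rw', clobber=True) as fits:
        # create the empty table from a descriptor ...
        fits.create_table_hdu(dtype=dtype, extname='DATA')
        # ... then write a matching array into it
        fits['DATA'].write(numpy.zeros(2, dtype=dtype))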
2,175 | esheldon/fitsio | fitsio/fitslib.py | FITS.update_hdu_list | def update_hdu_list(self, rebuild=True):
"""
Force an update of the entire HDU list
Normally you don't need to call this method directly
if rebuild is true or the hdu_list is not yet set, the list is
rebuilt from scratch
"""
if not hasattr(self, 'hdu_list'):
rebuild = True
if rebuild:
self.hdu_list = []
self.hdu_map = {}
# we don't know how many hdus there are, so iterate
# until we can't open any more
ext_start = 0
else:
# start from last
ext_start = len(self)
ext = ext_start
while True:
try:
self._append_hdu_info(ext)
except IOError:
break
except RuntimeError:
break
ext = ext + 1 | python | def update_hdu_list(self, rebuild=True):
"""
Force an update of the entire HDU list
Normally you don't need to call this method directly
if rebuild is true or the hdu_list is not yet set, the list is
rebuilt from scratch
"""
if not hasattr(self, 'hdu_list'):
rebuild = True
if rebuild:
self.hdu_list = []
self.hdu_map = {}
# we don't know how many hdus there are, so iterate
# until we can't open any more
ext_start = 0
else:
# start from last
ext_start = len(self)
ext = ext_start
while True:
try:
self._append_hdu_info(ext)
except IOError:
break
except RuntimeError:
break
ext = ext + 1 | [
"def",
"update_hdu_list",
"(",
"self",
",",
"rebuild",
"=",
"True",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'hdu_list'",
")",
":",
"rebuild",
"=",
"True",
"if",
"rebuild",
":",
"self",
".",
"hdu_list",
"=",
"[",
"]",
"self",
".",
"hdu_map",
"=",
"{",
"}",
"# we don't know how many hdus there are, so iterate",
"# until we can't open any more",
"ext_start",
"=",
"0",
"else",
":",
"# start from last",
"ext_start",
"=",
"len",
"(",
"self",
")",
"ext",
"=",
"ext_start",
"while",
"True",
":",
"try",
":",
"self",
".",
"_append_hdu_info",
"(",
"ext",
")",
"except",
"IOError",
":",
"break",
"except",
"RuntimeError",
":",
"break",
"ext",
"=",
"ext",
"+",
"1"
] | Force an update of the entire HDU list
Normally you don't need to call this method directly
if rebuild is true or the hdu_list is not yet set, the list is
rebuilt from scratch | [
"Force",
"an",
"update",
"of",
"the",
"entire",
"HDU",
"list"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1019-L1051 |
2,176 | esheldon/fitsio | fitsio/fitslib.py | FITS.next | def next(self):
"""
Move to the next iteration
"""
if self._iter_index == len(self.hdu_list):
raise StopIteration
hdu = self.hdu_list[self._iter_index]
self._iter_index += 1
return hdu | python | def next(self):
"""
Move to the next iteration
"""
if self._iter_index == len(self.hdu_list):
raise StopIteration
hdu = self.hdu_list[self._iter_index]
self._iter_index += 1
return hdu | [
"def",
"next",
"(",
"self",
")",
":",
"if",
"self",
".",
"_iter_index",
"==",
"len",
"(",
"self",
".",
"hdu_list",
")",
":",
"raise",
"StopIteration",
"hdu",
"=",
"self",
".",
"hdu_list",
"[",
"self",
".",
"_iter_index",
"]",
"self",
".",
"_iter_index",
"+=",
"1",
"return",
"hdu"
] | Move to the next iteration | [
"Move",
"to",
"the",
"next",
"iteration"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1101-L1109 |
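This next() method is what drives ordinary Python iteration over the HDU list; a minimal sketch with a hypothetical file name.

    import fitsio

    with fitsio.FITS('demo.fits') as fits:
        for hdu in fits:
            print(hdu.get_extname(), hdu.get_exttype())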
2,177 | esheldon/fitsio | fitsio/fitslib.py | FITS._extract_item | def _extract_item(self, item):
"""
utility function to extract an "item", meaning
an extension number or name, plus a version.
"""
ver = 0
if isinstance(item, tuple):
ver_sent = True
nitem = len(item)
if nitem == 1:
ext = item[0]
elif nitem == 2:
ext, ver = item
else:
ver_sent = False
ext = item
return ext, ver, ver_sent | python | def _extract_item(self, item):
"""
utility function to extract an "item", meaning
an extension number or name, plus a version.
"""
ver = 0
if isinstance(item, tuple):
ver_sent = True
nitem = len(item)
if nitem == 1:
ext = item[0]
elif nitem == 2:
ext, ver = item
else:
ver_sent = False
ext = item
return ext, ver, ver_sent | [
"def",
"_extract_item",
"(",
"self",
",",
"item",
")",
":",
"ver",
"=",
"0",
"if",
"isinstance",
"(",
"item",
",",
"tuple",
")",
":",
"ver_sent",
"=",
"True",
"nitem",
"=",
"len",
"(",
"item",
")",
"if",
"nitem",
"==",
"1",
":",
"ext",
"=",
"item",
"[",
"0",
"]",
"elif",
"nitem",
"==",
"2",
":",
"ext",
",",
"ver",
"=",
"item",
"else",
":",
"ver_sent",
"=",
"False",
"ext",
"=",
"item",
"return",
"ext",
",",
"ver",
",",
"ver_sent"
] | utility function to extract an "item", meaning
an extension number or name, plus a version. | [
"utility",
"function",
"to",
"extract",
"an",
"item",
"meaning",
"a",
"extension",
"number",
"name",
"plus",
"version",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1121-L1137 |
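The three "item" forms handled above map onto the usual indexing styles; a sketch assuming the documented behavior, with a hypothetical file name.

    import fitsio

    fits = fitsio.FITS('demo.fits')
    hdu = fits[1]         # plain extension number: ver stays 0
    hdu = fits['SCI']     # extension name: first matching version
    hdu = fits['SCI', 2]  # tuple of (extname, extver): ver_sent is True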
2,178 | esheldon/fitsio | fitsio/hdu/image.py | ImageHDU._update_info | def _update_info(self):
"""
Call parent method and make sure this is in fact an
image HDU. Set dims in C order
"""
super(ImageHDU, self)._update_info()
if self._info['hdutype'] != IMAGE_HDU:
mess = "Extension %s is not a Image HDU" % self.ext
raise ValueError(mess)
# convert to c order
if 'dims' in self._info:
self._info['dims'] = list(reversed(self._info['dims'])) | python | def _update_info(self):
"""
Call parent method and make sure this is in fact an
image HDU. Set dims in C order
"""
super(ImageHDU, self)._update_info()
if self._info['hdutype'] != IMAGE_HDU:
mess = "Extension %s is not a Image HDU" % self.ext
raise ValueError(mess)
# convert to c order
if 'dims' in self._info:
self._info['dims'] = list(reversed(self._info['dims'])) | [
"def",
"_update_info",
"(",
"self",
")",
":",
"super",
"(",
"ImageHDU",
",",
"self",
")",
".",
"_update_info",
"(",
")",
"if",
"self",
".",
"_info",
"[",
"'hdutype'",
"]",
"!=",
"IMAGE_HDU",
":",
"mess",
"=",
"\"Extension %s is not a Image HDU\"",
"%",
"self",
".",
"ext",
"raise",
"ValueError",
"(",
"mess",
")",
"# convert to c order",
"if",
"'dims'",
"in",
"self",
".",
"_info",
":",
"self",
".",
"_info",
"[",
"'dims'",
"]",
"=",
"list",
"(",
"reversed",
"(",
"self",
".",
"_info",
"[",
"'dims'",
"]",
")",
")"
] | Call parent method and make sure this is in fact an
image HDU. Set dims in C order | [
"Call",
"parent",
"method",
"and",
"make",
"sure",
"this",
"is",
"in",
"fact",
"a",
"image",
"HDU",
".",
"Set",
"dims",
"in",
"C",
"order"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L37-L50 |
2,179 | esheldon/fitsio | fitsio/hdu/image.py | ImageHDU.reshape | def reshape(self, dims):
"""
reshape an existing image to the requested dimensions
parameters
----------
dims: sequence
Any sequence convertible to i8
"""
adims = numpy.array(dims, ndmin=1, dtype='i8')
self._FITS.reshape_image(self._ext+1, adims) | python | def reshape(self, dims):
"""
reshape an existing image to the requested dimensions
parameters
----------
dims: sequence
Any sequence convertible to i8
"""
adims = numpy.array(dims, ndmin=1, dtype='i8')
self._FITS.reshape_image(self._ext+1, adims) | [
"def",
"reshape",
"(",
"self",
",",
"dims",
")",
":",
"adims",
"=",
"numpy",
".",
"array",
"(",
"dims",
",",
"ndmin",
"=",
"1",
",",
"dtype",
"=",
"'i8'",
")",
"self",
".",
"_FITS",
".",
"reshape_image",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"adims",
")"
] | reshape an existing image to the requested dimensions
parameters
----------
dims: sequence
Any sequence convertible to i8 | [
"reshape",
"an",
"existing",
"image",
"to",
"the",
"requested",
"dimensions"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L91-L102 |
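A one-line sketch of reshape, assuming the documented API; the file and extension names are hypothetical.

    import fitsio

    with fitsio.FITS('demo.fits', 'rw') as fits:
        # any sequence convertible to i8 works, e.g. a list or tuple
        fits['SCI'].reshape([10, 20])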
2,180 | esheldon/fitsio | fitsio/hdu/image.py | ImageHDU.write | def write(self, img, start=0, **keys):
"""
Write the image into this HDU
If data already exist in this HDU, they will be overwritten. If the
image to write is larger than the image on disk, or if the start
position is such that the write would extend beyond the existing
dimensions, the on-disk image is expanded.
parameters
----------
img: ndarray
A simple numpy ndarray
start: integer or sequence
Where to start writing data. Can be an integer offset
into the entire array, or a sequence determining where
in N-dimensional space to start.
"""
dims = self.get_dims()
if img.dtype.fields is not None:
raise ValueError("got recarray, expected regular ndarray")
if img.size == 0:
raise ValueError("data must have at least 1 row")
# data must be c-contiguous and native byte order
if not img.flags['C_CONTIGUOUS']:
# this always makes a copy
img_send = numpy.ascontiguousarray(img)
array_to_native(img_send, inplace=True)
else:
img_send = array_to_native(img, inplace=False)
if IS_PY3 and img_send.dtype.char == 'U':
# for python3, we convert unicode to ascii
# this will error if the character is not in ascii
img_send = img_send.astype('S', copy=False)
if not numpy.isscalar(start):
# convert to scalar offset
# note we use the on-disk data type to get itemsize
offset = _convert_full_start_to_offset(dims, start)
else:
offset = start
# see if we need to resize the image
if self.has_data():
self._expand_if_needed(dims, img.shape, start, offset)
self._FITS.write_image(self._ext+1, img_send, offset+1)
self._update_info() | python | def write(self, img, start=0, **keys):
"""
Write the image into this HDU
If data already exist in this HDU, they will be overwritten. If the
image to write is larger than the image on disk, or if the start
position is such that the write would extend beyond the existing
dimensions, the on-disk image is expanded.
parameters
----------
img: ndarray
A simple numpy ndarray
start: integer or sequence
Where to start writing data. Can be an integer offset
into the entire array, or a sequence determining where
in N-dimensional space to start.
"""
dims = self.get_dims()
if img.dtype.fields is not None:
raise ValueError("got recarray, expected regular ndarray")
if img.size == 0:
raise ValueError("data must have at least 1 row")
# data must be c-contiguous and native byte order
if not img.flags['C_CONTIGUOUS']:
# this always makes a copy
img_send = numpy.ascontiguousarray(img)
array_to_native(img_send, inplace=True)
else:
img_send = array_to_native(img, inplace=False)
if IS_PY3 and img_send.dtype.char == 'U':
# for python3, we convert unicode to ascii
# this will error if the character is not in ascii
img_send = img_send.astype('S', copy=False)
if not numpy.isscalar(start):
# convert to scalar offset
# note we use the on-disk data type to get itemsize
offset = _convert_full_start_to_offset(dims, start)
else:
offset = start
# see if we need to resize the image
if self.has_data():
self._expand_if_needed(dims, img.shape, start, offset)
self._FITS.write_image(self._ext+1, img_send, offset+1)
self._update_info() | [
"def",
"write",
"(",
"self",
",",
"img",
",",
"start",
"=",
"0",
",",
"*",
"*",
"keys",
")",
":",
"dims",
"=",
"self",
".",
"get_dims",
"(",
")",
"if",
"img",
".",
"dtype",
".",
"fields",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"got recarray, expected regular ndarray\"",
")",
"if",
"img",
".",
"size",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"data must have at least 1 row\"",
")",
"# data must be c-contiguous and native byte order",
"if",
"not",
"img",
".",
"flags",
"[",
"'C_CONTIGUOUS'",
"]",
":",
"# this always makes a copy",
"img_send",
"=",
"numpy",
".",
"ascontiguousarray",
"(",
"img",
")",
"array_to_native",
"(",
"img_send",
",",
"inplace",
"=",
"True",
")",
"else",
":",
"img_send",
"=",
"array_to_native",
"(",
"img",
",",
"inplace",
"=",
"False",
")",
"if",
"IS_PY3",
"and",
"img_send",
".",
"dtype",
".",
"char",
"==",
"'U'",
":",
"# for python3, we convert unicode to ascii",
"# this will error if the character is not in ascii",
"img_send",
"=",
"img_send",
".",
"astype",
"(",
"'S'",
",",
"copy",
"=",
"False",
")",
"if",
"not",
"numpy",
".",
"isscalar",
"(",
"start",
")",
":",
"# convert to scalar offset",
"# note we use the on-disk data type to get itemsize",
"offset",
"=",
"_convert_full_start_to_offset",
"(",
"dims",
",",
"start",
")",
"else",
":",
"offset",
"=",
"start",
"# see if we need to resize the image",
"if",
"self",
".",
"has_data",
"(",
")",
":",
"self",
".",
"_expand_if_needed",
"(",
"dims",
",",
"img",
".",
"shape",
",",
"start",
",",
"offset",
")",
"self",
".",
"_FITS",
".",
"write_image",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"img_send",
",",
"offset",
"+",
"1",
")",
"self",
".",
"_update_info",
"(",
")"
] | Write the image into this HDU
If data already exist in this HDU, they will be overwritten. If the
image to write is larger than the image on disk, or if the start
position is such that the write would extend beyond the existing
dimensions, the on-disk image is expanded.
parameters
----------
img: ndarray
A simple numpy ndarray
start: integer or sequence
Where to start writing data. Can be an integer offset
into the entire array, or a sequence determining where
in N-dimensional space to start. | [
"Write",
"the",
"image",
"into",
"this",
"HDU"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L104-L156 |
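A sketch of the two start= forms handled above, assuming the documented API; the file and extension names are hypothetical.

    import numpy
    import fitsio

    with fitsio.FITS('demo.fits', 'rw') as fits:
        img = numpy.arange(6, dtype='f4').reshape(2, 3)
        hdu = fits['SCI']
        hdu.write(img)                # overwrite from the origin
        hdu.write(img, start=[4, 0])  # start as an N-dimensional position
        hdu.write(img, start=30)      # start as a flat scalar offset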
2,181 | esheldon/fitsio | fitsio/hdu/image.py | ImageHDU.read | def read(self, **keys):
"""
Read the image.
If the HDU is an IMAGE_HDU, read the corresponding image. Compression
and scaling are dealt with properly.
"""
if not self.has_data():
return None
dtype, shape = self._get_dtype_and_shape()
array = numpy.zeros(shape, dtype=dtype)
self._FITS.read_image(self._ext+1, array)
return array | python | def read(self, **keys):
"""
Read the image.
If the HDU is an IMAGE_HDU, read the corresponding image. Compression
and scaling are dealt with properly.
"""
if not self.has_data():
return None
dtype, shape = self._get_dtype_and_shape()
array = numpy.zeros(shape, dtype=dtype)
self._FITS.read_image(self._ext+1, array)
return array | [
"def",
"read",
"(",
"self",
",",
"*",
"*",
"keys",
")",
":",
"if",
"not",
"self",
".",
"has_data",
"(",
")",
":",
"return",
"None",
"dtype",
",",
"shape",
"=",
"self",
".",
"_get_dtype_and_shape",
"(",
")",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"shape",
",",
"dtype",
"=",
"dtype",
")",
"self",
".",
"_FITS",
".",
"read_image",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"array",
")",
"return",
"array"
] | Read the image.
If the HDU is an IMAGE_HDU, read the corresponding image. Compression
and scaling are dealt with properly. | [
"Read",
"the",
"image",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L158-L171 |
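A minimal read sketch, assuming the documented API; the file and extension names are hypothetical.

    import fitsio

    with fitsio.FITS('demo.fits') as fits:
        img = fits['SCI'].read()  # full image as an ndarray, or None if empty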
2,182 | esheldon/fitsio | fitsio/hdu/image.py | ImageHDU._get_dtype_and_shape | def _get_dtype_and_shape(self):
"""
Get the numpy dtype and shape for image
"""
npy_dtype = self._get_image_numpy_dtype()
if self._info['ndims'] != 0:
shape = self._info['dims']
else:
raise IOError("no image present in HDU")
return npy_dtype, shape | python | def _get_dtype_and_shape(self):
"""
Get the numpy dtype and shape for image
"""
npy_dtype = self._get_image_numpy_dtype()
if self._info['ndims'] != 0:
shape = self._info['dims']
else:
raise IOError("no image present in HDU")
return npy_dtype, shape | [
"def",
"_get_dtype_and_shape",
"(",
"self",
")",
":",
"npy_dtype",
"=",
"self",
".",
"_get_image_numpy_dtype",
"(",
")",
"if",
"self",
".",
"_info",
"[",
"'ndims'",
"]",
"!=",
"0",
":",
"shape",
"=",
"self",
".",
"_info",
"[",
"'dims'",
"]",
"else",
":",
"raise",
"IOError",
"(",
"\"no image present in HDU\"",
")",
"return",
"npy_dtype",
",",
"shape"
] | Get the numpy dtype and shape for image | [
"Get",
"the",
"numpy",
"dtype",
"and",
"shape",
"for",
"image"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L173-L184 |
2,183 | esheldon/fitsio | fitsio/hdu/image.py | ImageHDU._get_image_numpy_dtype | def _get_image_numpy_dtype(self):
"""
Get the numpy dtype for the image
"""
try:
ftype = self._info['img_equiv_type']
npy_type = _image_bitpix2npy[ftype]
except KeyError:
raise KeyError("unsupported fits data type: %d" % ftype)
return npy_type | python | def _get_image_numpy_dtype(self):
"""
Get the numpy dtype for the image
"""
try:
ftype = self._info['img_equiv_type']
npy_type = _image_bitpix2npy[ftype]
except KeyError:
raise KeyError("unsupported fits data type: %d" % ftype)
return npy_type | [
"def",
"_get_image_numpy_dtype",
"(",
"self",
")",
":",
"try",
":",
"ftype",
"=",
"self",
".",
"_info",
"[",
"'img_equiv_type'",
"]",
"npy_type",
"=",
"_image_bitpix2npy",
"[",
"ftype",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"unsupported fits data type: %d\"",
"%",
"ftype",
")",
"return",
"npy_type"
] | Get the numpy dtype for the image | [
"Get",
"the",
"numpy",
"dtype",
"for",
"the",
"image"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L186-L196 |
2,184 | esheldon/fitsio | fitsio/hdu/image.py | ImageHDU._read_image_slice | def _read_image_slice(self, arg):
"""
workhorse to read a slice
"""
if 'ndims' not in self._info:
raise ValueError("Attempt to slice empty extension")
if isinstance(arg, slice):
# one-dimensional, e.g. 2:20
return self._read_image_slice((arg,))
if not isinstance(arg, tuple):
raise ValueError("arguments must be slices, one for each "
"dimension, e.g. [2:5] or [2:5,8:25] etc.")
# should be a tuple of slices, one for each dimension
# e.g. [2:3, 8:100]
nd = len(arg)
if nd != self._info['ndims']:
raise ValueError("Got slice dimensions %d, "
"expected %d" % (nd, self._info['ndims']))
targ = arg
arg = []
for a in targ:
if isinstance(a, slice):
arg.append(a)
elif isinstance(a, int):
arg.append(slice(a, a+1, 1))
else:
raise ValueError("arguments must be slices, e.g. 2:12")
dims = self._info['dims']
arrdims = []
first = []
last = []
steps = []
# check the args and reverse dimensions since
# fits is backwards from numpy
dim = 0
for slc in arg:
start = slc.start
stop = slc.stop
step = slc.step
if start is None:
start = 0
if stop is None:
stop = dims[dim]
if step is None:
step = 1
if step < 1:
raise ValueError("slice steps must be >= 1")
if start < 0:
start = dims[dim] + start
if start < 0:
raise IndexError("Index out of bounds")
if stop < 0:
stop = dims[dim] + start + 1
# move to 1-offset
start = start + 1
if stop < start:
raise ValueError("python slices but include at least one "
"element, got %s" % slc)
if stop > dims[dim]:
stop = dims[dim]
first.append(start)
last.append(stop)
steps.append(step)
arrdims.append(stop-start+1)
dim += 1
first.reverse()
last.reverse()
steps.reverse()
first = numpy.array(first, dtype='i8')
last = numpy.array(last, dtype='i8')
steps = numpy.array(steps, dtype='i8')
npy_dtype = self._get_image_numpy_dtype()
array = numpy.zeros(arrdims, dtype=npy_dtype)
self._FITS.read_image_slice(self._ext+1, first, last, steps, array)
return array | python | def _read_image_slice(self, arg):
"""
workhorse to read a slice
"""
if 'ndims' not in self._info:
raise ValueError("Attempt to slice empty extension")
if isinstance(arg, slice):
# one-dimensional, e.g. 2:20
return self._read_image_slice((arg,))
if not isinstance(arg, tuple):
raise ValueError("arguments must be slices, one for each "
"dimension, e.g. [2:5] or [2:5,8:25] etc.")
# should be a tuple of slices, one for each dimension
# e.g. [2:3, 8:100]
nd = len(arg)
if nd != self._info['ndims']:
raise ValueError("Got slice dimensions %d, "
"expected %d" % (nd, self._info['ndims']))
targ = arg
arg = []
for a in targ:
if isinstance(a, slice):
arg.append(a)
elif isinstance(a, int):
arg.append(slice(a, a+1, 1))
else:
raise ValueError("arguments must be slices, e.g. 2:12")
dims = self._info['dims']
arrdims = []
first = []
last = []
steps = []
# check the args and reverse dimensions since
# fits is backwards from numpy
dim = 0
for slc in arg:
start = slc.start
stop = slc.stop
step = slc.step
if start is None:
start = 0
if stop is None:
stop = dims[dim]
if step is None:
step = 1
if step < 1:
raise ValueError("slice steps must be >= 1")
if start < 0:
start = dims[dim] + start
if start < 0:
raise IndexError("Index out of bounds")
if stop < 0:
stop = dims[dim] + start + 1
# move to 1-offset
start = start + 1
if stop < start:
raise ValueError("python slices but include at least one "
"element, got %s" % slc)
if stop > dims[dim]:
stop = dims[dim]
first.append(start)
last.append(stop)
steps.append(step)
arrdims.append(stop-start+1)
dim += 1
first.reverse()
last.reverse()
steps.reverse()
first = numpy.array(first, dtype='i8')
last = numpy.array(last, dtype='i8')
steps = numpy.array(steps, dtype='i8')
npy_dtype = self._get_image_numpy_dtype()
array = numpy.zeros(arrdims, dtype=npy_dtype)
self._FITS.read_image_slice(self._ext+1, first, last, steps, array)
return array | [
"def",
"_read_image_slice",
"(",
"self",
",",
"arg",
")",
":",
"if",
"'ndims'",
"not",
"in",
"self",
".",
"_info",
":",
"raise",
"ValueError",
"(",
"\"Attempt to slice empty extension\"",
")",
"if",
"isinstance",
"(",
"arg",
",",
"slice",
")",
":",
"# one-dimensional, e.g. 2:20",
"return",
"self",
".",
"_read_image_slice",
"(",
"(",
"arg",
",",
")",
")",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"tuple",
")",
":",
"raise",
"ValueError",
"(",
"\"arguments must be slices, one for each \"",
"\"dimension, e.g. [2:5] or [2:5,8:25] etc.\"",
")",
"# should be a tuple of slices, one for each dimension",
"# e.g. [2:3, 8:100]",
"nd",
"=",
"len",
"(",
"arg",
")",
"if",
"nd",
"!=",
"self",
".",
"_info",
"[",
"'ndims'",
"]",
":",
"raise",
"ValueError",
"(",
"\"Got slice dimensions %d, \"",
"\"expected %d\"",
"%",
"(",
"nd",
",",
"self",
".",
"_info",
"[",
"'ndims'",
"]",
")",
")",
"targ",
"=",
"arg",
"arg",
"=",
"[",
"]",
"for",
"a",
"in",
"targ",
":",
"if",
"isinstance",
"(",
"a",
",",
"slice",
")",
":",
"arg",
".",
"append",
"(",
"a",
")",
"elif",
"isinstance",
"(",
"a",
",",
"int",
")",
":",
"arg",
".",
"append",
"(",
"slice",
"(",
"a",
",",
"a",
"+",
"1",
",",
"1",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"arguments must be slices, e.g. 2:12\"",
")",
"dims",
"=",
"self",
".",
"_info",
"[",
"'dims'",
"]",
"arrdims",
"=",
"[",
"]",
"first",
"=",
"[",
"]",
"last",
"=",
"[",
"]",
"steps",
"=",
"[",
"]",
"# check the args and reverse dimensions since",
"# fits is backwards from numpy",
"dim",
"=",
"0",
"for",
"slc",
"in",
"arg",
":",
"start",
"=",
"slc",
".",
"start",
"stop",
"=",
"slc",
".",
"stop",
"step",
"=",
"slc",
".",
"step",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"0",
"if",
"stop",
"is",
"None",
":",
"stop",
"=",
"dims",
"[",
"dim",
"]",
"if",
"step",
"is",
"None",
":",
"step",
"=",
"1",
"if",
"step",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"slice steps must be >= 1\"",
")",
"if",
"start",
"<",
"0",
":",
"start",
"=",
"dims",
"[",
"dim",
"]",
"+",
"start",
"if",
"start",
"<",
"0",
":",
"raise",
"IndexError",
"(",
"\"Index out of bounds\"",
")",
"if",
"stop",
"<",
"0",
":",
"stop",
"=",
"dims",
"[",
"dim",
"]",
"+",
"start",
"+",
"1",
"# move to 1-offset",
"start",
"=",
"start",
"+",
"1",
"if",
"stop",
"<",
"start",
":",
"raise",
"ValueError",
"(",
"\"python slices but include at least one \"",
"\"element, got %s\"",
"%",
"slc",
")",
"if",
"stop",
">",
"dims",
"[",
"dim",
"]",
":",
"stop",
"=",
"dims",
"[",
"dim",
"]",
"first",
".",
"append",
"(",
"start",
")",
"last",
".",
"append",
"(",
"stop",
")",
"steps",
".",
"append",
"(",
"step",
")",
"arrdims",
".",
"append",
"(",
"stop",
"-",
"start",
"+",
"1",
")",
"dim",
"+=",
"1",
"first",
".",
"reverse",
"(",
")",
"last",
".",
"reverse",
"(",
")",
"steps",
".",
"reverse",
"(",
")",
"first",
"=",
"numpy",
".",
"array",
"(",
"first",
",",
"dtype",
"=",
"'i8'",
")",
"last",
"=",
"numpy",
".",
"array",
"(",
"last",
",",
"dtype",
"=",
"'i8'",
")",
"steps",
"=",
"numpy",
".",
"array",
"(",
"steps",
",",
"dtype",
"=",
"'i8'",
")",
"npy_dtype",
"=",
"self",
".",
"_get_image_numpy_dtype",
"(",
")",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"arrdims",
",",
"dtype",
"=",
"npy_dtype",
")",
"self",
".",
"_FITS",
".",
"read_image_slice",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"first",
",",
"last",
",",
"steps",
",",
"array",
")",
"return",
"array"
] | workhorse to read a slice | [
"workhorse",
"to",
"read",
"a",
"slice"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L206-L295 |
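This slice machinery backs the bracket syntax on image HDUs; a sketch assuming a 2-d image, with a hypothetical file name.

    import fitsio

    with fitsio.FITS('big.fits') as fits:
        hdu = fits[0]
        cutout = hdu[10:20, 30:40]  # one slice per dimension
        row = hdu[5, :]             # ints are promoted to length-1 slices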
2,185 | esheldon/fitsio | fitsio/hdu/image.py | ImageHDU._expand_if_needed | def _expand_if_needed(self, dims, write_dims, start, offset):
"""
expand the on-disk image if the intended write will extend
beyond the existing dimensions
"""
from operator import mul
if numpy.isscalar(start):
start_is_scalar = True
else:
start_is_scalar = False
existing_size = reduce(mul, dims, 1)
required_size = offset + reduce(mul, write_dims, 1)
if required_size > existing_size:
print(
" required size:", required_size,
"existing size:", existing_size)
# we need to expand the image
ndim = len(dims)
idim = len(write_dims)
if start_is_scalar:
if start == 0:
start = [0]*ndim
else:
raise ValueError(
"When expanding "
"an existing image while writing, the start keyword "
"must have the same number of dimensions "
"as the image or be exactly 0, got %s " % start)
if idim != ndim:
raise ValueError(
"When expanding "
"an existing image while writing, the input image "
"must have the same number of dimensions "
"as the original. "
"Got %d instead of %d" % (idim, ndim))
new_dims = []
for i in xrange(ndim):
required_dim = start[i] + write_dims[i]
if required_dim < dims[i]:
# careful not to shrink the image!
dimsize = dims[i]
else:
dimsize = required_dim
new_dims.append(dimsize)
print(" reshaping image to:", new_dims)
self.reshape(new_dims) | python | def _expand_if_needed(self, dims, write_dims, start, offset):
"""
expand the on-disk image if the intended write will extend
beyond the existing dimensions
"""
from operator import mul
if numpy.isscalar(start):
start_is_scalar = True
else:
start_is_scalar = False
existing_size = reduce(mul, dims, 1)
required_size = offset + reduce(mul, write_dims, 1)
if required_size > existing_size:
print(
" required size:", required_size,
"existing size:", existing_size)
# we need to expand the image
ndim = len(dims)
idim = len(write_dims)
if start_is_scalar:
if start == 0:
start = [0]*ndim
else:
raise ValueError(
"When expanding "
"an existing image while writing, the start keyword "
"must have the same number of dimensions "
"as the image or be exactly 0, got %s " % start)
if idim != ndim:
raise ValueError(
"When expanding "
"an existing image while writing, the input image "
"must have the same number of dimensions "
"as the original. "
"Got %d instead of %d" % (idim, ndim))
new_dims = []
for i in xrange(ndim):
required_dim = start[i] + write_dims[i]
if required_dim < dims[i]:
# careful not to shrink the image!
dimsize = dims[i]
else:
dimsize = required_dim
new_dims.append(dimsize)
print(" reshaping image to:", new_dims)
self.reshape(new_dims) | [
"def",
"_expand_if_needed",
"(",
"self",
",",
"dims",
",",
"write_dims",
",",
"start",
",",
"offset",
")",
":",
"from",
"operator",
"import",
"mul",
"if",
"numpy",
".",
"isscalar",
"(",
"start",
")",
":",
"start_is_scalar",
"=",
"True",
"else",
":",
"start_is_scalar",
"=",
"False",
"existing_size",
"=",
"reduce",
"(",
"mul",
",",
"dims",
",",
"1",
")",
"required_size",
"=",
"offset",
"+",
"reduce",
"(",
"mul",
",",
"write_dims",
",",
"1",
")",
"if",
"required_size",
">",
"existing_size",
":",
"print",
"(",
"\" required size:\"",
",",
"required_size",
",",
"\"existing size:\"",
",",
"existing_size",
")",
"# we need to expand the image",
"ndim",
"=",
"len",
"(",
"dims",
")",
"idim",
"=",
"len",
"(",
"write_dims",
")",
"if",
"start_is_scalar",
":",
"if",
"start",
"==",
"0",
":",
"start",
"=",
"[",
"0",
"]",
"*",
"ndim",
"else",
":",
"raise",
"ValueError",
"(",
"\"When expanding \"",
"\"an existing image while writing, the start keyword \"",
"\"must have the same number of dimensions \"",
"\"as the image or be exactly 0, got %s \"",
"%",
"start",
")",
"if",
"idim",
"!=",
"ndim",
":",
"raise",
"ValueError",
"(",
"\"When expanding \"",
"\"an existing image while writing, the input image \"",
"\"must have the same number of dimensions \"",
"\"as the original. \"",
"\"Got %d instead of %d\"",
"%",
"(",
"idim",
",",
"ndim",
")",
")",
"new_dims",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"ndim",
")",
":",
"required_dim",
"=",
"start",
"[",
"i",
"]",
"+",
"write_dims",
"[",
"i",
"]",
"if",
"required_dim",
"<",
"dims",
"[",
"i",
"]",
":",
"# careful not to shrink the image!",
"dimsize",
"=",
"dims",
"[",
"i",
"]",
"else",
":",
"dimsize",
"=",
"required_dim",
"new_dims",
".",
"append",
"(",
"dimsize",
")",
"print",
"(",
"\" reshaping image to:\"",
",",
"new_dims",
")",
"self",
".",
"reshape",
"(",
"new_dims",
")"
] | expand the on-disk image if the intended write will extend
beyond the existing dimensions | [
"expand",
"the",
"on",
"-",
"disk",
"image",
"if",
"the",
"indended",
"write",
"will",
"extend",
"beyond",
"the",
"existing",
"dimensions"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L297-L350 |
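Editor's note: the resize rule in _expand_if_needed reduces to taking, per axis, the larger of the current size and start + write size. A minimal standalone sketch of that arithmetic (compute_new_dims is our name, not part of fitsio; start is assumed to already be a per-axis list):

def compute_new_dims(dims, write_dims, start):
    # never shrink an axis; grow it just enough to fit the incoming write
    if len(write_dims) != len(dims):
        raise ValueError("input image must match the on-disk dimensionality")
    return [max(d, s + w) for d, s, w in zip(dims, start, write_dims)]

# writing a 5x5 block at offset (8, 0) into a 10x10 image grows axis 0 to 13
assert compute_new_dims([10, 10], [5, 5], [8, 0]) == [13, 10]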
2,186 | esheldon/fitsio | fitsio/hdu/base.py | HDUBase.get_extname | def get_extname(self):
"""
Get the name for this extension, can be an empty string
"""
name = self._info['extname']
if name.strip() == '':
name = self._info['hduname']
return name.strip() | python | def get_extname(self):
"""
Get the name for this extension, can be an empty string
"""
name = self._info['extname']
if name.strip() == '':
name = self._info['hduname']
return name.strip() | [
"def",
"get_extname",
"(",
"self",
")",
":",
"name",
"=",
"self",
".",
"_info",
"[",
"'extname'",
"]",
"if",
"name",
".",
"strip",
"(",
")",
"==",
"''",
":",
"name",
"=",
"self",
".",
"_info",
"[",
"'hduname'",
"]",
"return",
"name",
".",
"strip",
"(",
")"
] | Get the name for this extension, can be an empty string | [
"Get",
"the",
"name",
"for",
"this",
"extension",
"can",
"be",
"an",
"empty",
"string"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L59-L66 |
2,187 | esheldon/fitsio | fitsio/hdu/base.py | HDUBase.get_extver | def get_extver(self):
"""
Get the version for this extension.
Used when a name is given to multiple extensions
"""
ver = self._info['extver']
if ver == 0:
ver = self._info['hduver']
return ver | python | def get_extver(self):
"""
Get the version for this extension.
Used when a name is given to multiple extensions
"""
ver = self._info['extver']
if ver == 0:
ver = self._info['hduver']
return ver | [
"def",
"get_extver",
"(",
"self",
")",
":",
"ver",
"=",
"self",
".",
"_info",
"[",
"'extver'",
"]",
"if",
"ver",
"==",
"0",
":",
"ver",
"=",
"self",
".",
"_info",
"[",
"'hduver'",
"]",
"return",
"ver"
] | Get the version for this extension.
Used when a name is given to multiple extensions | [
"Get",
"the",
"version",
"for",
"this",
"extension",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L68-L77 |
2,188 | esheldon/fitsio | fitsio/hdu/base.py | HDUBase.get_exttype | def get_exttype(self, num=False):
"""
Get the extension type
By default the result is a string that mirrors
the enumerated type names in cfitsio
'IMAGE_HDU', 'ASCII_TBL', 'BINARY_TBL'
which have numeric values
0 1 2
send num=True to get the numbers. The values
fitsio.IMAGE_HDU .ASCII_TBL, and .BINARY_TBL
are available for comparison
parameters
----------
num: bool, optional
Return the numeric values.
"""
if num:
return self._info['hdutype']
else:
name = _hdu_type_map[self._info['hdutype']]
return name | python | def get_exttype(self, num=False):
"""
Get the extension type
By default the result is a string that mirrors
the enumerated type names in cfitsio
'IMAGE_HDU', 'ASCII_TBL', 'BINARY_TBL'
which have numeric values
0 1 2
send num=True to get the numbers. The values
fitsio.IMAGE_HDU .ASCII_TBL, and .BINARY_TBL
are available for comparison
parameters
----------
num: bool, optional
Return the numeric values.
"""
if num:
return self._info['hdutype']
else:
name = _hdu_type_map[self._info['hdutype']]
return name | [
"def",
"get_exttype",
"(",
"self",
",",
"num",
"=",
"False",
")",
":",
"if",
"num",
":",
"return",
"self",
".",
"_info",
"[",
"'hdutype'",
"]",
"else",
":",
"name",
"=",
"_hdu_type_map",
"[",
"self",
".",
"_info",
"[",
"'hdutype'",
"]",
"]",
"return",
"name"
] | Get the extension type
By default the result is a string that mirrors
the enumerated type names in cfitsio
'IMAGE_HDU', 'ASCII_TBL', 'BINARY_TBL'
which have numeric values
0 1 2
send num=True to get the numbers. The values
fitsio.IMAGE_HDU .ASCII_TBL, and .BINARY_TBL
are available for comparison
parameters
----------
num: bool, optional
Return the numeric values. | [
"Get",
"the",
"extension",
"type"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L79-L101 |
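Editor's note: a hedged usage sketch for get_exttype; 'data.fits' is a placeholder and the file is assumed to hold a binary table in extension 1:

import fitsio

with fitsio.FITS('data.fits') as fits:
    hdu = fits[1]
    print(hdu.get_exttype())          # e.g. 'BINARY_TBL'
    print(hdu.get_exttype(num=True))  # e.g. 2
    # the module-level constants named in the docstring allow direct comparison
    if hdu.get_exttype(num=True) == fitsio.BINARY_TBL:
        print('extension 1 is a binary table')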
2,189 | esheldon/fitsio | fitsio/hdu/base.py | HDUBase.verify_checksum | def verify_checksum(self):
"""
Verify the checksum in the header for this HDU.
"""
res = self._FITS.verify_checksum(self._ext+1)
if res['dataok'] != 1:
raise ValueError("data checksum failed")
if res['hduok'] != 1:
raise ValueError("hdu checksum failed") | python | def verify_checksum(self):
"""
Verify the checksum in the header for this HDU.
"""
res = self._FITS.verify_checksum(self._ext+1)
if res['dataok'] != 1:
raise ValueError("data checksum failed")
if res['hduok'] != 1:
raise ValueError("hdu checksum failed") | [
"def",
"verify_checksum",
"(",
"self",
")",
":",
"res",
"=",
"self",
".",
"_FITS",
".",
"verify_checksum",
"(",
"self",
".",
"_ext",
"+",
"1",
")",
"if",
"res",
"[",
"'dataok'",
"]",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"data checksum failed\"",
")",
"if",
"res",
"[",
"'hduok'",
"]",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"hdu checksum failed\"",
")"
] | Verify the checksum in the header for this HDU. | [
"Verify",
"the",
"checksum",
"in",
"the",
"header",
"for",
"this",
"HDU",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L153-L161 |
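Editor's note: verify_checksum signals failure by raising ValueError rather than returning a status, so callers typically wrap it. A minimal sketch, assuming 'data.fits' exists and its first HDU was written with checksum keywords:

import fitsio

with fitsio.FITS('data.fits') as fits:
    try:
        fits[0].verify_checksum()
        print('checksums OK')
    except ValueError as err:
        print('checksum verification failed:', err)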
2,190 | esheldon/fitsio | fitsio/hdu/base.py | HDUBase.write_comment | def write_comment(self, comment):
"""
Write a comment into the header
"""
self._FITS.write_comment(self._ext+1, str(comment)) | python | def write_comment(self, comment):
"""
Write a comment into the header
"""
self._FITS.write_comment(self._ext+1, str(comment)) | [
"def",
"write_comment",
"(",
"self",
",",
"comment",
")",
":",
"self",
".",
"_FITS",
".",
"write_comment",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"str",
"(",
"comment",
")",
")"
] | Write a comment into the header | [
"Write",
"a",
"comment",
"into",
"the",
"header"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L163-L167 |
2,191 | esheldon/fitsio | fitsio/hdu/base.py | HDUBase.write_key | def write_key(self, name, value, comment=""):
"""
Write the input value to the header
parameters
----------
name: string
Name of keyword to write/update
value: scalar
Value to write, can be string float or integer type,
including numpy scalar types.
comment: string, optional
An optional comment to write for this key
Notes
-----
Write COMMENT and HISTORY using the write_comment and write_history
methods
"""
if value is None:
self._FITS.write_undefined_key(self._ext+1,
str(name),
str(comment))
elif isinstance(value, bool):
if value:
v = 1
else:
v = 0
self._FITS.write_logical_key(self._ext+1,
str(name),
v,
str(comment))
elif isinstance(value, _stypes):
self._FITS.write_string_key(self._ext+1,
str(name),
str(value),
str(comment))
elif isinstance(value, _ftypes):
self._FITS.write_double_key(self._ext+1,
str(name),
float(value),
str(comment))
elif isinstance(value, _itypes):
self._FITS.write_long_key(self._ext+1,
str(name),
int(value),
str(comment))
elif isinstance(value, (tuple, list)):
vl = [str(el) for el in value]
sval = ','.join(vl)
self._FITS.write_string_key(self._ext+1,
str(name),
sval,
str(comment))
else:
sval = str(value)
mess = (
"warning, keyword '%s' has non-standard "
"value type %s, "
"Converting to string: '%s'")
warnings.warn(mess % (name, type(value), sval), FITSRuntimeWarning)
self._FITS.write_string_key(self._ext+1,
str(name),
sval,
str(comment)) | python | def write_key(self, name, value, comment=""):
"""
Write the input value to the header
parameters
----------
name: string
Name of keyword to write/update
value: scalar
Value to write, can be string float or integer type,
including numpy scalar types.
comment: string, optional
An optional comment to write for this key
Notes
-----
Write COMMENT and HISTORY using the write_comment and write_history
methods
"""
if value is None:
self._FITS.write_undefined_key(self._ext+1,
str(name),
str(comment))
elif isinstance(value, bool):
if value:
v = 1
else:
v = 0
self._FITS.write_logical_key(self._ext+1,
str(name),
v,
str(comment))
elif isinstance(value, _stypes):
self._FITS.write_string_key(self._ext+1,
str(name),
str(value),
str(comment))
elif isinstance(value, _ftypes):
self._FITS.write_double_key(self._ext+1,
str(name),
float(value),
str(comment))
elif isinstance(value, _itypes):
self._FITS.write_long_key(self._ext+1,
str(name),
int(value),
str(comment))
elif isinstance(value, (tuple, list)):
vl = [str(el) for el in value]
sval = ','.join(vl)
self._FITS.write_string_key(self._ext+1,
str(name),
sval,
str(comment))
else:
sval = str(value)
mess = (
"warning, keyword '%s' has non-standard "
"value type %s, "
"Converting to string: '%s'")
warnings.warn(mess % (name, type(value), sval), FITSRuntimeWarning)
self._FITS.write_string_key(self._ext+1,
str(name),
sval,
str(comment)) | [
"def",
"write_key",
"(",
"self",
",",
"name",
",",
"value",
",",
"comment",
"=",
"\"\"",
")",
":",
"if",
"value",
"is",
"None",
":",
"self",
".",
"_FITS",
".",
"write_undefined_key",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"str",
"(",
"name",
")",
",",
"str",
"(",
"comment",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"if",
"value",
":",
"v",
"=",
"1",
"else",
":",
"v",
"=",
"0",
"self",
".",
"_FITS",
".",
"write_logical_key",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"str",
"(",
"name",
")",
",",
"v",
",",
"str",
"(",
"comment",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"_stypes",
")",
":",
"self",
".",
"_FITS",
".",
"write_string_key",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"str",
"(",
"name",
")",
",",
"str",
"(",
"value",
")",
",",
"str",
"(",
"comment",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"_ftypes",
")",
":",
"self",
".",
"_FITS",
".",
"write_double_key",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"str",
"(",
"name",
")",
",",
"float",
"(",
"value",
")",
",",
"str",
"(",
"comment",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"_itypes",
")",
":",
"self",
".",
"_FITS",
".",
"write_long_key",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"str",
"(",
"name",
")",
",",
"int",
"(",
"value",
")",
",",
"str",
"(",
"comment",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"vl",
"=",
"[",
"str",
"(",
"el",
")",
"for",
"el",
"in",
"value",
"]",
"sval",
"=",
"','",
".",
"join",
"(",
"vl",
")",
"self",
".",
"_FITS",
".",
"write_string_key",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"str",
"(",
"name",
")",
",",
"sval",
",",
"str",
"(",
"comment",
")",
")",
"else",
":",
"sval",
"=",
"str",
"(",
"value",
")",
"mess",
"=",
"(",
"\"warning, keyword '%s' has non-standard \"",
"\"value type %s, \"",
"\"Converting to string: '%s'\"",
")",
"warnings",
".",
"warn",
"(",
"mess",
"%",
"(",
"name",
",",
"type",
"(",
"value",
")",
",",
"sval",
")",
",",
"FITSRuntimeWarning",
")",
"self",
".",
"_FITS",
".",
"write_string_key",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"str",
"(",
"name",
")",
",",
"sval",
",",
"str",
"(",
"comment",
")",
")"
] | Write the input value to the header
parameters
----------
name: string
Name of keyword to write/update
value: scalar
Value to write, can be string float or integer type,
including numpy scalar types.
comment: string, optional
An optional comment to write for this key
Notes
-----
Write COMMENT and HISTORY using the write_comment and write_history
methods | [
"Write",
"the",
"input",
"value",
"to",
"the",
"header"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L181-L247 |
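Editor's note: the type dispatch in write_key maps Python types onto FITS keyword types (None -> undefined, bool -> logical, str -> string, float -> double, int -> long, tuple/list -> comma-joined string). A hedged sketch from the caller's side; 'data.fits' is a placeholder assumed to contain a writable HDU:

import numpy as np
import fitsio

with fitsio.FITS('data.fits', 'rw') as fits:
    hdu = fits[0]
    hdu.write_key('MYSTR', 'hello', comment='string key')
    hdu.write_key('MYFLT', 3.5, comment='double key')
    hdu.write_key('MYINT', np.int32(7), comment='numpy scalar ints also work')
    hdu.write_key('MYFLAG', True, comment='logical T/F key')
    hdu.write_key('MYUNDEF', None, comment='undefined-value key')
    hdu.write_key('MYLIST', (1, 2, 3))  # stored as the string '1,2,3'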
2,192 | esheldon/fitsio | fitsio/hdu/base.py | HDUBase.write_keys | def write_keys(self, records_in, clean=True):
"""
Write the keywords to the header.
parameters
----------
records: FITSHDR or list or dict
Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
clean: boolean
If True, trim out the standard fits header keywords that are
created on HDU creation, such as EXTEND, SIMPLE, TTYPE, TFORM,
TDIM, XTENSION, BITPIX, NAXIS, etc.
Notes
-----
Input keys named COMMENT and HISTORY are written using the
write_comment and write_history methods.
"""
if isinstance(records_in, FITSHDR):
hdr = records_in
else:
hdr = FITSHDR(records_in)
if clean:
is_table = hasattr(self, '_table_type_str')
# is_table = isinstance(self, TableHDU)
hdr.clean(is_table=is_table)
for r in hdr.records():
name = r['name'].upper()
value = r['value']
if name == 'COMMENT':
self.write_comment(value)
elif name == 'HISTORY':
self.write_history(value)
elif name == 'CONTINUE':
self._write_continue(value)
else:
comment = r.get('comment', '')
self.write_key(name, value, comment=comment) | python | def write_keys(self, records_in, clean=True):
"""
Write the keywords to the header.
parameters
----------
records: FITSHDR or list or dict
Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
clean: boolean
If True, trim out the standard fits header keywords that are
created on HDU creation, such as EXTEND, SIMPLE, TTYPE, TFORM,
TDIM, XTENSION, BITPIX, NAXIS, etc.
Notes
-----
Input keys named COMMENT and HISTORY are written using the
write_comment and write_history methods.
"""
if isinstance(records_in, FITSHDR):
hdr = records_in
else:
hdr = FITSHDR(records_in)
if clean:
is_table = hasattr(self, '_table_type_str')
# is_table = isinstance(self, TableHDU)
hdr.clean(is_table=is_table)
for r in hdr.records():
name = r['name'].upper()
value = r['value']
if name == 'COMMENT':
self.write_comment(value)
elif name == 'HISTORY':
self.write_history(value)
elif name == 'CONTINUE':
self._write_continue(value)
else:
comment = r.get('comment', '')
self.write_key(name, value, comment=comment) | [
"def",
"write_keys",
"(",
"self",
",",
"records_in",
",",
"clean",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"records_in",
",",
"FITSHDR",
")",
":",
"hdr",
"=",
"records_in",
"else",
":",
"hdr",
"=",
"FITSHDR",
"(",
"records_in",
")",
"if",
"clean",
":",
"is_table",
"=",
"hasattr",
"(",
"self",
",",
"'_table_type_str'",
")",
"# is_table = isinstance(self, TableHDU)",
"hdr",
".",
"clean",
"(",
"is_table",
"=",
"is_table",
")",
"for",
"r",
"in",
"hdr",
".",
"records",
"(",
")",
":",
"name",
"=",
"r",
"[",
"'name'",
"]",
".",
"upper",
"(",
")",
"value",
"=",
"r",
"[",
"'value'",
"]",
"if",
"name",
"==",
"'COMMENT'",
":",
"self",
".",
"write_comment",
"(",
"value",
")",
"elif",
"name",
"==",
"'HISTORY'",
":",
"self",
".",
"write_history",
"(",
"value",
")",
"elif",
"name",
"==",
"'CONTINUE'",
":",
"self",
".",
"_write_continue",
"(",
"value",
")",
"else",
":",
"comment",
"=",
"r",
".",
"get",
"(",
"'comment'",
",",
"''",
")",
"self",
".",
"write_key",
"(",
"name",
",",
"value",
",",
"comment",
"=",
"comment",
")"
] | Write the keywords to the header.
parameters
----------
records: FITSHDR or list or dict
Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
clean: boolean
If True, trim out the standard fits header keywords that are
created on HDU creation, such as EXTEND, SIMPLE, TTYPE, TFORM,
TDIM, XTENSION, BITPIX, NAXIS, etc.
Notes
-----
Input keys named COMMENT and HISTORY are written using the
write_comment and write_history methods. | [
"Write",
"the",
"keywords",
"to",
"the",
"header",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L249-L295 |
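Editor's note: a hedged sketch of the record forms write_keys accepts; the filename is illustrative and an existing HDU is assumed:

import fitsio

with fitsio.FITS('data.fits', 'rw') as fits:
    hdu = fits[0]
    # list of dicts: order preserved, comments written, HISTORY routed specially
    hdu.write_keys([
        {'name': 'OBSERVER', 'value': 'J. Doe', 'comment': 'who took the data'},
        {'name': 'HISTORY', 'value': 'flat fielded'},
    ])
    # plain dict: no comments, arbitrary order
    hdu.write_keys({'EXPTIME': 30.0, 'FILTER': 'r'})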
2,193 | esheldon/fitsio | fitsio/hdu/base.py | HDUBase._update_info | def _update_info(self):
"""
Update metadata for this HDU
"""
try:
self._FITS.movabs_hdu(self._ext+1)
except IOError:
raise RuntimeError("no such hdu")
self._info = self._FITS.get_hdu_info(self._ext+1) | python | def _update_info(self):
"""
Update metadata for this HDU
"""
try:
self._FITS.movabs_hdu(self._ext+1)
except IOError:
raise RuntimeError("no such hdu")
self._info = self._FITS.get_hdu_info(self._ext+1) | [
"def",
"_update_info",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_FITS",
".",
"movabs_hdu",
"(",
"self",
".",
"_ext",
"+",
"1",
")",
"except",
"IOError",
":",
"raise",
"RuntimeError",
"(",
"\"no such hdu\"",
")",
"self",
".",
"_info",
"=",
"self",
".",
"_FITS",
".",
"get_hdu_info",
"(",
"self",
".",
"_ext",
"+",
"1",
")"
] | Update metadata for this HDU | [
"Update",
"metadata",
"for",
"this",
"HDU"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L322-L331 |
2,194 | esheldon/fitsio | fitsio/hdu/base.py | HDUBase._get_repr_list | def _get_repr_list(self):
"""
Get some representation data common to all HDU types
"""
spacing = ' '*2
text = ['']
text.append("%sfile: %s" % (spacing, self._filename))
text.append("%sextension: %d" % (spacing, self._info['hdunum']-1))
text.append(
"%stype: %s" % (spacing, _hdu_type_map[self._info['hdutype']]))
extname = self.get_extname()
if extname != "":
text.append("%sextname: %s" % (spacing, extname))
extver = self.get_extver()
if extver != 0:
text.append("%sextver: %s" % (spacing, extver))
return text, spacing | python | def _get_repr_list(self):
"""
Get some representation data common to all HDU types
"""
spacing = ' '*2
text = ['']
text.append("%sfile: %s" % (spacing, self._filename))
text.append("%sextension: %d" % (spacing, self._info['hdunum']-1))
text.append(
"%stype: %s" % (spacing, _hdu_type_map[self._info['hdutype']]))
extname = self.get_extname()
if extname != "":
text.append("%sextname: %s" % (spacing, extname))
extver = self.get_extver()
if extver != 0:
text.append("%sextver: %s" % (spacing, extver))
return text, spacing | [
"def",
"_get_repr_list",
"(",
"self",
")",
":",
"spacing",
"=",
"' '",
"*",
"2",
"text",
"=",
"[",
"''",
"]",
"text",
".",
"append",
"(",
"\"%sfile: %s\"",
"%",
"(",
"spacing",
",",
"self",
".",
"_filename",
")",
")",
"text",
".",
"append",
"(",
"\"%sextension: %d\"",
"%",
"(",
"spacing",
",",
"self",
".",
"_info",
"[",
"'hdunum'",
"]",
"-",
"1",
")",
")",
"text",
".",
"append",
"(",
"\"%stype: %s\"",
"%",
"(",
"spacing",
",",
"_hdu_type_map",
"[",
"self",
".",
"_info",
"[",
"'hdutype'",
"]",
"]",
")",
")",
"extname",
"=",
"self",
".",
"get_extname",
"(",
")",
"if",
"extname",
"!=",
"\"\"",
":",
"text",
".",
"append",
"(",
"\"%sextname: %s\"",
"%",
"(",
"spacing",
",",
"extname",
")",
")",
"extver",
"=",
"self",
".",
"get_extver",
"(",
")",
"if",
"extver",
"!=",
"0",
":",
"text",
".",
"append",
"(",
"\"%sextver: %s\"",
"%",
"(",
"spacing",
",",
"extver",
")",
")",
"return",
"text",
",",
"spacing"
] | Get some representation data common to all HDU types | [
"Get",
"some",
"representation",
"data",
"common",
"to",
"all",
"HDU",
"types"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L333-L351 |
2,195 | esheldon/fitsio | fitsio/header.py | FITSHDR.add_record | def add_record(self, record_in):
"""
Add a new record. Strip quotes from around strings.
This will over-write if the key already exists, except
for COMMENT and HISTORY fields
parameters
-----------
record:
The record, either a dict or a header card string
or a FITSRecord or FITSCard
convert: bool, optional
If True, convert strings. E.g. '3' gets
converted to 3 and "'hello'" gets converted
to 'hello' and 'T'/'F' to True/False. Default
is False.
"""
if (isinstance(record_in, dict) and
'name' in record_in and 'value' in record_in):
record = {}
record.update(record_in)
else:
record = FITSRecord(record_in)
# only append when this name already exists if it is
# a comment or history field, otherwise simply over-write
key = record['name'].upper()
key_exists = key in self._record_map
if not key_exists or key in ('COMMENT', 'HISTORY', 'CONTINUE'):
# append new record
self._record_list.append(record)
index = len(self._record_list)-1
self._index_map[key] = index
else:
# over-write existing
index = self._index_map[key]
self._record_list[index] = record
self._record_map[key] = record | python | def add_record(self, record_in):
"""
Add a new record. Strip quotes from around strings.
This will over-write if the key already exists, except
for COMMENT and HISTORY fields
parameters
-----------
record:
The record, either a dict or a header card string
or a FITSRecord or FITSCard
convert: bool, optional
If True, convert strings. E.g. '3' gets
converted to 3 and "'hello'" gets converted
to 'hello' and 'T'/'F' to True/False. Default
is False.
"""
if (isinstance(record_in, dict) and
'name' in record_in and 'value' in record_in):
record = {}
record.update(record_in)
else:
record = FITSRecord(record_in)
# only append when this name already exists if it is
# a comment or history field, otherwise simply over-write
key = record['name'].upper()
key_exists = key in self._record_map
if not key_exists or key in ('COMMENT', 'HISTORY', 'CONTINUE'):
# append new record
self._record_list.append(record)
index = len(self._record_list)-1
self._index_map[key] = index
else:
# over-write existing
index = self._index_map[key]
self._record_list[index] = record
self._record_map[key] = record | [
"def",
"add_record",
"(",
"self",
",",
"record_in",
")",
":",
"if",
"(",
"isinstance",
"(",
"record_in",
",",
"dict",
")",
"and",
"'name'",
"in",
"record_in",
"and",
"'value'",
"in",
"record_in",
")",
":",
"record",
"=",
"{",
"}",
"record",
".",
"update",
"(",
"record_in",
")",
"else",
":",
"record",
"=",
"FITSRecord",
"(",
"record_in",
")",
"# only append when this name already exists if it is",
"# a comment or history field, otherwise simply over-write",
"key",
"=",
"record",
"[",
"'name'",
"]",
".",
"upper",
"(",
")",
"key_exists",
"=",
"key",
"in",
"self",
".",
"_record_map",
"if",
"not",
"key_exists",
"or",
"key",
"in",
"(",
"'COMMENT'",
",",
"'HISTORY'",
",",
"'CONTINUE'",
")",
":",
"# append new record",
"self",
".",
"_record_list",
".",
"append",
"(",
"record",
")",
"index",
"=",
"len",
"(",
"self",
".",
"_record_list",
")",
"-",
"1",
"self",
".",
"_index_map",
"[",
"key",
"]",
"=",
"index",
"else",
":",
"# over-write existing",
"index",
"=",
"self",
".",
"_index_map",
"[",
"key",
"]",
"self",
".",
"_record_list",
"[",
"index",
"]",
"=",
"record",
"self",
".",
"_record_map",
"[",
"key",
"]",
"=",
"record"
] | Add a new record. Strip quotes from around strings.
This will over-write if the key already exists, except
for COMMENT and HISTORY fields
parameters
-----------
record:
The record, either a dict or a header card string
or a FITSRecord or FITSCard
convert: bool, optional
If True, convert strings. E.g. '3' gets
converted to 3 and "'hello'" gets converted
to 'hello' and 'T'/'F' to True/False. Default
is False. | [
"Add",
"a",
"new",
"record",
".",
"Strip",
"quotes",
"from",
"around",
"strings",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L133-L174 |
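Editor's note: the append-versus-overwrite rule in add_record is easiest to see directly. A minimal sketch using the public FITSHDR class (bracket access returning the stored value is an assumption):

from fitsio import FITSHDR

hdr = FITSHDR()
hdr.add_record({'name': 'EXPTIME', 'value': 30.0, 'comment': 'seconds'})
hdr.add_record({'name': 'EXPTIME', 'value': 60.0})  # same key: overwrites
hdr.add_record({'name': 'COMMENT', 'value': 'first comment'})
hdr.add_record({'name': 'COMMENT', 'value': 'second comment'})  # appended
print(hdr['EXPTIME'])  # 60.0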
2,196 | esheldon/fitsio | fitsio/header.py | FITSHDR.get_comment | def get_comment(self, item):
"""
Get the comment for the requested entry
"""
key = item.upper()
if key not in self._record_map:
raise KeyError("unknown record: %s" % key)
if 'comment' not in self._record_map[key]:
return None
else:
return self._record_map[key]['comment'] | python | def get_comment(self, item):
"""
Get the comment for the requested entry
"""
key = item.upper()
if key not in self._record_map:
raise KeyError("unknown record: %s" % key)
if 'comment' not in self._record_map[key]:
return None
else:
return self._record_map[key]['comment'] | [
"def",
"get_comment",
"(",
"self",
",",
"item",
")",
":",
"key",
"=",
"item",
".",
"upper",
"(",
")",
"if",
"key",
"not",
"in",
"self",
".",
"_record_map",
":",
"raise",
"KeyError",
"(",
"\"unknown record: %s\"",
"%",
"key",
")",
"if",
"'comment'",
"not",
"in",
"self",
".",
"_record_map",
"[",
"key",
"]",
":",
"return",
"None",
"else",
":",
"return",
"self",
".",
"_record_map",
"[",
"key",
"]",
"[",
"'comment'",
"]"
] | Get the comment for the requested entry | [
"Get",
"the",
"comment",
"for",
"the",
"requested",
"entry"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L180-L191 |
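Editor's note: get_comment raises KeyError for unknown names and returns None for records that carry no comment; a quick sketch:

from fitsio import FITSHDR

hdr = FITSHDR([{'name': 'EXPTIME', 'value': 30.0, 'comment': 'seconds'}])
print(hdr.get_comment('EXPTIME'))  # 'seconds'
try:
    hdr.get_comment('NOSUCH')
except KeyError as err:
    print('missing key:', err)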
2,197 | esheldon/fitsio | fitsio/header.py | FITSHDR.delete | def delete(self, name):
"""
Delete the specified entry if it exists.
"""
if isinstance(name, (list, tuple)):
for xx in name:
self.delete(xx)
else:
if name in self._record_map:
del self._record_map[name]
self._record_list = [
r for r in self._record_list if r['name'] != name] | python | def delete(self, name):
"""
Delete the specified entry if it exists.
"""
if isinstance(name, (list, tuple)):
for xx in name:
self.delete(xx)
else:
if name in self._record_map:
del self._record_map[name]
self._record_list = [
r for r in self._record_list if r['name'] != name] | [
"def",
"delete",
"(",
"self",
",",
"name",
")",
":",
"if",
"isinstance",
"(",
"name",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"xx",
"in",
"name",
":",
"self",
".",
"delete",
"(",
"xx",
")",
"else",
":",
"if",
"name",
"in",
"self",
".",
"_record_map",
":",
"del",
"self",
".",
"_record_map",
"[",
"name",
"]",
"self",
".",
"_record_list",
"=",
"[",
"r",
"for",
"r",
"in",
"self",
".",
"_record_list",
"if",
"r",
"[",
"'name'",
"]",
"!=",
"name",
"]"
] | Delete the specified entry if it exists. | [
"Delete",
"the",
"specified",
"entry",
"if",
"it",
"exists",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L205-L216 |
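Editor's note: delete accepts a single name or a sequence of names and silently skips names that are absent. A sketch (the `in` test assumes FITSHDR supports containment, which its _contains_and_name helper suggests):

from fitsio import FITSHDR

hdr = FITSHDR({'APERTURE': 1.0, 'FILTER': 'r', 'OBJECT': 'M31'})
hdr.delete('APERTURE')            # remove one entry
hdr.delete(['FILTER', 'NOSUCH'])  # lists work; unknown names are ignored
print('OBJECT' in hdr)            # True: only the named keys were removed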
2,198 | esheldon/fitsio | fitsio/header.py | FITSHDR.clean | def clean(self, is_table=False):
"""
Remove reserved keywords from the header.
These are keywords that the fits writer must write in order
to maintain consistency between header and data.
keywords
--------
is_table: bool, optional
Set True if this is a table, so extra keywords will be cleaned
"""
rmnames = [
'SIMPLE', 'EXTEND', 'XTENSION', 'BITPIX', 'PCOUNT', 'GCOUNT',
'THEAP',
'EXTNAME',
'BLANK',
'ZQUANTIZ', 'ZDITHER0', 'ZIMAGE', 'ZCMPTYPE',
'ZSIMPLE', 'ZTENSION', 'ZPCOUNT', 'ZGCOUNT',
'ZBITPIX', 'ZEXTEND',
# 'FZTILELN','FZALGOR',
'CHECKSUM', 'DATASUM']
if is_table:
# these are not allowed in tables
rmnames += [
'BUNIT', 'BSCALE', 'BZERO',
]
self.delete(rmnames)
r = self._record_map.get('NAXIS', None)
if r is not None:
naxis = int(r['value'])
self.delete('NAXIS')
rmnames = ['NAXIS%d' % i for i in xrange(1, naxis+1)]
self.delete(rmnames)
r = self._record_map.get('ZNAXIS', None)
self.delete('ZNAXIS')
if r is not None:
znaxis = int(r['value'])
rmnames = ['ZNAXIS%d' % i for i in xrange(1, znaxis+1)]
self.delete(rmnames)
rmnames = ['ZTILE%d' % i for i in xrange(1, znaxis+1)]
self.delete(rmnames)
rmnames = ['ZNAME%d' % i for i in xrange(1, znaxis+1)]
self.delete(rmnames)
rmnames = ['ZVAL%d' % i for i in xrange(1, znaxis+1)]
self.delete(rmnames)
r = self._record_map.get('TFIELDS', None)
if r is not None:
tfields = int(r['value'])
self.delete('TFIELDS')
if tfields > 0:
nbase = [
'TFORM', 'TTYPE', 'TDIM', 'TUNIT', 'TSCAL', 'TZERO',
'TNULL', 'TDISP', 'TDMIN', 'TDMAX', 'TDESC', 'TROTA',
'TRPIX', 'TRVAL', 'TDELT', 'TCUNI',
# 'FZALG'
]
for i in xrange(1, tfields+1):
names = ['%s%d' % (n, i) for n in nbase]
self.delete(names) | python | def clean(self, is_table=False):
"""
Remove reserved keywords from the header.
These are keywords that the fits writer must write in order
to maintain consistency between header and data.
keywords
--------
is_table: bool, optional
Set True if this is a table, so extra keywords will be cleaned
"""
rmnames = [
'SIMPLE', 'EXTEND', 'XTENSION', 'BITPIX', 'PCOUNT', 'GCOUNT',
'THEAP',
'EXTNAME',
'BLANK',
'ZQUANTIZ', 'ZDITHER0', 'ZIMAGE', 'ZCMPTYPE',
'ZSIMPLE', 'ZTENSION', 'ZPCOUNT', 'ZGCOUNT',
'ZBITPIX', 'ZEXTEND',
# 'FZTILELN','FZALGOR',
'CHECKSUM', 'DATASUM']
if is_table:
# these are not allowed in tables
rmnames += [
'BUNIT', 'BSCALE', 'BZERO',
]
self.delete(rmnames)
r = self._record_map.get('NAXIS', None)
if r is not None:
naxis = int(r['value'])
self.delete('NAXIS')
rmnames = ['NAXIS%d' % i for i in xrange(1, naxis+1)]
self.delete(rmnames)
r = self._record_map.get('ZNAXIS', None)
self.delete('ZNAXIS')
if r is not None:
znaxis = int(r['value'])
rmnames = ['ZNAXIS%d' % i for i in xrange(1, znaxis+1)]
self.delete(rmnames)
rmnames = ['ZTILE%d' % i for i in xrange(1, znaxis+1)]
self.delete(rmnames)
rmnames = ['ZNAME%d' % i for i in xrange(1, znaxis+1)]
self.delete(rmnames)
rmnames = ['ZVAL%d' % i for i in xrange(1, znaxis+1)]
self.delete(rmnames)
r = self._record_map.get('TFIELDS', None)
if r is not None:
tfields = int(r['value'])
self.delete('TFIELDS')
if tfields > 0:
nbase = [
'TFORM', 'TTYPE', 'TDIM', 'TUNIT', 'TSCAL', 'TZERO',
'TNULL', 'TDISP', 'TDMIN', 'TDMAX', 'TDESC', 'TROTA',
'TRPIX', 'TRVAL', 'TDELT', 'TCUNI',
# 'FZALG'
]
for i in xrange(1, tfields+1):
names = ['%s%d' % (n, i) for n in nbase]
self.delete(names) | [
"def",
"clean",
"(",
"self",
",",
"is_table",
"=",
"False",
")",
":",
"rmnames",
"=",
"[",
"'SIMPLE'",
",",
"'EXTEND'",
",",
"'XTENSION'",
",",
"'BITPIX'",
",",
"'PCOUNT'",
",",
"'GCOUNT'",
",",
"'THEAP'",
",",
"'EXTNAME'",
",",
"'BLANK'",
",",
"'ZQUANTIZ'",
",",
"'ZDITHER0'",
",",
"'ZIMAGE'",
",",
"'ZCMPTYPE'",
",",
"'ZSIMPLE'",
",",
"'ZTENSION'",
",",
"'ZPCOUNT'",
",",
"'ZGCOUNT'",
",",
"'ZBITPIX'",
",",
"'ZEXTEND'",
",",
"# 'FZTILELN','FZALGOR',",
"'CHECKSUM'",
",",
"'DATASUM'",
"]",
"if",
"is_table",
":",
"# these are not allowed in tables",
"rmnames",
"+=",
"[",
"'BUNIT'",
",",
"'BSCALE'",
",",
"'BZERO'",
",",
"]",
"self",
".",
"delete",
"(",
"rmnames",
")",
"r",
"=",
"self",
".",
"_record_map",
".",
"get",
"(",
"'NAXIS'",
",",
"None",
")",
"if",
"r",
"is",
"not",
"None",
":",
"naxis",
"=",
"int",
"(",
"r",
"[",
"'value'",
"]",
")",
"self",
".",
"delete",
"(",
"'NAXIS'",
")",
"rmnames",
"=",
"[",
"'NAXIS%d'",
"%",
"i",
"for",
"i",
"in",
"xrange",
"(",
"1",
",",
"naxis",
"+",
"1",
")",
"]",
"self",
".",
"delete",
"(",
"rmnames",
")",
"r",
"=",
"self",
".",
"_record_map",
".",
"get",
"(",
"'ZNAXIS'",
",",
"None",
")",
"self",
".",
"delete",
"(",
"'ZNAXIS'",
")",
"if",
"r",
"is",
"not",
"None",
":",
"znaxis",
"=",
"int",
"(",
"r",
"[",
"'value'",
"]",
")",
"rmnames",
"=",
"[",
"'ZNAXIS%d'",
"%",
"i",
"for",
"i",
"in",
"xrange",
"(",
"1",
",",
"znaxis",
"+",
"1",
")",
"]",
"self",
".",
"delete",
"(",
"rmnames",
")",
"rmnames",
"=",
"[",
"'ZTILE%d'",
"%",
"i",
"for",
"i",
"in",
"xrange",
"(",
"1",
",",
"znaxis",
"+",
"1",
")",
"]",
"self",
".",
"delete",
"(",
"rmnames",
")",
"rmnames",
"=",
"[",
"'ZNAME%d'",
"%",
"i",
"for",
"i",
"in",
"xrange",
"(",
"1",
",",
"znaxis",
"+",
"1",
")",
"]",
"self",
".",
"delete",
"(",
"rmnames",
")",
"rmnames",
"=",
"[",
"'ZVAL%d'",
"%",
"i",
"for",
"i",
"in",
"xrange",
"(",
"1",
",",
"znaxis",
"+",
"1",
")",
"]",
"self",
".",
"delete",
"(",
"rmnames",
")",
"r",
"=",
"self",
".",
"_record_map",
".",
"get",
"(",
"'TFIELDS'",
",",
"None",
")",
"if",
"r",
"is",
"not",
"None",
":",
"tfields",
"=",
"int",
"(",
"r",
"[",
"'value'",
"]",
")",
"self",
".",
"delete",
"(",
"'TFIELDS'",
")",
"if",
"tfields",
">",
"0",
":",
"nbase",
"=",
"[",
"'TFORM'",
",",
"'TTYPE'",
",",
"'TDIM'",
",",
"'TUNIT'",
",",
"'TSCAL'",
",",
"'TZERO'",
",",
"'TNULL'",
",",
"'TDISP'",
",",
"'TDMIN'",
",",
"'TDMAX'",
",",
"'TDESC'",
",",
"'TROTA'",
",",
"'TRPIX'",
",",
"'TRVAL'",
",",
"'TDELT'",
",",
"'TCUNI'",
",",
"# 'FZALG'",
"]",
"for",
"i",
"in",
"xrange",
"(",
"1",
",",
"tfields",
"+",
"1",
")",
":",
"names",
"=",
"[",
"'%s%d'",
"%",
"(",
"n",
",",
"i",
")",
"for",
"n",
"in",
"nbase",
"]",
"self",
".",
"delete",
"(",
"names",
")"
] | Remove reserved keywords from the header.
These are keywords that the fits writer must write in order
to maintain consistency between header and data.
keywords
--------
is_table: bool, optional
Set True if this is a table, so extra keywords will be cleaned | [
"Remove",
"reserved",
"keywords",
"from",
"the",
"header",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L218-L288 |
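Editor's note: clean is typically used to strip structural keywords before reusing a header on a new HDU; reserved keys and their dependents (NAXIS plus NAXIS1..NAXISn, and so on) are removed while user keywords survive. A hedged sketch:

from fitsio import FITSHDR

hdr = FITSHDR([
    {'name': 'BITPIX', 'value': -32},    # reserved: removed
    {'name': 'NAXIS', 'value': 2},       # reserved: removed with NAXIS1/NAXIS2
    {'name': 'NAXIS1', 'value': 100},
    {'name': 'NAXIS2', 'value': 200},
    {'name': 'OBJECT', 'value': 'M31'},  # user keyword: kept
])
hdr.clean()
print([r['name'] for r in hdr.records()])  # expect ['OBJECT']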
2,199 | esheldon/fitsio | fitsio/header.py | FITSHDR.get | def get(self, item, default_value=None):
"""
Get the requested header entry by keyword name
"""
found, name = self._contains_and_name(item)
if found:
return self._record_map[name]['value']
else:
return default_value | python | def get(self, item, default_value=None):
"""
Get the requested header entry by keyword name
"""
found, name = self._contains_and_name(item)
if found:
return self._record_map[name]['value']
else:
return default_value | [
"def",
"get",
"(",
"self",
",",
"item",
",",
"default_value",
"=",
"None",
")",
":",
"found",
",",
"name",
"=",
"self",
".",
"_contains_and_name",
"(",
"item",
")",
"if",
"found",
":",
"return",
"self",
".",
"_record_map",
"[",
"name",
"]",
"[",
"'value'",
"]",
"else",
":",
"return",
"default_value"
] | Get the requested header entry by keyword name | [
"Get",
"the",
"requested",
"header",
"entry",
"by",
"keyword",
"name"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L290-L299 |
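Editor's note: get mirrors dict.get semantics; a short sketch (case-insensitive lookup is an assumption based on the upper-casing seen elsewhere in this class):

from fitsio import FITSHDR

hdr = FITSHDR({'EXPTIME': 30.0})
print(hdr.get('EXPTIME'))     # 30.0
print(hdr.get('exptime'))     # assumed case-insensitive: 30.0
print(hdr.get('NOSUCH', -1))  # default returned for missing keys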