body (string, 26-98.2k chars) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (1 class) | body_without_docstring (string, 20-98.2k chars) |
---|---|---|---|---|---|---|---|
@model_info.command('gtrnadb')
@click.argument('filename', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def gtrnadb_model_info(filename, output):
'\n Parse the metadata.tsv file from R2DT for gtrnadb models to\n produce something we can put in our database.\n '
r2dt.write_gtrnadb(filename, output) | -578,127,961,795,464,700 | Parse the metadata.tsv file from R2DT for gtrnadb models to
produce something we can put in our database. | rnacentral_pipeline/cli/r2dt.py | gtrnadb_model_info | RNAcentral/rnacentral-import-pipeline | python | @model_info.command('gtrnadb')
@click.argument('filename', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def gtrnadb_model_info(filename, output):
'\n Parse the metadata.tsv file from R2DT for gtrnadb models to\n produce something we can put in our database.\n '
r2dt.write_gtrnadb(filename, output) |
@model_info.command('rnase-p')
@click.argument('filename', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def rnase_p_model_info(filename, output):
'\n Parse the metadata.tsv file from R2DT for Ribovision models to\n produce something we can put in our database.\n '
r2dt.write_rnase_p(filename, output) | 2,337,609,762,601,865,000 | Parse the metadata.tsv file from R2DT for Ribovision models to
produce something we can put in our database. | rnacentral_pipeline/cli/r2dt.py | rnase_p_model_info | RNAcentral/rnacentral-import-pipeline | python | @model_info.command('rnase-p')
@click.argument('filename', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def rnase_p_model_info(filename, output):
'\n Parse the metadata.tsv file from R2DT for Ribovision models to\n produce something we can put in our database.\n '
r2dt.write_rnase_p(filename, output) |
def __init__(self, devpath):
'Return a disc object'
self.devpath = devpath
self.mountpoint = ('/mnt' + devpath)
self.hasnicetitle = False
self.video_type = 'unknown'
self.ejected = False
self.updated = False
if (cfg['VIDEOTYPE'] != 'auto'):
self.video_type = cfg['VIDEOTYPE']
self.parse_udev()
self.get_pid() | 8,437,722,910,963,163,000 | Return a disc object | arm/models/models.py | __init__ | charmarkk/automatic-ripping-machine | python | def __init__(self, devpath):
self.devpath = devpath
self.mountpoint = ('/mnt' + devpath)
self.hasnicetitle = False
self.video_type = 'unknown'
self.ejected = False
self.updated = False
if (cfg['VIDEOTYPE'] != 'auto'):
self.video_type = cfg['VIDEOTYPE']
self.parse_udev()
self.get_pid() |
def parse_udev(self):
'Parse udev for properties of current disc'
context = pyudev.Context()
device = pyudev.Devices.from_device_file(context, self.devpath)
self.disctype = 'unknown'
for (key, value) in device.items():
if (key == 'ID_FS_LABEL'):
self.label = value
if (value == 'iso9660'):
self.disctype = 'data'
elif (key == 'ID_CDROM_MEDIA_BD'):
self.disctype = 'bluray'
elif (key == 'ID_CDROM_MEDIA_DVD'):
self.disctype = 'dvd'
elif (key == 'ID_CDROM_MEDIA_TRACK_COUNT_AUDIO'):
self.disctype = 'music'
else:
pass | 5,184,117,535,612,883,000 | Parse udev for properties of current disc | arm/models/models.py | parse_udev | charmarkk/automatic-ripping-machine | python | def parse_udev(self):
context = pyudev.Context()
device = pyudev.Devices.from_device_file(context, self.devpath)
self.disctype = 'unknown'
for (key, value) in device.items():
if (key == 'ID_FS_LABEL'):
self.label = value
if (value == 'iso9660'):
self.disctype = 'data'
elif (key == 'ID_CDROM_MEDIA_BD'):
self.disctype = 'bluray'
elif (key == 'ID_CDROM_MEDIA_DVD'):
self.disctype = 'dvd'
elif (key == 'ID_CDROM_MEDIA_TRACK_COUNT_AUDIO'):
self.disctype = 'music'
else:
pass |
def identify_audio_cd(self):
'\n Get the title for audio cds to use for the logfile name.\n\n Needs the job class passed into it so it can be forwarded to mb\n\n return - only the logfile - setup_logging() adds the full path\n '
disc_id = music_brainz.get_disc_id(self)
mb_title = music_brainz.get_title(disc_id, self)
if (mb_title == 'not identified'):
self.label = self.title = 'not identified'
logfile = 'music_cd.log'
new_log_file = f'music_cd_{round((time.time() * 100))}.log'
else:
logfile = f'{mb_title}.log'
new_log_file = f'{mb_title}_{round((time.time() * 100))}.log'
temp_log_full = os.path.join(cfg['LOGPATH'], logfile)
logfile = (new_log_file if os.path.isfile(temp_log_full) else logfile)
return logfile | 6,480,684,719,705,463,000 | Get the title for audio cds to use for the logfile name.
Needs the job class passed into it so it can be forwarded to mb
return - only the logfile - setup_logging() adds the full path | arm/models/models.py | identify_audio_cd | charmarkk/automatic-ripping-machine | python | def identify_audio_cd(self):
'\n Get the title for audio cds to use for the logfile name.\n\n Needs the job class passed into it so it can be forwarded to mb\n\n return - only the logfile - setup_logging() adds the full path\n '
disc_id = music_brainz.get_disc_id(self)
mb_title = music_brainz.get_title(disc_id, self)
if (mb_title == 'not identified'):
self.label = self.title = 'not identified'
logfile = 'music_cd.log'
new_log_file = f'music_cd_{round((time.time() * 100))}.log'
else:
logfile = f'{mb_title}.log'
new_log_file = f'{mb_title}_{round((time.time() * 100))}.log'
temp_log_full = os.path.join(cfg['LOGPATH'], logfile)
logfile = (new_log_file if os.path.isfile(temp_log_full) else logfile)
return logfile |
def __str__(self):
'Returns a string of the object'
s = (self.__class__.__name__ + ': ')
for (attr, value) in self.__dict__.items():
s = (((((s + '(') + str(attr)) + '=') + str(value)) + ') ')
return s | 5,233,745,533,802,784,000 | Returns a string of the object | arm/models/models.py | __str__ | charmarkk/automatic-ripping-machine | python | def __str__(self):
s = (self.__class__.__name__ + ': ')
for (attr, value) in self.__dict__.items():
s = (((((s + '(') + str(attr)) + '=') + str(value)) + ') ')
return s |
def pretty_table(self):
'Returns a string of the prettytable'
x = PrettyTable()
x.field_names = ['Config', 'Value']
x._max_width = {'Config': 50, 'Value': 60}
for (attr, value) in self.__dict__.items():
if (attr == 'config'):
x.add_row([str(attr), str(value.pretty_table())])
else:
x.add_row([str(attr), str(value)])
return str(x.get_string()) | -5,753,102,044,256,257,000 | Returns a string of the prettytable | arm/models/models.py | pretty_table | charmarkk/automatic-ripping-machine | python | def pretty_table(self):
x = PrettyTable()
x.field_names = ['Config', 'Value']
x._max_width = {'Config': 50, 'Value': 60}
for (attr, value) in self.__dict__.items():
if (attr == 'config'):
x.add_row([str(attr), str(value.pretty_table())])
else:
x.add_row([str(attr), str(value)])
return str(x.get_string()) |
def eject(self):
"Eject disc if it hasn't previously been ejected"
if (not self.ejected):
self.ejected = True
try:
if os.system(('umount ' + self.devpath)):
logging.debug(('we unmounted disc' + self.devpath))
if os.system(('eject ' + self.devpath)):
logging.debug(('we ejected disc' + self.devpath))
self.ejected = True
else:
logging.debug(('failed to eject' + self.devpath))
except Exception as e:
logging.debug(((self.devpath + " couldn't be ejected ") + str(e))) | 3,129,454,796,627,657,000 | Eject disc if it hasn't previously been ejected | arm/models/models.py | eject | charmarkk/automatic-ripping-machine | python | def eject(self):
if (not self.ejected):
self.ejected = True
try:
if os.system(('umount ' + self.devpath)):
logging.debug(('we unmounted disc' + self.devpath))
if os.system(('eject ' + self.devpath)):
logging.debug(('we ejected disc' + self.devpath))
self.ejected = True
else:
logging.debug(('failed to eject' + self.devpath))
except Exception as e:
logging.debug(((self.devpath + " couldn't be ejected ") + str(e))) |
def __init__(self, job_id, track_number, length, aspect_ratio, fps, main_feature, source, basename, filename):
'Return a track object'
self.job_id = job_id
self.track_number = track_number
self.length = length
self.aspect_ratio = aspect_ratio
self.fps = fps
self.main_feature = main_feature
self.source = source
self.basename = basename
self.filename = filename
self.ripped = False | 190,976,422,805,984,930 | Return a track object | arm/models/models.py | __init__ | charmarkk/automatic-ripping-machine | python | def __init__(self, job_id, track_number, length, aspect_ratio, fps, main_feature, source, basename, filename):
self.job_id = job_id
self.track_number = track_number
self.length = length
self.aspect_ratio = aspect_ratio
self.fps = fps
self.main_feature = main_feature
self.source = source
self.basename = basename
self.filename = filename
self.ripped = False |
def list_params(self):
'Returns a string of the object'
s = (self.__class__.__name__ + ': ')
for (attr, value) in self.__dict__.items():
if s:
s = (s + '\n')
if ((str(attr) in hidden_attribs) and value):
value = HIDDEN_VALUE
s = (((s + str(attr)) + ':') + str(value))
return s | 6,946,453,928,283,418,000 | Returns a string of the object | arm/models/models.py | list_params | charmarkk/automatic-ripping-machine | python | def list_params(self):
s = (self.__class__.__name__ + ': ')
for (attr, value) in self.__dict__.items():
if s:
s = (s + '\n')
if ((str(attr) in hidden_attribs) and value):
value = HIDDEN_VALUE
s = (((s + str(attr)) + ':') + str(value))
return s |
def __str__(self):
'Returns a string of the object'
s = (self.__class__.__name__ + ': ')
for (attr, value) in self.__dict__.items():
if ((str(attr) in hidden_attribs) and value):
value = HIDDEN_VALUE
s = (((((s + '(') + str(attr)) + '=') + str(value)) + ') ')
return s | -2,756,119,392,359,758,000 | Returns a string of the object | arm/models/models.py | __str__ | charmarkk/automatic-ripping-machine | python | def __str__(self):
s = (self.__class__.__name__ + ': ')
for (attr, value) in self.__dict__.items():
if ((str(attr) in hidden_attribs) and value):
value = HIDDEN_VALUE
s = (((((s + '(') + str(attr)) + '=') + str(value)) + ') ')
return s |
def pretty_table(self):
'Returns a string of the prettytable'
x = PrettyTable()
x.field_names = ['Config', 'Value']
x._max_width = {'Config': 20, 'Value': 30}
for (attr, value) in self.__dict__.items():
if ((str(attr) in hidden_attribs) and value):
value = HIDDEN_VALUE
x.add_row([str(attr), str(value)])
return str(x.get_string()) | 2,637,011,702,280,520,700 | Returns a string of the prettytable | arm/models/models.py | pretty_table | charmarkk/automatic-ripping-machine | python | def pretty_table(self):
x = PrettyTable()
x.field_names = ['Config', 'Value']
x._max_width = {'Config': 20, 'Value': 30}
for (attr, value) in self.__dict__.items():
if ((str(attr) in hidden_attribs) and value):
value = HIDDEN_VALUE
x.add_row([str(attr), str(value)])
return str(x.get_string()) |
def __str__(self):
'Returns a string of the object'
s = (self.__class__.__name__ + ': ')
for (attr, value) in self.__dict__.items():
s = (((((s + '(') + str(attr)) + '=') + str(value)) + ') ')
return s | 5,233,745,533,802,784,000 | Returns a string of the object | arm/models/models.py | __str__ | charmarkk/automatic-ripping-machine | python | def __str__(self):
s = (self.__class__.__name__ + ': ')
for (attr, value) in self.__dict__.items():
s = (((((s + '(') + str(attr)) + '=') + str(value)) + ') ')
return s |
def testDistributionGroupAppsDeleteRequest(self):
'Test DistributionGroupAppsDeleteRequest'
pass | -6,899,858,591,365,831,000 | Test DistributionGroupAppsDeleteRequest | sdks/python/test/test_DistributionGroupAppsDeleteRequest.py | testDistributionGroupAppsDeleteRequest | Brantone/appcenter-sdks | python | def testDistributionGroupAppsDeleteRequest(self):
pass |
def __fleiss_pi_linear__(dataset, **kwargs):
"\n Calculates Fleiss' :math:`\\pi` (or multi-:math:`\\pi`), originally proposed in\n [Fleiss1971]_, and is equivalent to Siegel and Castellan's :math:`K`\n [SiegelCastellan1988]_. For 2 coders, this is equivalent to Scott's :math:`\\pi`\n [Scott1955]_.\n "
metric_kwargs = dict(kwargs)
metric_kwargs['return_parts'] = True
return_parts = kwargs['return_parts']
if (len(set([len(coder_segs.values()) for coder_segs in dataset.values()])) != 1):
raise Exception('Unequal number of items contained.')
(all_numerators, all_denominators, _, coders_boundaries) = __actual_agreement_linear__(dataset, **metric_kwargs)
A_a = (Decimal(sum(all_numerators)) / sum(all_denominators))
p_e_segs = list()
for boundaries_info in coders_boundaries.values():
for item in boundaries_info:
(boundaries, total_boundaries) = item
p_e_seg = (Decimal(boundaries) / total_boundaries)
p_e_segs.append(p_e_seg)
P_e_seg = (Decimal(sum(p_e_segs)) / len(p_e_segs))
A_e = (P_e_seg ** 2)
pi = ((A_a - A_e) / (Decimal('1') - A_e))
if return_parts:
return (A_a, A_e)
else:
return pi | 5,795,092,333,693,014,000 | Calculates Fleiss' :math:`\pi` (or multi-:math:`\pi`), originally proposed in
[Fleiss1971]_, and is equivalent to Siegel and Castellan's :math:`K`
[SiegelCastellan1988]_. For 2 coders, this is equivalent to Scott's :math:`\pi`
[Scott1955]_. | segeval/agreement/pi.py | __fleiss_pi_linear__ | cfournie/segmentation.evaluation | python | def __fleiss_pi_linear__(dataset, **kwargs):
"\n Calculates Fleiss' :math:`\\pi` (or multi-:math:`\\pi`), originally proposed in\n [Fleiss1971]_, and is equivalent to Siegel and Castellan's :math:`K`\n [SiegelCastellan1988]_. For 2 coders, this is equivalent to Scott's :math:`\\pi`\n [Scott1955]_.\n "
metric_kwargs = dict(kwargs)
metric_kwargs['return_parts'] = True
return_parts = kwargs['return_parts']
if (len(set([len(coder_segs.values()) for coder_segs in dataset.values()])) != 1):
raise Exception('Unequal number of items contained.')
(all_numerators, all_denominators, _, coders_boundaries) = __actual_agreement_linear__(dataset, **metric_kwargs)
A_a = (Decimal(sum(all_numerators)) / sum(all_denominators))
p_e_segs = list()
for boundaries_info in coders_boundaries.values():
for item in boundaries_info:
(boundaries, total_boundaries) = item
p_e_seg = (Decimal(boundaries) / total_boundaries)
p_e_segs.append(p_e_seg)
P_e_seg = (Decimal(sum(p_e_segs)) / len(p_e_segs))
A_e = (P_e_seg ** 2)
pi = ((A_a - A_e) / (Decimal('1') - A_e))
if return_parts:
return (A_a, A_e)
else:
return pi |
def fleiss_pi_linear(dataset, **kwargs):
"\n Calculates Fleiss' :math:`\\pi` (or multi-:math:`\\pi`), originally proposed in\n [Fleiss1971]_, and is equivalent to Siegel and Castellan's :math:`K`\n [SiegelCastellan1988]_. For 2 coders, this is equivalent to Scott's :math:`\\pi`\n [Scott1955]_.\n "
return __fnc_metric__(__fleiss_pi_linear__, dataset, **kwargs) | 5,529,600,764,925,489,000 | Calculates Fleiss' :math:`\pi` (or multi-:math:`\pi`), originally proposed in
[Fleiss1971]_, and is equivalent to Siegel and Castellan's :math:`K`
[SiegelCastellan1988]_. For 2 coders, this is equivalent to Scott's :math:`\pi`
[Scott1955]_. | segeval/agreement/pi.py | fleiss_pi_linear | cfournie/segmentation.evaluation | python | def fleiss_pi_linear(dataset, **kwargs):
"\n Calculates Fleiss' :math:`\\pi` (or multi-:math:`\\pi`), originally proposed in\n [Fleiss1971]_, and is equivalent to Siegel and Castellan's :math:`K`\n [SiegelCastellan1988]_. For 2 coders, this is equivalent to Scott's :math:`\\pi`\n [Scott1955]_.\n "
return __fnc_metric__(__fleiss_pi_linear__, dataset, **kwargs) |
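Written out, the statistic returned by `fleiss_pi_linear` above is the usual chance-corrected agreement: with $A_a$ the observed agreement (the sum of the per-item numerators over the sum of the denominators) and $\bar{P}_{e}$ the mean per-coder boundary proportion, the implementation computes

$$A_e = \bar{P}_{e}^{\,2}, \qquad \pi = \frac{A_a - A_e}{1 - A_e}.$$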
def _parse_general_counters(self, init_config):
'\n Return a dictionary for each job counter\n {\n counter_group_name: [\n counter_name\n ]\n }\n }\n '
job_counter = {}
if init_config.get('general_counters'):
for counter_group in init_config['general_counters']:
counter_group_name = counter_group.get('counter_group_name')
counters = counter_group.get('counters')
if (not counter_group_name):
raise Exception('"general_counters" must contain a valid "counter_group_name"')
if (not counters):
raise Exception('"general_counters" must contain a list of "counters"')
if (counter_group_name not in job_counter):
job_counter[counter_group_name] = []
for counter in counters:
counter_name = counter.get('counter_name')
if (not counter_name):
raise Exception('At least one "counter_name" should be specified in the list of "counters"')
job_counter[counter_group_name].append(counter_name)
return job_counter | -5,951,628,006,159,175,000 | Return a dictionary for each job counter
{
counter_group_name: [
counter_name
]
}
} | checks.d/mapreduce.py | _parse_general_counters | WPMedia/dd-agent | python | def _parse_general_counters(self, init_config):
'\n Return a dictionary for each job counter\n {\n counter_group_name: [\n counter_name\n ]\n }\n }\n '
job_counter = {}
if init_config.get('general_counters'):
for counter_group in init_config['general_counters']:
counter_group_name = counter_group.get('counter_group_name')
counters = counter_group.get('counters')
if (not counter_group_name):
raise Exception('"general_counters" must contain a valid "counter_group_name"')
if (not counters):
raise Exception('"general_counters" must contain a list of "counters"')
if (counter_group_name not in job_counter):
job_counter[counter_group_name] = []
for counter in counters:
counter_name = counter.get('counter_name')
if (not counter_name):
raise Exception('At least one "counter_name" should be specified in the list of "counters"')
job_counter[counter_group_name].append(counter_name)
return job_counter |
def _parse_job_specific_counters(self, init_config):
'\n Return a dictionary for each job counter\n {\n job_name: {\n counter_group_name: [\n counter_name\n ]\n }\n }\n }\n '
job_counter = {}
if init_config.get('job_specific_counters'):
for job in init_config['job_specific_counters']:
job_name = job.get('job_name')
metrics = job.get('metrics')
if (not job_name):
raise Exception('Counter metrics must have a "job_name"')
if (not metrics):
raise Exception('Jobs specified in counter metrics must contain at least one metric')
if (job_name not in job_counter):
job_counter[job_name] = {}
for metric in metrics:
counter_group_name = metric.get('counter_group_name')
counters = metric.get('counters')
if (not counter_group_name):
raise Exception('Each counter metric must contain a valid "counter_group_name"')
if (not counters):
raise Exception('Each counter metric must contain a list of "counters"')
if (counter_group_name not in job_counter[job_name]):
job_counter[job_name][counter_group_name] = []
for counter in counters:
counter_name = counter.get('counter_name')
if (not counter_name):
raise Exception('At least one "counter_name" should be specified in the list of "counters"')
job_counter[job_name][counter_group_name].append(counter_name)
return job_counter | -1,406,056,200,574,110,500 | Return a dictionary for each job counter
{
job_name: {
counter_group_name: [
counter_name
]
}
}
} | checks.d/mapreduce.py | _parse_job_specific_counters | WPMedia/dd-agent | python | def _parse_job_specific_counters(self, init_config):
'\n Return a dictionary for each job counter\n {\n job_name: {\n counter_group_name: [\n counter_name\n ]\n }\n }\n }\n '
job_counter = {}
if init_config.get('job_specific_counters'):
for job in init_config['job_specific_counters']:
job_name = job.get('job_name')
metrics = job.get('metrics')
if (not job_name):
raise Exception('Counter metrics must have a "job_name"')
if (not metrics):
raise Exception('Jobs specified in counter metrics must contain at least one metric')
if (job_name not in job_counter):
job_counter[job_name] = {}
for metric in metrics:
counter_group_name = metric.get('counter_group_name')
counters = metric.get('counters')
if (not counter_group_name):
raise Exception('Each counter metric must contain a valid "counter_group_name"')
if (not counters):
raise Exception('Each counter metric must contain a list of "counters"')
if (counter_group_name not in job_counter[job_name]):
job_counter[job_name][counter_group_name] = []
for counter in counters:
counter_name = counter.get('counter_name')
if (not counter_name):
raise Exception('At least one "counter_name" should be specified in the list of "counters"')
job_counter[job_name][counter_group_name].append(counter_name)
return job_counter |
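As a rough illustration of the configuration shape the two parsers above expect (the group, counter, and job names below are invented, not taken from a real cluster):

```python
# Hypothetical init_config fragment; all names below are made up for illustration.
init_config = {
    'general_counters': [
        {'counter_group_name': 'org.example.TaskCounter',
         'counters': [{'counter_name': 'MAP_INPUT_RECORDS'}]},
    ],
    'job_specific_counters': [
        {'job_name': 'nightly_aggregation',
         'metrics': [{'counter_group_name': 'org.example.TaskCounter',
                      'counters': [{'counter_name': 'REDUCE_OUTPUT_RECORDS'}]}]},
    ],
}
# _parse_general_counters(init_config)
#   -> {'org.example.TaskCounter': ['MAP_INPUT_RECORDS']}
# _parse_job_specific_counters(init_config)
#   -> {'nightly_aggregation': {'org.example.TaskCounter': ['REDUCE_OUTPUT_RECORDS']}}
```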
def _get_running_app_ids(self, rm_address, **kwargs):
'\n Return a dictionary of {app_id: (app_name, tracking_url)} for the running MapReduce applications\n '
metrics_json = self._rest_request_to_json(rm_address, YARN_APPS_PATH, YARN_SERVICE_CHECK, states=YARN_APPLICATION_STATES, applicationTypes=YARN_APPLICATION_TYPES)
running_apps = {}
if metrics_json.get('apps'):
if (metrics_json['apps'].get('app') is not None):
for app_json in metrics_json['apps']['app']:
app_id = app_json.get('id')
tracking_url = app_json.get('trackingUrl')
app_name = app_json.get('name')
if (app_id and tracking_url and app_name):
running_apps[app_id] = (app_name, tracking_url)
return running_apps | -2,774,981,823,774,166,500 | Return a dictionary of {app_id: (app_name, tracking_url)} for the running MapReduce applications | checks.d/mapreduce.py | _get_running_app_ids | WPMedia/dd-agent | python | def _get_running_app_ids(self, rm_address, **kwargs):
'\n \n '
metrics_json = self._rest_request_to_json(rm_address, YARN_APPS_PATH, YARN_SERVICE_CHECK, states=YARN_APPLICATION_STATES, applicationTypes=YARN_APPLICATION_TYPES)
running_apps = {}
if metrics_json.get('apps'):
if (metrics_json['apps'].get('app') is not None):
for app_json in metrics_json['apps']['app']:
app_id = app_json.get('id')
tracking_url = app_json.get('trackingUrl')
app_name = app_json.get('name')
if (app_id and tracking_url and app_name):
running_apps[app_id] = (app_name, tracking_url)
return running_apps |
def _mapreduce_job_metrics(self, running_apps, addl_tags):
"\n Get metrics for each MapReduce job.\n Return a dictionary for each MapReduce job\n {\n job_id: {\n 'job_name': job_name,\n 'app_name': app_name,\n 'user_name': user_name,\n 'tracking_url': tracking_url\n }\n "
running_jobs = {}
for (app_id, (app_name, tracking_url)) in running_apps.iteritems():
metrics_json = self._rest_request_to_json(tracking_url, MAPREDUCE_JOBS_PATH, MAPREDUCE_SERVICE_CHECK)
if metrics_json.get('jobs'):
if metrics_json['jobs'].get('job'):
for job_json in metrics_json['jobs']['job']:
job_id = job_json.get('id')
job_name = job_json.get('name')
user_name = job_json.get('user')
if (job_id and job_name and user_name):
running_jobs[str(job_id)] = {'job_name': str(job_name), 'app_name': str(app_name), 'user_name': str(user_name), 'tracking_url': self._join_url_dir(tracking_url, MAPREDUCE_JOBS_PATH, job_id)}
tags = [('app_name:' + str(app_name)), ('user_name:' + str(user_name)), ('job_name:' + str(job_name))]
tags.extend(addl_tags)
self._set_metrics_from_json(tags, job_json, MAPREDUCE_JOB_METRICS)
return running_jobs | -1,703,499,876,109,679,600 | Get metrics for each MapReduce job.
Return a dictionary for each MapReduce job
{
job_id: {
'job_name': job_name,
'app_name': app_name,
'user_name': user_name,
'tracking_url': tracking_url
} | checks.d/mapreduce.py | _mapreduce_job_metrics | WPMedia/dd-agent | python | def _mapreduce_job_metrics(self, running_apps, addl_tags):
"\n Get metrics for each MapReduce job.\n Return a dictionary for each MapReduce job\n {\n job_id: {\n 'job_name': job_name,\n 'app_name': app_name,\n 'user_name': user_name,\n 'tracking_url': tracking_url\n }\n "
running_jobs = {}
for (app_id, (app_name, tracking_url)) in running_apps.iteritems():
metrics_json = self._rest_request_to_json(tracking_url, MAPREDUCE_JOBS_PATH, MAPREDUCE_SERVICE_CHECK)
if metrics_json.get('jobs'):
if metrics_json['jobs'].get('job'):
for job_json in metrics_json['jobs']['job']:
job_id = job_json.get('id')
job_name = job_json.get('name')
user_name = job_json.get('user')
if (job_id and job_name and user_name):
running_jobs[str(job_id)] = {'job_name': str(job_name), 'app_name': str(app_name), 'user_name': str(user_name), 'tracking_url': self._join_url_dir(tracking_url, MAPREDUCE_JOBS_PATH, job_id)}
tags = [('app_name:' + str(app_name)), ('user_name:' + str(user_name)), ('job_name:' + str(job_name))]
tags.extend(addl_tags)
self._set_metrics_from_json(tags, job_json, MAPREDUCE_JOB_METRICS)
return running_jobs |
def _mapreduce_job_counters_metrics(self, running_jobs, addl_tags):
'\n Get custom metrics specified for each counter\n '
for (job_id, job_metrics) in running_jobs.iteritems():
job_name = job_metrics['job_name']
if (self.general_counters or (job_name in self.job_specific_counters)):
job_specific_metrics = self.job_specific_counters.get(job_name)
metrics_json = self._rest_request_to_json(job_metrics['tracking_url'], 'counters', MAPREDUCE_SERVICE_CHECK)
if metrics_json.get('jobCounters'):
if metrics_json['jobCounters'].get('counterGroup'):
for counter_group in metrics_json['jobCounters']['counterGroup']:
group_name = counter_group.get('counterGroupName')
if group_name:
counter_metrics = set([])
if (job_specific_metrics and (group_name in job_specific_metrics)):
counter_metrics = counter_metrics.union(job_specific_metrics[group_name])
if (group_name in self.general_counters):
counter_metrics = counter_metrics.union(self.general_counters[group_name])
if counter_metrics:
if counter_group.get('counter'):
for counter in counter_group['counter']:
counter_name = counter.get('name')
if (counter_name and (counter_name in counter_metrics)):
tags = [('app_name:' + job_metrics.get('app_name')), ('user_name:' + job_metrics.get('user_name')), ('job_name:' + job_name), ('counter_name:' + str(counter_name).lower())]
tags.extend(addl_tags)
self._set_metrics_from_json(tags, counter, MAPREDUCE_JOB_COUNTER_METRICS) | 1,464,761,827,869,469,700 | Get custom metrics specified for each counter | checks.d/mapreduce.py | _mapreduce_job_counters_metrics | WPMedia/dd-agent | python | def _mapreduce_job_counters_metrics(self, running_jobs, addl_tags):
'\n \n '
for (job_id, job_metrics) in running_jobs.iteritems():
job_name = job_metrics['job_name']
if (self.general_counters or (job_name in self.job_specific_counters)):
job_specific_metrics = self.job_specific_counters.get(job_name)
metrics_json = self._rest_request_to_json(job_metrics['tracking_url'], 'counters', MAPREDUCE_SERVICE_CHECK)
if metrics_json.get('jobCounters'):
if metrics_json['jobCounters'].get('counterGroup'):
for counter_group in metrics_json['jobCounters']['counterGroup']:
group_name = counter_group.get('counterGroupName')
if group_name:
counter_metrics = set([])
if (job_specific_metrics and (group_name in job_specific_metrics)):
counter_metrics = counter_metrics.union(job_specific_metrics[group_name])
if (group_name in self.general_counters):
counter_metrics = counter_metrics.union(self.general_counters[group_name])
if counter_metrics:
if counter_group.get('counter'):
for counter in counter_group['counter']:
counter_name = counter.get('name')
if (counter_name and (counter_name in counter_metrics)):
tags = [('app_name:' + job_metrics.get('app_name')), ('user_name:' + job_metrics.get('user_name')), ('job_name:' + job_name), ('counter_name:' + str(counter_name).lower())]
tags.extend(addl_tags)
self._set_metrics_from_json(tags, counter, MAPREDUCE_JOB_COUNTER_METRICS) |
def _mapreduce_task_metrics(self, running_jobs, addl_tags):
"\n Get metrics for each MapReduce task\n Return a dictionary of {task_id: 'tracking_url'} for each MapReduce task\n "
for (job_id, job_stats) in running_jobs.iteritems():
metrics_json = self._rest_request_to_json(job_stats['tracking_url'], 'tasks', MAPREDUCE_SERVICE_CHECK)
if metrics_json.get('tasks'):
if metrics_json['tasks'].get('task'):
for task in metrics_json['tasks']['task']:
task_type = task.get('type')
if task_type:
tags = [('app_name:' + job_stats['app_name']), ('user_name:' + job_stats['user_name']), ('job_name:' + job_stats['job_name']), ('task_type:' + str(task_type).lower())]
tags.extend(addl_tags)
if (task_type == 'MAP'):
self._set_metrics_from_json(tags, task, MAPREDUCE_MAP_TASK_METRICS)
elif (task_type == 'REDUCE'):
self._set_metrics_from_json(tags, task, MAPREDUCE_REDUCE_TASK_METRICS) | -522,691,520,259,828,400 | Get metrics for each MapReduce task
Return a dictionary of {task_id: 'tracking_url'} for each MapReduce task | checks.d/mapreduce.py | _mapreduce_task_metrics | WPMedia/dd-agent | python | def _mapreduce_task_metrics(self, running_jobs, addl_tags):
"\n Get metrics for each MapReduce task\n Return a dictionary of {task_id: 'tracking_url'} for each MapReduce task\n "
for (job_id, job_stats) in running_jobs.iteritems():
metrics_json = self._rest_request_to_json(job_stats['tracking_url'], 'tasks', MAPREDUCE_SERVICE_CHECK)
if metrics_json.get('tasks'):
if metrics_json['tasks'].get('task'):
for task in metrics_json['tasks']['task']:
task_type = task.get('type')
if task_type:
tags = [('app_name:' + job_stats['app_name']), ('user_name:' + job_stats['user_name']), ('job_name:' + job_stats['job_name']), ('task_type:' + str(task_type).lower())]
tags.extend(addl_tags)
if (task_type == 'MAP'):
self._set_metrics_from_json(tags, task, MAPREDUCE_MAP_TASK_METRICS)
elif (task_type == 'REDUCE'):
self._set_metrics_from_json(tags, task, MAPREDUCE_REDUCE_TASK_METRICS) |
def _set_metrics_from_json(self, tags, metrics_json, metrics):
'\n Parse the JSON response and set the metrics\n '
for (status, (metric_name, metric_type)) in metrics.iteritems():
metric_status = metrics_json.get(status)
if (metric_status is not None):
self._set_metric(metric_name, metric_type, metric_status, tags) | -362,926,577,410,417,300 | Parse the JSON response and set the metrics | checks.d/mapreduce.py | _set_metrics_from_json | WPMedia/dd-agent | python | def _set_metrics_from_json(self, tags, metrics_json, metrics):
'\n \n '
for (status, (metric_name, metric_type)) in metrics.iteritems():
metric_status = metrics_json.get(status)
if (metric_status is not None):
self._set_metric(metric_name, metric_type, metric_status, tags) |
def _set_metric(self, metric_name, metric_type, value, tags=None, device_name=None):
'\n Set a metric\n '
if (metric_type == HISTOGRAM):
self.histogram(metric_name, value, tags=tags, device_name=device_name)
elif (metric_type == INCREMENT):
self.increment(metric_name, value, tags=tags, device_name=device_name)
else:
self.log.error(('Metric type "%s" unknown' % metric_type)) | -4,149,084,100,854,976,500 | Set a metric | checks.d/mapreduce.py | _set_metric | WPMedia/dd-agent | python | def _set_metric(self, metric_name, metric_type, value, tags=None, device_name=None):
'\n \n '
if (metric_type == HISTOGRAM):
self.histogram(metric_name, value, tags=tags, device_name=device_name)
elif (metric_type == INCREMENT):
self.increment(metric_name, value, tags=tags, device_name=device_name)
else:
self.log.error(('Metric type "%s" unknown' % metric_type)) |
def _rest_request_to_json(self, address, object_path, service_name, *args, **kwargs):
'\n Query the given URL and return the JSON response\n '
response_json = None
service_check_tags = [('url:%s' % self._get_url_base(address))]
url = address
if object_path:
url = self._join_url_dir(url, object_path)
if args:
for directory in args:
url = self._join_url_dir(url, directory)
self.log.debug(('Attempting to connect to "%s"' % url))
if kwargs:
query = '&'.join(['{0}={1}'.format(key, value) for (key, value) in kwargs.iteritems()])
url = urljoin(url, ('?' + query))
try:
response = requests.get(url, timeout=self.default_integration_http_timeout)
response.raise_for_status()
response_json = response.json()
except Timeout as e:
self.service_check(service_name, AgentCheck.CRITICAL, tags=service_check_tags, message='Request timeout: {0}, {1}'.format(url, e))
raise
except (HTTPError, InvalidURL, ConnectionError) as e:
self.service_check(service_name, AgentCheck.CRITICAL, tags=service_check_tags, message='Request failed: {0}, {1}'.format(url, e))
raise
except JSONDecodeError as e:
self.service_check(service_name, AgentCheck.CRITICAL, tags=service_check_tags, message='JSON Parse failed: {0}, {1}'.format(url, e))
raise
except ValueError as e:
self.service_check(service_name, AgentCheck.CRITICAL, tags=service_check_tags, message=str(e))
raise
return response_json | -6,006,647,949,090,792,000 | Query the given URL and return the JSON response | checks.d/mapreduce.py | _rest_request_to_json | WPMedia/dd-agent | python | def _rest_request_to_json(self, address, object_path, service_name, *args, **kwargs):
'\n \n '
response_json = None
service_check_tags = [('url:%s' % self._get_url_base(address))]
url = address
if object_path:
url = self._join_url_dir(url, object_path)
if args:
for directory in args:
url = self._join_url_dir(url, directory)
self.log.debug(('Attempting to connect to "%s"' % url))
if kwargs:
query = '&'.join(['{0}={1}'.format(key, value) for (key, value) in kwargs.iteritems()])
url = urljoin(url, ('?' + query))
try:
response = requests.get(url, timeout=self.default_integration_http_timeout)
response.raise_for_status()
response_json = response.json()
except Timeout as e:
self.service_check(service_name, AgentCheck.CRITICAL, tags=service_check_tags, message='Request timeout: {0}, {1}'.format(url, e))
raise
except (HTTPError, InvalidURL, ConnectionError) as e:
self.service_check(service_name, AgentCheck.CRITICAL, tags=service_check_tags, message='Request failed: {0}, {1}'.format(url, e))
raise
except JSONDecodeError as e:
self.service_check(service_name, AgentCheck.CRITICAL, tags=service_check_tags, message='JSON Parse failed: {0}, {1}'.format(url, e))
raise
except ValueError as e:
self.service_check(service_name, AgentCheck.CRITICAL, tags=service_check_tags, message=str(e))
raise
return response_json |
def _join_url_dir(self, url, *args):
'\n Join a URL with multiple directories\n '
for path in args:
url = (url.rstrip('/') + '/')
url = urljoin(url, path.lstrip('/'))
return url | 8,838,647,529,342,381,000 | Join a URL with multiple directories | checks.d/mapreduce.py | _join_url_dir | WPMedia/dd-agent | python | def _join_url_dir(self, url, *args):
'\n \n '
for path in args:
url = (url.rstrip('/') + '/')
url = urljoin(url, path.lstrip('/'))
return url |
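For example (host and paths invented purely for illustration), `_join_url_dir('http://resourcemanager:8088', 'ws/v1/cluster', 'apps')` returns `'http://resourcemanager:8088/ws/v1/cluster/apps'`.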
def _get_url_base(self, url):
'\n Return the base of a URL\n '
s = urlsplit(url)
return urlunsplit([s.scheme, s.netloc, '', '', '']) | 8,414,673,978,274,218,000 | Return the base of a URL | checks.d/mapreduce.py | _get_url_base | WPMedia/dd-agent | python | def _get_url_base(self, url):
'\n \n '
s = urlsplit(url)
return urlunsplit([s.scheme, s.netloc, '', '', '']) |
def build_gui_help_add_sine_attr():
' Creates GUI for Make Stretchy IK '
window_name = 'build_gui_help_add_sine_attr'
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True)
cmds.window(window_name, title=(script_name + ' Help'), mnb=False, mxb=False, s=True)
cmds.window(window_name, e=True, s=True, wh=[1, 1])
cmds.columnLayout('main_column', p=window_name)
cmds.separator(h=12, style='none')
cmds.rowColumnLayout(nc=1, cw=[(1, 310)], cs=[(1, 10)], p='main_column')
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p='main_column')
cmds.text((script_name + ' Help'), bgc=[0.4, 0.4, 0.4], fn='boldLabelFont', align='center')
cmds.separator(h=10, style='none', p='main_column')
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p='main_column')
cmds.text(l='Create Sine attributes without using\nthird-party plugins or expressions.', align='center')
cmds.separator(h=5, style='none')
cmds.text(l='Select an object, then click on "Add Sine Attributes"', align='center')
cmds.separator(h=10, style='none')
cmds.text(l='Sine Attributes:', align='center', font='boldLabelFont')
cmds.text(l='Time: Multiplier for the time input (tick)', align='center')
cmds.text(l='Amplitude: Wave amplitude (how high it gets)', align='center')
cmds.text(l='Frequency: Wave frequency (how often it happens)', align='center')
cmds.text(l='Offset: Value added after calculation, offset.', align='center')
cmds.text(l='Tick: Time as seen by the sine system.', align='center')
cmds.text(l='Output: Result of the sine operation.', align='center')
cmds.text(l='Abs Output: Absolute output. (no negative values)', align='center')
cmds.separator(h=10, style='none')
cmds.separator(h=15, style='none')
cmds.rowColumnLayout(nc=2, cw=[(1, 140), (2, 140)], cs=[(1, 10), (2, 0)], p='main_column')
cmds.text('Guilherme Trevisan ')
cmds.text(l='<a href="mailto:[email protected]">[email protected]</a>', hl=True, highlightColor=[1, 1, 1])
cmds.rowColumnLayout(nc=2, cw=[(1, 140), (2, 140)], cs=[(1, 10), (2, 0)], p='main_column')
cmds.separator(h=15, style='none')
cmds.text(l='<a href="https://github.com/TrevisanGMW">Github</a>', hl=True, highlightColor=[1, 1, 1])
cmds.separator(h=7, style='none')
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p='main_column')
cmds.separator(h=10, style='none')
cmds.button(l='OK', h=30, c=(lambda args: close_help_gui()))
cmds.separator(h=8, style='none')
cmds.showWindow(window_name)
cmds.window(window_name, e=True, s=False)
qw = omui.MQtUtil.findWindow(window_name)
widget = wrapInstance(long(qw), QWidget)
icon = QIcon(':/question.png')
widget.setWindowIcon(icon)
def close_help_gui():
' Closes Help Window '
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True) | 8,131,861,237,518,420,000 | Creates GUI for Make Stretchy IK | python-scripts/gt_add_sine_attributes.py | build_gui_help_add_sine_attr | freemanpro/gt-tools | python | def build_gui_help_add_sine_attr():
' '
window_name = 'build_gui_help_add_sine_attr'
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True)
cmds.window(window_name, title=(script_name + ' Help'), mnb=False, mxb=False, s=True)
cmds.window(window_name, e=True, s=True, wh=[1, 1])
cmds.columnLayout('main_column', p=window_name)
cmds.separator(h=12, style='none')
cmds.rowColumnLayout(nc=1, cw=[(1, 310)], cs=[(1, 10)], p='main_column')
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p='main_column')
cmds.text((script_name + ' Help'), bgc=[0.4, 0.4, 0.4], fn='boldLabelFont', align='center')
cmds.separator(h=10, style='none', p='main_column')
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p='main_column')
cmds.text(l='Create Sine attributes without using\nthird-party plugins or expressions.', align='center')
cmds.separator(h=5, style='none')
cmds.text(l='Select an object, then click on "Add Sine Attributes"', align='center')
cmds.separator(h=10, style='none')
cmds.text(l='Sine Attributes:', align='center', font='boldLabelFont')
cmds.text(l='Time: Multiplier for the time input (tick)', align='center')
cmds.text(l='Amplitude: Wave amplitude (how high it gets)', align='center')
cmds.text(l='Frequency: Wave frequency (how often it happens)', align='center')
cmds.text(l='Offset: Value added after calculation, offset.', align='center')
cmds.text(l='Tick: Time as seen by the sine system.', align='center')
cmds.text(l='Output: Result of the sine operation.', align='center')
cmds.text(l='Abs Output: Absolute output. (no negative values)', align='center')
cmds.separator(h=10, style='none')
cmds.separator(h=15, style='none')
cmds.rowColumnLayout(nc=2, cw=[(1, 140), (2, 140)], cs=[(1, 10), (2, 0)], p='main_column')
cmds.text('Guilherme Trevisan ')
cmds.text(l='<a href="mailto:[email protected]">[email protected]</a>', hl=True, highlightColor=[1, 1, 1])
cmds.rowColumnLayout(nc=2, cw=[(1, 140), (2, 140)], cs=[(1, 10), (2, 0)], p='main_column')
cmds.separator(h=15, style='none')
cmds.text(l='<a href="https://github.com/TrevisanGMW">Github</a>', hl=True, highlightColor=[1, 1, 1])
cmds.separator(h=7, style='none')
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p='main_column')
cmds.separator(h=10, style='none')
cmds.button(l='OK', h=30, c=(lambda args: close_help_gui()))
cmds.separator(h=8, style='none')
cmds.showWindow(window_name)
cmds.window(window_name, e=True, s=False)
qw = omui.MQtUtil.findWindow(window_name)
widget = wrapInstance(long(qw), QWidget)
icon = QIcon(':/question.png')
widget.setWindowIcon(icon)
def close_help_gui():
' Closes Help Window '
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True) |
def add_sine_attributes(obj, sine_prefix='sine', tick_source_attr='time1.outTime', hide_unkeyable=True, add_absolute_output=False, nice_name_prefix=True):
' \n Create Sine function without using third-party plugins or expressions\n \n Parameters:\n obj (string): Name of the object\n sine (string): Prefix given to the name of the attributes (default is "sine")\n tick_source_attr (string): Name of the attribute used as the source for time. It uses the default "time1" node if nothing else is specified\n hide_unkeyable (bool): Hides the tick and output attributes\n add_absolute_output (bool): Also creates an output version that gives only positive numbers much like the abs() expression\n\n Returns:\n sine_output_attrs (list): A string with the name of the object and the name of the sine output attribute. E.g. "pSphere1.sineOutput"\n In case an absolute output is added, it will be the second object in the list. E.g. ["pSphere1.sineOutput", "pSphere1.sineAbsOutput"]\n If add_absolute_output is False the second attribute is None\n '
required_plugin = 'quatNodes'
if (not cmds.pluginInfo(required_plugin, q=True, loaded=True)):
cmds.loadPlugin(required_plugin, qt=False)
influence_suffix = 'Time'
amplitude_suffix = 'Amplitude'
frequency_suffix = 'Frequency'
offset_suffix = 'Offset'
output_suffix = 'Output'
tick_suffix = 'Tick'
abs_suffix = 'AbsOutput'
influence_attr = (sine_prefix + influence_suffix)
amplitude_attr = (sine_prefix + amplitude_suffix)
frequency_attr = (sine_prefix + frequency_suffix)
offset_attr = (sine_prefix + offset_suffix)
output_attr = (sine_prefix + output_suffix)
tick_attr = (sine_prefix + tick_suffix)
abs_attr = (sine_prefix + abs_suffix)
mdl_node = cmds.createNode('multDoubleLinear', name=(obj + '_multDoubleLiner'))
quat_node = cmds.createNode('eulerToQuat', name=(obj + '_eulerToQuat'))
multiply_node = cmds.createNode('multiplyDivide', name=(obj + '_amplitude_multiply'))
sum_node = cmds.createNode('plusMinusAverage', name=(obj + '_offset_sum'))
influence_multiply_node = cmds.createNode('multiplyDivide', name=(obj + '_influence_multiply'))
if nice_name_prefix:
cmds.addAttr(obj, ln=influence_attr, at='double', k=True, maxValue=1, minValue=0)
cmds.addAttr(obj, ln=amplitude_attr, at='double', k=True)
cmds.addAttr(obj, ln=frequency_attr, at='double', k=True)
cmds.addAttr(obj, ln=offset_attr, at='double', k=True)
cmds.addAttr(obj, ln=tick_attr, at='double', k=True)
cmds.addAttr(obj, ln=output_attr, at='double', k=True)
if add_absolute_output:
cmds.addAttr(obj, ln=abs_attr, at='double', k=True)
else:
cmds.addAttr(obj, ln=influence_attr, at='double', k=True, maxValue=1, minValue=0, nn=influence_suffix)
cmds.addAttr(obj, ln=amplitude_attr, at='double', k=True, nn=amplitude_suffix)
cmds.addAttr(obj, ln=frequency_attr, at='double', k=True, nn=frequency_suffix)
cmds.addAttr(obj, ln=offset_attr, at='double', k=True, nn=offset_suffix)
cmds.addAttr(obj, ln=tick_attr, at='double', k=True, nn=tick_suffix)
cmds.addAttr(obj, ln=output_attr, at='double', k=True, nn=output_suffix)
if add_absolute_output:
cmds.addAttr(obj, ln=abs_attr, at='double', k=True, nn=re.sub('(\\w)([A-Z])', '\\1 \\2', abs_suffix))
cmds.setAttr(((obj + '.') + influence_attr), 1)
cmds.setAttr(((obj + '.') + amplitude_attr), 1)
cmds.setAttr(((obj + '.') + frequency_attr), 10)
if hide_unkeyable:
cmds.setAttr(((obj + '.') + tick_attr), k=False)
cmds.setAttr(((obj + '.') + output_attr), k=False)
if (add_absolute_output and hide_unkeyable):
cmds.setAttr(((obj + '.') + abs_attr), k=False)
cmds.connectAttr(tick_source_attr, (influence_multiply_node + '.input1X'))
cmds.connectAttr((influence_multiply_node + '.outputX'), ((obj + '.') + tick_attr))
cmds.connectAttr(((obj + '.') + influence_attr), (influence_multiply_node + '.input2X'))
cmds.connectAttr(((obj + '.') + amplitude_attr), (multiply_node + '.input2X'))
cmds.connectAttr(((obj + '.') + frequency_attr), (mdl_node + '.input1'))
cmds.connectAttr(((obj + '.') + tick_attr), (mdl_node + '.input2'))
cmds.connectAttr(((obj + '.') + offset_attr), (sum_node + '.input1D[0]'))
cmds.connectAttr((mdl_node + '.output'), (quat_node + '.inputRotateX'))
cmds.connectAttr((quat_node + '.outputQuatX'), (multiply_node + '.input1X'))
cmds.connectAttr((multiply_node + '.outputX'), (sum_node + '.input1D[1]'))
cmds.connectAttr((sum_node + '.output1D'), ((obj + '.') + output_attr))
if add_absolute_output:
squared_node = cmds.createNode('multiplyDivide', name=(obj + '_abs_squared'))
reverse_squared_node = cmds.createNode('multiplyDivide', name=(obj + '_reverseAbs_multiply'))
cmds.setAttr((squared_node + '.operation'), 3)
cmds.setAttr((reverse_squared_node + '.operation'), 3)
cmds.setAttr((squared_node + '.input2X'), 2)
cmds.setAttr((reverse_squared_node + '.input2X'), 0.5)
cmds.connectAttr(((obj + '.') + output_attr), (squared_node + '.input1X'))
cmds.connectAttr((squared_node + '.outputX'), (reverse_squared_node + '.input1X'))
cmds.connectAttr((reverse_squared_node + '.outputX'), ((obj + '.') + abs_attr))
return [((obj + '.') + output_attr), ((obj + '.') + abs_attr)]
else:
return [((obj + '.') + output_attr), None] | -4,182,535,674,872,841,000 | Create Sine function without using third-party plugins or expressions
Parameters:
obj (string): Name of the object
sine (string): Prefix given to the name of the attributes (default is "sine")
tick_source_attr (string): Name of the attribute used as the source for time. It uses the default "time1" node if nothing else is specified
hide_unkeyable (bool): Hides the tick and output attributes
add_absolute_output (bool): Also creates an output version that gives only positive numbers much like the abs() expression
Returns:
sine_output_attrs (list): A string with the name of the object and the name of the sine output attribute. E.g. "pSphere1.sineOutput"
In case an absolute output is added, it will be the second object in the list. E.g. ["pSphere1.sineOutput", "pSphere1.sineAbsOutput"]
If add_absolute_output is False the second attribute is None | python-scripts/gt_add_sine_attributes.py | add_sine_attributes | freemanpro/gt-tools | python | def add_sine_attributes(obj, sine_prefix='sine', tick_source_attr='time1.outTime', hide_unkeyable=True, add_absolute_output=False, nice_name_prefix=True):
' \n Create Sine function without using third-party plugins or expressions\n \n Parameters:\n obj (string): Name of the object\n sine (string): Prefix given to the name of the attributes (default is "sine")\n tick_source_attr (string): Name of the attribute used as the source for time. It uses the default "time1" node if nothing else is specified\n hide_unkeyable (bool): Hides the tick and output attributes\n add_absolute_output (bool): Also creates an output version that gives only positive numbers much like the abs() expression\n\n Returns:\n sine_output_attrs (list): A string with the name of the object and the name of the sine output attribute. E.g. "pSphere1.sineOutput"\n In case an absolute output is added, it will be the second object in the list. E.g. ["pSphere1.sineOutput", "pSphere1.sineAbsOutput"]\n If add_absolute_output is False the second attribute is None\n '
required_plugin = 'quatNodes'
if (not cmds.pluginInfo(required_plugin, q=True, loaded=True)):
cmds.loadPlugin(required_plugin, qt=False)
influence_suffix = 'Time'
amplitude_suffix = 'Amplitude'
frequency_suffix = 'Frequency'
offset_suffix = 'Offset'
output_suffix = 'Output'
tick_suffix = 'Tick'
abs_suffix = 'AbsOutput'
influence_attr = (sine_prefix + influence_suffix)
amplitude_attr = (sine_prefix + amplitude_suffix)
frequency_attr = (sine_prefix + frequency_suffix)
offset_attr = (sine_prefix + offset_suffix)
output_attr = (sine_prefix + output_suffix)
tick_attr = (sine_prefix + tick_suffix)
abs_attr = (sine_prefix + abs_suffix)
mdl_node = cmds.createNode('multDoubleLinear', name=(obj + '_multDoubleLiner'))
quat_node = cmds.createNode('eulerToQuat', name=(obj + '_eulerToQuat'))
multiply_node = cmds.createNode('multiplyDivide', name=(obj + '_amplitude_multiply'))
sum_node = cmds.createNode('plusMinusAverage', name=(obj + '_offset_sum'))
influence_multiply_node = cmds.createNode('multiplyDivide', name=(obj + '_influence_multiply'))
if nice_name_prefix:
cmds.addAttr(obj, ln=influence_attr, at='double', k=True, maxValue=1, minValue=0)
cmds.addAttr(obj, ln=amplitude_attr, at='double', k=True)
cmds.addAttr(obj, ln=frequency_attr, at='double', k=True)
cmds.addAttr(obj, ln=offset_attr, at='double', k=True)
cmds.addAttr(obj, ln=tick_attr, at='double', k=True)
cmds.addAttr(obj, ln=output_attr, at='double', k=True)
if add_absolute_output:
cmds.addAttr(obj, ln=abs_attr, at='double', k=True)
else:
cmds.addAttr(obj, ln=influence_attr, at='double', k=True, maxValue=1, minValue=0, nn=influence_suffix)
cmds.addAttr(obj, ln=amplitude_attr, at='double', k=True, nn=amplitude_suffix)
cmds.addAttr(obj, ln=frequency_attr, at='double', k=True, nn=frequency_suffix)
cmds.addAttr(obj, ln=offset_attr, at='double', k=True, nn=offset_suffix)
cmds.addAttr(obj, ln=tick_attr, at='double', k=True, nn=tick_suffix)
cmds.addAttr(obj, ln=output_attr, at='double', k=True, nn=output_suffix)
if add_absolute_output:
cmds.addAttr(obj, ln=abs_attr, at='double', k=True, nn=re.sub('(\\w)([A-Z])', '\\1 \\2', abs_suffix))
cmds.setAttr(((obj + '.') + influence_attr), 1)
cmds.setAttr(((obj + '.') + amplitude_attr), 1)
cmds.setAttr(((obj + '.') + frequency_attr), 10)
if hide_unkeyable:
cmds.setAttr(((obj + '.') + tick_attr), k=False)
cmds.setAttr(((obj + '.') + output_attr), k=False)
if (add_absolute_output and hide_unkeyable):
cmds.setAttr(((obj + '.') + abs_attr), k=False)
cmds.connectAttr(tick_source_attr, (influence_multiply_node + '.input1X'))
cmds.connectAttr((influence_multiply_node + '.outputX'), ((obj + '.') + tick_attr))
cmds.connectAttr(((obj + '.') + influence_attr), (influence_multiply_node + '.input2X'))
cmds.connectAttr(((obj + '.') + amplitude_attr), (multiply_node + '.input2X'))
cmds.connectAttr(((obj + '.') + frequency_attr), (mdl_node + '.input1'))
cmds.connectAttr(((obj + '.') + tick_attr), (mdl_node + '.input2'))
cmds.connectAttr(((obj + '.') + offset_attr), (sum_node + '.input1D[0]'))
cmds.connectAttr((mdl_node + '.output'), (quat_node + '.inputRotateX'))
cmds.connectAttr((quat_node + '.outputQuatX'), (multiply_node + '.input1X'))
cmds.connectAttr((multiply_node + '.outputX'), (sum_node + '.input1D[1]'))
cmds.connectAttr((sum_node + '.output1D'), ((obj + '.') + output_attr))
if add_absolute_output:
squared_node = cmds.createNode('multiplyDivide', name=(obj + '_abs_squared'))
reverse_squared_node = cmds.createNode('multiplyDivide', name=(obj + '_reverseAbs_multiply'))
cmds.setAttr((squared_node + '.operation'), 3)
cmds.setAttr((reverse_squared_node + '.operation'), 3)
cmds.setAttr((squared_node + '.input2X'), 2)
cmds.setAttr((reverse_squared_node + '.input2X'), 0.5)
cmds.connectAttr(((obj + '.') + output_attr), (squared_node + '.input1X'))
cmds.connectAttr((squared_node + '.outputX'), (reverse_squared_node + '.input1X'))
cmds.connectAttr((reverse_squared_node + '.outputX'), ((obj + '.') + abs_attr))
return [((obj + '.') + output_attr), ((obj + '.') + abs_attr)]
else:
return [((obj + '.') + output_attr), None] |
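A minimal usage sketch for `add_sine_attributes`, assuming the function above is in scope inside Maya and that the scene contains a `pSphere1` transform (object and channel names are illustrative only):

```python
import maya.cmds as cmds

# 'pSphere1' is assumed to exist in the scene; any transform works.
output_attr, abs_attr = add_sine_attributes('pSphere1',
                                            sine_prefix='sine',
                                            add_absolute_output=True)
# Per the docstring, output_attr == 'pSphere1.sineOutput' and
# abs_attr == 'pSphere1.sineAbsOutput'; either can drive another channel.
cmds.connectAttr(output_attr, 'pSphere1.translateY')
```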
def validate_operation():
' Checks elements one last time before running the script '
is_valid = False
stretchy_name = None
add_abs_output_value = cmds.checkBox(add_abs_output_chkbox, q=True, value=True)
add_prefix_nn_value = cmds.checkBox(add_prefix_nn_chkbox, q=True, value=True)
stretchy_prefix = cmds.textField(stretchy_system_prefix, q=True, text=True).replace(' ', '')
selection = (cmds.ls(selection=True) or [])
if (len(selection) > 0):
target = selection[0]
is_valid = True
else:
cmds.warning('Please select a target object to be the attribute holder.')
is_valid = False
if (stretchy_prefix != ''):
stretchy_name = stretchy_prefix
else:
stretchy_name = 'sine'
if is_valid:
current_attributes = (cmds.listAttr(target, r=True, s=True, userDefined=True) or [])
possible_conflicts = []
possible_conflicts.append((stretchy_name + 'Time'))
possible_conflicts.append((stretchy_name + 'Amplitude'))
possible_conflicts.append((stretchy_name + 'Frequency'))
possible_conflicts.append((stretchy_name + 'Offset'))
possible_conflicts.append((stretchy_name + 'Output'))
possible_conflicts.append((stretchy_name + 'Tick'))
possible_conflicts.append((stretchy_name + 'AbsOutput'))
for conflict in possible_conflicts:
for attr in current_attributes:
if (attr == conflict):
is_valid = False
if (not is_valid):
cmds.warning('The object selected has conflicting attributes. Please change the prefix or select another object.')
if is_valid:
if stretchy_name:
add_sine_attributes(target, sine_prefix=stretchy_name, tick_source_attr='time1.outTime', hide_unkeyable=False, add_absolute_output=add_abs_output_value, nice_name_prefix=add_prefix_nn_value)
cmds.select(target, r=True)
else:
add_sine_attributes(target, sine_prefix=stretchy_name, tick_source_attr='time1.outTime', hide_unkeyable=False, add_absolute_output=add_abs_output_value, nice_name_prefix=add_prefix_nn_value)
cmds.select(target, r=True) | -4,784,751,434,494,775,000 | Checks elements one last time before running the script | python-scripts/gt_add_sine_attributes.py | validate_operation | freemanpro/gt-tools | python | def validate_operation():
' '
is_valid = False
stretchy_name = None
add_abs_output_value = cmds.checkBox(add_abs_output_chkbox, q=True, value=True)
add_prefix_nn_value = cmds.checkBox(add_prefix_nn_chkbox, q=True, value=True)
stretchy_prefix = cmds.textField(stretchy_system_prefix, q=True, text=True).replace(' ', '')
selection = (cmds.ls(selection=True) or [])
if (len(selection) > 0):
target = selection[0]
is_valid = True
else:
cmds.warning('Please select a target object to be the attribute holder.')
is_valid = False
if (stretchy_prefix != ''):
stretchy_name = stretchy_prefix
else:
stretchy_name = 'sine'
if is_valid:
current_attributes = (cmds.listAttr(target, r=True, s=True, userDefined=True) or [])
possible_conflicts = []
possible_conflicts.append((stretchy_name + 'Time'))
possible_conflicts.append((stretchy_name + 'Amplitude'))
possible_conflicts.append((stretchy_name + 'Frequency'))
possible_conflicts.append((stretchy_name + 'Offset'))
possible_conflicts.append((stretchy_name + 'Output'))
possible_conflicts.append((stretchy_name + 'Tick'))
possible_conflicts.append((stretchy_name + 'AbsOutput'))
for conflict in possible_conflicts:
for attr in current_attributes:
if (attr == conflict):
is_valid = False
if (not is_valid):
cmds.warning('The object selected has conflicting attributes. Please change the prefix or select another object.')
if is_valid:
if stretchy_name:
add_sine_attributes(target, sine_prefix=stretchy_name, tick_source_attr='time1.outTime', hide_unkeyable=False, add_absolute_output=add_abs_output_value, nice_name_prefix=add_prefix_nn_value)
cmds.select(target, r=True)
else:
add_sine_attributes(target, sine_prefix=stretchy_name, tick_source_attr='time1.outTime', hide_unkeyable=False, add_absolute_output=add_abs_output_value, nice_name_prefix=add_prefix_nn_value)
cmds.select(target, r=True) |
def close_help_gui():
' Closes Help Window '
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True) | -6,909,947,570,174,779,000 | Closes Help Window | python-scripts/gt_add_sine_attributes.py | close_help_gui | freemanpro/gt-tools | python | def close_help_gui():
' '
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True) |
@event('manager.startup')
def init_parsers(manager):
'Prepare our list of parsing plugins and default parsers.'
for parser_type in PARSER_TYPES:
parsers[parser_type] = {}
for p in plugin.get_plugins(group=(parser_type + '_parser')):
parsers[parser_type][p.name.replace('parser_', '')] = p.instance
func_name = ('parse_' + parser_type)
default_parsers[parser_type] = max(iter(parsers[parser_type].items()), key=(lambda p: getattr(getattr(p[1], func_name), 'priority', 0)))[0]
log.debug(('setting default %s parser to %s. (options: %s)' % (parser_type, default_parsers[parser_type], parsers[parser_type]))) | 8,651,478,703,859,171,000 | Prepare our list of parsing plugins and default parsers. | flexget/plugins/parsers/plugin_parsing.py | init_parsers | jbones89/Flexget | python | @event('manager.startup')
def init_parsers(manager):
for parser_type in PARSER_TYPES:
parsers[parser_type] = {}
for p in plugin.get_plugins(group=(parser_type + '_parser')):
parsers[parser_type][p.name.replace('parser_', '')] = p.instance
func_name = ('parse_' + parser_type)
default_parsers[parser_type] = max(iter(parsers[parser_type].items()), key=(lambda p: getattr(getattr(p[1], func_name), 'priority', 0)))[0]
log.debug(('setting default %s parser to %s. (options: %s)' % (parser_type, default_parsers[parser_type], parsers[parser_type]))) |
def parse_series(self, data, name=None, **kwargs):
'\n Use the selected series parser to parse series information from `data`\n\n :param data: The raw string to parse information from.\n :param name: The series name to parse data for. If not supplied, parser will attempt to guess series name\n automatically from `data`.\n\n :returns: An object containing the parsed information. The `valid` attribute will be set depending on success.\n '
parser = parsers['series'][selected_parsers.get('series', default_parsers.get('series'))]
return parser.parse_series(data, name=name, **kwargs) | 5,577,953,992,979,921,000 | Use the selected series parser to parse series information from `data`
:param data: The raw string to parse information from.
:param name: The series name to parse data for. If not supplied, parser will attempt to guess series name
automatically from `data`.
:returns: An object containing the parsed information. The `valid` attribute will be set depending on success. | flexget/plugins/parsers/plugin_parsing.py | parse_series | jbones89/Flexget | python | def parse_series(self, data, name=None, **kwargs):
'\n Use the selected series parser to parse series information from `data`\n\n :param data: The raw string to parse information from.\n :param name: The series name to parse data for. If not supplied, parser will attempt to guess series name\n automatically from `data`.\n\n :returns: An object containing the parsed information. The `valid` attribute will be set depending on success.\n '
parser = parsers['series'][selected_parsers.get('series', default_parsers.get('series'))]
return parser.parse_series(data, name=name, **kwargs) |
def parse_movie(self, data, **kwargs):
'\n Use the selected movie parser to parse movie information from `data`\n\n :param data: The raw string to parse information from\n\n :returns: An object containing the parsed information. The `valid` attribute will be set depending on success.\n '
parser = parsers['movie'][(selected_parsers.get('movie') or default_parsers['movie'])]
return parser.parse_movie(data, **kwargs) | 3,685,681,231,583,774,700 | Use the selected movie parser to parse movie information from `data`
:param data: The raw string to parse information from
:returns: An object containing the parsed information. The `valid` attribute will be set depending on success. | flexget/plugins/parsers/plugin_parsing.py | parse_movie | jbones89/Flexget | python | def parse_movie(self, data, **kwargs):
'\n Use the selected movie parser to parse movie information from `data`\n\n :param data: The raw string to parse information from\n\n :returns: An object containing the parsed information. The `valid` attribute will be set depending on success.\n '
parser = parsers['movie'][(selected_parsers.get('movie') or default_parsers['movie'])]
return parser.parse_movie(data, **kwargs) |
def save_users(users, filename='output.csv'):
"Save users out to a .csv file\n\n Each row will represent a user UID, following by all the user's students\n (if the user has any)\n\n INPUT:\n > users: set of User objects\n > filename: filename to save .csv to."
with open(filename, 'w') as file:
for (count, user) in enumerate(users):
file.write(str(user.get_uid()))
for student in user.get_students():
file.write((',' + str(student.get_uid())))
file.write('\n')
if ((count % 100) == 0):
file.flush()
return | -6,584,503,492,924,957,000 | Save users out to a .csv file
Each row will represent a user UID, followed by all the user's students
(if the user has any)
INPUT:
> users: set of User objects
> filename: filename to save .csv to. | save_load.py | save_users | Garrett-R/infections | python | def save_users(users, filename='output.csv'):
"Save users out to a .csv file\n\n Each row will represent a user UID, following by all the user's students\n (if the user has any)\n\n INPUT:\n > users: set of User objects\n > filename: filename to save .csv to."
with open(filename, 'w') as file:
for (count, user) in enumerate(users):
file.write(str(user.get_uid()))
for student in user.get_students():
file.write((',' + str(student.get_uid())))
file.write('\n')
if ((count % 100) == 0):
file.flush()
return |
def load_users(filename):
"Load users from a .csv file\n\n Each row will represent a user uid, following by all the user's student\n (if the user has any). Note: the uid is not assumed to be an integer,\n so it read in as a string, which shouldn't matter anyway.\n\n TODO: we could probably speed this up by loading multiple lines at a time.\n\n INPUT:\n > filename: filename to read .csv from\n\n RETURN:\n > users: a set of User objects"
users = dict()
with open(filename, 'r') as file:
for line in file:
line = line.split('\n')[0]
split_line = line.split(',')
new_uid = _try_converting_to_int(split_line[0])
new_user = User(new_uid)
users.update({new_user.get_uid(): new_user})
with open(filename, 'r') as file:
for line in file:
line = line.split('\n')[0]
split_line = line.split(',')
current_uid = _try_converting_to_int(split_line[0])
for student_uid in split_line[1:]:
student_uid = _try_converting_to_int(student_uid)
users[current_uid].add_students(users[student_uid])
return set(users.values()) | 6,156,269,794,225,563,000 | Load users from a .csv file
Each row will represent a user uid, followed by all the user's students
(if the user has any). Note: the uid is not assumed to be an integer,
so it is read in as a string, which shouldn't matter anyway.
TODO: we could probably speed this up by loading multiple lines at a time.
INPUT:
> filename: filename to read .csv from
RETURN:
> users: a set of User objects | save_load.py | load_users | Garrett-R/infections | python | def load_users(filename):
"Load users from a .csv file\n\n Each row will represent a user uid, following by all the user's student\n (if the user has any). Note: the uid is not assumed to be an integer,\n so it read in as a string, which shouldn't matter anyway.\n\n TODO: we could probably speed this up by loading multiple lines at a time.\n\n INPUT:\n > filename: filename to read .csv from\n\n RETURN:\n > users: a set of User objects"
users = dict()
with open(filename, 'r') as file:
for line in file:
line = line.split('\n')[0]
split_line = line.split(',')
new_uid = _try_converting_to_int(split_line[0])
new_user = User(new_uid)
users.update({new_user.get_uid(): new_user})
with open(filename, 'r') as file:
for line in file:
line = line.split('\n')[0]
split_line = line.split(',')
current_uid = _try_converting_to_int(split_line[0])
for student_uid in split_line[1:]:
student_uid = _try_converting_to_int(student_uid)
users[current_uid].add_students(users[student_uid])
return set(users.values()) |
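The row format that save_users writes and load_users reads back is simply 'uid,student_uid,student_uid,...'. A small round-trip sketch of that layout, using a hypothetical minimal User class with only the get_uid/get_students/add_students methods the functions above rely on:

import io

class User:  # hypothetical stand-in exposing only what save_users/load_users need
    def __init__(self, uid):
        self._uid = uid
        self._students = set()
    def get_uid(self):
        return self._uid
    def get_students(self):
        return self._students
    def add_students(self, student):
        self._students.add(student)

teacher, pupil = User(1), User(2)
teacher.add_students(pupil)

# Write one row per user: uid first, then the uids of that user's students.
buf = io.StringIO()
for user in (teacher, pupil):
    row = [user.get_uid()] + [s.get_uid() for s in user.get_students()]
    buf.write(','.join(str(x) for x in row) + '\n')

print(buf.getvalue())  # "1,2\n2\n"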
def check_best(self, metric_dict):
'\n Hook function, called after metrics are calculated\n '
if (metric_dict['bl_acc'] > self.best_value):
if (self.iters > 0):
LOGGER.text(f"Evaluation improved from {self.best_value} to {metric_dict['bl_acc']}", level=LoggerObserver.INFO)
self.best_value = metric_dict['bl_acc']
self.save_checkpoint('best')
elif self.visualize_when_val:
self.visualize_pred() | 969,269,354,907,937,000 | Hook function, called after metrics are calculated | theseus/classification/trainer/trainer.py | check_best | lannguyen0910/theseus | python | def check_best(self, metric_dict):
'\n \n '
if (metric_dict['bl_acc'] > self.best_value):
if (self.iters > 0):
LOGGER.text(f"Evaluation improved from {self.best_value} to {metric_dict['bl_acc']}", level=LoggerObserver.INFO)
self.best_value = metric_dict['bl_acc']
self.save_checkpoint('best')
elif self.visualize_when_val:
self.visualize_pred() |
def save_checkpoint(self, outname='last'):
'\n Save all information of the current iteration\n '
weights = {'model': self.model.model.state_dict(), 'optimizer': self.optimizer.state_dict(), 'iters': self.iters, 'best_value': self.best_value}
if (self.scaler is not None):
weights[self.scaler.state_dict_key] = self.scaler.state_dict()
self.checkpoint.save(weights, outname) | -3,763,531,432,588,769,300 | Save all information of the current iteration | theseus/classification/trainer/trainer.py | save_checkpoint | lannguyen0910/theseus | python | def save_checkpoint(self, outname='last'):
'\n \n '
weights = {'model': self.model.model.state_dict(), 'optimizer': self.optimizer.state_dict(), 'iters': self.iters, 'best_value': self.best_value}
if (self.scaler is not None):
weights[self.scaler.state_dict_key] = self.scaler.state_dict()
self.checkpoint.save(weights, outname) |
def load_checkpoint(self, path: str):
'\n Load all information the current iteration from checkpoint \n '
LOGGER.text('Loading checkpoints...', level=LoggerObserver.INFO)
state_dict = torch.load(path, map_location='cpu')
self.iters = load_state_dict(self.iters, state_dict, 'iters')
self.best_value = load_state_dict(self.best_value, state_dict, 'best_value')
self.scaler = load_state_dict(self.scaler, state_dict, self.scaler.state_dict_key) | -1,633,654,067,348,363,500 | Load all information the current iteration from checkpoint | theseus/classification/trainer/trainer.py | load_checkpoint | lannguyen0910/theseus | python | def load_checkpoint(self, path: str):
'\n \n '
LOGGER.text('Loading checkpoints...', level=LoggerObserver.INFO)
state_dict = torch.load(path, map_location='cpu')
self.iters = load_state_dict(self.iters, state_dict, 'iters')
self.best_value = load_state_dict(self.best_value, state_dict, 'best_value')
self.scaler = load_state_dict(self.scaler, state_dict, self.scaler.state_dict_key) |
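The checkpoint produced by save_checkpoint and consumed by load_checkpoint is just a dict with 'model', 'optimizer', 'iters' and 'best_value' keys. A stripped-down sketch of the same round trip using plain torch.save/torch.load in place of theseus' Checkpoint and load_state_dict helpers:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# Bundle everything needed to resume training into a single dict.
weights = {
    'model': model.state_dict(),
    'optimizer': optimizer.state_dict(),
    'iters': 1000,
    'best_value': 0.87,
}
torch.save(weights, 'last.pth')

# Resume: restore each piece from the saved dict.
state_dict = torch.load('last.pth', map_location='cpu')
model.load_state_dict(state_dict['model'])
optimizer.load_state_dict(state_dict['optimizer'])
iters = state_dict.get('iters', 0)
best_value = state_dict.get('best_value', 0.0)
print(iters, best_value)  # 1000 0.87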
def visualize_gt(self):
'\n Visualize dataloader for sanity check \n '
LOGGER.text('Visualizing dataset...', level=LoggerObserver.DEBUG)
visualizer = Visualizer()
batch = next(iter(self.trainloader))
images = batch['inputs']
batch = []
for (idx, inputs) in enumerate(images):
img_show = visualizer.denormalize(inputs)
img_cam = TFF.to_tensor(img_show)
batch.append(img_cam)
grid_img = visualizer.make_grid(batch)
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
plt.imshow(grid_img)
plt.tight_layout(pad=0)
LOGGER.log([{'tag': 'Sanitycheck/batch/train', 'value': fig, 'type': LoggerObserver.FIGURE, 'kwargs': {'step': self.iters}}])
batch = next(iter(self.valloader))
images = batch['inputs']
batch = []
for (idx, inputs) in enumerate(images):
img_show = visualizer.denormalize(inputs)
img_cam = TFF.to_tensor(img_show)
batch.append(img_cam)
grid_img = visualizer.make_grid(batch)
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
plt.imshow(grid_img)
plt.tight_layout(pad=0)
LOGGER.log([{'tag': 'Sanitycheck/batch/val', 'value': fig, 'type': LoggerObserver.FIGURE, 'kwargs': {'step': self.iters}}]) | 7,366,678,842,902,099,000 | Visualize dataloader for sanity check | theseus/classification/trainer/trainer.py | visualize_gt | lannguyen0910/theseus | python | def visualize_gt(self):
'\n \n '
LOGGER.text('Visualizing dataset...', level=LoggerObserver.DEBUG)
visualizer = Visualizer()
batch = next(iter(self.trainloader))
images = batch['inputs']
batch = []
for (idx, inputs) in enumerate(images):
img_show = visualizer.denormalize(inputs)
img_cam = TFF.to_tensor(img_show)
batch.append(img_cam)
grid_img = visualizer.make_grid(batch)
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
plt.imshow(grid_img)
plt.tight_layout(pad=0)
LOGGER.log([{'tag': 'Sanitycheck/batch/train', 'value': fig, 'type': LoggerObserver.FIGURE, 'kwargs': {'step': self.iters}}])
batch = next(iter(self.valloader))
images = batch['inputs']
batch = []
for (idx, inputs) in enumerate(images):
img_show = visualizer.denormalize(inputs)
img_cam = TFF.to_tensor(img_show)
batch.append(img_cam)
grid_img = visualizer.make_grid(batch)
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
plt.imshow(grid_img)
plt.tight_layout(pad=0)
LOGGER.log([{'tag': 'Sanitycheck/batch/val', 'value': fig, 'type': LoggerObserver.FIGURE, 'kwargs': {'step': self.iters}}]) |
@torch.enable_grad()
def visualize_pred(self):
'Visualize model prediction and CAM\n \n '
LOGGER.text('Visualizing model predictions...', level=LoggerObserver.DEBUG)
visualizer = Visualizer()
batch = next(iter(self.valloader))
images = batch['inputs']
targets = batch['targets']
self.model.eval()
model_name = self.model.model.name
grad_cam = CAMWrapper.get_method(name='gradcam', model=self.model.model.get_model(), model_name=model_name, use_cuda=next(self.model.parameters()).is_cuda)
(grayscale_cams, label_indices, scores) = grad_cam(images, return_probs=True)
gradcam_batch = []
pred_batch = []
for idx in range(len(grayscale_cams)):
image = images[idx]
target = targets[idx].item()
label = label_indices[idx]
grayscale_cam = grayscale_cams[idx, :]
score = scores[idx]
img_show = visualizer.denormalize(image)
visualizer.set_image(img_show)
if (self.valloader.dataset.classnames is not None):
label = self.valloader.dataset.classnames[label]
target = self.valloader.dataset.classnames[target]
if (label == target):
color = [0, 1, 0]
else:
color = [1, 0, 0]
visualizer.draw_label(f'''GT: {target}
P: {label}
C: {score:.4f}''', fontColor=color, fontScale=0.8, thickness=2, outline=None, offset=100)
img_cam = show_cam_on_image(img_show, grayscale_cam, use_rgb=True)
img_cam = TFF.to_tensor(img_cam)
gradcam_batch.append(img_cam)
pred_img = visualizer.get_image()
pred_img = TFF.to_tensor(pred_img)
pred_batch.append(pred_img)
if (idx == 63):
break
gradcam_grid_img = visualizer.make_grid(gradcam_batch)
fig = plt.figure(figsize=(8, 8))
plt.imshow(gradcam_grid_img)
plt.axis('off')
plt.tight_layout(pad=0)
LOGGER.log([{'tag': 'Validation/gradcam', 'value': fig, 'type': LoggerObserver.FIGURE, 'kwargs': {'step': self.iters}}])
pred_grid_img = visualizer.make_grid(pred_batch)
fig = plt.figure(figsize=(10, 10))
plt.imshow(pred_grid_img)
plt.axis('off')
plt.tight_layout(pad=0)
LOGGER.log([{'tag': 'Validation/prediction', 'value': fig, 'type': LoggerObserver.FIGURE, 'kwargs': {'step': self.iters}}])
self.optimizer.zero_grad() | -2,684,438,551,393,966,000 | Visualize model prediction and CAM | theseus/classification/trainer/trainer.py | visualize_pred | lannguyen0910/theseus | python | @torch.enable_grad()
def visualize_pred(self):
'\n \n '
LOGGER.text('Visualizing model predictions...', level=LoggerObserver.DEBUG)
visualizer = Visualizer()
batch = next(iter(self.valloader))
images = batch['inputs']
targets = batch['targets']
self.model.eval()
model_name = self.model.model.name
grad_cam = CAMWrapper.get_method(name='gradcam', model=self.model.model.get_model(), model_name=model_name, use_cuda=next(self.model.parameters()).is_cuda)
(grayscale_cams, label_indices, scores) = grad_cam(images, return_probs=True)
gradcam_batch = []
pred_batch = []
for idx in range(len(grayscale_cams)):
image = images[idx]
target = targets[idx].item()
label = label_indices[idx]
grayscale_cam = grayscale_cams[idx, :]
score = scores[idx]
img_show = visualizer.denormalize(image)
visualizer.set_image(img_show)
if (self.valloader.dataset.classnames is not None):
label = self.valloader.dataset.classnames[label]
target = self.valloader.dataset.classnames[target]
if (label == target):
color = [0, 1, 0]
else:
color = [1, 0, 0]
visualizer.draw_label(f'''GT: {target}
P: {label}
C: {score:.4f}''', fontColor=color, fontScale=0.8, thickness=2, outline=None, offset=100)
img_cam = show_cam_on_image(img_show, grayscale_cam, use_rgb=True)
img_cam = TFF.to_tensor(img_cam)
gradcam_batch.append(img_cam)
pred_img = visualizer.get_image()
pred_img = TFF.to_tensor(pred_img)
pred_batch.append(pred_img)
if (idx == 63):
break
gradcam_grid_img = visualizer.make_grid(gradcam_batch)
fig = plt.figure(figsize=(8, 8))
plt.imshow(gradcam_grid_img)
plt.axis('off')
plt.tight_layout(pad=0)
LOGGER.log([{'tag': 'Validation/gradcam', 'value': fig, 'type': LoggerObserver.FIGURE, 'kwargs': {'step': self.iters}}])
pred_grid_img = visualizer.make_grid(pred_batch)
fig = plt.figure(figsize=(10, 10))
plt.imshow(pred_grid_img)
plt.axis('off')
plt.tight_layout(pad=0)
LOGGER.log([{'tag': 'Validation/prediction', 'value': fig, 'type': LoggerObserver.FIGURE, 'kwargs': {'step': self.iters}}])
self.optimizer.zero_grad() |
def analyze_gt(self):
'\n Perform simple data analysis\n '
LOGGER.text('Analyzing datasets...', level=LoggerObserver.DEBUG)
analyzer = ClassificationAnalyzer()
analyzer.add_dataset(self.trainloader.dataset)
fig = analyzer.analyze(figsize=(10, 5))
LOGGER.log([{'tag': 'Sanitycheck/analysis/train', 'value': fig, 'type': LoggerObserver.FIGURE, 'kwargs': {'step': self.iters}}])
analyzer = ClassificationAnalyzer()
analyzer.add_dataset(self.valloader.dataset)
fig = analyzer.analyze(figsize=(10, 5))
LOGGER.log([{'tag': 'Sanitycheck/analysis/val', 'value': fig, 'type': LoggerObserver.FIGURE, 'kwargs': {'step': self.iters}}]) | 2,394,997,057,899,787,000 | Perform simple data analysis | theseus/classification/trainer/trainer.py | analyze_gt | lannguyen0910/theseus | python | def analyze_gt(self):
'\n \n '
LOGGER.text('Analyzing datasets...', level=LoggerObserver.DEBUG)
analyzer = ClassificationAnalyzer()
analyzer.add_dataset(self.trainloader.dataset)
fig = analyzer.analyze(figsize=(10, 5))
LOGGER.log([{'tag': 'Sanitycheck/analysis/train', 'value': fig, 'type': LoggerObserver.FIGURE, 'kwargs': {'step': self.iters}}])
analyzer = ClassificationAnalyzer()
analyzer.add_dataset(self.valloader.dataset)
fig = analyzer.analyze(figsize=(10, 5))
LOGGER.log([{'tag': 'Sanitycheck/analysis/val', 'value': fig, 'type': LoggerObserver.FIGURE, 'kwargs': {'step': self.iters}}]) |
def sanitycheck(self):
'Sanity check before training\n '
self.visualize_gt()
self.analyze_gt()
self.visualize_model()
self.evaluate_epoch() | 2,781,777,548,193,682,000 | Sanity check before training | theseus/classification/trainer/trainer.py | sanitycheck | lannguyen0910/theseus | python | def sanitycheck(self):
'\n '
self.visualize_gt()
self.analyze_gt()
self.visualize_model()
self.evaluate_epoch() |
def test_message_causes_disconnect(self, message):
'Add a p2p connection that sends a message and check that it disconnects.'
peer = self.nodes[0].add_p2p_connection(P2PInterface())
peer.send_message(message)
peer.wait_for_disconnect()
assert_equal(self.nodes[0].getconnectioncount(), 0) | 1,046,188,331,780,544,800 | Add a p2p connection that sends a message and check that it disconnects. | test/functional/p2p_nobloomfilter_messages.py | test_message_causes_disconnect | BakedInside/Beans-Core | python | def test_message_causes_disconnect(self, message):
peer = self.nodes[0].add_p2p_connection(P2PInterface())
peer.send_message(message)
peer.wait_for_disconnect()
assert_equal(self.nodes[0].getconnectioncount(), 0) |
def evaluate(datasource, select, result_table, model, label_name=None, model_params=None, result_column_names=[], pai_table=None):
'TBD\n '
if (model_params is None):
model_params = {}
validation_metrics = model_params.get('validation.metrics', 'accuracy_score')
validation_metrics = [m.strip() for m in validation_metrics.split(',')]
bst = xgb.Booster()
if isinstance(model, six.string_types):
with temp_file.TemporaryDirectory(as_cwd=True):
model = Model.load_from_db(datasource, model)
bst.load_model('my_model')
else:
assert isinstance(model, Model), ('not supported model type %s' % type(model))
bst.load_model('my_model')
model_params = model.get_meta('attributes')
fc_map_ir = model.get_meta('features')
train_label = model.get_meta('label')
train_label_desc = train_label.get_field_desc()[0]
if label_name:
train_label_desc.name = label_name
feature_columns = compile_ir_feature_columns(fc_map_ir, EstimatorType.XGBOOST)
field_descs = get_ordered_field_descs(fc_map_ir)
feature_column_names = [fd.name for fd in field_descs]
feature_metas = dict([(fd.name, fd.to_dict(dtype_to_string=True)) for fd in field_descs])
transform_fn = ComposedColumnTransformer(feature_column_names, *feature_columns['feature_columns'])
is_pai = (True if pai_table else False)
if is_pai:
conn = PaiIOConnection.from_table(pai_table)
else:
conn = db.connect_with_data_source(datasource)
with temp_file.TemporaryDirectory() as tmp_dir_name:
pred_fn = os.path.join(tmp_dir_name, 'predict.txt')
dpred = xgb_dataset(datasource=datasource, fn=pred_fn, dataset_sql=select, feature_metas=feature_metas, feature_column_names=feature_column_names, label_meta=train_label_desc.to_dict(dtype_to_string=True), cache=True, batch_size=10000, transform_fn=transform_fn, is_pai=is_pai, pai_table=pai_table, pai_single_file=True, feature_column_code=fc_map_ir)
for (i, pred_dmatrix) in enumerate(dpred):
if is_pai:
feature_file_name = pred_fn
else:
feature_file_name = (pred_fn + ('_%d' % i))
preds = _calc_predict_result(bst, pred_dmatrix, model_params)
_store_evaluate_result(preds, feature_file_name, train_label_desc, result_table, result_column_names, validation_metrics, conn)
conn.close() | -4,562,751,783,326,163,500 | TBD | python/runtime/step/xgboost/evaluate.py | evaluate | awsl-dbq/sqlflow | python | def evaluate(datasource, select, result_table, model, label_name=None, model_params=None, result_column_names=[], pai_table=None):
'\n '
if (model_params is None):
model_params = {}
validation_metrics = model_params.get('validation.metrics', 'accuracy_score')
validation_metrics = [m.strip() for m in validation_metrics.split(',')]
bst = xgb.Booster()
if isinstance(model, six.string_types):
with temp_file.TemporaryDirectory(as_cwd=True):
model = Model.load_from_db(datasource, model)
bst.load_model('my_model')
else:
assert isinstance(model, Model), ('not supported model type %s' % type(model))
bst.load_model('my_model')
model_params = model.get_meta('attributes')
fc_map_ir = model.get_meta('features')
train_label = model.get_meta('label')
train_label_desc = train_label.get_field_desc()[0]
if label_name:
train_label_desc.name = label_name
feature_columns = compile_ir_feature_columns(fc_map_ir, EstimatorType.XGBOOST)
field_descs = get_ordered_field_descs(fc_map_ir)
feature_column_names = [fd.name for fd in field_descs]
feature_metas = dict([(fd.name, fd.to_dict(dtype_to_string=True)) for fd in field_descs])
transform_fn = ComposedColumnTransformer(feature_column_names, *feature_columns['feature_columns'])
is_pai = (True if pai_table else False)
if is_pai:
conn = PaiIOConnection.from_table(pai_table)
else:
conn = db.connect_with_data_source(datasource)
with temp_file.TemporaryDirectory() as tmp_dir_name:
pred_fn = os.path.join(tmp_dir_name, 'predict.txt')
dpred = xgb_dataset(datasource=datasource, fn=pred_fn, dataset_sql=select, feature_metas=feature_metas, feature_column_names=feature_column_names, label_meta=train_label_desc.to_dict(dtype_to_string=True), cache=True, batch_size=10000, transform_fn=transform_fn, is_pai=is_pai, pai_table=pai_table, pai_single_file=True, feature_column_code=fc_map_ir)
for (i, pred_dmatrix) in enumerate(dpred):
if is_pai:
feature_file_name = pred_fn
else:
feature_file_name = (pred_fn + ('_%d' % i))
preds = _calc_predict_result(bst, pred_dmatrix, model_params)
_store_evaluate_result(preds, feature_file_name, train_label_desc, result_table, result_column_names, validation_metrics, conn)
conn.close() |
def _store_evaluate_result(preds, feature_file_name, label_desc, result_table, result_column_names, validation_metrics, conn):
'\n Save the evaluation result in the table.\n\n Args:\n preds: the prediction result.\n feature_file_name (str): the file path where the feature dumps.\n label_desc (FieldDesc): the label FieldDesc object.\n result_table (str): the result table name.\n result_column_names (list[str]): the result column names.\n validation_metrics (list[str]): the evaluation metric names.\n conn: the database connection object.\n\n Returns:\n None.\n '
y_test = []
with open(feature_file_name, 'r') as f:
for line in f.readlines():
row = [i for i in line.strip().split('\t')]
if (label_desc.dtype == DataType.INT64):
y_test.append(int(row[0]))
elif (label_desc.dtype == DataType.FLOAT32):
y_test.append(float(row[0]))
else:
raise TypeError('unsupported data type {}'.format(label_desc.dtype))
y_test = np.array(y_test)
evaluate_results = dict()
for metric_name in validation_metrics:
metric_name = metric_name.strip()
if (metric_name not in SKLEARN_METRICS):
raise ValueError(('unsupported metrics %s' % metric_name))
metric_func = getattr(sklearn.metrics, metric_name)
metric_value = metric_func(y_test, preds)
evaluate_results[metric_name] = metric_value
with db.buffered_db_writer(conn, result_table, result_column_names) as w:
row = ['0.0']
for mn in validation_metrics:
row.append(str(evaluate_results[mn]))
w.write(row) | -7,471,850,992,633,782,000 | Save the evaluation result in the table.
Args:
preds: the prediction result.
feature_file_name (str): the file path where the feature dumps.
label_desc (FieldDesc): the label FieldDesc object.
result_table (str): the result table name.
result_column_names (list[str]): the result column names.
validation_metrics (list[str]): the evaluation metric names.
conn: the database connection object.
Returns:
None. | python/runtime/step/xgboost/evaluate.py | _store_evaluate_result | awsl-dbq/sqlflow | python | def _store_evaluate_result(preds, feature_file_name, label_desc, result_table, result_column_names, validation_metrics, conn):
'\n Save the evaluation result in the table.\n\n Args:\n preds: the prediction result.\n feature_file_name (str): the file path where the feature dumps.\n label_desc (FieldDesc): the label FieldDesc object.\n result_table (str): the result table name.\n result_column_names (list[str]): the result column names.\n validation_metrics (list[str]): the evaluation metric names.\n conn: the database connection object.\n\n Returns:\n None.\n '
y_test = []
with open(feature_file_name, 'r') as f:
for line in f.readlines():
row = [i for i in line.strip().split('\t')]
if (label_desc.dtype == DataType.INT64):
y_test.append(int(row[0]))
elif (label_desc.dtype == DataType.FLOAT32):
y_test.append(float(row[0]))
else:
raise TypeError('unsupported data type {}'.format(label_desc.dtype))
y_test = np.array(y_test)
evaluate_results = dict()
for metric_name in validation_metrics:
metric_name = metric_name.strip()
if (metric_name not in SKLEARN_METRICS):
raise ValueError(('unsupported metrics %s' % metric_name))
metric_func = getattr(sklearn.metrics, metric_name)
metric_value = metric_func(y_test, preds)
evaluate_results[metric_name] = metric_value
with db.buffered_db_writer(conn, result_table, result_column_names) as w:
row = ['0.0']
for mn in validation_metrics:
row.append(str(evaluate_results[mn]))
w.write(row) |
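_store_evaluate_result resolves each metric name to the scikit-learn function of the same name via getattr. A short standalone sketch of that lookup; the metric names and toy labels below are arbitrary:

import numpy as np
import sklearn.metrics

y_true = np.array([0, 1, 1, 0, 1])
y_pred = np.array([0, 1, 0, 0, 1])

# Resolve each requested metric name to the sklearn.metrics function of the same name.
results = {}
for metric_name in ('accuracy_score', 'f1_score'):
    metric_func = getattr(sklearn.metrics, metric_name)
    results[metric_name] = metric_func(y_true, y_pred)

print(results)  # {'accuracy_score': 0.8, 'f1_score': 0.8}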
def chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=False, grp_size_threshold=5):
"\n Order dict keys in order of number of baselines in each group\n\n\n chunk fit_groups in fg_model_comps_dict into chunks where all groups in the\n same chunk have the same number of baselines in each group.\n\n Parameters\n ----------\n fg_model_comps_dict: dict\n dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)\n in the first level, each tuple represents a 'modeling group' visibilities in each\n modeling group are represented by a set of basis vectors that span all baselines in that\n group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a\n 'redundant group' representing visibilities that we will represent with identical component coefficients\n each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling\n visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).\n\n use_redundancy: bool, optional\n If False, break fitting groups with the same number of baselines in each redundant\n sub_group into different fitting groups with no redundancy in each\n redundant subgroup. This is to prevent fitting groups with single\n redundant groups of varying lengths from being lumped into different chunks\n increasing the number of chunks has a more significant impact on run-time\n then increasing the number of baselines in each chunk.\n default is False.\n Returns:\n fg_model_comps_dict_chunked: dict\n dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number\n of baselines in each vector and the number of vectors. Each 2-tuple points to\n a dictionary where each key is the fitting group in fg_comps_dict that includes\n nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)\n numpy.ndarray describing the modeling components for each fitting group in the chunk.\n\n "
chunked_keys = {}
maxvecs = {}
fg_model_comps_dict = copy.deepcopy(fg_model_comps_dict)
if (not use_redundancy):
keys_with_redundancy = list(fg_model_comps_dict.keys())
for fit_grp in keys_with_redundancy:
rlens = np.asarray([len(red_grp) for red_grp in fit_grp])
if (np.allclose(rlens, np.mean(rlens)) and (len(rlens) < grp_size_threshold)):
modeling_vectors = fg_model_comps_dict.pop(fit_grp)
for rednum in range(int(rlens[0])):
fit_grp_new = tuple([(red_grp[rednum],) for red_grp in fit_grp])
fg_model_comps_dict[fit_grp_new] = modeling_vectors
for fit_grp in fg_model_comps_dict:
nbl = 0
for red_grp in fit_grp:
for ap in red_grp:
nbl += 1
if (nbl in chunked_keys):
chunked_keys[nbl].append(fit_grp)
if (fg_model_comps_dict[fit_grp].shape[1] > maxvecs[nbl]):
maxvecs[nbl] = fg_model_comps_dict[fit_grp].shape[1]
else:
chunked_keys[nbl] = [fit_grp]
maxvecs[nbl] = fg_model_comps_dict[fit_grp].shape[1]
fg_model_comps_dict_chunked = {}
for nbl in chunked_keys:
fg_model_comps_dict_chunked[(nbl, maxvecs[nbl])] = {k: fg_model_comps_dict[k] for k in chunked_keys[nbl]}
return fg_model_comps_dict_chunked | -964,472,370,095,410,200 | Order dict keys in order of number of baselines in each group
chunk fit_groups in fg_model_comps_dict into chunks where all groups in the
same chunk have the same number of baselines in each group.
Parameters
----------
fg_model_comps_dict: dict
dictionary with keys that are tuples of tuples of 2-tuples (that's right, 3 levels).
In the first level, each tuple represents a 'modeling group'; visibilities in each
modeling group are represented by a set of basis vectors that span all baselines in that
group, with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
'redundant group' representing visibilities that we will model with identical component coefficients;
each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accommodates modeling
visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
use_redundancy: bool, optional
If False, break fitting groups with the same number of baselines in each redundant
sub_group into different fitting groups with no redundancy in each
redundant subgroup. This is to prevent fitting groups with single
redundant groups of varying lengths from being lumped into different chunks
increasing the number of chunks has a more significant impact on run-time
then increasing the number of baselines in each chunk.
default is False.
Returns:
fg_model_comps_dict_chunked: dict
dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number
of baselines in each vector and the number of vectors. Each 2-tuple points to
a dictionary where each key is the fitting group in fg_comps_dict that includes
nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)
numpy.ndarray describing the modeling components for each fitting group in the chunk. | calamity/calibration.py | chunk_fg_comp_dict_by_nbls | aewallwi/calamity | python | def chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=False, grp_size_threshold=5):
"\n Order dict keys in order of number of baselines in each group\n\n\n chunk fit_groups in fg_model_comps_dict into chunks where all groups in the\n same chunk have the same number of baselines in each group.\n\n Parameters\n ----------\n fg_model_comps_dict: dict\n dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)\n in the first level, each tuple represents a 'modeling group' visibilities in each\n modeling group are represented by a set of basis vectors that span all baselines in that\n group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a\n 'redundant group' representing visibilities that we will represent with identical component coefficients\n each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling\n visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).\n\n use_redundancy: bool, optional\n If False, break fitting groups with the same number of baselines in each redundant\n sub_group into different fitting groups with no redundancy in each\n redundant subgroup. This is to prevent fitting groups with single\n redundant groups of varying lengths from being lumped into different chunks\n increasing the number of chunks has a more significant impact on run-time\n then increasing the number of baselines in each chunk.\n default is False.\n Returns:\n fg_model_comps_dict_chunked: dict\n dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number\n of baselines in each vector and the number of vectors. Each 2-tuple points to\n a dictionary where each key is the fitting group in fg_comps_dict that includes\n nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)\n numpy.ndarray describing the modeling components for each fitting group in the chunk.\n\n "
chunked_keys = {}
maxvecs = {}
fg_model_comps_dict = copy.deepcopy(fg_model_comps_dict)
if (not use_redundancy):
keys_with_redundancy = list(fg_model_comps_dict.keys())
for fit_grp in keys_with_redundancy:
rlens = np.asarray([len(red_grp) for red_grp in fit_grp])
if (np.allclose(rlens, np.mean(rlens)) and (len(rlens) < grp_size_threshold)):
modeling_vectors = fg_model_comps_dict.pop(fit_grp)
for rednum in range(int(rlens[0])):
fit_grp_new = tuple([(red_grp[rednum],) for red_grp in fit_grp])
fg_model_comps_dict[fit_grp_new] = modeling_vectors
for fit_grp in fg_model_comps_dict:
nbl = 0
for red_grp in fit_grp:
for ap in red_grp:
nbl += 1
if (nbl in chunked_keys):
chunked_keys[nbl].append(fit_grp)
if (fg_model_comps_dict[fit_grp].shape[1] > maxvecs[nbl]):
maxvecs[nbl] = fg_model_comps_dict[fit_grp].shape[1]
else:
chunked_keys[nbl] = [fit_grp]
maxvecs[nbl] = fg_model_comps_dict[fit_grp].shape[1]
fg_model_comps_dict_chunked = {}
for nbl in chunked_keys:
fg_model_comps_dict_chunked[(nbl, maxvecs[nbl])] = {k: fg_model_comps_dict[k] for k in chunked_keys[nbl]}
return fg_model_comps_dict_chunked |
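To make the key structure concrete, here is a toy call to chunk_fg_comp_dict_by_nbls with one fitting group made of two single-baseline redundant groups; the antenna numbers, nfreqs=2 and the `calamity.calibration` import path are assumptions for illustration:

import numpy as np
from calamity.calibration import chunk_fg_comp_dict_by_nbls  # import path assumed

nfreqs = 2
# One fitting group made of two redundant groups with one baseline each,
# modeled by 3 component vectors -> value shape is (nred_grps * nfreqs, nvecs).
fit_grp = (((0, 1),), ((1, 2),))
fg_model_comps_dict = {fit_grp: np.ones((2 * nfreqs, 3))}

chunked = chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=True)
# Keys are (nbl, maxvecs); this single chunk spans 2 baselines and 3 vectors.
print(list(chunked.keys()))  # [(2, 3)]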
def tensorize_fg_model_comps_dict(fg_model_comps_dict, ants_map, nfreqs, use_redundancy=False, dtype=np.float32, notebook_progressbar=False, verbose=False, grp_size_threshold=5):
'Convert per-baseline model components into a Ndata x Ncomponent tensor\n\n Parameters\n ----------\n fg_model_comps_dict: dict\n dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number\n of baselines in each vector and the number of vectors. Each 2-tuple points to\n a dictionary where each key is the fitting group in fg_comps_dict that includes\n nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)\n numpy.ndarray describing the modeling components for each fitting group in the chunk.\n ants_map: dict mapping integers to integers\n map between each antenna number to a unique index between 0 and Nants_data\n (typically the index of each antenna in ants_map)\n nfreqs: int, optional\n number of frequency channels\n dtype: numpy.dtype\n tensor data types\n default is np.float32\n\n Returns\n -------\n fg_model_comps: list\n list of tf.Tensor objects where each tensor has shape (nvecs, ngrps, nbls, nfreqs)\n where nbls varies from tensor to tensor. Fitting groups with vectors that span nbls are lumped into the same\n modeling tensor along the ngrps axis. nvecs is chosen in chunk_fg_comp_dict_by_nbls\n to be the maximum number of vectors representing any of the ngrps baseline grps\n which means that many rows in nvecs will be zero. For example, if we are modeling with\n vectors that all span nbls=1 baseline and using delay-modes to model our data\n then nvecs will equal the largest number of delay modes necessary to model the wedge\n on all baselines even though the short baselines are described by far fewer modes\n on short baselines, most of the rows along the vector dimension will therefor be zero.\n This is wasteful of memory but it allows us to take advantage of the fast\n dense matrix operations on a GPU.\n\n corr_inds: list\n list of list of lists of 2-tuples. Hierarchy of lists is\n chunk\n group\n baseline - (int 2-tuple)\n\n '
echo(f'''{datetime.datetime.now()} Computing foreground components matrices...
''', verbose=verbose)
fg_model_comps_dict = chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=use_redundancy, grp_size_threshold=grp_size_threshold)
fg_model_comps = []
corr_inds = []
for (nbls, nvecs) in fg_model_comps_dict:
ngrps = len(fg_model_comps_dict[(nbls, nvecs)])
modeling_matrix = np.zeros((nvecs, ngrps, nbls, nfreqs))
corr_inds_chunk = []
for (grpnum, modeling_grp) in enumerate(fg_model_comps_dict[(nbls, nvecs)]):
corr_inds_grp = []
nbl = 0
for (rgrpnum, red_grp) in enumerate(modeling_grp):
nred = len(red_grp)
for ap in red_grp:
(i, j) = (ants_map[ap[0]], ants_map[ap[1]])
corr_inds_grp.append((i, j))
vecslice = slice(0, fg_model_comps_dict[(nbls, nvecs)][modeling_grp].shape[1])
compslice = slice((rgrpnum * nfreqs), ((rgrpnum + 1) * nfreqs))
dslice = slice((nbl * nfreqs), ((nbl + 1) * nfreqs))
modeling_matrix[(vecslice, grpnum, nbl)] = fg_model_comps_dict[(nbls, nvecs)][modeling_grp][compslice].T
nbl += 1
corr_inds_chunk.append(corr_inds_grp)
fg_model_comps.append(tf.convert_to_tensor(modeling_matrix, dtype=dtype))
corr_inds.append(corr_inds_chunk)
return (fg_model_comps, corr_inds) | -8,335,094,441,089,768,000 | Convert per-baseline model components into a Ndata x Ncomponent tensor
Parameters
----------
fg_model_comps_dict: dict
dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number
of baselines in each vector and the number of vectors. Each 2-tuple points to
a dictionary where each key is the fitting group in fg_comps_dict that includes
nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)
numpy.ndarray describing the modeling components for each fitting group in the chunk.
ants_map: dict mapping integers to integers
map between each antenna number to a unique index between 0 and Nants_data
(typically the index of each antenna in ants_map)
nfreqs: int, optional
number of frequency channels
dtype: numpy.dtype
tensor data types
default is np.float32
Returns
-------
fg_model_comps: list
list of tf.Tensor objects where each tensor has shape (nvecs, ngrps, nbls, nfreqs)
where nbls varies from tensor to tensor. Fitting groups with vectors that span nbls are lumped into the same
modeling tensor along the ngrps axis. nvecs is chosen in chunk_fg_comp_dict_by_nbls
to be the maximum number of vectors representing any of the ngrps baseline grps
which means that many rows in nvecs will be zero. For example, if we are modeling with
vectors that all span nbls=1 baseline and using delay-modes to model our data
then nvecs will equal the largest number of delay modes necessary to model the wedge
on all baselines, even though the short baselines are described by far fewer modes.
On short baselines, most of the rows along the vector dimension will therefore be zero.
This is wasteful of memory but it allows us to take advantage of the fast
dense matrix operations on a GPU.
corr_inds: list
list of list of lists of 2-tuples. Hierarchy of lists is
chunk
group
baseline - (int 2-tuple) | calamity/calibration.py | tensorize_fg_model_comps_dict | aewallwi/calamity | python | def tensorize_fg_model_comps_dict(fg_model_comps_dict, ants_map, nfreqs, use_redundancy=False, dtype=np.float32, notebook_progressbar=False, verbose=False, grp_size_threshold=5):
'Convert per-baseline model components into a Ndata x Ncomponent tensor\n\n Parameters\n ----------\n fg_model_comps_dict: dict\n dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number\n of baselines in each vector and the number of vectors. Each 2-tuple points to\n a dictionary where each key is the fitting group in fg_comps_dict that includes\n nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)\n numpy.ndarray describing the modeling components for each fitting group in the chunk.\n ants_map: dict mapping integers to integers\n map between each antenna number to a unique index between 0 and Nants_data\n (typically the index of each antenna in ants_map)\n nfreqs: int, optional\n number of frequency channels\n dtype: numpy.dtype\n tensor data types\n default is np.float32\n\n Returns\n -------\n fg_model_comps: list\n list of tf.Tensor objects where each tensor has shape (nvecs, ngrps, nbls, nfreqs)\n where nbls varies from tensor to tensor. Fitting groups with vectors that span nbls are lumped into the same\n modeling tensor along the ngrps axis. nvecs is chosen in chunk_fg_comp_dict_by_nbls\n to be the maximum number of vectors representing any of the ngrps baseline grps\n which means that many rows in nvecs will be zero. For example, if we are modeling with\n vectors that all span nbls=1 baseline and using delay-modes to model our data\n then nvecs will equal the largest number of delay modes necessary to model the wedge\n on all baselines even though the short baselines are described by far fewer modes\n on short baselines, most of the rows along the vector dimension will therefor be zero.\n This is wasteful of memory but it allows us to take advantage of the fast\n dense matrix operations on a GPU.\n\n corr_inds: list\n list of list of lists of 2-tuples. Hierarchy of lists is\n chunk\n group\n baseline - (int 2-tuple)\n\n '
echo(f'''{datetime.datetime.now()} Computing foreground components matrices...
''', verbose=verbose)
fg_model_comps_dict = chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=use_redundancy, grp_size_threshold=grp_size_threshold)
fg_model_comps = []
corr_inds = []
for (nbls, nvecs) in fg_model_comps_dict:
ngrps = len(fg_model_comps_dict[(nbls, nvecs)])
modeling_matrix = np.zeros((nvecs, ngrps, nbls, nfreqs))
corr_inds_chunk = []
for (grpnum, modeling_grp) in enumerate(fg_model_comps_dict[(nbls, nvecs)]):
corr_inds_grp = []
nbl = 0
for (rgrpnum, red_grp) in enumerate(modeling_grp):
nred = len(red_grp)
for ap in red_grp:
(i, j) = (ants_map[ap[0]], ants_map[ap[1]])
corr_inds_grp.append((i, j))
vecslice = slice(0, fg_model_comps_dict[(nbls, nvecs)][modeling_grp].shape[1])
compslice = slice((rgrpnum * nfreqs), ((rgrpnum + 1) * nfreqs))
dslice = slice((nbl * nfreqs), ((nbl + 1) * nfreqs))
modeling_matrix[(vecslice, grpnum, nbl)] = fg_model_comps_dict[(nbls, nvecs)][modeling_grp][compslice].T
nbl += 1
corr_inds_chunk.append(corr_inds_grp)
fg_model_comps.append(tf.convert_to_tensor(modeling_matrix, dtype=dtype))
corr_inds.append(corr_inds_chunk)
return (fg_model_comps, corr_inds) |
def tensorize_data(uvdata, corr_inds, ants_map, polarization, time, data_scale_factor=1.0, weights=None, nsamples_in_weights=False, dtype=np.float32):
'Convert data in uvdata object to a tensor\n\n Parameters\n ----------\n uvdata: UVData object\n UVData object containing data, flags, and nsamples to tensorize.\n corr_inds: list\n list of list of lists of 2-tuples. Hierarchy of lists is\n chunk\n group\n baseline - (int 2-tuple)\n ants_map: dict mapping integers to integers\n map between each antenna number to a unique index between 0 and Nants_data\n (typically the index of each antenna in ants_map)\n polarization: str\n pol-str of gain to extract.\n time: float\n time of data to convert to tensor.\n data_scale_factor: float, optional\n overall scaling factor to divide tensorized data by.\n default is 1.0\n weights: UVFlag object, optional\n UVFlag weights object containing weights to use for data fitting.\n default is None -> use nsamples * ~flags if nsamples_in_weights\n or ~flags if not nsamples_in_weights\n nsamples_in_weights: bool, optional\n If True and weights is None, generate weights proportional to nsamples.\n default is False.\n dtype: numpy.dtype\n data-type to store in tensor.\n default is np.float32\n\n Returns\n -------\n data_r: list of tf.Tensor objects\n list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)\n where ngrps, nbls are the dimensions of each sublist in corr_inds\n and contain the real components of the baselines specified by these 2-tuples.\n data_i: list of tf.Tensor objects\n list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)\n where ngrps, nbls are the dimensions of each sublist in corr_inds\n and contain the imag components of the baselines specified by these 2-tuples.\n wgts: tf.Tensor object\n list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)\n where ngrps, nbls are the dimensions of each sublist in corr_inds\n and contain the weights of the baselines specified by these 2-tuples.\n '
ants_map_inv = {ants_map[i]: i for i in ants_map}
dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs)
data_r = np.zeros(dshape, dtype=dtype)
data_i = np.zeros_like(data_r)
wgts = np.zeros_like(data_r)
wgtsum = 0.0
for chunk in corr_inds:
for fitgrp in chunk:
for (i, j) in fitgrp:
ap = (ants_map_inv[i], ants_map_inv[j])
bl = (ap + (polarization,))
(dinds1, dinds2, pol_ind) = uvdata._key2inds(bl)
if (len(dinds1) > 0):
dinds = dinds1
conjugate = False
pol_ind = pol_ind[0]
else:
dinds = dinds2
conjugate = True
pol_ind = pol_ind[1]
dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-07))[0][0]]
data = uvdata.data_array[dind, 0, :, pol_ind].squeeze()
iflags = (~ uvdata.flag_array[dind, 0, :, pol_ind].squeeze())
nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze()
data /= data_scale_factor
if conjugate:
data = np.conj(data)
data_r[(i, j)] = data.real.astype(dtype)
data_i[(i, j)] = data.imag.astype(dtype)
if (weights is None):
wgts[(i, j)] = iflags
if nsamples_in_weights:
wgts[(i, j)] *= nsamples
else:
if (ap in weights.get_antpairs()):
dinds = weights.antpair2ind(*ap)
else:
dinds = weights.antpair2ind(*ap[::(- 1)])
dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-07, rtol=0.0))[0][0]]
polnum = np.where((weights.polarization_array == uvutils.polstr2num(polarization, x_orientation=weights.x_orientation)))[0][0]
wgts[(i, j)] = (weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags)
if nsamples_in_weights:
wgts[(i, j)] *= nsamples
wgtsum += np.sum(wgts[(i, j)])
data_r = tf.convert_to_tensor(data_r, dtype=dtype)
data_i = tf.convert_to_tensor(data_i, dtype=dtype)
wgts = tf.convert_to_tensor((wgts / wgtsum), dtype=dtype)
nchunks = len(corr_inds)
data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)]
data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)]
wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)]
return (data_r, data_i, wgts) | -2,030,708,951,956,363,000 | Convert data in uvdata object to a tensor
Parameters
----------
uvdata: UVData object
UVData object containing data, flags, and nsamples to tensorize.
corr_inds: list
list of list of lists of 2-tuples. Hierarchy of lists is
chunk
group
baseline - (int 2-tuple)
ants_map: dict mapping integers to integers
map between each antenna number to a unique index between 0 and Nants_data
(typically the index of each antenna in ants_map)
polarization: str
pol-str of gain to extract.
time: float
time of data to convert to tensor.
data_scale_factor: float, optional
overall scaling factor to divide tensorized data by.
default is 1.0
weights: UVFlag object, optional
UVFlag weights object containing weights to use for data fitting.
default is None -> use nsamples * ~flags if nsamples_in_weights
or ~flags if not nsamples_in_weights
nsamples_in_weights: bool, optional
If True and weights is None, generate weights proportional to nsamples.
default is False.
dtype: numpy.dtype
data-type to store in tensor.
default is np.float32
Returns
-------
data_r: list of tf.Tensor objects
list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
where ngrps, nbls are the dimensions of each sublist in corr_inds
and contain the real components of the baselines specified by these 2-tuples.
data_i: list of tf.Tensor objects
list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
where ngrps, nbls are the dimensions of each sublist in corr_inds
and contain the imag components of the baselines specified by these 2-tuples.
wgts: tf.Tensor object
list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
where ngrps, nbls are the dimensions of each sublist in corr_inds
and contain the weights of the baselines specified by these 2-tuples. | calamity/calibration.py | tensorize_data | aewallwi/calamity | python | def tensorize_data(uvdata, corr_inds, ants_map, polarization, time, data_scale_factor=1.0, weights=None, nsamples_in_weights=False, dtype=np.float32):
'Convert data in uvdata object to a tensor\n\n Parameters\n ----------\n uvdata: UVData object\n UVData object containing data, flags, and nsamples to tensorize.\n corr_inds: list\n list of list of lists of 2-tuples. Hierarchy of lists is\n chunk\n group\n baseline - (int 2-tuple)\n ants_map: dict mapping integers to integers\n map between each antenna number to a unique index between 0 and Nants_data\n (typically the index of each antenna in ants_map)\n polarization: str\n pol-str of gain to extract.\n time: float\n time of data to convert to tensor.\n data_scale_factor: float, optional\n overall scaling factor to divide tensorized data by.\n default is 1.0\n weights: UVFlag object, optional\n UVFlag weights object containing weights to use for data fitting.\n default is None -> use nsamples * ~flags if nsamples_in_weights\n or ~flags if not nsamples_in_weights\n nsamples_in_weights: bool, optional\n If True and weights is None, generate weights proportional to nsamples.\n default is False.\n dtype: numpy.dtype\n data-type to store in tensor.\n default is np.float32\n\n Returns\n -------\n data_r: list of tf.Tensor objects\n list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)\n where ngrps, nbls are the dimensions of each sublist in corr_inds\n and contain the real components of the baselines specified by these 2-tuples.\n data_i: list of tf.Tensor objects\n list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)\n where ngrps, nbls are the dimensions of each sublist in corr_inds\n and contain the imag components of the baselines specified by these 2-tuples.\n wgts: tf.Tensor object\n list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)\n where ngrps, nbls are the dimensions of each sublist in corr_inds\n and contain the weights of the baselines specified by these 2-tuples.\n '
ants_map_inv = {ants_map[i]: i for i in ants_map}
dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs)
data_r = np.zeros(dshape, dtype=dtype)
data_i = np.zeros_like(data_r)
wgts = np.zeros_like(data_r)
wgtsum = 0.0
for chunk in corr_inds:
for fitgrp in chunk:
for (i, j) in fitgrp:
ap = (ants_map_inv[i], ants_map_inv[j])
bl = (ap + (polarization,))
(dinds1, dinds2, pol_ind) = uvdata._key2inds(bl)
if (len(dinds1) > 0):
dinds = dinds1
conjugate = False
pol_ind = pol_ind[0]
else:
dinds = dinds2
conjugate = True
pol_ind = pol_ind[1]
dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-07))[0][0]]
data = uvdata.data_array[dind, 0, :, pol_ind].squeeze()
iflags = (~ uvdata.flag_array[dind, 0, :, pol_ind].squeeze())
nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze()
data /= data_scale_factor
if conjugate:
data = np.conj(data)
data_r[(i, j)] = data.real.astype(dtype)
data_i[(i, j)] = data.imag.astype(dtype)
if (weights is None):
wgts[(i, j)] = iflags
if nsamples_in_weights:
wgts[(i, j)] *= nsamples
else:
if (ap in weights.get_antpairs()):
dinds = weights.antpair2ind(*ap)
else:
dinds = weights.antpair2ind(*ap[::(- 1)])
dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-07, rtol=0.0))[0][0]]
polnum = np.where((weights.polarization_array == uvutils.polstr2num(polarization, x_orientation=weights.x_orientation)))[0][0]
wgts[(i, j)] = (weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags)
if nsamples_in_weights:
wgts[(i, j)] *= nsamples
wgtsum += np.sum(wgts[(i, j)])
data_r = tf.convert_to_tensor(data_r, dtype=dtype)
data_i = tf.convert_to_tensor(data_i, dtype=dtype)
wgts = tf.convert_to_tensor((wgts / wgtsum), dtype=dtype)
nchunks = len(corr_inds)
data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)]
data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)]
wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)]
return (data_r, data_i, wgts) |
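The final gather step in tensorize_data amounts to tf.gather_nd over a dense (Nants, Nants, Nfreqs) array with nested antenna-pair index lists. A standalone sketch with made-up shapes and indices:

import numpy as np
import tensorflow as tf

nants, nfreqs = 3, 4
data_r = tf.convert_to_tensor(
    np.arange(nants * nants * nfreqs, dtype=np.float32).reshape(nants, nants, nfreqs)
)

# One chunk holding two fitting groups of two baselines each, indexed by (ant_i, ant_j).
corr_inds = [[[(0, 1), (0, 2)], [(1, 2), (2, 1)]]]

# Each gathered chunk has shape (ngrps, nbls, nfreqs) = (2, 2, 4).
chunks = [tf.gather_nd(data_r, chunk) for chunk in corr_inds]
print(chunks[0].shape)  # (2, 2, 4)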
def renormalize(uvdata_reference_model, uvdata_deconv, gains, polarization, time, additional_flags=None):
'Remove arbitrary phase and amplitude from deconvolved model and gains.\n\n Parameters\n ----------\n uvdata_reference_model: UVData object\n Reference model for "true" visibilities.\n uvdata_deconv: UVData object\n "Deconvolved" data solved for in self-cal loop.\n gains: UVCal object\n Gains solved for in self-cal loop.\n polarization: str\n Polarization string to compute phase and amplitude correction for.\n additional_flags: np.ndarray\n Any additional flags you wish to use for excluding data from normalization\n fed as an np.ndarray with same shape as uvdata_reference_model and uvdata_deconv.\n default is None -> Only exclude data in flags from reference model and deconv from\n determinging normalization.\n Returns\n -------\n N/A: Modifies uvdata_deconv and gains in-place.\n '
polnum_data = np.where((uvdata_deconv.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)))[0][0]
bltsel = np.isclose(uvdata_deconv.time_array, time, atol=1e-07, rtol=0.0)
selection = ((~ uvdata_deconv.flag_array[bltsel, :, :, polnum_data]) & (~ uvdata_reference_model.flag_array[bltsel, :, :, polnum_data]))
if (additional_flags is not None):
selection = (selection & (~ additional_flags[bltsel, :, :, polnum_data]))
data_ratio = (uvdata_reference_model.data_array[bltsel, :, :, polnum_data][selection] / uvdata_deconv.data_array[bltsel, :, :, polnum_data][selection])
data_ratio[(~ np.isfinite(data_ratio))] = np.nan
scale_factor_phase = np.angle(np.nanmean(data_ratio))
scale_factor_abs = np.sqrt(np.nanmean((np.abs(data_ratio) ** 2.0)))
scale_factor = scale_factor_abs
uvdata_deconv.data_array[bltsel, :, :, polnum_data] *= scale_factor
polnum_gains = np.where((gains.jones_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)))[0][0]
gindt = np.where(np.isclose(gains.time_array, time, atol=1e-07, rtol=0.0))[0][0]
gains.gain_array[:, :, :, gindt, polnum_gains] *= (scale_factor ** (- 0.5)) | 2,437,212,031,765,043,700 | Remove arbitrary phase and amplitude from deconvolved model and gains.
Parameters
----------
uvdata_reference_model: UVData object
Reference model for "true" visibilities.
uvdata_deconv: UVData object
"Deconvolved" data solved for in self-cal loop.
gains: UVCal object
Gains solved for in self-cal loop.
polarization: str
Polarization string to compute phase and amplitude correction for.
additional_flags: np.ndarray
Any additional flags you wish to use for excluding data from normalization
fed as an np.ndarray with same shape as uvdata_reference_model and uvdata_deconv.
default is None -> Only exclude data in flags from reference model and deconv from
determinging normalization.
Returns
-------
N/A: Modifies uvdata_deconv and gains in-place. | calamity/calibration.py | renormalize | aewallwi/calamity | python | def renormalize(uvdata_reference_model, uvdata_deconv, gains, polarization, time, additional_flags=None):
'Remove arbitrary phase and amplitude from deconvolved model and gains.\n\n Parameters\n ----------\n uvdata_reference_model: UVData object\n Reference model for "true" visibilities.\n uvdata_deconv: UVData object\n "Deconvolved" data solved for in self-cal loop.\n gains: UVCal object\n Gains solved for in self-cal loop.\n polarization: str\n Polarization string to compute phase and amplitude correction for.\n additional_flags: np.ndarray\n Any additional flags you wish to use for excluding data from normalization\n fed as an np.ndarray with same shape as uvdata_reference_model and uvdata_deconv.\n default is None -> Only exclude data in flags from reference model and deconv from\n determinging normalization.\n Returns\n -------\n N/A: Modifies uvdata_deconv and gains in-place.\n '
polnum_data = np.where((uvdata_deconv.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)))[0][0]
bltsel = np.isclose(uvdata_deconv.time_array, time, atol=1e-07, rtol=0.0)
selection = ((~ uvdata_deconv.flag_array[bltsel, :, :, polnum_data]) & (~ uvdata_reference_model.flag_array[bltsel, :, :, polnum_data]))
if (additional_flags is not None):
selection = (selection & (~ additional_flags[bltsel, :, :, polnum_data]))
data_ratio = (uvdata_reference_model.data_array[bltsel, :, :, polnum_data][selection] / uvdata_deconv.data_array[bltsel, :, :, polnum_data][selection])
data_ratio[(~ np.isfinite(data_ratio))] = np.nan
scale_factor_phase = np.angle(np.nanmean(data_ratio))
scale_factor_abs = np.sqrt(np.nanmean((np.abs(data_ratio) ** 2.0)))
scale_factor = scale_factor_abs
uvdata_deconv.data_array[bltsel, :, :, polnum_data] *= scale_factor
polnum_gains = np.where((gains.jones_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)))[0][0]
gindt = np.where(np.isclose(gains.time_array, time, atol=1e-07, rtol=0.0))[0][0]
gains.gain_array[:, :, :, gindt, polnum_gains] *= (scale_factor ** (- 0.5)) |
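A minimal usage sketch of renormalize, assuming the module is importable as calamity.calibration; the file names below are placeholders and the "xx" polarization is assumed to be present in the data.
# Hypothetical usage sketch for renormalize; file names are placeholders.
import numpy as np
from pyuvdata import UVData, UVCal
from calamity import calibration

ref_model = UVData()
ref_model.read("reference_model.uvh5")   # reference "true" visibilities
deconv = UVData()
deconv.read("deconvolved.uvh5")          # deconvolved data from the self-cal loop
gains = UVCal()
gains.read_calfits("gains.calfits")      # gains from the same self-cal loop

# Fix the arbitrary amplitude scale for one polarization and time, in place.
t0 = np.unique(deconv.time_array)[0]
calibration.renormalize(ref_model, deconv, gains, polarization="xx", time=t0)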
def tensorize_gains(uvcal, polarization, time, dtype=np.float32):
'Helper function to extract gains into fitting tensors.\n\n Parameters\n ----------\n uvcal: UVCal object\n UVCal object holding gain data to tensorize.\n polarization: str\n pol-str of gain to extract.\n time: float\n JD of time to convert to tensor.\n dtype: numpy.dtype\n dtype of tensors to output.\n\n Returns\n -------\n gains_re: tf.Tensor object.\n tensor object holding real component of gains\n for time_index and polarization\n shape is Nant x Nfreq\n gains_im: tf.Tensor object.\n tensor object holding imag component of gains\n for time_index and polarization\n shape is Nant x Nfreq\n\n '
polnum = np.where((uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation)))[0][0]
gindt = np.where(np.isclose(uvcal.time_array, time, atol=1e-07, rtol=0.0))[0][0]
gains_re = tf.convert_to_tensor(uvcal.gain_array[:, 0, :, gindt, polnum].squeeze().real, dtype=dtype)
gains_im = tf.convert_to_tensor(uvcal.gain_array[:, 0, :, gindt, polnum].squeeze().imag, dtype=dtype)
return (gains_re, gains_im) | 933,491,289,463,469,600 | Helper function to extract gains into fitting tensors.
Parameters
----------
uvcal: UVCal object
UVCal object holding gain data to tensorize.
polarization: str
pol-str of gain to extract.
time: float
JD of time to convert to tensor.
dtype: numpy.dtype
dtype of tensors to output.
Returns
-------
gains_re: tf.Tensor object.
tensor object holding real component of gains
for time_index and polarization
shape is Nant x Nfreq
gains_im: tf.Tensor object.
tensor object holding imag component of gains
for time_index and polarization
shape is Nant x Nfreq | calamity/calibration.py | tensorize_gains | aewallwi/calamity | python | def tensorize_gains(uvcal, polarization, time, dtype=np.float32):
'Helper function to extract gains into fitting tensors.\n\n Parameters\n ----------\n uvcal: UVCal object\n UVCal object holding gain data to tensorize.\n polarization: str\n pol-str of gain to extract.\n time: float\n JD of time to convert to tensor.\n dtype: numpy.dtype\n dtype of tensors to output.\n\n Returns\n -------\n gains_re: tf.Tensor object.\n tensor object holding real component of gains\n for time_index and polarization\n shape is Nant x Nfreq\n gains_im: tf.Tensor object.\n tensor object holding imag component of gains\n for time_index and polarization\n shape is Nant x Nfreq\n\n '
polnum = np.where((uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation)))[0][0]
gindt = np.where(np.isclose(uvcal.time_array, time, atol=1e-07, rtol=0.0))[0][0]
gains_re = tf.convert_to_tensor(uvcal.gain_array[:, 0, :, gindt, polnum].squeeze().real, dtype=dtype)
gains_im = tf.convert_to_tensor(uvcal.gain_array[:, 0, :, gindt, polnum].squeeze().imag, dtype=dtype)
return (gains_re, gains_im) |
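A short sketch of how tensorize_gains might be called to pull per-antenna gain tensors for a single time and polarization; the calfits file name and the import path are assumptions.
# Hypothetical sketch: extract gain tensors for one time and polarization.
import numpy as np
from pyuvdata import UVCal
from calamity import calibration

gains = UVCal()
gains.read_calfits("gains.calfits")      # placeholder file
t0 = np.unique(gains.time_array)[0]
g_r, g_i = calibration.tensorize_gains(gains, polarization="xx", time=t0, dtype=np.float32)
print(g_r.shape)                         # expected (Nants_data, Nfreqs)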
def yield_fg_model_array(nants, nfreqs, fg_model_comps, fg_coeffs, corr_inds):
'Compute tensor foreground model.\n\n Parameters\n ----------\n nants: int\n number of antennas in data to model.\n freqs: int\n number of frequencies in data to model.\n fg_model_comps: list\n list of fg modeling tf.Tensor objects\n representing foreground modeling vectors.\n Each tensor is (nvecs, ngrps, nbls, nfreqs)\n fg_coeffs: list\n list of fg modeling tf.Tensor objects\n representing foreground modeling coefficients.\n Each tensor is (nvecs, ngrps, 1, 1)\n corr_inds: list\n list of list of lists of 2-tuples. Hierarchy of lists is\n chunk\n group\n baseline - (int 2-tuple)\n\n Returns\n -------\n model: tf.Tensor object\n nants x nants x nfreqs model of the visibility data\n '
model = np.zeros((nants, nants, nfreqs))
nchunks = len(fg_model_comps)
for cnum in range(nchunks):
ngrps = fg_model_comps[cnum].shape[1]
gchunk = tf.reduce_sum((fg_coeffs[cnum] * fg_model_comps[cnum]), axis=0).numpy()
for gnum in range(ngrps):
for (blnum, (i, j)) in enumerate(corr_inds[cnum][gnum]):
model[(i, j)] = gchunk[(gnum, blnum)]
return model | -4,389,784,291,805,237,000 | Compute tensor foreground model.
Parameters
----------
nants: int
number of antennas in data to model.
nfreqs: int
number of frequencies in data to model.
fg_model_comps: list
list of fg modeling tf.Tensor objects
representing foreground modeling vectors.
Each tensor is (nvecs, ngrps, nbls, nfreqs)
fg_coeffs: list
list of fg modeling tf.Tensor objects
representing foreground modeling coefficients.
Each tensor is (nvecs, ngrps, 1, 1)
corr_inds: list
list of list of lists of 2-tuples. Hierarchy of lists is
chunk
group
baseline - (int 2-tuple)
Returns
-------
model: tf.Tensor object
nants x nants x nfreqs model of the visibility data | calamity/calibration.py | yield_fg_model_array | aewallwi/calamity | python | def yield_fg_model_array(nants, nfreqs, fg_model_comps, fg_coeffs, corr_inds):
'Compute tensor foreground model.\n\n Parameters\n ----------\n nants: int\n number of antennas in data to model.\n freqs: int\n number of frequencies in data to model.\n fg_model_comps: list\n list of fg modeling tf.Tensor objects\n representing foreground modeling vectors.\n Each tensor is (nvecs, ngrps, nbls, nfreqs)\n fg_coeffs: list\n list of fg modeling tf.Tensor objects\n representing foreground modeling coefficients.\n Each tensor is (nvecs, ngrps, 1, 1)\n corr_inds: list\n list of list of lists of 2-tuples. Hierarchy of lists is\n chunk\n group\n baseline - (int 2-tuple)\n\n Returns\n -------\n model: tf.Tensor object\n nants x nants x nfreqs model of the visibility data\n '
model = np.zeros((nants, nants, nfreqs))
nchunks = len(fg_model_comps)
for cnum in range(nchunks):
ngrps = fg_model_comps[cnum].shape[1]
gchunk = tf.reduce_sum((fg_coeffs[cnum] * fg_model_comps[cnum]), axis=0).numpy()
for gnum in range(ngrps):
for (blnum, (i, j)) in enumerate(corr_inds[cnum][gnum]):
model[(i, j)] = gchunk[(gnum, blnum)]
return model |
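A toy sketch of yield_fg_model_array with synthetic component tensors, following the shapes documented above (one chunk, one group, two baselines); the import path is an assumption.
# Toy example with synthetic components: one chunk, one group, two baselines.
import tensorflow as tf
from calamity import calibration

nants, nfreqs, nvecs = 3, 16, 4
corr_inds = [[[(0, 1), (1, 2)]]]                        # chunk -> group -> baseline index pairs
fg_comps = [tf.random.normal((nvecs, 1, 2, nfreqs))]    # (nvecs, ngrps, nbls, nfreqs)
fg_coeffs = [tf.random.normal((nvecs, 1, 1, 1))]        # (nvecs, ngrps, 1, 1)
model = calibration.yield_fg_model_array(nants, nfreqs, fg_comps, fg_coeffs, corr_inds)
print(model.shape)                                      # (3, 3, 16)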
def fit_gains_and_foregrounds(g_r, g_i, fg_r, fg_i, data_r, data_i, wgts, fg_comps, corr_inds, use_min=False, tol=1e-14, maxsteps=10000, optimizer='Adamax', freeze_model=False, verbose=False, notebook_progressbar=False, dtype=np.float32, graph_mode=False, n_profile_steps=0, profile_log_dir='./logdir', sky_model_r=None, sky_model_i=None, model_regularization=None, graph_args_dict=None, **opt_kwargs):
'Run optimization loop to fit gains and foreground components.\n\n Parameters\n ----------\n g_r: tf.Tensor object.\n tf.Tensor object holding real parts of gains.\n g_i: tf.Tensor object.\n tf.Tensor object holding imag parts of gains.\n fg_r: list\n list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)\n tf.Tensor object holding foreground coeffs.\n fg_i: list\n list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)\n tf.Tensor object holding imag coeffs.\n data_r: list\n list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)\n real part of data to fit.\n data_i: list\n list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)\n imag part of data to fit.\n wgts: list\n list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)\n fg_comps: list:\n list of tf.Tensor objects. Each has shape (nvecs, ngrps, nbls, nfreqs)\n represents vectors to be used in modeling visibilities.\n corr_inds: list\n list of list of lists of 2-tuples. Hierarchy of lists is\n chunk\n group\n baseline - (int 2-tuple)\n use_min: bool, optional\n if True, use the value that minimizes the loss function\n regardless of where optimization loop ended up\n (prevents overshooting due to excess momentum)\n tol: float, optional\n halt optimization loop once the loss changes by less then this value.\n default is 1e-14\n maxsteps: int, optional\n maximum number of opt.minimize calls before halting.\n default is 10000\n optimizer: string\n Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in\n https://www.tensorflow.org/api_docs/python/tf/keras/optimizers\n default is \'Adamax\'\n freeze_model: bool, optional\n Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration\n with sky_model as the model (but projected onto the foreground basis vectors).\n default is False.\n verbose: bool, optional\n lots of text output\n default is False.\n notebook_progressbar: bool, optional\n use progress bar optimized for notebook output.\n default is False.\n graph_mode: bool, optional\n if True, compile gradient update step in graph mode to speed up\n runtime by ~2-3x. I\'ve found that this helps on CPUs but on GPUs\n it actually increases runtime by a similar factor.\n n_profile_steps: bool, optional\n number of steps to run profiling on\n default is 0.\n profile_log_dir: str, optional\n directory to save profile logs to\n default is \'./logdir\'\n sky_model_r: list of tf.Tensor objects, optional\n chunked tensors containing model in same format as data_r\n sky_model_i: list of tf.Tensor objects, optional\n chunked tensors containing model in the same format as data_i\n model_regularization: str, optional\n type of model regularization to perform. Currently support "sum"\n where the sums of real and imaginary parts (across all bls and freqs)\n are constrained to be the same as the sum of real and imag parts\n of data.\n opt_kwargs: kwarg dict\n additional kwargs for tf.opt.Optimizer(). See tensorflow docs.\n\n Returns\n -------\n g_r_opt: tf.Tensor object\n real part of optimized gains.\n g_i_opt: tf.Tensor object\n imag part of optimized gains.\n fg_r_opt: tf.Tensor object\n real part of foreground coeffs.\n fg_i_opt: tf.Tensor object.\n imag part of optimized foreground coeffs.\n fit_history: dict\n dictionary containing fit history for each time-step and polarization in the data with fields:\n \'loss_history\': list of values of the loss function in each minimization iteration.\n '
if (graph_args_dict is None):
graph_args_dict = {}
echo(f'Using {str(dtype)} precision.')
echo(f'{datetime.datetime.now()} Provided the following opt_kwargs')
for k in opt_kwargs:
echo(f'{k}: {opt_kwargs[k]}')
opt = OPTIMIZERS[optimizer](**opt_kwargs)
fit_history = {'loss': []}
min_loss = 9e+99
nants = g_r.shape[0]
nfreqs = g_r.shape[1]
ant0_inds = []
ant1_inds = []
nchunks = len(fg_comps)
for cnum in range(nchunks):
ant0_chunk = []
ant1_chunk = []
ngrps = len(corr_inds[cnum])
for gnum in range(ngrps):
ant0_grp = []
ant1_grp = []
for cpair in corr_inds[cnum][gnum]:
ant0_grp.append(cpair[0])
ant1_grp.append(cpair[1])
ant0_chunk.append(ant0_grp)
ant1_chunk.append(ant1_grp)
ant0_inds.append(ant0_chunk)
ant1_inds.append(ant1_chunk)
g_r = tf.Variable(g_r)
g_i = tf.Variable(g_i)
if (not freeze_model):
fg_r = [tf.Variable(fgr) for fgr in fg_r]
fg_i = [tf.Variable(fgi) for fgi in fg_i]
vars = (([g_r, g_i] + fg_r) + fg_i)
else:
vars = [g_r, g_i]
echo(f'{datetime.datetime.now()} Performing gradient descent on {np.prod(g_r.shape)} complex gain parameters...', verbose=verbose)
if (not freeze_model):
echo(f'Performing gradient descent on total of {int(np.sum([(fgr.shape[0] * fgr.shape[1]) for fgr in fg_r]))} complex foreground parameters', verbose=verbose)
echo(f"Foreground Parameters grouped into chunks of shape ((nvecs, ngrps): nbls) {[((str(fgr.shape[:2]) + ':') + str(dc.shape[1])) for (fgr, dc) in zip(fg_r, data_r)]}", verbose=verbose)
if (model_regularization == 'sum'):
prior_r_sum = tf.reduce_sum(tf.stack([tf.reduce_sum((sky_model_r[cnum] * wgts[cnum])) for cnum in range(nchunks)]))
prior_i_sum = tf.reduce_sum(tf.stack([tf.reduce_sum((sky_model_i[cnum] * wgts[cnum])) for cnum in range(nchunks)]))
def loss_function():
return mse_chunked_sum_regularized(g_r=g_r, g_i=g_i, fg_r=fg_r, fg_i=fg_i, fg_comps=fg_comps, nchunks=nchunks, data_r=data_r, data_i=data_i, wgts=wgts, ant0_inds=ant0_inds, ant1_inds=ant1_inds, dtype=dtype, prior_r_sum=prior_r_sum, prior_i_sum=prior_i_sum)
else:
def loss_function():
return mse_chunked(g_r=g_r, g_i=g_i, fg_r=fg_r, fg_i=fg_i, fg_comps=fg_comps, nchunks=nchunks, data_r=data_r, data_i=data_i, wgts=wgts, ant0_inds=ant0_inds, ant1_inds=ant1_inds, dtype=dtype)
def train_step_code():
with tf.GradientTape() as tape:
loss = loss_function()
grads = tape.gradient(loss, vars)
opt.apply_gradients(zip(grads, vars))
return loss
if graph_mode:
@tf.function(**graph_args_dict)
def train_step():
return train_step_code()
else:
def train_step():
return train_step_code()
if (n_profile_steps > 0):
echo(f'{datetime.datetime.now()} Profiling with {n_profile_steps}. And writing output to {profile_log_dir}...')
tf.profiler.experimental.start(profile_log_dir)
for step in PBARS[notebook_progressbar](range(n_profile_steps)):
with tf.profiler.experimental.Trace('train', step_num=step):
train_step()
tf.profiler.experimental.stop()
echo(f'''{datetime.datetime.now()} Building Computational Graph...
''', verbose=verbose)
loss = train_step()
echo(f'''{datetime.datetime.now()} Performing Gradient Descent. Initial MSE of {loss:.2e}...
''', verbose=verbose)
for step in PBARS[notebook_progressbar](range(maxsteps)):
loss = train_step()
fit_history['loss'].append(loss.numpy())
if (use_min and (fit_history['loss'][(- 1)] < min_loss)):
min_loss = fit_history['loss'][(- 1)]
g_r_opt = g_r.value()
g_i_opt = g_i.value()
if (not freeze_model):
fg_r_opt = [fgr.value() for fgr in fg_r]
fg_i_opt = [fgi.value() for fgi in fg_i]
if ((step >= 1) and (np.abs((fit_history['loss'][(- 1)] - fit_history['loss'][(- 2)])) < tol)):
echo(f'''Tolerance threshold met with delta of {np.abs((fit_history['loss'][(- 1)] - fit_history['loss'][(- 2)])):.2e}. Terminating...
''', verbose=verbose)
break
if (not use_min):
min_loss = fit_history['loss'][(- 1)]
g_r_opt = g_r.value()
g_i_opt = g_i.value()
if (not freeze_model):
fg_r_opt = [fgr.value() for fgr in fg_r]
fg_i_opt = [fgi.value() for fgi in fg_i]
else:
fg_r_opt = fg_r
fg_i_opt = fg_i
echo(f'''{datetime.datetime.now()} Finished Gradient Descent. MSE of {min_loss:.2e}...
''', verbose=verbose)
return (g_r_opt, g_i_opt, fg_r_opt, fg_i_opt, fit_history) | 4,280,224,059,098,685,400 | Run optimization loop to fit gains and foreground components.
Parameters
----------
g_r: tf.Tensor object.
tf.Tensor object holding real parts of gains.
g_i: tf.Tensor object.
tf.Tensor object holding imag parts of gains.
fg_r: list
list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
tf.Tensor object holding foreground coeffs.
fg_i: list
list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
tf.Tensor object holding imag coeffs.
data_r: list
list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
real part of data to fit.
data_i: list
list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
imag part of data to fit.
wgts: list
list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
fg_comps: list:
list of tf.Tensor objects. Each has shape (nvecs, ngrps, nbls, nfreqs)
represents vectors to be used in modeling visibilities.
corr_inds: list
list of list of lists of 2-tuples. Hierarchy of lists is
chunk
group
baseline - (int 2-tuple)
use_min: bool, optional
if True, use the value that minimizes the loss function
regardless of where optimization loop ended up
(prevents overshooting due to excess momentum)
tol: float, optional
halt optimization loop once the loss changes by less than this value.
default is 1e-14
maxsteps: int, optional
maximum number of opt.minimize calls before halting.
default is 10000
optimizer: string
Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
default is 'Adamax'
freeze_model: bool, optional
Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
with sky_model as the model (but projected onto the foreground basis vectors).
default is False.
verbose: bool, optional
lots of text output
default is False.
notebook_progressbar: bool, optional
use progress bar optimized for notebook output.
default is False.
graph_mode: bool, optional
if True, compile gradient update step in graph mode to speed up
runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
it actually increases runtime by a similar factor.
n_profile_steps: bool, optional
number of steps to run profiling on
default is 0.
profile_log_dir: str, optional
directory to save profile logs to
default is './logdir'
sky_model_r: list of tf.Tensor objects, optional
chunked tensors containing model in same format as data_r
sky_model_i: list of tf.Tensor objects, optional
chunked tensors containing model in the same format as data_i
model_regularization: str, optional
type of model regularization to perform. Currently support "sum"
where the sums of real and imaginary parts (across all bls and freqs)
are constrained to be the same as the sum of real and imag parts
of data.
opt_kwargs: kwarg dict
additional kwargs for tf.opt.Optimizer(). See tensorflow docs.
Returns
-------
g_r_opt: tf.Tensor object
real part of optimized gains.
g_i_opt: tf.Tensor object
imag part of optimized gains.
fg_r_opt: tf.Tensor object
real part of foreground coeffs.
fg_i_opt: tf.Tensor object.
imag part of optimized foreground coeffs.
fit_history: dict
dictionary containing fit history for each time-step and polarization in the data with fields:
'loss_history': list of values of the loss function in each minimization iteration. | calamity/calibration.py | fit_gains_and_foregrounds | aewallwi/calamity | python | def fit_gains_and_foregrounds(g_r, g_i, fg_r, fg_i, data_r, data_i, wgts, fg_comps, corr_inds, use_min=False, tol=1e-14, maxsteps=10000, optimizer='Adamax', freeze_model=False, verbose=False, notebook_progressbar=False, dtype=np.float32, graph_mode=False, n_profile_steps=0, profile_log_dir='./logdir', sky_model_r=None, sky_model_i=None, model_regularization=None, graph_args_dict=None, **opt_kwargs):
'Run optimization loop to fit gains and foreground components.\n\n Parameters\n ----------\n g_r: tf.Tensor object.\n tf.Tensor object holding real parts of gains.\n g_i: tf.Tensor object.\n tf.Tensor object holding imag parts of gains.\n fg_r: list\n list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)\n tf.Tensor object holding foreground coeffs.\n fg_i: list\n list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)\n tf.Tensor object holding imag coeffs.\n data_r: list\n list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)\n real part of data to fit.\n data_i: list\n list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)\n imag part of data to fit.\n wgts: list\n list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)\n fg_comps: list:\n list of tf.Tensor objects. Each has shape (nvecs, ngrps, nbls, nfreqs)\n represents vectors to be used in modeling visibilities.\n corr_inds: list\n list of list of lists of 2-tuples. Hierarchy of lists is\n chunk\n group\n baseline - (int 2-tuple)\n use_min: bool, optional\n if True, use the value that minimizes the loss function\n regardless of where optimization loop ended up\n (prevents overshooting due to excess momentum)\n tol: float, optional\n halt optimization loop once the loss changes by less then this value.\n default is 1e-14\n maxsteps: int, optional\n maximum number of opt.minimize calls before halting.\n default is 10000\n optimizer: string\n Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in\n https://www.tensorflow.org/api_docs/python/tf/keras/optimizers\n default is \'Adamax\'\n freeze_model: bool, optional\n Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration\n with sky_model as the model (but projected onto the foreground basis vectors).\n default is False.\n verbose: bool, optional\n lots of text output\n default is False.\n notebook_progressbar: bool, optional\n use progress bar optimized for notebook output.\n default is False.\n graph_mode: bool, optional\n if True, compile gradient update step in graph mode to speed up\n runtime by ~2-3x. I\'ve found that this helps on CPUs but on GPUs\n it actually increases runtime by a similar factor.\n n_profile_steps: bool, optional\n number of steps to run profiling on\n default is 0.\n profile_log_dir: str, optional\n directory to save profile logs to\n default is \'./logdir\'\n sky_model_r: list of tf.Tensor objects, optional\n chunked tensors containing model in same format as data_r\n sky_model_i: list of tf.Tensor objects, optional\n chunked tensors containing model in the same format as data_i\n model_regularization: str, optional\n type of model regularization to perform. Currently support "sum"\n where the sums of real and imaginary parts (across all bls and freqs)\n are constrained to be the same as the sum of real and imag parts\n of data.\n opt_kwargs: kwarg dict\n additional kwargs for tf.opt.Optimizer(). See tensorflow docs.\n\n Returns\n -------\n g_r_opt: tf.Tensor object\n real part of optimized gains.\n g_i_opt: tf.Tensor object\n imag part of optimized gains.\n fg_r_opt: tf.Tensor object\n real part of foreground coeffs.\n fg_i_opt: tf.Tensor object.\n imag part of optimized foreground coeffs.\n fit_history: dict\n dictionary containing fit history for each time-step and polarization in the data with fields:\n \'loss_history\': list of values of the loss function in each minimization iteration.\n '
if (graph_args_dict is None):
graph_args_dict = {}
echo(f'Using {str(dtype)} precision.')
echo(f'{datetime.datetime.now()} Provided the following opt_kwargs')
for k in opt_kwargs:
echo(f'{k}: {opt_kwargs[k]}')
opt = OPTIMIZERS[optimizer](**opt_kwargs)
fit_history = {'loss': []}
min_loss = 9e+99
nants = g_r.shape[0]
nfreqs = g_r.shape[1]
ant0_inds = []
ant1_inds = []
nchunks = len(fg_comps)
for cnum in range(nchunks):
ant0_chunk = []
ant1_chunk = []
ngrps = len(corr_inds[cnum])
for gnum in range(ngrps):
ant0_grp = []
ant1_grp = []
for cpair in corr_inds[cnum][gnum]:
ant0_grp.append(cpair[0])
ant1_grp.append(cpair[1])
ant0_chunk.append(ant0_grp)
ant1_chunk.append(ant1_grp)
ant0_inds.append(ant0_chunk)
ant1_inds.append(ant1_chunk)
g_r = tf.Variable(g_r)
g_i = tf.Variable(g_i)
if (not freeze_model):
fg_r = [tf.Variable(fgr) for fgr in fg_r]
fg_i = [tf.Variable(fgi) for fgi in fg_i]
vars = (([g_r, g_i] + fg_r) + fg_i)
else:
vars = [g_r, g_i]
echo(f'{datetime.datetime.now()} Performing gradient descent on {np.prod(g_r.shape)} complex gain parameters...', verbose=verbose)
if (not freeze_model):
echo(f'Performing gradient descent on total of {int(np.sum([(fgr.shape[0] * fgr.shape[1]) for fgr in fg_r]))} complex foreground parameters', verbose=verbose)
echo(f"Foreground Parameters grouped into chunks of shape ((nvecs, ngrps): nbls) {[((str(fgr.shape[:2]) + ':') + str(dc.shape[1])) for (fgr, dc) in zip(fg_r, data_r)]}", verbose=verbose)
if (model_regularization == 'sum'):
prior_r_sum = tf.reduce_sum(tf.stack([tf.reduce_sum((sky_model_r[cnum] * wgts[cnum])) for cnum in range(nchunks)]))
prior_i_sum = tf.reduce_sum(tf.stack([tf.reduce_sum((sky_model_i[cnum] * wgts[cnum])) for cnum in range(nchunks)]))
def loss_function():
return mse_chunked_sum_regularized(g_r=g_r, g_i=g_i, fg_r=fg_r, fg_i=fg_i, fg_comps=fg_comps, nchunks=nchunks, data_r=data_r, data_i=data_i, wgts=wgts, ant0_inds=ant0_inds, ant1_inds=ant1_inds, dtype=dtype, prior_r_sum=prior_r_sum, prior_i_sum=prior_i_sum)
else:
def loss_function():
return mse_chunked(g_r=g_r, g_i=g_i, fg_r=fg_r, fg_i=fg_i, fg_comps=fg_comps, nchunks=nchunks, data_r=data_r, data_i=data_i, wgts=wgts, ant0_inds=ant0_inds, ant1_inds=ant1_inds, dtype=dtype)
def train_step_code():
with tf.GradientTape() as tape:
loss = loss_function()
grads = tape.gradient(loss, vars)
opt.apply_gradients(zip(grads, vars))
return loss
if graph_mode:
@tf.function(**graph_args_dict)
def train_step():
return train_step_code()
else:
def train_step():
return train_step_code()
if (n_profile_steps > 0):
echo(f'{datetime.datetime.now()} Profiling with {n_profile_steps}. And writing output to {profile_log_dir}...')
tf.profiler.experimental.start(profile_log_dir)
for step in PBARS[notebook_progressbar](range(n_profile_steps)):
with tf.profiler.experimental.Trace('train', step_num=step):
train_step()
tf.profiler.experimental.stop()
echo(f'{datetime.datetime.now()} Building Computational Graph...
', verbose=verbose)
loss = train_step()
echo(f'{datetime.datetime.now()} Performing Gradient Descent. Initial MSE of {loss:.2e}...
', verbose=verbose)
for step in PBARS[notebook_progressbar](range(maxsteps)):
loss = train_step()
fit_history['loss'].append(loss.numpy())
if (use_min and (fit_history['loss'][(- 1)] < min_loss)):
min_loss = fit_history['loss'][(- 1)]
g_r_opt = g_r.value()
g_i_opt = g_i.value()
if (not freeze_model):
fg_r_opt = [fgr.value() for fgr in fg_r]
fg_i_opt = [fgi.value() for fgi in fg_i]
if ((step >= 1) and (np.abs((fit_history['loss'][(- 1)] - fit_history['loss'][(- 2)])) < tol)):
echo(f'Tolerance threshold met with delta of {np.abs((fit_history['loss'][(- 1)] - fit_history['loss'][(- 2)])):.2e}. Terminating...
', verbose=verbose)
break
if (not use_min):
min_loss = fit_history['loss'][(- 1)]
g_r_opt = g_r.value()
g_i_opt = g_i.value()
if (not freeze_model):
fg_r_opt = [fgr.value() for fgr in fg_r]
fg_i_opt = [fgi.value() for fgi in fg_i]
else:
fg_r_opt = fg_r
fg_i_opt = fg_i
echo(f'{datetime.datetime.now()} Finished Gradient Descent. MSE of {min_loss:.2e}...
', verbose=verbose)
return (g_r_opt, g_i_opt, fg_r_opt, fg_i_opt, fit_history) |
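A hedged toy sketch of the optimization entry point on synthetic single-chunk tensors. It assumes the chunked loss helpers defined elsewhere in this module (mse_chunked) accept these shapes, and the import path is an assumption; real inputs would normally come from tensorize_data, tensorize_gains, and tensorize_fg_coeffs as in calibrate_and_model_tensor.
# Toy sketch: one chunk, one group, one baseline, synthetic stand-in tensors.
import numpy as np
import tensorflow as tf
from calamity import calibration

nants, nfreqs, nvecs = 2, 8, 3
g_r = tf.ones((nants, nfreqs), dtype=np.float32)
g_i = tf.zeros((nants, nfreqs), dtype=np.float32)
fg_comps = [tf.random.normal((nvecs, 1, 1, nfreqs))]    # (nvecs, ngrps, nbls, nfreqs)
fg_r = [tf.random.normal((nvecs, 1, 1, 1))]
fg_i = [tf.random.normal((nvecs, 1, 1, 1))]
data_r = [tf.random.normal((1, 1, nfreqs))]             # (ngrps, nbls, nfreqs)
data_i = [tf.random.normal((1, 1, nfreqs))]
wgts = [tf.ones((1, 1, nfreqs)) / nfreqs]
corr_inds = [[[(0, 1)]]]                                # chunk -> group -> baseline

g_r_opt, g_i_opt, fg_r_opt, fg_i_opt, history = calibration.fit_gains_and_foregrounds(
    g_r=g_r, g_i=g_i, fg_r=fg_r, fg_i=fg_i,
    data_r=data_r, data_i=data_i, wgts=wgts,
    fg_comps=fg_comps, corr_inds=corr_inds,
    maxsteps=100, tol=1e-10, verbose=False,
)
print(len(history["loss"]))                             # number of iterations actually run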
def insert_model_into_uvdata_tensor(uvdata, time, polarization, ants_map, red_grps, model_r, model_i, scale_factor=1.0):
'Insert fitted tensor values back into uvdata object for tensor mode.\n\n Parameters\n ----------\n uvdata: UVData object\n uvdata object to insert model data into.\n time: float\n JD of time to insert.\n polarization: str\n polarization to insert.\n ants_map: dict mapping integers to integers\n map between each antenna number to a unique index between 0 and Nants_data\n (typically the index of each antenna in ants_map)\n red_grps: list of lists of int 2-tuples\n a list of lists of 2-tuples where all antenna pairs within each sublist\n are redundant with eachother. Assumes that conjugates are correctly taken.\n model_r: np.ndarray\n an Nants_data x Nants_data x Nfreqs np.ndarray with real parts of data\n model_i: np.ndarray\n an Nants_data x Nants_data x Nfreqs np.ndarray with imag parts of model\n scale_factor: float, optional\n overall scaling factor to divide tensorized data by.\n default is 1.0\n\n Returns\n -------\n N/A: Modifies uvdata inplace.\n\n '
antpairs_data = uvdata.get_antpairs()
polnum = np.where((uvdata.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata.x_orientation)))[0][0]
for red_grp in red_grps:
for ap in red_grp:
(i, j) = (ants_map[ap[0]], ants_map[ap[1]])
if (ap in antpairs_data):
dinds = uvdata.antpair2ind(ap)
dinds = dinds[np.where(np.isclose(time, uvdata.time_array[dinds], atol=1e-07, rtol=0.0))[0][0]]
model = (model_r[(i, j)] + (1j * model_i[(i, j)]))
else:
dinds = uvdata.antpair2ind(ap[::(- 1)])
dinds = dinds[np.where(np.isclose(time, uvdata.time_array[dinds], atol=1e-07, rtol=0.0))[0][0]]
model = (model_r[(i, j)] - (1j * model_i[(i, j)]))
uvdata.data_array[dinds, 0, :, polnum] = (model * scale_factor) | -9,176,988,501,350,762,000 | Insert fitted tensor values back into uvdata object for tensor mode.
Parameters
----------
uvdata: UVData object
uvdata object to insert model data into.
time: float
JD of time to insert.
polarization: str
polarization to insert.
ants_map: dict mapping integers to integers
map between each antenna number to a unique index between 0 and Nants_data
(typically the index of each antenna in ants_map)
red_grps: list of lists of int 2-tuples
a list of lists of 2-tuples where all antenna pairs within each sublist
are redundant with each other. Assumes that conjugates are correctly taken.
model_r: np.ndarray
an Nants_data x Nants_data x Nfreqs np.ndarray with real parts of data
model_i: np.ndarray
an Nants_data x Nants_data x Nfreqs np.ndarray with imag parts of model
scale_factor: float, optional
overall scaling factor to divide tensorized data by.
default is 1.0
Returns
-------
N/A: Modifies uvdata inplace. | calamity/calibration.py | insert_model_into_uvdata_tensor | aewallwi/calamity | python | def insert_model_into_uvdata_tensor(uvdata, time, polarization, ants_map, red_grps, model_r, model_i, scale_factor=1.0):
'Insert fitted tensor values back into uvdata object for tensor mode.\n\n Parameters\n ----------\n uvdata: UVData object\n uvdata object to insert model data into.\n time: float\n JD of time to insert.\n polarization: str\n polarization to insert.\n ants_map: dict mapping integers to integers\n map between each antenna number to a unique index between 0 and Nants_data\n (typically the index of each antenna in ants_map)\n red_grps: list of lists of int 2-tuples\n a list of lists of 2-tuples where all antenna pairs within each sublist\n are redundant with eachother. Assumes that conjugates are correctly taken.\n model_r: np.ndarray\n an Nants_data x Nants_data x Nfreqs np.ndarray with real parts of data\n model_i: np.ndarray\n an Nants_data x Nants_data x Nfreqs np.ndarray with imag parts of model\n scale_factor: float, optional\n overall scaling factor to divide tensorized data by.\n default is 1.0\n\n Returns\n -------\n N/A: Modifies uvdata inplace.\n\n '
antpairs_data = uvdata.get_antpairs()
polnum = np.where((uvdata.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata.x_orientation)))[0][0]
for red_grp in red_grps:
for ap in red_grp:
(i, j) = (ants_map[ap[0]], ants_map[ap[1]])
if (ap in antpairs_data):
dinds = uvdata.antpair2ind(ap)
dinds = dinds[np.where(np.isclose(time, uvdata.time_array[dinds], atol=1e-07, rtol=0.0))[0][0]]
model = (model_r[(i, j)] + (1j * model_i[(i, j)]))
else:
dinds = uvdata.antpair2ind(ap[::(- 1)])
dinds = dinds[np.where(np.isclose(time, uvdata.time_array[dinds], atol=1e-07, rtol=0.0))[0][0]]
model = (model_r[(i, j)] - (1j * model_i[(i, j)]))
uvdata.data_array[dinds, 0, :, polnum] = (model * scale_factor) |
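A sketch of writing an (Nants, Nants, Nfreqs) model array back into a UVData object, treating every baseline as its own redundant group; the data file name, the "xx" polarization, and the import path are assumptions.
# Hypothetical sketch: insert a per-antenna-pair model array for one time/pol.
import numpy as np
from pyuvdata import UVData
from calamity import calibration

uvd = UVData()
uvd.read("data.uvh5")                                   # placeholder file
ants = np.unique(np.r_[uvd.ant_1_array, uvd.ant_2_array])
ants_map = {ant: i for i, ant in enumerate(ants)}
red_grps = [[ap] for ap in uvd.get_antpairs()]          # each baseline as its own group
nants = len(ants)
model_r = np.zeros((nants, nants, uvd.Nfreqs))          # zero model just for illustration
model_i = np.zeros((nants, nants, uvd.Nfreqs))
t0 = np.unique(uvd.time_array)[0]
calibration.insert_model_into_uvdata_tensor(uvd, t0, "xx", ants_map, red_grps, model_r, model_i)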
def insert_gains_into_uvcal(uvcal, time, polarization, gains_re, gains_im):
'Insert tensorized gains back into uvcal object\n\n Parameters\n ----------\n uvdata: UVData object\n uvdata object to insert model data into.\n time: float\n JD of time to insert.\n polarization: str\n polarization to insert.\n gains_re: dict with int keys and tf.Tensor object values\n dictionary mapping i antenna numbers to Nfreq 1d tf.Tensor object\n representing the real component of the complex gain for antenna i.\n gains_im: dict with int keys and tf.Tensor object values\n dictionary mapping j antenna numbers to Nfreq 1d tf.Tensor object\n representing the imag component of the complex gain for antenna j.\n\n Returns\n -------\n N/A: Modifies uvcal inplace.\n '
polnum = np.where((uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation)))[0][0]
gindt = np.where(np.isclose(uvcal.time_array, time, atol=1e-07, rtol=0.0))[0][0]
for ant_index in range(uvcal.Nants_data):
uvcal.gain_array[ant_index, 0, :, gindt, polnum] = (gains_re[ant_index].numpy() + (1j * gains_im[ant_index].numpy())) | 5,082,459,756,504,565,000 | Insert tensorized gains back into uvcal object
Parameters
----------
uvcal: UVCal object
UVCal object to insert gain solutions into.
time: float
JD of time to insert.
polarization: str
polarization to insert.
gains_re: dict with int keys and tf.Tensor object values
dictionary mapping i antenna numbers to Nfreq 1d tf.Tensor object
representing the real component of the complex gain for antenna i.
gains_im: dict with int keys and tf.Tensor object values
dictionary mapping j antenna numbers to Nfreq 1d tf.Tensor object
representing the imag component of the complex gain for antenna j.
Returns
-------
N/A: Modifies uvcal inplace. | calamity/calibration.py | insert_gains_into_uvcal | aewallwi/calamity | python | def insert_gains_into_uvcal(uvcal, time, polarization, gains_re, gains_im):
'Insert tensorized gains back into uvcal object\n\n Parameters\n ----------\n uvdata: UVData object\n uvdata object to insert model data into.\n time: float\n JD of time to insert.\n polarization: str\n polarization to insert.\n gains_re: dict with int keys and tf.Tensor object values\n dictionary mapping i antenna numbers to Nfreq 1d tf.Tensor object\n representing the real component of the complex gain for antenna i.\n gains_im: dict with int keys and tf.Tensor object values\n dictionary mapping j antenna numbers to Nfreq 1d tf.Tensor object\n representing the imag component of the complex gain for antenna j.\n\n Returns\n -------\n N/A: Modifies uvcal inplace.\n '
polnum = np.where((uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation)))[0][0]
gindt = np.where(np.isclose(uvcal.time_array, time, atol=1e-07, rtol=0.0))[0][0]
for ant_index in range(uvcal.Nants_data):
uvcal.gain_array[ant_index, 0, :, gindt, polnum] = (gains_re[ant_index].numpy() + (1j * gains_im[ant_index].numpy())) |
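A round-trip sketch pairing tensorize_gains with insert_gains_into_uvcal for one time and polarization, mirroring how calibrate_and_model_tensor uses them; the calfits file name and the import path are assumptions.
# Hypothetical round-trip sketch: tensorize gains, then write them back.
import numpy as np
from pyuvdata import UVCal
from calamity import calibration

gains = UVCal()
gains.read_calfits("gains.calfits")      # placeholder file
t0 = np.unique(gains.time_array)[0]
g_r, g_i = calibration.tensorize_gains(gains, polarization="xx", time=t0)
calibration.insert_gains_into_uvcal(gains, t0, "xx", gains_re=g_r, gains_im=g_i)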
def tensorize_fg_coeffs(data, wgts, fg_model_comps, notebook_progressbar=False, verbose=False):
'Initialize foreground coefficient tensors from uvdata and modeling component dictionaries.\n\n\n Parameters\n ----------\n data: list\n list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)\n representing data\n wgts: list\n list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)\n representing weights.\n fg_model_comps: list\n list of fg modeling tf.Tensor objects\n representing foreground modeling vectors.\n Each tensor is (nvecs, ngrps, nbls, nfreqs)\n see description in tensorize_fg_model_comps_dict\n docstring.\n notebook_progressbar: bool, optional\n use progress bar optimized for notebook output.\n default is False.\n verbose: bool, optional\n lots of text output\n default is False.\n Returns\n -------\n fg_coeffs_re: tf.Tensor object\n 1d tensor containing real parts of coeffs for each modeling vector.\n ordering is over foreground modeling vector per redundant group and then\n redundant group in the order of groups appearing in red_grps\n fg_coeffs_im: tf.Tensor object\n 1d tensor containing imag parts of coeffs for each modeling vector.\n ordering is over foreground modeling vector per redundant group and then\n redundant group in the order of groups appearing in red_grps\n '
echo(f'''{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...
''', verbose=verbose)
fg_coeffs = []
nchunks = len(data)
binary_wgts = [tf.convert_to_tensor((~ np.isclose(wgts[cnum].numpy(), 0.0)), dtype=wgts[cnum].dtype) for cnum in range(nchunks)]
for cnum in PBARS[notebook_progressbar](range(nchunks)):
fg_coeff_chunk = []
ngrps = data[cnum].shape[0]
ndata = (data[cnum].shape[1] * data[cnum].shape[2])
nvecs = fg_model_comps[cnum].shape[0]
for gnum in range(ngrps):
nonzero_rows = np.where(np.all(np.isclose(fg_model_comps[cnum][:, gnum].numpy().reshape(nvecs, ndata), 0.0), axis=1))[0]
if (len(nonzero_rows) > 0):
nvecs_nonzero = np.min(nonzero_rows)
else:
nvecs_nonzero = nvecs
fg_coeff_chunk.append(tf.reshape(tf.linalg.lstsq(tf.transpose(tf.reshape(fg_model_comps[cnum][:, gnum], (nvecs, ndata)))[:, :nvecs_nonzero], tf.reshape((data[cnum][gnum] * binary_wgts[cnum][gnum]), (ndata, 1))), (nvecs_nonzero,)))
fg_coeff_chunk[(- 1)] = tf.pad(fg_coeff_chunk[(- 1)], [(0, (nvecs - nvecs_nonzero))])
fg_coeff_chunk = tf.reshape(tf.transpose(tf.stack(fg_coeff_chunk)), (nvecs, ngrps, 1, 1))
fg_coeffs.append(fg_coeff_chunk)
echo(f'''{datetime.datetime.now()} Finished initial foreground coefficient guesses...
''', verbose=verbose)
return fg_coeffs | -1,908,258,831,102,641,400 | Initialize foreground coefficient tensors from uvdata and modeling component dictionaries.
Parameters
----------
data: list
list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
representing data
wgts: list
list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
representing weights.
fg_model_comps: list
list of fg modeling tf.Tensor objects
representing foreground modeling vectors.
Each tensor is (nvecs, ngrps, nbls, nfreqs)
see description in tensorize_fg_model_comps_dict
docstring.
notebook_progressbar: bool, optional
use progress bar optimized for notebook output.
default is False.
verbose: bool, optional
lots of text output
default is False.
Returns
-------
fg_coeffs: list
list of tf.Tensor objects, one per chunk, each with shape (nvecs, ngrps, 1, 1),
holding the initial least-squares foreground coefficients for each modeling vector,
ordered by foreground modeling vector per redundant group and then
redundant group in the order of groups appearing in red_grps | calamity/calibration.py | tensorize_fg_coeffs | aewallwi/calamity | python | def tensorize_fg_coeffs(data, wgts, fg_model_comps, notebook_progressbar=False, verbose=False):
'Initialize foreground coefficient tensors from uvdata and modeling component dictionaries.\n\n\n Parameters\n ----------\n data: list\n list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)\n representing data\n wgts: list\n list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)\n representing weights.\n fg_model_comps: list\n list of fg modeling tf.Tensor objects\n representing foreground modeling vectors.\n Each tensor is (nvecs, ngrps, nbls, nfreqs)\n see description in tensorize_fg_model_comps_dict\n docstring.\n notebook_progressbar: bool, optional\n use progress bar optimized for notebook output.\n default is False.\n verbose: bool, optional\n lots of text output\n default is False.\n Returns\n -------\n fg_coeffs_re: tf.Tensor object\n 1d tensor containing real parts of coeffs for each modeling vector.\n ordering is over foreground modeling vector per redundant group and then\n redundant group in the order of groups appearing in red_grps\n fg_coeffs_im: tf.Tensor object\n 1d tensor containing imag parts of coeffs for each modeling vector.\n ordering is over foreground modeling vector per redundant group and then\n redundant group in the order of groups appearing in red_grps\n '
echo(f'{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...
', verbose=verbose)
fg_coeffs = []
nchunks = len(data)
binary_wgts = [tf.convert_to_tensor((~ np.isclose(wgts[cnum].numpy(), 0.0)), dtype=wgts[cnum].dtype) for cnum in range(nchunks)]
for cnum in PBARS[notebook_progressbar](range(nchunks)):
fg_coeff_chunk = []
ngrps = data[cnum].shape[0]
ndata = (data[cnum].shape[1] * data[cnum].shape[2])
nvecs = fg_model_comps[cnum].shape[0]
for gnum in range(ngrps):
nonzero_rows = np.where(np.all(np.isclose(fg_model_comps[cnum][:, gnum].numpy().reshape(nvecs, ndata), 0.0), axis=1))[0]
if (len(nonzero_rows) > 0):
nvecs_nonzero = np.min(nonzero_rows)
else:
nvecs_nonzero = nvecs
fg_coeff_chunk.append(tf.reshape(tf.linalg.lstsq(tf.transpose(tf.reshape(fg_model_comps[cnum][:, gnum], (nvecs, ndata)))[:, :nvecs_nonzero], tf.reshape((data[cnum][gnum] * binary_wgts[cnum][gnum]), (ndata, 1))), (nvecs_nonzero,)))
fg_coeff_chunk[(- 1)] = tf.pad(fg_coeff_chunk[(- 1)], [(0, (nvecs - nvecs_nonzero))])
fg_coeff_chunk = tf.reshape(tf.transpose(tf.stack(fg_coeff_chunk)), (nvecs, ngrps, 1, 1))
fg_coeffs.append(fg_coeff_chunk)
echo(f'{datetime.datetime.now()} Finished initial foreground coefficient guesses...
', verbose=verbose)
return fg_coeffs |
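A toy sketch of the least-squares coefficient initialization on a single synthetic chunk, using the (nvecs, ngrps, nbls, nfreqs) shapes documented above; the import path is an assumption.
# Toy example: initialize coefficients for one synthetic chunk by linear least squares.
import tensorflow as tf
from calamity import calibration

ngrps, nbls, nfreqs, nvecs = 1, 2, 16, 5
fg_comps = [tf.random.normal((nvecs, ngrps, nbls, nfreqs))]
data = [tf.random.normal((ngrps, nbls, nfreqs))]
wgts = [tf.ones((ngrps, nbls, nfreqs))]
fg_coeffs = calibration.tensorize_fg_coeffs(data=data, wgts=wgts, fg_model_comps=fg_comps)
print(fg_coeffs[0].shape)                # (nvecs, ngrps, 1, 1) == (5, 1, 1, 1)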
def get_auto_weights(uvdata, delay_extent=25.0):
'\n inverse variance weights from interpolated autocorrelation data\n\n Parameters\n ----------\n uvdata: UVData object\n UVData object containing autocorrelation data to use for computing inverse noise weights.\n offset: float, optional\n Fit autocorrelation to delay components with this width.\n\n Returns\n -------\n data_weights: UVFlag object\n UFlag in flag-mode where flags contain original data flags and weights contain autocorr weights.\n '
dpss_components = modeling.yield_dpss_model_comps_bl_grp(0.0, uvdata.freq_array[0], offset=delay_extent)
data_weights = UVFlag(uvdata, mode='flag')
data_weights.weights_array = np.zeros(uvdata.data_array.shape)
auto_fit_dict = {}
bls = uvdata.get_antpairpols()
for bl in bls:
if (bl[0] == bl[1]):
d_wf = uvdata.get_data(bl)
w_wf = (~ uvdata.get_flags(bl))
auto_fit_dict[bl] = []
for (ds, fs) in zip(d_wf, w_wf):
nunflagged = np.count_nonzero(fs)
amat = tf.convert_to_tensor(dpss_components[fs])
dvec = tf.reshape(tf.convert_to_tensor(ds[fs].real), (nunflagged, 1))
model = (dpss_components @ tf.linalg.lstsq(amat, dvec).numpy().squeeze())
auto_fit_dict[bl].append(model)
auto_fit_dict[bl] = np.atleast_2d(np.asarray(auto_fit_dict[bl]))
for bl in bls:
smooth_weights = (1.0 / (auto_fit_dict[(bl[0], bl[0], bl[(- 1)])] * auto_fit_dict[(bl[1], bl[1], bl[(- 1)])]))
smooth_weights *= (~ uvdata.get_flags(bl))
dinds = data_weights.antpair2ind(*bl[:2])
polnum = np.where((data_weights.polarization_array == uvutils.polstr2num(bl[(- 1)], x_orientation=data_weights.x_orientation)))[0][0]
data_weights.weights_array[dinds, 0, :, polnum] = smooth_weights
return data_weights | -1,132,488,393,028,415,200 | inverse variance weights from interpolated autocorrelation data
Parameters
----------
uvdata: UVData object
UVData object containing autocorrelation data to use for computing inverse noise weights.
delay_extent: float, optional
Fit autocorrelation to delay components with this width.
Returns
-------
data_weights: UVFlag object
UVFlag in flag-mode where flags contain original data flags and weights contain autocorr weights. | calamity/calibration.py | get_auto_weights | aewallwi/calamity | python | def get_auto_weights(uvdata, delay_extent=25.0):
'\n inverse variance weights from interpolated autocorrelation data\n\n Parameters\n ----------\n uvdata: UVData object\n UVData object containing autocorrelation data to use for computing inverse noise weights.\n offset: float, optional\n Fit autocorrelation to delay components with this width.\n\n Returns\n -------\n data_weights: UVFlag object\n UFlag in flag-mode where flags contain original data flags and weights contain autocorr weights.\n '
dpss_components = modeling.yield_dpss_model_comps_bl_grp(0.0, uvdata.freq_array[0], offset=delay_extent)
data_weights = UVFlag(uvdata, mode='flag')
data_weights.weights_array = np.zeros(uvdata.data_array.shape)
auto_fit_dict = {}
bls = uvdata.get_antpairpols()
for bl in bls:
if (bl[0] == bl[1]):
d_wf = uvdata.get_data(bl)
w_wf = (~ uvdata.get_flags(bl))
auto_fit_dict[bl] = []
for (ds, fs) in zip(d_wf, w_wf):
nunflagged = np.count_nonzero(fs)
amat = tf.convert_to_tensor(dpss_components[fs])
dvec = tf.reshape(tf.convert_to_tensor(ds[fs].real), (nunflagged, 1))
model = (dpss_components @ tf.linalg.lstsq(amat, dvec).numpy().squeeze())
auto_fit_dict[bl].append(model)
auto_fit_dict[bl] = np.atleast_2d(np.asarray(auto_fit_dict[bl]))
for bl in bls:
smooth_weights = (1.0 / (auto_fit_dict[(bl[0], bl[0], bl[(- 1)])] * auto_fit_dict[(bl[1], bl[1], bl[(- 1)])]))
smooth_weights *= (~ uvdata.get_flags(bl))
dinds = data_weights.antpair2ind(*bl[:2])
polnum = np.where((data_weights.polarization_array == uvutils.polstr2num(bl[(- 1)], x_orientation=data_weights.x_orientation)))[0][0]
data_weights.weights_array[dinds, 0, :, polnum] = smooth_weights
return data_weights |
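A brief sketch of building inverse-variance weights from autocorrelations; the file name is a placeholder and the input is assumed to contain autocorrelation baselines. The import path is an assumption.
# Sketch: inverse-variance weights from DPSS-smoothed autocorrelations.
from pyuvdata import UVData
from calamity import calibration

uvd = UVData()
uvd.read("data_with_autos.uvh5")    # placeholder file; must include autocorrelations
auto_wgts = calibration.get_auto_weights(uvd, delay_extent=25.0)
# auto_wgts is a UVFlag object in flag mode; its weights_array holds the smooth weights.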
def calibrate_and_model_tensor(uvdata, fg_model_comps_dict, gains=None, freeze_model=False, optimizer='Adamax', tol=1e-14, maxsteps=10000, include_autos=False, verbose=False, sky_model=None, dtype=np.float32, use_min=False, use_redundancy=False, notebook_progressbar=False, correct_resid=False, correct_model=True, weights=None, nsamples_in_weights=True, graph_mode=False, grp_size_threshold=5, n_profile_steps=0, profile_log_dir='./logdir', model_regularization='sum', init_guesses_from_previous_time_step=False, skip_threshold=0.5, use_model_snr_weights=False, **opt_kwargs):
"Perform simultaneous calibration and foreground fitting using tensors.\n\n\n Parameters\n ----------\n uvdata: UVData object\n uvdata objet of data to be calibrated.\n fg_model_comps_dict: dictionary\n dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)\n in the first level, each tuple represents a 'modeling group' visibilities in each\n modeling group are represented by a set of basis vectors that span all baselines in that\n group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a\n 'redundant group' representing visibilities that we will represent with identical component coefficients\n each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling\n visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).\n values are real numpy arrays with size (Ngrp * Nfreqs) * Ncomponents\n gains: UVCal object\n UVCal with initial gain estimates.\n There many smart ways to obtain initial gain estimates\n but this is beyond the scope of calamity (for example, firstcal, logcal, sky-based cal).\n Users can determine initial gains with their favorite established cal algorithm.\n default is None -> start with unity gains.\n WARNING: At the present, the flags in gains are not propagated/used! Make sure flags in uvdata object!\n freeze_model: bool, optional\n Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration\n with sky_model as the model (but projected onto the foreground basis vectors).\n default is False.\n optimizer: string\n Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in\n https://www.tensorflow.org/api_docs/python/tf/keras/optimizers\n default is 'Adamax'\n tol: float, optional\n halting condition for optimizer loop. 
Stop loop when the change in the cost function falls\n below tol.\n default is 1e-14\n maxsteps: int, optional\n maximum number of opt.minimize calls before halting.\n default is 10000\n include_autos: bool, optional\n include autocorrelations in fitting.\n default is False.\n verbose: bool, optional\n generate lots of text.\n default is False.\n sky_model: UVData object, optional\n a sky-model to use for initial estimates of foreground coeffs and\n to set overall flux scale and phases.\n Note that this model is not used to obtain initial gain estimates.\n These must be provided through the gains argument.\n dtype: numpy dtype, optional\n the float precision to be used in tensorflow gradient descent.\n runtime scales roughly inversely linear with precision.\n default is np.float32\n use_min: bool, optional\n If True, use the set of parameters that determine minimum as the ML params\n If False, use the last set of parameters visited by the optimization loop.\n use_redundancy: bool, optional\n if true, solve for one set of foreground coeffs per redundant baseline group\n instead of per baseline.\n notebook_progressbar: bool, optional\n use progress bar optimized for notebook output.\n default is False.\n red_tol: float, optional\n tolerance for determining baselines redundant (meters)\n default is 1.0\n correct_resid: bool, optional\n if True, gain correct residual.\n default is False\n correct_model: bool, optional\n if True, gain correct model.\n default is False\n weights: UVFlag object, optional.\n UVFlag weights object containing weights to use for data fitting.\n default is None -> use nsamples * ~flags if nsamples_in_weights\n or ~flags if not nsamples_in_weights\n nsamples_in_weights: bool, optional\n If True and weights is None, generate weights proportional to nsamples.\n default is True.\n graph_mode: bool, optional\n if True, compile gradient update step in graph mode to speed up\n runtime by ~2-3x. I've found that this helps on CPUs but on GPUs\n it actually increases runtime by a similar factor.\n n_profile_steps: bool, optional\n number of steps to run profiling on\n default is 0.\n profile_log_dir: str, optional\n directory to save profile logs to\n default is './logdir'\n model_regularization: str, optional\n option to regularize model\n supported 'post_hoc', 'sum'\n default is 'post_hoc'\n which sets sum of amps equal and sum of phases equal.\n init_guesses_from_previous_time_step: bool, optional\n if True, then use foreground coeffs and gains from previous time-step to\n initialize gains for next time step.\n skip_threshold: float, optional\n if less then this fraction of data is unflagged on a particular poltime,\n flag the entire poltime.\n opt_kwargs: kwarg_dict\n kwargs for tf.optimizers\n\n Returns\n -------\n model: UVData object\n uvdata object containing model of the foregrounds\n resid: UVData object\n uvdata object containing resids which are the data minus\n the model with gains multiplied and then with the gains divided out.\n gains: UVCal object\n uvcal object containing estimates of the gain solutions. These solutions\n are not referenced to any sky model and are likely orders of\n fit_history:\n dictionary containing fit history with fields:\n 'loss_history': list of values of the loss function in each minimization iteration.\n "
antpairs_data = uvdata.get_antpairs()
if (not include_autos):
antpairs_data = set([ap for ap in antpairs_data if (ap[0] != ap[1])])
uvdata = uvdata.select(inplace=False, bls=[ap for ap in antpairs_data])
resid = copy.deepcopy(uvdata)
model = copy.deepcopy(uvdata)
model.data_array[:] = 0.0
model.flag_array[:] = False
red_grps = []
for fit_grp in fg_model_comps_dict.keys():
for red_grp in fit_grp:
red_grps.append(red_grp)
if (gains is None):
echo(f'''{datetime.datetime.now()} Gains are None. Initializing gains starting with unity...
''', verbose=verbose)
gains = cal_utils.blank_uvcal_from_uvdata(uvdata)
if ((sky_model is None) and (model_regularization is not None)):
echo(f'''{datetime.datetime.now()} Sky model is None. Initializing from data...
''', verbose=verbose)
sky_model = cal_utils.apply_gains(uvdata, gains)
else:
sky_model = sky_model.select(inplace=False, bls=[ap for ap in antpairs_data])
fit_history = {}
ants_map = {ant: i for (i, ant) in enumerate(gains.ant_array)}
(fg_model_comps, corr_inds) = tensorize_fg_model_comps_dict(fg_model_comps_dict=fg_model_comps_dict, ants_map=ants_map, dtype=dtype, nfreqs=sky_model.Nfreqs, verbose=verbose, notebook_progressbar=notebook_progressbar, use_redundancy=use_redundancy, grp_size_threshold=grp_size_threshold)
echo(f'''{datetime.datetime.now()} Finished Converting Foreground Modeling Components to Tensors...
''', verbose=verbose)
del fg_model_comps_dict
for (polnum, pol) in enumerate(uvdata.get_pols()):
echo(f'''{datetime.datetime.now()} Working on pol {pol}, {(polnum + 1)} of {uvdata.Npols}...
''', verbose=verbose)
fit_history_p = {}
first_time = True
for (time_index, time) in enumerate(np.unique(uvdata.time_array)):
echo(f'''{datetime.datetime.now()} Working on time {(time_index + 1)} of {uvdata.Ntimes}...
''', verbose=verbose)
bltsel = np.isclose(uvdata.time_array, time, atol=1e-07, rtol=0.0)
frac_unflagged = (np.count_nonzero((~ uvdata.flag_array[bltsel, 0, :, polnum])) / (uvdata.Nbls * uvdata.Nfreqs))
if (frac_unflagged >= skip_threshold):
rmsdata = np.sqrt(np.mean((np.abs(uvdata.data_array[bltsel, 0, :, polnum][(~ uvdata.flag_array[bltsel, 0, :, polnum])]) ** 2.0)))
echo(f'''{datetime.datetime.now()} Tensorizing data...
''', verbose=verbose)
(data_r, data_i, wgts) = tensorize_data(uvdata, corr_inds=corr_inds, ants_map=ants_map, polarization=pol, time=time, data_scale_factor=rmsdata, weights=weights, nsamples_in_weights=nsamples_in_weights, dtype=dtype)
if (sky_model is not None):
echo(f'''{datetime.datetime.now()} Tensorizing sky model...
''', verbose=verbose)
(sky_model_r, sky_model_i, _) = tensorize_data(sky_model, corr_inds=corr_inds, ants_map=ants_map, polarization=pol, time=time, data_scale_factor=rmsdata, weights=weights, dtype=dtype)
else:
(sky_model_r, sky_model_i) = (None, None)
if (first_time or (not init_guesses_from_previous_time_step)):
first_time = False
echo(f'''{datetime.datetime.now()} Tensorizing Gains...
''', verbose=verbose)
(g_r, g_i) = tensorize_gains(gains, dtype=dtype, time=time, polarization=pol)
echo(f'''{datetime.datetime.now()} Tensorizing Foreground coeffs...
''', verbose=verbose)
fg_r = tensorize_fg_coeffs(data=data_r, wgts=wgts, fg_model_comps=fg_model_comps, verbose=verbose, notebook_progressbar=notebook_progressbar)
fg_i = tensorize_fg_coeffs(data=data_i, wgts=wgts, fg_model_comps=fg_model_comps, verbose=verbose, notebook_progressbar=notebook_progressbar)
if use_model_snr_weights:
wgts_model = [fg_model(fgr, fgi, fgc) for (fgr, fgi, fgc) in zip(fg_r, fg_i, fg_model_comps)]
wgts = [((tf.square(wm[0]) + tf.square(wm[1])) * w) for (wm, w) in zip(wgts_model, wgts)]
del wgts_model
wgts_sum = np.sum([np.sum(w) for w in wgts])
wgts = [(w / wgts_sum) for w in wgts]
(g_r, g_i, fg_r, fg_i, fit_history_p[time_index]) = fit_gains_and_foregrounds(g_r=g_r, g_i=g_i, fg_r=fg_r, fg_i=fg_i, data_r=data_r, data_i=data_i, wgts=wgts, fg_comps=fg_model_comps, corr_inds=corr_inds, optimizer=optimizer, use_min=use_min, freeze_model=freeze_model, notebook_progressbar=notebook_progressbar, verbose=verbose, tol=tol, dtype=dtype, maxsteps=maxsteps, graph_mode=graph_mode, n_profile_steps=n_profile_steps, profile_log_dir=profile_log_dir, sky_model_r=sky_model_r, sky_model_i=sky_model_i, model_regularization=model_regularization, **opt_kwargs)
insert_model_into_uvdata_tensor(uvdata=model, time=time, polarization=pol, ants_map=ants_map, red_grps=red_grps, model_r=yield_fg_model_array(fg_model_comps=fg_model_comps, fg_coeffs=fg_r, corr_inds=corr_inds, nants=uvdata.Nants_data, nfreqs=uvdata.Nfreqs), model_i=yield_fg_model_array(fg_model_comps=fg_model_comps, fg_coeffs=fg_i, corr_inds=corr_inds, nants=uvdata.Nants_data, nfreqs=uvdata.Nfreqs), scale_factor=rmsdata)
insert_gains_into_uvcal(uvcal=gains, time=time, polarization=pol, gains_re=g_r, gains_im=g_i)
else:
echo(f'''{datetime.datetime.now()}: Only {(frac_unflagged * 100)}-percent of data unflagged. Skipping...
''', verbose=verbose)
flag_poltime(resid, time=time, polarization=pol)
flag_poltime(gains, time=time, polarization=pol)
flag_poltime(model, time=time, polarization=pol)
fit_history[polnum] = 'skipped!'
if ((not freeze_model) and (model_regularization == 'post_hoc') and np.any((~ model.flag_array[bltsel]))):
renormalize(uvdata_reference_model=sky_model, uvdata_deconv=model, gains=gains, polarization=pol, time=time, additional_flags=uvdata.flag_array)
fit_history[polnum] = fit_history_p
model_with_gains = cal_utils.apply_gains(model, gains, inverse=True)
if (not correct_model):
model = model_with_gains
resid.data_array -= model_with_gains.data_array
resid.data_array[model_with_gains.flag_array] = 0.0
resid.data_array[uvdata.flag_array] = 0.0
if correct_resid:
resid = cal_utils.apply_gains(resid, gains)
return (model, resid, gains, fit_history) | 6,425,277,343,294,833,000 | Perform simultaneous calibration and foreground fitting using tensors.
Parameters
----------
uvdata: UVData object
uvdata object of data to be calibrated.
fg_model_comps_dict: dictionary
dictionary with keys that are tuples of tuples of 2-tuples (that's right, 3 levels)
in the first level, each tuple represents a 'modeling group' visibilities in each
modeling group are represented by a set of basis vectors that span all baselines in that
group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
'redundant group' representing visibilities that we will represent with identical component coefficients
each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accommodates modeling
visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
values are real numpy arrays with size (Ngrp * Nfreqs) * Ncomponents
gains: UVCal object
UVCal with initial gain estimates.
There are many smart ways to obtain initial gain estimates
but this is beyond the scope of calamity (for example, firstcal, logcal, sky-based cal).
Users can determine initial gains with their favorite established cal algorithm.
default is None -> start with unity gains.
WARNING: At present, the flags in gains are not propagated/used! Make sure the flags are set in the uvdata object!
freeze_model: bool, optional
Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
with sky_model as the model (but projected onto the foreground basis vectors).
default is False.
optimizer: string
Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
default is 'Adamax'
tol: float, optional
halting condition for optimizer loop. Stop loop when the change in the cost function falls
below tol.
default is 1e-14
maxsteps: int, optional
maximum number of opt.minimize calls before halting.
default is 10000
include_autos: bool, optional
include autocorrelations in fitting.
default is False.
verbose: bool, optional
generate lots of text.
default is False.
sky_model: UVData object, optional
a sky-model to use for initial estimates of foreground coeffs and
to set overall flux scale and phases.
Note that this model is not used to obtain initial gain estimates.
These must be provided through the gains argument.
dtype: numpy dtype, optional
the float precision to be used in tensorflow gradient descent.
runtime scales roughly inversely linear with precision.
default is np.float32
use_min: bool, optional
If True, use the set of parameters that determine minimum as the ML params
If False, use the last set of parameters visited by the optimization loop.
use_redundancy: bool, optional
if true, solve for one set of foreground coeffs per redundant baseline group
instead of per baseline.
notebook_progressbar: bool, optional
use progress bar optimized for notebook output.
default is False.
red_tol: float, optional
tolerance for determining baselines redundant (meters)
default is 1.0
correct_resid: bool, optional
if True, gain correct residual.
default is False
correct_model: bool, optional
if True, gain correct model.
default is False
weights: UVFlag object, optional.
UVFlag weights object containing weights to use for data fitting.
default is None -> use nsamples * ~flags if nsamples_in_weights
or ~flags if not nsamples_in_weights
nsamples_in_weights: bool, optional
If True and weights is None, generate weights proportional to nsamples.
default is True.
graph_mode: bool, optional
if True, compile gradient update step in graph mode to speed up
runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
it actually increases runtime by a similar factor.
n_profile_steps: bool, optional
number of steps to run profiling on
default is 0.
profile_log_dir: str, optional
directory to save profile logs to
default is './logdir'
model_regularization: str, optional
option to regularize model
supported 'post_hoc', 'sum'
default is 'post_hoc'
which sets sum of amps equal and sum of phases equal.
init_guesses_from_previous_time_step: bool, optional
if True, then use foreground coeffs and gains from previous time-step to
initialize gains for next time step.
skip_threshold: float, optional
if less than this fraction of data is unflagged on a particular poltime,
flag the entire poltime.
opt_kwargs: kwarg_dict
kwargs for tf.optimizers
Returns
-------
model: UVData object
uvdata object containing model of the foregrounds
resid: UVData object
uvdata object containing resids which are the data minus
the model with gains multiplied and then with the gains divided out.
gains: UVCal object
uvcal object containing estimates of the gain solutions. These solutions
are not referenced to any sky model and are likely orders of
fit_history:
dictionary containing fit history with fields:
'loss_history': list of values of the loss function in each minimization iteration. | calamity/calibration.py | calibrate_and_model_tensor | aewallwi/calamity | python | def calibrate_and_model_tensor(uvdata, fg_model_comps_dict, gains=None, freeze_model=False, optimizer='Adamax', tol=1e-14, maxsteps=10000, include_autos=False, verbose=False, sky_model=None, dtype=np.float32, use_min=False, use_redundancy=False, notebook_progressbar=False, correct_resid=False, correct_model=True, weights=None, nsamples_in_weights=True, graph_mode=False, grp_size_threshold=5, n_profile_steps=0, profile_log_dir='./logdir', model_regularization='sum', init_guesses_from_previous_time_step=False, skip_threshold=0.5, use_model_snr_weights=False, **opt_kwargs):
"Perform simultaneous calibration and foreground fitting using tensors.\n\n\n Parameters\n ----------\n uvdata: UVData object\n uvdata objet of data to be calibrated.\n fg_model_comps_dict: dictionary\n dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)\n in the first level, each tuple represents a 'modeling group' visibilities in each\n modeling group are represented by a set of basis vectors that span all baselines in that\n group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a\n 'redundant group' representing visibilities that we will represent with identical component coefficients\n each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling\n visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).\n values are real numpy arrays with size (Ngrp * Nfreqs) * Ncomponents\n gains: UVCal object\n UVCal with initial gain estimates.\n There many smart ways to obtain initial gain estimates\n but this is beyond the scope of calamity (for example, firstcal, logcal, sky-based cal).\n Users can determine initial gains with their favorite established cal algorithm.\n default is None -> start with unity gains.\n WARNING: At the present, the flags in gains are not propagated/used! Make sure flags in uvdata object!\n freeze_model: bool, optional\n Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration\n with sky_model as the model (but projected onto the foreground basis vectors).\n default is False.\n optimizer: string\n Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in\n https://www.tensorflow.org/api_docs/python/tf/keras/optimizers\n default is 'Adamax'\n tol: float, optional\n halting condition for optimizer loop. 
Stop loop when the change in the cost function falls\n below tol.\n default is 1e-14\n maxsteps: int, optional\n maximum number of opt.minimize calls before halting.\n default is 10000\n include_autos: bool, optional\n include autocorrelations in fitting.\n default is False.\n verbose: bool, optional\n generate lots of text.\n default is False.\n sky_model: UVData object, optional\n a sky-model to use for initial estimates of foreground coeffs and\n to set overall flux scale and phases.\n Note that this model is not used to obtain initial gain estimates.\n These must be provided through the gains argument.\n dtype: numpy dtype, optional\n the float precision to be used in tensorflow gradient descent.\n runtime scales roughly inversely linear with precision.\n default is np.float32\n use_min: bool, optional\n If True, use the set of parameters that determine minimum as the ML params\n If False, use the last set of parameters visited by the optimization loop.\n use_redundancy: bool, optional\n if true, solve for one set of foreground coeffs per redundant baseline group\n instead of per baseline.\n notebook_progressbar: bool, optional\n use progress bar optimized for notebook output.\n default is False.\n red_tol: float, optional\n tolerance for determining baselines redundant (meters)\n default is 1.0\n correct_resid: bool, optional\n if True, gain correct residual.\n default is False\n correct_model: bool, optional\n if True, gain correct model.\n default is False\n weights: UVFlag object, optional.\n UVFlag weights object containing weights to use for data fitting.\n default is None -> use nsamples * ~flags if nsamples_in_weights\n or ~flags if not nsamples_in_weights\n nsamples_in_weights: bool, optional\n If True and weights is None, generate weights proportional to nsamples.\n default is True.\n graph_mode: bool, optional\n if True, compile gradient update step in graph mode to speed up\n runtime by ~2-3x. I've found that this helps on CPUs but on GPUs\n it actually increases runtime by a similar factor.\n n_profile_steps: bool, optional\n number of steps to run profiling on\n default is 0.\n profile_log_dir: str, optional\n directory to save profile logs to\n default is './logdir'\n model_regularization: str, optional\n option to regularize model\n supported 'post_hoc', 'sum'\n default is 'post_hoc'\n which sets sum of amps equal and sum of phases equal.\n init_guesses_from_previous_time_step: bool, optional\n if True, then use foreground coeffs and gains from previous time-step to\n initialize gains for next time step.\n skip_threshold: float, optional\n if less then this fraction of data is unflagged on a particular poltime,\n flag the entire poltime.\n opt_kwargs: kwarg_dict\n kwargs for tf.optimizers\n\n Returns\n -------\n model: UVData object\n uvdata object containing model of the foregrounds\n resid: UVData object\n uvdata object containing resids which are the data minus\n the model with gains multiplied and then with the gains divided out.\n gains: UVCal object\n uvcal object containing estimates of the gain solutions. These solutions\n are not referenced to any sky model and are likely orders of\n fit_history:\n dictionary containing fit history with fields:\n 'loss_history': list of values of the loss function in each minimization iteration.\n "
antpairs_data = uvdata.get_antpairs()
if (not include_autos):
antpairs_data = set([ap for ap in antpairs_data if (ap[0] != ap[1])])
uvdata = uvdata.select(inplace=False, bls=[ap for ap in antpairs_data])
resid = copy.deepcopy(uvdata)
model = copy.deepcopy(uvdata)
model.data_array[:] = 0.0
model.flag_array[:] = False
red_grps = []
for fit_grp in fg_model_comps_dict.keys():
for red_grp in fit_grp:
red_grps.append(red_grp)
if (gains is None):
echo(f'{datetime.datetime.now()} Gains are None. Initializing gains starting with unity...
', verbose=verbose)
gains = cal_utils.blank_uvcal_from_uvdata(uvdata)
if ((sky_model is None) and (model_regularization is not None)):
echo(f'{datetime.datetime.now()} Sky model is None. Initializing from data...
', verbose=verbose)
sky_model = cal_utils.apply_gains(uvdata, gains)
else:
sky_model = sky_model.select(inplace=False, bls=[ap for ap in antpairs_data])
fit_history = {}
ants_map = {ant: i for (i, ant) in enumerate(gains.ant_array)}
(fg_model_comps, corr_inds) = tensorize_fg_model_comps_dict(fg_model_comps_dict=fg_model_comps_dict, ants_map=ants_map, dtype=dtype, nfreqs=sky_model.Nfreqs, verbose=verbose, notebook_progressbar=notebook_progressbar, use_redundancy=use_redundancy, grp_size_threshold=grp_size_threshold)
echo(f'{datetime.datetime.now()}Finished Converting Foreground Modeling Components to Tensors...
', verbose=verbose)
del fg_model_comps_dict
for (polnum, pol) in enumerate(uvdata.get_pols()):
echo(f'{datetime.datetime.now()} Working on pol {pol}, {(polnum + 1)} of {uvdata.Npols}...
', verbose=verbose)
fit_history_p = {}
first_time = True
for (time_index, time) in enumerate(np.unique(uvdata.time_array)):
echo(f'{datetime.datetime.now()} Working on time {(time_index + 1)} of {uvdata.Ntimes}...
', verbose=verbose)
bltsel = np.isclose(uvdata.time_array, time, atol=1e-07, rtol=0.0)
frac_unflagged = (np.count_nonzero((~ uvdata.flag_array[bltsel, 0, :, polnum])) / (uvdata.Nbls * uvdata.Nfreqs))
if (frac_unflagged >= skip_threshold):
rmsdata = np.sqrt(np.mean((np.abs(uvdata.data_array[bltsel, 0, :, polnum][(~ uvdata.flag_array[bltsel, 0, :, polnum])]) ** 2.0)))
echo(f'{datetime.datetime.now()} Tensorizing data...
', verbose=verbose)
(data_r, data_i, wgts) = tensorize_data(uvdata, corr_inds=corr_inds, ants_map=ants_map, polarization=pol, time=time, data_scale_factor=rmsdata, weights=weights, nsamples_in_weights=nsamples_in_weights, dtype=dtype)
if (sky_model is not None):
echo(f'{datetime.datetime.now()} Tensorizing sky model...
', verbose=verbose)
(sky_model_r, sky_model_i, _) = tensorize_data(sky_model, corr_inds=corr_inds, ants_map=ants_map, polarization=pol, time=time, data_scale_factor=rmsdata, weights=weights, dtype=dtype)
else:
(sky_model_r, sky_model_i) = (None, None)
if (first_time or (not init_guesses_from_previous_time_step)):
first_time = False
echo(f'{datetime.datetime.now()} Tensorizing Gains...
', verbose=verbose)
(g_r, g_i) = tensorize_gains(gains, dtype=dtype, time=time, polarization=pol)
echo(f'{datetime.datetime.now()} Tensorizing Foreground coeffs...
', verbose=verbose)
fg_r = tensorize_fg_coeffs(data=data_r, wgts=wgts, fg_model_comps=fg_model_comps, verbose=verbose, notebook_progressbar=notebook_progressbar)
fg_i = tensorize_fg_coeffs(data=data_i, wgts=wgts, fg_model_comps=fg_model_comps, verbose=verbose, notebook_progressbar=notebook_progressbar)
if use_model_snr_weights:
wgts_model = [fg_model(fgr, fgi, fgc) for (fgr, fgi, fgc) in zip(fg_r, fg_i, fg_model_comps)]
wgts = [((tf.square(wm[0]) + tf.square(wm[1])) * w) for (wm, w) in zip(wgts_model, wgts)]
del wgts_model
wgts_sum = np.sum([np.sum(w) for w in wgts])
wgts = [(w / wgts_sum) for w in wgts]
(g_r, g_i, fg_r, fg_i, fit_history_p[time_index]) = fit_gains_and_foregrounds(g_r=g_r, g_i=g_i, fg_r=fg_r, fg_i=fg_i, data_r=data_r, data_i=data_i, wgts=wgts, fg_comps=fg_model_comps, corr_inds=corr_inds, optimizer=optimizer, use_min=use_min, freeze_model=freeze_model, notebook_progressbar=notebook_progressbar, verbose=verbose, tol=tol, dtype=dtype, maxsteps=maxsteps, graph_mode=graph_mode, n_profile_steps=n_profile_steps, profile_log_dir=profile_log_dir, sky_model_r=sky_model_r, sky_model_i=sky_model_i, model_regularization=model_regularization, **opt_kwargs)
insert_model_into_uvdata_tensor(uvdata=model, time=time, polarization=pol, ants_map=ants_map, red_grps=red_grps, model_r=yield_fg_model_array(fg_model_comps=fg_model_comps, fg_coeffs=fg_r, corr_inds=corr_inds, nants=uvdata.Nants_data, nfreqs=uvdata.Nfreqs), model_i=yield_fg_model_array(fg_model_comps=fg_model_comps, fg_coeffs=fg_i, corr_inds=corr_inds, nants=uvdata.Nants_data, nfreqs=uvdata.Nfreqs), scale_factor=rmsdata)
insert_gains_into_uvcal(uvcal=gains, time=time, polarization=pol, gains_re=g_r, gains_im=g_i)
else:
echo(f'{datetime.datetime.now()}: Only {(frac_unflagged * 100)}-percent of data unflagged. Skipping...
', verbose=verbose)
flag_poltime(resid, time=time, polarization=pol)
flag_poltime(gains, time=time, polarization=pol)
flag_poltime(model, time=time, polarization=pol)
fit_history[polnum] = 'skipped!'
if ((not freeze_model) and (model_regularization == 'post_hoc') and np.any((~ model.flag_array[bltsel]))):
renormalize(uvdata_reference_model=sky_model, uvdata_deconv=model, gains=gains, polarization=pol, time=time, additional_flags=uvdata.flag_array)
fit_history[polnum] = fit_history_p
model_with_gains = cal_utils.apply_gains(model, gains, inverse=True)
if (not correct_model):
model = model_with_gains
resid.data_array -= model_with_gains.data_array
resid.data_array[model_with_gains.flag_array] = 0.0
resid.data_array[uvdata.flag_array] = 0.0
if correct_resid:
resid = cal_utils.apply_gains(resid, gains)
return (model, resid, gains, fit_history) |
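As an illustrative sketch of the fg_model_comps_dict key structure described above (one modeling group containing one redundant group containing one antenna pair), a caller might do something like the following. The import path, the input file name, and the identity basis are assumptions, not taken from the repository.

import numpy as np
from pyuvdata import UVData
from calamity import calibration  # assumed import path for calamity/calibration.py

uvd = UVData()
uvd.read("data.uvh5")  # hypothetical visibility file

# pick one cross-correlation baseline and model it on its own
ant1, ant2 = [ap for ap in uvd.get_antpairs() if ap[0] != ap[1]][0]
# three levels of tuples: modeling group -> redundant group -> 2-tuple antenna pair
fit_grp = (((ant1, ant2),),)
# basis vectors raveled by baseline then frequency; for a single baseline this is an
# (Nfreqs, Ncomponents) array -- an identity basis is used purely for illustration
fg_model_comps_dict = {fit_grp: np.eye(uvd.Nfreqs)}

model, resid, gains, fit_history = calibration.calibrate_and_model_tensor(
    uvdata=uvd, fg_model_comps_dict=fg_model_comps_dict, verbose=True
)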
def calibrate_and_model_mixed(uvdata, horizon=1.0, min_dly=0.0, offset=0.0, ant_dly=0.0, include_autos=False, verbose=False, red_tol=1.0, red_tol_freq=0.5, n_angle_bins=200, notebook_progressbar=False, use_redundancy=False, use_tensorflow_to_derive_modeling_comps=False, eigenval_cutoff=1e-10, dtype_matinv=np.float64, require_exact_angle_match=True, angle_match_tol=0.001, grp_size_threshold=5, model_comps_dict=None, save_dict_to=None, **fitting_kwargs):
"Simultaneously solve for gains and model foregrounds with a mix of DPSS vectors\n for baselines with no frequency redundancy and simple_cov components for\n groups of baselines that have some frequency redundancy.\n\n\n Parameters\n ----------\n uvdata: UVData object.\n dataset to calibrate and filter.\n horizon: float, optional\n fraction of baseline delay length to model with dpss modes\n unitless.\n default is 1.\n min_dly: float, optional\n minimum delay to model with dpss models.\n in units of ns.\n default is 0.\n offset: float optional\n offset off of horizon wedge to include in dpss delay range.\n in units of ns.\n default is 0.\n ant_dly: float, optional\n intrinsic chromaticity of each antenna element\n in units of ns.\n default is 0.\n include_autos: bool, optional\n if true, include autocorrelations in fitting.\n default is False.\n verbose: bool, optional\n lots of text output\n default is False.\n red_tol: float, optional\n tolerance for treating baselines as redundant (meters)\n default is 1.0\n red_tol_freq: float, optional\n tolerance for treating two baselines as having some\n frequency redundancy. When frequency redundancy exists, baselines\n will be modeled jointly.\n n_angle_bins: int, optional\n number of angular bins to use between -pi and pi to compare baselines\n default is 200\n notebook_progressbar: bool, optional\n if True, show graphical notebook progress bar that looks good in jupyter.\n default is False.\n use_redundancy: bool, optional\n If True, model all baselines within each redundant group with the same components\n If False, model each baseline within each redundant group with sepearate components.\n default is False.\n use_tensorflow_to_derive_modeling_comps: bool, optional\n Use tensorflow methods to derive multi-baseline modeling components.\n recommended if you have a GPU with enough memory to perform spectral decomposition\n of multi-baseline covariance matrices.\n eigenval_cutoff: float, optional\n threshold of eigenvectors to include in modeling components.\n dtype_matinv: numpy.dtype, optional\n data type to use for deriving modeling components.\n default is np.float64 (need higher precision for cov-mat like calculation)\n grp_size_threshold: int, optional\n groups with number of elements less then this value are split up into single baselines.\n default is 5.\n model_comps_dict: dict, optional\n dictionary mapping fitting groups to numpy.ndarray see modeling.yield_mixed_comps\n for more specifics.\n default is None -> compute fitting groups automatically.\n save_dict_to: str, optional\n save model_comps_dict to hdf5 container if True\n default is False.\n fitting_kwargs: kwarg dict\n additional kwargs for calibrate_and_model_tensor.\n see docstring of calibrate_and_model_tensor.\n\n Returns\n -------\n model: UVData object\n uvdata object containing DPSS model of intrinsic foregrounds.\n resid: UVData object\n uvdata object containing residuals after subtracting model times gains and applying gains.\n gains: UVCal object\n uvcal object containing fitted gains.\n fit_history:\n dictionary containing fit history for each time-step and polarization in the data with fields:\n 'loss_history': list of values of the loss function in each minimization iteration.\n "
(fitting_grps, blvecs, _, _) = modeling.get_uv_overlapping_grps_conjugated(uvdata, red_tol=red_tol, include_autos=include_autos, red_tol_freq=red_tol_freq, n_angle_bins=n_angle_bins, notebook_progressbar=notebook_progressbar, require_exact_angle_match=require_exact_angle_match, angle_match_tol=angle_match_tol)
if (model_comps_dict is None):
model_comps_dict = modeling.yield_mixed_comps(fitting_grps, blvecs, uvdata.freq_array[0], eigenval_cutoff=eigenval_cutoff, use_tensorflow=use_tensorflow_to_derive_modeling_comps, ant_dly=ant_dly, horizon=horizon, offset=offset, min_dly=min_dly, verbose=verbose, dtype=dtype_matinv, notebook_progressbar=notebook_progressbar, grp_size_threshold=grp_size_threshold)
if (save_dict_to is not None):
np.save(save_dict_to, model_comps_dict)
(model, resid, gains, fitted_info) = calibrate_and_model_tensor(uvdata=uvdata, fg_model_comps_dict=model_comps_dict, include_autos=include_autos, verbose=verbose, notebook_progressbar=notebook_progressbar, use_redundancy=use_redundancy, **fitting_kwargs)
return (model, resid, gains, fitted_info) | 2,490,183,869,147,370,000 | Simultaneously solve for gains and model foregrounds with a mix of DPSS vectors
for baselines with no frequency redundancy and simple_cov components for
groups of baselines that have some frequency redundancy.
Parameters
----------
uvdata: UVData object.
dataset to calibrate and filter.
horizon: float, optional
fraction of baseline delay length to model with dpss modes
unitless.
default is 1.
min_dly: float, optional
minimum delay to model with dpss models.
in units of ns.
default is 0.
offset: float optional
offset off of horizon wedge to include in dpss delay range.
in units of ns.
default is 0.
ant_dly: float, optional
intrinsic chromaticity of each antenna element
in units of ns.
default is 0.
include_autos: bool, optional
if true, include autocorrelations in fitting.
default is False.
verbose: bool, optional
lots of text output
default is False.
red_tol: float, optional
tolerance for treating baselines as redundant (meters)
default is 1.0
red_tol_freq: float, optional
tolerance for treating two baselines as having some
frequency redundancy. When frequency redundancy exists, baselines
will be modeled jointly.
n_angle_bins: int, optional
number of angular bins to use between -pi and pi to compare baselines
default is 200
notebook_progressbar: bool, optional
if True, show graphical notebook progress bar that looks good in jupyter.
default is False.
use_redundancy: bool, optional
If True, model all baselines within each redundant group with the same components
If False, model each baseline within each redundant group with separate components.
default is False.
use_tensorflow_to_derive_modeling_comps: bool, optional
Use tensorflow methods to derive multi-baseline modeling components.
recommended if you have a GPU with enough memory to perform spectral decomposition
of multi-baseline covariance matrices.
eigenval_cutoff: float, optional
threshold of eigenvectors to include in modeling components.
dtype_matinv: numpy.dtype, optional
data type to use for deriving modeling components.
default is np.float64 (need higher precision for cov-mat like calculation)
grp_size_threshold: int, optional
groups with number of elements less than this value are split up into single baselines.
default is 5.
model_comps_dict: dict, optional
dictionary mapping fitting groups to numpy.ndarray see modeling.yield_mixed_comps
for more specifics.
default is None -> compute fitting groups automatically.
save_dict_to: str, optional
if provided, save model_comps_dict to this path with np.save.
default is None -> do not save.
fitting_kwargs: kwarg dict
additional kwargs for calibrate_and_model_tensor.
see docstring of calibrate_and_model_tensor.
Returns
-------
model: UVData object
uvdata object containing DPSS model of intrinsic foregrounds.
resid: UVData object
uvdata object containing residuals after subtracting model times gains and applying gains.
gains: UVCal object
uvcal object containing fitted gains.
fit_history:
dictionary containing fit history for each time-step and polarization in the data with fields:
'loss_history': list of values of the loss function in each minimization iteration. | calamity/calibration.py | calibrate_and_model_mixed | aewallwi/calamity | python | def calibrate_and_model_mixed(uvdata, horizon=1.0, min_dly=0.0, offset=0.0, ant_dly=0.0, include_autos=False, verbose=False, red_tol=1.0, red_tol_freq=0.5, n_angle_bins=200, notebook_progressbar=False, use_redundancy=False, use_tensorflow_to_derive_modeling_comps=False, eigenval_cutoff=1e-10, dtype_matinv=np.float64, require_exact_angle_match=True, angle_match_tol=0.001, grp_size_threshold=5, model_comps_dict=None, save_dict_to=None, **fitting_kwargs):
"Simultaneously solve for gains and model foregrounds with a mix of DPSS vectors\n for baselines with no frequency redundancy and simple_cov components for\n groups of baselines that have some frequency redundancy.\n\n\n Parameters\n ----------\n uvdata: UVData object.\n dataset to calibrate and filter.\n horizon: float, optional\n fraction of baseline delay length to model with dpss modes\n unitless.\n default is 1.\n min_dly: float, optional\n minimum delay to model with dpss models.\n in units of ns.\n default is 0.\n offset: float optional\n offset off of horizon wedge to include in dpss delay range.\n in units of ns.\n default is 0.\n ant_dly: float, optional\n intrinsic chromaticity of each antenna element\n in units of ns.\n default is 0.\n include_autos: bool, optional\n if true, include autocorrelations in fitting.\n default is False.\n verbose: bool, optional\n lots of text output\n default is False.\n red_tol: float, optional\n tolerance for treating baselines as redundant (meters)\n default is 1.0\n red_tol_freq: float, optional\n tolerance for treating two baselines as having some\n frequency redundancy. When frequency redundancy exists, baselines\n will be modeled jointly.\n n_angle_bins: int, optional\n number of angular bins to use between -pi and pi to compare baselines\n default is 200\n notebook_progressbar: bool, optional\n if True, show graphical notebook progress bar that looks good in jupyter.\n default is False.\n use_redundancy: bool, optional\n If True, model all baselines within each redundant group with the same components\n If False, model each baseline within each redundant group with sepearate components.\n default is False.\n use_tensorflow_to_derive_modeling_comps: bool, optional\n Use tensorflow methods to derive multi-baseline modeling components.\n recommended if you have a GPU with enough memory to perform spectral decomposition\n of multi-baseline covariance matrices.\n eigenval_cutoff: float, optional\n threshold of eigenvectors to include in modeling components.\n dtype_matinv: numpy.dtype, optional\n data type to use for deriving modeling components.\n default is np.float64 (need higher precision for cov-mat like calculation)\n grp_size_threshold: int, optional\n groups with number of elements less then this value are split up into single baselines.\n default is 5.\n model_comps_dict: dict, optional\n dictionary mapping fitting groups to numpy.ndarray see modeling.yield_mixed_comps\n for more specifics.\n default is None -> compute fitting groups automatically.\n save_dict_to: str, optional\n save model_comps_dict to hdf5 container if True\n default is False.\n fitting_kwargs: kwarg dict\n additional kwargs for calibrate_and_model_tensor.\n see docstring of calibrate_and_model_tensor.\n\n Returns\n -------\n model: UVData object\n uvdata object containing DPSS model of intrinsic foregrounds.\n resid: UVData object\n uvdata object containing residuals after subtracting model times gains and applying gains.\n gains: UVCal object\n uvcal object containing fitted gains.\n fit_history:\n dictionary containing fit history for each time-step and polarization in the data with fields:\n 'loss_history': list of values of the loss function in each minimization iteration.\n "
(fitting_grps, blvecs, _, _) = modeling.get_uv_overlapping_grps_conjugated(uvdata, red_tol=red_tol, include_autos=include_autos, red_tol_freq=red_tol_freq, n_angle_bins=n_angle_bins, notebook_progressbar=notebook_progressbar, require_exact_angle_match=require_exact_angle_match, angle_match_tol=angle_match_tol)
if (model_comps_dict is None):
model_comps_dict = modeling.yield_mixed_comps(fitting_grps, blvecs, uvdata.freq_array[0], eigenval_cutoff=eigenval_cutoff, use_tensorflow=use_tensorflow_to_derive_modeling_comps, ant_dly=ant_dly, horizon=horizon, offset=offset, min_dly=min_dly, verbose=verbose, dtype=dtype_matinv, notebook_progressbar=notebook_progressbar, grp_size_threshold=grp_size_threshold)
if (save_dict_to is not None):
np.save(save_dict_to, model_comps_dict)
(model, resid, gains, fitted_info) = calibrate_and_model_tensor(uvdata=uvdata, fg_model_comps_dict=model_comps_dict, include_autos=include_autos, verbose=verbose, notebook_progressbar=notebook_progressbar, use_redundancy=use_redundancy, **fitting_kwargs)
return (model, resid, gains, fitted_info) |
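A minimal usage sketch for the mixed-basis fit above; the import path and file names are assumptions, and the keyword values simply echo the documented defaults.

from pyuvdata import UVData
from calamity import calibration  # assumed import path

uvd = UVData()
uvd.read("data.uvh5")  # hypothetical file
model, resid, gains, info = calibration.calibrate_and_model_mixed(
    uvd,
    red_tol=1.0,               # spatial redundancy tolerance (meters)
    red_tol_freq=0.5,          # tolerance for partial frequency redundancy
    save_dict_to="comps.npy",  # cache the derived modeling components via np.save
)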
def calibrate_and_model_dpss(uvdata, horizon=1.0, min_dly=0.0, offset=0.0, include_autos=False, verbose=False, red_tol=1.0, notebook_progressbar=False, fg_model_comps_dict=None, **fitting_kwargs):
"Simultaneously solve for gains and model foregrounds with DPSS vectors.\n\n Parameters\n ----------\n uvdata: UVData object.\n dataset to calibrate and filter.\n horizon: float, optional\n fraction of baseline delay length to model with dpss modes\n unitless.\n default is 1.\n min_dly: float, optional\n minimum delay to model with dpss models.\n in units of ns.\n default is 0.\n offset: float optional\n offset off of horizon wedge to include in dpss delay range.\n in units of ns.\n default is 0.\n include_autos: bool, optional\n if true, include autocorrelations in fitting.\n default is False.\n verbose: bool, optional\n lots of text output\n default is False.\n red_tol: float, optional\n tolerance for treating baselines as redundant (meters)\n default is 1.0\n notebook_progressbar: bool, optional\n use progress bar optimized for notebook output.\n default is False.\n fg_model_comps_dict: dict, optional\n dictionary containing precomputed foreground model components.\n Currently only supported if use_redundancy is False.\n fitting_kwargs: kwarg dict\n additional kwargs for calibrate_and_model_pbl.\n see docstring of calibrate_and_model_pbl.\n\n Returns\n -------\n model: UVData object\n uvdata object containing DPSS model of intrinsic foregrounds.\n resid: UVData object\n uvdata object containing residuals after subtracting model times gains and applying gains.\n gains: UVCal object\n uvcal object containing fitted gains.\n fit_history:\n dictionary containing fit history for each time-step and polarization in the data with fields:\n 'loss_history': list of values of the loss function in each minimization iteration.\n "
dpss_model_comps_dict = modeling.yield_pbl_dpss_model_comps(uvdata, horizon=horizon, min_dly=min_dly, offset=offset, include_autos=include_autos, red_tol=red_tol, notebook_progressbar=notebook_progressbar, verbose=verbose)
(model, resid, gains, fitted_info) = calibrate_and_model_tensor(uvdata=uvdata, fg_model_comps_dict=dpss_model_comps_dict, include_autos=include_autos, verbose=verbose, notebook_progressbar=notebook_progressbar, **fitting_kwargs)
return (model, resid, gains, fitted_info) | -4,381,372,808,634,719,000 | Simultaneously solve for gains and model foregrounds with DPSS vectors.
Parameters
----------
uvdata: UVData object.
dataset to calibrate and filter.
horizon: float, optional
fraction of baseline delay length to model with dpss modes
unitless.
default is 1.
min_dly: float, optional
minimum delay to model with dpss models.
in units of ns.
default is 0.
offset: float optional
offset off of horizon wedge to include in dpss delay range.
in units of ns.
default is 0.
include_autos: bool, optional
if true, include autocorrelations in fitting.
default is False.
verbose: bool, optional
lots of text output
default is False.
red_tol: float, optional
tolerance for treating baselines as redundant (meters)
default is 1.0
notebook_progressbar: bool, optional
use progress bar optimized for notebook output.
default is False.
fg_model_comps_dict: dict, optional
dictionary containing precomputed foreground model components.
Currently only supported if use_redundancy is False.
fitting_kwargs: kwarg dict
additional kwargs for calibrate_and_model_tensor.
see docstring of calibrate_and_model_tensor.
Returns
-------
model: UVData object
uvdata object containing DPSS model of intrinsic foregrounds.
resid: UVData object
uvdata object containing residuals after subtracting model times gains and applying gains.
gains: UVCal object
uvcal object containing fitted gains.
fit_history:
dictionary containing fit history for each time-step and polarization in the data with fields:
'loss_history': list of values of the loss function in each minimization iteration. | calamity/calibration.py | calibrate_and_model_dpss | aewallwi/calamity | python | def calibrate_and_model_dpss(uvdata, horizon=1.0, min_dly=0.0, offset=0.0, include_autos=False, verbose=False, red_tol=1.0, notebook_progressbar=False, fg_model_comps_dict=None, **fitting_kwargs):
"Simultaneously solve for gains and model foregrounds with DPSS vectors.\n\n Parameters\n ----------\n uvdata: UVData object.\n dataset to calibrate and filter.\n horizon: float, optional\n fraction of baseline delay length to model with dpss modes\n unitless.\n default is 1.\n min_dly: float, optional\n minimum delay to model with dpss models.\n in units of ns.\n default is 0.\n offset: float optional\n offset off of horizon wedge to include in dpss delay range.\n in units of ns.\n default is 0.\n include_autos: bool, optional\n if true, include autocorrelations in fitting.\n default is False.\n verbose: bool, optional\n lots of text output\n default is False.\n red_tol: float, optional\n tolerance for treating baselines as redundant (meters)\n default is 1.0\n notebook_progressbar: bool, optional\n use progress bar optimized for notebook output.\n default is False.\n fg_model_comps_dict: dict, optional\n dictionary containing precomputed foreground model components.\n Currently only supported if use_redundancy is False.\n fitting_kwargs: kwarg dict\n additional kwargs for calibrate_and_model_pbl.\n see docstring of calibrate_and_model_pbl.\n\n Returns\n -------\n model: UVData object\n uvdata object containing DPSS model of intrinsic foregrounds.\n resid: UVData object\n uvdata object containing residuals after subtracting model times gains and applying gains.\n gains: UVCal object\n uvcal object containing fitted gains.\n fit_history:\n dictionary containing fit history for each time-step and polarization in the data with fields:\n 'loss_history': list of values of the loss function in each minimization iteration.\n "
dpss_model_comps_dict = modeling.yield_pbl_dpss_model_comps(uvdata, horizon=horizon, min_dly=min_dly, offset=offset, include_autos=include_autos, red_tol=red_tol, notebook_progressbar=notebook_progressbar, verbose=verbose)
(model, resid, gains, fitted_info) = calibrate_and_model_tensor(uvdata=uvdata, fg_model_comps_dict=dpss_model_comps_dict, include_autos=include_autos, verbose=verbose, notebook_progressbar=notebook_progressbar, **fitting_kwargs)
return (model, resid, gains, fitted_info) |
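A minimal usage sketch, assuming the calamity.calibration import path and a .uvh5 input file; horizon/offset/min_dly set the DPSS delay range, and any extra kwargs fall through to the underlying tensor fit.

from pyuvdata import UVData
from calamity import calibration  # assumed import path

uvd = UVData()
uvd.read("data.uvh5")  # hypothetical file
model, resid, gains, info = calibration.calibrate_and_model_dpss(
    uvd, horizon=1.0, offset=0.0, min_dly=0.0, tol=1e-10, maxsteps=5000
)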
def read_calibrate_and_model_dpss(input_data_files, input_model_files=None, input_gain_files=None, resid_outfilename=None, gain_outfilename=None, model_outfilename=None, fitted_info_outfilename=None, x_orientation='east', clobber=False, bllen_min=0.0, bllen_max=np.inf, bl_ew_min=0.0, ex_ants=None, select_ants=None, gpu_index=None, gpu_memory_limit=None, precision=32, use_autocorrs_in_weights=False, **calibration_kwargs):
"\n Driver function for using calamity with DPSS modeling.\n\n Parameters\n ----------\n input_data_files: list of strings or UVData object.\n list of paths to input files to read in and calibrate.\n input_model_files: list of strings or UVData object, optional\n list of paths to model files for overal phase/amp reference.\n Default is None -> use input files as model for overall\n phase and amplitude calibration.\n input_gain_files: list of strings or UVCal object, optional\n list of paths to gain files to use as initial guesses for calibration.\n resid_outfilename: str, optional\n path for file to write residuals.\n default is None -> don't write out residuals.\n gain_outfilename: str, optional\n path to gain calfits to write fitted gains.\n default is None -> don't write out gains.\n model_outfilename, str, optional\n path to file to write model output.\n default is None -> Don't write model.\n fitting_info_outfilename, str, optional\n string to pickel fitting info to.\n n_output_chunks: int optional\n split up outputs into n_output_chunks chunked by time.\n default is None -> write single output file.\n bllen_min: float, optional\n select all baselines with length greater then this value [meters].\n default is 0.0\n bllen_max: float, optional\n select only baselines with length less then this value [meters].\n default is np.inf.\n bl_ew_min: float, optional\n select all baselines with EW projected length greater then this value [meters].\n default is 0.0\n gpu_index: int, optional\n limit visible GPUs to be the index of this GPU.\n default: None -> all GPUs are visible.\n gpu_memory_limit: float, optional\n GiB of memory on GPU that can be used.\n default None -> all memory available.\n use_autocorrs_in_weights: bool, optional\n if True, use smooth fits to autocorrelations as\n inverse variance weights.\n default is False.\n calibration_kwargs: kwarg dict\n see kwrags for calibration_and_model_dpss()\n Returns\n -------\n\n model_fit: UVData object\n uvdata object containing DPSS model of intrinsic foregrounds.\n resid_fit: UVData object\n uvdata object containing residuals after subtracting model times gains and applying gains.\n gains_fit: UVCal object\n uvcal object containing fitted gains.\n fit_info:\n dictionary containing fit history for each time-step and polarization in the data with fields:\n 'loss_history': list of values of the loss function in each minimization iteration.\n "
gpus = tf.config.list_physical_devices('GPU')
if (gpu_index is not None):
if gpus:
if (gpu_memory_limit is None):
tf.config.set_visible_devices(gpus[gpu_index], 'GPU')
else:
tf.config.set_logical_device_configuration(gpus[gpu_index], [tf.config.LogicalDeviceConfiguration(memory_limit=(gpu_memory_limit * 1024))])
logical_gpus = tf.config.list_logical_devices('GPU')
print(len(gpus), 'Physical GPUs,', len(logical_gpus), 'Logical GPU')
if isinstance(input_data_files, str):
input_data_files = [input_data_files]
if isinstance(input_data_files, list):
uvd = UVData()
uvd.read(input_data_files)
else:
uvd = input_data_files
if use_autocorrs_in_weights:
weights = get_auto_weights(uvd)
else:
weights = None
utils.select_baselines(uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min, ex_ants=ex_ants, select_ants=select_ants)
if isinstance(input_model_files, str):
input_model_files = [input_model_files]
if (input_model_files is not None):
if isinstance(input_model_files, list):
uvd_model = UVData()
uvd_model.read(input_model_files)
else:
uvd_model = input_model_files
else:
uvd_model = None
if (uvd_model is not None):
utils.select_baselines(uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min)
if isinstance(input_gain_files, str):
input_gain_files = [input_gain_files]
if (input_gain_files is not None):
if isinstance(input_gain_files, list):
uvc = UVCal()
uvc.read_calfits(input_gain_files)
else:
uvc = input_gain_files
else:
uvc = None
dtype = {32: np.float32, 64: np.float64}[precision]
if ((gpu_index is not None) and gpus):
with tf.device(f'/device:GPU:{gpus[gpu_index].name[(- 1)]}'):
(model_fit, resid_fit, gains_fit, fit_info) = calibrate_and_model_dpss(uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs)
else:
(model_fit, resid_fit, gains_fit, fit_info) = calibrate_and_model_dpss(uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs)
if (resid_outfilename is not None):
resid_fit.write_uvh5(resid_outfilename, clobber=clobber)
if (gain_outfilename is not None):
gains_fit.x_orientation = x_orientation
gains_fit.write_calfits(gain_outfilename, clobber=clobber)
if (model_outfilename is not None):
model_fit.write_uvh5(model_outfilename, clobber=clobber)
fit_info['calibration_kwargs'] = calibration_kwargs
fit_info['calibration_kwargs']['dtype'] = dtype
return (model_fit, resid_fit, gains_fit, fit_info) | -2,364,376,502,582,659,600 | Driver function for using calamity with DPSS modeling.
Parameters
----------
input_data_files: list of strings or UVData object.
list of paths to input files to read in and calibrate.
input_model_files: list of strings or UVData object, optional
list of paths to model files for overall phase/amp reference.
Default is None -> use input files as model for overall
phase and amplitude calibration.
input_gain_files: list of strings or UVCal object, optional
list of paths to gain files to use as initial guesses for calibration.
resid_outfilename: str, optional
path for file to write residuals.
default is None -> don't write out residuals.
gain_outfilename: str, optional
path to gain calfits to write fitted gains.
default is None -> don't write out gains.
model_outfilename, str, optional
path to file to write model output.
default is None -> Don't write model.
fitting_info_outfilename, str, optional
string to pickle fitting info to.
n_output_chunks: int optional
split up outputs into n_output_chunks chunked by time.
default is None -> write single output file.
bllen_min: float, optional
select all baselines with length greater than this value [meters].
default is 0.0
bllen_max: float, optional
select only baselines with length less than this value [meters].
default is np.inf.
bl_ew_min: float, optional
select all baselines with EW projected length greater than this value [meters].
default is 0.0
gpu_index: int, optional
limit visible GPUs to be the index of this GPU.
default: None -> all GPUs are visible.
gpu_memory_limit: float, optional
GiB of memory on GPU that can be used.
default None -> all memory available.
use_autocorrs_in_weights: bool, optional
if True, use smooth fits to autocorrelations as
inverse variance weights.
default is False.
calibration_kwargs: kwarg dict
see kwargs for calibrate_and_model_dpss()
Returns
-------
model_fit: UVData object
uvdata object containing DPSS model of intrinsic foregrounds.
resid_fit: UVData object
uvdata object containing residuals after subtracting model times gains and applying gains.
gains_fit: UVCal object
uvcal object containing fitted gains.
fit_info:
dictionary containing fit history for each time-step and polarization in the data with fields:
'loss_history': list of values of the loss function in each minimization iteration. | calamity/calibration.py | read_calibrate_and_model_dpss | aewallwi/calamity | python | def read_calibrate_and_model_dpss(input_data_files, input_model_files=None, input_gain_files=None, resid_outfilename=None, gain_outfilename=None, model_outfilename=None, fitted_info_outfilename=None, x_orientation='east', clobber=False, bllen_min=0.0, bllen_max=np.inf, bl_ew_min=0.0, ex_ants=None, select_ants=None, gpu_index=None, gpu_memory_limit=None, precision=32, use_autocorrs_in_weights=False, **calibration_kwargs):
"\n Driver function for using calamity with DPSS modeling.\n\n Parameters\n ----------\n input_data_files: list of strings or UVData object.\n list of paths to input files to read in and calibrate.\n input_model_files: list of strings or UVData object, optional\n list of paths to model files for overal phase/amp reference.\n Default is None -> use input files as model for overall\n phase and amplitude calibration.\n input_gain_files: list of strings or UVCal object, optional\n list of paths to gain files to use as initial guesses for calibration.\n resid_outfilename: str, optional\n path for file to write residuals.\n default is None -> don't write out residuals.\n gain_outfilename: str, optional\n path to gain calfits to write fitted gains.\n default is None -> don't write out gains.\n model_outfilename, str, optional\n path to file to write model output.\n default is None -> Don't write model.\n fitting_info_outfilename, str, optional\n string to pickel fitting info to.\n n_output_chunks: int optional\n split up outputs into n_output_chunks chunked by time.\n default is None -> write single output file.\n bllen_min: float, optional\n select all baselines with length greater then this value [meters].\n default is 0.0\n bllen_max: float, optional\n select only baselines with length less then this value [meters].\n default is np.inf.\n bl_ew_min: float, optional\n select all baselines with EW projected length greater then this value [meters].\n default is 0.0\n gpu_index: int, optional\n limit visible GPUs to be the index of this GPU.\n default: None -> all GPUs are visible.\n gpu_memory_limit: float, optional\n GiB of memory on GPU that can be used.\n default None -> all memory available.\n use_autocorrs_in_weights: bool, optional\n if True, use smooth fits to autocorrelations as\n inverse variance weights.\n default is False.\n calibration_kwargs: kwarg dict\n see kwrags for calibration_and_model_dpss()\n Returns\n -------\n\n model_fit: UVData object\n uvdata object containing DPSS model of intrinsic foregrounds.\n resid_fit: UVData object\n uvdata object containing residuals after subtracting model times gains and applying gains.\n gains_fit: UVCal object\n uvcal object containing fitted gains.\n fit_info:\n dictionary containing fit history for each time-step and polarization in the data with fields:\n 'loss_history': list of values of the loss function in each minimization iteration.\n "
gpus = tf.config.list_physical_devices('GPU')
if (gpu_index is not None):
if gpus:
if (gpu_memory_limit is None):
tf.config.set_visible_devices(gpus[gpu_index], 'GPU')
else:
tf.config.set_logical_device_configuration(gpus[gpu_index], [tf.config.LogicalDeviceConfiguration(memory_limit=(gpu_memory_limit * 1024))])
logical_gpus = tf.config.list_logical_devices('GPU')
print(len(gpus), 'Physical GPUs,', len(logical_gpus), 'Logical GPU')
if isinstance(input_data_files, str):
input_data_files = [input_data_files]
if isinstance(input_data_files, list):
uvd = UVData()
uvd.read(input_data_files)
else:
uvd = input_data_files
if use_autocorrs_in_weights:
weights = get_auto_weights(uvd)
else:
weights = None
utils.select_baselines(uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min, ex_ants=ex_ants, select_ants=select_ants)
if isinstance(input_model_files, str):
input_model_files = [input_model_files]
if (input_model_files is not None):
if isinstance(input_model_files, list):
uvd_model = UVData()
uvd_model.read(input_model_files)
else:
uvd_model = input_model_files
else:
uvd_model = None
if (uvd_model is not None):
utils.select_baselines(uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min)
if isinstance(input_gain_files, str):
input_gain_files = [input_gain_files]
if (input_gain_files is not None):
if isinstance(input_gain_files, list):
uvc = UVCal()
uvc.read_calfits(input_gain_files)
else:
uvc = input_gain_files
else:
uvc = None
dtype = {32: np.float32, 64: np.float64}[precision]
if ((gpu_index is not None) and gpus):
with tf.device(f'/device:GPU:{gpus[gpu_index].name[(- 1)]}'):
(model_fit, resid_fit, gains_fit, fit_info) = calibrate_and_model_dpss(uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs)
else:
(model_fit, resid_fit, gains_fit, fit_info) = calibrate_and_model_dpss(uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs)
if (resid_outfilename is not None):
resid_fit.write_uvh5(resid_outfilename, clobber=clobber)
if (gain_outfilename is not None):
gains_fit.x_orientation = x_orientation
gains_fit.write_calfits(gain_outfilename, clobber=clobber)
if (model_outfilename is not None):
model_fit.write_uvh5(model_outfilename, clobber=clobber)
fit_info['calibration_kwargs'] = calibration_kwargs
fit_info['calibration_kwargs']['dtype'] = dtype
return (model_fit, resid_fit, gains_fit, fit_info) |
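A hypothetical driver invocation; all file names are placeholders, and the GPU settings are optional.

from calamity import calibration  # assumed import path

model, resid, gains, info = calibration.read_calibrate_and_model_dpss(
    input_data_files=["obs.uvh5"],        # placeholder input
    model_outfilename="fg_model.uvh5",
    resid_outfilename="resid.uvh5",
    gain_outfilename="gains.calfits",
    gpu_index=0,             # pin the fit to the first visible GPU, if any
    gpu_memory_limit=8,      # GiB cap on that GPU
    bllen_max=150.0,         # drop baselines longer than 150 m
    clobber=True,
)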
def render_formset(formset, **kwargs):
'Render a formset to a Bootstrap layout.'
renderer_cls = get_formset_renderer(**kwargs)
return renderer_cls(formset, **kwargs).render() | 3,676,325,694,860,855,300 | Render a formset to a Bootstrap layout. | src/bootstrap4/forms.py | render_formset | Natureshadow/django-bootstrap4 | python | def render_formset(formset, **kwargs):
renderer_cls = get_formset_renderer(**kwargs)
return renderer_cls(formset, **kwargs).render() |
def render_formset_errors(formset, **kwargs):
'Render formset errors to a Bootstrap layout.'
renderer_cls = get_formset_renderer(**kwargs)
return renderer_cls(formset, **kwargs).render_errors() | -6,894,594,435,518,397,000 | Render formset errors to a Bootstrap layout. | src/bootstrap4/forms.py | render_formset_errors | Natureshadow/django-bootstrap4 | python | def render_formset_errors(formset, **kwargs):
renderer_cls = get_formset_renderer(**kwargs)
return renderer_cls(formset, **kwargs).render_errors() |
def render_form(form, **kwargs):
'Render a form to a Bootstrap layout.'
renderer_cls = get_form_renderer(**kwargs)
return renderer_cls(form, **kwargs).render() | -2,290,790,819,255,574,500 | Render a form to a Bootstrap layout. | src/bootstrap4/forms.py | render_form | Natureshadow/django-bootstrap4 | python | def render_form(form, **kwargs):
renderer_cls = get_form_renderer(**kwargs)
return renderer_cls(form, **kwargs).render() |
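A hypothetical usage sketch, assuming a configured Django project with this package installed; the ContactForm class is invented for illustration.

from django import forms
from bootstrap4.forms import render_form, render_form_errors

class ContactForm(forms.Form):
    subject = forms.CharField(max_length=100)

form = ContactForm(data={})      # bound form, so errors can be rendered as well
form_html = render_form(form)    # whole form as Bootstrap 4 markup
error_html = render_form_errors(form)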
def render_form_errors(form, type='all', **kwargs):
'Render form errors to a Bootstrap layout.'
renderer_cls = get_form_renderer(**kwargs)
return renderer_cls(form, **kwargs).render_errors(type) | -6,262,209,671,540,802,000 | Render form errors to a Bootstrap layout. | src/bootstrap4/forms.py | render_form_errors | Natureshadow/django-bootstrap4 | python | def render_form_errors(form, type='all', **kwargs):
renderer_cls = get_form_renderer(**kwargs)
return renderer_cls(form, **kwargs).render_errors(type) |
def render_field(field, **kwargs):
'Render a field to a Bootstrap layout.'
renderer_cls = get_field_renderer(**kwargs)
return renderer_cls(field, **kwargs).render() | 212,413,380,482,624,060 | Render a field to a Bootstrap layout. | src/bootstrap4/forms.py | render_field | Natureshadow/django-bootstrap4 | python | def render_field(field, **kwargs):
renderer_cls = get_field_renderer(**kwargs)
return renderer_cls(field, **kwargs).render() |
def render_label(content, label_for=None, label_class=None, label_title=''):
'Render a label with content.'
attrs = {}
if label_for:
attrs['for'] = label_for
if label_class:
attrs['class'] = label_class
if label_title:
attrs['title'] = label_title
return render_tag('label', attrs=attrs, content=content) | 4,835,622,836,655,177,000 | Render a label with content. | src/bootstrap4/forms.py | render_label | Natureshadow/django-bootstrap4 | python | def render_label(content, label_for=None, label_class=None, label_title=):
attrs = {}
if label_for:
attrs['for'] = label_for
if label_class:
attrs['class'] = label_class
if label_title:
attrs['title'] = label_title
return render_tag('label', attrs=attrs, content=content) |
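For example (output shown approximately; exact attribute ordering depends on render_tag):

from bootstrap4.forms import render_label

html = render_label("Email address", label_for="id_email", label_class="col-form-label")
# -> roughly '<label for="id_email" class="col-form-label">Email address</label>'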
def render_button(content, button_type=None, button_class='btn-primary', size='', href='', name=None, value=None, title=None, extra_classes='', id=''):
'Render a button with content.'
attrs = {}
classes = add_css_class('btn', button_class)
size = text_value(size).lower().strip()
if (size == 'xs'):
classes = add_css_class(classes, 'btn-xs')
elif ((size == 'sm') or (size == 'small')):
classes = add_css_class(classes, 'btn-sm')
elif ((size == 'lg') or (size == 'large')):
classes = add_css_class(classes, 'btn-lg')
elif ((size == 'md') or (size == 'medium')):
pass
elif size:
raise BootstrapError(f'Parameter "size" should be "xs", "sm", "lg" or empty ("{size}" given).')
if button_type:
if (button_type not in ('submit', 'reset', 'button', 'link')):
raise BootstrapError(f'Parameter "button_type" should be "submit", "reset", "button", "link" or empty ("{button_type}" given).')
if (button_type != 'link'):
attrs['type'] = button_type
classes = add_css_class(classes, extra_classes)
attrs['class'] = classes
if href:
tag = 'a'
if (button_type and (button_type != 'link')):
raise BootstrapError(f'Button of type "{button_type}" is not allowed a "href" parameter.')
attrs['href'] = href
attrs.setdefault('role', 'button')
else:
tag = 'button'
if id:
attrs['id'] = id
if name:
attrs['name'] = name
if value:
attrs['value'] = value
if title:
attrs['title'] = title
return render_tag(tag, attrs=attrs, content=mark_safe(content)) | 5,598,860,407,885,194,000 | Render a button with content. | src/bootstrap4/forms.py | render_button | Natureshadow/django-bootstrap4 | python | def render_button(content, button_type=None, button_class='btn-primary', size=, href=, name=None, value=None, title=None, extra_classes=, id=):
attrs = {}
classes = add_css_class('btn', button_class)
size = text_value(size).lower().strip()
if (size == 'xs'):
classes = add_css_class(classes, 'btn-xs')
elif ((size == 'sm') or (size == 'small')):
classes = add_css_class(classes, 'btn-sm')
elif ((size == 'lg') or (size == 'large')):
classes = add_css_class(classes, 'btn-lg')
elif ((size == 'md') or (size == 'medium')):
pass
elif size:
raise BootstrapError(f'Parameter "size" should be "xs", "sm", "lg" or empty ("{size}" given).')
if button_type:
if (button_type not in ('submit', 'reset', 'button', 'link')):
raise BootstrapError(f'Parameter "button_type" should be "submit", "reset", "button", "link" or empty ("{button_type}" given).')
if (button_type != 'link'):
attrs['type'] = button_type
classes = add_css_class(classes, extra_classes)
attrs['class'] = classes
if href:
tag = 'a'
if (button_type and (button_type != 'link')):
raise BootstrapError(f'Button of type "{button_type}" is not allowed a "href" parameter.')
attrs['href'] = href
attrs.setdefault('role', 'button')
else:
tag = 'button'
if id:
attrs['id'] = id
if name:
attrs['name'] = name
if value:
attrs['value'] = value
if title:
attrs['title'] = title
return render_tag(tag, attrs=attrs, content=mark_safe(content)) |
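A usage sketch based on the branches above; the markup shown in the comments is approximate.

from bootstrap4.forms import render_button

submit = render_button("Save", button_type="submit", size="sm", extra_classes="my-2")
# -> roughly '<button type="submit" class="btn btn-primary btn-sm my-2">Save</button>'
cancel = render_button("Cancel", button_type="link", href="/back/", button_class="btn-secondary")
# href together with button_type="link" yields an <a role="button" ...> element instead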
def render_field_and_label(field, label, field_class='', label_for=None, label_class='', layout='', **kwargs):
'Render a field with its label.'
if (layout == 'horizontal'):
if (not label_class):
label_class = get_bootstrap_setting('horizontal_label_class')
if (not field_class):
field_class = get_bootstrap_setting('horizontal_field_class')
if (not label):
label = mark_safe(' ')
label_class = add_css_class(label_class, 'control-label')
html = field
if field_class:
html = f'<div class="{field_class}">{html}</div>'
if label:
html = (render_label(label, label_for=label_for, label_class=label_class) + html)
return html | 2,039,437,234,522,795,500 | Render a field with its label. | src/bootstrap4/forms.py | render_field_and_label | Natureshadow/django-bootstrap4 | python | def render_field_and_label(field, label, field_class=, label_for=None, label_class=, layout=, **kwargs):
if (layout == 'horizontal'):
if (not label_class):
label_class = get_bootstrap_setting('horizontal_label_class')
if (not field_class):
field_class = get_bootstrap_setting('horizontal_field_class')
if (not label):
label = mark_safe(' ')
label_class = add_css_class(label_class, 'control-label')
html = field
if field_class:
html = f'<div class="{field_class}">{html}</div>'
if label:
html = (render_label(label, label_for=label_for, label_class=label_class) + html)
return html |
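A sketch of the horizontal layout path, assuming a configured Django project and that the field argument is already-rendered widget HTML:

from bootstrap4.forms import render_field_and_label

field_html = '<input type="text" name="city" class="form-control">'
html = render_field_and_label(field_html, "City", label_for="id_city", layout="horizontal")
# with layout="horizontal", empty label/field classes fall back to the
# 'horizontal_label_class' / 'horizontal_field_class' settings, and the field
# is wrapped in a <div> carrying the field column class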
def render_form_group(content, css_class=FORM_GROUP_CLASS):
'Render a Bootstrap form group.'
return f'<div class="{css_class}">{content}</div>' | -311,337,625,377,371,400 | Render a Bootstrap form group. | src/bootstrap4/forms.py | render_form_group | Natureshadow/django-bootstrap4 | python | def render_form_group(content, css_class=FORM_GROUP_CLASS):
return f'<div class="{css_class}">{content}</div>' |
def is_widget_with_placeholder(widget):
'\n Return whether this widget should have a placeholder.\n\n Only text, text area, number, e-mail, url, password and derived inputs have placeholders.\n '
return isinstance(widget, (TextInput, Textarea, NumberInput, EmailInput, URLInput, PasswordInput)) | 119,742,989,659,189,860 | Return whether this widget should have a placeholder.
Only text, text area, number, e-mail, url, password and derived inputs have placeholders. | src/bootstrap4/forms.py | is_widget_with_placeholder | Natureshadow/django-bootstrap4 | python | def is_widget_with_placeholder(widget):
'\n Return whether this widget should have a placeholder.\n\n Only text, text area, number, e-mail, url, password and derived inputs have placeholders.\n '
return isinstance(widget, (TextInput, Textarea, NumberInput, EmailInput, URLInput, PasswordInput)) |
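For example, with Django's stock widgets:

from django import forms
from bootstrap4.forms import is_widget_with_placeholder

is_widget_with_placeholder(forms.TextInput())      # True  -- text-like input
is_widget_with_placeholder(forms.CheckboxInput())  # False -- checkboxes take no placeholder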
def _init_features(self):
'Set up the repository of available Data ONTAP features.'
self.features = na_utils.Features() | 3,182,935,898,800,352,000 | Set up the repository of available Data ONTAP features. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | _init_features | sapcc/cinder | python | def _init_features(self):
self.features = na_utils.Features() |
def get_ontap_version(self, cached=True):
'Gets the ONTAP version.'
if cached:
return self.connection.get_ontap_version()
ontap_version = netapp_api.NaElement('system-get-version')
result = self.connection.invoke_successfully(ontap_version, True)
version_tuple = (result.get_child_by_name('version-tuple') or netapp_api.NaElement('none'))
system_version_tuple = (version_tuple.get_child_by_name('system-version-tuple') or netapp_api.NaElement('none'))
generation = system_version_tuple.get_child_content('generation')
major = system_version_tuple.get_child_content('major')
return ('%(generation)s.%(major)s' % {'generation': generation, 'major': major}) | -6,697,713,649,390,994,000 | Gets the ONTAP version. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | get_ontap_version | sapcc/cinder | python | def get_ontap_version(self, cached=True):
if cached:
return self.connection.get_ontap_version()
ontap_version = netapp_api.NaElement('system-get-version')
result = self.connection.invoke_successfully(ontap_version, True)
version_tuple = (result.get_child_by_name('version-tuple') or netapp_api.NaElement('none'))
system_version_tuple = (version_tuple.get_child_by_name('system-version-tuple') or netapp_api.NaElement('none'))
generation = system_version_tuple.get_child_content('generation')
major = system_version_tuple.get_child_content('major')
return ('%(generation)s.%(major)s' % {'generation': generation, 'major': major}) |
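get_ontap_version above pulls generation and major out of a nested version tuple and joins them with a dot. A rough stand-alone sketch of that parsing, using xml.etree.ElementTree in place of netapp_api.NaElement and an assumed response shape:

import xml.etree.ElementTree as ET

response = """
<results>
  <version-tuple>
    <system-version-tuple>
      <generation>9</generation><major>8</major><minor>1</minor>
    </system-version-tuple>
  </version-tuple>
</results>
"""
tup = ET.fromstring(response).find("./version-tuple/system-version-tuple")
# Same formatting as the method above: "<generation>.<major>"
print('%(generation)s.%(major)s' % {'generation': tup.findtext('generation'),
                                    'major': tup.findtext('major')})  # -> 9.8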
def get_ontapi_version(self, cached=True):
'Gets the supported ontapi version.'
if cached:
return self.connection.get_api_version()
ontapi_version = netapp_api.NaElement('system-get-ontapi-version')
res = self.connection.invoke_successfully(ontapi_version, False)
major = res.get_child_content('major-version')
minor = res.get_child_content('minor-version')
return (major, minor) | 7,650,952,949,425,096,000 | Gets the supported ontapi version. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | get_ontapi_version | sapcc/cinder | python | def get_ontapi_version(self, cached=True):
if cached:
return self.connection.get_api_version()
ontapi_version = netapp_api.NaElement('system-get-ontapi-version')
res = self.connection.invoke_successfully(ontapi_version, False)
major = res.get_child_content('major-version')
minor = res.get_child_content('minor-version')
return (major, minor) |
def check_is_naelement(self, elem):
'Checks if object is instance of NaElement.'
if (not isinstance(elem, netapp_api.NaElement)):
raise ValueError('Expects NaElement') | 2,203,974,980,253,227,500 | Checks if object is instance of NaElement. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | check_is_naelement | sapcc/cinder | python | def check_is_naelement(self, elem):
if (not isinstance(elem, netapp_api.NaElement)):
raise ValueError('Expects NaElement') |
def create_lun(self, volume_name, lun_name, size, metadata, qos_policy_group_name=None):
'Issues API request for creating LUN on volume.'
path = ('/vol/%s/%s' % (volume_name, lun_name))
space_reservation = metadata['SpaceReserved']
initial_size = size
ontap_version = self.get_ontap_version()
if (ontap_version < '9.5'):
initial_size = MAX_SIZE_FOR_A_LUN
space_reservation = 'false'
params = {'path': path, 'size': str(initial_size), 'ostype': metadata['OsType'], 'space-reservation-enabled': space_reservation}
version = self.get_ontapi_version()
if (version >= (1, 110)):
params['use-exact-size'] = 'true'
lun_create = netapp_api.NaElement.create_node_with_children('lun-create-by-size', **params)
if qos_policy_group_name:
lun_create.add_new_child('qos-policy-group', qos_policy_group_name)
try:
self.connection.invoke_successfully(lun_create, True)
except netapp_api.NaApiError as ex:
with excutils.save_and_reraise_exception():
LOG.error('Error provisioning volume %(lun_name)s on %(volume_name)s. Details: %(ex)s', {'lun_name': lun_name, 'volume_name': volume_name, 'ex': ex})
if (ontap_version < '9.5'):
self.do_direct_resize(path, six.text_type(size))
if (metadata['SpaceReserved'] == 'true'):
self.set_lun_space_reservation(path, True) | -900,611,365,462,998,100 | Issues API request for creating LUN on volume. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | create_lun | sapcc/cinder | python | def create_lun(self, volume_name, lun_name, size, metadata, qos_policy_group_name=None):
path = ('/vol/%s/%s' % (volume_name, lun_name))
space_reservation = metadata['SpaceReserved']
initial_size = size
ontap_version = self.get_ontap_version()
if (ontap_version < '9.5'):
initial_size = MAX_SIZE_FOR_A_LUN
space_reservation = 'false'
params = {'path': path, 'size': str(initial_size), 'ostype': metadata['OsType'], 'space-reservation-enabled': space_reservation}
version = self.get_ontapi_version()
if (version >= (1, 110)):
params['use-exact-size'] = 'true'
lun_create = netapp_api.NaElement.create_node_with_children('lun-create-by-size', **params)
if qos_policy_group_name:
lun_create.add_new_child('qos-policy-group', qos_policy_group_name)
try:
self.connection.invoke_successfully(lun_create, True)
except netapp_api.NaApiError as ex:
with excutils.save_and_reraise_exception():
LOG.error('Error provisioning volume %(lun_name)s on %(volume_name)s. Details: %(ex)s', {'lun_name': lun_name, 'volume_name': volume_name, 'ex': ex})
if (ontap_version < '9.5'):
self.do_direct_resize(path, six.text_type(size))
if (metadata['SpaceReserved'] == 'true'):
self.set_lun_space_reservation(path, True) |
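create_lun above branches on the ONTAP release: before 9.5 the LUN is created at the maximum allowed size with space reservation disabled, then resized down and re-reserved; from 9.5 on it is created at the exact requested size. A hedged sketch of that decision flow with plain callables standing in for the ZAPI calls; the constant value, the tuple-typed version argument, and all parameter names are illustrative assumptions, not the driver's actual interface.

MAX_SIZE_FOR_A_LUN = 17555678822400  # illustrative stand-in for the driver constant

def provision_lun(path, size, space_reserved, ontap_version, create, resize, set_reservation):
    # Pre-9.5 workaround: create oversized without reservation, shrink afterwards,
    # then restore the reservation flag (mirrors the flow in the method above).
    needs_workaround = ontap_version < (9, 5)
    initial_size = MAX_SIZE_FOR_A_LUN if needs_workaround else size
    reservation = space_reserved and not needs_workaround
    create(path, initial_size, reservation)
    if needs_workaround:
        resize(path, size)
        if space_reserved:
            set_reservation(path, True)

provision_lun('/vol/vol1/lun1', 10 * 2 ** 30, True, (9, 3),
              create=lambda *a: print('create', a),
              resize=lambda *a: print('resize', a),
              set_reservation=lambda *a: print('reserve', a))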
def set_lun_space_reservation(self, path, flag):
'Sets the LUN space reservation on ONTAP.'
lun_modify_space_reservation = netapp_api.NaElement.create_node_with_children('lun-set-space-reservation-info', **{'path': path, 'enable': str(flag)})
self.connection.invoke_successfully(lun_modify_space_reservation, True) | 3,347,695,314,018,310,700 | Sets the LUN space reservation on ONTAP. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | set_lun_space_reservation | sapcc/cinder | python | def set_lun_space_reservation(self, path, flag):
lun_modify_space_reservation = netapp_api.NaElement.create_node_with_children('lun-set-space-reservation-info', **{'path': path, 'enable': str(flag)})
self.connection.invoke_successfully(lun_modify_space_reservation, True) |
def destroy_lun(self, path, force=True):
'Destroys the LUN at the path.'
lun_destroy = netapp_api.NaElement.create_node_with_children('lun-destroy', **{'path': path})
if force:
lun_destroy.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_destroy, True)
seg = path.split('/')
LOG.debug('Destroyed LUN %s', seg[(- 1)]) | 7,368,047,698,348,012,000 | Destroys the LUN at the path. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | destroy_lun | sapcc/cinder | python | def destroy_lun(self, path, force=True):
lun_destroy = netapp_api.NaElement.create_node_with_children('lun-destroy', **{'path': path})
if force:
lun_destroy.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_destroy, True)
seg = path.split('/')
LOG.debug('Destroyed LUN %s', seg[(- 1)]) |
def map_lun(self, path, igroup_name, lun_id=None):
'Maps LUN to the initiator and returns LUN id assigned.'
lun_map = netapp_api.NaElement.create_node_with_children('lun-map', **{'path': path, 'initiator-group': igroup_name})
if lun_id:
lun_map.add_new_child('lun-id', lun_id)
try:
result = self.connection.invoke_successfully(lun_map, True)
return result.get_child_content('lun-id-assigned')
except netapp_api.NaApiError as e:
code = e.code
message = e.message
LOG.warning('Error mapping LUN. Code :%(code)s, Message: %(message)s', {'code': code, 'message': message})
raise | 5,147,786,705,441,598,000 | Maps LUN to the initiator and returns LUN id assigned. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | map_lun | sapcc/cinder | python | def map_lun(self, path, igroup_name, lun_id=None):
lun_map = netapp_api.NaElement.create_node_with_children('lun-map', **{'path': path, 'initiator-group': igroup_name})
if lun_id:
lun_map.add_new_child('lun-id', lun_id)
try:
result = self.connection.invoke_successfully(lun_map, True)
return result.get_child_content('lun-id-assigned')
except netapp_api.NaApiError as e:
code = e.code
message = e.message
LOG.warning('Error mapping LUN. Code :%(code)s, Message: %(message)s', {'code': code, 'message': message})
raise |
def unmap_lun(self, path, igroup_name):
'Unmaps a LUN from given initiator.'
lun_unmap = netapp_api.NaElement.create_node_with_children('lun-unmap', **{'path': path, 'initiator-group': igroup_name})
try:
self.connection.invoke_successfully(lun_unmap, True)
except netapp_api.NaApiError as e:
exc_info = sys.exc_info()
LOG.warning('Error unmapping LUN. Code :%(code)s, Message: %(message)s', {'code': e.code, 'message': e.message})
if ((e.code == '13115') or (e.code == '9016')):
pass
else:
six.reraise(*exc_info) | 682,668,600,462,337,700 | Unmaps a LUN from given initiator. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | unmap_lun | sapcc/cinder | python | def unmap_lun(self, path, igroup_name):
lun_unmap = netapp_api.NaElement.create_node_with_children('lun-unmap', **{'path': path, 'initiator-group': igroup_name})
try:
self.connection.invoke_successfully(lun_unmap, True)
except netapp_api.NaApiError as e:
exc_info = sys.exc_info()
LOG.warning('Error unmapping LUN. Code :%(code)s, Message: %(message)s', {'code': e.code, 'message': e.message})
if ((e.code == '13115') or (e.code == '9016')):
pass
else:
six.reraise(*exc_info) |
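unmap_lun above swallows the two error codes that mean the LUN was already unmapped ('13115' and '9016') and re-raises anything else. A generic sketch of that idempotent error-handling shape, with a hypothetical ApiError in place of netapp_api.NaApiError:

class ApiError(Exception):
    def __init__(self, code, message):
        super().__init__(message)
        self.code = code
        self.message = message

ALREADY_UNMAPPED = {'13115', '9016'}  # codes the method above treats as success

def unmap(call):
    try:
        call()
    except ApiError as e:
        if e.code not in ALREADY_UNMAPPED:
            raise  # unexpected failure: propagate

def already_unmapped():
    raise ApiError('9016', 'LUN is not mapped to this group')

unmap(already_unmapped)  # swallowed: the LUN is simply already unmapped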
def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
'Creates igroup with specified args.'
igroup_create = netapp_api.NaElement.create_node_with_children('igroup-create', **{'initiator-group-name': igroup, 'initiator-group-type': igroup_type, 'os-type': os_type})
self.connection.invoke_successfully(igroup_create, True) | 4,476,615,876,935,663,000 | Creates igroup with specified args. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | create_igroup | sapcc/cinder | python | def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
igroup_create = netapp_api.NaElement.create_node_with_children('igroup-create', **{'initiator-group-name': igroup, 'initiator-group-type': igroup_type, 'os-type': os_type})
self.connection.invoke_successfully(igroup_create, True) |
def add_igroup_initiator(self, igroup, initiator):
'Adds initiators to the specified igroup.'
igroup_add = netapp_api.NaElement.create_node_with_children('igroup-add', **{'initiator-group-name': igroup, 'initiator': initiator})
self.connection.invoke_successfully(igroup_add, True) | -6,145,192,281,599,506,000 | Adds initiators to the specified igroup. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | add_igroup_initiator | sapcc/cinder | python | def add_igroup_initiator(self, igroup, initiator):
igroup_add = netapp_api.NaElement.create_node_with_children('igroup-add', **{'initiator-group-name': igroup, 'initiator': initiator})
self.connection.invoke_successfully(igroup_add, True) |
def do_direct_resize(self, path, new_size_bytes, force=True):
'Resize the LUN.'
seg = path.split('/')
LOG.info('Resizing LUN %s directly to new size.', seg[(- 1)])
lun_resize = netapp_api.NaElement.create_node_with_children('lun-resize', **{'path': path, 'size': new_size_bytes})
if force:
lun_resize.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_resize, True) | 6,683,164,055,784,541,000 | Resize the LUN. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | do_direct_resize | sapcc/cinder | python | def do_direct_resize(self, path, new_size_bytes, force=True):
seg = path.split('/')
LOG.info('Resizing LUN %s directly to new size.', seg[(- 1)])
lun_resize = netapp_api.NaElement.create_node_with_children('lun-resize', **{'path': path, 'size': new_size_bytes})
if force:
lun_resize.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_resize, True) |
def get_lun_geometry(self, path):
'Gets the LUN geometry.'
geometry = {}
lun_geo = netapp_api.NaElement('lun-get-geometry')
lun_geo.add_new_child('path', path)
try:
result = self.connection.invoke_successfully(lun_geo, True)
geometry['size'] = result.get_child_content('size')
geometry['bytes_per_sector'] = result.get_child_content('bytes-per-sector')
geometry['sectors_per_track'] = result.get_child_content('sectors-per-track')
geometry['tracks_per_cylinder'] = result.get_child_content('tracks-per-cylinder')
geometry['cylinders'] = result.get_child_content('cylinders')
geometry['max_resize'] = result.get_child_content('max-resize-size')
except Exception as e:
LOG.error('LUN %(path)s geometry failed. Message - %(msg)s', {'path': path, 'msg': six.text_type(e)})
return geometry | -6,749,439,803,077,511,000 | Gets the LUN geometry. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | get_lun_geometry | sapcc/cinder | python | def get_lun_geometry(self, path):
geometry = {}
lun_geo = netapp_api.NaElement('lun-get-geometry')
lun_geo.add_new_child('path', path)
try:
result = self.connection.invoke_successfully(lun_geo, True)
geometry['size'] = result.get_child_content('size')
geometry['bytes_per_sector'] = result.get_child_content('bytes-per-sector')
geometry['sectors_per_track'] = result.get_child_content('sectors-per-track')
geometry['tracks_per_cylinder'] = result.get_child_content('tracks-per-cylinder')
geometry['cylinders'] = result.get_child_content('cylinders')
geometry['max_resize'] = result.get_child_content('max-resize-size')
except Exception as e:
LOG.error('LUN %(path)s geometry failed. Message - %(msg)s', {'path': path, 'msg': six.text_type(e)})
return geometry |
def get_volume_options(self, volume_name):
'Get the value for the volume option.'
opts = []
vol_option_list = netapp_api.NaElement('volume-options-list-info')
vol_option_list.add_new_child('volume', volume_name)
result = self.connection.invoke_successfully(vol_option_list, True)
options = result.get_child_by_name('options')
if options:
opts = options.get_children()
return opts | -4,522,512,593,235,644,400 | Get the value for the volume option. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | get_volume_options | sapcc/cinder | python | def get_volume_options(self, volume_name):
opts = []
vol_option_list = netapp_api.NaElement('volume-options-list-info')
vol_option_list.add_new_child('volume', volume_name)
result = self.connection.invoke_successfully(vol_option_list, True)
options = result.get_child_by_name('options')
if options:
opts = options.get_children()
return opts |
def move_lun(self, path, new_path):
'Moves the LUN at path to new path.'
seg = path.split('/')
new_seg = new_path.split('/')
LOG.debug('Moving LUN %(name)s to %(new_name)s.', {'name': seg[(- 1)], 'new_name': new_seg[(- 1)]})
lun_move = netapp_api.NaElement('lun-move')
lun_move.add_new_child('path', path)
lun_move.add_new_child('new-path', new_path)
self.connection.invoke_successfully(lun_move, True) | -297,760,326,579,492,900 | Moves the LUN at path to new path. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | move_lun | sapcc/cinder | python | def move_lun(self, path, new_path):
seg = path.split('/')
new_seg = new_path.split('/')
LOG.debug('Moving LUN %(name)s to %(new_name)s.', {'name': seg[(- 1)], 'new_name': new_seg[(- 1)]})
lun_move = netapp_api.NaElement('lun-move')
lun_move.add_new_child('path', path)
lun_move.add_new_child('new-path', new_path)
self.connection.invoke_successfully(lun_move, True) |
def get_iscsi_target_details(self):
'Gets the iSCSI target portal details.'
raise NotImplementedError() | 5,800,743,555,444,232,000 | Gets the iSCSI target portal details. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | get_iscsi_target_details | sapcc/cinder | python | def get_iscsi_target_details(self):
raise NotImplementedError() |
def get_fc_target_wwpns(self):
'Gets the FC target details.'
raise NotImplementedError() | 2,441,588,315,056,165,400 | Gets the FC target details. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | get_fc_target_wwpns | sapcc/cinder | python | def get_fc_target_wwpns(self):
raise NotImplementedError() |
def get_iscsi_service_details(self):
'Returns iscsi iqn.'
raise NotImplementedError() | -6,930,080,783,860,774,000 | Returns iscsi iqn. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | get_iscsi_service_details | sapcc/cinder | python | def get_iscsi_service_details(self):
raise NotImplementedError() |
def check_iscsi_initiator_exists(self, iqn):
'Returns True if initiator exists.'
raise NotImplementedError() | 8,060,813,294,559,359,000 | Returns True if initiator exists. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | check_iscsi_initiator_exists | sapcc/cinder | python | def check_iscsi_initiator_exists(self, iqn):
raise NotImplementedError() |
def set_iscsi_chap_authentication(self, iqn, username, password):
"Provides NetApp host's CHAP credentials to the backend."
raise NotImplementedError() | 3,464,418,870,571,428,400 | Provides NetApp host's CHAP credentials to the backend. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | set_iscsi_chap_authentication | sapcc/cinder | python | def set_iscsi_chap_authentication(self, iqn, username, password):
raise NotImplementedError() |
def get_lun_list(self):
'Gets the list of LUNs on filer.'
raise NotImplementedError() | 5,492,762,630,089,887,000 | Gets the list of LUNs on filer. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | get_lun_list | sapcc/cinder | python | def get_lun_list(self):
raise NotImplementedError() |
def get_igroup_by_initiators(self, initiator_list):
'Get igroups exactly matching a set of initiators.'
raise NotImplementedError() | 7,145,069,194,903,305,000 | Get igroups exactly matching a set of initiators. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | get_igroup_by_initiators | sapcc/cinder | python | def get_igroup_by_initiators(self, initiator_list):
raise NotImplementedError() |
def _has_luns_mapped_to_initiator(self, initiator):
'Checks whether any LUNs are mapped to the given initiator.'
lun_list_api = netapp_api.NaElement('lun-initiator-list-map-info')
lun_list_api.add_new_child('initiator', initiator)
result = self.connection.invoke_successfully(lun_list_api, True)
lun_maps_container = (result.get_child_by_name('lun-maps') or netapp_api.NaElement('none'))
return (len(lun_maps_container.get_children()) > 0) | 9,150,061,350,151,996,000 | Checks whether any LUNs are mapped to the given initiator. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | _has_luns_mapped_to_initiator | sapcc/cinder | python | def _has_luns_mapped_to_initiator(self, initiator):
lun_list_api = netapp_api.NaElement('lun-initiator-list-map-info')
lun_list_api.add_new_child('initiator', initiator)
result = self.connection.invoke_successfully(lun_list_api, True)
lun_maps_container = (result.get_child_by_name('lun-maps') or netapp_api.NaElement('none'))
return (len(lun_maps_container.get_children()) > 0) |
def has_luns_mapped_to_initiators(self, initiator_list):
'Checks whether any LUNs are mapped to the given initiator(s).'
for initiator in initiator_list:
if self._has_luns_mapped_to_initiator(initiator):
return True
return False | 7,278,811,898,778,940,000 | Checks whether any LUNs are mapped to the given initiator(s). | cinder/volume/drivers/netapp/dataontap/client/client_base.py | has_luns_mapped_to_initiators | sapcc/cinder | python | def has_luns_mapped_to_initiators(self, initiator_list):
for initiator in initiator_list:
if self._has_luns_mapped_to_initiator(initiator):
return True
return False |
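The two lookup helpers above short-circuit as soon as one initiator has a mapping, which is the same shape as the built-in any(). A small illustrative equivalent; mapped_lun_count is a made-up callable, not part of the client:

def has_luns_mapped(initiators, mapped_lun_count):
    # Stops at the first initiator that reports any mapped LUNs.
    return any(mapped_lun_count(i) > 0 for i in initiators)

counts = {'iqn.1993-08.org.debian:01:abc': 2}
print(has_luns_mapped(['iqn.1993-08.org.debian:01:abc', 'iqn.unknown'],
                      lambda i: counts.get(i, 0)))  # True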
def get_lun_by_args(self, **args):
'Retrieves LUNs with specified args.'
raise NotImplementedError() | -3,744,136,302,429,453,300 | Retrieves LUNs with specified args. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | get_lun_by_args | sapcc/cinder | python | def get_lun_by_args(self, **args):
raise NotImplementedError() |
def get_performance_counter_info(self, object_name, counter_name):
'Gets info about one or more Data ONTAP performance counters.'
api_args = {'objectname': object_name}
result = self.connection.send_request('perf-object-counter-list-info', api_args, enable_tunneling=False)
counters = (result.get_child_by_name('counters') or netapp_api.NaElement('None'))
for counter in counters.get_children():
if (counter.get_child_content('name') == counter_name):
labels = []
label_list = (counter.get_child_by_name('labels') or netapp_api.NaElement('None'))
for label in label_list.get_children():
labels.extend(label.get_content().split(','))
base_counter = counter.get_child_content('base-counter')
return {'name': counter_name, 'labels': labels, 'base-counter': base_counter}
else:
raise exception.NotFound((_('Counter %s not found') % counter_name)) | -1,821,368,557,504,805,400 | Gets info about one or more Data ONTAP performance counters. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | get_performance_counter_info | sapcc/cinder | python | def get_performance_counter_info(self, object_name, counter_name):
api_args = {'objectname': object_name}
result = self.connection.send_request('perf-object-counter-list-info', api_args, enable_tunneling=False)
counters = (result.get_child_by_name('counters') or netapp_api.NaElement('None'))
for counter in counters.get_children():
if (counter.get_child_content('name') == counter_name):
labels = []
label_list = (counter.get_child_by_name('labels') or netapp_api.NaElement('None'))
for label in label_list.get_children():
labels.extend(label.get_content().split(','))
base_counter = counter.get_child_content('base-counter')
return {'name': counter_name, 'labels': labels, 'base-counter': base_counter}
else:
raise exception.NotFound((_('Counter %s not found') % counter_name)) |
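get_performance_counter_info above relies on Python's for/else: the else branch only runs when the loop finishes without returning, i.e. when no counter matched, and the comma-separated label strings are flattened into one list. A self-contained sketch of the same lookup pattern over plain dicts:

def find_counter(counters, counter_name):
    for counter in counters:
        if counter['name'] == counter_name:
            labels = []
            for label in counter.get('labels', []):
                labels.extend(label.split(','))
            return {'name': counter_name, 'labels': labels,
                    'base-counter': counter.get('base-counter')}
    else:
        # Reached only when the loop exhausted without a match.
        raise LookupError('Counter %s not found' % counter_name)

print(find_counter([{'name': 'avg_latency', 'labels': ['read,write,other']}],
                   'avg_latency'))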
def delete_snapshot(self, volume_name, snapshot_name):
'Deletes a volume snapshot.'
api_args = {'volume': volume_name, 'snapshot': snapshot_name}
self.connection.send_request('snapshot-delete', api_args) | 9,135,002,389,939,195,000 | Deletes a volume snapshot. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | delete_snapshot | sapcc/cinder | python | def delete_snapshot(self, volume_name, snapshot_name):
api_args = {'volume': volume_name, 'snapshot': snapshot_name}
self.connection.send_request('snapshot-delete', api_args) |
def create_cg_snapshot(self, volume_names, snapshot_name):
'Creates a consistency group snapshot out of one or more flexvols.\n\n ONTAP requires an invocation of cg-start to first fence off the\n flexvols to be included in the snapshot. If cg-start returns\n success, a cg-commit must be executed to finalize the snapshot and\n unfence the flexvols.\n '
cg_id = self._start_cg_snapshot(volume_names, snapshot_name)
if (not cg_id):
msg = _('Could not start consistency group snapshot %s.')
raise exception.VolumeBackendAPIException(data=(msg % snapshot_name))
self._commit_cg_snapshot(cg_id) | -1,963,789,309,185,197,600 | Creates a consistency group snapshot out of one or more flexvols.
ONTAP requires an invocation of cg-start to first fence off the
flexvols to be included in the snapshot. If cg-start returns
success, a cg-commit must be executed to finalize the snapshot and
unfence the flexvols. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | create_cg_snapshot | sapcc/cinder | python | def create_cg_snapshot(self, volume_names, snapshot_name):
'Creates a consistency group snapshot out of one or more flexvols.\n\n ONTAP requires an invocation of cg-start to first fence off the\n flexvols to be included in the snapshot. If cg-start returns\n success, a cg-commit must be executed to finalize the snapshot and\n unfence the flexvols.\n '
cg_id = self._start_cg_snapshot(volume_names, snapshot_name)
if (not cg_id):
msg = _('Could not start consistency group snapshot %s.')
raise exception.VolumeBackendAPIException(data=(msg % snapshot_name))
self._commit_cg_snapshot(cg_id) |
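create_cg_snapshot above captures the two-phase protocol from its docstring: cg-start fences the flexvols and returns an id, and only a successful id is followed by cg-commit. A hedged sketch of that orchestration shape; start_cg/commit_cg are illustrative stand-ins for the private _start_cg_snapshot/_commit_cg_snapshot helpers:

def snapshot_consistency_group(volume_names, snapshot_name, start_cg, commit_cg):
    cg_id = start_cg(volume_names, snapshot_name)   # cg-start: fence the flexvols
    if not cg_id:
        raise RuntimeError('Could not start consistency group snapshot %s.' % snapshot_name)
    commit_cg(cg_id)                                # cg-commit: finalize and unfence
    return cg_id

print(snapshot_consistency_group(['vol1', 'vol2'], 'snap1',
                                 start_cg=lambda vols, name: 'cg-42',
                                 commit_cg=lambda cg_id: None))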
def get_snapshot(self, volume_name, snapshot_name):
'Gets a single snapshot.'
raise NotImplementedError() | 2,336,373,799,148,635,000 | Gets a single snapshot. | cinder/volume/drivers/netapp/dataontap/client/client_base.py | get_snapshot | sapcc/cinder | python | def get_snapshot(self, volume_name, snapshot_name):
raise NotImplementedError() |