body_hash (stringlengths 64-64) | body (stringlengths 23-109k) | docstring (stringlengths 1-57k) | path (stringlengths 4-198) | name (stringlengths 1-115) | repository_name (stringlengths 7-111) | repository_stars (float64 0-191k) | lang (stringclasses 1 value) | body_without_docstring (stringlengths 14-108k) | unified (stringlengths 45-133k) |
---|---|---|---|---|---|---|---|---|---|
3106eafbeb541ae8ab72e332c8724747e2c48824c08f5c6da25a74416b80b251 | def components_mass(self):
'Returns a dictionary with the mass of each a/c component'
factors = self.factors
areas = self.areas
MTOW = self.data['MTOW']
ME = self.data['ME']
area_w = areas['wing']
area_f = areas['fuselage']
area_v = areas['vertical_tail']
area_h = areas['horizontal_tail']
mass = {}
mass['wing'] = (((factors['wing'] * area_w) + (factors['main_gear'] * MTOW)) + (factors['power_plant'] * ME))
mass['fuselage'] = (((factors['fuselage'] * area_f) + (factors['nose_gear'] * MTOW)) + (factors['systems'] * MTOW))
mass['horizontal_tail'] = (factors['horizontal_tail'] * area_h)
mass['vertical_tail'] = (factors['vertical_tail'] * area_v)
print(mass['wing'], mass['fuselage'], mass['horizontal_tail'], mass['vertical_tail'])
return mass | Returns a dictionary with the mass of each a/c component | aircraft/cg_calculation.py | components_mass | iamlucassantos/tutorial-systems-engineering | 1 | python | def components_mass(self):
factors = self.factors
areas = self.areas
MTOW = self.data['MTOW']
ME = self.data['ME']
area_w = areas['wing']
area_f = areas['fuselage']
area_v = areas['vertical_tail']
area_h = areas['horizontal_tail']
mass = {}
mass['wing'] = (((factors['wing'] * area_w) + (factors['main_gear'] * MTOW)) + (factors['power_plant'] * ME))
mass['fuselage'] = (((factors['fuselage'] * area_f) + (factors['nose_gear'] * MTOW)) + (factors['systems'] * MTOW))
mass['horizontal_tail'] = (factors['horizontal_tail'] * area_h)
mass['vertical_tail'] = (factors['vertical_tail'] * area_v)
print(mass['wing'], mass['fuselage'], mass['horizontal_tail'], mass['vertical_tail'])
return mass | def components_mass(self):
factors = self.factors
areas = self.areas
MTOW = self.data['MTOW']
ME = self.data['ME']
area_w = areas['wing']
area_f = areas['fuselage']
area_v = areas['vertical_tail']
area_h = areas['horizontal_tail']
mass = {}
mass['wing'] = (((factors['wing'] * area_w) + (factors['main_gear'] * MTOW)) + (factors['power_plant'] * ME))
mass['fuselage'] = (((factors['fuselage'] * area_f) + (factors['nose_gear'] * MTOW)) + (factors['systems'] * MTOW))
mass['horizontal_tail'] = (factors['horizontal_tail'] * area_h)
mass['vertical_tail'] = (factors['vertical_tail'] * area_v)
print(mass['wing'], mass['fuselage'], mass['horizontal_tail'], mass['vertical_tail'])
return mass<|docstring|>Returns a dictionary with the mass of each a/c component<|endoftext|> |
fbcbc6d56b3542174344054d409abf617b062f364bacf3401cc99652cd2c78e5 | def cg_distance_from_nose(self, x_loc, y, surface='w'):
'Returns the cg distance of the wing, vertical tail, and horizontal tail'
if (surface == 'w'):
tr = self.data['taper']
quarter_sweep = self.data['quart_sweep']
A = self.data['A']
distance_to_root = self.data['nose_distance_w']
elif (surface == 'v'):
tr = self.data['taper_v']
quarter_sweep = self.data['quart_sweep_v']
A = self.data['A_v']
distance_to_root = self.data['nose_distance_v']
elif (surface == 'h'):
tr = self.data['taper_h']
quarter_sweep = self.data['quart_sweep_h']
A = self.data['A_h']
distance_to_root = self.data['nose_distance_h']
else:
return None
sweep_le = np.arctan((np.tan(np.radians(quarter_sweep)) - ((4 / A) * (((- 0.25) * (1 - tr)) / (1 + tr)))))
self.sweep_le = sweep_le
cg_distance = ((x_loc + (y * np.tan(sweep_le))) + distance_to_root)
return cg_distance | Returns the cg distance of the wing, vertical tail, and horizontal tail | aircraft/cg_calculation.py | cg_distance_from_nose | iamlucassantos/tutorial-systems-engineering | 1 | python | def cg_distance_from_nose(self, x_loc, y, surface='w'):
if (surface == 'w'):
tr = self.data['taper']
quarter_sweep = self.data['quart_sweep']
A = self.data['A']
distance_to_root = self.data['nose_distance_w']
elif (surface == 'v'):
tr = self.data['taper_v']
quarter_sweep = self.data['quart_sweep_v']
A = self.data['A_v']
distance_to_root = self.data['nose_distance_v']
elif (surface == 'h'):
tr = self.data['taper_h']
quarter_sweep = self.data['quart_sweep_h']
A = self.data['A_h']
distance_to_root = self.data['nose_distance_h']
else:
return None
sweep_le = np.arctan((np.tan(np.radians(quarter_sweep)) - ((4 / A) * (((- 0.25) * (1 - tr)) / (1 + tr)))))
self.sweep_le = sweep_le
cg_distance = ((x_loc + (y * np.tan(sweep_le))) + distance_to_root)
return cg_distance | def cg_distance_from_nose(self, x_loc, y, surface='w'):
if (surface == 'w'):
tr = self.data['taper']
quarter_sweep = self.data['quart_sweep']
A = self.data['A']
distance_to_root = self.data['nose_distance_w']
elif (surface == 'v'):
tr = self.data['taper_v']
quarter_sweep = self.data['quart_sweep_v']
A = self.data['A_v']
distance_to_root = self.data['nose_distance_v']
elif (surface == 'h'):
tr = self.data['taper_h']
quarter_sweep = self.data['quart_sweep_h']
A = self.data['A_h']
distance_to_root = self.data['nose_distance_h']
else:
return None
sweep_le = np.arctan((np.tan(np.radians(quarter_sweep)) - ((4 / A) * (((- 0.25) * (1 - tr)) / (1 + tr)))))
self.sweep_le = sweep_le
cg_distance = ((x_loc + (y * np.tan(sweep_le))) + distance_to_root)
return cg_distance<|docstring|>Returns the cg distance of the wing, vertical tail, and horizontal tail<|endoftext|> |
e3cefd562d14098d7a49ed613c46c24ad0c1a9e667184602b8b19e21ecd77230 | def chord_at_pctg(self, span_pctg, surface='w'):
"Returns the chord length at n% from the\n\n args:\n root_pctg (float): pctg of the root where the chord is wanted\n surface (str): 'w' for wing, 'v' for vertical tail, 'h' for horizontal tail\n "
if (surface == 'w'):
taper_ratio = self.data['taper']
b = self.data['b']
cr = self.cr
elif (surface == 'v'):
taper_ratio = self.data['taper_v']
b = (self.data['b_half_v'] * 2)
cr = self.cr_v
elif (surface == 'h'):
taper_ratio = self.data['taper_h']
b = self.data['b_h']
cr = self.cr_h
else:
return None
y = ((span_pctg * b) / 2)
return ((cr * (1 - (((2 * (1 - taper_ratio)) * y) / b))), y) | Returns the chord length at n% from the
args:
root_pctg (float): pctg of the root where the chord is wanted
surface (str): 'w' for wing, 'v' for vertical tail, 'h' for horizontal tail | aircraft/cg_calculation.py | chord_at_pctg | iamlucassantos/tutorial-systems-engineering | 1 | python | def chord_at_pctg(self, span_pctg, surface='w'):
"Returns the chord length at n% from the\n\n args:\n root_pctg (float): pctg of the root where the chord is wanted\n surface (str): 'w' for wing, 'v' for vertical tail, 'h' for horizontal tail\n "
if (surface == 'w'):
taper_ratio = self.data['taper']
b = self.data['b']
cr = self.cr
elif (surface == 'v'):
taper_ratio = self.data['taper_v']
b = (self.data['b_half_v'] * 2)
cr = self.cr_v
elif (surface == 'h'):
taper_ratio = self.data['taper_h']
b = self.data['b_h']
cr = self.cr_h
else:
return None
y = ((span_pctg * b) / 2)
return ((cr * (1 - (((2 * (1 - taper_ratio)) * y) / b))), y) | def chord_at_pctg(self, span_pctg, surface='w'):
"Returns the chord length at n% from the\n\n args:\n root_pctg (float): pctg of the root where the chord is wanted\n surface (str): 'w' for wing, 'v' for vertical tail, 'h' for horizontal tail\n "
if (surface == 'w'):
taper_ratio = self.data['taper']
b = self.data['b']
cr = self.cr
elif (surface == 'v'):
taper_ratio = self.data['taper_v']
b = (self.data['b_half_v'] * 2)
cr = self.cr_v
elif (surface == 'h'):
taper_ratio = self.data['taper_h']
b = self.data['b_h']
cr = self.cr_h
else:
return None
y = ((span_pctg * b) / 2)
return ((cr * (1 - (((2 * (1 - taper_ratio)) * y) / b))), y)<|docstring|>Returns the chord length at n% from the
args:
root_pctg (float): pctg of the root where the chord is wanted
surface (str): 'w' for wing, 'v' for vertical tail, 'h' for horizontal tail<|endoftext|> |
9b0d5cdb54c07464f7568be0c4a8c2c54504e94026bd29fd1b66ce6706bd769c | def components_cg(self):
'Returns a dictionary with the cg of each a/c component'
cgs = {}
(chord_cg_w, dist_le_w) = self.chord_at_pctg(0.4, surface='w')
cgs['wing'] = self.cg_distance_from_nose((chord_cg_w * 0.38), dist_le_w, surface='w')
(chord_cg_h, dist_le_h) = self.chord_at_pctg(0.38, surface='h')
cgs['horizontal_tail'] = self.cg_distance_from_nose((chord_cg_h * 0.42), dist_le_h, surface='h')
(chord_cg_v, dist_le_v) = self.chord_at_pctg(0.38, surface='v')
cgs['vertical_tail'] = self.cg_distance_from_nose((chord_cg_v * 0.43), dist_le_v, surface='v')
cgs['fuselage'] = (0.42 * self.data['l_f'])
return cgs | Returns a dictionary with the cg of each a/c component | aircraft/cg_calculation.py | components_cg | iamlucassantos/tutorial-systems-engineering | 1 | python | def components_cg(self):
cgs = {}
(chord_cg_w, dist_le_w) = self.chord_at_pctg(0.4, surface='w')
cgs['wing'] = self.cg_distance_from_nose((chord_cg_w * 0.38), dist_le_w, surface='w')
(chord_cg_h, dist_le_h) = self.chord_at_pctg(0.38, surface='h')
cgs['horizontal_tail'] = self.cg_distance_from_nose((chord_cg_h * 0.42), dist_le_h, surface='h')
(chord_cg_v, dist_le_v) = self.chord_at_pctg(0.38, surface='v')
cgs['vertical_tail'] = self.cg_distance_from_nose((chord_cg_v * 0.43), dist_le_v, surface='v')
cgs['fuselage'] = (0.42 * self.data['l_f'])
return cgs | def components_cg(self):
cgs = {}
(chord_cg_w, dist_le_w) = self.chord_at_pctg(0.4, surface='w')
cgs['wing'] = self.cg_distance_from_nose((chord_cg_w * 0.38), dist_le_w, surface='w')
(chord_cg_h, dist_le_h) = self.chord_at_pctg(0.38, surface='h')
cgs['horizontal_tail'] = self.cg_distance_from_nose((chord_cg_h * 0.42), dist_le_h, surface='h')
(chord_cg_v, dist_le_v) = self.chord_at_pctg(0.38, surface='v')
cgs['vertical_tail'] = self.cg_distance_from_nose((chord_cg_v * 0.43), dist_le_v, surface='v')
cgs['fuselage'] = (0.42 * self.data['l_f'])
return cgs<|docstring|>Returns a dictionary with the cg of each a/c component<|endoftext|> |
440fad02280d4095b27f683ce56490a9320db817a7cfdd84bacde4250fb9b24c | def aircraft_cg(self):
'Returns the aircraft cg wrt the three main groups: wing, fuselage and tail'
(numerator, denominator) = (0, 0)
for group in ['wing', 'horizontal_tail', 'vertical_tail', 'fuselage']:
numerator += (self.mass[group] * self.cgs[group])
denominator += self.mass[group]
XLEMAC = self.data['XLEMAC']
mac = self.data['mac']
aircraft_cg = (((numerator / denominator) - XLEMAC) / mac)
return aircraft_cg | Returns the aircraft cg wrt the three main groups: wing, fuselage and tail | aircraft/cg_calculation.py | aircraft_cg | iamlucassantos/tutorial-systems-engineering | 1 | python | def aircraft_cg(self):
(numerator, denominator) = (0, 0)
for group in ['wing', 'horizontal_tail', 'vertical_tail', 'fuselage']:
numerator += (self.mass[group] * self.cgs[group])
denominator += self.mass[group]
XLEMAC = self.data['XLEMAC']
mac = self.data['mac']
aircraft_cg = (((numerator / denominator) - XLEMAC) / mac)
return aircraft_cg | def aircraft_cg(self):
(numerator, denominator) = (0, 0)
for group in ['wing', 'horizontal_tail', 'vertical_tail', 'fuselage']:
numerator += (self.mass[group] * self.cgs[group])
denominator += self.mass[group]
XLEMAC = self.data['XLEMAC']
mac = self.data['mac']
aircraft_cg = (((numerator / denominator) - XLEMAC) / mac)
return aircraft_cg<|docstring|>Returns the aircraft cg wrt the three main groups: wing, fuselage and tail<|endoftext|> |
302833e0cf4d4c1ebb9e39018f87d42aed0af929694eea631afccdc9474e17f2 | def check_projects(config: Configuration, username: str) -> Dict[(str, Any)]:
" Crawl through all projects to check for errors on loading or accessing imporant fields.\n Warning: This method may take a while.\n\n Args:\n config: Configuration to include root gigantum directory\n username: Active username - if none provided crawl for all users.\n\n Returns:\n Dictionary mapping a project path to errors\n\n Schema:\n {\n 'errors': {\n 'username/owner/labbooks/project-name': 'This is the error msg'\n },\n '_collectionTimeSec': 2.0\n }\n "
gigantum_root = config.app_workdir
project_paths = glob.glob(f'{gigantum_root}/{username}/*/labbooks/*')
inventory = InventoryManager()
t0 = time.time()
errors: Dict[(str, Any)] = {'errors': {}}
for project_path in project_paths:
try:
labbook = inventory.load_labbook_from_directory(project_path)
_ = (labbook.creation_date, labbook.modified_on, labbook.data)
except Exception as e:
logger.error(e)
errors['errors'][project_path.replace(gigantum_root, '')] = str(e)
tfin = time.time()
errors['_collectionTimeSec'] = float(f'{(tfin - t0):.2f}')
return errors | Crawl through all projects to check for errors on loading or accessing important fields.
Warning: This method may take a while.
Args:
config: Configuration to include root gigantum directory
username: Active username - if none provided crawl for all users.
Returns:
Dictionary mapping a project path to errors
Schema:
{
'errors': {
'username/owner/labbooks/project-name': 'This is the error msg'
},
'_collectionTimeSec': 2.0
} | packages/gtmapi/lmsrvcore/telemetry.py | check_projects | gigabackup/gigantum-client | 60 | python | def check_projects(config: Configuration, username: str) -> Dict[(str, Any)]:
" Crawl through all projects to check for errors on loading or accessing imporant fields.\n Warning: This method may take a while.\n\n Args:\n config: Configuration to include root gigantum directory\n username: Active username - if none provided crawl for all users.\n\n Returns:\n Dictionary mapping a project path to errors\n\n Schema:\n {\n 'errors': {\n 'username/owner/labbooks/project-name': 'This is the error msg'\n },\n '_collectionTimeSec': 2.0\n }\n "
gigantum_root = config.app_workdir
project_paths = glob.glob(f'{gigantum_root}/{username}/*/labbooks/*')
inventory = InventoryManager()
t0 = time.time()
errors: Dict[(str, Any)] = {'errors': {}}
for project_path in project_paths:
try:
labbook = inventory.load_labbook_from_directory(project_path)
_ = (labbook.creation_date, labbook.modified_on, labbook.data)
except Exception as e:
logger.error(e)
errors['errors'][project_path.replace(gigantum_root, )] = str(e)
tfin = time.time()
errors['_collectionTimeSec'] = float(f'{(tfin - t0):.2f}')
return errors | def check_projects(config: Configuration, username: str) -> Dict[(str, Any)]:
" Crawl through all projects to check for errors on loading or accessing imporant fields.\n Warning: This method may take a while.\n\n Args:\n config: Configuration to include root gigantum directory\n username: Active username - if none provided crawl for all users.\n\n Returns:\n Dictionary mapping a project path to errors\n\n Schema:\n {\n 'errors': {\n 'username/owner/labbooks/project-name': 'This is the error msg'\n },\n '_collectionTimeSec': 2.0\n }\n "
gigantum_root = config.app_workdir
project_paths = glob.glob(f'{gigantum_root}/{username}/*/labbooks/*')
inventory = InventoryManager()
t0 = time.time()
errors: Dict[(str, Any)] = {'errors': {}}
for project_path in project_paths:
try:
labbook = inventory.load_labbook_from_directory(project_path)
_ = (labbook.creation_date, labbook.modified_on, labbook.data)
except Exception as e:
logger.error(e)
errors['errors'][project_path.replace(gigantum_root, )] = str(e)
tfin = time.time()
errors['_collectionTimeSec'] = float(f'{(tfin - t0):.2f}')
return errors<|docstring|>Crawl through all projects to check for errors on loading or accessing important fields.
Warning: This method may take a while.
Args:
config: Configuration to include root gigantum directory
username: Active username - if none provided crawl for all users.
Returns:
Dictionary mapping a project path to errors
Schema:
{
'errors': {
'username/owner/labbooks/project-name': 'This is the error msg'
},
'_collectionTimeSec': 2.0
}<|endoftext|> |
ac2455b0a0cf732ab3a2c13842720189e28afd0040f1d9b8296a2b7e5884aff3 | def _calc_disk_free_gb() -> Tuple[(float, float)]:
'Call `df` from the Client Container, parse and return as GB'
disk_results = call_subprocess('df -BMB /'.split(), cwd='/').split('\n')
(_, disk_size, disk_used, disk_avail, use_pct, _) = disk_results[1].split()
(disk_size_num, disk_size_unit) = ((float(disk_used[:(- 2)]) / 1000), disk_used[(- 2):])
if (disk_size_unit != 'MB'):
raise ValueError(f'Encountered unexpected unit "{disk_size_unit}" from `df -BMB` in Client')
(disk_avail_num, disk_avail_unit) = ((float(disk_avail[:(- 2)]) / 1000), disk_avail[(- 2):])
if (disk_avail_unit != 'MB'):
raise ValueError(f'Encountered unexpected unit "{disk_avail_unit}" from `df -BMB` in Client')
return (disk_size_num, disk_avail_num) | Call `df` from the Client Container, parse and return as GB | packages/gtmapi/lmsrvcore/telemetry.py | _calc_disk_free_gb | gigabackup/gigantum-client | 60 | python | def _calc_disk_free_gb() -> Tuple[(float, float)]:
disk_results = call_subprocess('df -BMB /'.split(), cwd='/').split('\n')
(_, disk_size, disk_used, disk_avail, use_pct, _) = disk_results[1].split()
(disk_size_num, disk_size_unit) = ((float(disk_used[:(- 2)]) / 1000), disk_used[(- 2):])
if (disk_size_unit != 'MB'):
raise ValueError(f'Encountered unexpected unit "{disk_size_unit}" from `df -BMB` in Client')
(disk_avail_num, disk_avail_unit) = ((float(disk_avail[:(- 2)]) / 1000), disk_avail[(- 2):])
if (disk_avail_unit != 'MB'):
raise ValueError(f'Encountered unexpected unit "{disk_avail_unit}" from `df -BMB` in Client')
return (disk_size_num, disk_avail_num) | def _calc_disk_free_gb() -> Tuple[(float, float)]:
disk_results = call_subprocess('df -BMB /'.split(), cwd='/').split('\n')
(_, disk_size, disk_used, disk_avail, use_pct, _) = disk_results[1].split()
(disk_size_num, disk_size_unit) = ((float(disk_used[:(- 2)]) / 1000), disk_used[(- 2):])
if (disk_size_unit != 'MB'):
raise ValueError(f'Encountered unexpected unit "{disk_size_unit}" from `df -BMB` in Client')
(disk_avail_num, disk_avail_unit) = ((float(disk_avail[:(- 2)]) / 1000), disk_avail[(- 2):])
if (disk_avail_unit != 'MB'):
raise ValueError(f'Encountered unexpected unit "{disk_avail_unit}" from `df -BMB` in Client')
return (disk_size_num, disk_avail_num)<|docstring|>Call `df` from the Client Container, parse and return as GB<|endoftext|> |
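The helper above shells out to `df -BMB` inside the Client container and converts the used/available megabyte figures to GB. As a rough, hedged point of comparison (not part of the original module), the same two numbers can be read with the standard library's `shutil.disk_usage`, avoiding the subprocess and the unit parsing:

```python
import shutil

def disk_used_free_gb(path: str = "/") -> tuple:
    """Return (used_gb, free_gb), mirroring the return order of _calc_disk_free_gb."""
    usage = shutil.disk_usage(path)           # named tuple: total, used, free (bytes)
    return usage.used / 1e9, usage.free / 1e9

print(disk_used_free_gb("/"))
```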
a59b915eb3ac772617bb6bf12cd2a896b97c44a57a47ef4c3da4c3a7e8a4196b | def _calc_rq_free() -> Dict[(str, Any)]:
'Parses the output of `rq info` to return total number\n of workers and the count of workers currently idle.'
conn = default_redis_conn()
with rq.Connection(connection=conn):
workers: List[rq.Worker] = [w for w in rq.Worker.all()]
idle_workers = [w for w in workers if (w.get_state() == 'idle')]
resp = {'workersTotal': len(workers), 'workersIdle': len(idle_workers), 'workersUnknown': len([w for w in workers if (w.get_state() == '?')])}
queues = ('default', 'build', 'publish')
resp.update({f'queue{q.capitalize()}Size': len(rq.Queue(f'gigantum-{q}-queue', connection=conn)) for q in queues})
return resp | Parses the output of `rq info` to return total number
of workers and the count of workers currently idle. | packages/gtmapi/lmsrvcore/telemetry.py | _calc_rq_free | gigabackup/gigantum-client | 60 | python | def _calc_rq_free() -> Dict[(str, Any)]:
'Parses the output of `rq info` to return total number\n of workers and the count of workers currently idle.'
conn = default_redis_conn()
with rq.Connection(connection=conn):
workers: List[rq.Worker] = [w for w in rq.Worker.all()]
idle_workers = [w for w in workers if (w.get_state() == 'idle')]
resp = {'workersTotal': len(workers), 'workersIdle': len(idle_workers), 'workersUnknown': len([w for w in workers if (w.get_state() == '?')])}
queues = ('default', 'build', 'publish')
resp.update({f'queue{q.capitalize()}Size': len(rq.Queue(f'gigantum-{q}-queue', connection=conn)) for q in queues})
return resp | def _calc_rq_free() -> Dict[(str, Any)]:
'Parses the output of `rq info` to return total number\n of workers and the count of workers currently idle.'
conn = default_redis_conn()
with rq.Connection(connection=conn):
workers: List[rq.Worker] = [w for w in rq.Worker.all()]
idle_workers = [w for w in workers if (w.get_state() == 'idle')]
resp = {'workersTotal': len(workers), 'workersIdle': len(idle_workers), 'workersUnknown': len([w for w in workers if (w.get_state() == '?')])}
queues = ('default', 'build', 'publish')
resp.update({f'queue{q.capitalize()}Size': len(rq.Queue(f'gigantum-{q}-queue', connection=conn)) for q in queues})
return resp<|docstring|>Parses the output of `rq info` to return total number
of workers and the count of workers currently idle.<|endoftext|> |
ca900bf67765f13253be83262caba7d1985444b91a68bcd10a2257938b4d1fdf | def get_conv_mixer_256_8(image_size=32, filters=256, depth=8, kernel_size=5, patch_size=2, num_classes=10):
'ConvMixer-256/8: https://openreview.net/pdf?id=TVHS5Y4dNvM.\n The hyperparameter values are taken from the paper.\n '
inputs = keras.Input((image_size, image_size, 3))
x = layers.Rescaling(scale=(1.0 / 255))(inputs)
x = conv_stem(x, filters, patch_size)
for _ in range(depth):
x = conv_mixer_block(x, filters, kernel_size)
x = layers.GlobalAvgPool2D()(x)
outputs = layers.Dense(num_classes, activation='softmax')(x)
return keras.Model(inputs, outputs) | ConvMixer-256/8: https://openreview.net/pdf?id=TVHS5Y4dNvM.
The hyperparameter values are taken from the paper. | examples/vision/convmixer.py | get_conv_mixer_256_8 | k-w-w/keras-io | 1,542 | python | def get_conv_mixer_256_8(image_size=32, filters=256, depth=8, kernel_size=5, patch_size=2, num_classes=10):
'ConvMixer-256/8: https://openreview.net/pdf?id=TVHS5Y4dNvM.\n The hyperparameter values are taken from the paper.\n '
inputs = keras.Input((image_size, image_size, 3))
x = layers.Rescaling(scale=(1.0 / 255))(inputs)
x = conv_stem(x, filters, patch_size)
for _ in range(depth):
x = conv_mixer_block(x, filters, kernel_size)
x = layers.GlobalAvgPool2D()(x)
outputs = layers.Dense(num_classes, activation='softmax')(x)
return keras.Model(inputs, outputs) | def get_conv_mixer_256_8(image_size=32, filters=256, depth=8, kernel_size=5, patch_size=2, num_classes=10):
'ConvMixer-256/8: https://openreview.net/pdf?id=TVHS5Y4dNvM.\n The hyperparameter values are taken from the paper.\n '
inputs = keras.Input((image_size, image_size, 3))
x = layers.Rescaling(scale=(1.0 / 255))(inputs)
x = conv_stem(x, filters, patch_size)
for _ in range(depth):
x = conv_mixer_block(x, filters, kernel_size)
x = layers.GlobalAvgPool2D()(x)
outputs = layers.Dense(num_classes, activation='softmax')(x)
return keras.Model(inputs, outputs)<|docstring|>ConvMixer-256/8: https://openreview.net/pdf?id=TVHS5Y4dNvM.
The hyperparameter values are taken from the paper.<|endoftext|> |
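`conv_stem` and `conv_mixer_block` are defined elsewhere in the source example. As a hedged sketch only (the helpers in the original file may differ in detail), the usual ConvMixer building blocks from the paper look roughly like this, after which the model can be built and compiled as usual:

```python
from tensorflow import keras
from tensorflow.keras import layers

def activation_block(x):
    x = layers.Activation("gelu")(x)
    return layers.BatchNormalization()(x)

def conv_stem(x, filters, patch_size):
    # Patch embedding: non-overlapping patches via stride == kernel size.
    x = layers.Conv2D(filters, kernel_size=patch_size, strides=patch_size)(x)
    return activation_block(x)

def conv_mixer_block(x, filters, kernel_size):
    # Depthwise ("mix spatial locations") convolution with a residual connection ...
    x0 = x
    x = layers.DepthwiseConv2D(kernel_size=kernel_size, padding="same")(x)
    x = layers.Add()([activation_block(x), x0])
    # ... followed by a pointwise (1x1, "mix channels") convolution.
    x = layers.Conv2D(filters, kernel_size=1)(x)
    return activation_block(x)

model = get_conv_mixer_256_8()
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.summary()
```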
025fe97f88ae7c95b6878c52fe0c1c65f60131a3935b4baffa47bc0596938038 | def __init__(self, device='cpu'):
"\n Initialize the class.\n\n Parameters\n ----------\n device : str, optional\n The device where to put the data. The default is 'cpu'.\n "
self.device = device | Initialize the class.
Parameters
----------
device : str, optional
The device where to put the data. The default is 'cpu'. | profrage/generate/reconstruct.py | __init__ | federicoVS/ProFraGe | 0 | python | def __init__(self, device='cpu'):
"\n Initialize the class.\n\n Parameters\n ----------\n device : str, optional\n The device where to put the data. The default is 'cpu'.\n "
self.device = device | def __init__(self, device='cpu'):
"\n Initialize the class.\n\n Parameters\n ----------\n device : str, optional\n The device where to put the data. The default is 'cpu'.\n "
self.device = device<|docstring|>Initialize the class.
Parameters
----------
device : str, optional
The device where to put the data. The default is 'cpu'.<|endoftext|> |
94e5b958af674dafdd29fa82601efb387902990a67b053f3be1c2f49c6474eb2 | def reconstruct(self, D):
'\n Multidimensional scaling algorithm to reconstruct the data.\n\n Parameters\n ----------\n D : torch.Tensor\n The distance matrix.\n\n Returns\n -------\n X : numpy.ndarray\n The coordinate matrix.\n '
n = D.shape[0]
for i in range(n):
for j in range(n):
D[(i, j)] = (D[(i, j)] ** 2)
C = (torch.eye(n).to(self.device) - ((1 / n) * torch.ones(n, n).to(self.device)))
B = ((- 0.5) * torch.matmul(torch.matmul(C, D), C))
B = B.detach().cpu().numpy()
(w, v) = np.linalg.eig(B)
eigen_idx = np.argsort(w)[::(- 1)]
(w, v) = (w[eigen_idx], v[:, eigen_idx])
X = np.zeros(shape=(n, 3))
for i in range(n):
for j in range(3):
X[(i, j)] = (v[(i, j)] * np.sqrt(w[j]))
return X | Multidimensional scaling algorithm to reconstruct the data.
Parameters
----------
D : torch.Tensor
The distance matrix.
Returns
-------
X : numpy.ndarray
The coordinate matrix. | profrage/generate/reconstruct.py | reconstruct | federicoVS/ProFraGe | 0 | python | def reconstruct(self, D):
'\n Multidimensional scaling algorithm to reconstruct the data.\n\n Parameters\n ----------\n D : torch.Tensor\n The distance matrix.\n\n Returns\n -------\n X : numpy.ndarray\n The coordinate matrix.\n '
n = D.shape[0]
for i in range(n):
for j in range(n):
D[(i, j)] = (D[(i, j)] ** 2)
C = (torch.eye(n).to(self.device) - ((1 / n) * torch.ones(n, n).to(self.device)))
B = ((- 0.5) * torch.matmul(torch.matmul(C, D), C))
B = B.detach().cpu().numpy()
(w, v) = np.linalg.eig(B)
eigen_idx = np.argsort(w)[::(- 1)]
(w, v) = (w[eigen_idx], v[:, eigen_idx])
X = np.zeros(shape=(n, 3))
for i in range(n):
for j in range(3):
X[(i, j)] = (v[(i, j)] * np.sqrt(w[j]))
return X | def reconstruct(self, D):
'\n Multidimensional scaling algorithm to reconstruct the data.\n\n Parameters\n ----------\n D : torch.Tensor\n The distance matrix.\n\n Returns\n -------\n X : numpy.ndarray\n The coordinate matrix.\n '
n = D.shape[0]
for i in range(n):
for j in range(n):
D[(i, j)] = (D[(i, j)] ** 2)
C = (torch.eye(n).to(self.device) - ((1 / n) * torch.ones(n, n).to(self.device)))
B = ((- 0.5) * torch.matmul(torch.matmul(C, D), C))
B = B.detach().cpu().numpy()
(w, v) = np.linalg.eig(B)
eigen_idx = np.argsort(w)[::(- 1)]
(w, v) = (w[eigen_idx], v[:, eigen_idx])
X = np.zeros(shape=(n, 3))
for i in range(n):
for j in range(3):
X[(i, j)] = (v[(i, j)] * np.sqrt(w[j]))
return X<|docstring|>Multidimensional scaling algorithm to reconstruct the data.
Parameters
----------
D : torch.Tensor
The distance matrix.
Returns
-------
X : numpy.ndarray
The coordinate matrix.<|endoftext|> |
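`reconstruct` above is classical multidimensional scaling: square the distances, double-center them with C = I - (1/n) * ones(n, n), eigendecompose, and scale the leading eigenvectors (the columns of v) by the square roots of their eigenvalues. A self-contained NumPy check of the same recipe (an illustration, not part of the original module; coordinates are recovered only up to rotation/reflection, but pairwise distances are preserved):

```python
import numpy as np

rng = np.random.default_rng(0)
X_true = rng.normal(size=(8, 3))                                  # ground-truth 3-D points
D = np.linalg.norm(X_true[:, None] - X_true[None, :], axis=-1)    # pairwise distances

n = D.shape[0]
C = np.eye(n) - np.ones((n, n)) / n                               # centering matrix
B = -0.5 * C @ (D ** 2) @ C                                       # double-centered Gram matrix
w, v = np.linalg.eigh(B)                                          # B is symmetric: use eigh
idx = np.argsort(w)[::-1][:3]                                     # three largest eigenvalues
X_rec = v[:, idx] * np.sqrt(w[idx])                               # eigenvectors are columns of v

D_rec = np.linalg.norm(X_rec[:, None] - X_rec[None, :], axis=-1)
print(np.allclose(D, D_rec))                                      # True: distances preserved
```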
1f4c2a7cd419b43e669528a76e0a3cef7a227a0ceb8d8e1659ae44029701d2c1 | def get_activation(activation):
' returns the activation function represented by the input string '
if (activation and callable(activation)):
return activation
activation = [x for x in SUPPORTED_ACTIVATION_MAP if (activation.lower() == x.lower())]
assert ((len(activation) == 1) and isinstance(activation[0], str)), 'Unhandled activation function'
activation = activation[0]
if (activation.lower() == 'none'):
return None
return vars(torch.nn.modules.activation)[activation]() | returns the activation function represented by the input string | util/ml_and_math/layers.py | get_activation | pchlenski/NeuroSEED | 39 | python | def get_activation(activation):
' '
if (activation and callable(activation)):
return activation
activation = [x for x in SUPPORTED_ACTIVATION_MAP if (activation.lower() == x.lower())]
assert ((len(activation) == 1) and isinstance(activation[0], str)), 'Unhandled activation function'
activation = activation[0]
if (activation.lower() == 'none'):
return None
return vars(torch.nn.modules.activation)[activation]() | def get_activation(activation):
' '
if (activation and callable(activation)):
return activation
activation = [x for x in SUPPORTED_ACTIVATION_MAP if (activation.lower() == x.lower())]
assert ((len(activation) == 1) and isinstance(activation[0], str)), 'Unhandled activation function'
activation = activation[0]
if (activation.lower() == 'none'):
return None
return vars(torch.nn.modules.activation)[activation]()<|docstring|>returns the activation function represented by the input string<|endoftext|> |
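`SUPPORTED_ACTIVATION_MAP` is defined elsewhere in the source file; the core trick above is a case-insensitive name match followed by fetching the class from `torch.nn.modules.activation` and instantiating it. A minimal illustration of that last step:

```python
import torch

relu = vars(torch.nn.modules.activation)["ReLU"]()     # same lookup as above
print(relu)                                             # ReLU()
print(relu(torch.tensor([-1.0, 2.0])))                  # tensor([0., 2.])
```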
c5f0267fc9445900c179cf67705c8f8b42063d68fd015c1d5642e8aa6aa9de57 | def kronecker_product(t1, t2):
'\n Computes the Kronecker product between two tensors\n See https://en.wikipedia.org/wiki/Kronecker_product\n '
(t1_height, t1_width) = t1.size()
(t2_height, t2_width) = t2.size()
out_height = (t1_height * t2_height)
out_width = (t1_width * t2_width)
tiled_t2 = t2.repeat(t1_height, t1_width)
expanded_t1 = t1.unsqueeze(2).unsqueeze(3).repeat(1, t2_height, t2_width, 1).view(out_height, out_width)
return (expanded_t1 * tiled_t2) | Computes the Kronecker product between two tensors
See https://en.wikipedia.org/wiki/Kronecker_product | rlkit/torch/pytorch_util.py | kronecker_product | Asap7772/railrl_evalsawyer | 0 | python | def kronecker_product(t1, t2):
'\n Computes the Kronecker product between two tensors\n See https://en.wikipedia.org/wiki/Kronecker_product\n '
(t1_height, t1_width) = t1.size()
(t2_height, t2_width) = t2.size()
out_height = (t1_height * t2_height)
out_width = (t1_width * t2_width)
tiled_t2 = t2.repeat(t1_height, t1_width)
expanded_t1 = t1.unsqueeze(2).unsqueeze(3).repeat(1, t2_height, t2_width, 1).view(out_height, out_width)
return (expanded_t1 * tiled_t2) | def kronecker_product(t1, t2):
'\n Computes the Kronecker product between two tensors\n See https://en.wikipedia.org/wiki/Kronecker_product\n '
(t1_height, t1_width) = t1.size()
(t2_height, t2_width) = t2.size()
out_height = (t1_height * t2_height)
out_width = (t1_width * t2_width)
tiled_t2 = t2.repeat(t1_height, t1_width)
expanded_t1 = t1.unsqueeze(2).unsqueeze(3).repeat(1, t2_height, t2_width, 1).view(out_height, out_width)
return (expanded_t1 * tiled_t2)<|docstring|>Computes the Kronecker product between two tensors
See https://en.wikipedia.org/wiki/Kronecker_product<|endoftext|> |
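A quick self-contained cross-check (illustration only, not from the source repo) that the tile-and-expand construction above matches NumPy's built-in Kronecker product:

```python
import numpy as np
import torch

t1 = torch.arange(1.0, 5.0).reshape(2, 2)      # [[1., 2.], [3., 4.]]
t2 = torch.tensor([[0.0, 1.0], [1.0, 0.0]])

out = kronecker_product(t1, t2)                # function defined above
print(out.shape)                               # torch.Size([4, 4])
print(np.allclose(out.numpy(), np.kron(t1.numpy(), t2.numpy())))   # True
```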
d28344d44185fa1ad322933efa05b19b702bc4ca0b4c38d12b188d9535443996 | def double_moments(x, y):
'\n Returns the first two moments between x and y.\n\n Specifically, for each vector x_i and y_i in x and y, compute their\n outer-product. Flatten this resulting matrix and return it.\n\n The first moments (i.e. x_i and y_i) are included by appending a `1` to x_i\n and y_i before taking the outer product.\n :param x: Shape [batch_size, feature_x_dim]\n :param y: Shape [batch_size, feature_y_dim]\n :return: Shape [batch_size, (feature_x_dim + 1) * (feature_y_dim + 1)\n '
(batch_size, x_dim) = x.size()
(_, y_dim) = y.size()
x = torch.cat((x, torch.ones(batch_size, 1)), dim=1)
y = torch.cat((y, torch.ones(batch_size, 1)), dim=1)
x_dim += 1
y_dim += 1
x = x.unsqueeze(2)
y = y.unsqueeze(1)
outer_prod = (x.expand(batch_size, x_dim, y_dim) * y.expand(batch_size, x_dim, y_dim))
return outer_prod.view(batch_size, (- 1)) | Returns the first two moments between x and y.
Specifically, for each vector x_i and y_i in x and y, compute their
outer-product. Flatten this resulting matrix and return it.
The first moments (i.e. x_i and y_i) are included by appending a `1` to x_i
and y_i before taking the outer product.
:param x: Shape [batch_size, feature_x_dim]
:param y: Shape [batch_size, feature_y_dim]
:return: Shape [batch_size, (feature_x_dim + 1) * (feature_y_dim + 1) | rlkit/torch/pytorch_util.py | double_moments | Asap7772/railrl_evalsawyer | 0 | python | def double_moments(x, y):
'\n Returns the first two moments between x and y.\n\n Specifically, for each vector x_i and y_i in x and y, compute their\n outer-product. Flatten this resulting matrix and return it.\n\n The first moments (i.e. x_i and y_i) are included by appending a `1` to x_i\n and y_i before taking the outer product.\n :param x: Shape [batch_size, feature_x_dim]\n :param y: Shape [batch_size, feature_y_dim]\n :return: Shape [batch_size, (feature_x_dim + 1) * (feature_y_dim + 1)\n '
(batch_size, x_dim) = x.size()
(_, y_dim) = y.size()
x = torch.cat((x, torch.ones(batch_size, 1)), dim=1)
y = torch.cat((y, torch.ones(batch_size, 1)), dim=1)
x_dim += 1
y_dim += 1
x = x.unsqueeze(2)
y = y.unsqueeze(1)
outer_prod = (x.expand(batch_size, x_dim, y_dim) * y.expand(batch_size, x_dim, y_dim))
return outer_prod.view(batch_size, (- 1)) | def double_moments(x, y):
'\n Returns the first two moments between x and y.\n\n Specifically, for each vector x_i and y_i in x and y, compute their\n outer-product. Flatten this resulting matrix and return it.\n\n The first moments (i.e. x_i and y_i) are included by appending a `1` to x_i\n and y_i before taking the outer product.\n :param x: Shape [batch_size, feature_x_dim]\n :param y: Shape [batch_size, feature_y_dim]\n :return: Shape [batch_size, (feature_x_dim + 1) * (feature_y_dim + 1)\n '
(batch_size, x_dim) = x.size()
(_, y_dim) = y.size()
x = torch.cat((x, torch.ones(batch_size, 1)), dim=1)
y = torch.cat((y, torch.ones(batch_size, 1)), dim=1)
x_dim += 1
y_dim += 1
x = x.unsqueeze(2)
y = y.unsqueeze(1)
outer_prod = (x.expand(batch_size, x_dim, y_dim) * y.expand(batch_size, x_dim, y_dim))
return outer_prod.view(batch_size, (- 1))<|docstring|>Returns the first two moments between x and y.
Specifically, for each vector x_i and y_i in x and y, compute their
outer-product. Flatten this resulting matrix and return it.
The first moments (i.e. x_i and y_i) are included by appending a `1` to x_i
and y_i before taking the outer product.
:param x: Shape [batch_size, feature_x_dim]
:param y: Shape [batch_size, feature_y_dim]
:return: Shape [batch_size, (feature_x_dim + 1) * (feature_y_dim + 1)<|endoftext|> |
ced03cdd42ac0e5224f8fbcd46f362d08762ed0550e44fcc883ea5b9344d0049 | def batch_square_vector(vector, M):
'\n Compute x^T M x\n '
vector = vector.unsqueeze(2)
return torch.bmm(torch.bmm(vector.transpose(2, 1), M), vector).squeeze(2) | Compute x^T M x | rlkit/torch/pytorch_util.py | batch_square_vector | Asap7772/railrl_evalsawyer | 0 | python | def batch_square_vector(vector, M):
'\n \n '
vector = vector.unsqueeze(2)
return torch.bmm(torch.bmm(vector.transpose(2, 1), M), vector).squeeze(2) | def batch_square_vector(vector, M):
'\n \n '
vector = vector.unsqueeze(2)
return torch.bmm(torch.bmm(vector.transpose(2, 1), M), vector).squeeze(2)<|docstring|>Compute x^T M x<|endoftext|> |
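A short sanity check (illustration only) that the batched quadratic form above, x^T M x computed with two `bmm` calls, agrees with an explicit `einsum`:

```python
import torch

x = torch.randn(5, 3)        # batch of 5 vectors
M = torch.randn(5, 3, 3)     # batch of 5 matrices

out = batch_square_vector(x, M)                    # shape (5, 1)
ref = torch.einsum("bi,bij,bj->b", x, M, x)        # shape (5,)
print(torch.allclose(out.squeeze(1), ref, atol=1e-5))   # True
```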
ba33e708327ebdc8e6f5e67d22e93c1d7e2e18b638800ed7c621916fd3f00c74 | def almost_identity_weights_like(tensor):
'\n Set W = I + lambda * Gaussian no\n :param tensor:\n :return:\n '
shape = tensor.size()
init_value = np.eye(*shape)
init_value += (0.01 * np.random.rand(*shape))
return FloatTensor(init_value) | Set W = I + lambda * Gaussian no
:param tensor:
:return: | rlkit/torch/pytorch_util.py | almost_identity_weights_like | Asap7772/railrl_evalsawyer | 0 | python | def almost_identity_weights_like(tensor):
'\n Set W = I + lambda * Gaussian no\n :param tensor:\n :return:\n '
shape = tensor.size()
init_value = np.eye(*shape)
init_value += (0.01 * np.random.rand(*shape))
return FloatTensor(init_value) | def almost_identity_weights_like(tensor):
'\n Set W = I + lambda * Gaussian no\n :param tensor:\n :return:\n '
shape = tensor.size()
init_value = np.eye(*shape)
init_value += (0.01 * np.random.rand(*shape))
return FloatTensor(init_value)<|docstring|>Set W = I + lambda * Gaussian no
:param tensor:
:return:<|endoftext|> |
c38b313289f5e8b362267c52342a2a59e06247fd246ed0a0909c5756fcc052fa | def debounce(func):
'Decorator function. Debounce callbacks from HomeKit.'
@ha_callback
def call_later_listener(self, *args):
'Callback listener called from call_later.'
debounce_params = self.debounce.pop(func.__name__, None)
if debounce_params:
self.hass.async_add_job(func, self, *debounce_params[1:])
@wraps(func)
def wrapper(self, *args):
'Wrapper starts async timer.'
debounce_params = self.debounce.pop(func.__name__, None)
if debounce_params:
debounce_params[0]()
remove_listener = track_point_in_utc_time(self.hass, partial(call_later_listener, self), (dt_util.utcnow() + timedelta(seconds=DEBOUNCE_TIMEOUT)))
self.debounce[func.__name__] = (remove_listener, *args)
logger.debug('%s: Start %s timeout', self.entity_id, func.__name__.replace('set_', ''))
name = getmodule(func).__name__
logger = logging.getLogger(name)
return wrapper | Decorator function. Debounce callbacks from HomeKit. | homeassistant/components/homekit/accessories.py | debounce | ellsclytn/home-assistant | 0 | python | def debounce(func):
@ha_callback
def call_later_listener(self, *args):
'Callback listener called from call_later.'
debounce_params = self.debounce.pop(func.__name__, None)
if debounce_params:
self.hass.async_add_job(func, self, *debounce_params[1:])
@wraps(func)
def wrapper(self, *args):
'Wrapper starts async timer.'
debounce_params = self.debounce.pop(func.__name__, None)
if debounce_params:
debounce_params[0]()
remove_listener = track_point_in_utc_time(self.hass, partial(call_later_listener, self), (dt_util.utcnow() + timedelta(seconds=DEBOUNCE_TIMEOUT)))
self.debounce[func.__name__] = (remove_listener, *args)
logger.debug('%s: Start %s timeout', self.entity_id, func.__name__.replace('set_', ))
name = getmodule(func).__name__
logger = logging.getLogger(name)
return wrapper | def debounce(func):
@ha_callback
def call_later_listener(self, *args):
'Callback listener called from call_later.'
debounce_params = self.debounce.pop(func.__name__, None)
if debounce_params:
self.hass.async_add_job(func, self, *debounce_params[1:])
@wraps(func)
def wrapper(self, *args):
'Wrapper starts async timer.'
debounce_params = self.debounce.pop(func.__name__, None)
if debounce_params:
debounce_params[0]()
remove_listener = track_point_in_utc_time(self.hass, partial(call_later_listener, self), (dt_util.utcnow() + timedelta(seconds=DEBOUNCE_TIMEOUT)))
self.debounce[func.__name__] = (remove_listener, *args)
logger.debug('%s: Start %s timeout', self.entity_id, func.__name__.replace('set_', ))
name = getmodule(func).__name__
logger = logging.getLogger(name)
return wrapper<|docstring|>Decorator function. Debounce callbacks form HomeKit.<|endoftext|> |
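Outside of Home Assistant's event loop, the same idea can be sketched with a plain `threading.Timer` (a hypothetical illustration, not part of the source file): every call cancels the pending timer and re-arms it, so only the last arguments within the timeout window are applied.

```python
import threading
from functools import wraps

def debounce_plain(timeout: float):
    def decorator(func):
        timer = None
        @wraps(func)
        def wrapper(*args, **kwargs):
            nonlocal timer
            if timer is not None:
                timer.cancel()                        # drop the still-pending call
            timer = threading.Timer(timeout, func, args, kwargs)
            timer.start()
        return wrapper
    return decorator

@debounce_plain(0.5)
def set_brightness(value):
    print("applying brightness", value)

for value in (10, 40, 80):    # only the last call (80) fires, 0.5 s later
    set_brightness(value)
```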
3226cf1f407d093140dfe2ac02425aff41bef4d7fe928b81d0dee389235c986f | @ha_callback
def call_later_listener(self, *args):
'Callback listener called from call_later.'
debounce_params = self.debounce.pop(func.__name__, None)
if debounce_params:
self.hass.async_add_job(func, self, *debounce_params[1:]) | Callback listener called from call_later. | homeassistant/components/homekit/accessories.py | call_later_listener | ellsclytn/home-assistant | 0 | python | @ha_callback
def call_later_listener(self, *args):
debounce_params = self.debounce.pop(func.__name__, None)
if debounce_params:
self.hass.async_add_job(func, self, *debounce_params[1:]) | @ha_callback
def call_later_listener(self, *args):
debounce_params = self.debounce.pop(func.__name__, None)
if debounce_params:
self.hass.async_add_job(func, self, *debounce_params[1:])<|docstring|>Callback listener called from call_later.<|endoftext|> |
dfdba27c96f0d278201e3ef3cfe07de3e5e8c377c09fbb8d30180392e33fb67d | @wraps(func)
def wrapper(self, *args):
'Wrapper starts async timer.'
debounce_params = self.debounce.pop(func.__name__, None)
if debounce_params:
debounce_params[0]()
remove_listener = track_point_in_utc_time(self.hass, partial(call_later_listener, self), (dt_util.utcnow() + timedelta(seconds=DEBOUNCE_TIMEOUT)))
self.debounce[func.__name__] = (remove_listener, *args)
logger.debug('%s: Start %s timeout', self.entity_id, func.__name__.replace('set_', '')) | Wrapper starts async timer. | homeassistant/components/homekit/accessories.py | wrapper | ellsclytn/home-assistant | 0 | python | @wraps(func)
def wrapper(self, *args):
debounce_params = self.debounce.pop(func.__name__, None)
if debounce_params:
debounce_params[0]()
remove_listener = track_point_in_utc_time(self.hass, partial(call_later_listener, self), (dt_util.utcnow() + timedelta(seconds=DEBOUNCE_TIMEOUT)))
self.debounce[func.__name__] = (remove_listener, *args)
logger.debug('%s: Start %s timeout', self.entity_id, func.__name__.replace('set_', )) | @wraps(func)
def wrapper(self, *args):
debounce_params = self.debounce.pop(func.__name__, None)
if debounce_params:
debounce_params[0]()
remove_listener = track_point_in_utc_time(self.hass, partial(call_later_listener, self), (dt_util.utcnow() + timedelta(seconds=DEBOUNCE_TIMEOUT)))
self.debounce[func.__name__] = (remove_listener, *args)
logger.debug('%s: Start %s timeout', self.entity_id, func.__name__.replace('set_', ))<|docstring|>Wrapper starts async timer.<|endoftext|> |
27dc5ad998c7b411c16f952e6feb93d7fba869c61b81ccd3a6c2b38eb9fae159 | def __init__(self, hass, driver, name, entity_id, aid, config, category=CATEGORY_OTHER):
'Initialize an Accessory object.'
super().__init__(driver, name, aid=aid)
model = split_entity_id(entity_id)[0].replace('_', ' ').title()
self.set_info_service(firmware_revision=__version__, manufacturer=MANUFACTURER, model=model, serial_number=entity_id)
self.category = category
self.config = config
self.entity_id = entity_id
self.hass = hass
self.debounce = {} | Initialize an Accessory object. | homeassistant/components/homekit/accessories.py | __init__ | ellsclytn/home-assistant | 0 | python | def __init__(self, hass, driver, name, entity_id, aid, config, category=CATEGORY_OTHER):
super().__init__(driver, name, aid=aid)
model = split_entity_id(entity_id)[0].replace('_', ' ').title()
self.set_info_service(firmware_revision=__version__, manufacturer=MANUFACTURER, model=model, serial_number=entity_id)
self.category = category
self.config = config
self.entity_id = entity_id
self.hass = hass
self.debounce = {} | def __init__(self, hass, driver, name, entity_id, aid, config, category=CATEGORY_OTHER):
super().__init__(driver, name, aid=aid)
model = split_entity_id(entity_id)[0].replace('_', ' ').title()
self.set_info_service(firmware_revision=__version__, manufacturer=MANUFACTURER, model=model, serial_number=entity_id)
self.category = category
self.config = config
self.entity_id = entity_id
self.hass = hass
self.debounce = {}<|docstring|>Initialize a Accessory object.<|endoftext|> |
d017fed2018cf8f6b20a861835cacb617112b5b70d038c66f5d8c947a4def09c | async def run(self):
'Method called by accessory after driver is started.\n\n Run inside the HAP-python event loop.\n '
state = self.hass.states.get(self.entity_id)
self.hass.add_job(self.update_state_callback, None, None, state)
async_track_state_change(self.hass, self.entity_id, self.update_state_callback) | Method called by accessory after driver is started.
Run inside the HAP-python event loop. | homeassistant/components/homekit/accessories.py | run | ellsclytn/home-assistant | 0 | python | async def run(self):
'Method called by accessory after driver is started.\n\n Run inside the HAP-python event loop.\n '
state = self.hass.states.get(self.entity_id)
self.hass.add_job(self.update_state_callback, None, None, state)
async_track_state_change(self.hass, self.entity_id, self.update_state_callback) | async def run(self):
'Method called by accessory after driver is started.\n\n Run inside the HAP-python event loop.\n '
state = self.hass.states.get(self.entity_id)
self.hass.add_job(self.update_state_callback, None, None, state)
async_track_state_change(self.hass, self.entity_id, self.update_state_callback)<|docstring|>Method called by accessory after driver is started.
Run inside the HAP-python event loop.<|endoftext|> |
5206ec7f6ef4475a6445e3773e31172a3db0902c91b7b2102b2af99405c19554 | @ha_callback
def update_state_callback(self, entity_id=None, old_state=None, new_state=None):
'Callback from state change listener.'
_LOGGER.debug('New_state: %s', new_state)
if (new_state is None):
return
self.hass.async_add_job(self.update_state, new_state) | Callback from state change listener. | homeassistant/components/homekit/accessories.py | update_state_callback | ellsclytn/home-assistant | 0 | python | @ha_callback
def update_state_callback(self, entity_id=None, old_state=None, new_state=None):
_LOGGER.debug('New_state: %s', new_state)
if (new_state is None):
return
self.hass.async_add_job(self.update_state, new_state) | @ha_callback
def update_state_callback(self, entity_id=None, old_state=None, new_state=None):
_LOGGER.debug('New_state: %s', new_state)
if (new_state is None):
return
self.hass.async_add_job(self.update_state, new_state)<|docstring|>Callback from state change listener.<|endoftext|> |
bce0ddc35f700ff7b1d55480e931900cc9c02dce11bcf7d7d9e25a7ffa83b9af | def update_state(self, new_state):
'Method called on state change to update HomeKit value.\n\n Overridden by accessory types.\n '
raise NotImplementedError() | Method called on state change to update HomeKit value.
Overridden by accessory types. | homeassistant/components/homekit/accessories.py | update_state | ellsclytn/home-assistant | 0 | python | def update_state(self, new_state):
'Method called on state change to update HomeKit value.\n\n Overridden by accessory types.\n '
raise NotImplementedError() | def update_state(self, new_state):
'Method called on state change to update HomeKit value.\n\n Overridden by accessory types.\n '
raise NotImplementedError()<|docstring|>Method called on state change to update HomeKit value.
Overridden by accessory types.<|endoftext|> |
208c83e858615b7732439c27315501406b84981670fb7d18e468ac6275b569f8 | def __init__(self, hass, driver, name=BRIDGE_NAME):
'Initialize a Bridge object.'
super().__init__(driver, name)
self.set_info_service(firmware_revision=__version__, manufacturer=MANUFACTURER, model=BRIDGE_MODEL, serial_number=BRIDGE_SERIAL_NUMBER)
self.hass = hass | Initialize a Bridge object. | homeassistant/components/homekit/accessories.py | __init__ | ellsclytn/home-assistant | 0 | python | def __init__(self, hass, driver, name=BRIDGE_NAME):
super().__init__(driver, name)
self.set_info_service(firmware_revision=__version__, manufacturer=MANUFACTURER, model=BRIDGE_MODEL, serial_number=BRIDGE_SERIAL_NUMBER)
self.hass = hass | def __init__(self, hass, driver, name=BRIDGE_NAME):
super().__init__(driver, name)
self.set_info_service(firmware_revision=__version__, manufacturer=MANUFACTURER, model=BRIDGE_MODEL, serial_number=BRIDGE_SERIAL_NUMBER)
self.hass = hass<|docstring|>Initialize a Bridge object.<|endoftext|> |
97b4c4d7d22ce779a21ebb2d4539cfcb9f51afab13712bc1dfaef633280b4712 | def setup_message(self):
'Prevent print of pyhap setup message to terminal.'
pass | Prevent print of pyhap setup message to terminal. | homeassistant/components/homekit/accessories.py | setup_message | ellsclytn/home-assistant | 0 | python | def setup_message(self):
pass | def setup_message(self):
pass<|docstring|>Prevent print of pyhap setup message to terminal.<|endoftext|> |
8f3552e6cde56b40f055e704cf3f6cdcd91ba88f52a1aac7b947050fff864f81 | def __init__(self, hass, **kwargs):
'Initialize an AccessoryDriver object.'
super().__init__(**kwargs)
self.hass = hass | Initialize an AccessoryDriver object. | homeassistant/components/homekit/accessories.py | __init__ | ellsclytn/home-assistant | 0 | python | def __init__(self, hass, **kwargs):
super().__init__(**kwargs)
self.hass = hass | def __init__(self, hass, **kwargs):
super().__init__(**kwargs)
self.hass = hass<|docstring|>Initialize a AccessoryDriver object.<|endoftext|> |
a84f79174930d627b987ad80582999f6d9ef5c3dc51f632eb4bf1d908f7f3b23 | def pair(self, client_uuid, client_public):
'Override super function to dismiss setup message if paired.'
success = super().pair(client_uuid, client_public)
if success:
dismiss_setup_message(self.hass)
return success | Override super function to dismiss setup message if paired. | homeassistant/components/homekit/accessories.py | pair | ellsclytn/home-assistant | 0 | python | def pair(self, client_uuid, client_public):
success = super().pair(client_uuid, client_public)
if success:
dismiss_setup_message(self.hass)
return success | def pair(self, client_uuid, client_public):
success = super().pair(client_uuid, client_public)
if success:
dismiss_setup_message(self.hass)
return success<|docstring|>Override super function to dismiss setup message if paired.<|endoftext|> |
e8611daa867adc70fd29108886708c881911433dcd51fc470a3390c9e93671b5 | def unpair(self, client_uuid):
'Override super function to show setup message if unpaired.'
super().unpair(client_uuid)
show_setup_message(self.hass, self.state.pincode) | Override super function to show setup message if unpaired. | homeassistant/components/homekit/accessories.py | unpair | ellsclytn/home-assistant | 0 | python | def unpair(self, client_uuid):
super().unpair(client_uuid)
show_setup_message(self.hass, self.state.pincode) | def unpair(self, client_uuid):
super().unpair(client_uuid)
show_setup_message(self.hass, self.state.pincode)<|docstring|>Override super function to show setup message if unpaired.<|endoftext|> |
6b8667ce0beee80458a2de6d576ce233f99f2adb1e4c7a03c4814c8aa40284a8 | def cumulative_sum_minus_last(l, offset=0):
'Returns cumulative sums for set of counts, removing last entry.\n\n Returns the cumulative sums for a set of counts with the first returned value\n starting at 0. I.e [3,2,4] -> [0, 3, 5]. Note last sum element 9 is missing.\n Useful for reindexing\n\n Parameters\n ----------\n l: list\n List of integers. Typically small counts.\n '
return (np.delete(np.insert(np.cumsum(l), 0, 0), (- 1)) + offset) | Returns cumulative sums for set of counts, removing last entry.
Returns the cumulative sums for a set of counts with the first returned value
starting at 0. I.e [3,2,4] -> [0, 3, 5]. Note last sum element 9 is missing.
Useful for reindexing
Parameters
----------
l: list
List of integers. Typically small counts. | dcCustom/feat/mol_graphs.py | cumulative_sum_minus_last | simonfqy/DTI_prediction | 31 | python | def cumulative_sum_minus_last(l, offset=0):
'Returns cumulative sums for set of counts, removing last entry.\n\n Returns the cumulative sums for a set of counts with the first returned value\n starting at 0. I.e [3,2,4] -> [0, 3, 5]. Note last sum element 9 is missing.\n Useful for reindexing\n\n Parameters\n ----------\n l: list\n List of integers. Typically small counts.\n '
return (np.delete(np.insert(np.cumsum(l), 0, 0), (- 1)) + offset) | def cumulative_sum_minus_last(l, offset=0):
'Returns cumulative sums for set of counts, removing last entry.\n\n Returns the cumulative sums for a set of counts with the first returned value\n starting at 0. I.e [3,2,4] -> [0, 3, 5]. Note last sum element 9 is missing.\n Useful for reindexing\n\n Parameters\n ----------\n l: list\n List of integers. Typically small counts.\n '
return (np.delete(np.insert(np.cumsum(l), 0, 0), (- 1)) + offset)<|docstring|>Returns cumulative sums for set of counts, removing last entry.
Returns the cumulative sums for a set of counts with the first returned value
starting at 0. I.e [3,2,4] -> [0, 3, 5]. Note last sum element 9 is missing.
Useful for reindexing
Parameters
----------
l: list
List of integers. Typically small counts.<|endoftext|> |
ec02e999486e876c26b5dee085e80527a55df56300eebb6aa2155448ed0a55d9 | def cumulative_sum(l, offset=0):
'Returns cumulative sums for set of counts.\n\n Returns the cumulative sums for a set of counts with the first returned value\n starting at 0. I.e [3,2,4] -> [0, 3, 5, 9]. Keeps final sum for searching. \n Useful for reindexing.\n\n Parameters\n ----------\n l: list\n List of integers. Typically small counts.\n '
return (np.insert(np.cumsum(l), 0, 0) + offset) | Returns cumulative sums for set of counts.
Returns the cumulative sums for a set of counts with the first returned value
starting at 0. I.e [3,2,4] -> [0, 3, 5, 9]. Keeps final sum for searching.
Useful for reindexing.
Parameters
----------
l: list
List of integers. Typically small counts. | dcCustom/feat/mol_graphs.py | cumulative_sum | simonfqy/DTI_prediction | 31 | python | def cumulative_sum(l, offset=0):
'Returns cumulative sums for set of counts.\n\n Returns the cumulative sums for a set of counts with the first returned value\n starting at 0. I.e [3,2,4] -> [0, 3, 5, 9]. Keeps final sum for searching. \n Useful for reindexing.\n\n Parameters\n ----------\n l: list\n List of integers. Typically small counts.\n '
return (np.insert(np.cumsum(l), 0, 0) + offset) | def cumulative_sum(l, offset=0):
'Returns cumulative sums for set of counts.\n\n Returns the cumulative sums for a set of counts with the first returned value\n starting at 0. I.e [3,2,4] -> [0, 3, 5, 9]. Keeps final sum for searching. \n Useful for reindexing.\n\n Parameters\n ----------\n l: list\n List of integers. Typically small counts.\n '
return (np.insert(np.cumsum(l), 0, 0) + offset)<|docstring|>Returns cumulative sums for set of counts.
Returns the cumulative sums for a set of counts with the first returned value
starting at 0. I.e [3,2,4] -> [0, 3, 5, 9]. Keeps final sum for searching.
Useful for reindexing.
Parameters
----------
l: list
List of integers. Typically small counts.<|endoftext|> |
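A worked example of the two indexing helpers above (illustration only), matching the [3, 2, 4] case mentioned in their docstrings:

```python
import numpy as np

counts = [3, 2, 4]
print(cumulative_sum_minus_last(counts))    # [0 3 5]    start index of each block
print(cumulative_sum(counts))               # [0 3 5 9]  keeps the final total for searching
print(cumulative_sum(counts, offset=1))     # [ 1  4  6 10]
```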
c070946fbdcdd7153a52e8c66481d8edb3c48e9eff35ccceff983cd49fda461b | def __init__(self, atom_features, adj_list, smiles=None, max_deg=10, min_deg=0):
'\n Parameters\n ----------\n atom_features: np.ndarray\n Has shape (n_atoms, n_feat)\n canon_adj_list: list\n List of length n_atoms, with neighor indices of each atom.\n max_deg: int, optional\n Maximum degree of any atom.\n min_deg: int, optional\n Minimum degree of any atom.\n '
self.atom_features = atom_features
(self.n_atoms, self.n_feat) = atom_features.shape
self.deg_list = np.array([len(nbrs) for nbrs in adj_list], dtype=np.int32)
self.canon_adj_list = adj_list
self.deg_adj_lists = []
self.deg_slice = []
self.smiles = smiles
self.max_deg = max_deg
self.min_deg = min_deg
self.membership = (self.get_num_atoms() * [0])
self._deg_sort()
self.deg_id_list = (np.array(self.deg_list) - min_deg)
deg_size = [self.get_num_atoms_with_deg(deg) for deg in range(self.min_deg, (self.max_deg + 1))]
self.deg_start = cumulative_sum(deg_size)
deg_block_indices = [(i - self.deg_start[self.deg_list[i]]) for i in range(self.n_atoms)]
self.deg_block_indices = np.array(deg_block_indices) | Parameters
----------
atom_features: np.ndarray
Has shape (n_atoms, n_feat)
canon_adj_list: list
List of length n_atoms, with neighor indices of each atom.
max_deg: int, optional
Maximum degree of any atom.
min_deg: int, optional
Minimum degree of any atom. | dcCustom/feat/mol_graphs.py | __init__ | simonfqy/DTI_prediction | 31 | python | def __init__(self, atom_features, adj_list, smiles=None, max_deg=10, min_deg=0):
'\n Parameters\n ----------\n atom_features: np.ndarray\n Has shape (n_atoms, n_feat)\n canon_adj_list: list\n List of length n_atoms, with neighor indices of each atom.\n max_deg: int, optional\n Maximum degree of any atom.\n min_deg: int, optional\n Minimum degree of any atom.\n '
self.atom_features = atom_features
(self.n_atoms, self.n_feat) = atom_features.shape
self.deg_list = np.array([len(nbrs) for nbrs in adj_list], dtype=np.int32)
self.canon_adj_list = adj_list
self.deg_adj_lists = []
self.deg_slice = []
self.smiles = smiles
self.max_deg = max_deg
self.min_deg = min_deg
self.membership = (self.get_num_atoms() * [0])
self._deg_sort()
self.deg_id_list = (np.array(self.deg_list) - min_deg)
deg_size = [self.get_num_atoms_with_deg(deg) for deg in range(self.min_deg, (self.max_deg + 1))]
self.deg_start = cumulative_sum(deg_size)
deg_block_indices = [(i - self.deg_start[self.deg_list[i]]) for i in range(self.n_atoms)]
self.deg_block_indices = np.array(deg_block_indices) | def __init__(self, atom_features, adj_list, smiles=None, max_deg=10, min_deg=0):
'\n Parameters\n ----------\n atom_features: np.ndarray\n Has shape (n_atoms, n_feat)\n canon_adj_list: list\n List of length n_atoms, with neighor indices of each atom.\n max_deg: int, optional\n Maximum degree of any atom.\n min_deg: int, optional\n Minimum degree of any atom.\n '
self.atom_features = atom_features
(self.n_atoms, self.n_feat) = atom_features.shape
self.deg_list = np.array([len(nbrs) for nbrs in adj_list], dtype=np.int32)
self.canon_adj_list = adj_list
self.deg_adj_lists = []
self.deg_slice = []
self.smiles = smiles
self.max_deg = max_deg
self.min_deg = min_deg
self.membership = (self.get_num_atoms() * [0])
self._deg_sort()
self.deg_id_list = (np.array(self.deg_list) - min_deg)
deg_size = [self.get_num_atoms_with_deg(deg) for deg in range(self.min_deg, (self.max_deg + 1))]
self.deg_start = cumulative_sum(deg_size)
deg_block_indices = [(i - self.deg_start[self.deg_list[i]]) for i in range(self.n_atoms)]
self.deg_block_indices = np.array(deg_block_indices)<|docstring|>Parameters
----------
atom_features: np.ndarray
Has shape (n_atoms, n_feat)
canon_adj_list: list
List of length n_atoms, with neighbor indices of each atom.
max_deg: int, optional
Maximum degree of any atom.
min_deg: int, optional
Minimum degree of any atom.<|endoftext|> |
d5cd78eff2bf038a17034f881190013fd046f6cb159a8e7634c1fa84a7c85902 | def get_atoms_with_deg(self, deg):
'Retrieves atom_features with the specific degree'
start_ind = self.deg_slice[((deg - self.min_deg), 0)]
size = self.deg_slice[((deg - self.min_deg), 1)]
return self.atom_features[(start_ind:(start_ind + size), :)] | Retrieves atom_features with the specific degree | dcCustom/feat/mol_graphs.py | get_atoms_with_deg | simonfqy/DTI_prediction | 31 | python | def get_atoms_with_deg(self, deg):
start_ind = self.deg_slice[((deg - self.min_deg), 0)]
size = self.deg_slice[((deg - self.min_deg), 1)]
return self.atom_features[(start_ind:(start_ind + size), :)] | def get_atoms_with_deg(self, deg):
start_ind = self.deg_slice[((deg - self.min_deg), 0)]
size = self.deg_slice[((deg - self.min_deg), 1)]
return self.atom_features[(start_ind:(start_ind + size), :)]<|docstring|>Retrieves atom_features with the specific degree<|endoftext|> |
78741cae9dd904c8b7e7f08917e8c291c414a087b19b63b099731b47736c0b39 | def get_num_atoms_with_deg(self, deg):
'Returns the number of atoms with the given degree'
return self.deg_slice[((deg - self.min_deg), 1)] | Returns the number of atoms with the given degree | dcCustom/feat/mol_graphs.py | get_num_atoms_with_deg | simonfqy/DTI_prediction | 31 | python | def get_num_atoms_with_deg(self, deg):
return self.deg_slice[((deg - self.min_deg), 1)] | def get_num_atoms_with_deg(self, deg):
return self.deg_slice[((deg - self.min_deg), 1)]<|docstring|>Returns the number of atoms with the given degree<|endoftext|> |
a0d09c51a034ec95651671807a769a64461a9df13537a3430e7f82c47104f4b5 | def _deg_sort(self):
'Sorts atoms by degree and reorders internal data structures.\n\n Sort the order of the atom_features by degree, maintaining original order\n whenever two atom_features have the same degree. \n '
old_ind = range(self.get_num_atoms())
deg_list = self.deg_list
new_ind = list(np.lexsort((old_ind, deg_list)))
num_atoms = self.get_num_atoms()
self.atom_features = self.atom_features[(new_ind, :)]
self.deg_list = [self.deg_list[i] for i in new_ind]
self.membership = [self.membership[i] for i in new_ind]
old_to_new = dict(zip(new_ind, old_ind))
self.canon_adj_list = [self.canon_adj_list[i] for i in new_ind]
self.canon_adj_list = [[old_to_new[k] for k in self.canon_adj_list[i]] for i in range(len(new_ind))]
deg_array = np.array(self.deg_list)
self.deg_adj_lists = (((self.max_deg + 1) - self.min_deg) * [0])
for deg in range(self.min_deg, (self.max_deg + 1)):
rng = np.array(range(num_atoms))
indices = rng[(deg_array == deg)]
to_cat = [self.canon_adj_list[i] for i in indices]
if (len(to_cat) > 0):
adj_list = np.vstack([self.canon_adj_list[i] for i in indices])
self.deg_adj_lists[(deg - self.min_deg)] = adj_list
else:
self.deg_adj_lists[(deg - self.min_deg)] = np.zeros([0, deg], dtype=np.int32)
deg_slice = np.zeros([((self.max_deg + 1) - self.min_deg), 2], dtype=np.int32)
for deg in range(self.min_deg, (self.max_deg + 1)):
if (deg == 0):
deg_size = np.sum((deg_array == deg))
else:
deg_size = self.deg_adj_lists[(deg - self.min_deg)].shape[0]
deg_slice[((deg - self.min_deg), 1)] = deg_size
if (deg > self.min_deg):
deg_slice[((deg - self.min_deg), 0)] = (deg_slice[(((deg - self.min_deg) - 1), 0)] + deg_slice[(((deg - self.min_deg) - 1), 1)])
deg_slice[:, 0] *= (deg_slice[:, 1] != 0)
self.deg_slice = deg_slice | Sorts atoms by degree and reorders internal data structures.
Sort the order of the atom_features by degree, maintaining original order
whenever two atom_features have the same degree. | dcCustom/feat/mol_graphs.py | _deg_sort | simonfqy/DTI_prediction | 31 | python | def _deg_sort(self):
'Sorts atoms by degree and reorders internal data structures.\n\n Sort the order of the atom_features by degree, maintaining original order\n whenever two atom_features have the same degree. \n '
old_ind = range(self.get_num_atoms())
deg_list = self.deg_list
new_ind = list(np.lexsort((old_ind, deg_list)))
num_atoms = self.get_num_atoms()
self.atom_features = self.atom_features[(new_ind, :)]
self.deg_list = [self.deg_list[i] for i in new_ind]
self.membership = [self.membership[i] for i in new_ind]
old_to_new = dict(zip(new_ind, old_ind))
self.canon_adj_list = [self.canon_adj_list[i] for i in new_ind]
self.canon_adj_list = [[old_to_new[k] for k in self.canon_adj_list[i]] for i in range(len(new_ind))]
deg_array = np.array(self.deg_list)
self.deg_adj_lists = (((self.max_deg + 1) - self.min_deg) * [0])
for deg in range(self.min_deg, (self.max_deg + 1)):
rng = np.array(range(num_atoms))
indices = rng[(deg_array == deg)]
to_cat = [self.canon_adj_list[i] for i in indices]
if (len(to_cat) > 0):
adj_list = np.vstack([self.canon_adj_list[i] for i in indices])
self.deg_adj_lists[(deg - self.min_deg)] = adj_list
else:
self.deg_adj_lists[(deg - self.min_deg)] = np.zeros([0, deg], dtype=np.int32)
deg_slice = np.zeros([((self.max_deg + 1) - self.min_deg), 2], dtype=np.int32)
for deg in range(self.min_deg, (self.max_deg + 1)):
if (deg == 0):
deg_size = np.sum((deg_array == deg))
else:
deg_size = self.deg_adj_lists[(deg - self.min_deg)].shape[0]
deg_slice[((deg - self.min_deg), 1)] = deg_size
if (deg > self.min_deg):
deg_slice[((deg - self.min_deg), 0)] = (deg_slice[(((deg - self.min_deg) - 1), 0)] + deg_slice[(((deg - self.min_deg) - 1), 1)])
deg_slice[:, 0] *= (deg_slice[:, 1] != 0)
self.deg_slice = deg_slice | def _deg_sort(self):
'Sorts atoms by degree and reorders internal data structures.\n\n Sort the order of the atom_features by degree, maintaining original order\n whenever two atom_features have the same degree. \n '
old_ind = range(self.get_num_atoms())
deg_list = self.deg_list
new_ind = list(np.lexsort((old_ind, deg_list)))
num_atoms = self.get_num_atoms()
self.atom_features = self.atom_features[(new_ind, :)]
self.deg_list = [self.deg_list[i] for i in new_ind]
self.membership = [self.membership[i] for i in new_ind]
old_to_new = dict(zip(new_ind, old_ind))
self.canon_adj_list = [self.canon_adj_list[i] for i in new_ind]
self.canon_adj_list = [[old_to_new[k] for k in self.canon_adj_list[i]] for i in range(len(new_ind))]
deg_array = np.array(self.deg_list)
self.deg_adj_lists = (((self.max_deg + 1) - self.min_deg) * [0])
for deg in range(self.min_deg, (self.max_deg + 1)):
rng = np.array(range(num_atoms))
indices = rng[(deg_array == deg)]
to_cat = [self.canon_adj_list[i] for i in indices]
if (len(to_cat) > 0):
adj_list = np.vstack([self.canon_adj_list[i] for i in indices])
self.deg_adj_lists[(deg - self.min_deg)] = adj_list
else:
self.deg_adj_lists[(deg - self.min_deg)] = np.zeros([0, deg], dtype=np.int32)
deg_slice = np.zeros([((self.max_deg + 1) - self.min_deg), 2], dtype=np.int32)
for deg in range(self.min_deg, (self.max_deg + 1)):
if (deg == 0):
deg_size = np.sum((deg_array == deg))
else:
deg_size = self.deg_adj_lists[(deg - self.min_deg)].shape[0]
deg_slice[((deg - self.min_deg), 1)] = deg_size
if (deg > self.min_deg):
deg_slice[((deg - self.min_deg), 0)] = (deg_slice[(((deg - self.min_deg) - 1), 0)] + deg_slice[(((deg - self.min_deg) - 1), 1)])
deg_slice[:, 0] *= (deg_slice[:, 1] != 0)
self.deg_slice = deg_slice<|docstring|>Sorts atoms by degree and reorders internal data structures.
Sort the order of the atom_features by degree, maintaining original order
whenever two atom_features have the same degree.<|endoftext|> |
ef00b610a2e35c1a6d1787628085c754e9197546a3552fbf547d7f2280a0bb6c | def get_atom_features(self):
'Returns canonicalized version of atom features.\n\n Features are sorted by atom degree, with original order maintained when\n degrees are same.\n '
return self.atom_features | Returns canonicalized version of atom features.
Features are sorted by atom degree, with original order maintained when
degrees are same. | dcCustom/feat/mol_graphs.py | get_atom_features | simonfqy/DTI_prediction | 31 | python | def get_atom_features(self):
'Returns canonicalized version of atom features.\n\n Features are sorted by atom degree, with original order maintained when\n degrees are same.\n '
return self.atom_features | def get_atom_features(self):
'Returns canonicalized version of atom features.\n\n Features are sorted by atom degree, with original order maintained when\n degrees are same.\n '
return self.atom_features<|docstring|>Returns canonicalized version of atom features.
Features are sorted by atom degree, with original order maintained when
degrees are same.<|endoftext|> |
cb5dc576f0f84cd602e50eb972e03294bfbd058c5e2e994a242bdbece0112771 | def get_adjacency_list(self):
'Returns a canonicalized adjacency list.\n\n Canonicalized means that the atoms are re-ordered by degree.\n\n Returns\n -------\n list\n Canonicalized form of adjacency list.\n '
return self.canon_adj_list | Returns a canonicalized adjacency list.
Canonicalized means that the atoms are re-ordered by degree.
Returns
-------
list
Canonicalized form of adjacency list. | dcCustom/feat/mol_graphs.py | get_adjacency_list | simonfqy/DTI_prediction | 31 | python | def get_adjacency_list(self):
'Returns a canonicalized adjacency list.\n\n Canonicalized means that the atoms are re-ordered by degree.\n\n Returns\n -------\n list\n Canonicalized form of adjacency list.\n '
return self.canon_adj_list | def get_adjacency_list(self):
'Returns a canonicalized adjacency list.\n\n Canonicalized means that the atoms are re-ordered by degree.\n\n Returns\n -------\n list\n Canonicalized form of adjacency list.\n '
return self.canon_adj_list<|docstring|>Returns a canonicalized adjacency list.
Canonicalized means that the atoms are re-ordered by degree.
Returns
-------
list
Canonicalized form of adjacency list.<|endoftext|> |
189f343f5711a1beaaa809d3280a0bba710a7f8cf6b707f843c15f5e11cc8fee | def get_deg_adjacency_lists(self):
'Returns adjacency lists grouped by atom degree.\n\n Returns\n -------\n list\n Has length (max_deg+1-min_deg). The element at position deg is\n itself a list of the neighbor-lists for atoms with degree deg.\n '
return self.deg_adj_lists | Returns adjacency lists grouped by atom degree.
Returns
-------
list
Has length (max_deg+1-min_deg). The element at position deg is
itself a list of the neighbor-lists for atoms with degree deg. | dcCustom/feat/mol_graphs.py | get_deg_adjacency_lists | simonfqy/DTI_prediction | 31 | python | def get_deg_adjacency_lists(self):
'Returns adjacency lists grouped by atom degree.\n\n Returns\n -------\n list\n Has length (max_deg+1-min_deg). The element at position deg is\n itself a list of the neighbor-lists for atoms with degree deg.\n '
return self.deg_adj_lists | def get_deg_adjacency_lists(self):
'Returns adjacency lists grouped by atom degree.\n\n Returns\n -------\n list\n Has length (max_deg+1-min_deg). The element at position deg is\n itself a list of the neighbor-lists for atoms with degree deg.\n '
return self.deg_adj_lists<|docstring|>Returns adjacency lists grouped by atom degree.
Returns
-------
list
Has length (max_deg+1-min_deg). The element at position deg is
itself a list of the neighbor-lists for atoms with degree deg.<|endoftext|> |
7f5e166f22f7912dd53667a2f51e496b6caa52c23779dbc1f869d17f30c58a8f | def get_deg_slice(self):
"Returns degree-slice tensor.\n \n The deg_slice tensor allows indexing into a flattened version of the\n molecule's atoms. Assume atoms are sorted in order of degree. Then\n deg_slice[deg][0] is the starting position for atoms of degree deg in\n flattened list, and deg_slice[deg][1] is the number of atoms with degree deg.\n\n Note deg_slice has shape (max_deg+1-min_deg, 2).\n\n Returns\n -------\n deg_slice: np.ndarray \n Shape (max_deg+1-min_deg, 2)\n "
return self.deg_slice | Returns degree-slice tensor.
The deg_slice tensor allows indexing into a flattened version of the
molecule's atoms. Assume atoms are sorted in order of degree. Then
deg_slice[deg][0] is the starting position for atoms of degree deg in
flattened list, and deg_slice[deg][1] is the number of atoms with degree deg.
Note deg_slice has shape (max_deg+1-min_deg, 2).
Returns
-------
deg_slice: np.ndarray
Shape (max_deg+1-min_deg, 2) | dcCustom/feat/mol_graphs.py | get_deg_slice | simonfqy/DTI_prediction | 31 | python | def get_deg_slice(self):
"Returns degree-slice tensor.\n \n The deg_slice tensor allows indexing into a flattened version of the\n molecule's atoms. Assume atoms are sorted in order of degree. Then\n deg_slice[deg][0] is the starting position for atoms of degree deg in\n flattened list, and deg_slice[deg][1] is the number of atoms with degree deg.\n\n Note deg_slice has shape (max_deg+1-min_deg, 2).\n\n Returns\n -------\n deg_slice: np.ndarray \n Shape (max_deg+1-min_deg, 2)\n "
return self.deg_slice | def get_deg_slice(self):
"Returns degree-slice tensor.\n \n The deg_slice tensor allows indexing into a flattened version of the\n molecule's atoms. Assume atoms are sorted in order of degree. Then\n deg_slice[deg][0] is the starting position for atoms of degree deg in\n flattened list, and deg_slice[deg][1] is the number of atoms with degree deg.\n\n Note deg_slice has shape (max_deg+1-min_deg, 2).\n\n Returns\n -------\n deg_slice: np.ndarray \n Shape (max_deg+1-min_deg, 2)\n "
return self.deg_slice<|docstring|>Returns degree-slice tensor.
The deg_slice tensor allows indexing into a flattened version of the
molecule's atoms. Assume atoms are sorted in order of degree. Then
deg_slice[deg][0] is the starting position for atoms of degree deg in
flattened list, and deg_slice[deg][1] is the number of atoms with degree deg.
Note deg_slice has shape (max_deg+1-min_deg, 2).
Returns
-------
deg_slice: np.ndarray
Shape (max_deg+1-min_deg, 2)<|endoftext|> |
7956509d9c996527bc0e9dac0764163fc73d3971c1455350fe3e6d1349135376 | @staticmethod
def get_null_mol(n_feat, max_deg=10, min_deg=0):
'Constructs a null molecules\n\n Get one molecule with one atom of each degree, with all the atoms \n connected to themselves, and containing n_feat features.\n \n Parameters \n ----------\n n_feat : int\n number of features for the nodes in the null molecule\n '
atom_features = np.random.uniform(0, 1, [((max_deg + 1) - min_deg), n_feat])
canon_adj_list = [(deg * [(deg - min_deg)]) for deg in range(min_deg, (max_deg + 1))]
return ConvMol(atom_features, canon_adj_list) | Constructs a null molecules
Get one molecule with one atom of each degree, with all the atoms
connected to themselves, and containing n_feat features.
Parameters
----------
n_feat : int
number of features for the nodes in the null molecule | dcCustom/feat/mol_graphs.py | get_null_mol | simonfqy/DTI_prediction | 31 | python | @staticmethod
def get_null_mol(n_feat, max_deg=10, min_deg=0):
'Constructs a null molecules\n\n Get one molecule with one atom of each degree, with all the atoms \n connected to themselves, and containing n_feat features.\n \n Parameters \n ----------\n n_feat : int\n number of features for the nodes in the null molecule\n '
atom_features = np.random.uniform(0, 1, [((max_deg + 1) - min_deg), n_feat])
canon_adj_list = [(deg * [(deg - min_deg)]) for deg in range(min_deg, (max_deg + 1))]
return ConvMol(atom_features, canon_adj_list) | @staticmethod
def get_null_mol(n_feat, max_deg=10, min_deg=0):
'Constructs a null molecules\n\n Get one molecule with one atom of each degree, with all the atoms \n connected to themselves, and containing n_feat features.\n \n Parameters \n ----------\n n_feat : int\n number of features for the nodes in the null molecule\n '
atom_features = np.random.uniform(0, 1, [((max_deg + 1) - min_deg), n_feat])
canon_adj_list = [(deg * [(deg - min_deg)]) for deg in range(min_deg, (max_deg + 1))]
return ConvMol(atom_features, canon_adj_list)<|docstring|>Constructs a null molecules
Get one molecule with one atom of each degree, with all the atoms
connected to themselves, and containing n_feat features.
Parameters
----------
n_feat : int
number of features for the nodes in the null molecule<|endoftext|> |
ee42d244ca5de7ca0be930f713b549c3f4799e9095cbbce8a49a47907e57d0b9 | @staticmethod
def agglomerate_mols(mols, max_deg=10, min_deg=0):
"Concatenates list of ConvMol's into one mol object that can be used to feed \n into tensorflow placeholders. The indexing of the molecules are preserved during the\n combination, but the indexing of the atoms are greatly changed.\n \n Parameters \n ----\n mols: list\n ConvMol objects to be combined into one molecule."
num_mols = len(mols)
atoms_per_mol = [mol.get_num_atoms() for mol in mols]
smiles_per_mol = [mol.get_smiles() for mol in mols]
atoms_by_deg = [mol.get_atoms_with_deg(deg) for deg in range(min_deg, (max_deg + 1)) for mol in mols]
all_atoms = np.vstack(atoms_by_deg)
mol_deg_sz = [[mol.get_num_atoms_with_deg(deg) for mol in mols] for deg in range(min_deg, (max_deg + 1))]
deg_sizes = list(map(np.sum, mol_deg_sz))
deg_start = cumulative_sum_minus_last(deg_sizes)
deg_slice = np.array(list(zip(deg_start, deg_sizes)))
membership = [k for deg in range(min_deg, (max_deg + 1)) for k in range(num_mols) for i in range(mol_deg_sz[deg][k])]
start_by_deg = np.vstack([cumulative_sum_minus_last(l) for l in mol_deg_sz])
deg_block_indices = [mol.deg_block_indices for mol in mols]
deg_id_lists = [mol.deg_id_list for mol in mols]
start_per_mol = (deg_start[(:, np.newaxis)] + start_by_deg)
def to_final_id(mol_atom_id, mol_id):
deg_id = deg_id_lists[mol_id][mol_atom_id]
return (start_per_mol[(deg_id, mol_id)] + deg_block_indices[mol_id][mol_atom_id])
deg_adj_lists = [np.zeros([deg_sizes[deg], deg], dtype=np.int32) for deg in range(min_deg, (max_deg + 1))]
for deg in range(min_deg, (max_deg + 1)):
row = 0
deg_id = (deg - min_deg)
for mol_id in range(num_mols):
nbr_list = mols[mol_id].deg_adj_lists[deg_id]
for i in range(nbr_list.shape[0]):
for j in range(nbr_list.shape[1]):
deg_adj_lists[deg_id][(row, j)] = to_final_id(nbr_list[(i, j)], mol_id)
row += 1
concat_mol = MultiConvMol(all_atoms, deg_adj_lists, deg_slice, membership, num_mols, smiles_list=smiles_per_mol)
return concat_mol | Concatenates list of ConvMol's into one mol object that can be used to feed
into tensorflow placeholders. The indexing of the molecules are preserved during the
combination, but the indexing of the atoms are greatly changed.
Parameters
----
mols: list
ConvMol objects to be combined into one molecule. | dcCustom/feat/mol_graphs.py | agglomerate_mols | simonfqy/DTI_prediction | 31 | python | @staticmethod
def agglomerate_mols(mols, max_deg=10, min_deg=0):
"Concatenates list of ConvMol's into one mol object that can be used to feed \n into tensorflow placeholders. The indexing of the molecules are preserved during the\n combination, but the indexing of the atoms are greatly changed.\n \n Parameters \n ----\n mols: list\n ConvMol objects to be combined into one molecule."
num_mols = len(mols)
atoms_per_mol = [mol.get_num_atoms() for mol in mols]
smiles_per_mol = [mol.get_smiles() for mol in mols]
atoms_by_deg = [mol.get_atoms_with_deg(deg) for deg in range(min_deg, (max_deg + 1)) for mol in mols]
all_atoms = np.vstack(atoms_by_deg)
mol_deg_sz = [[mol.get_num_atoms_with_deg(deg) for mol in mols] for deg in range(min_deg, (max_deg + 1))]
deg_sizes = list(map(np.sum, mol_deg_sz))
deg_start = cumulative_sum_minus_last(deg_sizes)
deg_slice = np.array(list(zip(deg_start, deg_sizes)))
membership = [k for deg in range(min_deg, (max_deg + 1)) for k in range(num_mols) for i in range(mol_deg_sz[deg][k])]
start_by_deg = np.vstack([cumulative_sum_minus_last(l) for l in mol_deg_sz])
deg_block_indices = [mol.deg_block_indices for mol in mols]
deg_id_lists = [mol.deg_id_list for mol in mols]
start_per_mol = (deg_start[(:, np.newaxis)] + start_by_deg)
def to_final_id(mol_atom_id, mol_id):
deg_id = deg_id_lists[mol_id][mol_atom_id]
return (start_per_mol[(deg_id, mol_id)] + deg_block_indices[mol_id][mol_atom_id])
deg_adj_lists = [np.zeros([deg_sizes[deg], deg], dtype=np.int32) for deg in range(min_deg, (max_deg + 1))]
for deg in range(min_deg, (max_deg + 1)):
row = 0
deg_id = (deg - min_deg)
for mol_id in range(num_mols):
nbr_list = mols[mol_id].deg_adj_lists[deg_id]
for i in range(nbr_list.shape[0]):
for j in range(nbr_list.shape[1]):
deg_adj_lists[deg_id][(row, j)] = to_final_id(nbr_list[(i, j)], mol_id)
row += 1
concat_mol = MultiConvMol(all_atoms, deg_adj_lists, deg_slice, membership, num_mols, smiles_list=smiles_per_mol)
return concat_mol | @staticmethod
def agglomerate_mols(mols, max_deg=10, min_deg=0):
"Concatenates list of ConvMol's into one mol object that can be used to feed \n into tensorflow placeholders. The indexing of the molecules are preserved during the\n combination, but the indexing of the atoms are greatly changed.\n \n Parameters \n ----\n mols: list\n ConvMol objects to be combined into one molecule."
num_mols = len(mols)
atoms_per_mol = [mol.get_num_atoms() for mol in mols]
smiles_per_mol = [mol.get_smiles() for mol in mols]
atoms_by_deg = [mol.get_atoms_with_deg(deg) for deg in range(min_deg, (max_deg + 1)) for mol in mols]
all_atoms = np.vstack(atoms_by_deg)
mol_deg_sz = [[mol.get_num_atoms_with_deg(deg) for mol in mols] for deg in range(min_deg, (max_deg + 1))]
deg_sizes = list(map(np.sum, mol_deg_sz))
deg_start = cumulative_sum_minus_last(deg_sizes)
deg_slice = np.array(list(zip(deg_start, deg_sizes)))
membership = [k for deg in range(min_deg, (max_deg + 1)) for k in range(num_mols) for i in range(mol_deg_sz[deg][k])]
start_by_deg = np.vstack([cumulative_sum_minus_last(l) for l in mol_deg_sz])
deg_block_indices = [mol.deg_block_indices for mol in mols]
deg_id_lists = [mol.deg_id_list for mol in mols]
start_per_mol = (deg_start[(:, np.newaxis)] + start_by_deg)
def to_final_id(mol_atom_id, mol_id):
deg_id = deg_id_lists[mol_id][mol_atom_id]
return (start_per_mol[(deg_id, mol_id)] + deg_block_indices[mol_id][mol_atom_id])
deg_adj_lists = [np.zeros([deg_sizes[deg], deg], dtype=np.int32) for deg in range(min_deg, (max_deg + 1))]
for deg in range(min_deg, (max_deg + 1)):
row = 0
deg_id = (deg - min_deg)
for mol_id in range(num_mols):
nbr_list = mols[mol_id].deg_adj_lists[deg_id]
for i in range(nbr_list.shape[0]):
for j in range(nbr_list.shape[1]):
deg_adj_lists[deg_id][(row, j)] = to_final_id(nbr_list[(i, j)], mol_id)
row += 1
concat_mol = MultiConvMol(all_atoms, deg_adj_lists, deg_slice, membership, num_mols, smiles_list=smiles_per_mol)
return concat_mol<|docstring|>Concatenates list of ConvMol's into one mol object that can be used to feed
into tensorflow placeholders. The indexing of the molecules are preserved during the
combination, but the indexing of the atoms are greatly changed.
Parameters
----
mols: list
ConvMol objects to be combined into one molecule.<|endoftext|> |
2685e744e30a413d93b3d5c2d18ca438fcc7dbb80a0e5ab72e340265f115bbea | @pytest.fixture(scope='session', autouse=True)
def torch_single_threaded():
'Make PyTorch execute code single-threaded.\n\n This allows us to run the test suite with greater across-test parallelism.\n This is faster, since:\n - There are diminishing returns to more threads within a test.\n - Many tests cannot be multi-threaded (e.g. most not using PyTorch training),\n and we have to set between-test parallelism based on peak resource\n consumption of tests to avoid spurious failures.\n '
torch.set_num_threads(1)
torch.set_num_interop_threads(1) | Make PyTorch execute code single-threaded.
This allows us to run the test suite with greater across-test parallelism.
This is faster, since:
- There are diminishing returns to more threads within a test.
- Many tests cannot be multi-threaded (e.g. most not using PyTorch training),
and we have to set between-test parallelism based on peak resource
consumption of tests to avoid spurious failures. | tests/conftest.py | torch_single_threaded | NJFreymuth/imitation | 438 | python | @pytest.fixture(scope='session', autouse=True)
def torch_single_threaded():
'Make PyTorch execute code single-threaded.\n\n This allows us to run the test suite with greater across-test parallelism.\n This is faster, since:\n - There are diminishing returns to more threads within a test.\n - Many tests cannot be multi-threaded (e.g. most not using PyTorch training),\n and we have to set between-test parallelism based on peak resource\n consumption of tests to avoid spurious failures.\n '
torch.set_num_threads(1)
torch.set_num_interop_threads(1) | @pytest.fixture(scope='session', autouse=True)
def torch_single_threaded():
'Make PyTorch execute code single-threaded.\n\n This allows us to run the test suite with greater across-test parallelism.\n This is faster, since:\n - There are diminishing returns to more threads within a test.\n - Many tests cannot be multi-threaded (e.g. most not using PyTorch training),\n and we have to set between-test parallelism based on peak resource\n consumption of tests to avoid spurious failures.\n '
torch.set_num_threads(1)
torch.set_num_interop_threads(1)<|docstring|>Make PyTorch execute code single-threaded.
This allows us to run the test suite with greater across-test parallelism.
This is faster, since:
- There are diminishing returns to more threads within a test.
- Many tests cannot be multi-threaded (e.g. most not using PyTorch training),
and we have to set between-test parallelism based on peak resource
consumption of tests to avoid spurious failures.<|endoftext|> |
c28bdd0dd852bff848f1eeca0235def561cd4e2cd3260d3c85af4d535e687876 | def speech_transcription(input_uri):
'Transcribe speech from a video stored on GCS.'
from google.cloud import videointelligence_v1p1beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.SPEECH_TRANSCRIPTION]
config = videointelligence.types.SpeechTranscriptionConfig(language_code='en-US', enable_automatic_punctuation=True)
video_context = videointelligence.types.VideoContext(speech_transcription_config=config)
operation = video_client.annotate_video(input_uri, features=features, video_context=video_context)
print('\nProcessing video for speech transcription.')
result = operation.result(timeout=180)
annotation_results = result.annotation_results[0]
for speech_transcription in annotation_results.speech_transcriptions:
for alternative in speech_transcription.alternatives:
print('Alternative level information:')
print('Transcript: {}'.format(alternative.transcript))
print('Confidence: {}\n'.format(alternative.confidence))
print('Word level information:')
for word_info in alternative.words:
word = word_info.word
start_time = word_info.start_time
end_time = word_info.end_time
print('\t{}s - {}s: {}'.format((start_time.seconds + (start_time.nanos * 1e-09)), (end_time.seconds + (end_time.nanos * 1e-09)), word)) | Transcribe speech from a video stored on GCS. | video/cloud-client/analyze/beta_snippets.py | speech_transcription | namrathaPullalarevu/python-docs-samples | 3 | python | def speech_transcription(input_uri):
from google.cloud import videointelligence_v1p1beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.SPEECH_TRANSCRIPTION]
config = videointelligence.types.SpeechTranscriptionConfig(language_code='en-US', enable_automatic_punctuation=True)
video_context = videointelligence.types.VideoContext(speech_transcription_config=config)
operation = video_client.annotate_video(input_uri, features=features, video_context=video_context)
print('\nProcessing video for speech transcription.')
result = operation.result(timeout=180)
annotation_results = result.annotation_results[0]
for speech_transcription in annotation_results.speech_transcriptions:
for alternative in speech_transcription.alternatives:
print('Alternative level information:')
print('Transcript: {}'.format(alternative.transcript))
print('Confidence: {}\n'.format(alternative.confidence))
print('Word level information:')
for word_info in alternative.words:
word = word_info.word
start_time = word_info.start_time
end_time = word_info.end_time
print('\t{}s - {}s: {}'.format((start_time.seconds + (start_time.nanos * 1e-09)), (end_time.seconds + (end_time.nanos * 1e-09)), word)) | def speech_transcription(input_uri):
from google.cloud import videointelligence_v1p1beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.SPEECH_TRANSCRIPTION]
config = videointelligence.types.SpeechTranscriptionConfig(language_code='en-US', enable_automatic_punctuation=True)
video_context = videointelligence.types.VideoContext(speech_transcription_config=config)
operation = video_client.annotate_video(input_uri, features=features, video_context=video_context)
print('\nProcessing video for speech transcription.')
result = operation.result(timeout=180)
annotation_results = result.annotation_results[0]
for speech_transcription in annotation_results.speech_transcriptions:
for alternative in speech_transcription.alternatives:
print('Alternative level information:')
print('Transcript: {}'.format(alternative.transcript))
print('Confidence: {}\n'.format(alternative.confidence))
print('Word level information:')
for word_info in alternative.words:
word = word_info.word
start_time = word_info.start_time
end_time = word_info.end_time
print('\t{}s - {}s: {}'.format((start_time.seconds + (start_time.nanos * 1e-09)), (end_time.seconds + (end_time.nanos * 1e-09)), word))<|docstring|>Transcribe speech from a video stored on GCS.<|endoftext|> |
77581943d67c81bb1902f4810e3349cc6f837701ef7243171165f95e1e021d5a | def video_detect_text_gcs(input_uri):
'Detect text in a video stored on GCS.'
from google.cloud import videointelligence_v1p2beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.TEXT_DETECTION]
operation = video_client.annotate_video(input_uri=input_uri, features=features)
print('\nProcessing video for text detection.')
result = operation.result(timeout=300)
annotation_result = result.annotation_results[0]
text_annotation = annotation_result.text_annotations[0]
print('\nText: {}'.format(text_annotation.text))
text_segment = text_annotation.segments[0]
start_time = text_segment.segment.start_time_offset
end_time = text_segment.segment.end_time_offset
print('start_time: {}, end_time: {}'.format((start_time.seconds + (start_time.nanos * 1e-09)), (end_time.seconds + (end_time.nanos * 1e-09))))
print('Confidence: {}'.format(text_segment.confidence))
frame = text_segment.frames[0]
time_offset = frame.time_offset
print('Time offset for the first frame: {}'.format((time_offset.seconds + (time_offset.nanos * 1e-09))))
print('Rotated Bounding Box Vertices:')
for vertex in frame.rotated_bounding_box.vertices:
print('\tVertex.x: {}, Vertex.y: {}'.format(vertex.x, vertex.y))
return annotation_result.text_annotations | Detect text in a video stored on GCS. | video/cloud-client/analyze/beta_snippets.py | video_detect_text_gcs | namrathaPullalarevu/python-docs-samples | 3 | python | def video_detect_text_gcs(input_uri):
from google.cloud import videointelligence_v1p2beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.TEXT_DETECTION]
operation = video_client.annotate_video(input_uri=input_uri, features=features)
print('\nProcessing video for text detection.')
result = operation.result(timeout=300)
annotation_result = result.annotation_results[0]
text_annotation = annotation_result.text_annotations[0]
print('\nText: {}'.format(text_annotation.text))
text_segment = text_annotation.segments[0]
start_time = text_segment.segment.start_time_offset
end_time = text_segment.segment.end_time_offset
print('start_time: {}, end_time: {}'.format((start_time.seconds + (start_time.nanos * 1e-09)), (end_time.seconds + (end_time.nanos * 1e-09))))
print('Confidence: {}'.format(text_segment.confidence))
frame = text_segment.frames[0]
time_offset = frame.time_offset
print('Time offset for the first frame: {}'.format((time_offset.seconds + (time_offset.nanos * 1e-09))))
print('Rotated Bounding Box Vertices:')
for vertex in frame.rotated_bounding_box.vertices:
print('\tVertex.x: {}, Vertex.y: {}'.format(vertex.x, vertex.y))
return annotation_result.text_annotations | def video_detect_text_gcs(input_uri):
from google.cloud import videointelligence_v1p2beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.TEXT_DETECTION]
operation = video_client.annotate_video(input_uri=input_uri, features=features)
print('\nProcessing video for text detection.')
result = operation.result(timeout=300)
annotation_result = result.annotation_results[0]
text_annotation = annotation_result.text_annotations[0]
print('\nText: {}'.format(text_annotation.text))
text_segment = text_annotation.segments[0]
start_time = text_segment.segment.start_time_offset
end_time = text_segment.segment.end_time_offset
print('start_time: {}, end_time: {}'.format((start_time.seconds + (start_time.nanos * 1e-09)), (end_time.seconds + (end_time.nanos * 1e-09))))
print('Confidence: {}'.format(text_segment.confidence))
frame = text_segment.frames[0]
time_offset = frame.time_offset
print('Time offset for the first frame: {}'.format((time_offset.seconds + (time_offset.nanos * 1e-09))))
print('Rotated Bounding Box Vertices:')
for vertex in frame.rotated_bounding_box.vertices:
print('\tVertex.x: {}, Vertex.y: {}'.format(vertex.x, vertex.y))
return annotation_result.text_annotations<|docstring|>Detect text in a video stored on GCS.<|endoftext|> |
c43af4577a8d63a8b0f5fdc3a12ef052017cdbef2d40832bcb712bc5dd75ba1a | def video_detect_text(path):
'Detect text in a local video.'
from google.cloud import videointelligence_v1p2beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.TEXT_DETECTION]
video_context = videointelligence.types.VideoContext()
with io.open(path, 'rb') as file:
input_content = file.read()
operation = video_client.annotate_video(input_content=input_content, features=features, video_context=video_context)
print('\nProcessing video for text detection.')
result = operation.result(timeout=300)
annotation_result = result.annotation_results[0]
text_annotation = annotation_result.text_annotations[0]
print('\nText: {}'.format(text_annotation.text))
text_segment = text_annotation.segments[0]
start_time = text_segment.segment.start_time_offset
end_time = text_segment.segment.end_time_offset
print('start_time: {}, end_time: {}'.format((start_time.seconds + (start_time.nanos * 1e-09)), (end_time.seconds + (end_time.nanos * 1e-09))))
print('Confidence: {}'.format(text_segment.confidence))
frame = text_segment.frames[0]
time_offset = frame.time_offset
print('Time offset for the first frame: {}'.format((time_offset.seconds + (time_offset.nanos * 1e-09))))
print('Rotated Bounding Box Vertices:')
for vertex in frame.rotated_bounding_box.vertices:
print('\tVertex.x: {}, Vertex.y: {}'.format(vertex.x, vertex.y))
return annotation_result.text_annotations | Detect text in a local video. | video/cloud-client/analyze/beta_snippets.py | video_detect_text | namrathaPullalarevu/python-docs-samples | 3 | python | def video_detect_text(path):
from google.cloud import videointelligence_v1p2beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.TEXT_DETECTION]
video_context = videointelligence.types.VideoContext()
with io.open(path, 'rb') as file:
input_content = file.read()
operation = video_client.annotate_video(input_content=input_content, features=features, video_context=video_context)
print('\nProcessing video for text detection.')
result = operation.result(timeout=300)
annotation_result = result.annotation_results[0]
text_annotation = annotation_result.text_annotations[0]
print('\nText: {}'.format(text_annotation.text))
text_segment = text_annotation.segments[0]
start_time = text_segment.segment.start_time_offset
end_time = text_segment.segment.end_time_offset
print('start_time: {}, end_time: {}'.format((start_time.seconds + (start_time.nanos * 1e-09)), (end_time.seconds + (end_time.nanos * 1e-09))))
print('Confidence: {}'.format(text_segment.confidence))
frame = text_segment.frames[0]
time_offset = frame.time_offset
print('Time offset for the first frame: {}'.format((time_offset.seconds + (time_offset.nanos * 1e-09))))
print('Rotated Bounding Box Vertices:')
for vertex in frame.rotated_bounding_box.vertices:
print('\tVertex.x: {}, Vertex.y: {}'.format(vertex.x, vertex.y))
return annotation_result.text_annotations | def video_detect_text(path):
from google.cloud import videointelligence_v1p2beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.TEXT_DETECTION]
video_context = videointelligence.types.VideoContext()
with io.open(path, 'rb') as file:
input_content = file.read()
operation = video_client.annotate_video(input_content=input_content, features=features, video_context=video_context)
print('\nProcessing video for text detection.')
result = operation.result(timeout=300)
annotation_result = result.annotation_results[0]
text_annotation = annotation_result.text_annotations[0]
print('\nText: {}'.format(text_annotation.text))
text_segment = text_annotation.segments[0]
start_time = text_segment.segment.start_time_offset
end_time = text_segment.segment.end_time_offset
print('start_time: {}, end_time: {}'.format((start_time.seconds + (start_time.nanos * 1e-09)), (end_time.seconds + (end_time.nanos * 1e-09))))
print('Confidence: {}'.format(text_segment.confidence))
frame = text_segment.frames[0]
time_offset = frame.time_offset
print('Time offset for the first frame: {}'.format((time_offset.seconds + (time_offset.nanos * 1e-09))))
print('Rotated Bounding Box Vertices:')
for vertex in frame.rotated_bounding_box.vertices:
print('\tVertex.x: {}, Vertex.y: {}'.format(vertex.x, vertex.y))
return annotation_result.text_annotations<|docstring|>Detect text in a local video.<|endoftext|> |
a06a1061dc364fd1a84cf1cad6ed24a5586072f108a7d0693910a18eb8a45b7c | def track_objects_gcs(gcs_uri):
'Object Tracking.'
from google.cloud import videointelligence_v1p2beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.OBJECT_TRACKING]
operation = video_client.annotate_video(input_uri=gcs_uri, features=features, location_id='us-east1')
print('\nProcessing video for object annotations.')
result = operation.result(timeout=300)
print('\nFinished processing.\n')
object_annotations = result.annotation_results[0].object_annotations
object_annotation = object_annotations[0]
print(u'Entity description: {}'.format(object_annotation.entity.description))
if object_annotation.entity.entity_id:
print('Entity id: {}'.format(object_annotation.entity.entity_id))
print('Segment: {}s to {}s'.format((object_annotation.segment.start_time_offset.seconds + (object_annotation.segment.start_time_offset.nanos / 1000000000.0)), (object_annotation.segment.end_time_offset.seconds + (object_annotation.segment.end_time_offset.nanos / 1000000000.0))))
print('Confidence: {}'.format(object_annotation.confidence))
frame = object_annotation.frames[0]
box = frame.normalized_bounding_box
print('Time offset of the first frame: {}s'.format((frame.time_offset.seconds + (frame.time_offset.nanos / 1000000000.0))))
print('Bounding box position:')
print('\tleft : {}'.format(box.left))
print('\ttop : {}'.format(box.top))
print('\tright : {}'.format(box.right))
print('\tbottom: {}'.format(box.bottom))
print('\n')
return object_annotations | Object Tracking. | video/cloud-client/analyze/beta_snippets.py | track_objects_gcs | namrathaPullalarevu/python-docs-samples | 3 | python | def track_objects_gcs(gcs_uri):
from google.cloud import videointelligence_v1p2beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.OBJECT_TRACKING]
operation = video_client.annotate_video(input_uri=gcs_uri, features=features, location_id='us-east1')
print('\nProcessing video for object annotations.')
result = operation.result(timeout=300)
print('\nFinished processing.\n')
object_annotations = result.annotation_results[0].object_annotations
object_annotation = object_annotations[0]
print(u'Entity description: {}'.format(object_annotation.entity.description))
if object_annotation.entity.entity_id:
print('Entity id: {}'.format(object_annotation.entity.entity_id))
print('Segment: {}s to {}s'.format((object_annotation.segment.start_time_offset.seconds + (object_annotation.segment.start_time_offset.nanos / 1000000000.0)), (object_annotation.segment.end_time_offset.seconds + (object_annotation.segment.end_time_offset.nanos / 1000000000.0))))
print('Confidence: {}'.format(object_annotation.confidence))
frame = object_annotation.frames[0]
box = frame.normalized_bounding_box
print('Time offset of the first frame: {}s'.format((frame.time_offset.seconds + (frame.time_offset.nanos / 1000000000.0))))
print('Bounding box position:')
print('\tleft : {}'.format(box.left))
print('\ttop : {}'.format(box.top))
print('\tright : {}'.format(box.right))
print('\tbottom: {}'.format(box.bottom))
print('\n')
return object_annotations | def track_objects_gcs(gcs_uri):
from google.cloud import videointelligence_v1p2beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.OBJECT_TRACKING]
operation = video_client.annotate_video(input_uri=gcs_uri, features=features, location_id='us-east1')
print('\nProcessing video for object annotations.')
result = operation.result(timeout=300)
print('\nFinished processing.\n')
object_annotations = result.annotation_results[0].object_annotations
object_annotation = object_annotations[0]
print(u'Entity description: {}'.format(object_annotation.entity.description))
if object_annotation.entity.entity_id:
print('Entity id: {}'.format(object_annotation.entity.entity_id))
print('Segment: {}s to {}s'.format((object_annotation.segment.start_time_offset.seconds + (object_annotation.segment.start_time_offset.nanos / 1000000000.0)), (object_annotation.segment.end_time_offset.seconds + (object_annotation.segment.end_time_offset.nanos / 1000000000.0))))
print('Confidence: {}'.format(object_annotation.confidence))
frame = object_annotation.frames[0]
box = frame.normalized_bounding_box
print('Time offset of the first frame: {}s'.format((frame.time_offset.seconds + (frame.time_offset.nanos / 1000000000.0))))
print('Bounding box position:')
print('\tleft : {}'.format(box.left))
print('\ttop : {}'.format(box.top))
print('\tright : {}'.format(box.right))
print('\tbottom: {}'.format(box.bottom))
print('\n')
return object_annotations<|docstring|>Object Tracking.<|endoftext|> |
80b27b10ae7b98517bcc746b6a8365be5c0d27ec4bf504cbc0f545e93384ca11 | def track_objects(path):
'Object Tracking.'
from google.cloud import videointelligence_v1p2beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.OBJECT_TRACKING]
with io.open(path, 'rb') as file:
input_content = file.read()
operation = video_client.annotate_video(input_content=input_content, features=features, location_id='us-east1')
print('\nProcessing video for object annotations.')
result = operation.result(timeout=300)
print('\nFinished processing.\n')
object_annotations = result.annotation_results[0].object_annotations
object_annotation = object_annotations[0]
print(u'Entity description: {}'.format(object_annotation.entity.description))
if object_annotation.entity.entity_id:
print('Entity id: {}'.format(object_annotation.entity.entity_id))
print('Segment: {}s to {}s'.format((object_annotation.segment.start_time_offset.seconds + (object_annotation.segment.start_time_offset.nanos / 1000000000.0)), (object_annotation.segment.end_time_offset.seconds + (object_annotation.segment.end_time_offset.nanos / 1000000000.0))))
print('Confidence: {}'.format(object_annotation.confidence))
frame = object_annotation.frames[0]
box = frame.normalized_bounding_box
print('Time offset of the first frame: {}s'.format((frame.time_offset.seconds + (frame.time_offset.nanos / 1000000000.0))))
print('Bounding box position:')
print('\tleft : {}'.format(box.left))
print('\ttop : {}'.format(box.top))
print('\tright : {}'.format(box.right))
print('\tbottom: {}'.format(box.bottom))
print('\n')
return object_annotations | Object Tracking. | video/cloud-client/analyze/beta_snippets.py | track_objects | namrathaPullalarevu/python-docs-samples | 3 | python | def track_objects(path):
from google.cloud import videointelligence_v1p2beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.OBJECT_TRACKING]
with io.open(path, 'rb') as file:
input_content = file.read()
operation = video_client.annotate_video(input_content=input_content, features=features, location_id='us-east1')
print('\nProcessing video for object annotations.')
result = operation.result(timeout=300)
print('\nFinished processing.\n')
object_annotations = result.annotation_results[0].object_annotations
object_annotation = object_annotations[0]
print(u'Entity description: {}'.format(object_annotation.entity.description))
if object_annotation.entity.entity_id:
print('Entity id: {}'.format(object_annotation.entity.entity_id))
print('Segment: {}s to {}s'.format((object_annotation.segment.start_time_offset.seconds + (object_annotation.segment.start_time_offset.nanos / 1000000000.0)), (object_annotation.segment.end_time_offset.seconds + (object_annotation.segment.end_time_offset.nanos / 1000000000.0))))
print('Confidence: {}'.format(object_annotation.confidence))
frame = object_annotation.frames[0]
box = frame.normalized_bounding_box
print('Time offset of the first frame: {}s'.format((frame.time_offset.seconds + (frame.time_offset.nanos / 1000000000.0))))
print('Bounding box position:')
print('\tleft : {}'.format(box.left))
print('\ttop : {}'.format(box.top))
print('\tright : {}'.format(box.right))
print('\tbottom: {}'.format(box.bottom))
print('\n')
return object_annotations | def track_objects(path):
from google.cloud import videointelligence_v1p2beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.OBJECT_TRACKING]
with io.open(path, 'rb') as file:
input_content = file.read()
operation = video_client.annotate_video(input_content=input_content, features=features, location_id='us-east1')
print('\nProcessing video for object annotations.')
result = operation.result(timeout=300)
print('\nFinished processing.\n')
object_annotations = result.annotation_results[0].object_annotations
object_annotation = object_annotations[0]
print(u'Entity description: {}'.format(object_annotation.entity.description))
if object_annotation.entity.entity_id:
print('Entity id: {}'.format(object_annotation.entity.entity_id))
print('Segment: {}s to {}s'.format((object_annotation.segment.start_time_offset.seconds + (object_annotation.segment.start_time_offset.nanos / 1000000000.0)), (object_annotation.segment.end_time_offset.seconds + (object_annotation.segment.end_time_offset.nanos / 1000000000.0))))
print('Confidence: {}'.format(object_annotation.confidence))
frame = object_annotation.frames[0]
box = frame.normalized_bounding_box
print('Time offset of the first frame: {}s'.format((frame.time_offset.seconds + (frame.time_offset.nanos / 1000000000.0))))
print('Bounding box position:')
print('\tleft : {}'.format(box.left))
print('\ttop : {}'.format(box.top))
print('\tright : {}'.format(box.right))
print('\tbottom: {}'.format(box.bottom))
print('\n')
return object_annotations<|docstring|>Object Tracking.<|endoftext|> |
da2ef139a3a78549d7166f3fa69b0e0e3413c70d8a83f288ab40a432d9c5a5ee | def decision_function(self, history, point, **configuration):
'\n Return False while number of measurements less than max_repeats_of_experiment (inherited from abstract class).\n In other case - compute result as average between all experiments.\n :param history: history class object that stores all experiments results\n :param point: concrete experiment configuration that is evaluating\n :return: result or False\n '
all_experiments = history.get(point)
if (len(all_experiments) < self.max_repeats_of_experiment):
return False
else:
result = [0 for x in range(len(all_experiments[0]))]
for experiment in all_experiments:
for (index, value) in enumerate(experiment):
result[index] += value
for (index, value) in enumerate(result):
result[index] = eval(self.WSClient.results_data_types[index])(round((value / len(all_experiments)), 2))
return result | Return False while number of measurements less than max_repeats_of_experiment (inherited from abstract class).
In other case - compute result as average between all experiments.
:param history: history class object that stores all experiments results
:param point: concrete experiment configuration that is evaluating
:return: result or False | main-node/repeater/default_repeater.py | decision_function | Valavanca/benchmark | 0 | python | def decision_function(self, history, point, **configuration):
'\n Return False while number of measurements less than max_repeats_of_experiment (inherited from abstract class).\n In other case - compute result as average between all experiments.\n :param history: history class object that stores all experiments results\n :param point: concrete experiment configuration that is evaluating\n :return: result or False\n '
all_experiments = history.get(point)
if (len(all_experiments) < self.max_repeats_of_experiment):
return False
else:
result = [0 for x in range(len(all_experiments[0]))]
for experiment in all_experiments:
for (index, value) in enumerate(experiment):
result[index] += value
for (index, value) in enumerate(result):
result[index] = eval(self.WSClient.results_data_types[index])(round((value / len(all_experiments)), 2))
return result | def decision_function(self, history, point, **configuration):
'\n Return False while number of measurements less than max_repeats_of_experiment (inherited from abstract class).\n In other case - compute result as average between all experiments.\n :param history: history class object that stores all experiments results\n :param point: concrete experiment configuration that is evaluating\n :return: result or False\n '
all_experiments = history.get(point)
if (len(all_experiments) < self.max_repeats_of_experiment):
return False
else:
result = [0 for x in range(len(all_experiments[0]))]
for experiment in all_experiments:
for (index, value) in enumerate(experiment):
result[index] += value
for (index, value) in enumerate(result):
result[index] = eval(self.WSClient.results_data_types[index])(round((value / len(all_experiments)), 2))
return result<|docstring|>Return False while number of measurements less than max_repeats_of_experiment (inherited from abstract class).
In other case - compute result as average between all experiments.
:param history: history class object that stores all experiments results
:param point: concrete experiment configuration that is evaluating
:return: result or False<|endoftext|> |
c755b6a1dcedc7bbd8b192cc6eda298505b2c3530e325d90e6b46c1e4b9bbb14 | def get_all_data():
"\n Main routine that grabs all COVID and covariate data and\n returns them as a single dataframe that contains:\n\n * count of cumulative cases and deaths by country (by today's date)\n * days since first case for each country\n * CPI gov't transparency index\n * World Bank data on population, healthcare, etc. by country\n "
all_covid_data = _get_latest_covid_timeseries()
covid_cases_rollup = _rollup_by_country(all_covid_data['Confirmed'])
covid_deaths_rollup = _rollup_by_country(all_covid_data['Deaths'])
todays_date = covid_cases_rollup.columns.max()
df_out = pd.DataFrame({'cases': covid_cases_rollup[todays_date], 'deaths': covid_deaths_rollup[todays_date]})
_clean_country_list(df_out)
_clean_country_list(covid_cases_rollup)
df_out['death_rate_observed'] = df_out.apply((lambda row: (row['deaths'] / float(row['cases']))), axis=1)
df_out['days_since_first_case'] = _compute_days_since_first_case(covid_cases_rollup)
_add_cpi_data(df_out)
_add_wb_data(df_out)
num_null = df_out.isnull().sum(axis=1)
to_drop_idx = df_out.index[(num_null > 1)]
print(('Dropping %i/%i countries due to lack of data' % (len(to_drop_idx), len(df_out))))
df_out.drop(to_drop_idx, axis=0, inplace=True)
return (df_out, todays_date) | Main routine that grabs all COVID and covariate data and
returns them as a single dataframe that contains:
* count of cumulative cases and deaths by country (by today's date)
* days since first case for each country
* CPI gov't transparency index
* World Bank data on population, healthcare, etc. by country | services/server/dashboard/nb_mortality_rate.py | get_all_data | adriangrepo/covid-19_virus | 0 | python | def get_all_data():
"\n Main routine that grabs all COVID and covariate data and\n returns them as a single dataframe that contains:\n\n * count of cumulative cases and deaths by country (by today's date)\n * days since first case for each country\n * CPI gov't transparency index\n * World Bank data on population, healthcare, etc. by country\n "
all_covid_data = _get_latest_covid_timeseries()
covid_cases_rollup = _rollup_by_country(all_covid_data['Confirmed'])
covid_deaths_rollup = _rollup_by_country(all_covid_data['Deaths'])
todays_date = covid_cases_rollup.columns.max()
df_out = pd.DataFrame({'cases': covid_cases_rollup[todays_date], 'deaths': covid_deaths_rollup[todays_date]})
_clean_country_list(df_out)
_clean_country_list(covid_cases_rollup)
df_out['death_rate_observed'] = df_out.apply((lambda row: (row['deaths'] / float(row['cases']))), axis=1)
df_out['days_since_first_case'] = _compute_days_since_first_case(covid_cases_rollup)
_add_cpi_data(df_out)
_add_wb_data(df_out)
num_null = df_out.isnull().sum(axis=1)
to_drop_idx = df_out.index[(num_null > 1)]
print(('Dropping %i/%i countries due to lack of data' % (len(to_drop_idx), len(df_out))))
df_out.drop(to_drop_idx, axis=0, inplace=True)
return (df_out, todays_date) | def get_all_data():
"\n Main routine that grabs all COVID and covariate data and\n returns them as a single dataframe that contains:\n\n * count of cumulative cases and deaths by country (by today's date)\n * days since first case for each country\n * CPI gov't transparency index\n * World Bank data on population, healthcare, etc. by country\n "
all_covid_data = _get_latest_covid_timeseries()
covid_cases_rollup = _rollup_by_country(all_covid_data['Confirmed'])
covid_deaths_rollup = _rollup_by_country(all_covid_data['Deaths'])
todays_date = covid_cases_rollup.columns.max()
df_out = pd.DataFrame({'cases': covid_cases_rollup[todays_date], 'deaths': covid_deaths_rollup[todays_date]})
_clean_country_list(df_out)
_clean_country_list(covid_cases_rollup)
df_out['death_rate_observed'] = df_out.apply((lambda row: (row['deaths'] / float(row['cases']))), axis=1)
df_out['days_since_first_case'] = _compute_days_since_first_case(covid_cases_rollup)
_add_cpi_data(df_out)
_add_wb_data(df_out)
num_null = df_out.isnull().sum(axis=1)
to_drop_idx = df_out.index[(num_null > 1)]
print(('Dropping %i/%i countries due to lack of data' % (len(to_drop_idx), len(df_out))))
df_out.drop(to_drop_idx, axis=0, inplace=True)
return (df_out, todays_date)<|docstring|>Main routine that grabs all COVID and covariate data and
returns them as a single dataframe that contains:
* count of cumulative cases and deaths by country (by today's date)
* days since first case for each country
* CPI gov't transparency index
* World Bank data on population, healthcare, etc. by country<|endoftext|> |
132eff4bde34f35537df2ebe22c74518d7eeda189883a5b546a2d4544a6c8ece | def _get_latest_covid_timeseries():
' Pull latest time-series data from JHU CSSE database '
repo = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/'
data_path = 'csse_covid_19_data/csse_covid_19_time_series/'
all_data = {}
for status in ['Confirmed', 'Deaths', 'Recovered']:
file_name = ('time_series_19-covid-%s.csv' % status)
all_data[status] = pd.read_csv(('%s%s%s' % (repo, data_path, file_name)))
return all_data | Pull latest time-series data from JHU CSSE database | services/server/dashboard/nb_mortality_rate.py | _get_latest_covid_timeseries | adriangrepo/covid-19_virus | 0 | python | def _get_latest_covid_timeseries():
' '
repo = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/'
data_path = 'csse_covid_19_data/csse_covid_19_time_series/'
all_data = {}
for status in ['Confirmed', 'Deaths', 'Recovered']:
file_name = ('time_series_19-covid-%s.csv' % status)
all_data[status] = pd.read_csv(('%s%s%s' % (repo, data_path, file_name)))
return all_data | def _get_latest_covid_timeseries():
' '
repo = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/'
data_path = 'csse_covid_19_data/csse_covid_19_time_series/'
all_data = {}
for status in ['Confirmed', 'Deaths', 'Recovered']:
file_name = ('time_series_19-covid-%s.csv' % status)
all_data[status] = pd.read_csv(('%s%s%s' % (repo, data_path, file_name)))
return all_data<|docstring|>Pull latest time-series data from JHU CSSE database<|endoftext|> |
d7d5325c0b0471e5d190136f2e58e660eb53a8e6971a47fccc0f47adaeaeff68 | def _rollup_by_country(df):
'\n Roll up each raw time-series by country, adding up the cases\n across the individual states/provinces within the country\n\n :param df: Pandas DataFrame of raw data from CSSE\n :return: DataFrame of country counts\n '
gb = df.groupby('Country/Region')
df_rollup = gb.sum()
df_rollup.drop(['Lat', 'Long'], axis=1, inplace=True, errors='ignore')
df_rollup.drop(df_rollup.columns[(df_rollup.sum(axis=0) == 0)], axis=1, inplace=True)
idx_as_dt = [datetime.strptime(x, '%m/%d/%y') for x in df_rollup.columns]
df_rollup.columns = idx_as_dt
return df_rollup | Roll up each raw time-series by country, adding up the cases
across the individual states/provinces within the country
:param df: Pandas DataFrame of raw data from CSSE
:return: DataFrame of country counts | services/server/dashboard/nb_mortality_rate.py | _rollup_by_country | adriangrepo/covid-19_virus | 0 | python | def _rollup_by_country(df):
'\n Roll up each raw time-series by country, adding up the cases\n across the individual states/provinces within the country\n\n :param df: Pandas DataFrame of raw data from CSSE\n :return: DataFrame of country counts\n '
gb = df.groupby('Country/Region')
df_rollup = gb.sum()
df_rollup.drop(['Lat', 'Long'], axis=1, inplace=True, errors='ignore')
df_rollup.drop(df_rollup.columns[(df_rollup.sum(axis=0) == 0)], axis=1, inplace=True)
idx_as_dt = [datetime.strptime(x, '%m/%d/%y') for x in df_rollup.columns]
df_rollup.columns = idx_as_dt
return df_rollup | def _rollup_by_country(df):
'\n Roll up each raw time-series by country, adding up the cases\n across the individual states/provinces within the country\n\n :param df: Pandas DataFrame of raw data from CSSE\n :return: DataFrame of country counts\n '
gb = df.groupby('Country/Region')
df_rollup = gb.sum()
df_rollup.drop(['Lat', 'Long'], axis=1, inplace=True, errors='ignore')
df_rollup.drop(df_rollup.columns[(df_rollup.sum(axis=0) == 0)], axis=1, inplace=True)
idx_as_dt = [datetime.strptime(x, '%m/%d/%y') for x in df_rollup.columns]
df_rollup.columns = idx_as_dt
return df_rollup<|docstring|>Roll up each raw time-series by country, adding up the cases
across the individual states/provinces within the country
:param df: Pandas DataFrame of raw data from CSSE
:return: DataFrame of country counts<|endoftext|> |
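A minimal illustration of the groupby/sum rollup above; the province names and counts are invented, only the 'Country/Region', 'Lat' and 'Long' column names come from the record.

import pandas as pd

toy = pd.DataFrame({
    'Province/State': ['Hubei', 'Beijing', None],
    'Country/Region': ['Mainland China', 'Mainland China', 'Italy'],
    'Lat': [30.9, 40.1, 41.8], 'Long': [112.2, 116.4, 12.5],
    '3/1/20': [66907, 410, 1694],
    '3/2/20': [67103, 414, 2036],
})
# One row per country: the two Chinese provinces are added together.
rolled = toy.groupby('Country/Region').sum(numeric_only=True).drop(['Lat', 'Long'], axis=1)
print(rolled)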
5b84092b028444bec87e27e597fa7ff0d0c563c69ec8ab2336f6d61d7a3db599 | def _clean_country_list(df):
' Clean up input country list in df '
country_rename = {'Hong Kong SAR': 'Hong Kong', 'Taiwan*': 'Taiwan', 'Czechia': 'Czech Republic', 'Brunei': 'Brunei Darussalam', 'Iran (Islamic Republic of)': 'Iran', 'Viet Nam': 'Vietnam', 'Russian Federation': 'Russia', 'Republic of Korea': 'South Korea', 'Republic of Moldova': 'Moldova', 'China': 'Mainland China'}
df.rename(country_rename, axis=0, inplace=True)
df.drop(ignore_countries, axis=0, inplace=True, errors='ignore') | Clean up input country list in df | services/server/dashboard/nb_mortality_rate.py | _clean_country_list | adriangrepo/covid-19_virus | 0 | python | def _clean_country_list(df):
' '
country_rename = {'Hong Kong SAR': 'Hong Kong', 'Taiwan*': 'Taiwan', 'Czechia': 'Czech Republic', 'Brunei': 'Brunei Darussalam', 'Iran (Islamic Republic of)': 'Iran', 'Viet Nam': 'Vietnam', 'Russian Federation': 'Russia', 'Republic of Korea': 'South Korea', 'Republic of Moldova': 'Moldova', 'China': 'Mainland China'}
df.rename(country_rename, axis=0, inplace=True)
df.drop(ignore_countries, axis=0, inplace=True, errors='ignore') | def _clean_country_list(df):
' '
country_rename = {'Hong Kong SAR': 'Hong Kong', 'Taiwan*': 'Taiwan', 'Czechia': 'Czech Republic', 'Brunei': 'Brunei Darussalam', 'Iran (Islamic Republic of)': 'Iran', 'Viet Nam': 'Vietnam', 'Russian Federation': 'Russia', 'Republic of Korea': 'South Korea', 'Republic of Moldova': 'Moldova', 'China': 'Mainland China'}
df.rename(country_rename, axis=0, inplace=True)
df.drop(ignore_countries, axis=0, inplace=True, errors='ignore')<|docstring|>Clean up input country list in df<|endoftext|> |
39390b4d55630026e1e2fc7c5d9263b84437aac31b10c089d354dcc112460e1d | def _compute_days_since_first_case(df_cases):
' Compute the country-wise days since first confirmed case\n\n :param df_cases: country-wise time-series of confirmed case counts\n :return: Series of country-wise days since first case\n '
date_first_case = df_cases[(df_cases > 0)].idxmin(axis=1)
days_since_first_case = date_first_case.apply((lambda x: (df_cases.columns.max() - x).days))
days_since_first_case.loc['Mainland China'] += 30
return days_since_first_case | Compute the country-wise days since first confirmed case
:param df_cases: country-wise time-series of confirmed case counts
:return: Series of country-wise days since first case | services/server/dashboard/nb_mortality_rate.py | _compute_days_since_first_case | adriangrepo/covid-19_virus | 0 | python | def _compute_days_since_first_case(df_cases):
' Compute the country-wise days since first confirmed case\n\n :param df_cases: country-wise time-series of confirmed case counts\n :return: Series of country-wise days since first case\n '
date_first_case = df_cases[(df_cases > 0)].idxmin(axis=1)
days_since_first_case = date_first_case.apply((lambda x: (df_cases.columns.max() - x).days))
days_since_first_case.loc['Mainland China'] += 30
return days_since_first_case | def _compute_days_since_first_case(df_cases):
' Compute the country-wise days since first confirmed case\n\n :param df_cases: country-wise time-series of confirmed case counts\n :return: Series of country-wise days since first case\n '
date_first_case = df_cases[(df_cases > 0)].idxmin(axis=1)
days_since_first_case = date_first_case.apply((lambda x: (df_cases.columns.max() - x).days))
days_since_first_case.loc['Mainland China'] += 30
return days_since_first_case<|docstring|>Compute the country-wise days since first confirmed case
:param df_cases: country-wise time-series of confirmed case counts
:return: Series of country-wise days since first case<|endoftext|> |
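A small sketch of the idxmin trick above: because the counts are cumulative (non-decreasing), masking zeros and taking the row-wise minimum lands on the first date with a case. The toy numbers are invented.

import pandas as pd
from datetime import datetime

dates = [datetime(2020, 1, d) for d in (22, 23, 24)]
cum_cases = pd.DataFrame([[0, 0, 5], [3, 7, 9]], index=['A', 'B'], columns=dates)
first_case_date = cum_cases[cum_cases > 0].idxmin(axis=1)
days_since = first_case_date.apply(lambda d: (cum_cases.columns.max() - d).days)
# A -> 0 days (first case on the latest date), B -> 2 days
print(days_since)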
e78bbecdff62922c0d284811ffec0ea84d7e9aec01a919deaca0ef60cee990d3 | def _add_cpi_data(df_input):
'\n Add the Government transparency (CPI - corruption perceptions index)\n data (by country) as a column in the COVID cases dataframe.\n\n :param df_input: COVID-19 data rolled up country-wise\n :return: None, add CPI data to df_input in place\n '
cpi_data = pd.read_excel('https://github.com/jwrichar/COVID19-mortality/blob/master/data/CPI2019.xlsx?raw=true', skiprows=2)
cpi_data.set_index('Country', inplace=True, drop=True)
cpi_data.rename(cpi_country_mapping, axis=0, inplace=True)
df_input['cpi_score_2019'] = cpi_data['CPI score 2019'] | Add the Government transparency (CPI - corruption perceptions index)
data (by country) as a column in the COVID cases dataframe.
:param df_input: COVID-19 data rolled up country-wise
:return: None, add CPI data to df_input in place | services/server/dashboard/nb_mortality_rate.py | _add_cpi_data | adriangrepo/covid-19_virus | 0 | python | def _add_cpi_data(df_input):
'\n Add the Government transparency (CPI - corruption perceptions index)\n data (by country) as a column in the COVID cases dataframe.\n\n :param df_input: COVID-19 data rolled up country-wise\n :return: None, add CPI data to df_input in place\n '
cpi_data = pd.read_excel('https://github.com/jwrichar/COVID19-mortality/blob/master/data/CPI2019.xlsx?raw=true', skiprows=2)
cpi_data.set_index('Country', inplace=True, drop=True)
cpi_data.rename(cpi_country_mapping, axis=0, inplace=True)
df_input['cpi_score_2019'] = cpi_data['CPI score 2019'] | def _add_cpi_data(df_input):
'\n Add the Government transparency (CPI - corruption perceptions index)\n data (by country) as a column in the COVID cases dataframe.\n\n :param df_input: COVID-19 data rolled up country-wise\n :return: None, add CPI data to df_input in place\n '
cpi_data = pd.read_excel('https://github.com/jwrichar/COVID19-mortality/blob/master/data/CPI2019.xlsx?raw=true', skiprows=2)
cpi_data.set_index('Country', inplace=True, drop=True)
cpi_data.rename(cpi_country_mapping, axis=0, inplace=True)
df_input['cpi_score_2019'] = cpi_data['CPI score 2019']<|docstring|>Add the Government transparency (CPI - corruption perceptions index)
data (by country) as a column in the COVID cases dataframe.
:param df_input: COVID-19 data rolled up country-wise
:return: None, add CPI data to df_input in place<|endoftext|> |
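Worth noting for the assignment above: `df_input['cpi_score_2019'] = cpi_data['CPI score 2019']` relies on pandas aligning the two objects on their country-name indexes, not on row order. A toy illustration with invented values:

import pandas as pd

covid = pd.DataFrame({'cases': [10, 20]}, index=['Denmark', 'Italy'])
cpi = pd.Series({'Italy': 53, 'Denmark': 87, 'Chad': 20}, name='CPI score 2019')
covid['cpi_score_2019'] = cpi   # aligned by index; 'Chad' is ignored, order is irrelevant
print(covid)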
7f1e73413ca814c6f1c193d152f597239f2079364dadc7926cd6287f1a98100f | def _add_wb_data(df_input):
'\n Add the World Bank data covariates as columns in the COVID cases dataframe.\n\n :param df_input: COVID-19 data rolled up country-wise\n :return: None, add World Bank data to df_input in place\n '
wb_data = pd.read_csv('https://raw.githubusercontent.com/jwrichar/COVID19-mortality/master/data/world_bank_data.csv', na_values='..')
for (wb_name, var_name) in wb_covariates:
wb_series = wb_data.loc[(wb_data['Series Code'] == wb_name)]
wb_series.set_index('Country Name', inplace=True, drop=True)
wb_series.rename(wb_country_mapping, axis=0, inplace=True)
df_input[var_name] = _get_most_recent_value(wb_series) | Add the World Bank data covariates as columns in the COVID cases dataframe.
:param df_input: COVID-19 data rolled up country-wise
:return: None, add World Bank data to df_input in place | services/server/dashboard/nb_mortality_rate.py | _add_wb_data | adriangrepo/covid-19_virus | 0 | python | def _add_wb_data(df_input):
'\n Add the World Bank data covariates as columns in the COVID cases dataframe.\n\n :param df_input: COVID-19 data rolled up country-wise\n :return: None, add World Bank data to df_input in place\n '
wb_data = pd.read_csv('https://raw.githubusercontent.com/jwrichar/COVID19-mortality/master/data/world_bank_data.csv', na_values='..')
for (wb_name, var_name) in wb_covariates:
wb_series = wb_data.loc[(wb_data['Series Code'] == wb_name)]
wb_series.set_index('Country Name', inplace=True, drop=True)
wb_series.rename(wb_country_mapping, axis=0, inplace=True)
df_input[var_name] = _get_most_recent_value(wb_series) | def _add_wb_data(df_input):
'\n Add the World Bank data covariates as columns in the COVID cases dataframe.\n\n :param df_input: COVID-19 data rolled up country-wise\n :return: None, add World Bank data to df_input in place\n '
wb_data = pd.read_csv('https://raw.githubusercontent.com/jwrichar/COVID19-mortality/master/data/world_bank_data.csv', na_values='..')
for (wb_name, var_name) in wb_covariates:
wb_series = wb_data.loc[(wb_data['Series Code'] == wb_name)]
wb_series.set_index('Country Name', inplace=True, drop=True)
wb_series.rename(wb_country_mapping, axis=0, inplace=True)
df_input[var_name] = _get_most_recent_value(wb_series)<|docstring|>Add the World Bank data covariates as columns in the COVID cases dataframe.
:param df_input: COVID-19 data rolled up country-wise
:return: None, add World Bank data to df_input in place<|endoftext|> |
786eba64a752f381df6e388801c16009391bf59026a8a0deb17a1d82a85c4e65 | def _get_most_recent_value(wb_series):
'\n Get most recent non-null value for each country in the World Bank\n time-series data\n '
ts_data = wb_series[wb_series.columns[3:]]
def _helper(row):
row_nn = row[row.notnull()]
if len(row_nn):
return row_nn[(- 1)]
else:
return np.nan
return ts_data.apply(_helper, axis=1) | Get most recent non-null value for each country in the World Bank
time-series data | services/server/dashboard/nb_mortality_rate.py | _get_most_recent_value | adriangrepo/covid-19_virus | 0 | python | def _get_most_recent_value(wb_series):
'\n Get most recent non-null value for each country in the World Bank\n time-series data\n '
ts_data = wb_series[wb_series.columns[3:]]
def _helper(row):
row_nn = row[row.notnull()]
if len(row_nn):
return row_nn[(- 1)]
else:
return np.nan
return ts_data.apply(_helper, axis=1) | def _get_most_recent_value(wb_series):
'\n Get most recent non-null value for each country in the World Bank\n time-series data\n '
ts_data = wb_series[wb_series.columns[3:]]
def _helper(row):
row_nn = row[row.notnull()]
if len(row_nn):
return row_nn[(- 1)]
else:
return np.nan
return ts_data.apply(_helper, axis=1)<|docstring|>Get most recent non-null value for each country in the World Bank
time-series data<|endoftext|> |
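The row-wise helper above (last non-null value per country) can also be illustrated - and, assuming the year columns are ordered oldest-to-newest, written more compactly - with a forward fill across columns. The frame below is invented.

import numpy as np
import pandas as pd

ts = pd.DataFrame({'2016': [1.0, np.nan], '2017': [np.nan, np.nan], '2018': [3.0, np.nan]},
                  index=['A', 'B'])
most_recent = ts.ffill(axis=1).iloc[:, -1]   # A -> 3.0, B -> NaN (no data at all)
print(most_recent)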
b5d2703d552c479852c92a87fcd8686a95d86d9bcb154535a4ba036b6d9ccbe2 | def _normalize_col(df, colname, how='mean'):
'\n Normalize an input column in one of 3 ways:\n\n * how=mean: unit normal N(0,1)\n * how=upper: normalize to [-1, 0] with highest value set to 0\n * how=lower: normalize to [0, 1] with lowest value set to 0\n\n Returns df modified in place with extra column added.\n '
colname_new = ('%s_normalized' % colname)
if (how == 'mean'):
mu = df[colname].mean()
sig = df[colname].std()
df[colname_new] = ((df[colname] - mu) / sig)
elif (how == 'upper'):
maxval = df[colname].max()
minval = df[colname].min()
df[colname_new] = ((df[colname] - maxval) / (maxval - minval))
elif (how == 'lower'):
maxval = df[colname].max()
minval = df[colname].min()
df[colname_new] = ((df[colname] - minval) / (maxval - minval)) | Normalize an input column in one of 3 ways:
* how=mean: unit normal N(0,1)
* how=upper: normalize to [-1, 0] with highest value set to 0
* how=lower: normalize to [0, 1] with lowest value set to 0
Returns df modified in place with extra column added. | services/server/dashboard/nb_mortality_rate.py | _normalize_col | adriangrepo/covid-19_virus | 0 | python | def _normalize_col(df, colname, how='mean'):
'\n Normalize an input column in one of 3 ways:\n\n * how=mean: unit normal N(0,1)\n * how=upper: normalize to [-1, 0] with highest value set to 0\n * how=lower: normalize to [0, 1] with lowest value set to 0\n\n Returns df modified in place with extra column added.\n '
colname_new = ('%s_normalized' % colname)
if (how == 'mean'):
mu = df[colname].mean()
sig = df[colname].std()
df[colname_new] = ((df[colname] - mu) / sig)
elif (how == 'upper'):
maxval = df[colname].max()
minval = df[colname].min()
df[colname_new] = ((df[colname] - maxval) / (maxval - minval))
elif (how == 'lower'):
maxval = df[colname].max()
minval = df[colname].min()
df[colname_new] = ((df[colname] - minval) / (maxval - minval)) | def _normalize_col(df, colname, how='mean'):
'\n Normalize an input column in one of 3 ways:\n\n * how=mean: unit normal N(0,1)\n * how=upper: normalize to [-1, 0] with highest value set to 0\n * how=lower: normalize to [0, 1] with lowest value set to 0\n\n Returns df modified in place with extra column added.\n '
colname_new = ('%s_normalized' % colname)
if (how == 'mean'):
mu = df[colname].mean()
sig = df[colname].std()
df[colname_new] = ((df[colname] - mu) / sig)
elif (how == 'upper'):
maxval = df[colname].max()
minval = df[colname].min()
df[colname_new] = ((df[colname] - maxval) / (maxval - minval))
elif (how == 'lower'):
maxval = df[colname].max()
minval = df[colname].min()
df[colname_new] = ((df[colname] - minval) / (maxval - minval))<|docstring|>Normalize an input column in one of 3 ways:
* how=mean: unit normal N(0,1)
* how=upper: normalize to [-1, 0] with highest value set to 0
* how=lower: normalize to [0, 1] with lowest value set to 0
Returns df modified in place with extra column added.<|endoftext|> |
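A quick check of the three normalisation modes above on a toy column (values invented; assumes _normalize_col is in scope). Note that each call writes the same gdp_normalized column, so later calls overwrite earlier ones.

import pandas as pd

df = pd.DataFrame({'gdp': [1.0, 2.0, 3.0, 4.0]})
_normalize_col(df, 'gdp', how='mean')    # adds gdp_normalized with mean 0, unit std
_normalize_col(df, 'gdp', how='upper')   # overwrites it: values in [-1, 0], max -> 0
_normalize_col(df, 'gdp', how='lower')   # overwrites it: values in [0, 1], min -> 0
print(df)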
ec94e2fa135f62bc0895421bde1b1afed04f8ee215c2e778aaacf6d7c2ba6695 | def getDisplay(self):
'Retrieve the currently-bound, or the default, display'
from OpenGL.EGL import eglGetCurrentDisplay, eglGetDisplay, EGL_DEFAULT_DISPLAY
return (eglGetCurrentDisplay() or eglGetDisplay(EGL_DEFAULT_DISPLAY)) | Retrieve the currently-bound, or the default, display | OpenGL/raw/EGL/_types.py | getDisplay | keunhong/pyopengl | 210 | python | def getDisplay(self):
from OpenGL.EGL import eglGetCurrentDisplay, eglGetDisplay, EGL_DEFAULT_DISPLAY
return (eglGetCurrentDisplay() or eglGetDisplay(EGL_DEFAULT_DISPLAY)) | def getDisplay(self):
from OpenGL.EGL import eglGetCurrentDisplay, eglGetDisplay, EGL_DEFAULT_DISPLAY
return (eglGetCurrentDisplay() or eglGetDisplay(EGL_DEFAULT_DISPLAY))<|docstring|>Retrieve the currently-bound, or the default, display<|endoftext|> |
3f3dedb8c6cb1a656d5efe797d27f5dc56eb803f48a1ffd6ea92147ba25ca538 | def set_size(width, fraction=1):
' Set figure dimensions to avoid scaling in LaTeX.\n\n Parameters\n ----------\n width: float\n Document textwidth or columnwidth in pts\n fraction: float, optional\n Fraction of the width which you wish the figure to occupy\n\n Returns\n -------\n fig_dim: tuple\n Dimensions of figure in inches\n '
fig_width_pt = (width * fraction)
inches_per_pt = (1 / 72.27)
golden_ratio = (((5 ** 0.5) - 1) / 2)
fig_width_in = (fig_width_pt * inches_per_pt)
fig_height_in = (fig_width_in * golden_ratio)
fig_dim = (fig_width_in, fig_height_in)
return fig_dim | Set figure dimensions to avoid scaling in LaTeX.
Parameters
----------
width: float
Document textwidth or columnwidth in pts
fraction: float, optional
Fraction of the width which you wish the figure to occupy
Returns
-------
fig_dim: tuple
Dimensions of figure in inches | src/utils/plot-results-aug-resisc.py | set_size | Berkeley-Data/hpt | 1 | python | def set_size(width, fraction=1):
' Set figure dimensions to avoid scaling in LaTeX.\n\n Parameters\n ----------\n width: float\n Document textwidth or columnwidth in pts\n fraction: float, optional\n Fraction of the width which you wish the figure to occupy\n\n Returns\n -------\n fig_dim: tuple\n Dimensions of figure in inches\n '
fig_width_pt = (width * fraction)
inches_per_pt = (1 / 72.27)
golden_ratio = (((5 ** 0.5) - 1) / 2)
fig_width_in = (fig_width_pt * inches_per_pt)
fig_height_in = (fig_width_in * golden_ratio)
fig_dim = (fig_width_in, fig_height_in)
return fig_dim | def set_size(width, fraction=1):
' Set figure dimensions to avoid scaling in LaTeX.\n\n Parameters\n ----------\n width: float\n Document textwidth or columnwidth in pts\n fraction: float, optional\n Fraction of the width which you wish the figure to occupy\n\n Returns\n -------\n fig_dim: tuple\n Dimensions of figure in inches\n '
fig_width_pt = (width * fraction)
inches_per_pt = (1 / 72.27)
golden_ratio = (((5 ** 0.5) - 1) / 2)
fig_width_in = (fig_width_pt * inches_per_pt)
fig_height_in = (fig_width_in * golden_ratio)
fig_dim = (fig_width_in, fig_height_in)
return fig_dim<|docstring|>Set figure dimensions to avoid scaling in LaTeX.
Parameters
----------
width: float
Document textwidth or columnwidth in pts
fraction: float, optional
Fraction of the width which you wish the figure to occupy
Returns
-------
fig_dim: tuple
Dimensions of figure in inches<|endoftext|> |
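Typical use of the helper above when sizing a figure for LaTeX; the 345 pt width is an assumed value reported by `\the\textwidth` in the target document, not something taken from the record.

import matplotlib.pyplot as plt

width_pt = 345  # assumed \textwidth of the LaTeX document, in points
fig, ax = plt.subplots(1, 1, figsize=set_size(width_pt, fraction=0.5))
ax.plot([0, 1], [0, 1])
fig.savefig('example.pdf', format='pdf', bbox_inches='tight')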
8bb3506bac41b4845d9320c5bacf5d50f660608c7e10ab20c57053fc7b230840 | @register.assignment_tag(takes_context=True)
def feincms_nav_reverse(context, feincms_page, level=1, depth=1):
'\n Saves a list of pages into the given context variable.\n '
if isinstance(feincms_page, HttpRequest):
try:
feincms_page = Page.objects.for_request(feincms_page, best_match=True)
except Page.DoesNotExist:
return []
mptt_opts = feincms_page._mptt_meta
mptt_level_range = [(level - 1), ((level + depth) - 1)]
queryset = feincms_page.__class__._default_manager.active().filter(in_navigation=False).filter(**{('%s__gte' % mptt_opts.level_attr): mptt_level_range[0], ('%s__lt' % mptt_opts.level_attr): mptt_level_range[1]})
page_level = getattr(feincms_page, mptt_opts.level_attr)
parent = None
if (level > 1):
if ((level - 2) == page_level):
parent = feincms_page
elif ((level - 2) < page_level):
parent = feincms_page.get_ancestors()[(level - 2)]
elif ((level - 1) > page_level):
queryset = Page.objects.none()
if parent:
if getattr(parent, 'navigation_extension', None):
return list(parent.extended_navigation(depth=depth, request=context.get('request')))
queryset &= parent.get_descendants()
if (depth > 1):
parents = set([None])
if parent:
parents.add(parent.id)
def _parentactive_filter(iterable):
for elem in iterable:
if (elem.parent_id in parents):
(yield elem)
parents.add(elem.id)
queryset = _parentactive_filter(queryset)
if hasattr(feincms_page, 'navigation_extension'):
def _navext_filter(iterable):
current_navextension_node = None
for elem in iterable:
if ((current_navextension_node is not None) and current_navextension_node.is_ancestor_of(elem)):
continue
(yield elem)
if getattr(elem, 'navigation_extension', None):
current_navextension_node = elem
try:
for extended in elem.extended_navigation(depth=depth, request=context.get('request')):
this_level = getattr(extended, mptt_opts.level_attr, 0)
if (this_level < ((level + depth) - 1)):
(yield extended)
except Exception as e:
logger.warn('feincms_nav caught exception in navigation extension for page %d: %s', current_navextension_node.id, format_exception(e))
else:
current_navextension_node = None
queryset = _navext_filter(queryset)
return list(queryset) | Saves a list of pages into the given context variable. | website/templatetags/website_tags.py | feincms_nav_reverse | acaciawater/wfn | 0 | python | @register.assignment_tag(takes_context=True)
def feincms_nav_reverse(context, feincms_page, level=1, depth=1):
'\n \n '
if isinstance(feincms_page, HttpRequest):
try:
feincms_page = Page.objects.for_request(feincms_page, best_match=True)
except Page.DoesNotExist:
return []
mptt_opts = feincms_page._mptt_meta
mptt_level_range = [(level - 1), ((level + depth) - 1)]
queryset = feincms_page.__class__._default_manager.active().filter(in_navigation=False).filter(**{('%s__gte' % mptt_opts.level_attr): mptt_level_range[0], ('%s__lt' % mptt_opts.level_attr): mptt_level_range[1]})
page_level = getattr(feincms_page, mptt_opts.level_attr)
parent = None
if (level > 1):
if ((level - 2) == page_level):
parent = feincms_page
elif ((level - 2) < page_level):
parent = feincms_page.get_ancestors()[(level - 2)]
elif ((level - 1) > page_level):
queryset = Page.objects.none()
if parent:
if getattr(parent, 'navigation_extension', None):
return list(parent.extended_navigation(depth=depth, request=context.get('request')))
queryset &= parent.get_descendants()
if (depth > 1):
parents = set([None])
if parent:
parents.add(parent.id)
def _parentactive_filter(iterable):
for elem in iterable:
if (elem.parent_id in parents):
(yield elem)
parents.add(elem.id)
queryset = _parentactive_filter(queryset)
if hasattr(feincms_page, 'navigation_extension'):
def _navext_filter(iterable):
current_navextension_node = None
for elem in iterable:
if ((current_navextension_node is not None) and current_navextension_node.is_ancestor_of(elem)):
continue
(yield elem)
if getattr(elem, 'navigation_extension', None):
current_navextension_node = elem
try:
for extended in elem.extended_navigation(depth=depth, request=context.get('request')):
this_level = getattr(extended, mptt_opts.level_attr, 0)
if (this_level < ((level + depth) - 1)):
(yield extended)
except Exception as e:
logger.warn('feincms_nav caught exception in navigation extension for page %d: %s', current_navextension_node.id, format_exception(e))
else:
current_navextension_node = None
queryset = _navext_filter(queryset)
return list(queryset) | @register.assignment_tag(takes_context=True)
def feincms_nav_reverse(context, feincms_page, level=1, depth=1):
'\n \n '
if isinstance(feincms_page, HttpRequest):
try:
feincms_page = Page.objects.for_request(feincms_page, best_match=True)
except Page.DoesNotExist:
return []
mptt_opts = feincms_page._mptt_meta
mptt_level_range = [(level - 1), ((level + depth) - 1)]
queryset = feincms_page.__class__._default_manager.active().filter(in_navigation=False).filter(**{('%s__gte' % mptt_opts.level_attr): mptt_level_range[0], ('%s__lt' % mptt_opts.level_attr): mptt_level_range[1]})
page_level = getattr(feincms_page, mptt_opts.level_attr)
parent = None
if (level > 1):
if ((level - 2) == page_level):
parent = feincms_page
elif ((level - 2) < page_level):
parent = feincms_page.get_ancestors()[(level - 2)]
elif ((level - 1) > page_level):
queryset = Page.objects.none()
if parent:
if getattr(parent, 'navigation_extension', None):
return list(parent.extended_navigation(depth=depth, request=context.get('request')))
queryset &= parent.get_descendants()
if (depth > 1):
parents = set([None])
if parent:
parents.add(parent.id)
def _parentactive_filter(iterable):
for elem in iterable:
if (elem.parent_id in parents):
(yield elem)
parents.add(elem.id)
queryset = _parentactive_filter(queryset)
if hasattr(feincms_page, 'navigation_extension'):
def _navext_filter(iterable):
current_navextension_node = None
for elem in iterable:
if ((current_navextension_node is not None) and current_navextension_node.is_ancestor_of(elem)):
continue
(yield elem)
if getattr(elem, 'navigation_extension', None):
current_navextension_node = elem
try:
for extended in elem.extended_navigation(depth=depth, request=context.get('request')):
this_level = getattr(extended, mptt_opts.level_attr, 0)
if (this_level < ((level + depth) - 1)):
(yield extended)
except Exception as e:
logger.warn('feincms_nav caught exception in navigation extension for page %d: %s', current_navextension_node.id, format_exception(e))
else:
current_navextension_node = None
queryset = _navext_filter(queryset)
return list(queryset)<|docstring|>Saves a list of pages into the given context variable.<|endoftext|> |
0fe7d14222e4b863be08b5745a90f9428128c78e34cb865932a94671ee877ed6 | def relative_dispersion(x: np.ndarray) -> float:
' Relative dispersion of vector\n '
out = (np.std(x) / np.std(np.diff(x)))
return out | Relative dispersion of vector | vest/aggregations/relative_dispersion.py | relative_dispersion | vcerqueira/vest-python | 5 | python | def relative_dispersion(x: np.ndarray) -> float:
' \n '
out = (np.std(x) / np.std(np.diff(x)))
return out | def relative_dispersion(x: np.ndarray) -> float:
' \n '
out = (np.std(x) / np.std(np.diff(x)))
return out<|docstring|>Relative dispersion of vector<|endoftext|> |
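An illustrative check of the statistic above: for white noise the first differences are more variable than the series itself (the ratio hovers around 1/sqrt(2)), while a random walk wanders far more than its unit-variance steps, so the ratio grows. The series below are random draws, not data from the source.

import numpy as np

rng = np.random.default_rng(0)
white_noise = rng.normal(size=1000)
random_walk = np.cumsum(rng.normal(size=1000))
print(relative_dispersion(white_noise))   # roughly 0.7
print(relative_dispersion(random_walk))   # much larger than 1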
83802d7de66dd77fbf44b24dc569289ce6b20c243d6dd9ec3835b07f81405289 | @callback
def exclude_attributes(hass: HomeAssistant) -> set[str]:
'Exclude large and chatty update attributes from being recorded in the database.'
return {ATTR_ENTITY_PICTURE, ATTR_IN_PROGRESS, ATTR_RELEASE_SUMMARY} | Exclude large and chatty update attributes from being recorded in the database. | homeassistant/components/update/recorder.py | exclude_attributes | a-p-z/core | 30,023 | python | @callback
def exclude_attributes(hass: HomeAssistant) -> set[str]:
return {ATTR_ENTITY_PICTURE, ATTR_IN_PROGRESS, ATTR_RELEASE_SUMMARY} | @callback
def exclude_attributes(hass: HomeAssistant) -> set[str]:
return {ATTR_ENTITY_PICTURE, ATTR_IN_PROGRESS, ATTR_RELEASE_SUMMARY}<|docstring|>Exclude large and chatty update attributes from being recorded in the database.<|endoftext|> |
20ccd210266b8119f335b06aed48a02f75b21a1f8edb9ee7d0360a6397780d7b | @mock.patch('coverage.get_json_from_url', return_value={'fuzzer_stats_dir': 'gs://oss-fuzz-coverage/systemd/fuzzer_stats/20210303'})
def test_get_valid_project(self, mocked_get_json_from_url):
'Tests that a project\'s coverage report can be downloaded and parsed.\n\n NOTE: This test relies on the PROJECT_NAME repo\'s coverage report.\n The "example" project was not used because it has no coverage reports.\n '
result = coverage._get_fuzzer_stats_dir_url(PROJECT_NAME)
((url,), _) = mocked_get_json_from_url.call_args
self.assertEqual('https://storage.googleapis.com/oss-fuzz-coverage/latest_report_info/curl.json', url)
expected_result = 'https://storage.googleapis.com/oss-fuzz-coverage/systemd/fuzzer_stats/20210303'
self.assertEqual(result, expected_result) | Tests that a project's coverage report can be downloaded and parsed.
NOTE: This test relies on the PROJECT_NAME repo's coverage report.
The "example" project was not used because it has no coverage reports. | infra/cifuzz/coverage_test.py | test_get_valid_project | mejo1024/oss-fuzz | 3 | python | @mock.patch('coverage.get_json_from_url', return_value={'fuzzer_stats_dir': 'gs://oss-fuzz-coverage/systemd/fuzzer_stats/20210303'})
def test_get_valid_project(self, mocked_get_json_from_url):
'Tests that a project\'s coverage report can be downloaded and parsed.\n\n NOTE: This test relies on the PROJECT_NAME repo\'s coverage report.\n The "example" project was not used because it has no coverage reports.\n '
result = coverage._get_fuzzer_stats_dir_url(PROJECT_NAME)
((url,), _) = mocked_get_json_from_url.call_args
self.assertEqual('https://storage.googleapis.com/oss-fuzz-coverage/latest_report_info/curl.json', url)
expected_result = 'https://storage.googleapis.com/oss-fuzz-coverage/systemd/fuzzer_stats/20210303'
self.assertEqual(result, expected_result) | @mock.patch('coverage.get_json_from_url', return_value={'fuzzer_stats_dir': 'gs://oss-fuzz-coverage/systemd/fuzzer_stats/20210303'})
def test_get_valid_project(self, mocked_get_json_from_url):
'Tests that a project\'s coverage report can be downloaded and parsed.\n\n NOTE: This test relies on the PROJECT_NAME repo\'s coverage report.\n The "example" project was not used because it has no coverage reports.\n '
result = coverage._get_fuzzer_stats_dir_url(PROJECT_NAME)
((url,), _) = mocked_get_json_from_url.call_args
self.assertEqual('https://storage.googleapis.com/oss-fuzz-coverage/latest_report_info/curl.json', url)
expected_result = 'https://storage.googleapis.com/oss-fuzz-coverage/systemd/fuzzer_stats/20210303'
self.assertEqual(result, expected_result)<|docstring|>Tests that a project's coverage report can be downloaded and parsed.
NOTE: This test relies on the PROJECT_NAME repo's coverage report.
The "example" project was not used because it has no coverage reports.<|endoftext|> |
10ba315cef0ec60d5b85dd7100618909557767f1995732be564f4c85ef38cf15 | def test_get_invalid_project(self):
'Tests that passing a bad project returns None.'
self.assertIsNone(coverage._get_fuzzer_stats_dir_url('not-a-proj')) | Tests that passing a bad project returns None. | infra/cifuzz/coverage_test.py | test_get_invalid_project | mejo1024/oss-fuzz | 3 | python | def test_get_invalid_project(self):
self.assertIsNone(coverage._get_fuzzer_stats_dir_url('not-a-proj')) | def test_get_invalid_project(self):
self.assertIsNone(coverage._get_fuzzer_stats_dir_url('not-a-proj'))<|docstring|>Tests that passing a bad project returns None.<|endoftext|> |
6eee18a836a7b7b4c67c98251a2135ed8b4f95b1312cce0df246783f94cb92ff | @mock.patch('coverage.get_json_from_url', return_value={})
def test_valid_target(self, mocked_get_json_from_url):
"Tests that a target's coverage report can be downloaded and parsed."
self.coverage_getter.get_target_coverage_report(FUZZ_TARGET)
((url,), _) = mocked_get_json_from_url.call_args
self.assertEqual('https://storage.googleapis.com/oss-fuzz-coverage/curl/fuzzer_stats/20200226/curl_fuzzer.json', url) | Tests that a target's coverage report can be downloaded and parsed. | infra/cifuzz/coverage_test.py | test_valid_target | mejo1024/oss-fuzz | 3 | python | @mock.patch('coverage.get_json_from_url', return_value={})
def test_valid_target(self, mocked_get_json_from_url):
self.coverage_getter.get_target_coverage_report(FUZZ_TARGET)
((url,), _) = mocked_get_json_from_url.call_args
self.assertEqual('https://storage.googleapis.com/oss-fuzz-coverage/curl/fuzzer_stats/20200226/curl_fuzzer.json', url) | @mock.patch('coverage.get_json_from_url', return_value={})
def test_valid_target(self, mocked_get_json_from_url):
self.coverage_getter.get_target_coverage_report(FUZZ_TARGET)
((url,), _) = mocked_get_json_from_url.call_args
self.assertEqual('https://storage.googleapis.com/oss-fuzz-coverage/curl/fuzzer_stats/20200226/curl_fuzzer.json', url)<|docstring|>Tests that a target's coverage report can be downloaded and parsed.<|endoftext|> |
0ca69591285e95505f3e773a1b8fb9b43f1037db5eac90327e8cc057a29cc9ed | def test_invalid_target(self):
'Tests that passing an invalid target coverage report returns None.'
self.assertIsNone(self.coverage_getter.get_target_coverage_report(INVALID_TARGET)) | Tests that passing an invalid target coverage report returns None. | infra/cifuzz/coverage_test.py | test_invalid_target | mejo1024/oss-fuzz | 3 | python | def test_invalid_target(self):
self.assertIsNone(self.coverage_getter.get_target_coverage_report(INVALID_TARGET)) | def test_invalid_target(self):
self.assertIsNone(self.coverage_getter.get_target_coverage_report(INVALID_TARGET))<|docstring|>Tests that passing an invalid target coverage report returns None.<|endoftext|> |
ced573ecb1759378dc45d1e9af86a33a8314ba5382d81e197978a9254df41c98 | @mock.patch('coverage._get_latest_cov_report_info', return_value=None)
def test_invalid_project_json(self, _):
'Tests an invalid project JSON results in None being returned.'
coverage_getter = coverage.OssFuzzCoverageGetter(PROJECT_NAME, REPO_PATH)
self.assertIsNone(coverage_getter.get_target_coverage_report(FUZZ_TARGET)) | Tests an invalid project JSON results in None being returned. | infra/cifuzz/coverage_test.py | test_invalid_project_json | mejo1024/oss-fuzz | 3 | python | @mock.patch('coverage._get_latest_cov_report_info', return_value=None)
def test_invalid_project_json(self, _):
coverage_getter = coverage.OssFuzzCoverageGetter(PROJECT_NAME, REPO_PATH)
self.assertIsNone(coverage_getter.get_target_coverage_report(FUZZ_TARGET)) | @mock.patch('coverage._get_latest_cov_report_info', return_value=None)
def test_invalid_project_json(self, _):
coverage_getter = coverage.OssFuzzCoverageGetter(PROJECT_NAME, REPO_PATH)
self.assertIsNone(coverage_getter.get_target_coverage_report(FUZZ_TARGET))<|docstring|>Tests an invalid project JSON results in None being returned.<|endoftext|> |
cfc9a7655980de607ff34771e9cc7ae46e92a7546c28f768efdbd588420f2bab | def test_valid_target(self):
'Tests that covered files can be retrieved from a coverage report.'
with open(os.path.join(TEST_DATA_PATH, FUZZ_TARGET_COV_JSON_FILENAME)) as file_handle:
fuzzer_cov_info = json.loads(file_handle.read())
with mock.patch('coverage.OssFuzzCoverageGetter.get_target_coverage_report', return_value=fuzzer_cov_info):
file_list = self.coverage_getter.get_files_covered_by_target(FUZZ_TARGET)
curl_files_list_path = os.path.join(TEST_DATA_PATH, 'example_curl_file_list.json')
with open(curl_files_list_path) as file_handle:
expected_file_list = json.loads(file_handle.read())
self.assertCountEqual(file_list, expected_file_list) | Tests that covered files can be retrieved from a coverage report. | infra/cifuzz/coverage_test.py | test_valid_target | mejo1024/oss-fuzz | 3 | python | def test_valid_target(self):
with open(os.path.join(TEST_DATA_PATH, FUZZ_TARGET_COV_JSON_FILENAME)) as file_handle:
fuzzer_cov_info = json.loads(file_handle.read())
with mock.patch('coverage.OssFuzzCoverageGetter.get_target_coverage_report', return_value=fuzzer_cov_info):
file_list = self.coverage_getter.get_files_covered_by_target(FUZZ_TARGET)
curl_files_list_path = os.path.join(TEST_DATA_PATH, 'example_curl_file_list.json')
with open(curl_files_list_path) as file_handle:
expected_file_list = json.loads(file_handle.read())
self.assertCountEqual(file_list, expected_file_list) | def test_valid_target(self):
with open(os.path.join(TEST_DATA_PATH, FUZZ_TARGET_COV_JSON_FILENAME)) as file_handle:
fuzzer_cov_info = json.loads(file_handle.read())
with mock.patch('coverage.OssFuzzCoverageGetter.get_target_coverage_report', return_value=fuzzer_cov_info):
file_list = self.coverage_getter.get_files_covered_by_target(FUZZ_TARGET)
curl_files_list_path = os.path.join(TEST_DATA_PATH, 'example_curl_file_list.json')
with open(curl_files_list_path) as file_handle:
expected_file_list = json.loads(file_handle.read())
self.assertCountEqual(file_list, expected_file_list)<|docstring|>Tests that covered files can be retrieved from a coverage report.<|endoftext|> |
a99a0336d2866035b9d2194e88bc7a777e4480dcdeb39e8ffc2105ed20ed10ca | def test_invalid_target(self):
'Tests passing invalid fuzz target returns None.'
self.assertIsNone(self.coverage_getter.get_files_covered_by_target(INVALID_TARGET)) | Tests passing invalid fuzz target returns None. | infra/cifuzz/coverage_test.py | test_invalid_target | mejo1024/oss-fuzz | 3 | python | def test_invalid_target(self):
self.assertIsNone(self.coverage_getter.get_files_covered_by_target(INVALID_TARGET)) | def test_invalid_target(self):
self.assertIsNone(self.coverage_getter.get_files_covered_by_target(INVALID_TARGET))<|docstring|>Tests passing invalid fuzz target returns None.<|endoftext|> |
0a38fe62000a1bed932be7d1e84b9b9b398460a8952d1d1049f60610cb85707d | def test_is_file_covered_covered(self):
'Tests that is_file_covered returns True for a covered file.'
file_coverage = {'filename': '/src/systemd/src/basic/locale-util.c', 'summary': {'regions': {'count': 204, 'covered': 200, 'notcovered': 200, 'percent': 98.03}}}
self.assertTrue(coverage.is_file_covered(file_coverage)) | Tests that is_file_covered returns True for a covered file. | infra/cifuzz/coverage_test.py | test_is_file_covered_covered | mejo1024/oss-fuzz | 3 | python | def test_is_file_covered_covered(self):
file_coverage = {'filename': '/src/systemd/src/basic/locale-util.c', 'summary': {'regions': {'count': 204, 'covered': 200, 'notcovered': 200, 'percent': 98.03}}}
self.assertTrue(coverage.is_file_covered(file_coverage)) | def test_is_file_covered_covered(self):
file_coverage = {'filename': '/src/systemd/src/basic/locale-util.c', 'summary': {'regions': {'count': 204, 'covered': 200, 'notcovered': 200, 'percent': 98.03}}}
self.assertTrue(coverage.is_file_covered(file_coverage))<|docstring|>Tests that is_file_covered returns True for a covered file.<|endoftext|> |
63fcc25d361e49f98005480cbc683d63aebee62ea1b6aeb1cfeaa86019bf85b6 | def test_is_file_covered_not_covered(self):
'Tests that is_file_covered returns False for a not covered file.'
file_coverage = {'filename': '/src/systemd/src/basic/locale-util.c', 'summary': {'regions': {'count': 204, 'covered': 0, 'notcovered': 0, 'percent': 0}}}
self.assertFalse(coverage.is_file_covered(file_coverage)) | Tests that is_file_covered returns False for a not covered file. | infra/cifuzz/coverage_test.py | test_is_file_covered_not_covered | mejo1024/oss-fuzz | 3 | python | def test_is_file_covered_not_covered(self):
file_coverage = {'filename': '/src/systemd/src/basic/locale-util.c', 'summary': {'regions': {'count': 204, 'covered': 0, 'notcovered': 0, 'percent': 0}}}
self.assertFalse(coverage.is_file_covered(file_coverage)) | def test_is_file_covered_not_covered(self):
file_coverage = {'filename': '/src/systemd/src/basic/locale-util.c', 'summary': {'regions': {'count': 204, 'covered': 0, 'notcovered': 0, 'percent': 0}}}
self.assertFalse(coverage.is_file_covered(file_coverage))<|docstring|>Tests that is_file_covered returns False for a not covered file.<|endoftext|> |
11c496b3e1ade9a1a91cd3d866efb7cd97d12e9fd9143d1d66317067ca04d9d3 | @mock.patch('logging.error')
@mock.patch('coverage.get_json_from_url', return_value={'coverage': 1})
def test_get_latest_cov_report_info(self, mocked_get_json_from_url, mocked_error):
'Tests that _get_latest_cov_report_info works as intended.'
result = coverage._get_latest_cov_report_info(self.PROJECT)
self.assertEqual(result, {'coverage': 1})
mocked_error.assert_not_called()
mocked_get_json_from_url.assert_called_with(self.LATEST_REPORT_INFO_URL) | Tests that _get_latest_cov_report_info works as intended. | infra/cifuzz/coverage_test.py | test_get_latest_cov_report_info | mejo1024/oss-fuzz | 3 | python | @mock.patch('logging.error')
@mock.patch('coverage.get_json_from_url', return_value={'coverage': 1})
def test_get_latest_cov_report_info(self, mocked_get_json_from_url, mocked_error):
result = coverage._get_latest_cov_report_info(self.PROJECT)
self.assertEqual(result, {'coverage': 1})
mocked_error.assert_not_called()
mocked_get_json_from_url.assert_called_with(self.LATEST_REPORT_INFO_URL) | @mock.patch('logging.error')
@mock.patch('coverage.get_json_from_url', return_value={'coverage': 1})
def test_get_latest_cov_report_info(self, mocked_get_json_from_url, mocked_error):
result = coverage._get_latest_cov_report_info(self.PROJECT)
self.assertEqual(result, {'coverage': 1})
mocked_error.assert_not_called()
mocked_get_json_from_url.assert_called_with(self.LATEST_REPORT_INFO_URL)<|docstring|>Tests that _get_latest_cov_report_info works as intended.<|endoftext|> |
c9e8da8b539a9d1bde1621dd112978cd9d15e9228003fb78ac8e6729ee3c67bd | @mock.patch('logging.error')
@mock.patch('coverage.get_json_from_url', return_value=None)
def test_get_latest_cov_report_info_fail(self, _, mocked_error):
"Tests that _get_latest_cov_report_info works as intended when we can't\n get latest report info."
result = coverage._get_latest_cov_report_info('project')
self.assertIsNone(result)
mocked_error.assert_called_with('Could not get the coverage report json from url: %s.', self.LATEST_REPORT_INFO_URL) | Tests that _get_latest_cov_report_info works as intended when we can't
get latest report info. | infra/cifuzz/coverage_test.py | test_get_latest_cov_report_info_fail | mejo1024/oss-fuzz | 3 | python | @mock.patch('logging.error')
@mock.patch('coverage.get_json_from_url', return_value=None)
def test_get_latest_cov_report_info_fail(self, _, mocked_error):
"Tests that _get_latest_cov_report_info works as intended when we can't\n get latest report info."
result = coverage._get_latest_cov_report_info('project')
self.assertIsNone(result)
mocked_error.assert_called_with('Could not get the coverage report json from url: %s.', self.LATEST_REPORT_INFO_URL) | @mock.patch('logging.error')
@mock.patch('coverage.get_json_from_url', return_value=None)
def test_get_latest_cov_report_info_fail(self, _, mocked_error):
"Tests that _get_latest_cov_report_info works as intended when we can't\n get latest report info."
result = coverage._get_latest_cov_report_info('project')
self.assertIsNone(result)
mocked_error.assert_called_with('Could not get the coverage report json from url: %s.', self.LATEST_REPORT_INFO_URL)<|docstring|>Tests that _get_latest_cov_report_info works as intended when we can't
get latest report info.<|endoftext|> |
434413146874aeca1d00e26201ef72641d3fe574ea3b04bd62fa0d610731752e | def logging_function(_=None):
'Logs start, sleeps for 0.5s, logs end'
logging.info(multiprocessing.current_process().name)
time.sleep(0.5)
logging.info(multiprocessing.current_process().name) | Logs start, sleeps for 0.5s, logs end | core/eolearn/tests/test_eoexecutor.py | logging_function | chorng/eo-learn | 0 | python | def logging_function(_=None):
logging.info(multiprocessing.current_process().name)
time.sleep(0.5)
logging.info(multiprocessing.current_process().name) | def logging_function(_=None):
logging.info(multiprocessing.current_process().name)
time.sleep(0.5)
logging.info(multiprocessing.current_process().name)<|docstring|>Logs start, sleeps for 0.5s, logs end<|endoftext|> |
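A sketch of how the helper above is typically exercised: mapping it over a small process pool so each worker logs its process name at the start and end of the half-second sleep. Handler configuration is only inherited by workers under the fork start method; with spawn, each worker would need its own setup (for example via a Pool initializer).

import logging
import multiprocessing

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
    with multiprocessing.Pool(processes=2) as pool:
        pool.map(logging_function, range(4))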
f59fd177e38fe1a2fcfa4e4aaf7c36f1bd2270b1680bd0decb74ea41786a84eb | def as_atom(document_or_set: Union[(Error, DocumentSet, Document)], query: Optional[ClassicAPIQuery]=None) -> str:
'Serialize a :class:`DocumentSet` as Atom.'
if isinstance(document_or_set, Error):
return AtomXMLSerializer().serialize_error(document_or_set, query=query)
elif ('paper_id' in document_or_set):
return AtomXMLSerializer().serialize_document(document_or_set, query=query)
return AtomXMLSerializer().serialize(document_or_set, query=query) | Serialize a :class:`DocumentSet` as Atom. | search/serialize/atom.py | as_atom | f380cedric/arxiv-search | 35 | python | def as_atom(document_or_set: Union[(Error, DocumentSet, Document)], query: Optional[ClassicAPIQuery]=None) -> str:
if isinstance(document_or_set, Error):
return AtomXMLSerializer().serialize_error(document_or_set, query=query)
elif ('paper_id' in document_or_set):
return AtomXMLSerializer().serialize_document(document_or_set, query=query)
return AtomXMLSerializer().serialize(document_or_set, query=query) | def as_atom(document_or_set: Union[(Error, DocumentSet, Document)], query: Optional[ClassicAPIQuery]=None) -> str:
if isinstance(document_or_set, Error):
return AtomXMLSerializer().serialize_error(document_or_set, query=query)
elif ('paper_id' in document_or_set):
return AtomXMLSerializer().serialize_document(document_or_set, query=query)
return AtomXMLSerializer().serialize(document_or_set, query=query)<|docstring|>Serialize a :class:`DocumentSet` as Atom.<|endoftext|> |
3bb442890697bce2e790fa3603f48411b47bbc8c3338405cb843b2e6fd44ffe2 | def transform_document(self, fg: FeedGenerator, doc: Document, query: Optional[ClassicAPIQuery]=None) -> None:
'Select a subset of :class:`Document` properties for public API.'
entry = fg.add_entry()
entry.id(self._fix_url(url_for('abs', paper_id=doc['paper_id'], version=doc['version'], _external=True)))
entry.title(doc['title'])
entry.summary(doc['abstract'])
entry.published(to_utc((doc['submitted_date_first'] or doc['submitted_date'])))
entry.updated(to_utc((doc['updated_date'] or doc['modified_date'] or doc['submitted_date'])))
entry.link({'href': self._fix_url(url_for('abs', paper_id=doc['paper_id'], version=doc['version'], _external=True)), 'type': 'text/html'})
entry.link({'href': self._fix_url(url_for('pdf', paper_id=doc['paper_id'], version=doc['version'], _external=True)), 'type': 'application/pdf', 'rel': 'related', 'title': 'pdf'})
if doc.get('comments'):
entry.arxiv.comment(doc['comments'])
if doc.get('journal_ref'):
entry.arxiv.journal_ref(doc['journal_ref'])
if doc.get('doi'):
entry.arxiv.doi(doc['doi'])
if (doc['primary_classification']['category'] is not None):
entry.arxiv.primary_category(doc['primary_classification']['category']['id'])
entry.category(term=doc['primary_classification']['category']['id'], scheme=ARXIV_NS)
for category in doc['secondary_classification']:
entry.category(term=category['category']['id'], scheme=ARXIV_NS)
for author in doc['authors']:
author_data: Dict[(str, Any)] = {'name': author['full_name']}
if author.get('affiliation'):
author_data['affiliation'] = author['affiliation']
entry.arxiv.author(author_data) | Select a subset of :class:`Document` properties for public API. | search/serialize/atom.py | transform_document | f380cedric/arxiv-search | 35 | python | def transform_document(self, fg: FeedGenerator, doc: Document, query: Optional[ClassicAPIQuery]=None) -> None:
entry = fg.add_entry()
entry.id(self._fix_url(url_for('abs', paper_id=doc['paper_id'], version=doc['version'], _external=True)))
entry.title(doc['title'])
entry.summary(doc['abstract'])
entry.published(to_utc((doc['submitted_date_first'] or doc['submitted_date'])))
entry.updated(to_utc((doc['updated_date'] or doc['modified_date'] or doc['submitted_date'])))
entry.link({'href': self._fix_url(url_for('abs', paper_id=doc['paper_id'], version=doc['version'], _external=True)), 'type': 'text/html'})
entry.link({'href': self._fix_url(url_for('pdf', paper_id=doc['paper_id'], version=doc['version'], _external=True)), 'type': 'application/pdf', 'rel': 'related', 'title': 'pdf'})
if doc.get('comments'):
entry.arxiv.comment(doc['comments'])
if doc.get('journal_ref'):
entry.arxiv.journal_ref(doc['journal_ref'])
if doc.get('doi'):
entry.arxiv.doi(doc['doi'])
if (doc['primary_classification']['category'] is not None):
entry.arxiv.primary_category(doc['primary_classification']['category']['id'])
entry.category(term=doc['primary_classification']['category']['id'], scheme=ARXIV_NS)
for category in doc['secondary_classification']:
entry.category(term=category['category']['id'], scheme=ARXIV_NS)
for author in doc['authors']:
author_data: Dict[(str, Any)] = {'name': author['full_name']}
if author.get('affiliation'):
author_data['affiliation'] = author['affiliation']
entry.arxiv.author(author_data) | def transform_document(self, fg: FeedGenerator, doc: Document, query: Optional[ClassicAPIQuery]=None) -> None:
entry = fg.add_entry()
entry.id(self._fix_url(url_for('abs', paper_id=doc['paper_id'], version=doc['version'], _external=True)))
entry.title(doc['title'])
entry.summary(doc['abstract'])
entry.published(to_utc((doc['submitted_date_first'] or doc['submitted_date'])))
entry.updated(to_utc((doc['updated_date'] or doc['modified_date'] or doc['submitted_date'])))
entry.link({'href': self._fix_url(url_for('abs', paper_id=doc['paper_id'], version=doc['version'], _external=True)), 'type': 'text/html'})
entry.link({'href': self._fix_url(url_for('pdf', paper_id=doc['paper_id'], version=doc['version'], _external=True)), 'type': 'application/pdf', 'rel': 'related', 'title': 'pdf'})
if doc.get('comments'):
entry.arxiv.comment(doc['comments'])
if doc.get('journal_ref'):
entry.arxiv.journal_ref(doc['journal_ref'])
if doc.get('doi'):
entry.arxiv.doi(doc['doi'])
if (doc['primary_classification']['category'] is not None):
entry.arxiv.primary_category(doc['primary_classification']['category']['id'])
entry.category(term=doc['primary_classification']['category']['id'], scheme=ARXIV_NS)
for category in doc['secondary_classification']:
entry.category(term=category['category']['id'], scheme=ARXIV_NS)
for author in doc['authors']:
author_data: Dict[(str, Any)] = {'name': author['full_name']}
if author.get('affiliation'):
author_data['affiliation'] = author['affiliation']
entry.arxiv.author(author_data)<|docstring|>Select a subset of :class:`Document` properties for public API.<|endoftext|> |
38ac6003b3abce233b4529658702a89e48ce78d6ed5c5ff781081f52e43a7d0a | def serialize(self, document_set: DocumentSet, query: Optional[ClassicAPIQuery]=None) -> str:
'Generate Atom response for a :class:`DocumentSet`.'
fg = self._get_feed(query)
fg.opensearch.totalResults(document_set['metadata'].get('total_results'))
fg.opensearch.itemsPerPage(document_set['metadata'].get('size'))
fg.opensearch.startIndex(document_set['metadata'].get('start'))
for doc in reversed(document_set['results']):
self.transform_document(fg, doc, query=query)
return safe_str(fg.atom_str(pretty=True)) | Generate Atom response for a :class:`DocumentSet`. | search/serialize/atom.py | serialize | f380cedric/arxiv-search | 35 | python | def serialize(self, document_set: DocumentSet, query: Optional[ClassicAPIQuery]=None) -> str:
fg = self._get_feed(query)
fg.opensearch.totalResults(document_set['metadata'].get('total_results'))
fg.opensearch.itemsPerPage(document_set['metadata'].get('size'))
fg.opensearch.startIndex(document_set['metadata'].get('start'))
for doc in reversed(document_set['results']):
self.transform_document(fg, doc, query=query)
return safe_str(fg.atom_str(pretty=True)) | def serialize(self, document_set: DocumentSet, query: Optional[ClassicAPIQuery]=None) -> str:
fg = self._get_feed(query)
fg.opensearch.totalResults(document_set['metadata'].get('total_results'))
fg.opensearch.itemsPerPage(document_set['metadata'].get('size'))
fg.opensearch.startIndex(document_set['metadata'].get('start'))
for doc in reversed(document_set['results']):
self.transform_document(fg, doc, query=query)
return safe_str(fg.atom_str(pretty=True))<|docstring|>Generate Atom response for a :class:`DocumentSet`.<|endoftext|> |
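For readers unfamiliar with feedgen, a minimal stand-alone sketch of the FeedGenerator pattern the serializer above builds on; the ids, titles and dates are placeholders, not arXiv's.

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.id('http://example.org/feed')
fg.title('Example query results')
fg.link(href='http://example.org/feed', rel='self')
fg.updated('2021-01-01T00:00:00Z')

entry = fg.add_entry()
entry.id('http://example.org/abs/0000.00001')
entry.title('An example entry')
entry.summary('One sentence of abstract text.')
entry.updated('2021-01-01T00:00:00Z')

print(fg.atom_str(pretty=True).decode('utf-8'))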
be5116e96fe170c80ca4887d01df986ff0958b2e6df4d5203567221fb4d81bfc | def serialize_error(self, error: Error, query: Optional[ClassicAPIQuery]=None) -> str:
'Generate Atom error response.'
fg = self._get_feed(query)
fg.opensearch.totalResults(1)
fg.opensearch.itemsPerPage(1)
fg.opensearch.startIndex(0)
entry = fg.add_entry()
entry.id(error.id)
entry.title('Error')
entry.summary(error.error)
entry.updated(to_utc(error.created))
entry.link({'href': self._fix_url(error.link), 'rel': 'alternate', 'type': 'text/html'})
entry.arxiv.author({'name': error.author})
return safe_str(fg.atom_str(pretty=True)) | Generate Atom error response. | search/serialize/atom.py | serialize_error | f380cedric/arxiv-search | 35 | python | def serialize_error(self, error: Error, query: Optional[ClassicAPIQuery]=None) -> str:
fg = self._get_feed(query)
fg.opensearch.totalResults(1)
fg.opensearch.itemsPerPage(1)
fg.opensearch.startIndex(0)
entry = fg.add_entry()
entry.id(error.id)
entry.title('Error')
entry.summary(error.error)
entry.updated(to_utc(error.created))
entry.link({'href': self._fix_url(error.link), 'rel': 'alternate', 'type': 'text/html'})
entry.arxiv.author({'name': error.author})
return safe_str(fg.atom_str(pretty=True)) | def serialize_error(self, error: Error, query: Optional[ClassicAPIQuery]=None) -> str:
fg = self._get_feed(query)
fg.opensearch.totalResults(1)
fg.opensearch.itemsPerPage(1)
fg.opensearch.startIndex(0)
entry = fg.add_entry()
entry.id(error.id)
entry.title('Error')
entry.summary(error.error)
entry.updated(to_utc(error.created))
entry.link({'href': self._fix_url(error.link), 'rel': 'alternate', 'type': 'text/html'})
entry.arxiv.author({'name': error.author})
return safe_str(fg.atom_str(pretty=True))<|docstring|>Generate Atom error response.<|endoftext|> |
c1e74618f11a8af2354cf63502c77e7c857b9022b5e4dedacc94b7aa20d777db | def serialize_document(self, document: Document, query: Optional[ClassicAPIQuery]=None) -> str:
'Generate Atom feed for a single :class:`Document`.'
document_set = document_set_from_documents([document])
return self.serialize(document_set, query=query) | Generate Atom feed for a single :class:`Document`. | search/serialize/atom.py | serialize_document | f380cedric/arxiv-search | 35 | python | def serialize_document(self, document: Document, query: Optional[ClassicAPIQuery]=None) -> str:
document_set = document_set_from_documents([document])
return self.serialize(document_set, query=query) | def serialize_document(self, document: Document, query: Optional[ClassicAPIQuery]=None) -> str:
document_set = document_set_from_documents([document])
return self.serialize(document_set, query=query)<|docstring|>Generate Atom feed for a single :class:`Document`.<|endoftext|> |
1cca491b754c1ee7cfa28a1bf4f39a8a5442680a4ba9240229817685f7d012a1 | def _compute_covariance(self, lpost, res):
"\n Compute the covariance of the parameters using inverse of the Hessian, i.e.\n the second-order derivative of the log-likelihood. Also calculates an estimate\n of the standard deviation in the parameters, using the square root of the diagonal\n of the covariance matrix.\n\n The Hessian is either estimated directly by the chosen method of fitting, or\n approximated using the ``statsmodel`` ``approx_hess`` function.\n\n Parameters\n ----------\n lpost: instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n\n res: instance of ``scipy``'s ``OptimizeResult`` class\n The object containing the results from a optimization run\n "
if hasattr(res, 'hess_inv'):
if (not isinstance(res.hess_inv, np.ndarray)):
self.cov = np.asarray(res.hess_inv.todense())
else:
self.cov = res.hess_inv
self.err = np.sqrt(np.diag(self.cov))
elif comp_hessian:
self.log.info('Approximating Hessian with finite differences ...')
phess = approx_hess(np.atleast_1d(self.p_opt), lpost)
self.cov = np.linalg.inv(phess)
self.err = np.sqrt(np.diag(np.abs(self.cov)))
else:
self.cov = None
self.err = None | Compute the covariance of the parameters using inverse of the Hessian, i.e.
the second-order derivative of the log-likelihood. Also calculates an estimate
of the standard deviation in the parameters, using the square root of the diagonal
of the covariance matrix.
The Hessian is either estimated directly by the chosen method of fitting, or
approximated using the ``statsmodel`` ``approx_hess`` function.
Parameters
----------
lpost: instance of :class:`Posterior` or one of its subclasses
The object containing the function that is being optimized
in the regression
res: instance of ``scipy``'s ``OptimizeResult`` class
The object containing the results from an optimization run | stingray/modeling/parameterestimation.py | _compute_covariance | nimeshvashistha/stingray | 133 | python | def _compute_covariance(self, lpost, res):
"\n Compute the covariance of the parameters using inverse of the Hessian, i.e.\n the second-order derivative of the log-likelihood. Also calculates an estimate\n of the standard deviation in the parameters, using the square root of the diagonal\n of the covariance matrix.\n\n The Hessian is either estimated directly by the chosen method of fitting, or\n approximated using the ``statsmodel`` ``approx_hess`` function.\n\n Parameters\n ----------\n lpost: instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n\n res: instance of ``scipy``'s ``OptimizeResult`` class\n The object containing the results from a optimization run\n "
if hasattr(res, 'hess_inv'):
if (not isinstance(res.hess_inv, np.ndarray)):
self.cov = np.asarray(res.hess_inv.todense())
else:
self.cov = res.hess_inv
self.err = np.sqrt(np.diag(self.cov))
elif comp_hessian:
self.log.info('Approximating Hessian with finite differences ...')
phess = approx_hess(np.atleast_1d(self.p_opt), lpost)
self.cov = np.linalg.inv(phess)
self.err = np.sqrt(np.diag(np.abs(self.cov)))
else:
self.cov = None
self.err = None | def _compute_covariance(self, lpost, res):
"\n Compute the covariance of the parameters using inverse of the Hessian, i.e.\n the second-order derivative of the log-likelihood. Also calculates an estimate\n of the standard deviation in the parameters, using the square root of the diagonal\n of the covariance matrix.\n\n The Hessian is either estimated directly by the chosen method of fitting, or\n approximated using the ``statsmodel`` ``approx_hess`` function.\n\n Parameters\n ----------\n lpost: instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n\n res: instance of ``scipy``'s ``OptimizeResult`` class\n The object containing the results from a optimization run\n "
if hasattr(res, 'hess_inv'):
if (not isinstance(res.hess_inv, np.ndarray)):
self.cov = np.asarray(res.hess_inv.todense())
else:
self.cov = res.hess_inv
self.err = np.sqrt(np.diag(self.cov))
elif comp_hessian:
self.log.info('Approximating Hessian with finite differences ...')
phess = approx_hess(np.atleast_1d(self.p_opt), lpost)
self.cov = np.linalg.inv(phess)
self.err = np.sqrt(np.diag(np.abs(self.cov)))
else:
self.cov = None
self.err = None<|docstring|>Compute the covariance of the parameters using inverse of the Hessian, i.e.
the second-order derivative of the log-likelihood. Also calculates an estimate
of the standard deviation in the parameters, using the square root of the diagonal
of the covariance matrix.
The Hessian is either estimated directly by the chosen method of fitting, or
approximated using the ``statsmodels`` ``approx_hess`` function.
Parameters
----------
lpost: instance of :class:`Posterior` or one of its subclasses
The object containing the function that is being optimized
in the regression
res: instance of ``scipy``'s ``OptimizeResult`` class
The object containing the results from an optimization run<|endoftext|>
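A minimal, self-contained sketch of the error-propagation idea behind _compute_covariance, using a toy Gaussian likelihood rather than the class above; the data, parameter names and numbers are invented for illustration, and only the scipy and statsmodels calls already visible in the source are assumed:

import numpy as np
from scipy.optimize import minimize
from statsmodels.tools.numdiff import approx_hess

rng = np.random.default_rng(42)
data = rng.normal(loc=2.0, scale=3.0, size=1000)

def neg_loglike(pars):
    # negative Gaussian log-likelihood in (mean, log_sigma); toy stand-in for lpost
    mean, log_sigma = pars
    sigma = np.exp(log_sigma)
    return 0.5 * np.sum(((data - mean) / sigma) ** 2) + data.size * log_sigma

opt = minimize(neg_loglike, x0=[0.0, 0.0], method="BFGS")
phess = approx_hess(np.atleast_1d(opt.x), neg_loglike)  # finite-difference Hessian at the optimum
cov = np.linalg.inv(phess)                              # covariance ~ inverse of the Hessian
err = np.sqrt(np.diag(np.abs(cov)))                     # 1-sigma uncertainties on (mean, log_sigma)
print(opt.x, err)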
ec871b78411320124d1520ffb89b07d35179bb0b4442558121a96b6810f2111b | def _compute_model(self, lpost):
'\n Compute the values of the best-fit model for all ``x``.\n\n Parameters\n ----------\n lpost: instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n '
_fitter_to_model_params(lpost.model, self.p_opt)
self.mfit = lpost.model(lpost.x) | Compute the values of the best-fit model for all ``x``.
Parameters
----------
lpost: instance of :class:`Posterior` or one of its subclasses
The object containing the function that is being optimized
in the regression | stingray/modeling/parameterestimation.py | _compute_model | nimeshvashistha/stingray | 133 | python | def _compute_model(self, lpost):
'\n Compute the values of the best-fit model for all ``x``.\n\n Parameters\n ----------\n lpost: instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n '
_fitter_to_model_params(lpost.model, self.p_opt)
self.mfit = lpost.model(lpost.x) | def _compute_model(self, lpost):
'\n Compute the values of the best-fit model for all ``x``.\n\n Parameters\n ----------\n lpost: instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n '
_fitter_to_model_params(lpost.model, self.p_opt)
self.mfit = lpost.model(lpost.x)<|docstring|>Compute the values of the best-fit model for all ``x``.
Parameters
----------
lpost: instance of :class:`Posterior` or one of its subclasses
The object containing the function that is being optimized
in the regression<|endoftext|> |
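The best-fit model evaluation above amounts to pushing a flat parameter vector into an astropy model and evaluating it on the independent variable; a hedged sketch with a made-up power-law model and frequency grid, not the library's own objects:

import numpy as np
from astropy.modeling import models

freq = np.logspace(-2, 1, 200)                 # stand-in for lpost.x
plaw = models.PowerLaw1D(amplitude=1.0, x_0=1.0, alpha=1.0)
plaw.parameters = [2.0, 1.0, 1.5]              # best-fit values in the model's parameter order
mfit = plaw(freq)                              # model values for every bin in freq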
4b55a418e39850d945eefce59d6c123d9d342cceab31506c622f5afab7511c8c | def _compute_criteria(self, lpost):
'\n Compute various information criteria useful for model comparison in\n non-nested models.\n\n Currently implemented are the Akaike Information Criterion [#]_ and the\n Bayesian Information Criterion [#]_.\n\n Parameters\n ----------\n lpost: instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n\n References\n ----------\n .. [#] http://ieeexplore.ieee.org/document/1100705/?reload=true\n .. [#] https://projecteuclid.org/euclid.aos/1176344136\n\n '
if isinstance(lpost, Posterior):
self.deviance = ((- 2.0) * lpost.loglikelihood(self.p_opt, neg=False))
elif isinstance(lpost, LogLikelihood):
self.deviance = (2.0 * self.result)
self.aic = (self.result + (2.0 * self.p_opt.shape[0]))
self.bic = (self.result + (self.p_opt.shape[0] * np.log(lpost.x.shape[0]))) | Compute various information criteria useful for model comparison in
non-nested models.
Currently implemented are the Akaike Information Criterion [#]_ and the
Bayesian Information Criterion [#]_.
Parameters
----------
lpost: instance of :class:`Posterior` or one of its subclasses
The object containing the function that is being optimized
in the regression
References
----------
.. [#] http://ieeexplore.ieee.org/document/1100705/?reload=true
.. [#] https://projecteuclid.org/euclid.aos/1176344136 | stingray/modeling/parameterestimation.py | _compute_criteria | nimeshvashistha/stingray | 133 | python | def _compute_criteria(self, lpost):
'\n Compute various information criteria useful for model comparison in\n non-nested models.\n\n Currently implemented are the Akaike Information Criterion [#]_ and the\n Bayesian Information Criterion [#]_.\n\n Parameters\n ----------\n lpost: instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n\n References\n ----------\n .. [#] http://ieeexplore.ieee.org/document/1100705/?reload=true\n .. [#] https://projecteuclid.org/euclid.aos/1176344136\n\n '
if isinstance(lpost, Posterior):
self.deviance = ((- 2.0) * lpost.loglikelihood(self.p_opt, neg=False))
elif isinstance(lpost, LogLikelihood):
self.deviance = (2.0 * self.result)
self.aic = (self.result + (2.0 * self.p_opt.shape[0]))
self.bic = (self.result + (self.p_opt.shape[0] * np.log(lpost.x.shape[0]))) | def _compute_criteria(self, lpost):
'\n Compute various information criteria useful for model comparison in\n non-nested models.\n\n Currently implemented are the Akaike Information Criterion [#]_ and the\n Bayesian Information Criterion [#]_.\n\n Parameters\n ----------\n lpost: instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n\n References\n ----------\n .. [#] http://ieeexplore.ieee.org/document/1100705/?reload=true\n .. [#] https://projecteuclid.org/euclid.aos/1176344136\n\n '
if isinstance(lpost, Posterior):
self.deviance = ((- 2.0) * lpost.loglikelihood(self.p_opt, neg=False))
elif isinstance(lpost, LogLikelihood):
self.deviance = (2.0 * self.result)
self.aic = (self.result + (2.0 * self.p_opt.shape[0]))
self.bic = (self.result + (self.p_opt.shape[0] * np.log(lpost.x.shape[0])))<|docstring|>Compute various information criteria useful for model comparison in
non-nested models.
Currently implemented are the Akaike Information Criterion [#]_ and the
Bayesian Information Criterion [#]_.
Parameters
----------
lpost: instance of :class:`Posterior` or one of its subclasses
The object containing the function that is being optimized
in the regression
References
----------
.. [#] http://ieeexplore.ieee.org/document/1100705/?reload=true
.. [#] https://projecteuclid.org/euclid.aos/1176344136<|endoftext|> |
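For readers who want the information criteria spelled out, a small sketch with made-up numbers, using the textbook convention AIC = 2k - 2 ln L and BIC = k ln n - 2 ln L (the bookkeeping above stores an already-minimized quantity in self.result, so constant factors can differ):

import numpy as np

def information_criteria(neg_loglike, n_params, n_data):
    # neg_loglike is the minimized negative log-likelihood of a fit
    aic = 2.0 * neg_loglike + 2.0 * n_params
    bic = 2.0 * neg_loglike + n_params * np.log(n_data)
    return aic, bic

# two candidate fits to the same 512-bin data set (illustrative values only)
print(information_criteria(480.3, n_params=3, n_data=512))
print(information_criteria(478.9, n_params=5, n_data=512))  # lower AIC/BIC is preferred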
dc240bc66aaa50b64afc4839f7345b7e7b8398362c453a7f95e30517a6de5adc | def _compute_statistics(self, lpost):
'\n Compute some useful fit statistics, like the degrees of freedom and the\n figure of merit.\n\n Parameters\n ----------\n lpost: instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n '
try:
self.mfit
except AttributeError:
self._compute_model(lpost)
self.merit = np.sum((((lpost.y - self.mfit) / self.mfit) ** 2.0))
self.dof = (lpost.y.shape[0] - float(self.p_opt.shape[0]))
self.sexp = ((2.0 * len(lpost.x)) * len(self.p_opt))
self.ssd = np.sqrt((2.0 * self.sexp))
self.sobs = np.sum((lpost.y - self.mfit)) | Compute some useful fit statistics, like the degrees of freedom and the
figure of merit.
Parameters
----------
lpost: instance of :class:`Posterior` or one of its subclasses
The object containing the function that is being optimized
in the regression | stingray/modeling/parameterestimation.py | _compute_statistics | nimeshvashistha/stingray | 133 | python | def _compute_statistics(self, lpost):
'\n Compute some useful fit statistics, like the degrees of freedom and the\n figure of merit.\n\n Parameters\n ----------\n lpost: instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n '
try:
self.mfit
except AttributeError:
self._compute_model(lpost)
self.merit = np.sum((((lpost.y - self.mfit) / self.mfit) ** 2.0))
self.dof = (lpost.y.shape[0] - float(self.p_opt.shape[0]))
self.sexp = ((2.0 * len(lpost.x)) * len(self.p_opt))
self.ssd = np.sqrt((2.0 * self.sexp))
self.sobs = np.sum((lpost.y - self.mfit)) | def _compute_statistics(self, lpost):
'\n Compute some useful fit statistics, like the degrees of freedom and the\n figure of merit.\n\n Parameters\n ----------\n lpost: instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n '
try:
self.mfit
except AttributeError:
self._compute_model(lpost)
self.merit = np.sum((((lpost.y - self.mfit) / self.mfit) ** 2.0))
self.dof = (lpost.y.shape[0] - float(self.p_opt.shape[0]))
self.sexp = ((2.0 * len(lpost.x)) * len(self.p_opt))
self.ssd = np.sqrt((2.0 * self.sexp))
self.sobs = np.sum((lpost.y - self.mfit))<|docstring|>Compute some useful fit statistics, like the degrees of freedom and the
figure of merit.
Parameters
----------
lpost: instance of :class:`Posterior` or one of its subclasses
The object containing the function that is being optimized
in the regression<|endoftext|> |
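A short sketch of the summary statistics described above, with made-up observed and model values:

import numpy as np

y = np.array([11.2, 9.8, 10.5, 10.1, 9.6])    # observed values (illustrative)
mfit = np.full(5, 10.0)                       # best-fit model values
n_free = 2                                    # number of free parameters

merit = np.sum(((y - mfit) / mfit) ** 2.0)    # figure of merit used above
dof = y.size - float(n_free)                  # degrees of freedom
print(merit, merit / dof)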
c9f72c279d38f7dbd54cd411ed5d6a81023bfafd1dc712546dcdad82052527f3 | def print_summary(self, lpost):
'\n Print a useful summary of the fitting procedure to screen or\n a log file.\n\n Parameters\n ----------\n lpost : instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n '
self.log.info('The best-fit model parameters plus errors are:')
fixed = [lpost.model.fixed[n] for n in lpost.model.param_names]
tied = [lpost.model.tied[n] for n in lpost.model.param_names]
bounds = [lpost.model.bounds[n] for n in lpost.model.param_names]
parnames = [n for (n, f) in zip(lpost.model.param_names, np.logical_or(fixed, tied)) if (not f)]
all_parnames = [n for n in lpost.model.param_names]
for (i, par) in enumerate(all_parnames):
self.log.info('{:3}) Parameter {:<20}: '.format(i, par))
if (par in parnames):
idx = parnames.index(par)
err_info = ' (no error estimate)'
if (self.err is not None):
err_info = ' +/- {:<20.5f}'.format(self.err[idx])
self.log.info('{:<20.5f}{} '.format(self.p_opt[idx], err_info))
self.log.info('[{:>10} {:>10}]'.format(str(bounds[i][0]), str(bounds[i][1])))
elif fixed[i]:
self.log.info('{:<20.5f} (Fixed) '.format(lpost.model.parameters[i]))
elif tied[i]:
self.log.info('{:<20.5f} (Tied) '.format(lpost.model.parameters[i]))
self.log.info('\n')
self.log.info('Fitting statistics: ')
self.log.info((' -- number of data points: %i' % len(lpost.x)))
try:
self.deviance
except AttributeError:
self._compute_criteria(lpost)
    self.log.info((' -- Deviance [-2 log L] D = %.3f' % self.deviance))
self.log.info(((' -- The Akaike Information Criterion of the model is: ' + str(self.aic)) + '.'))
self.log.info(((' -- The Bayesian Information Criterion of the model is: ' + str(self.bic)) + '.'))
try:
self.merit
except AttributeError:
self._compute_statistics(lpost)
    self.log.info(((' -- The figure-of-merit function for this model ' + (' is: %.5f' % self.merit)) + (' and the fit for %i dof is %.3f' % (self.dof, (self.merit / self.dof)))))
    self.log.info((' -- Summed Residuals S = %.5f' % self.sobs))
    self.log.info((' -- Expected S ~ %.5f +/- %.5f' % (self.sexp, self.ssd)))
return | Print a useful summary of the fitting procedure to screen or
a log file.
Parameters
----------
lpost : instance of :class:`Posterior` or one of its subclasses
The object containing the function that is being optimized
in the regression | stingray/modeling/parameterestimation.py | print_summary | nimeshvashistha/stingray | 133 | python | def print_summary(self, lpost):
'\n Print a useful summary of the fitting procedure to screen or\n a log file.\n\n Parameters\n ----------\n lpost : instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n '
self.log.info('The best-fit model parameters plus errors are:')
fixed = [lpost.model.fixed[n] for n in lpost.model.param_names]
tied = [lpost.model.tied[n] for n in lpost.model.param_names]
bounds = [lpost.model.bounds[n] for n in lpost.model.param_names]
parnames = [n for (n, f) in zip(lpost.model.param_names, np.logical_or(fixed, tied)) if (not f)]
all_parnames = [n for n in lpost.model.param_names]
for (i, par) in enumerate(all_parnames):
self.log.info('{:3}) Parameter {:<20}: '.format(i, par))
if (par in parnames):
idx = parnames.index(par)
err_info = ' (no error estimate)'
if (self.err is not None):
err_info = ' +/- {:<20.5f}'.format(self.err[idx])
self.log.info('{:<20.5f}{} '.format(self.p_opt[idx], err_info))
self.log.info('[{:>10} {:>10}]'.format(str(bounds[i][0]), str(bounds[i][1])))
elif fixed[i]:
self.log.info('{:<20.5f} (Fixed) '.format(lpost.model.parameters[i]))
elif tied[i]:
self.log.info('{:<20.5f} (Tied) '.format(lpost.model.parameters[i]))
self.log.info('\n')
self.log.info('Fitting statistics: ')
self.log.info((' -- number of data points: %i' % len(lpost.x)))
try:
self.deviance
except AttributeError:
self._compute_criteria(lpost)
    self.log.info((' -- Deviance [-2 log L] D = %.3f' % self.deviance))
self.log.info(((' -- The Akaike Information Criterion of the model is: ' + str(self.aic)) + '.'))
self.log.info(((' -- The Bayesian Information Criterion of the model is: ' + str(self.bic)) + '.'))
try:
self.merit
except AttributeError:
self._compute_statistics(lpost)
    self.log.info(((' -- The figure-of-merit function for this model ' + (' is: %.5f' % self.merit)) + (' and the fit for %i dof is %.3f' % (self.dof, (self.merit / self.dof)))))
    self.log.info((' -- Summed Residuals S = %.5f' % self.sobs))
    self.log.info((' -- Expected S ~ %.5f +/- %.5f' % (self.sexp, self.ssd)))
return | def print_summary(self, lpost):
'\n Print a useful summary of the fitting procedure to screen or\n a log file.\n\n Parameters\n ----------\n lpost : instance of :class:`Posterior` or one of its subclasses\n The object containing the function that is being optimized\n in the regression\n '
self.log.info('The best-fit model parameters plus errors are:')
fixed = [lpost.model.fixed[n] for n in lpost.model.param_names]
tied = [lpost.model.tied[n] for n in lpost.model.param_names]
bounds = [lpost.model.bounds[n] for n in lpost.model.param_names]
parnames = [n for (n, f) in zip(lpost.model.param_names, np.logical_or(fixed, tied)) if (not f)]
all_parnames = [n for n in lpost.model.param_names]
for (i, par) in enumerate(all_parnames):
self.log.info('{:3}) Parameter {:<20}: '.format(i, par))
if (par in parnames):
idx = parnames.index(par)
err_info = ' (no error estimate)'
if (self.err is not None):
err_info = ' +/- {:<20.5f}'.format(self.err[idx])
self.log.info('{:<20.5f}{} '.format(self.p_opt[idx], err_info))
self.log.info('[{:>10} {:>10}]'.format(str(bounds[i][0]), str(bounds[i][1])))
elif fixed[i]:
self.log.info('{:<20.5f} (Fixed) '.format(lpost.model.parameters[i]))
elif tied[i]:
self.log.info('{:<20.5f} (Tied) '.format(lpost.model.parameters[i]))
self.log.info('\n')
self.log.info('Fitting statistics: ')
self.log.info((' -- number of data points: %i' % len(lpost.x)))
try:
self.deviance
except AttributeError:
self._compute_criteria(lpost)
    self.log.info((' -- Deviance [-2 log L] D = %.3f' % self.deviance))
self.log.info(((' -- The Akaike Information Criterion of the model is: ' + str(self.aic)) + '.'))
self.log.info(((' -- The Bayesian Information Criterion of the model is: ' + str(self.bic)) + '.'))
try:
self.merit
except AttributeError:
self._compute_statistics(lpost)
    self.log.info(((' -- The figure-of-merit function for this model ' + (' is: %.5f' % self.merit)) + (' and the fit for %i dof is %.3f' % (self.dof, (self.merit / self.dof)))))
    self.log.info((' -- Summed Residuals S = %.5f' % self.sobs))
    self.log.info((' -- Expected S ~ %.5f +/- %.5f' % (self.sexp, self.ssd)))
return<|docstring|>Print a useful summary of the fitting procedure to screen or
a log file.
Parameters
----------
lpost : instance of :class:`Posterior` or one of its subclasses
The object containing the function that is being optimized
in the regression<|endoftext|> |
747f356f060d0bbe24582e77ec37bc131cd98aad8232158befa3f0e9bdbc75e9 | def fit(self, lpost, t0, neg=True, scipy_optimize_options=None):
'\n Do either a Maximum-A-Posteriori (MAP) or Maximum Likelihood (ML)\n fit to the data.\n\n MAP fits include priors, ML fits do not.\n\n Parameters\n -----------\n lpost : :class:`Posterior` (or subclass) instance\n and instance of class :class:`Posterior` or one of its subclasses\n that defines the function to be minimized (either in ``loglikelihood``\n or ``logposterior``)\n\n t0 : {``list`` | ``numpy.ndarray``}\n List/array with set of initial parameters\n\n neg : bool, optional, default ``True``\n Boolean to be passed to ``lpost``, setting whether to use the\n *negative* posterior or the *negative* log-likelihood. Useful for\n optimization routines, which are generally defined as *minimization* routines.\n\n scipy_optimize_options : dict, optional, default ``None``\n A dictionary with options for ``scipy.optimize.minimize``,\n directly passed on as keyword arguments.\n\n Returns\n --------\n res : :class:`OptimizationResults` object\n An object containing useful summaries of the fitting procedure.\n For details, see documentation of class:`OptimizationResults`.\n '
if ((not isinstance(lpost, Posterior)) and (not isinstance(lpost, LogLikelihood))):
raise TypeError('lpost must be a subclass of Posterior or LogLikelihoood.')
newmod = lpost.model.copy()
p0 = t0
if (not (len(p0) == lpost.npar)):
raise ValueError('Parameter set t0 must be of right length for model in lpost.')
if (scipy.__version__ < '0.10.0'):
args = [neg]
else:
args = (neg,)
if (not scipy_optimize_options):
scipy_optimize_options = {}
funcval = 100.0
i = 0
while ((funcval == 100) or (funcval == 200) or (funcval == 0.0) or (not np.isfinite(funcval))):
if (i > 20):
raise RuntimeError('Fitting unsuccessful!')
t0_p = np.random.multivariate_normal(p0, np.diag((np.abs(p0) / 100.0)))
params = [getattr(newmod, name) for name in newmod.param_names]
bounds = np.array([p.bounds for p in params if (not np.any([p.tied, p.fixed]))])
if (any(((elem is not None) for elem in np.hstack(bounds))) and (self.fitmethod not in ['L-BFGS-B', 'TNC', 'SLSQP'])):
logging.warning((('Fitting method %s ' % self.fitmethod) + 'cannot incorporate the bounds you set!'))
if (any(((elem is not None) for elem in np.hstack(bounds))) or (self.fitmethod not in ['L-BFGS-B', 'TNC', 'SLSQP'])):
use_bounds = False
else:
use_bounds = True
if self.max_post:
if use_bounds:
opt = scipy.optimize.minimize(lpost, t0_p, method=self.fitmethod, args=args, tol=1e-10, bounds=bounds, **scipy_optimize_options)
else:
opt = scipy.optimize.minimize(lpost, t0_p, method=self.fitmethod, args=args, tol=1e-10, **scipy_optimize_options)
elif isinstance(lpost, Posterior):
if use_bounds:
opt = scipy.optimize.minimize(lpost.loglikelihood, t0_p, method=self.fitmethod, args=args, tol=1e-10, bounds=bounds, **scipy_optimize_options)
else:
opt = scipy.optimize.minimize(lpost.loglikelihood, t0_p, method=self.fitmethod, args=args, tol=1e-10, **scipy_optimize_options)
elif isinstance(lpost, LogLikelihood):
if use_bounds:
opt = scipy.optimize.minimize(lpost.evaluate, t0_p, method=self.fitmethod, args=args, tol=1e-10, **scipy_optimize_options)
else:
opt = scipy.optimize.minimize(lpost.evaluate, t0_p, method=self.fitmethod, args=args, tol=1e-10, **scipy_optimize_options)
funcval = opt.fun
if (np.isclose(opt.fun, logmin) or np.isclose(opt.fun, (2 * logmin))):
funcval = 100
i += 1
res = OptimizationResults(lpost, opt, neg=neg)
return res | Do either a Maximum-A-Posteriori (MAP) or Maximum Likelihood (ML)
fit to the data.
MAP fits include priors, ML fits do not.
Parameters
-----------
lpost : :class:`Posterior` (or subclass) instance
an instance of class :class:`Posterior` or one of its subclasses
that defines the function to be minimized (either in ``loglikelihood``
or ``logposterior``)
t0 : {``list`` | ``numpy.ndarray``}
List/array with set of initial parameters
neg : bool, optional, default ``True``
Boolean to be passed to ``lpost``, setting whether to use the
*negative* posterior or the *negative* log-likelihood. Useful for
optimization routines, which are generally defined as *minimization* routines.
scipy_optimize_options : dict, optional, default ``None``
A dictionary with options for ``scipy.optimize.minimize``,
directly passed on as keyword arguments.
Returns
--------
res : :class:`OptimizationResults` object
An object containing useful summaries of the fitting procedure.
For details, see the documentation of :class:`OptimizationResults`. | stingray/modeling/parameterestimation.py | fit | nimeshvashistha/stingray | 133 | python | def fit(self, lpost, t0, neg=True, scipy_optimize_options=None):
'\n Do either a Maximum-A-Posteriori (MAP) or Maximum Likelihood (ML)\n fit to the data.\n\n MAP fits include priors, ML fits do not.\n\n Parameters\n -----------\n lpost : :class:`Posterior` (or subclass) instance\n and instance of class :class:`Posterior` or one of its subclasses\n that defines the function to be minimized (either in ``loglikelihood``\n or ``logposterior``)\n\n t0 : {``list`` | ``numpy.ndarray``}\n List/array with set of initial parameters\n\n neg : bool, optional, default ``True``\n Boolean to be passed to ``lpost``, setting whether to use the\n *negative* posterior or the *negative* log-likelihood. Useful for\n optimization routines, which are generally defined as *minimization* routines.\n\n scipy_optimize_options : dict, optional, default ``None``\n A dictionary with options for ``scipy.optimize.minimize``,\n directly passed on as keyword arguments.\n\n Returns\n --------\n res : :class:`OptimizationResults` object\n An object containing useful summaries of the fitting procedure.\n For details, see documentation of class:`OptimizationResults`.\n '
if ((not isinstance(lpost, Posterior)) and (not isinstance(lpost, LogLikelihood))):
raise TypeError('lpost must be a subclass of Posterior or LogLikelihoood.')
newmod = lpost.model.copy()
p0 = t0
if (not (len(p0) == lpost.npar)):
raise ValueError('Parameter set t0 must be of right length for model in lpost.')
if (scipy.__version__ < '0.10.0'):
args = [neg]
else:
args = (neg,)
if (not scipy_optimize_options):
scipy_optimize_options = {}
funcval = 100.0
i = 0
while ((funcval == 100) or (funcval == 200) or (funcval == 0.0) or (not np.isfinite(funcval))):
if (i > 20):
raise RuntimeError('Fitting unsuccessful!')
t0_p = np.random.multivariate_normal(p0, np.diag((np.abs(p0) / 100.0)))
params = [getattr(newmod, name) for name in newmod.param_names]
bounds = np.array([p.bounds for p in params if (not np.any([p.tied, p.fixed]))])
if (any(((elem is not None) for elem in np.hstack(bounds))) and (self.fitmethod not in ['L-BFGS-B', 'TNC', 'SLSQP'])):
logging.warning((('Fitting method %s ' % self.fitmethod) + 'cannot incorporate the bounds you set!'))
if (any(((elem is not None) for elem in np.hstack(bounds))) or (self.fitmethod not in ['L-BFGS-B', 'TNC', 'SLSQP'])):
use_bounds = False
else:
use_bounds = True
if self.max_post:
if use_bounds:
opt = scipy.optimize.minimize(lpost, t0_p, method=self.fitmethod, args=args, tol=1e-10, bounds=bounds, **scipy_optimize_options)
else:
opt = scipy.optimize.minimize(lpost, t0_p, method=self.fitmethod, args=args, tol=1e-10, **scipy_optimize_options)
elif isinstance(lpost, Posterior):
if use_bounds:
opt = scipy.optimize.minimize(lpost.loglikelihood, t0_p, method=self.fitmethod, args=args, tol=1e-10, bounds=bounds, **scipy_optimize_options)
else:
opt = scipy.optimize.minimize(lpost.loglikelihood, t0_p, method=self.fitmethod, args=args, tol=1e-10, **scipy_optimize_options)
elif isinstance(lpost, LogLikelihood):
if use_bounds:
opt = scipy.optimize.minimize(lpost.evaluate, t0_p, method=self.fitmethod, args=args, tol=1e-10, **scipy_optimize_options)
else:
opt = scipy.optimize.minimize(lpost.evaluate, t0_p, method=self.fitmethod, args=args, tol=1e-10, **scipy_optimize_options)
funcval = opt.fun
if (np.isclose(opt.fun, logmin) or np.isclose(opt.fun, (2 * logmin))):
funcval = 100
i += 1
res = OptimizationResults(lpost, opt, neg=neg)
return res | def fit(self, lpost, t0, neg=True, scipy_optimize_options=None):
'\n Do either a Maximum-A-Posteriori (MAP) or Maximum Likelihood (ML)\n fit to the data.\n\n MAP fits include priors, ML fits do not.\n\n Parameters\n -----------\n lpost : :class:`Posterior` (or subclass) instance\n and instance of class :class:`Posterior` or one of its subclasses\n that defines the function to be minimized (either in ``loglikelihood``\n or ``logposterior``)\n\n t0 : {``list`` | ``numpy.ndarray``}\n List/array with set of initial parameters\n\n neg : bool, optional, default ``True``\n Boolean to be passed to ``lpost``, setting whether to use the\n *negative* posterior or the *negative* log-likelihood. Useful for\n optimization routines, which are generally defined as *minimization* routines.\n\n scipy_optimize_options : dict, optional, default ``None``\n A dictionary with options for ``scipy.optimize.minimize``,\n directly passed on as keyword arguments.\n\n Returns\n --------\n res : :class:`OptimizationResults` object\n An object containing useful summaries of the fitting procedure.\n For details, see documentation of class:`OptimizationResults`.\n '
if ((not isinstance(lpost, Posterior)) and (not isinstance(lpost, LogLikelihood))):
raise TypeError('lpost must be a subclass of Posterior or LogLikelihoood.')
newmod = lpost.model.copy()
p0 = t0
if (not (len(p0) == lpost.npar)):
raise ValueError('Parameter set t0 must be of right length for model in lpost.')
if (scipy.__version__ < '0.10.0'):
args = [neg]
else:
args = (neg,)
if (not scipy_optimize_options):
scipy_optimize_options = {}
funcval = 100.0
i = 0
while ((funcval == 100) or (funcval == 200) or (funcval == 0.0) or (not np.isfinite(funcval))):
if (i > 20):
raise RuntimeError('Fitting unsuccessful!')
t0_p = np.random.multivariate_normal(p0, np.diag((np.abs(p0) / 100.0)))
params = [getattr(newmod, name) for name in newmod.param_names]
bounds = np.array([p.bounds for p in params if (not np.any([p.tied, p.fixed]))])
if (any(((elem is not None) for elem in np.hstack(bounds))) and (self.fitmethod not in ['L-BFGS-B', 'TNC', 'SLSQP'])):
logging.warning((('Fitting method %s ' % self.fitmethod) + 'cannot incorporate the bounds you set!'))
if (any(((elem is not None) for elem in np.hstack(bounds))) or (self.fitmethod not in ['L-BFGS-B', 'TNC', 'SLSQP'])):
use_bounds = False
else:
use_bounds = True
if self.max_post:
if use_bounds:
opt = scipy.optimize.minimize(lpost, t0_p, method=self.fitmethod, args=args, tol=1e-10, bounds=bounds, **scipy_optimize_options)
else:
opt = scipy.optimize.minimize(lpost, t0_p, method=self.fitmethod, args=args, tol=1e-10, **scipy_optimize_options)
elif isinstance(lpost, Posterior):
if use_bounds:
opt = scipy.optimize.minimize(lpost.loglikelihood, t0_p, method=self.fitmethod, args=args, tol=1e-10, bounds=bounds, **scipy_optimize_options)
else:
opt = scipy.optimize.minimize(lpost.loglikelihood, t0_p, method=self.fitmethod, args=args, tol=1e-10, **scipy_optimize_options)
elif isinstance(lpost, LogLikelihood):
if use_bounds:
opt = scipy.optimize.minimize(lpost.evaluate, t0_p, method=self.fitmethod, args=args, tol=1e-10, **scipy_optimize_options)
else:
opt = scipy.optimize.minimize(lpost.evaluate, t0_p, method=self.fitmethod, args=args, tol=1e-10, **scipy_optimize_options)
funcval = opt.fun
if (np.isclose(opt.fun, logmin) or np.isclose(opt.fun, (2 * logmin))):
funcval = 100
i += 1
res = OptimizationResults(lpost, opt, neg=neg)
return res<|docstring|>Do either a Maximum-A-Posteriori (MAP) or Maximum Likelihood (ML)
fit to the data.
MAP fits include priors, ML fits do not.
Parameters
-----------
lpost : :class:`Posterior` (or subclass) instance
an instance of class :class:`Posterior` or one of its subclasses
that defines the function to be minimized (either in ``loglikelihood``
or ``logposterior``)
t0 : {``list`` | ``numpy.ndarray``}
List/array with set of initial parameters
neg : bool, optional, default ``True``
Boolean to be passed to ``lpost``, setting whether to use the
*negative* posterior or the *negative* log-likelihood. Useful for
optimization routines, which are generally defined as *minimization* routines.
scipy_optimize_options : dict, optional, default ``None``
A dictionary with options for ``scipy.optimize.minimize``,
directly passed on as keyword arguments.
Returns
--------
res : :class:`OptimizationResults` object
An object containing useful summaries of the fitting procedure.
For details, see the documentation of :class:`OptimizationResults`.<|endoftext|>
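A standalone sketch of the optimizer call pattern that fit wraps (only L-BFGS-B, TNC and SLSQP accept bounds); the toy likelihood, starting values and bounds are invented for illustration and do not use the classes above:

import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(0)
data = rng.normal(loc=1.0, scale=2.0, size=500)

def neg_loglike(pars, neg=True):
    # toy Gaussian likelihood; `neg` mirrors the sign convention described above
    mean, sigma = pars
    val = 0.5 * np.sum(((data - mean) / sigma) ** 2) + data.size * np.log(sigma)
    return val if neg else -val

t0 = [0.5, 1.5]
opt = minimize(neg_loglike, t0, args=(True,), method="L-BFGS-B",
               bounds=[(None, None), (1e-5, None)], tol=1e-10)
print(opt.x, opt.fun, opt.success)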
6bf741c549b1a46f72e0667086c23812782a5d280c33dd5acbd52f66c11d9a85 | def compute_lrt(self, lpost1, t1, lpost2, t2, neg=True, max_post=False):
'\n This function computes the Likelihood Ratio Test between two\n nested models.\n\n Parameters\n ----------\n lpost1 : object of a subclass of :class:`Posterior`\n The :class:`Posterior` object for model 1\n\n t1 : iterable\n The starting parameters for model 1\n\n lpost2 : object of a subclass of :class:`Posterior`\n The :class:`Posterior` object for model 2\n\n t2 : iterable\n The starting parameters for model 2\n\n neg : bool, optional, default ``True``\n Boolean flag to decide whether to use the negative log-likelihood\n or log-posterior\n\n max_post: bool, optional, default ``False``\n If ``True``, set the internal state to do the optimization with the\n log-likelihood rather than the log-posterior.\n\n Returns\n -------\n lrt : float\n The likelihood ratio for model 2 and model 1\n\n res1 : OptimizationResults object\n Contains the result of fitting ``lpost1``\n\n res2 : OptimizationResults object\n Contains the results of fitting ``lpost2``\n\n '
self.max_post = max_post
res1 = self.fit(lpost1, t1, neg=neg)
res2 = self.fit(lpost2, t2, neg=neg)
lrt = (res1.deviance - res2.deviance)
return (lrt, res1, res2) | This function computes the Likelihood Ratio Test between two
nested models.
Parameters
----------
lpost1 : object of a subclass of :class:`Posterior`
The :class:`Posterior` object for model 1
t1 : iterable
The starting parameters for model 1
lpost2 : object of a subclass of :class:`Posterior`
The :class:`Posterior` object for model 2
t2 : iterable
The starting parameters for model 2
neg : bool, optional, default ``True``
Boolean flag to decide whether to use the negative log-likelihood
or log-posterior
max_post: bool, optional, default ``False``
If ``True``, set the internal state to do the optimization with the
log-likelihood rather than the log-posterior.
Returns
-------
lrt : float
The likelihood ratio for model 2 and model 1
res1 : OptimizationResults object
Contains the result of fitting ``lpost1``
res2 : OptimizationResults object
Contains the results of fitting ``lpost2`` | stingray/modeling/parameterestimation.py | compute_lrt | nimeshvashistha/stingray | 133 | python | def compute_lrt(self, lpost1, t1, lpost2, t2, neg=True, max_post=False):
'\n This function computes the Likelihood Ratio Test between two\n nested models.\n\n Parameters\n ----------\n lpost1 : object of a subclass of :class:`Posterior`\n The :class:`Posterior` object for model 1\n\n t1 : iterable\n The starting parameters for model 1\n\n lpost2 : object of a subclass of :class:`Posterior`\n The :class:`Posterior` object for model 2\n\n t2 : iterable\n The starting parameters for model 2\n\n neg : bool, optional, default ``True``\n Boolean flag to decide whether to use the negative log-likelihood\n or log-posterior\n\n max_post: bool, optional, default ``False``\n If ``True``, set the internal state to do the optimization with the\n log-likelihood rather than the log-posterior.\n\n Returns\n -------\n lrt : float\n The likelihood ratio for model 2 and model 1\n\n res1 : OptimizationResults object\n Contains the result of fitting ``lpost1``\n\n res2 : OptimizationResults object\n Contains the results of fitting ``lpost2``\n\n '
self.max_post = max_post
res1 = self.fit(lpost1, t1, neg=neg)
res2 = self.fit(lpost2, t2, neg=neg)
lrt = (res1.deviance - res2.deviance)
return (lrt, res1, res2) | def compute_lrt(self, lpost1, t1, lpost2, t2, neg=True, max_post=False):
'\n This function computes the Likelihood Ratio Test between two\n nested models.\n\n Parameters\n ----------\n lpost1 : object of a subclass of :class:`Posterior`\n The :class:`Posterior` object for model 1\n\n t1 : iterable\n The starting parameters for model 1\n\n lpost2 : object of a subclass of :class:`Posterior`\n The :class:`Posterior` object for model 2\n\n t2 : iterable\n The starting parameters for model 2\n\n neg : bool, optional, default ``True``\n Boolean flag to decide whether to use the negative log-likelihood\n or log-posterior\n\n max_post: bool, optional, default ``False``\n If ``True``, set the internal state to do the optimization with the\n log-likelihood rather than the log-posterior.\n\n Returns\n -------\n lrt : float\n The likelihood ratio for model 2 and model 1\n\n res1 : OptimizationResults object\n Contains the result of fitting ``lpost1``\n\n res2 : OptimizationResults object\n Contains the results of fitting ``lpost2``\n\n '
self.max_post = max_post
res1 = self.fit(lpost1, t1, neg=neg)
res2 = self.fit(lpost2, t2, neg=neg)
lrt = (res1.deviance - res2.deviance)
return (lrt, res1, res2)<|docstring|>This function computes the Likelihood Ratio Test between two
nested models.
Parameters
----------
lpost1 : object of a subclass of :class:`Posterior`
The :class:`Posterior` object for model 1
t1 : iterable
The starting parameters for model 1
lpost2 : object of a subclass of :class:`Posterior`
The :class:`Posterior` object for model 2
t2 : iterable
The starting parameters for model 2
neg : bool, optional, default ``True``
Boolean flag to decide whether to use the negative log-likelihood
or log-posterior
max_post: bool, optional, default ``False``
If ``True``, set the internal state to do the optimization with the
log-likelihood rather than the log-posterior.
Returns
-------
lrt : float
The likelihood ratio for model 2 and model 1
res1 : OptimizationResults object
Contains the result of fitting ``lpost1``
res2 : OptimizationResults object
Contains the results of fitting ``lpost2``<|endoftext|> |
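To make the likelihood ratio concrete, a sketch with invented deviances for a nested model pair; the classical chi-squared calibration shown here is only valid in the nested, non-boundary case that motivates calibrate_lrt further below:

from scipy import stats

deviance_simple, npar_simple = 1042.7, 2      # D = -2 log L for the simpler model (made up)
deviance_complex, npar_complex = 1036.1, 4    # D for the more complex model (made up)

lrt = deviance_simple - deviance_complex      # likelihood ratio as defined above
p_classical = stats.chi2.sf(lrt, df=npar_complex - npar_simple)
print(lrt, p_classical)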
5849c5a0176bbd70c8ce3ee81e16282ff4c7a7c61cde2af8873a0c326225071c | def sample(self, lpost, t0, cov=None, nwalkers=500, niter=100, burnin=100, threads=1, print_results=True, plot=False, namestr='test', pool=False):
'\n Sample the :class:`Posterior` distribution defined in ``lpost`` using MCMC.\n Here we use the ``emcee`` package, but other implementations could\n in principle be used.\n\n Parameters\n ----------\n lpost : instance of a :class:`Posterior` subclass\n and instance of class :class:`Posterior` or one of its subclasses\n that defines the function to be minimized (either in ``loglikelihood``\n or ``logposterior``)\n\n t0 : iterable\n list or array containing the starting parameters. Its length\n must match ``lpost.model.npar``.\n\n nwalkers : int, optional, default 500\n The number of walkers (chains) to use during the MCMC procedure.\n The more walkers are used, the slower the estimation will be, but\n the better the final distribution is likely to be.\n\n niter : int, optional, default 100\n The number of iterations to run the MCMC chains for. The larger this\n number, the longer the estimation will take, but the higher the\n chance that the walkers have actually converged on the true\n posterior distribution.\n\n burnin : int, optional, default 100\n The number of iterations to run the walkers before convergence is\n assumed to have occurred. This part of the chain will be discarded\n before sampling from what is then assumed to be the posterior\n distribution desired.\n\n threads : **DEPRECATED** int, optional, default 1\n The number of threads for parallelization.\n Default is ``1``, i.e. no parallelization\n With the change to the new emcee version 3, threads is\n deprecated. Use the `pool` keyword argument instead.\n This will no longer have any effect.\n\n print_results : bool, optional, default ``True``\n Boolean flag setting whether the results of the MCMC run should\n be printed to standard output. Default: True\n\n plot : bool, optional, default ``False``\n Boolean flag setting whether summary plots of the MCMC chains\n should be produced. Default: False\n\n namestr : str, optional, default ``test``\n Optional string for output file names for the plotting.\n\n pool : bool, default False\n If True, use pooling to parallelize the operation.\n\n Returns\n -------\n\n res : class:`SamplingResults` object\n An object of class :class:`SamplingResults` summarizing the\n results of the MCMC run.\n\n '
if (threads > 1):
raise DeprecationWarning("Keyword 'threads' is deprecated. Please use 'pool' instead.")
if (not can_sample):
raise ImportError("emcee not installed! Can't sample!")
ndim = len(t0)
if (cov is None):
res = self.fit(lpost, t0, neg=True)
cov = res.cov
p0 = np.array([np.random.multivariate_normal(t0, cov) for i in range(nwalkers)])
if pool:
with Pool() as pooling:
sampler = emcee.EnsembleSampler(nwalkers, ndim, lpost, args=[False], pool=pooling)
(pos, prob, state) = sampler.run_mcmc(p0, burnin)
sampler.reset()
(_, _, _) = sampler.run_mcmc(pos, niter, rstate0=state)
else:
sampler = emcee.EnsembleSampler(nwalkers, ndim, lpost, args=[False])
(pos, prob, state) = sampler.run_mcmc(p0, burnin)
sampler.reset()
(_, _, _) = sampler.run_mcmc(pos, niter, rstate0=state)
res = SamplingResults(sampler)
if print_results:
res.print_results()
if plot:
fig = res.plot_results(fig=None, save_plot=True, filename=(namestr + '_corner.pdf'))
return res | Sample the :class:`Posterior` distribution defined in ``lpost`` using MCMC.
Here we use the ``emcee`` package, but other implementations could
in principle be used.
Parameters
----------
lpost : instance of a :class:`Posterior` subclass
an instance of class :class:`Posterior` or one of its subclasses
that defines the function to be minimized (either in ``loglikelihood``
or ``logposterior``)
t0 : iterable
list or array containing the starting parameters. Its length
must match ``lpost.model.npar``.
nwalkers : int, optional, default 500
The number of walkers (chains) to use during the MCMC procedure.
The more walkers are used, the slower the estimation will be, but
the better the final distribution is likely to be.
niter : int, optional, default 100
The number of iterations to run the MCMC chains for. The larger this
number, the longer the estimation will take, but the higher the
chance that the walkers have actually converged on the true
posterior distribution.
burnin : int, optional, default 100
The number of iterations to run the walkers before convergence is
assumed to have occurred. This part of the chain will be discarded
before sampling from what is then assumed to be the posterior
distribution desired.
threads : **DEPRECATED** int, optional, default 1
The number of threads for parallelization.
Default is ``1``, i.e. no parallelization
With the change to the new emcee version 3, threads is
deprecated. Use the `pool` keyword argument instead.
This will no longer have any effect.
print_results : bool, optional, default ``True``
Boolean flag setting whether the results of the MCMC run should
be printed to standard output. Default: True
plot : bool, optional, default ``False``
Boolean flag setting whether summary plots of the MCMC chains
should be produced. Default: False
namestr : str, optional, default ``test``
Optional string for output file names for the plotting.
pool : bool, default False
If True, use pooling to parallelize the operation.
Returns
-------
res : :class:`SamplingResults` object
An object of class :class:`SamplingResults` summarizing the
results of the MCMC run. | stingray/modeling/parameterestimation.py | sample | nimeshvashistha/stingray | 133 | python | def sample(self, lpost, t0, cov=None, nwalkers=500, niter=100, burnin=100, threads=1, print_results=True, plot=False, namestr='test', pool=False):
'\n Sample the :class:`Posterior` distribution defined in ``lpost`` using MCMC.\n Here we use the ``emcee`` package, but other implementations could\n in principle be used.\n\n Parameters\n ----------\n lpost : instance of a :class:`Posterior` subclass\n and instance of class :class:`Posterior` or one of its subclasses\n that defines the function to be minimized (either in ``loglikelihood``\n or ``logposterior``)\n\n t0 : iterable\n list or array containing the starting parameters. Its length\n must match ``lpost.model.npar``.\n\n nwalkers : int, optional, default 500\n The number of walkers (chains) to use during the MCMC procedure.\n The more walkers are used, the slower the estimation will be, but\n the better the final distribution is likely to be.\n\n niter : int, optional, default 100\n The number of iterations to run the MCMC chains for. The larger this\n number, the longer the estimation will take, but the higher the\n chance that the walkers have actually converged on the true\n posterior distribution.\n\n burnin : int, optional, default 100\n The number of iterations to run the walkers before convergence is\n assumed to have occurred. This part of the chain will be discarded\n before sampling from what is then assumed to be the posterior\n distribution desired.\n\n threads : **DEPRECATED** int, optional, default 1\n The number of threads for parallelization.\n Default is ``1``, i.e. no parallelization\n With the change to the new emcee version 3, threads is\n deprecated. Use the `pool` keyword argument instead.\n This will no longer have any effect.\n\n print_results : bool, optional, default ``True``\n Boolean flag setting whether the results of the MCMC run should\n be printed to standard output. Default: True\n\n plot : bool, optional, default ``False``\n Boolean flag setting whether summary plots of the MCMC chains\n should be produced. Default: False\n\n namestr : str, optional, default ``test``\n Optional string for output file names for the plotting.\n\n pool : bool, default False\n If True, use pooling to parallelize the operation.\n\n Returns\n -------\n\n res : class:`SamplingResults` object\n An object of class :class:`SamplingResults` summarizing the\n results of the MCMC run.\n\n '
if (threads > 1):
raise DeprecationWarning("Keyword 'threads' is deprecated. Please use 'pool' instead.")
if (not can_sample):
raise ImportError("emcee not installed! Can't sample!")
ndim = len(t0)
if (cov is None):
res = self.fit(lpost, t0, neg=True)
cov = res.cov
p0 = np.array([np.random.multivariate_normal(t0, cov) for i in range(nwalkers)])
if pool:
with Pool() as pooling:
sampler = emcee.EnsembleSampler(nwalkers, ndim, lpost, args=[False], pool=pooling)
(pos, prob, state) = sampler.run_mcmc(p0, burnin)
sampler.reset()
(_, _, _) = sampler.run_mcmc(pos, niter, rstate0=state)
else:
sampler = emcee.EnsembleSampler(nwalkers, ndim, lpost, args=[False])
(pos, prob, state) = sampler.run_mcmc(p0, burnin)
sampler.reset()
(_, _, _) = sampler.run_mcmc(pos, niter, rstate0=state)
res = SamplingResults(sampler)
if print_results:
res.print_results()
if plot:
fig = res.plot_results(fig=None, save_plot=True, filename=(namestr + '_corner.pdf'))
return res | def sample(self, lpost, t0, cov=None, nwalkers=500, niter=100, burnin=100, threads=1, print_results=True, plot=False, namestr='test', pool=False):
'\n Sample the :class:`Posterior` distribution defined in ``lpost`` using MCMC.\n Here we use the ``emcee`` package, but other implementations could\n in principle be used.\n\n Parameters\n ----------\n lpost : instance of a :class:`Posterior` subclass\n and instance of class :class:`Posterior` or one of its subclasses\n that defines the function to be minimized (either in ``loglikelihood``\n or ``logposterior``)\n\n t0 : iterable\n list or array containing the starting parameters. Its length\n must match ``lpost.model.npar``.\n\n nwalkers : int, optional, default 500\n The number of walkers (chains) to use during the MCMC procedure.\n The more walkers are used, the slower the estimation will be, but\n the better the final distribution is likely to be.\n\n niter : int, optional, default 100\n The number of iterations to run the MCMC chains for. The larger this\n number, the longer the estimation will take, but the higher the\n chance that the walkers have actually converged on the true\n posterior distribution.\n\n burnin : int, optional, default 100\n The number of iterations to run the walkers before convergence is\n assumed to have occurred. This part of the chain will be discarded\n before sampling from what is then assumed to be the posterior\n distribution desired.\n\n threads : **DEPRECATED** int, optional, default 1\n The number of threads for parallelization.\n Default is ``1``, i.e. no parallelization\n With the change to the new emcee version 3, threads is\n deprecated. Use the `pool` keyword argument instead.\n This will no longer have any effect.\n\n print_results : bool, optional, default ``True``\n Boolean flag setting whether the results of the MCMC run should\n be printed to standard output. Default: True\n\n plot : bool, optional, default ``False``\n Boolean flag setting whether summary plots of the MCMC chains\n should be produced. Default: False\n\n namestr : str, optional, default ``test``\n Optional string for output file names for the plotting.\n\n pool : bool, default False\n If True, use pooling to parallelize the operation.\n\n Returns\n -------\n\n res : class:`SamplingResults` object\n An object of class :class:`SamplingResults` summarizing the\n results of the MCMC run.\n\n '
if (threads > 1):
raise DeprecationWarning("Keyword 'threads' is deprecated. Please use 'pool' instead.")
if (not can_sample):
raise ImportError("emcee not installed! Can't sample!")
ndim = len(t0)
if (cov is None):
res = self.fit(lpost, t0, neg=True)
cov = res.cov
p0 = np.array([np.random.multivariate_normal(t0, cov) for i in range(nwalkers)])
if pool:
with Pool() as pooling:
sampler = emcee.EnsembleSampler(nwalkers, ndim, lpost, args=[False], pool=pooling)
(pos, prob, state) = sampler.run_mcmc(p0, burnin)
sampler.reset()
(_, _, _) = sampler.run_mcmc(pos, niter, rstate0=state)
else:
sampler = emcee.EnsembleSampler(nwalkers, ndim, lpost, args=[False])
(pos, prob, state) = sampler.run_mcmc(p0, burnin)
sampler.reset()
(_, _, _) = sampler.run_mcmc(pos, niter, rstate0=state)
res = SamplingResults(sampler)
if print_results:
res.print_results()
if plot:
fig = res.plot_results(fig=None, save_plot=True, filename=(namestr + '_corner.pdf'))
return res<|docstring|>Sample the :class:`Posterior` distribution defined in ``lpost`` using MCMC.
Here we use the ``emcee`` package, but other implementations could
in principle be used.
Parameters
----------
lpost : instance of a :class:`Posterior` subclass
an instance of class :class:`Posterior` or one of its subclasses
that defines the function to be minimized (either in ``loglikelihood``
or ``logposterior``)
t0 : iterable
list or array containing the starting parameters. Its length
must match ``lpost.model.npar``.
nwalkers : int, optional, default 500
The number of walkers (chains) to use during the MCMC procedure.
The more walkers are used, the slower the estimation will be, but
the better the final distribution is likely to be.
niter : int, optional, default 100
The number of iterations to run the MCMC chains for. The larger this
number, the longer the estimation will take, but the higher the
chance that the walkers have actually converged on the true
posterior distribution.
burnin : int, optional, default 100
The number of iterations to run the walkers before convergence is
assumed to have occurred. This part of the chain will be discarded
before sampling from what is then assumed to be the posterior
distribution desired.
threads : **DEPRECATED** int, optional, default 1
The number of threads for parallelization.
Default is ``1``, i.e. no parallelization
With the change to the new emcee version 3, threads is
deprecated. Use the `pool` keyword argument instead.
This will no longer have any effect.
print_results : bool, optional, default ``True``
Boolean flag setting whether the results of the MCMC run should
be printed to standard output. Default: True
plot : bool, optional, default ``False``
Boolean flag setting whether summary plots of the MCMC chains
should be produced. Default: False
namestr : str, optional, default ``test``
Optional string for output file names for the plotting.
pool : bool, default False
If True, use pooling to parallelize the operation.
Returns
-------
res : :class:`SamplingResults` object
An object of class :class:`SamplingResults` summarizing the
results of the MCMC run.<|endoftext|> |
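A minimal emcee 3 sketch of the burn-in / reset / production pattern used above, with a toy two-dimensional Gaussian log-probability in place of a Posterior object; walker counts and step numbers are arbitrary:

import numpy as np
import emcee

def log_prob(theta):
    return -0.5 * np.sum(theta ** 2)          # toy log-posterior

ndim, nwalkers, burnin, niter = 2, 32, 200, 500
p0 = np.random.default_rng(1).normal(size=(nwalkers, ndim))

sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
state = sampler.run_mcmc(p0, burnin)          # burn-in phase
sampler.reset()
sampler.run_mcmc(state, niter)                # production run
samples = sampler.get_chain(flat=True)        # shape (nwalkers * niter, ndim)
print(samples.mean(axis=0), samples.std(axis=0))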
e55394ec781e30ebeb79b3ea7dfff04830426c01fcfffbbfc1fe21dd0f9284b8 | def _generate_model(self, lpost, pars):
'\n Helper function that generates a fake PSD similar to the\n one in the data, but with different parameters.\n\n Parameters\n ----------\n lpost : instance of a :class:`Posterior` or :class:`LogLikelihood` subclass\n The object containing the relevant information about the\n data and the model\n\n pars : iterable\n A list of parameters to be passed to ``lpost.model`` in oder\n to generate a model data set.\n\n Returns:\n --------\n model_data : numpy.ndarray\n An array of model values for each bin in ``lpost.x``\n\n '
assert (isinstance(lpost, LogLikelihood) or isinstance(lpost, Posterior)), 'lpost must be of type LogLikelihood or Posterior or one of its subclasses!'
assert (len(pars) == lpost.npar), ('pars must be a list of %i parameters' % lpost.npar)
m = lpost.model
_fitter_to_model_params(m, pars)
model_data = lpost.model(lpost.x)
return model_data | Helper function that generates a fake PSD similar to the
one in the data, but with different parameters.
Parameters
----------
lpost : instance of a :class:`Posterior` or :class:`LogLikelihood` subclass
The object containing the relevant information about the
data and the model
pars : iterable
A list of parameters to be passed to ``lpost.model`` in order
to generate a model data set.
Returns
--------
model_data : numpy.ndarray
An array of model values for each bin in ``lpost.x`` | stingray/modeling/parameterestimation.py | _generate_model | nimeshvashistha/stingray | 133 | python | def _generate_model(self, lpost, pars):
'\n Helper function that generates a fake PSD similar to the\n one in the data, but with different parameters.\n\n Parameters\n ----------\n lpost : instance of a :class:`Posterior` or :class:`LogLikelihood` subclass\n The object containing the relevant information about the\n data and the model\n\n pars : iterable\n A list of parameters to be passed to ``lpost.model`` in oder\n to generate a model data set.\n\n Returns:\n --------\n model_data : numpy.ndarray\n An array of model values for each bin in ``lpost.x``\n\n '
assert (isinstance(lpost, LogLikelihood) or isinstance(lpost, Posterior)), 'lpost must be of type LogLikelihood or Posterior or one of its subclasses!'
assert (len(pars) == lpost.npar), ('pars must be a list of %i parameters' % lpost.npar)
m = lpost.model
_fitter_to_model_params(m, pars)
model_data = lpost.model(lpost.x)
return model_data | def _generate_model(self, lpost, pars):
'\n Helper function that generates a fake PSD similar to the\n one in the data, but with different parameters.\n\n Parameters\n ----------\n lpost : instance of a :class:`Posterior` or :class:`LogLikelihood` subclass\n The object containing the relevant information about the\n data and the model\n\n pars : iterable\n A list of parameters to be passed to ``lpost.model`` in oder\n to generate a model data set.\n\n Returns:\n --------\n model_data : numpy.ndarray\n An array of model values for each bin in ``lpost.x``\n\n '
assert (isinstance(lpost, LogLikelihood) or isinstance(lpost, Posterior)), 'lpost must be of type LogLikelihood or Posterior or one of its subclasses!'
assert (len(pars) == lpost.npar), ('pars must be a list of %i parameters' % lpost.npar)
m = lpost.model
_fitter_to_model_params(m, pars)
model_data = lpost.model(lpost.x)
return model_data<|docstring|>Helper function that generates a fake PSD similar to the
one in the data, but with different parameters.
Parameters
----------
lpost : instance of a :class:`Posterior` or :class:`LogLikelihood` subclass
The object containing the relevant information about the
data and the model
pars : iterable
A list of parameters to be passed to ``lpost.model`` in order
to generate a model data set.
Returns
--------
model_data : numpy.ndarray
An array of model values for each bin in ``lpost.x``<|endoftext|> |
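The fake-data idea behind _generate_model can be illustrated without the class: evaluate a model spectrum and multiply by the chi-squared (2 dof, i.e. exponential) scatter expected for a periodogram; the model shape and numbers are made up:

import numpy as np

rng = np.random.default_rng(7)
freq = np.linspace(0.01, 10.0, 500)
model_psd = 2.0 * freq ** (-1.5) + 0.5                              # toy power law plus constant noise level
fake_psd = model_psd * rng.exponential(scale=1.0, size=freq.size)   # periodogram-like scatter around the model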
d8929f8b77c16e860ddbd7d569057edd556c97458256b53912268859935e4d15 | @staticmethod
def _compute_pvalue(obs_val, sim):
'\n Compute the p-value given an observed value of a test statistic\n and some simulations of that same test statistic.\n\n Parameters\n ----------\n obs_value : float\n The observed value of the test statistic in question\n\n sim: iterable\n A list or array of simulated values for the test statistic\n\n Returns\n -------\n pval : float in range [0, 1]\n The p-value for the test statistic given the simulations.\n\n '
sim = np.array(sim)
ntail = sim[(sim > obs_val)].shape[0]
pval = (float(ntail) / float(sim.shape[0]))
return pval | Compute the p-value given an observed value of a test statistic
and some simulations of that same test statistic.
Parameters
----------
obs_val : float
The observed value of the test statistic in question
sim: iterable
A list or array of simulated values for the test statistic
Returns
-------
pval : float in range [0, 1]
The p-value for the test statistic given the simulations. | stingray/modeling/parameterestimation.py | _compute_pvalue | nimeshvashistha/stingray | 133 | python | @staticmethod
def _compute_pvalue(obs_val, sim):
'\n Compute the p-value given an observed value of a test statistic\n and some simulations of that same test statistic.\n\n Parameters\n ----------\n obs_value : float\n The observed value of the test statistic in question\n\n sim: iterable\n A list or array of simulated values for the test statistic\n\n Returns\n -------\n pval : float in range [0, 1]\n The p-value for the test statistic given the simulations.\n\n '
sim = np.array(sim)
ntail = sim[(sim > obs_val)].shape[0]
pval = (float(ntail) / float(sim.shape[0]))
return pval | @staticmethod
def _compute_pvalue(obs_val, sim):
'\n Compute the p-value given an observed value of a test statistic\n and some simulations of that same test statistic.\n\n Parameters\n ----------\n obs_value : float\n The observed value of the test statistic in question\n\n sim: iterable\n A list or array of simulated values for the test statistic\n\n Returns\n -------\n pval : float in range [0, 1]\n The p-value for the test statistic given the simulations.\n\n '
sim = np.array(sim)
ntail = sim[(sim > obs_val)].shape[0]
pval = (float(ntail) / float(sim.shape[0]))
return pval<|docstring|>Compute the p-value given an observed value of a test statistic
and some simulations of that same test statistic.
Parameters
----------
obs_val : float
The observed value of the test statistic in question
sim: iterable
A list or array of simulated values for the test statistic
Returns
-------
pval : float in range [0, 1]
The p-value for the test statistic given the simulations.<|endoftext|> |
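The p-value above is simply the tail fraction of simulated statistics exceeding the observed one; a short sketch with invented numbers:

import numpy as np

sim = np.random.default_rng(3).chisquare(df=2, size=10000)   # simulated test statistics
obs_val = 7.3                                                # observed value (made up)
pval = float((sim > obs_val).sum()) / float(sim.size)        # fraction of simulations above the observation
print(pval)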
ece94194c86392914bec07b1f0563b365edee6f083b60ef6fbb003fb42252bc1 | def simulate_lrts(self, s_all, lpost1, t1, lpost2, t2, max_post=True, seed=None):
'\n Simulate likelihood ratios.\n For details, see definitions in the subclasses that implement this\n task.\n '
raise NotImplementedError('The behaviour of `simulate_lrts` should be defined in the subclass appropriate for your problem, not in this super class!') | Simulate likelihood ratios.
For details, see definitions in the subclasses that implement this
task. | stingray/modeling/parameterestimation.py | simulate_lrts | nimeshvashistha/stingray | 133 | python | def simulate_lrts(self, s_all, lpost1, t1, lpost2, t2, max_post=True, seed=None):
'\n Simulate likelihood ratios.\n For details, see definitions in the subclasses that implement this\n task.\n '
raise NotImplementedError('The behaviour of `simulate_lrts` should be defined in the subclass appropriate for your problem, not in this super class!') | def simulate_lrts(self, s_all, lpost1, t1, lpost2, t2, max_post=True, seed=None):
'\n Simulate likelihood ratios.\n For details, see definitions in the subclasses that implement this\n task.\n '
raise NotImplementedError('The behaviour of `simulate_lrts` should be defined in the subclass appropriate for your problem, not in this super class!')<|docstring|>Simulate likelihood ratios.
For details, see definitions in the subclasses that implement this
task.<|endoftext|> |
f603631119a631a834427079e5bcdc8eb3a79b04e8e3eafa6aed104f55d387cc | def calibrate_lrt(self, lpost1, t1, lpost2, t2, sample=None, neg=True, max_post=False, nsim=1000, niter=200, nwalkers=500, burnin=200, namestr='test', seed=None):
"Calibrate the outcome of a Likelihood Ratio Test via MCMC.\n\n In order to compare models via likelihood ratio test, one generally\n aims to compute a p-value for the null hypothesis (generally the\n simpler model). There are two special cases where the theoretical\n distribution used to compute that p-value analytically given the\n observed likelihood ratio (a chi-square distribution) is not\n applicable:\n\n * the models are not nested (i.e. Model 1 is not a special, simpler\n case of Model 2),\n * the parameter values fixed in Model 2 to retrieve Model 1 are at the\n edges of parameter space (e.g. if one must set, say, an amplitude to\n zero in order to remove a component in the more complex model, and\n negative amplitudes are excluded a priori)\n\n In these cases, the observed likelihood ratio must be calibrated via\n simulations of the simpler model (Model 1), using MCMC to take into\n account the uncertainty in the parameters. This function does\n exactly that: it computes the likelihood ratio for the observed data,\n and produces simulations to calibrate the likelihood ratio and\n compute a p-value for observing the data under the assumption that\n Model 1 istrue.\n\n If ``max_post=True``, the code will use MCMC to sample the posterior\n of the parameters and simulate fake data from there.\n\n If ``max_post=False``, the code will use the covariance matrix derived\n from the fit to simulate data sets for comparison.\n\n Parameters\n ----------\n lpost1 : object of a subclass of :class:`Posterior`\n The :class:`Posterior` object for model 1\n\n t1 : iterable\n The starting parameters for model 1\n\n lpost2 : object of a subclass of :class:`Posterior`\n The :class:`Posterior` object for model 2\n\n t2 : iterable\n The starting parameters for model 2\n\n neg : bool, optional, default ``True``\n Boolean flag to decide whether to use the negative\n log-likelihood or log-posterior\n\n max_post: bool, optional, default ``False``\n If ``True``, set the internal state to do the optimization with the\n log-likelihood rather than the log-posterior.\n\n Returns\n -------\n pvalue : float [0,1]\n p-value 'n stuff\n "
(lrt_obs, res1, res2) = self.compute_lrt(lpost1, t1, lpost2, t2, neg=neg, max_post=max_post)
rng = np.random.RandomState(seed)
if (sample is None):
if (not max_post):
mvn = scipy.stats.multivariate_normal(mean=res1.p_opt, cov=res1.cov, seed=seed)
s_all = mvn.rvs(size=nsim)
if (lpost1.npar == 1):
s_all = np.atleast_2d(s_all).T
else:
s_mcmc = self.sample(lpost1, res1.p_opt, cov=res1.cov, nwalkers=nwalkers, niter=niter, burnin=burnin, namestr=namestr)
s_all = s_mcmc.samples[rng.choice(s_mcmc.samples.shape[0], nsim, replace=False)]
else:
s_all = sample[rng.choice(sample.shape[0], nsim, replace=False)]
lrt_sim = self.simulate_lrts(s_all, lpost1, t1, lpost2, t2, seed=seed)
pval = ParameterEstimation._compute_pvalue(lrt_obs, lrt_sim)
return pval | Calibrate the outcome of a Likelihood Ratio Test via MCMC.
In order to compare models via likelihood ratio test, one generally
aims to compute a p-value for the null hypothesis (generally the
simpler model). There are two special cases where the theoretical
distribution used to compute that p-value analytically given the
observed likelihood ratio (a chi-square distribution) is not
applicable:
* the models are not nested (i.e. Model 1 is not a special, simpler
case of Model 2),
* the parameter values fixed in Model 2 to retrieve Model 1 are at the
edges of parameter space (e.g. if one must set, say, an amplitude to
zero in order to remove a component in the more complex model, and
negative amplitudes are excluded a priori)
In these cases, the observed likelihood ratio must be calibrated via
simulations of the simpler model (Model 1), using MCMC to take into
account the uncertainty in the parameters. This function does
exactly that: it computes the likelihood ratio for the observed data,
and produces simulations to calibrate the likelihood ratio and
compute a p-value for observing the data under the assumption that
Model 1 is true.
If ``max_post=True``, the code will use MCMC to sample the posterior
of the parameters and simulate fake data from there.
If ``max_post=False``, the code will use the covariance matrix derived
from the fit to simulate data sets for comparison.
Parameters
----------
lpost1 : object of a subclass of :class:`Posterior`
The :class:`Posterior` object for model 1
t1 : iterable
The starting parameters for model 1
lpost2 : object of a subclass of :class:`Posterior`
The :class:`Posterior` object for model 2
t2 : iterable
The starting parameters for model 2
neg : bool, optional, default ``True``
Boolean flag to decide whether to use the negative
log-likelihood or log-posterior
max_post: bool, optional, default ``False``
If ``True``, set the internal state to do the optimization with the
log-likelihood rather than the log-posterior.
Returns
-------
pvalue : float [0,1]
The p-value of the observed likelihood ratio, calibrated against the simulated likelihood ratios. | stingray/modeling/parameterestimation.py | calibrate_lrt | nimeshvashistha/stingray | 133 | python | def calibrate_lrt(self, lpost1, t1, lpost2, t2, sample=None, neg=True, max_post=False, nsim=1000, niter=200, nwalkers=500, burnin=200, namestr='test', seed=None):
"Calibrate the outcome of a Likelihood Ratio Test via MCMC.\n\n In order to compare models via likelihood ratio test, one generally\n aims to compute a p-value for the null hypothesis (generally the\n simpler model). There are two special cases where the theoretical\n distribution used to compute that p-value analytically given the\n observed likelihood ratio (a chi-square distribution) is not\n applicable:\n\n * the models are not nested (i.e. Model 1 is not a special, simpler\n case of Model 2),\n * the parameter values fixed in Model 2 to retrieve Model 1 are at the\n edges of parameter space (e.g. if one must set, say, an amplitude to\n zero in order to remove a component in the more complex model, and\n negative amplitudes are excluded a priori)\n\n In these cases, the observed likelihood ratio must be calibrated via\n simulations of the simpler model (Model 1), using MCMC to take into\n account the uncertainty in the parameters. This function does\n exactly that: it computes the likelihood ratio for the observed data,\n and produces simulations to calibrate the likelihood ratio and\n compute a p-value for observing the data under the assumption that\n Model 1 istrue.\n\n If ``max_post=True``, the code will use MCMC to sample the posterior\n of the parameters and simulate fake data from there.\n\n If ``max_post=False``, the code will use the covariance matrix derived\n from the fit to simulate data sets for comparison.\n\n Parameters\n ----------\n lpost1 : object of a subclass of :class:`Posterior`\n The :class:`Posterior` object for model 1\n\n t1 : iterable\n The starting parameters for model 1\n\n lpost2 : object of a subclass of :class:`Posterior`\n The :class:`Posterior` object for model 2\n\n t2 : iterable\n The starting parameters for model 2\n\n neg : bool, optional, default ``True``\n Boolean flag to decide whether to use the negative\n log-likelihood or log-posterior\n\n max_post: bool, optional, default ``False``\n If ``True``, set the internal state to do the optimization with the\n log-likelihood rather than the log-posterior.\n\n Returns\n -------\n pvalue : float [0,1]\n p-value 'n stuff\n "
(lrt_obs, res1, res2) = self.compute_lrt(lpost1, t1, lpost2, t2, neg=neg, max_post=max_post)
rng = np.random.RandomState(seed)
if (sample is None):
if (not max_post):
mvn = scipy.stats.multivariate_normal(mean=res1.p_opt, cov=res1.cov, seed=seed)
s_all = mvn.rvs(size=nsim)
if (lpost1.npar == 1):
s_all = np.atleast_2d(s_all).T
else:
s_mcmc = self.sample(lpost1, res1.p_opt, cov=res1.cov, nwalkers=nwalkers, niter=niter, burnin=burnin, namestr=namestr)
s_all = s_mcmc.samples[rng.choice(s_mcmc.samples.shape[0], nsim, replace=False)]
else:
s_all = sample[rng.choice(sample.shape[0], nsim, replace=False)]
lrt_sim = self.simulate_lrts(s_all, lpost1, t1, lpost2, t2, seed=seed)
pval = ParameterEstimation._compute_pvalue(lrt_obs, lrt_sim)
return pval | def calibrate_lrt(self, lpost1, t1, lpost2, t2, sample=None, neg=True, max_post=False, nsim=1000, niter=200, nwalkers=500, burnin=200, namestr='test', seed=None):
"Calibrate the outcome of a Likelihood Ratio Test via MCMC.\n\n In order to compare models via likelihood ratio test, one generally\n aims to compute a p-value for the null hypothesis (generally the\n simpler model). There are two special cases where the theoretical\n distribution used to compute that p-value analytically given the\n observed likelihood ratio (a chi-square distribution) is not\n applicable:\n\n * the models are not nested (i.e. Model 1 is not a special, simpler\n case of Model 2),\n * the parameter values fixed in Model 2 to retrieve Model 1 are at the\n edges of parameter space (e.g. if one must set, say, an amplitude to\n zero in order to remove a component in the more complex model, and\n negative amplitudes are excluded a priori)\n\n In these cases, the observed likelihood ratio must be calibrated via\n simulations of the simpler model (Model 1), using MCMC to take into\n account the uncertainty in the parameters. This function does\n exactly that: it computes the likelihood ratio for the observed data,\n and produces simulations to calibrate the likelihood ratio and\n compute a p-value for observing the data under the assumption that\n Model 1 istrue.\n\n If ``max_post=True``, the code will use MCMC to sample the posterior\n of the parameters and simulate fake data from there.\n\n If ``max_post=False``, the code will use the covariance matrix derived\n from the fit to simulate data sets for comparison.\n\n Parameters\n ----------\n lpost1 : object of a subclass of :class:`Posterior`\n The :class:`Posterior` object for model 1\n\n t1 : iterable\n The starting parameters for model 1\n\n lpost2 : object of a subclass of :class:`Posterior`\n The :class:`Posterior` object for model 2\n\n t2 : iterable\n The starting parameters for model 2\n\n neg : bool, optional, default ``True``\n Boolean flag to decide whether to use the negative\n log-likelihood or log-posterior\n\n max_post: bool, optional, default ``False``\n If ``True``, set the internal state to do the optimization with the\n log-likelihood rather than the log-posterior.\n\n Returns\n -------\n pvalue : float [0,1]\n p-value 'n stuff\n "
(lrt_obs, res1, res2) = self.compute_lrt(lpost1, t1, lpost2, t2, neg=neg, max_post=max_post)
rng = np.random.RandomState(seed)
if (sample is None):
if (not max_post):
mvn = scipy.stats.multivariate_normal(mean=res1.p_opt, cov=res1.cov, seed=seed)
s_all = mvn.rvs(size=nsim)
if (lpost1.npar == 1):
s_all = np.atleast_2d(s_all).T
else:
s_mcmc = self.sample(lpost1, res1.p_opt, cov=res1.cov, nwalkers=nwalkers, niter=niter, burnin=burnin, namestr=namestr)
s_all = s_mcmc.samples[rng.choice(s_mcmc.samples.shape[0], nsim, replace=False)]
else:
s_all = sample[rng.choice(sample.shape[0], nsim, replace=False)]
lrt_sim = self.simulate_lrts(s_all, lpost1, t1, lpost2, t2, seed=seed)
pval = ParameterEstimation._compute_pvalue(lrt_obs, lrt_sim)
return pval<|docstring|>Calibrate the outcome of a Likelihood Ratio Test via MCMC.
In order to compare models via likelihood ratio test, one generally
aims to compute a p-value for the null hypothesis (generally the
simpler model). There are two special cases where the theoretical
distribution used to compute that p-value analytically given the
observed likelihood ratio (a chi-square distribution) is not
applicable:
* the models are not nested (i.e. Model 1 is not a special, simpler
case of Model 2),
* the parameter values fixed in Model 2 to retrieve Model 1 are at the
edges of parameter space (e.g. if one must set, say, an amplitude to
zero in order to remove a component in the more complex model, and
negative amplitudes are excluded a priori)
In these cases, the observed likelihood ratio must be calibrated via
simulations of the simpler model (Model 1), using MCMC to take into
account the uncertainty in the parameters. This function does
exactly that: it computes the likelihood ratio for the observed data,
and produces simulations to calibrate the likelihood ratio and
compute a p-value for observing the data under the assumption that
Model 1 is true.
If ``max_post=True``, the code will use MCMC to sample the posterior
of the parameters and simulate fake data from there.
If ``max_post=False``, the code will use the covariance matrix derived
from the fit to simulate data sets for comparison.
Parameters
----------
lpost1 : object of a subclass of :class:`Posterior`
The :class:`Posterior` object for model 1
t1 : iterable
The starting parameters for model 1
lpost2 : object of a subclass of :class:`Posterior`
The :class:`Posterior` object for model 2
t2 : iterable
The starting parameters for model 2
neg : bool, optional, default ``True``
Boolean flag to decide whether to use the negative
log-likelihood or log-posterior
max_post: bool, optional, default ``False``
If ``True``, set the internal state to do the optimization with the
log-likelihood rather than the log-posterior.
Returns
-------
pvalue : float [0,1]
The p-value of the observed likelihood ratio, calibrated against the simulated likelihood ratios.<|endoftext|>
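A hedged usage sketch of the calibration workflow documented above. It assumes stingray and astropy are installed; the toy light curve, the astropy model components, and the starting parameter values are all illustrative choices, not something specified in this file:

import numpy as np
from astropy.modeling import models
from stingray import Lightcurve, Powerspectrum
from stingray.modeling import PSDPosterior, PSDParEst

# toy periodogram standing in for real data
rng = np.random.RandomState(100)
time = np.arange(0, 100, 0.01)
lc = Lightcurve(time, rng.poisson(100, size=time.size), dt=0.01)
ps = Powerspectrum(lc, norm="leahy")

model1 = models.PowerLaw1D() + models.Const1D()                        # null hypothesis
model2 = models.PowerLaw1D() + models.Lorentz1D() + models.Const1D()   # adds a QPO component

lpost1 = PSDPosterior(ps.freq, ps.power, model1, m=ps.m)
lpost2 = PSDPosterior(ps.freq, ps.power, model2, m=ps.m)
# for max_post=True, priors would have to be attached to lpost1.logprior / lpost2.logprior

parest = PSDParEst(ps)
pval = parest.calibrate_lrt(lpost1, [2.0, 1.0, 2.0, 0.5],
                            lpost2, [2.0, 1.0, 2.0, 1.0, 1.0, 0.1, 0.5],
                            max_post=False, nsim=100, seed=100)
print(pval)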
f05a2040d651f68acc0ffab9ac780d69f32fdc684293ab109a9ffcc458597ced | def _check_convergence(self, sampler):
'\n Compute common statistics for convergence of the MCMC\n chains. While you can never be completely sure that your chains\n converged, these present reasonable heuristics to give an\n indication whether convergence is very far off or reasonably close.\n\n Currently implemented are the autocorrelation time [#]_ and the\n Gelman-Rubin convergence criterion [#]_.\n\n Parameters\n ----------\n sampler : an ``emcee.EnsembleSampler`` object\n\n References\n ----------\n .. [#] https://arxiv.org/abs/1202.3665\n .. [#] https://projecteuclid.org/euclid.ss/1177011136\n '
try:
self.acor = sampler.get_autocorr_time()
except emcee.autocorr.AutocorrError:
self.log.info('Chains too short to compute autocorrelation lengths.')
self.rhat = self._compute_rhat(sampler) | Compute common statistics for convergence of the MCMC
chains. While you can never be completely sure that your chains
converged, these present reasonable heuristics to give an
indication whether convergence is very far off or reasonably close.
Currently implemented are the autocorrelation time [#]_ and the
Gelman-Rubin convergence criterion [#]_.
Parameters
----------
sampler : an ``emcee.EnsembleSampler`` object
References
----------
.. [#] https://arxiv.org/abs/1202.3665
.. [#] https://projecteuclid.org/euclid.ss/1177011136 | stingray/modeling/parameterestimation.py | _check_convergence | nimeshvashistha/stingray | 133 | python | def _check_convergence(self, sampler):
'\n Compute common statistics for convergence of the MCMC\n chains. While you can never be completely sure that your chains\n converged, these present reasonable heuristics to give an\n indication whether convergence is very far off or reasonably close.\n\n Currently implemented are the autocorrelation time [#]_ and the\n Gelman-Rubin convergence criterion [#]_.\n\n Parameters\n ----------\n sampler : an ``emcee.EnsembleSampler`` object\n\n References\n ----------\n .. [#] https://arxiv.org/abs/1202.3665\n .. [#] https://projecteuclid.org/euclid.ss/1177011136\n '
try:
self.acor = sampler.get_autocorr_time()
except emcee.autocorr.AutocorrError:
self.log.info('Chains too short to compute autocorrelation lengths.')
self.rhat = self._compute_rhat(sampler) | def _check_convergence(self, sampler):
'\n Compute common statistics for convergence of the MCMC\n chains. While you can never be completely sure that your chains\n converged, these present reasonable heuristics to give an\n indication whether convergence is very far off or reasonably close.\n\n Currently implemented are the autocorrelation time [#]_ and the\n Gelman-Rubin convergence criterion [#]_.\n\n Parameters\n ----------\n sampler : an ``emcee.EnsembleSampler`` object\n\n References\n ----------\n .. [#] https://arxiv.org/abs/1202.3665\n .. [#] https://projecteuclid.org/euclid.ss/1177011136\n '
try:
self.acor = sampler.get_autocorr_time()
except emcee.autocorr.AutocorrError:
self.log.info('Chains too short to compute autocorrelation lengths.')
self.rhat = self._compute_rhat(sampler)<|docstring|>Compute common statistics for convergence of the MCMC
chains. While you can never be completely sure that your chains
converged, these present reasonable heuristics to give an
indication whether convergence is very far off or reasonably close.
Currently implemented are the autocorrelation time [#]_ and the
Gelman-Rubin convergence criterion [#]_.
Parameters
----------
sampler : an ``emcee.EnsembleSampler`` object
References
----------
.. [#] https://arxiv.org/abs/1202.3665
.. [#] https://projecteuclid.org/euclid.ss/1177011136<|endoftext|> |
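A short sketch of the same convergence check applied to a toy emcee run; it assumes emcee is installed, and the Gaussian log-probability is purely illustrative:

import numpy as np
import emcee

def log_prob(x):
    # standard-normal posterior, used only to produce chains to diagnose
    return -0.5 * np.sum(x ** 2)

sampler = emcee.EnsembleSampler(nwalkers=32, ndim=1, log_prob_fn=log_prob)
sampler.run_mcmc(np.random.randn(32, 1), 500)

try:
    print("autocorrelation time:", sampler.get_autocorr_time())
except emcee.autocorr.AutocorrError:
    print("Chains too short to compute autocorrelation lengths.")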
a086717be2f372e4efe0a4e171213ae55c470ea64e1e58dfb19ae0a30941031b | def _compute_rhat(self, sampler):
'\n Compute Gelman-Rubin convergence criterion [#]_.\n\n Parameters\n ----------\n sampler : an `emcee.EnsembleSampler` object\n\n References\n ----------\n .. [#] https://projecteuclid.org/euclid.ss/1177011136\n '
chain = sampler.get_chain()
mean_samples_iter = np.nanmean(chain, axis=1)
mean_samples = np.nanmean(chain, axis=(0, 1))
bb = ((self.niter / (self.nwalkers - 1)) * np.sum(((mean_samples_iter - mean_samples) ** 2.0), axis=0))
var_samples = np.nanvar(chain, axis=1)
ww = np.nanmean(var_samples, axis=0)
rhat = ((((self.niter - 1) / self.niter) * ww) + ((1 / self.niter) * bb))
return rhat | Compute Gelman-Rubin convergence criterion [#]_.
Parameters
----------
sampler : an `emcee.EnsembleSampler` object
References
----------
.. [#] https://projecteuclid.org/euclid.ss/1177011136 | stingray/modeling/parameterestimation.py | _compute_rhat | nimeshvashistha/stingray | 133 | python | def _compute_rhat(self, sampler):
'\n Compute Gelman-Rubin convergence criterion [#]_.\n\n Parameters\n ----------\n sampler : an `emcee.EnsembleSampler` object\n\n References\n ----------\n .. [#] https://projecteuclid.org/euclid.ss/1177011136\n '
chain = sampler.get_chain()
mean_samples_iter = np.nanmean(chain, axis=1)
mean_samples = np.nanmean(chain, axis=(0, 1))
bb = ((self.niter / (self.nwalkers - 1)) * np.sum(((mean_samples_iter - mean_samples) ** 2.0), axis=0))
var_samples = np.nanvar(chain, axis=1)
ww = np.nanmean(var_samples, axis=0)
rhat = ((((self.niter - 1) / self.niter) * ww) + ((1 / self.niter) * bb))
return rhat | def _compute_rhat(self, sampler):
'\n Compute Gelman-Rubin convergence criterion [#]_.\n\n Parameters\n ----------\n sampler : an `emcee.EnsembleSampler` object\n\n References\n ----------\n .. [#] https://projecteuclid.org/euclid.ss/1177011136\n '
chain = sampler.get_chain()
mean_samples_iter = np.nanmean(chain, axis=1)
mean_samples = np.nanmean(chain, axis=(0, 1))
bb = ((self.niter / (self.nwalkers - 1)) * np.sum(((mean_samples_iter - mean_samples) ** 2.0), axis=0))
var_samples = np.nanvar(chain, axis=1)
ww = np.nanmean(var_samples, axis=0)
rhat = ((((self.niter - 1) / self.niter) * ww) + ((1 / self.niter) * bb))
return rhat<|docstring|>Compute Gelman-Rubin convergence criterion [#]_.
Parameters
----------
sampler : an `emcee.EnsembleSampler` object
References
----------
.. [#] https://projecteuclid.org/euclid.ss/1177011136<|endoftext|> |
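The Gelman-Rubin-style statistic used above can be written as a standalone function over a chain array of shape (niter, nwalkers, ndim), as returned by ``sampler.get_chain()``; a numpy sketch that mirrors the arithmetic of ``_compute_rhat``:

import numpy as np

def compute_rhat(chain):
    # chain shape: (niter, nwalkers, ndim)
    niter, nwalkers, _ = chain.shape
    mean_samples_iter = np.nanmean(chain, axis=1)      # per-iteration mean over walkers
    mean_samples = np.nanmean(chain, axis=(0, 1))      # global mean per parameter
    bb = (niter / (nwalkers - 1)) * np.sum((mean_samples_iter - mean_samples) ** 2.0, axis=0)
    ww = np.nanmean(np.nanvar(chain, axis=1), axis=0)  # mean within-walker variance
    return ((niter - 1) / niter) * ww + (1 / niter) * bb

print(compute_rhat(np.random.randn(200, 16, 3)))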
07263bd8b894cfca38551dca6e81f6c3777baf29ae6ef686aae3ebbd613a11cd | def _infer(self, ci_min=5, ci_max=95):
'\n Infer the :class:`Posterior` means, standard deviations and credible intervals\n (i.e. the Bayesian equivalent to confidence intervals) from the :class:`Posterior` samples\n for each parameter.\n\n Parameters\n ----------\n ci_min : float\n Lower bound to the credible interval, given as percentage between\n 0 and 100\n\n ci_max : float\n Upper bound to the credible interval, given as percentage between\n 0 and 100\n '
self.mean = np.mean(self.samples, axis=0)
self.std = np.std(self.samples, axis=0)
self.ci = np.percentile(self.samples, [ci_min, ci_max], axis=0) | Infer the :class:`Posterior` means, standard deviations and credible intervals
(i.e. the Bayesian equivalent to confidence intervals) from the :class:`Posterior` samples
for each parameter.
Parameters
----------
ci_min : float
Lower bound to the credible interval, given as percentage between
0 and 100
ci_max : float
Upper bound to the credible interval, given as percentage between
0 and 100 | stingray/modeling/parameterestimation.py | _infer | nimeshvashistha/stingray | 133 | python | def _infer(self, ci_min=5, ci_max=95):
'\n Infer the :class:`Posterior` means, standard deviations and credible intervals\n (i.e. the Bayesian equivalent to confidence intervals) from the :class:`Posterior` samples\n for each parameter.\n\n Parameters\n ----------\n ci_min : float\n Lower bound to the credible interval, given as percentage between\n 0 and 100\n\n ci_max : float\n Upper bound to the credible interval, given as percentage between\n 0 and 100\n '
self.mean = np.mean(self.samples, axis=0)
self.std = np.std(self.samples, axis=0)
self.ci = np.percentile(self.samples, [ci_min, ci_max], axis=0) | def _infer(self, ci_min=5, ci_max=95):
'\n Infer the :class:`Posterior` means, standard deviations and credible intervals\n (i.e. the Bayesian equivalent to confidence intervals) from the :class:`Posterior` samples\n for each parameter.\n\n Parameters\n ----------\n ci_min : float\n Lower bound to the credible interval, given as percentage between\n 0 and 100\n\n ci_max : float\n Upper bound to the credible interval, given as percentage between\n 0 and 100\n '
self.mean = np.mean(self.samples, axis=0)
self.std = np.std(self.samples, axis=0)
self.ci = np.percentile(self.samples, [ci_min, ci_max], axis=0)<|docstring|>Infer the :class:`Posterior` means, standard deviations and credible intervals
(i.e. the Bayesian equivalent to confidence intervals) from the :class:`Posterior` samples
for each parameter.
Parameters
----------
ci_min : float
Lower bound to the credible interval, given as percentage between
0 and 100
ci_max : float
Upper bound to the credible interval, given as percentage between
0 and 100<|endoftext|> |
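The same posterior summaries can be reproduced directly from any sample array of shape (nsamples, ndim); a minimal numpy sketch with toy samples:

import numpy as np

samples = np.random.randn(5000, 2) * [0.5, 2.0] + [1.0, -3.0]   # illustrative MCMC output
mean = np.mean(samples, axis=0)
std = np.std(samples, axis=0)
ci = np.percentile(samples, [5, 95], axis=0)                    # 90% credible interval
print(mean, std, ci)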
e17f198a74da30a15c95eb60f700053b6ca5a067e5f2dcd47aef79b7ec49d9a8 | def print_results(self):
'\n Print results of the MCMC run on screen or to a log-file.\n\n\n '
self.log.info(('-- The acceptance fraction is: %f.5' % self.acceptance))
try:
self.log.info('-- The autocorrelation time is: {}'.format(self.acor))
except AttributeError:
pass
self.log.info(('R_hat for the parameters is: ' + str(self.rhat)))
self.log.info('-- Posterior Summary of Parameters: \n')
self.log.info('parameter \t mean \t\t sd \t\t 5% \t\t 95% \n')
self.log.info('---------------------------------------------\n')
for i in range(self.ndim):
self.log.info((((((((((('theta[' + str(i)) + '] \t ') + str(self.mean[i])) + '\t') + str(self.std[i])) + '\t') + str(self.ci[(0, i)])) + '\t') + str(self.ci[(1, i)])) + '\n'))
return | Print results of the MCMC run on screen or to a log-file. | stingray/modeling/parameterestimation.py | print_results | nimeshvashistha/stingray | 133 | python | def print_results(self):
'\n \n\n\n '
self.log.info(('-- The acceptance fraction is: %f.5' % self.acceptance))
try:
self.log.info('-- The autocorrelation time is: {}'.format(self.acor))
except AttributeError:
pass
self.log.info(('R_hat for the parameters is: ' + str(self.rhat)))
self.log.info('-- Posterior Summary of Parameters: \n')
self.log.info('parameter \t mean \t\t sd \t\t 5% \t\t 95% \n')
self.log.info('---------------------------------------------\n')
for i in range(self.ndim):
self.log.info((((((((((('theta[' + str(i)) + '] \t ') + str(self.mean[i])) + '\t') + str(self.std[i])) + '\t') + str(self.ci[(0, i)])) + '\t') + str(self.ci[(1, i)])) + '\n'))
return | def print_results(self):
'\n \n\n\n '
self.log.info(('-- The acceptance fraction is: %f.5' % self.acceptance))
try:
self.log.info('-- The autocorrelation time is: {}'.format(self.acor))
except AttributeError:
pass
self.log.info(('R_hat for the parameters is: ' + str(self.rhat)))
self.log.info('-- Posterior Summary of Parameters: \n')
self.log.info('parameter \t mean \t\t sd \t\t 5% \t\t 95% \n')
self.log.info('---------------------------------------------\n')
for i in range(self.ndim):
self.log.info((((((((((('theta[' + str(i)) + '] \t ') + str(self.mean[i])) + '\t') + str(self.std[i])) + '\t') + str(self.ci[(0, i)])) + '\t') + str(self.ci[(1, i)])) + '\n'))
return<|docstring|>Print results of the MCMC run on screen or to a log-file.<|endoftext|> |
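The summary table written by this method can be reproduced for any set of posterior summaries; a small sketch with the same column layout, using ``print`` instead of the logger and toy numbers:

import numpy as np

mean = np.array([1.02, -2.97])
std = np.array([0.05, 0.11])
ci = np.array([[0.94, -3.15], [1.10, -2.79]])   # rows: 5% and 95% bounds

print("parameter \t mean \t\t sd \t\t 5% \t\t 95%")
print("---------------------------------------------")
for i in range(mean.shape[0]):
    print("theta[" + str(i) + "] \t " + str(mean[i]) + "\t" + str(std[i]) +
          "\t" + str(ci[0, i]) + "\t" + str(ci[1, i]))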
65091dbd2960bd93e884ce1d4381c42651244623ec667c28c1290bb825248aae | def plot_results(self, nsamples=1000, fig=None, save_plot=False, filename='test.pdf'):
'\n Plot some results in a triangle plot.\n If installed, will use [corner]_\n for the plotting, if not,\n uses its own code to make a triangle plot.\n\n By default, this method returns a ``matplotlib.Figure`` object, but\n if ``save_plot=True`` the plot can be saved to file automatically,\n\n Parameters\n ----------\n\n nsamples : int, default 1000\n The maximum number of samples used for plotting.\n\n fig : matplotlib.Figure instance, default None\n If created externally, you can pass a Figure instance to this method.\n If none is passed, the method will create one internally.\n\n save_plot : bool, default ``False``\n If ``True`` save the plot to file with a file name specified by the\n keyword ``filename``. If ``False`` just return the ``Figure`` object\n\n filename : str\n Name of the output file with the figure\n\n References\n ----------\n .. [corner] https://github.com/dfm/corner.py\n '
assert can_plot, 'Need to have matplotlib installed for plotting'
if use_corner:
fig = corner.corner(self.samples, labels=None, fig=fig, bins=int(20), quantiles=[0.16, 0.5, 0.84], show_titles=True, title_args={'fontsize': 12})
else:
if (fig is None):
fig = plt.figure(figsize=(15, 15))
plt.subplots_adjust(top=0.925, bottom=0.025, left=0.025, right=0.975, wspace=0.2, hspace=0.2)
ind_all = np.random.choice(np.arange(self.samples.shape[0]), size=nsamples)
samples = self.samples[ind_all]
for i in range(self.ndim):
for j in range(self.ndim):
(xmin, xmax) = (samples[(:, j)].min(), samples[(:, j)].max())
(ymin, ymax) = (samples[(:, i)].min(), samples[(:, i)].max())
ax = fig.add_subplot(self.ndim, self.ndim, (((i * self.ndim) + j) + 1))
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.ticklabel_format(style='sci', scilimits=((- 2), 2))
if (i == j):
(ntemp, binstemp, patchestemp) = ax.hist(samples[(:, i)], 30, density=True, histtype='stepfilled')
ax.axis([ymin, ymax, 0, (np.max(ntemp) * 1.2)])
else:
ax.axis([xmin, xmax, ymin, ymax])
ax.scatter(samples[(:, j)], samples[(:, i)], s=7)
(xmin, xmax) = (samples[(:, j)].min(), samples[(:, j)].max())
(ymin, ymax) = (samples[(:, i)].min(), samples[(:, i)].max())
try:
(xx, yy) = np.mgrid[(xmin:xmax:100j, ymin:ymax:100j)]
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([samples[(:, j)], samples[(:, i)]])
kernel = scipy.stats.gaussian_kde(values)
zz = np.reshape(kernel(positions).T, xx.shape)
ax.contour(xx, yy, zz, 7)
except ValueError:
logging.info('Not making contours.')
if save_plot:
plt.savefig(filename, format='pdf')
return fig | Plot some results in a triangle plot.
If the optional [corner]_ package is installed, it will be used for the plotting; if not, the method falls back to its own code to make a triangle plot.
By default, this method returns a ``matplotlib.Figure`` object, but if ``save_plot=True`` the plot can be saved to file automatically.
Parameters
----------
nsamples : int, default 1000
The maximum number of samples used for plotting.
fig : matplotlib.Figure instance, default None
If created externally, you can pass a Figure instance to this method.
If none is passed, the method will create one internally.
save_plot : bool, default ``False``
If ``True`` save the plot to file with a file name specified by the
keyword ``filename``. If ``False`` just return the ``Figure`` object
filename : str
Name of the output file with the figure
References
----------
.. [corner] https://github.com/dfm/corner.py | stingray/modeling/parameterestimation.py | plot_results | nimeshvashistha/stingray | 133 | python | def plot_results(self, nsamples=1000, fig=None, save_plot=False, filename='test.pdf'):
'\n Plot some results in a triangle plot.\n If installed, will use [corner]_\n for the plotting, if not,\n uses its own code to make a triangle plot.\n\n By default, this method returns a ``matplotlib.Figure`` object, but\n if ``save_plot=True`` the plot can be saved to file automatically,\n\n Parameters\n ----------\n\n nsamples : int, default 1000\n The maximum number of samples used for plotting.\n\n fig : matplotlib.Figure instance, default None\n If created externally, you can pass a Figure instance to this method.\n If none is passed, the method will create one internally.\n\n save_plot : bool, default ``False``\n If ``True`` save the plot to file with a file name specified by the\n keyword ``filename``. If ``False`` just return the ``Figure`` object\n\n filename : str\n Name of the output file with the figure\n\n References\n ----------\n .. [corner] https://github.com/dfm/corner.py\n '
assert can_plot, 'Need to have matplotlib installed for plotting'
if use_corner:
fig = corner.corner(self.samples, labels=None, fig=fig, bins=int(20), quantiles=[0.16, 0.5, 0.84], show_titles=True, title_args={'fontsize': 12})
else:
if (fig is None):
fig = plt.figure(figsize=(15, 15))
plt.subplots_adjust(top=0.925, bottom=0.025, left=0.025, right=0.975, wspace=0.2, hspace=0.2)
ind_all = np.random.choice(np.arange(self.samples.shape[0]), size=nsamples)
samples = self.samples[ind_all]
for i in range(self.ndim):
for j in range(self.ndim):
(xmin, xmax) = (samples[(:, j)].min(), samples[(:, j)].max())
(ymin, ymax) = (samples[(:, i)].min(), samples[(:, i)].max())
ax = fig.add_subplot(self.ndim, self.ndim, (((i * self.ndim) + j) + 1))
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.ticklabel_format(style='sci', scilimits=((- 2), 2))
if (i == j):
(ntemp, binstemp, patchestemp) = ax.hist(samples[(:, i)], 30, density=True, histtype='stepfilled')
ax.axis([ymin, ymax, 0, (np.max(ntemp) * 1.2)])
else:
ax.axis([xmin, xmax, ymin, ymax])
ax.scatter(samples[(:, j)], samples[(:, i)], s=7)
(xmin, xmax) = (samples[(:, j)].min(), samples[(:, j)].max())
(ymin, ymax) = (samples[(:, i)].min(), samples[(:, i)].max())
try:
(xx, yy) = np.mgrid[(xmin:xmax:100j, ymin:ymax:100j)]
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([samples[(:, j)], samples[(:, i)]])
kernel = scipy.stats.gaussian_kde(values)
zz = np.reshape(kernel(positions).T, xx.shape)
ax.contour(xx, yy, zz, 7)
except ValueError:
logging.info('Not making contours.')
if save_plot:
plt.savefig(filename, format='pdf')
return fig | def plot_results(self, nsamples=1000, fig=None, save_plot=False, filename='test.pdf'):
'\n Plot some results in a triangle plot.\n If installed, will use [corner]_\n for the plotting, if not,\n uses its own code to make a triangle plot.\n\n By default, this method returns a ``matplotlib.Figure`` object, but\n if ``save_plot=True`` the plot can be saved to file automatically,\n\n Parameters\n ----------\n\n nsamples : int, default 1000\n The maximum number of samples used for plotting.\n\n fig : matplotlib.Figure instance, default None\n If created externally, you can pass a Figure instance to this method.\n If none is passed, the method will create one internally.\n\n save_plot : bool, default ``False``\n If ``True`` save the plot to file with a file name specified by the\n keyword ``filename``. If ``False`` just return the ``Figure`` object\n\n filename : str\n Name of the output file with the figure\n\n References\n ----------\n .. [corner] https://github.com/dfm/corner.py\n '
assert can_plot, 'Need to have matplotlib installed for plotting'
if use_corner:
fig = corner.corner(self.samples, labels=None, fig=fig, bins=int(20), quantiles=[0.16, 0.5, 0.84], show_titles=True, title_args={'fontsize': 12})
else:
if (fig is None):
fig = plt.figure(figsize=(15, 15))
plt.subplots_adjust(top=0.925, bottom=0.025, left=0.025, right=0.975, wspace=0.2, hspace=0.2)
ind_all = np.random.choice(np.arange(self.samples.shape[0]), size=nsamples)
samples = self.samples[ind_all]
for i in range(self.ndim):
for j in range(self.ndim):
(xmin, xmax) = (samples[(:, j)].min(), samples[(:, j)].max())
(ymin, ymax) = (samples[(:, i)].min(), samples[(:, i)].max())
ax = fig.add_subplot(self.ndim, self.ndim, (((i * self.ndim) + j) + 1))
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.ticklabel_format(style='sci', scilimits=((- 2), 2))
if (i == j):
(ntemp, binstemp, patchestemp) = ax.hist(samples[(:, i)], 30, density=True, histtype='stepfilled')
ax.axis([ymin, ymax, 0, (np.max(ntemp) * 1.2)])
else:
ax.axis([xmin, xmax, ymin, ymax])
ax.scatter(samples[(:, j)], samples[(:, i)], s=7)
(xmin, xmax) = (samples[(:, j)].min(), samples[(:, j)].max())
(ymin, ymax) = (samples[(:, i)].min(), samples[(:, i)].max())
try:
(xx, yy) = np.mgrid[(xmin:xmax:100j, ymin:ymax:100j)]
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([samples[(:, j)], samples[(:, i)]])
kernel = scipy.stats.gaussian_kde(values)
zz = np.reshape(kernel(positions).T, xx.shape)
ax.contour(xx, yy, zz, 7)
except ValueError:
logging.info('Not making contours.')
if save_plot:
plt.savefig(filename, format='pdf')
return fig<|docstring|>Plot some results in a triangle plot.
If the optional [corner]_ package is installed, it will be used for the plotting; if not, the method falls back to its own code to make a triangle plot.
By default, this method returns a ``matplotlib.Figure`` object, but if ``save_plot=True`` the plot can be saved to file automatically.
Parameters
----------
nsamples : int, default 1000
The maximum number of samples used for plotting.
fig : matplotlib.Figure instance, default None
If created externally, you can pass a Figure instance to this method.
If none is passed, the method will create one internally.
save_plot : bool, default ``False``
If ``True`` save the plot to file with a file name specified by the
keyword ``filename``. If ``False`` just return the ``Figure`` object
filename : str
Name of the output file with the figure
References
----------
.. [corner] https://github.com/dfm/corner.py<|endoftext|> |
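A hedged sketch of the corner-plot branch described above, assuming the optional ``corner`` package is installed; the correlated Gaussian samples stand in for ``SamplingResults.samples``:

import numpy as np
import corner

samples = np.random.multivariate_normal([1.0, -3.0], [[0.04, 0.0], [0.0, 0.09]], size=2000)
fig = corner.corner(samples, bins=20, quantiles=[0.16, 0.5, 0.84], show_titles=True)
fig.savefig("corner_test.pdf")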
f5406800dbbd884f72104109991ebd9203311ab5975a010a3d5eb565894efb35 | def fit(self, lpost, t0, neg=True, scipy_optimize_options=None):
'\n Do either a Maximum-A-Posteriori (MAP) or Maximum Likelihood (ML)\n fit to the power spectrum.\n\n MAP fits include priors, ML fits do not.\n\n Parameters\n -----------\n lpost : :class:`stingray.modeling.PSDPosterior` object\n An instance of class :class:`stingray.modeling.PSDPosterior` that defines the\n function to be minimized (either in ``loglikelihood`` or ``logposterior``)\n\n t0 : {list | numpy.ndarray}\n List/array with set of initial parameters\n\n neg : bool, optional, default ``True``\n Boolean to be passed to ``lpost``, setting whether to use the\n *negative* posterior or the *negative* log-likelihood.\n\n scipy_optimize_options : dict, optional, default None\n A dictionary with options for ``scipy.optimize.minimize``,\n directly passed on as keyword arguments.\n\n Returns\n --------\n res : :class:`OptimizationResults` object\n An object containing useful summaries of the fitting procedure.\n For details, see documentation of :class:`OptimizationResults`.\n '
self.lpost = lpost
res = ParameterEstimation.fit(self, self.lpost, t0, neg=neg, scipy_optimize_options=scipy_optimize_options)
(res.maxpow, res.maxfreq, res.maxind) = self._compute_highest_outlier(self.lpost, res)
return res | Do either a Maximum-A-Posteriori (MAP) or Maximum Likelihood (ML)
fit to the power spectrum.
MAP fits include priors, ML fits do not.
Parameters
-----------
lpost : :class:`stingray.modeling.PSDPosterior` object
An instance of class :class:`stingray.modeling.PSDPosterior` that defines the
function to be minimized (either in ``loglikelihood`` or ``logposterior``)
t0 : {list | numpy.ndarray}
List/array with set of initial parameters
neg : bool, optional, default ``True``
Boolean to be passed to ``lpost``, setting whether to use the
*negative* posterior or the *negative* log-likelihood.
scipy_optimize_options : dict, optional, default None
A dictionary with options for ``scipy.optimize.minimize``,
directly passed on as keyword arguments.
Returns
--------
res : :class:`OptimizationResults` object
An object containing useful summaries of the fitting procedure.
For details, see documentation of :class:`OptimizationResults`. | stingray/modeling/parameterestimation.py | fit | nimeshvashistha/stingray | 133 | python | def fit(self, lpost, t0, neg=True, scipy_optimize_options=None):
'\n Do either a Maximum-A-Posteriori (MAP) or Maximum Likelihood (ML)\n fit to the power spectrum.\n\n MAP fits include priors, ML fits do not.\n\n Parameters\n -----------\n lpost : :class:`stingray.modeling.PSDPosterior` object\n An instance of class :class:`stingray.modeling.PSDPosterior` that defines the\n function to be minimized (either in ``loglikelihood`` or ``logposterior``)\n\n t0 : {list | numpy.ndarray}\n List/array with set of initial parameters\n\n neg : bool, optional, default ``True``\n Boolean to be passed to ``lpost``, setting whether to use the\n *negative* posterior or the *negative* log-likelihood.\n\n scipy_optimize_options : dict, optional, default None\n A dictionary with options for ``scipy.optimize.minimize``,\n directly passed on as keyword arguments.\n\n Returns\n --------\n res : :class:`OptimizationResults` object\n An object containing useful summaries of the fitting procedure.\n For details, see documentation of :class:`OptimizationResults`.\n '
self.lpost = lpost
res = ParameterEstimation.fit(self, self.lpost, t0, neg=neg, scipy_optimize_options=scipy_optimize_options)
(res.maxpow, res.maxfreq, res.maxind) = self._compute_highest_outlier(self.lpost, res)
return res | def fit(self, lpost, t0, neg=True, scipy_optimize_options=None):
'\n Do either a Maximum-A-Posteriori (MAP) or Maximum Likelihood (ML)\n fit to the power spectrum.\n\n MAP fits include priors, ML fits do not.\n\n Parameters\n -----------\n lpost : :class:`stingray.modeling.PSDPosterior` object\n An instance of class :class:`stingray.modeling.PSDPosterior` that defines the\n function to be minimized (either in ``loglikelihood`` or ``logposterior``)\n\n t0 : {list | numpy.ndarray}\n List/array with set of initial parameters\n\n neg : bool, optional, default ``True``\n Boolean to be passed to ``lpost``, setting whether to use the\n *negative* posterior or the *negative* log-likelihood.\n\n scipy_optimize_options : dict, optional, default None\n A dictionary with options for ``scipy.optimize.minimize``,\n directly passed on as keyword arguments.\n\n Returns\n --------\n res : :class:`OptimizationResults` object\n An object containing useful summaries of the fitting procedure.\n For details, see documentation of :class:`OptimizationResults`.\n '
self.lpost = lpost
res = ParameterEstimation.fit(self, self.lpost, t0, neg=neg, scipy_optimize_options=scipy_optimize_options)
(res.maxpow, res.maxfreq, res.maxind) = self._compute_highest_outlier(self.lpost, res)
return res<|docstring|>Do either a Maximum-A-Posteriori (MAP) or Maximum Likelihood (ML)
fit to the power spectrum.
MAP fits include priors, ML fits do not.
Parameters
-----------
lpost : :class:`stingray.modeling.PSDPosterior` object
An instance of class :class:`stingray.modeling.PSDPosterior` that defines the
function to be minimized (either in ``loglikelihood`` or ``logposterior``)
t0 : {list | numpy.ndarray}
List/array with set of initial parameters
neg : bool, optional, default ``True``
Boolean to be passed to ``lpost``, setting whether to use the
*negative* posterior or the *negative* log-likelihood.
scipy_optimize_options : dict, optional, default None
A dictionary with options for ``scipy.optimize.minimize``,
directly passed on as keyword arguments.
Returns
--------
res : :class:`OptimizationResults` object
An object containing useful summaries of the fitting procedure.
For details, see documentation of :class:`OptimizationResults`.<|endoftext|> |
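A hedged sketch of a maximum-likelihood fit with this method, reusing the toy ``ps`` periodogram built in the ``calibrate_lrt`` sketch further above; the model and starting values are illustrative:

from astropy.modeling import models
from stingray.modeling import PSDLogLikelihood, PSDParEst

model = models.PowerLaw1D() + models.Const1D()
loglike = PSDLogLikelihood(ps.freq, ps.power, model=model, m=ps.m)

parest = PSDParEst(ps, max_post=False)
res = parest.fit(loglike, [2.0, 1.0, 2.0, 0.5], neg=True)
print(res.p_opt, res.maxpow, res.maxfreq)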
f2336a72e1d51dbb986c9970e6f8fb407428513cd4a5eacbaa48ac425b557d7d | def sample(self, lpost, t0, cov=None, nwalkers=500, niter=100, burnin=100, threads=1, print_results=True, plot=False, namestr='test'):
'\n Sample the posterior distribution defined in ``lpost`` using MCMC.\n Here we use the ``emcee`` package, but other implementations could\n in principle be used.\n\n Parameters\n ----------\n lpost : instance of a :class:`Posterior` subclass\n and instance of class :class:`Posterior` or one of its subclasses\n that defines the function to be minimized (either in ``loglikelihood``\n or ``logposterior``)\n\n t0 : iterable\n list or array containing the starting parameters. Its length\n must match ``lpost.model.npar``.\n\n nwalkers : int, optional, default 500\n The number of walkers (chains) to use during the MCMC procedure.\n The more walkers are used, the slower the estimation will be, but\n the better the final distribution is likely to be.\n\n niter : int, optional, default 100\n The number of iterations to run the MCMC chains for. The larger this\n number, the longer the estimation will take, but the higher the\n chance that the walkers have actually converged on the true\n posterior distribution.\n\n burnin : int, optional, default 100\n The number of iterations to run the walkers before convergence is\n assumed to have occurred. This part of the chain will be discarded\n before sampling from what is then assumed to be the posterior\n distribution desired.\n\n threads : int, optional, default 1\n The number of threads for parallelization.\n Default is ``1``, i.e. no parallelization\n\n print_results : bool, optional, default True\n Boolean flag setting whether the results of the MCMC run should\n be printed to standard output\n\n plot : bool, optional, default False\n Boolean flag setting whether summary plots of the MCMC chains\n should be produced\n\n namestr : str, optional, default ``test``\n Optional string for output file names for the plotting.\n\n Returns\n -------\n\n res : :class:`SamplingResults` object\n An object containing useful summaries of the\n sampling procedure. For details see documentation of :class:`SamplingResults`.\n\n '
self.lpost = lpost
if (cov is None):
fit_res = ParameterEstimation.fit(self, self.lpost, t0, neg=True)
cov = fit_res.cov
t0 = fit_res.p_opt
res = ParameterEstimation.sample(self, self.lpost, t0, cov=cov, nwalkers=nwalkers, niter=niter, burnin=burnin, threads=threads, print_results=print_results, plot=plot, namestr=namestr)
return res | Sample the posterior distribution defined in ``lpost`` using MCMC.
Here we use the ``emcee`` package, but other implementations could
in principle be used.
Parameters
----------
lpost : instance of a :class:`Posterior` subclass
an instance of class :class:`Posterior` or one of its subclasses
that defines the function to be minimized (either in ``loglikelihood``
or ``logposterior``)
t0 : iterable
list or array containing the starting parameters. Its length
must match ``lpost.model.npar``.
nwalkers : int, optional, default 500
The number of walkers (chains) to use during the MCMC procedure.
The more walkers are used, the slower the estimation will be, but
the better the final distribution is likely to be.
niter : int, optional, default 100
The number of iterations to run the MCMC chains for. The larger this
number, the longer the estimation will take, but the higher the
chance that the walkers have actually converged on the true
posterior distribution.
burnin : int, optional, default 100
The number of iterations to run the walkers before convergence is
assumed to have occurred. This part of the chain will be discarded
before sampling from what is then assumed to be the posterior
distribution desired.
threads : int, optional, default 1
The number of threads for parallelization.
Default is ``1``, i.e. no parallelization
print_results : bool, optional, default True
Boolean flag setting whether the results of the MCMC run should
be printed to standard output
plot : bool, optional, default False
Boolean flag setting whether summary plots of the MCMC chains
should be produced
namestr : str, optional, default ``test``
Optional string for output file names for the plotting.
Returns
-------
res : :class:`SamplingResults` object
An object containing useful summaries of the
sampling procedure. For details see documentation of :class:`SamplingResults`. | stingray/modeling/parameterestimation.py | sample | nimeshvashistha/stingray | 133 | python | def sample(self, lpost, t0, cov=None, nwalkers=500, niter=100, burnin=100, threads=1, print_results=True, plot=False, namestr='test'):
'\n Sample the posterior distribution defined in ``lpost`` using MCMC.\n Here we use the ``emcee`` package, but other implementations could\n in principle be used.\n\n Parameters\n ----------\n lpost : instance of a :class:`Posterior` subclass\n and instance of class :class:`Posterior` or one of its subclasses\n that defines the function to be minimized (either in ``loglikelihood``\n or ``logposterior``)\n\n t0 : iterable\n list or array containing the starting parameters. Its length\n must match ``lpost.model.npar``.\n\n nwalkers : int, optional, default 500\n The number of walkers (chains) to use during the MCMC procedure.\n The more walkers are used, the slower the estimation will be, but\n the better the final distribution is likely to be.\n\n niter : int, optional, default 100\n The number of iterations to run the MCMC chains for. The larger this\n number, the longer the estimation will take, but the higher the\n chance that the walkers have actually converged on the true\n posterior distribution.\n\n burnin : int, optional, default 100\n The number of iterations to run the walkers before convergence is\n assumed to have occurred. This part of the chain will be discarded\n before sampling from what is then assumed to be the posterior\n distribution desired.\n\n threads : int, optional, default 1\n The number of threads for parallelization.\n Default is ``1``, i.e. no parallelization\n\n print_results : bool, optional, default True\n Boolean flag setting whether the results of the MCMC run should\n be printed to standard output\n\n plot : bool, optional, default False\n Boolean flag setting whether summary plots of the MCMC chains\n should be produced\n\n namestr : str, optional, default ``test``\n Optional string for output file names for the plotting.\n\n Returns\n -------\n\n res : :class:`SamplingResults` object\n An object containing useful summaries of the\n sampling procedure. For details see documentation of :class:`SamplingResults`.\n\n '
self.lpost = lpost
if (cov is None):
fit_res = ParameterEstimation.fit(self, self.lpost, t0, neg=True)
cov = fit_res.cov
t0 = fit_res.p_opt
res = ParameterEstimation.sample(self, self.lpost, t0, cov=cov, nwalkers=nwalkers, niter=niter, burnin=burnin, threads=threads, print_results=print_results, plot=plot, namestr=namestr)
return res | def sample(self, lpost, t0, cov=None, nwalkers=500, niter=100, burnin=100, threads=1, print_results=True, plot=False, namestr='test'):
'\n Sample the posterior distribution defined in ``lpost`` using MCMC.\n Here we use the ``emcee`` package, but other implementations could\n in principle be used.\n\n Parameters\n ----------\n lpost : instance of a :class:`Posterior` subclass\n and instance of class :class:`Posterior` or one of its subclasses\n that defines the function to be minimized (either in ``loglikelihood``\n or ``logposterior``)\n\n t0 : iterable\n list or array containing the starting parameters. Its length\n must match ``lpost.model.npar``.\n\n nwalkers : int, optional, default 500\n The number of walkers (chains) to use during the MCMC procedure.\n The more walkers are used, the slower the estimation will be, but\n the better the final distribution is likely to be.\n\n niter : int, optional, default 100\n The number of iterations to run the MCMC chains for. The larger this\n number, the longer the estimation will take, but the higher the\n chance that the walkers have actually converged on the true\n posterior distribution.\n\n burnin : int, optional, default 100\n The number of iterations to run the walkers before convergence is\n assumed to have occurred. This part of the chain will be discarded\n before sampling from what is then assumed to be the posterior\n distribution desired.\n\n threads : int, optional, default 1\n The number of threads for parallelization.\n Default is ``1``, i.e. no parallelization\n\n print_results : bool, optional, default True\n Boolean flag setting whether the results of the MCMC run should\n be printed to standard output\n\n plot : bool, optional, default False\n Boolean flag setting whether summary plots of the MCMC chains\n should be produced\n\n namestr : str, optional, default ``test``\n Optional string for output file names for the plotting.\n\n Returns\n -------\n\n res : :class:`SamplingResults` object\n An object containing useful summaries of the\n sampling procedure. For details see documentation of :class:`SamplingResults`.\n\n '
self.lpost = lpost
if (cov is None):
fit_res = ParameterEstimation.fit(self, self.lpost, t0, neg=True)
cov = fit_res.cov
t0 = fit_res.p_opt
res = ParameterEstimation.sample(self, self.lpost, t0, cov=cov, nwalkers=nwalkers, niter=niter, burnin=burnin, threads=threads, print_results=print_results, plot=plot, namestr=namestr)
return res<|docstring|>Sample the posterior distribution defined in ``lpost`` using MCMC.
Here we use the ``emcee`` package, but other implementations could
in principle be used.
Parameters
----------
lpost : instance of a :class:`Posterior` subclass
an instance of class :class:`Posterior` or one of its subclasses
that defines the function to be minimized (either in ``loglikelihood``
or ``logposterior``)
t0 : iterable
list or array containing the starting parameters. Its length
must match ``lpost.model.npar``.
nwalkers : int, optional, default 500
The number of walkers (chains) to use during the MCMC procedure.
The more walkers are used, the slower the estimation will be, but
the better the final distribution is likely to be.
niter : int, optional, default 100
The number of iterations to run the MCMC chains for. The larger this
number, the longer the estimation will take, but the higher the
chance that the walkers have actually converged on the true
posterior distribution.
burnin : int, optional, default 100
The number of iterations to run the walkers before convergence is
assumed to have occurred. This part of the chain will be discarded
before sampling from what is then assumed to be the posterior
distribution desired.
threads : int, optional, default 1
The number of threads for parallelization.
Default is ``1``, i.e. no parallelization
print_results : bool, optional, default True
Boolean flag setting whether the results of the MCMC run should
be printed to standard output
plot : bool, optional, default False
Boolean flag setting whether summary plots of the MCMC chains
should be produced
namestr : str, optional, default ``test``
Optional string for output file names for the plotting.
Returns
-------
res : :class:`SamplingResults` object
An object containing useful summaries of the
sampling procedure. For details see documentation of :class:`SamplingResults`.<|endoftext|> |
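Continuing the fit sketch above (``ps``, ``model`` and ``res`` are assumed from there), posterior sampling only needs a prior attached to the posterior object; the flat ``logprior`` below is a placeholder for a real prior, not a recommendation:

from stingray.modeling import PSDPosterior, PSDParEst

lpost = PSDPosterior(ps.freq, ps.power, model, m=ps.m)
lpost.logprior = lambda pars: 0.0          # improper flat prior, illustration only

parest = PSDParEst(ps, max_post=True)
sample_res = parest.sample(lpost, res.p_opt, cov=res.cov,
                           nwalkers=200, niter=100, burnin=100, namestr="psd_mcmc")
sample_res.print_results()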
1297afd13e1e45212fc1f2845edc7dc5b87c55e2cc9f4b8aa59b324dac8fd58b | def _generate_data(self, lpost, pars, rng=None):
'\n Generate a fake power spectrum from a model.\n\n Parameters\n ----------\n lpost : instance of a :class:`Posterior` or :class:`LogLikelihood` subclass\n The object containing the relevant information about the\n data and the model\n\n pars : iterable\n A list of parameters to be passed to ``lpost.model`` in oder\n to generate a model data set.\n\n Returns:\n --------\n sim_ps : :class:`stingray.Powerspectrum` object\n The simulated :class:`Powerspectrum` object\n\n '
if (rng is None):
rng = np.random.RandomState(None)
model_spectrum = self._generate_model(lpost, pars)
model_powers = ((model_spectrum * rng.chisquare((2 * self.ps.m), size=model_spectrum.shape[0])) / (2.0 * self.ps.m))
sim_ps = copy.copy(self.ps)
sim_ps.power = model_powers
return sim_ps | Generate a fake power spectrum from a model.
Parameters
----------
lpost : instance of a :class:`Posterior` or :class:`LogLikelihood` subclass
The object containing the relevant information about the
data and the model
pars : iterable
A list of parameters to be passed to ``lpost.model`` in order
to generate a model data set.
Returns:
--------
sim_ps : :class:`stingray.Powerspectrum` object
The simulated :class:`Powerspectrum` object | stingray/modeling/parameterestimation.py | _generate_data | nimeshvashistha/stingray | 133 | python | def _generate_data(self, lpost, pars, rng=None):
'\n Generate a fake power spectrum from a model.\n\n Parameters\n ----------\n lpost : instance of a :class:`Posterior` or :class:`LogLikelihood` subclass\n The object containing the relevant information about the\n data and the model\n\n pars : iterable\n A list of parameters to be passed to ``lpost.model`` in oder\n to generate a model data set.\n\n Returns:\n --------\n sim_ps : :class:`stingray.Powerspectrum` object\n The simulated :class:`Powerspectrum` object\n\n '
if (rng is None):
rng = np.random.RandomState(None)
model_spectrum = self._generate_model(lpost, pars)
model_powers = ((model_spectrum * rng.chisquare((2 * self.ps.m), size=model_spectrum.shape[0])) / (2.0 * self.ps.m))
sim_ps = copy.copy(self.ps)
sim_ps.power = model_powers
return sim_ps | def _generate_data(self, lpost, pars, rng=None):
'\n Generate a fake power spectrum from a model.\n\n Parameters\n ----------\n lpost : instance of a :class:`Posterior` or :class:`LogLikelihood` subclass\n The object containing the relevant information about the\n data and the model\n\n pars : iterable\n A list of parameters to be passed to ``lpost.model`` in oder\n to generate a model data set.\n\n Returns:\n --------\n sim_ps : :class:`stingray.Powerspectrum` object\n The simulated :class:`Powerspectrum` object\n\n '
if (rng is None):
rng = np.random.RandomState(None)
model_spectrum = self._generate_model(lpost, pars)
model_powers = ((model_spectrum * rng.chisquare((2 * self.ps.m), size=model_spectrum.shape[0])) / (2.0 * self.ps.m))
sim_ps = copy.copy(self.ps)
sim_ps.power = model_powers
return sim_ps<|docstring|>Generate a fake power spectrum from a model.
Parameters
----------
lpost : instance of a :class:`Posterior` or :class:`LogLikelihood` subclass
The object containing the relevant information about the
data and the model
pars : iterable
A list of parameters to be passed to ``lpost.model`` in order
to generate a model data set.
Returns:
--------
sim_ps : :class:`stingray.Powerspectrum` object
The simulated :class:`Powerspectrum` object<|endoftext|> |
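The heart of the simulation step is a single line: scale the model spectrum by chi-square distributed noise with 2m degrees of freedom. A standalone numpy sketch with an illustrative power-law model spectrum:

import numpy as np

rng = np.random.RandomState(0)
freq = np.linspace(0.01, 10.0, 1000)
model_spectrum = 2.0 * freq ** (-1.5) + 2.0    # illustrative power law plus constant noise level
m = 1                                          # number of averaged periodograms
model_powers = model_spectrum * rng.chisquare(2 * m, size=model_spectrum.shape[0]) / (2.0 * m)
print(model_powers[:5])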
7b84feaf5f53163cdd09bd20cb038f4754f3fc4c0e856f3818a92ab22f0ead1f | def simulate_lrts(self, s_all, lpost1, t1, lpost2, t2, seed=None):
'\n Simulate likelihood ratios for two given models based on MCMC samples\n for the simpler model (i.e. the null hypothesis).\n\n Parameters\n ----------\n s_all : numpy.ndarray of shape ``(nsamples, lpost1.npar)``\n An array with MCMC samples derived from the null hypothesis model in\n ``lpost1``. Its second dimension must match the number of free\n parameters in ``lpost1.model``.\n\n lpost1 : :class:`LogLikelihood` or :class:`Posterior` subclass object\n Object containing the null hypothesis model\n\n t1 : iterable of length ``lpost1.npar``\n A starting guess for fitting the model in ``lpost1``\n\n lpost2 : :class:`LogLikelihood` or :class:`Posterior` subclass object\n Object containing the alternative hypothesis model\n\n t2 : iterable of length ``lpost2.npar``\n A starting guess for fitting the model in ``lpost2``\n\n max_post : bool, optional, default ``True``\n If ``True``, then ``lpost1`` and ``lpost2`` should be :class:`Posterior` subclass\n objects; if ``False``, then ``lpost1`` and ``lpost2`` should be\n :class:`LogLikelihood` subclass objects\n\n seed : int, optional default ``None``\n A seed to initialize the ``numpy.random.RandomState`` object to be\n passed on to ``_generate_data``. Useful for producing exactly\n reproducible results\n\n Returns\n -------\n lrt_sim : numpy.ndarray\n An array with the simulated likelihood ratios for the simulated\n data\n '
assert (lpost1.__class__ == lpost2.__class__), 'Both LogLikelihood or Posterior objects must be of the same class!'
nsim = s_all.shape[0]
lrt_sim = np.zeros(nsim)
rng = np.random.RandomState(seed)
for (i, s) in enumerate(s_all):
sim_ps = self._generate_data(lpost1, s, rng)
neg = True
if isinstance(lpost1, LogLikelihood):
sim_lpost1 = PSDLogLikelihood(sim_ps.freq, sim_ps.power, model=lpost1.model)
sim_lpost2 = PSDLogLikelihood(sim_ps.freq, sim_ps.power, model=lpost2.model, m=sim_ps.m)
max_post = False
else:
sim_lpost1 = PSDPosterior(sim_ps.freq, sim_ps.power, lpost1.model, m=sim_ps.m)
sim_lpost1.logprior = lpost1.logprior
sim_lpost2 = PSDPosterior(sim_ps.freq, sim_ps.power, lpost2.model, m=sim_ps.m)
sim_lpost2.logprior = lpost2.logprior
max_post = True
parest_sim = PSDParEst(sim_ps, max_post=max_post, fitmethod=self.fitmethod)
try:
(lrt_sim[i], _, _) = parest_sim.compute_lrt(sim_lpost1, t1, sim_lpost2, t2, neg=neg, max_post=max_post)
except RuntimeError:
logging.warning('Fitting was unsuccessful. Skipping this simulation!')
continue
return lrt_sim | Simulate likelihood ratios for two given models based on MCMC samples
for the simpler model (i.e. the null hypothesis).
Parameters
----------
s_all : numpy.ndarray of shape ``(nsamples, lpost1.npar)``
An array with MCMC samples derived from the null hypothesis model in
``lpost1``. Its second dimension must match the number of free
parameters in ``lpost1.model``.
lpost1 : :class:`LogLikelihood` or :class:`Posterior` subclass object
Object containing the null hypothesis model
t1 : iterable of length ``lpost1.npar``
A starting guess for fitting the model in ``lpost1``
lpost2 : :class:`LogLikelihood` or :class:`Posterior` subclass object
Object containing the alternative hypothesis model
t2 : iterable of length ``lpost2.npar``
A starting guess for fitting the model in ``lpost2``
max_post : bool, optional, default ``True``
If ``True``, then ``lpost1`` and ``lpost2`` should be :class:`Posterior` subclass
objects; if ``False``, then ``lpost1`` and ``lpost2`` should be
:class:`LogLikelihood` subclass objects
seed : int, optional default ``None``
A seed to initialize the ``numpy.random.RandomState`` object to be
passed on to ``_generate_data``. Useful for producing exactly
reproducible results
Returns
-------
lrt_sim : numpy.ndarray
An array with the simulated likelihood ratios for the simulated
data | stingray/modeling/parameterestimation.py | simulate_lrts | nimeshvashistha/stingray | 133 | python | def simulate_lrts(self, s_all, lpost1, t1, lpost2, t2, seed=None):
'\n Simulate likelihood ratios for two given models based on MCMC samples\n for the simpler model (i.e. the null hypothesis).\n\n Parameters\n ----------\n s_all : numpy.ndarray of shape ``(nsamples, lpost1.npar)``\n An array with MCMC samples derived from the null hypothesis model in\n ``lpost1``. Its second dimension must match the number of free\n parameters in ``lpost1.model``.\n\n lpost1 : :class:`LogLikelihood` or :class:`Posterior` subclass object\n Object containing the null hypothesis model\n\n t1 : iterable of length ``lpost1.npar``\n A starting guess for fitting the model in ``lpost1``\n\n lpost2 : :class:`LogLikelihood` or :class:`Posterior` subclass object\n Object containing the alternative hypothesis model\n\n t2 : iterable of length ``lpost2.npar``\n A starting guess for fitting the model in ``lpost2``\n\n max_post : bool, optional, default ``True``\n If ``True``, then ``lpost1`` and ``lpost2`` should be :class:`Posterior` subclass\n objects; if ``False``, then ``lpost1`` and ``lpost2`` should be\n :class:`LogLikelihood` subclass objects\n\n seed : int, optional default ``None``\n A seed to initialize the ``numpy.random.RandomState`` object to be\n passed on to ``_generate_data``. Useful for producing exactly\n reproducible results\n\n Returns\n -------\n lrt_sim : numpy.ndarray\n An array with the simulated likelihood ratios for the simulated\n data\n '
assert (lpost1.__class__ == lpost2.__class__), 'Both LogLikelihood or Posterior objects must be of the same class!'
nsim = s_all.shape[0]
lrt_sim = np.zeros(nsim)
rng = np.random.RandomState(seed)
for (i, s) in enumerate(s_all):
sim_ps = self._generate_data(lpost1, s, rng)
neg = True
if isinstance(lpost1, LogLikelihood):
sim_lpost1 = PSDLogLikelihood(sim_ps.freq, sim_ps.power, model=lpost1.model)
sim_lpost2 = PSDLogLikelihood(sim_ps.freq, sim_ps.power, model=lpost2.model, m=sim_ps.m)
max_post = False
else:
sim_lpost1 = PSDPosterior(sim_ps.freq, sim_ps.power, lpost1.model, m=sim_ps.m)
sim_lpost1.logprior = lpost1.logprior
sim_lpost2 = PSDPosterior(sim_ps.freq, sim_ps.power, lpost2.model, m=sim_ps.m)
sim_lpost2.logprior = lpost2.logprior
max_post = True
parest_sim = PSDParEst(sim_ps, max_post=max_post, fitmethod=self.fitmethod)
try:
(lrt_sim[i], _, _) = parest_sim.compute_lrt(sim_lpost1, t1, sim_lpost2, t2, neg=neg, max_post=max_post)
except RuntimeError:
logging.warning('Fitting was unsuccessful. Skipping this simulation!')
continue
return lrt_sim | def simulate_lrts(self, s_all, lpost1, t1, lpost2, t2, seed=None):
'\n Simulate likelihood ratios for two given models based on MCMC samples\n for the simpler model (i.e. the null hypothesis).\n\n Parameters\n ----------\n s_all : numpy.ndarray of shape ``(nsamples, lpost1.npar)``\n An array with MCMC samples derived from the null hypothesis model in\n ``lpost1``. Its second dimension must match the number of free\n parameters in ``lpost1.model``.\n\n lpost1 : :class:`LogLikelihood` or :class:`Posterior` subclass object\n Object containing the null hypothesis model\n\n t1 : iterable of length ``lpost1.npar``\n A starting guess for fitting the model in ``lpost1``\n\n lpost2 : :class:`LogLikelihood` or :class:`Posterior` subclass object\n Object containing the alternative hypothesis model\n\n t2 : iterable of length ``lpost2.npar``\n A starting guess for fitting the model in ``lpost2``\n\n max_post : bool, optional, default ``True``\n If ``True``, then ``lpost1`` and ``lpost2`` should be :class:`Posterior` subclass\n objects; if ``False``, then ``lpost1`` and ``lpost2`` should be\n :class:`LogLikelihood` subclass objects\n\n seed : int, optional default ``None``\n A seed to initialize the ``numpy.random.RandomState`` object to be\n passed on to ``_generate_data``. Useful for producing exactly\n reproducible results\n\n Returns\n -------\n lrt_sim : numpy.ndarray\n An array with the simulated likelihood ratios for the simulated\n data\n '
assert (lpost1.__class__ == lpost2.__class__), 'Both LogLikelihood or Posterior objects must be of the same class!'
nsim = s_all.shape[0]
lrt_sim = np.zeros(nsim)
rng = np.random.RandomState(seed)
for (i, s) in enumerate(s_all):
sim_ps = self._generate_data(lpost1, s, rng)
neg = True
if isinstance(lpost1, LogLikelihood):
sim_lpost1 = PSDLogLikelihood(sim_ps.freq, sim_ps.power, model=lpost1.model)
sim_lpost2 = PSDLogLikelihood(sim_ps.freq, sim_ps.power, model=lpost2.model, m=sim_ps.m)
max_post = False
else:
sim_lpost1 = PSDPosterior(sim_ps.freq, sim_ps.power, lpost1.model, m=sim_ps.m)
sim_lpost1.logprior = lpost1.logprior
sim_lpost2 = PSDPosterior(sim_ps.freq, sim_ps.power, lpost2.model, m=sim_ps.m)
sim_lpost2.logprior = lpost2.logprior
max_post = True
parest_sim = PSDParEst(sim_ps, max_post=max_post, fitmethod=self.fitmethod)
try:
(lrt_sim[i], _, _) = parest_sim.compute_lrt(sim_lpost1, t1, sim_lpost2, t2, neg=neg, max_post=max_post)
except RuntimeError:
logging.warning('Fitting was unsuccessful. Skipping this simulation!')
continue
return lrt_sim<|docstring|>Simulate likelihood ratios for two given models based on MCMC samples
for the simpler model (i.e. the null hypothesis).
Parameters
----------
s_all : numpy.ndarray of shape ``(nsamples, lpost1.npar)``
An array with MCMC samples derived from the null hypothesis model in
``lpost1``. Its second dimension must match the number of free
parameters in ``lpost1.model``.
lpost1 : :class:`LogLikelihood` or :class:`Posterior` subclass object
Object containing the null hypothesis model
t1 : iterable of length ``lpost1.npar``
A starting guess for fitting the model in ``lpost1``
lpost2 : :class:`LogLikelihood` or :class:`Posterior` subclass object
Object containing the alternative hypothesis model
t2 : iterable of length ``lpost2.npar``
A starting guess for fitting the model in ``lpost2``
max_post : bool, optional, default ``True``
If ``True``, then ``lpost1`` and ``lpost2`` should be :class:`Posterior` subclass
objects; if ``False``, then ``lpost1`` and ``lpost2`` should be
:class:`LogLikelihood` subclass objects
seed : int, optional default ``None``
A seed to initialize the ``numpy.random.RandomState`` object to be
passed on to ``_generate_data``. Useful for producing exactly
reproducible results
Returns
-------
lrt_sim : numpy.ndarray
An array with the simulated likelihood ratios for the simulated
data<|endoftext|> |
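Once simulate_lrts has produced a distribution of likelihood ratios under the null hypothesis, the observed ratio is compared against it to obtain a posterior predictive p-value: the fraction of simulated ratios that reach or exceed the observed one. A small sketch of that comparison is below; it is an assumed stand-in for the internal _compute_pvalue helper, and the chi-squared draws merely imitate the output of simulate_lrts for demonstration.

import numpy as np

def empirical_pvalue(obs_stat, sim_stats):
    # Fraction of simulated test statistics at or above the observed value.
    sim_stats = np.asarray(sim_stats)
    return np.sum(sim_stats >= obs_stat) / float(sim_stats.size)

rng = np.random.RandomState(0)
lrt_sim = rng.chisquare(1, size=1000)   # stand-in for simulate_lrts output
lrt_obs = 12.3                          # illustrative observed likelihood ratio
print(empirical_pvalue(lrt_obs, lrt_sim))

Note that with 1000 simulations the smallest p-value that can be resolved this way is 0.001, which is why the nsim argument of the calibration methods sets the resolution of the final result.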
a2887d7aeb0a90a7b59b7759eaa19d965c9f5a3b44071fc7bc501309eb2fca98 | def calibrate_highest_outlier(self, lpost, t0, sample=None, max_post=False, nsim=1000, niter=200, nwalkers=500, burnin=200, namestr='test', seed=None):
'\n Calibrate the highest outlier in a data set using MCMC-simulated\n power spectra.\n\n In short, the procedure does a MAP fit to the data, computes the\n statistic\n\n .. math::\n\n \\max{(T_R = 2(\\mathrm{data}/\\mathrm{model}))}\n\n and then does an MCMC run using the data and the model, or generates parameter samples\n from the likelihood distribution using the derived covariance in a Maximum Likelihood\n fit.\n From the (posterior) samples, it generates fake power spectra. Each fake spectrum is fit\n in the same way as the data, and the highest data/model outlier extracted as for the data.\n The observed value of :math:`T_R` can then be directly compared to the simulated\n distribution of :math:`T_R` values in order to derive a p-value of the null\n hypothesis that the observed :math:`T_R` is compatible with being generated by\n noise.\n\n Parameters\n ----------\n lpost : :class:`stingray.modeling.PSDPosterior` object\n An instance of class :class:`stingray.modeling.PSDPosterior` that defines the\n function to be minimized (either in ``loglikelihood`` or ``logposterior``)\n\n t0 : {list | numpy.ndarray}\n List/array with set of initial parameters\n\n sample : :class:`SamplingResults` instance, optional, default ``None``\n If a sampler has already been run, the :class:`SamplingResults` instance can be\n fed into this method here, otherwise this method will run a sampler\n automatically\n\n max_post: bool, optional, default ``False``\n If ``True``, do MAP fits on the power spectrum to find the highest data/model outlier\n Otherwise, do a Maximum Likelihood fit. If ``True``, the simulated power spectra will\n be generated from an MCMC run, otherwise the method will employ the approximated\n covariance matrix for the parameters derived from the likelihood surface to generate\n samples from that likelihood function.\n\n nsim : int, optional, default ``1000``\n Number of fake power spectra to simulate from the posterior sample. Note that this\n number sets the resolution of the resulting p-value. For ``nsim=1000``, the highest\n resolution that can be achieved is :math:`10^{-3}`.\n\n niter : int, optional, default 200\n If ``sample`` is ``None``, this variable will be used to set the number of steps in the\n MCMC procedure *after* burn-in.\n\n nwalkers : int, optional, default 500\n If ``sample`` is ``None``, this variable will be used to set the number of MCMC chains\n run in parallel in the sampler.\n\n burnin : int, optional, default 200\n If ``sample`` is ``None``, this variable will be used to set the number of burn-in steps\n to be discarded in the initial phase of the MCMC run\n\n namestr : str, optional, default ``test``\n A string to be used for storing MCMC output and plots to disk\n\n seed : int, optional, default ``None``\n An optional number to seed the random number generator with, for reproducibility of\n the results obtained with this method.\n\n Returns\n -------\n pval : float\n The p-value that the highest data/model outlier is produced by random noise, calibrated\n using simulated power spectra from an MCMC run.\n\n References\n ----------\n For more details on the procedure employed here, see\n\n * Vaughan, 2010: https://arxiv.org/abs/0910.2706\n * Huppenkothen et al, 2013: https://arxiv.org/abs/1212.1011\n '
res = self.fit(lpost, t0, neg=True)
rng = np.random.RandomState(seed)
(out_high, _, _) = self._compute_highest_outlier(lpost, res)
if (not max_post):
mvn = scipy.stats.multivariate_normal(mean=res.p_opt, cov=res.cov, seed=seed)
if (lpost.npar == 1):
s_all = np.atleast_2d(mvn.rvs(size=nsim)).T
else:
s_all = mvn.rvs(size=nsim)
else:
if (sample is None):
sample = self.sample(lpost, res.p_opt, cov=res.cov, nwalkers=nwalkers, niter=niter, burnin=burnin, namestr=namestr)
s_all = sample.samples[rng.choice(sample.samples.shape[0], nsim, replace=False)]
out_high_sim = self.simulate_highest_outlier(s_all, lpost, t0, max_post=max_post, seed=seed)
pval = ParameterEstimation._compute_pvalue(out_high, out_high_sim)
return pval | Calibrate the highest outlier in a data set using MCMC-simulated
power spectra.
In short, the procedure does a MAP fit to the data, computes the
statistic
.. math::
\max{(T_R = 2(\mathrm{data}/\mathrm{model}))}
and then does an MCMC run using the data and the model, or generates parameter samples
from the likelihood distribution using the derived covariance in a Maximum Likelihood
fit.
From the (posterior) samples, it generates fake power spectra. Each fake spectrum is fit
in the same way as the data, and the highest data/model outlier extracted as for the data.
The observed value of :math:`T_R` can then be directly compared to the simulated
distribution of :math:`T_R` values in order to derive a p-value of the null
hypothesis that the observed :math:`T_R` is compatible with being generated by
noise.
Parameters
----------
lpost : :class:`stingray.modeling.PSDPosterior` object
An instance of class :class:`stingray.modeling.PSDPosterior` that defines the
function to be minimized (either in ``loglikelihood`` or ``logposterior``)
t0 : {list | numpy.ndarray}
List/array with set of initial parameters
sample : :class:`SamplingResults` instance, optional, default ``None``
If a sampler has already been run, the :class:`SamplingResults` instance can be
fed into this method here, otherwise this method will run a sampler
automatically
max_post: bool, optional, default ``False``
If ``True``, do MAP fits on the power spectrum to find the highest data/model outlier
Otherwise, do a Maximum Likelihood fit. If ``True``, the simulated power spectra will
be generated from an MCMC run, otherwise the method will employ the approximated
covariance matrix for the parameters derived from the likelihood surface to generate
samples from that likelihood function.
nsim : int, optional, default ``1000``
Number of fake power spectra to simulate from the posterior sample. Note that this
number sets the resolution of the resulting p-value. For ``nsim=1000``, the highest
resolution that can be achieved is :math:`10^{-3}`.
niter : int, optional, default 200
If ``sample`` is ``None``, this variable will be used to set the number of steps in the
MCMC procedure *after* burn-in.
nwalkers : int, optional, default 500
If ``sample`` is ``None``, this variable will be used to set the number of MCMC chains
run in parallel in the sampler.
burnin : int, optional, default 200
If ``sample`` is ``None``, this variable will be used to set the number of burn-in steps
to be discarded in the initial phase of the MCMC run
namestr : str, optional, default ``test``
A string to be used for storing MCMC output and plots to disk
seed : int, optional, default ``None``
An optional number to seed the random number generator with, for reproducibility of
the results obtained with this method.
Returns
-------
pval : float
The p-value that the highest data/model outlier is produced by random noise, calibrated
using simulated power spectra from an MCMC run.
References
----------
For more details on the procedure employed here, see
* Vaughan, 2010: https://arxiv.org/abs/0910.2706
* Huppenkothen et al, 2013: https://arxiv.org/abs/1212.1011 | stingray/modeling/parameterestimation.py | calibrate_highest_outlier | nimeshvashistha/stingray | 133 | python | def calibrate_highest_outlier(self, lpost, t0, sample=None, max_post=False, nsim=1000, niter=200, nwalkers=500, burnin=200, namestr='test', seed=None):
'\n Calibrate the highest outlier in a data set using MCMC-simulated\n power spectra.\n\n In short, the procedure does a MAP fit to the data, computes the\n statistic\n\n .. math::\n\n \\max{(T_R = 2(\\mathrm{data}/\\mathrm{model}))}\n\n and then does an MCMC run using the data and the model, or generates parameter samples\n from the likelihood distribution using the derived covariance in a Maximum Likelihood\n fit.\n From the (posterior) samples, it generates fake power spectra. Each fake spectrum is fit\n in the same way as the data, and the highest data/model outlier extracted as for the data.\n The observed value of :math:`T_R` can then be directly compared to the simulated\n distribution of :math:`T_R` values in order to derive a p-value of the null\n hypothesis that the observed :math:`T_R` is compatible with being generated by\n noise.\n\n Parameters\n ----------\n lpost : :class:`stingray.modeling.PSDPosterior` object\n An instance of class :class:`stingray.modeling.PSDPosterior` that defines the\n function to be minimized (either in ``loglikelihood`` or ``logposterior``)\n\n t0 : {list | numpy.ndarray}\n List/array with set of initial parameters\n\n sample : :class:`SamplingResults` instance, optional, default ``None``\n If a sampler has already been run, the :class:`SamplingResults` instance can be\n fed into this method here, otherwise this method will run a sampler\n automatically\n\n max_post: bool, optional, default ``False``\n If ``True``, do MAP fits on the power spectrum to find the highest data/model outlier\n Otherwise, do a Maximum Likelihood fit. If ``True``, the simulated power spectra will\n be generated from an MCMC run, otherwise the method will employ the approximated\n covariance matrix for the parameters derived from the likelihood surface to generate\n samples from that likelihood function.\n\n nsim : int, optional, default ``1000``\n Number of fake power spectra to simulate from the posterior sample. Note that this\n number sets the resolution of the resulting p-value. For ``nsim=1000``, the highest\n resolution that can be achieved is :math:`10^{-3}`.\n\n niter : int, optional, default 200\n If ``sample`` is ``None``, this variable will be used to set the number of steps in the\n MCMC procedure *after* burn-in.\n\n nwalkers : int, optional, default 500\n If ``sample`` is ``None``, this variable will be used to set the number of MCMC chains\n run in parallel in the sampler.\n\n burnin : int, optional, default 200\n If ``sample`` is ``None``, this variable will be used to set the number of burn-in steps\n to be discarded in the initial phase of the MCMC run\n\n namestr : str, optional, default ``test``\n A string to be used for storing MCMC output and plots to disk\n\n seed : int, optional, default ``None``\n An optional number to seed the random number generator with, for reproducibility of\n the results obtained with this method.\n\n Returns\n -------\n pval : float\n The p-value that the highest data/model outlier is produced by random noise, calibrated\n using simulated power spectra from an MCMC run.\n\n References\n ----------\n For more details on the procedure employed here, see\n\n * Vaughan, 2010: https://arxiv.org/abs/0910.2706\n * Huppenkothen et al, 2013: https://arxiv.org/abs/1212.1011\n '
res = self.fit(lpost, t0, neg=True)
rng = np.random.RandomState(seed)
(out_high, _, _) = self._compute_highest_outlier(lpost, res)
if (not max_post):
mvn = scipy.stats.multivariate_normal(mean=res.p_opt, cov=res.cov, seed=seed)
if (lpost.npar == 1):
s_all = np.atleast_2d(mvn.rvs(size=nsim)).T
else:
s_all = mvn.rvs(size=nsim)
else:
if (sample is None):
sample = self.sample(lpost, res.p_opt, cov=res.cov, nwalkers=nwalkers, niter=niter, burnin=burnin, namestr=namestr)
s_all = sample.samples[rng.choice(sample.samples.shape[0], nsim, replace=False)]
out_high_sim = self.simulate_highest_outlier(s_all, lpost, t0, max_post=max_post, seed=seed)
pval = ParameterEstimation._compute_pvalue(out_high, out_high_sim)
return pval | def calibrate_highest_outlier(self, lpost, t0, sample=None, max_post=False, nsim=1000, niter=200, nwalkers=500, burnin=200, namestr='test', seed=None):
'\n Calibrate the highest outlier in a data set using MCMC-simulated\n power spectra.\n\n In short, the procedure does a MAP fit to the data, computes the\n statistic\n\n .. math::\n\n \\max{(T_R = 2(\\mathrm{data}/\\mathrm{model}))}\n\n and then does an MCMC run using the data and the model, or generates parameter samples\n from the likelihood distribution using the derived covariance in a Maximum Likelihood\n fit.\n From the (posterior) samples, it generates fake power spectra. Each fake spectrum is fit\n in the same way as the data, and the highest data/model outlier extracted as for the data.\n The observed value of :math:`T_R` can then be directly compared to the simulated\n distribution of :math:`T_R` values in order to derive a p-value of the null\n hypothesis that the observed :math:`T_R` is compatible with being generated by\n noise.\n\n Parameters\n ----------\n lpost : :class:`stingray.modeling.PSDPosterior` object\n An instance of class :class:`stingray.modeling.PSDPosterior` that defines the\n function to be minimized (either in ``loglikelihood`` or ``logposterior``)\n\n t0 : {list | numpy.ndarray}\n List/array with set of initial parameters\n\n sample : :class:`SamplingResults` instance, optional, default ``None``\n If a sampler has already been run, the :class:`SamplingResults` instance can be\n fed into this method here, otherwise this method will run a sampler\n automatically\n\n max_post: bool, optional, default ``False``\n If ``True``, do MAP fits on the power spectrum to find the highest data/model outlier\n Otherwise, do a Maximum Likelihood fit. If ``True``, the simulated power spectra will\n be generated from an MCMC run, otherwise the method will employ the approximated\n covariance matrix for the parameters derived from the likelihood surface to generate\n samples from that likelihood function.\n\n nsim : int, optional, default ``1000``\n Number of fake power spectra to simulate from the posterior sample. Note that this\n number sets the resolution of the resulting p-value. For ``nsim=1000``, the highest\n resolution that can be achieved is :math:`10^{-3}`.\n\n niter : int, optional, default 200\n If ``sample`` is ``None``, this variable will be used to set the number of steps in the\n MCMC procedure *after* burn-in.\n\n nwalkers : int, optional, default 500\n If ``sample`` is ``None``, this variable will be used to set the number of MCMC chains\n run in parallel in the sampler.\n\n burnin : int, optional, default 200\n If ``sample`` is ``None``, this variable will be used to set the number of burn-in steps\n to be discarded in the initial phase of the MCMC run\n\n namestr : str, optional, default ``test``\n A string to be used for storing MCMC output and plots to disk\n\n seed : int, optional, default ``None``\n An optional number to seed the random number generator with, for reproducibility of\n the results obtained with this method.\n\n Returns\n -------\n pval : float\n The p-value that the highest data/model outlier is produced by random noise, calibrated\n using simulated power spectra from an MCMC run.\n\n References\n ----------\n For more details on the procedure employed here, see\n\n * Vaughan, 2010: https://arxiv.org/abs/0910.2706\n * Huppenkothen et al, 2013: https://arxiv.org/abs/1212.1011\n '
res = self.fit(lpost, t0, neg=True)
rng = np.random.RandomState(seed)
(out_high, _, _) = self._compute_highest_outlier(lpost, res)
if (not max_post):
mvn = scipy.stats.multivariate_normal(mean=res.p_opt, cov=res.cov, seed=seed)
if (lpost.npar == 1):
s_all = np.atleast_2d(mvn.rvs(size=nsim)).T
else:
s_all = mvn.rvs(size=nsim)
else:
if (sample is None):
sample = self.sample(lpost, res.p_opt, cov=res.cov, nwalkers=nwalkers, niter=niter, burnin=burnin, namestr=namestr)
s_all = sample.samples[rng.choice(sample.samples.shape[0], nsim, replace=False)]
out_high_sim = self.simulate_highest_outlier(s_all, lpost, t0, max_post=max_post, seed=seed)
pval = ParameterEstimation._compute_pvalue(out_high, out_high_sim)
return pval<|docstring|>Calibrate the highest outlier in a data set using MCMC-simulated
power spectra.
In short, the procedure does a MAP fit to the data, computes the
statistic
.. math::
\max{(T_R = 2(\mathrm{data}/\mathrm{model}))}
and then does an MCMC run using the data and the model, or generates parameter samples
from the likelihood distribution using the derived covariance in a Maximum Likelihood
fit.
From the (posterior) samples, it generates fake power spectra. Each fake spectrum is fit
in the same way as the data, and the highest data/model outlier extracted as for the data.
The observed value of :math:`T_R` can then be directly compared to the simulated
distribution of :math:`T_R` values in order to derive a p-value of the null
hypothesis that the observed :math:`T_R` is compatible with being generated by
noise.
Parameters
----------
lpost : :class:`stingray.modeling.PSDPosterior` object
An instance of class :class:`stingray.modeling.PSDPosterior` that defines the
function to be minimized (either in ``loglikelihood`` or ``logposterior``)
t0 : {list | numpy.ndarray}
List/array with set of initial parameters
sample : :class:`SamplingResults` instance, optional, default ``None``
If a sampler has already been run, the :class:`SamplingResults` instance can be
fed into this method here, otherwise this method will run a sampler
automatically
max_post: bool, optional, default ``False``
If ``True``, do MAP fits on the power spectrum to find the highest data/model outlier
Otherwise, do a Maximum Likelihood fit. If ``True``, the simulated power spectra will
be generated from an MCMC run, otherwise the method will employ the approximated
covariance matrix for the parameters derived from the likelihood surface to generate
samples from that likelihood function.
nsim : int, optional, default ``1000``
Number of fake power spectra to simulate from the posterior sample. Note that this
number sets the resolution of the resulting p-value. For ``nsim=1000``, the highest
resolution that can be achieved is :math:`10^{-3}`.
niter : int, optional, default 200
If ``sample`` is ``None``, this variable will be used to set the number of steps in the
MCMC procedure *after* burn-in.
nwalkers : int, optional, default 500
If ``sample`` is ``None``, this variable will be used to set the number of MCMC chains
run in parallel in the sampler.
burnin : int, optional, default 200
If ``sample`` is ``None``, this variable will be used to set the number of burn-in steps
to be discarded in the initial phase of the MCMC run
namestr : str, optional, default ``test``
A string to be used for storing MCMC output and plots to disk
seed : int, optional, default ``None``
An optional number to seed the random number generator with, for reproducibility of
the results obtained with this method.
Returns
-------
pval : float
The p-value that the highest data/model outlier is produced by random noise, calibrated
using simulated power spectra from an MCMC run.
References
----------
For more details on the procedure employed here, see
* Vaughan, 2010: https://arxiv.org/abs/0910.2706
* Huppenkothen et al, 2013: https://arxiv.org/abs/1212.1011<|endoftext|> |
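The quantity being calibrated here is the highest data-to-model outlier, T_R = max(2 * data / model) taken over all frequencies. A minimal sketch of that statistic on synthetic stand-in arrays (not a real Powerspectrum) follows; calibrate_highest_outlier computes the same quantity internally for the data and for each simulated spectrum, then reports the fraction of simulations that exceed the observed value.

import numpy as np

def highest_outlier_statistic(data_powers, model_powers):
    # T_R = max over frequencies of 2 * data/model, the statistic whose noise
    # distribution the calibration procedure estimates from fake spectra.
    data_powers = np.asarray(data_powers, dtype=float)
    model_powers = np.asarray(model_powers, dtype=float)
    return np.max(2.0 * data_powers / model_powers)

rng = np.random.RandomState(1)
model_powers = np.full(500, 2.0)                          # flat model, illustrative
data_powers = model_powers * rng.chisquare(2, 500) / 2.0  # one noisy realisation
print(highest_outlier_statistic(data_powers, model_powers))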