body_hash | body | docstring | path | name | repository_name | repository_stars | lang | body_without_docstring | unified
---|---|---|---|---|---|---|---|---|---|
7f0d80f98aed242cf0cb31a0826159b866a0114b96d4be5bf8abc7d50c77ef4a | def testExpandUsersVariablePath(self):
'Tests the ExpandUsersVariablePath function.'
user_account_artifact1 = artifacts.UserAccountArtifact(path_separator='\\', user_directory='C:\\Users\\Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(path_separator='\\', user_directory='%SystemDrive%\\Users\\Test2', username='Test2')
user_accounts = [user_account_artifact1, user_account_artifact2]
path = '%%users.appdata%%\\Microsoft\\Windows\\Recent'
expanded_paths = path_helper.PathHelper.ExpandUsersVariablePath(path, '\\', user_accounts)
expected_expanded_paths = ['\\Users\\Test1\\AppData\\Roaming\\Microsoft\\Windows\\Recent', '\\Users\\Test1\\Application Data\\Microsoft\\Windows\\Recent', '\\Users\\Test2\\AppData\\Roaming\\Microsoft\\Windows\\Recent', '\\Users\\Test2\\Application Data\\Microsoft\\Windows\\Recent']
self.assertEqual(sorted(expanded_paths), expected_expanded_paths) | Tests the ExpandUsersVariablePath function. | tests/engine/path_helper.py | testExpandUsersVariablePath | roshanmaskey/plaso | 1,253 | python | def testExpandUsersVariablePath(self):
user_account_artifact1 = artifacts.UserAccountArtifact(path_separator='\\', user_directory='C:\\Users\\Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(path_separator='\\', user_directory='%SystemDrive%\\Users\\Test2', username='Test2')
user_accounts = [user_account_artifact1, user_account_artifact2]
path = '%%users.appdata%%\\Microsoft\\Windows\\Recent'
expanded_paths = path_helper.PathHelper.ExpandUsersVariablePath(path, '\\', user_accounts)
expected_expanded_paths = ['\\Users\\Test1\\AppData\\Roaming\\Microsoft\\Windows\\Recent', '\\Users\\Test1\\Application Data\\Microsoft\\Windows\\Recent', '\\Users\\Test2\\AppData\\Roaming\\Microsoft\\Windows\\Recent', '\\Users\\Test2\\Application Data\\Microsoft\\Windows\\Recent']
self.assertEqual(sorted(expanded_paths), expected_expanded_paths) | def testExpandUsersVariablePath(self):
user_account_artifact1 = artifacts.UserAccountArtifact(path_separator='\\', user_directory='C:\\Users\\Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(path_separator='\\', user_directory='%SystemDrive%\\Users\\Test2', username='Test2')
user_accounts = [user_account_artifact1, user_account_artifact2]
path = '%%users.appdata%%\\Microsoft\\Windows\\Recent'
expanded_paths = path_helper.PathHelper.ExpandUsersVariablePath(path, '\\', user_accounts)
expected_expanded_paths = ['\\Users\\Test1\\AppData\\Roaming\\Microsoft\\Windows\\Recent', '\\Users\\Test1\\Application Data\\Microsoft\\Windows\\Recent', '\\Users\\Test2\\AppData\\Roaming\\Microsoft\\Windows\\Recent', '\\Users\\Test2\\Application Data\\Microsoft\\Windows\\Recent']
self.assertEqual(sorted(expanded_paths), expected_expanded_paths)<|docstring|>Tests the ExpandUsersVariablePath function.<|endoftext|> |
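The row above tests plaso's `PathHelper.ExpandUsersVariablePath`. As a rough illustration of the behavior the expected values encode — each user directory, with its drive prefix stripped, combined with both the Vista-era `AppData\Roaming` and the XP-era `Application Data` suffixes — here is a minimal, self-contained sketch; the function name and suffix table are assumptions for illustration, not plaso's internals:

```python
# Minimal sketch of the expansion the test expects; the function name and
# APPDATA_SUFFIXES table are illustrative, not plaso's actual internals.
APPDATA_SUFFIXES = ('AppData\\Roaming', 'Application Data')


def expand_users_appdata(path, path_separator, user_directories):
    """Expands a path starting with %%users.appdata%% per user directory."""
    prefix = '%%users.appdata%%'
    if not path.startswith(prefix):
        return [path]
    remainder = path[len(prefix):]
    expanded = []
    for user_directory in user_directories:
        # Drive prefixes are stripped, matching the expected '\Users\...'
        # results in the test.
        for drive_prefix in ('C:', '%SystemDrive%'):
            if user_directory.startswith(drive_prefix):
                user_directory = user_directory[len(drive_prefix):]
        for suffix in APPDATA_SUFFIXES:
            expanded.append(
                path_separator.join((user_directory, suffix)) + remainder)
    return sorted(expanded)


print(expand_users_appdata(
    '%%users.appdata%%\\Microsoft\\Windows\\Recent', '\\',
    ['C:\\Users\\Test1', '%SystemDrive%\\Users\\Test2']))
```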
ee69027f21e879ad88ae0981b51f16815fabfea5cb8eb1608a72e48c96a2b744 | def testExpandWindowsPath(self):
'Tests the ExpandWindowsPath function.'
environment_variables = []
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='SystemRoot', value='C:\\Windows')
environment_variables.append(environment_variable)
expanded_path = path_helper.PathHelper.ExpandWindowsPath('%SystemRoot%\\System32', environment_variables)
self.assertEqual(expanded_path, '\\Windows\\System32') | Tests the ExpandWindowsPath function. | tests/engine/path_helper.py | testExpandWindowsPath | roshanmaskey/plaso | 1,253 | python | def testExpandWindowsPath(self):
environment_variables = []
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='SystemRoot', value='C:\\Windows')
environment_variables.append(environment_variable)
expanded_path = path_helper.PathHelper.ExpandWindowsPath('%SystemRoot%\\System32', environment_variables)
self.assertEqual(expanded_path, '\\Windows\\System32') | def testExpandWindowsPath(self):
environment_variables = []
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='SystemRoot', value='C:\\Windows')
environment_variables.append(environment_variable)
expanded_path = path_helper.PathHelper.ExpandWindowsPath('%SystemRoot%\\System32', environment_variables)
self.assertEqual(expanded_path, '\\Windows\\System32')<|docstring|>Tests the ExpandWindowsPath function.<|endoftext|> |
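A comparable sketch for the `%SystemRoot%` expansion above — case-insensitive variable lookup with the drive letter dropped, as the expected `\Windows\System32` result implies. The regex approach is an assumption for illustration only:

```python
# Illustrative case-insensitive %VariableName% expansion; not plaso's code.
import re


def expand_windows_path(path, variables):
    """Expands %Name% from (name, value) pairs, dropping drive letters."""
    lookup = {name.lower(): value for name, value in variables}

    def _replace(match):
        value = lookup.get(match.group(1).lower(), match.group(0))
        # The expected result '\Windows\System32' drops the 'C:' prefix.
        return re.sub(r'^[A-Za-z]:', '', value)

    return re.sub(r'%(\w+)%', _replace, path)


print(expand_windows_path(
    '%SystemRoot%\\System32', [('SystemRoot', 'C:\\Windows')]))
# prints: \Windows\System32
```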
e281c7baf2c116a1ed2f68ac514265791c9ef741f9c285b2e76d7d855e47da31 | def testExpandWindowsPathSegments(self):
'Tests the ExpandWindowsPathSegments function.'
environment_variables = []
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='allusersappdata', value='C:\\Documents and Settings\\All Users\\Application Data')
environment_variables.append(environment_variable)
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='allusersprofile', value='C:\\Documents and Settings\\All Users')
environment_variables.append(environment_variable)
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='SystemRoot', value='C:\\Windows')
environment_variables.append(environment_variable)
expected_expanded_path_segment = ['', 'Documents and Settings', 'All Users', 'Application Data', 'Apache Software Foundation']
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%AllUsersAppData%', 'Apache Software Foundation'], environment_variables)
self.assertEqual(expanded_path_segment, expected_expanded_path_segment)
expected_expanded_path_segment = ['', 'Documents and Settings', 'All Users', 'Start Menu', 'Programs', 'Startup']
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%AllUsersProfile%', 'Start Menu', 'Programs', 'Startup'], environment_variables)
self.assertEqual(expanded_path_segment, expected_expanded_path_segment)
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%SystemRoot%', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['', 'Windows', 'System32'])
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['C:', 'Windows', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['', 'Windows', 'System32'])
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%SystemRoot%', 'System32'], None)
self.assertEqual(expanded_path_segment, ['%SystemRoot%', 'System32'])
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%Bogus%', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['%Bogus%', 'System32'])
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%%environ_systemroot%%', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['', 'Windows', 'System32'])
environment_variables = []
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='SystemRoot', value=('bogus', 0))
environment_variables.append(environment_variable)
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%SystemRoot%', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['%SystemRoot%', 'System32']) | Tests the ExpandWindowsPathSegments function. | tests/engine/path_helper.py | testExpandWindowsPathSegments | roshanmaskey/plaso | 1,253 | python | def testExpandWindowsPathSegments(self):
environment_variables = []
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='allusersappdata', value='C:\\Documents and Settings\\All Users\\Application Data')
environment_variables.append(environment_variable)
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='allusersprofile', value='C:\\Documents and Settings\\All Users')
environment_variables.append(environment_variable)
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='SystemRoot', value='C:\\Windows')
environment_variables.append(environment_variable)
expected_expanded_path_segment = ['', 'Documents and Settings', 'All Users', 'Application Data', 'Apache Software Foundation']
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%AllUsersAppData%', 'Apache Software Foundation'], environment_variables)
self.assertEqual(expanded_path_segment, expected_expanded_path_segment)
expected_expanded_path_segment = ['', 'Documents and Settings', 'All Users', 'Start Menu', 'Programs', 'Startup']
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%AllUsersProfile%', 'Start Menu', 'Programs', 'Startup'], environment_variables)
self.assertEqual(expanded_path_segment, expected_expanded_path_segment)
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%SystemRoot%', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['', 'Windows', 'System32'])
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['C:', 'Windows', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['', 'Windows', 'System32'])
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%SystemRoot%', 'System32'], None)
self.assertEqual(expanded_path_segment, ['%SystemRoot%', 'System32'])
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%Bogus%', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['%Bogus%', 'System32'])
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%%environ_systemroot%%', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['', 'Windows', 'System32'])
environment_variables = []
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='SystemRoot', value=('bogus', 0))
environment_variables.append(environment_variable)
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%SystemRoot%', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['%SystemRoot%', 'System32']) | def testExpandWindowsPathSegments(self):
environment_variables = []
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='allusersappdata', value='C:\\Documents and Settings\\All Users\\Application Data')
environment_variables.append(environment_variable)
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='allusersprofile', value='C:\\Documents and Settings\\All Users')
environment_variables.append(environment_variable)
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='SystemRoot', value='C:\\Windows')
environment_variables.append(environment_variable)
expected_expanded_path_segment = ['', 'Documents and Settings', 'All Users', 'Application Data', 'Apache Software Foundation']
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%AllUsersAppData%', 'Apache Software Foundation'], environment_variables)
self.assertEqual(expanded_path_segment, expected_expanded_path_segment)
expected_expanded_path_segment = ['', 'Documents and Settings', 'All Users', 'Start Menu', 'Programs', 'Startup']
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%AllUsersProfile%', 'Start Menu', 'Programs', 'Startup'], environment_variables)
self.assertEqual(expanded_path_segment, expected_expanded_path_segment)
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%SystemRoot%', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['', 'Windows', 'System32'])
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['C:', 'Windows', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['', 'Windows', 'System32'])
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%SystemRoot%', 'System32'], None)
self.assertEqual(expanded_path_segment, ['%SystemRoot%', 'System32'])
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%Bogus%', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['%Bogus%', 'System32'])
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%%environ_systemroot%%', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['', 'Windows', 'System32'])
environment_variables = []
environment_variable = artifacts.EnvironmentVariableArtifact(case_sensitive=False, name='SystemRoot', value=('bogus', 0))
environment_variables.append(environment_variable)
expanded_path_segment = path_helper.PathHelper.ExpandWindowsPathSegments(['%SystemRoot%', 'System32'], environment_variables)
self.assertEqual(expanded_path_segment, ['%SystemRoot%', 'System32'])<|docstring|>Tests the ExpandWindowsPathSegments function.<|endoftext|> |
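The segment-oriented variant above adds several edge cases: a `%%environ_...%%` alias, unknown variables and non-string values passed through unchanged, and drive-letter segments collapsing to the leading empty segment. A compact sketch of that dispatch logic, under the assumption that environment variables expose `name` and `value` attributes:

```python
# Illustrative segment-level expansion; EnvVar stands in for
# artifacts.EnvironmentVariableArtifact and is an assumption.
import collections

EnvVar = collections.namedtuple('EnvVar', ['name', 'value'])


def expand_segments(segments, environment_variables):
    lookup = {}
    for variable in (environment_variables or []):
        if isinstance(variable.value, str):  # non-string values are skipped
            lookup[variable.name.lower()] = variable.value
    expanded = []
    for segment in segments:
        # '%SystemRoot%' and '%%environ_systemroot%%' resolve to the same key.
        name = segment.strip('%').lower().replace('environ_', '', 1)
        if segment.startswith('%') and name in lookup:
            value = lookup[name]
            if value[1:2] == ':':  # drop the drive letter before splitting
                value = value[2:]
            expanded.extend(value.split('\\'))
        elif segment[1:2] == ':':
            expanded.append('')  # a drive letter becomes the empty root
        else:
            expanded.append(segment)  # unknown variables pass through as-is
    return expanded


env = [EnvVar('SystemRoot', 'C:\\Windows')]
print(expand_segments(['%SystemRoot%', 'System32'], env))
# ['', 'Windows', 'System32']
print(expand_segments(['%Bogus%', 'System32'], env))
# ['%Bogus%', 'System32']
```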
009253553c27bcb38d454d3af3f8fc95b14fb16f7aa2f51ea2d0d759e976e787 | def testGetDisplayNameForPathSpec(self):
'Tests the GetDisplayNameForPathSpec function.'
test_path = self._GetTestFilePath(['syslog.gz'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
expected_display_name = 'OS:{0:s}'.format(test_path)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(os_path_spec)
self.assertEqual(display_name, expected_display_name)
gzip_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_GZIP, parent=os_path_spec)
expected_display_name = 'GZIP:{0:s}'.format(test_path)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(gzip_path_spec)
self.assertEqual(display_name, expected_display_name)
test_path = self._GetTestFilePath(['syslog.bz2'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_COMPRESSED_STREAM, compression_method=dfvfs_definitions.COMPRESSION_METHOD_BZIP2, parent=os_path_spec)
expected_display_name = 'BZIP2:{0:s}'.format(test_path)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(compressed_stream_path_spec)
self.assertEqual(display_name, expected_display_name)
test_path = self._GetTestFilePath(['syslog.xz'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_COMPRESSED_STREAM, compression_method=dfvfs_definitions.COMPRESSION_METHOD_XZ, parent=os_path_spec)
expected_display_name = 'XZ:{0:s}'.format(test_path)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(compressed_stream_path_spec)
self.assertEqual(display_name, expected_display_name)
test_path = self._GetTestFilePath(['vsstest.qcow2'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
vshadow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_VSHADOW, location='/vss2', store_index=1, parent=qcow_path_spec)
tsk_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_TSK, inode=35, location='/syslog.gz', parent=vshadow_path_spec)
expected_display_name = 'VSS2:TSK:/syslog.gz'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(tsk_path_spec)
self.assertEqual(display_name, expected_display_name)
expected_display_name = 'VSS2:TSK:C:/syslog.gz'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(tsk_path_spec, text_prepend='C:')
self.assertEqual(display_name, expected_display_name)
test_path = self._GetTestFilePath(['vsstest.qcow2'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
vshadow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_VSHADOW, location='/vss2', store_index=1, parent=qcow_path_spec)
ntfs_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_NTFS, mft_entry=35, location='\\syslog.gz', parent=vshadow_path_spec)
expected_display_name = 'VSS2:NTFS:\\syslog.gz'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(ntfs_path_spec)
self.assertEqual(display_name, expected_display_name)
expected_display_name = 'VSS2:NTFS:C:\\syslog.gz'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(ntfs_path_spec, text_prepend='C:')
self.assertEqual(display_name, expected_display_name)
tar_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_TAR, location='/ustar/umlauts-\udcc4\udcd6\udcdc\udce4\udcf6\udcfc\udcdf', parent=os_path_spec)
expected_display_name = 'TAR:/ustar/umlauts-\\xc4\\xd6\\xdc\\xe4\\xf6\\xfc\\xdf'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(tar_path_spec)
self.assertEqual(display_name, expected_display_name)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(None)
self.assertIsNone(display_name)
os_path_spec.location = None
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(qcow_path_spec)
self.assertIsNone(display_name) | Tests the GetDisplayNameForPathSpec function. | tests/engine/path_helper.py | testGetDisplayNameForPathSpec | roshanmaskey/plaso | 1,253 | python | def testGetDisplayNameForPathSpec(self):
test_path = self._GetTestFilePath(['syslog.gz'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
expected_display_name = 'OS:{0:s}'.format(test_path)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(os_path_spec)
self.assertEqual(display_name, expected_display_name)
gzip_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_GZIP, parent=os_path_spec)
expected_display_name = 'GZIP:{0:s}'.format(test_path)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(gzip_path_spec)
self.assertEqual(display_name, expected_display_name)
test_path = self._GetTestFilePath(['syslog.bz2'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_COMPRESSED_STREAM, compression_method=dfvfs_definitions.COMPRESSION_METHOD_BZIP2, parent=os_path_spec)
expected_display_name = 'BZIP2:{0:s}'.format(test_path)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(compressed_stream_path_spec)
self.assertEqual(display_name, expected_display_name)
test_path = self._GetTestFilePath(['syslog.xz'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_COMPRESSED_STREAM, compression_method=dfvfs_definitions.COMPRESSION_METHOD_XZ, parent=os_path_spec)
expected_display_name = 'XZ:{0:s}'.format(test_path)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(compressed_stream_path_spec)
self.assertEqual(display_name, expected_display_name)
test_path = self._GetTestFilePath(['vsstest.qcow2'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
vshadow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_VSHADOW, location='/vss2', store_index=1, parent=qcow_path_spec)
tsk_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_TSK, inode=35, location='/syslog.gz', parent=vshadow_path_spec)
expected_display_name = 'VSS2:TSK:/syslog.gz'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(tsk_path_spec)
self.assertEqual(display_name, expected_display_name)
expected_display_name = 'VSS2:TSK:C:/syslog.gz'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(tsk_path_spec, text_prepend='C:')
self.assertEqual(display_name, expected_display_name)
test_path = self._GetTestFilePath(['vsstest.qcow2'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
vshadow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_VSHADOW, location='/vss2', store_index=1, parent=qcow_path_spec)
ntfs_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_NTFS, mft_entry=35, location='\\syslog.gz', parent=vshadow_path_spec)
expected_display_name = 'VSS2:NTFS:\\syslog.gz'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(ntfs_path_spec)
self.assertEqual(display_name, expected_display_name)
expected_display_name = 'VSS2:NTFS:C:\\syslog.gz'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(ntfs_path_spec, text_prepend='C:')
self.assertEqual(display_name, expected_display_name)
tar_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_TAR, location='/ustar/umlauts-\udcc4\udcd6\udcdc\udce4\udcf6\udcfc\udcdf', parent=os_path_spec)
expected_display_name = 'TAR:/ustar/umlauts-\\xc4\\xd6\\xdc\\xe4\\xf6\\xfc\\xdf'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(tar_path_spec)
self.assertEqual(display_name, expected_display_name)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(None)
self.assertIsNone(display_name)
os_path_spec.location = None
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(qcow_path_spec)
self.assertIsNone(display_name) | def testGetDisplayNameForPathSpec(self):
test_path = self._GetTestFilePath(['syslog.gz'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
expected_display_name = 'OS:{0:s}'.format(test_path)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(os_path_spec)
self.assertEqual(display_name, expected_display_name)
gzip_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_GZIP, parent=os_path_spec)
expected_display_name = 'GZIP:{0:s}'.format(test_path)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(gzip_path_spec)
self.assertEqual(display_name, expected_display_name)
test_path = self._GetTestFilePath(['syslog.bz2'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_COMPRESSED_STREAM, compression_method=dfvfs_definitions.COMPRESSION_METHOD_BZIP2, parent=os_path_spec)
expected_display_name = 'BZIP2:{0:s}'.format(test_path)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(compressed_stream_path_spec)
self.assertEqual(display_name, expected_display_name)
test_path = self._GetTestFilePath(['syslog.xz'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_COMPRESSED_STREAM, compression_method=dfvfs_definitions.COMPRESSION_METHOD_XZ, parent=os_path_spec)
expected_display_name = 'XZ:{0:s}'.format(test_path)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(compressed_stream_path_spec)
self.assertEqual(display_name, expected_display_name)
test_path = self._GetTestFilePath(['vsstest.qcow2'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
vshadow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_VSHADOW, location='/vss2', store_index=1, parent=qcow_path_spec)
tsk_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_TSK, inode=35, location='/syslog.gz', parent=vshadow_path_spec)
expected_display_name = 'VSS2:TSK:/syslog.gz'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(tsk_path_spec)
self.assertEqual(display_name, expected_display_name)
expected_display_name = 'VSS2:TSK:C:/syslog.gz'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(tsk_path_spec, text_prepend='C:')
self.assertEqual(display_name, expected_display_name)
test_path = self._GetTestFilePath(['vsstest.qcow2'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
vshadow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_VSHADOW, location='/vss2', store_index=1, parent=qcow_path_spec)
ntfs_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_NTFS, mft_entry=35, location='\\syslog.gz', parent=vshadow_path_spec)
expected_display_name = 'VSS2:NTFS:\\syslog.gz'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(ntfs_path_spec)
self.assertEqual(display_name, expected_display_name)
expected_display_name = 'VSS2:NTFS:C:\\syslog.gz'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(ntfs_path_spec, text_prepend='C:')
self.assertEqual(display_name, expected_display_name)
tar_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_TAR, location='/ustar/umlauts-\udcc4\udcd6\udcdc\udce4\udcf6\udcfc\udcdf', parent=os_path_spec)
expected_display_name = 'TAR:/ustar/umlauts-\\xc4\\xd6\\xdc\\xe4\\xf6\\xfc\\xdf'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(tar_path_spec)
self.assertEqual(display_name, expected_display_name)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(None)
self.assertIsNone(display_name)
os_path_spec.location = None
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(qcow_path_spec)
self.assertIsNone(display_name)<|docstring|>Tests the GetDisplayNameForPathSpec function.<|endoftext|> |
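The display names asserted above (`VSS2:TSK:/syslog.gz` and friends) follow a pattern: an optional VSS store prefix from a parent shadow-copy spec, the innermost type indicator, an optional `text_prepend`, then the location. A simplified stand-in for the dfVFS path-spec chain, purely for illustration:

```python
# Simplified stand-ins for dfVFS path specs; field names are assumptions.
import dataclasses
from typing import Optional


@dataclasses.dataclass
class FakePathSpec:
    type_indicator: str
    location: Optional[str] = None
    parent: Optional['FakePathSpec'] = None


def display_name(path_spec, text_prepend=''):
    """Builds '[VSS<n>:]<TYPE>:<prepend><location>' for the innermost spec."""
    if path_spec is None or path_spec.location is None:
        return None
    prefix = ''
    if path_spec.parent and path_spec.parent.type_indicator == 'VSHADOW':
        # A '/vss2' store location becomes the 'VSS2:' prefix seen above.
        prefix = path_spec.parent.location.lstrip('/').upper() + ':'
    return '{0:s}{1:s}:{2:s}{3:s}'.format(
        prefix, path_spec.type_indicator, text_prepend, path_spec.location)


os_spec = FakePathSpec('OS', '/tmp/vsstest.qcow2')
qcow_spec = FakePathSpec('QCOW', parent=os_spec)
vss_spec = FakePathSpec('VSHADOW', '/vss2', parent=qcow_spec)
tsk_spec = FakePathSpec('TSK', '/syslog.gz', parent=vss_spec)
print(display_name(tsk_spec))                     # VSS2:TSK:/syslog.gz
print(display_name(tsk_spec, text_prepend='C:'))  # VSS2:TSK:C:/syslog.gz
```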
e6aaa2d70fe7af7a640f33efc81bf1a5a90137a846d15d0f206d12d4a601fcd5 | def testGetRelativePathForPathSpec(self):
'Tests the GetRelativePathForPathSpec function.'
test_path = self._GetTestFilePath(['syslog.gz'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(os_path_spec)
self.assertEqual(relative_path, test_path)
mount_path = self._GetTestFilePath([])
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(os_path_spec, mount_path=mount_path)
expected_relative_path = '{0:s}syslog.gz'.format(os.path.sep)
self.assertEqual(relative_path, expected_relative_path)
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(os_path_spec, mount_path='/bogus')
self.assertEqual(relative_path, test_path)
os_path_spec.data_stream = 'MYDATA'
expected_relative_path = '{0:s}:MYDATA'.format(test_path)
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(os_path_spec)
self.assertEqual(relative_path, expected_relative_path)
display_name = path_helper.PathHelper.GetRelativePathForPathSpec(None)
self.assertIsNone(display_name)
os_path_spec.location = None
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
display_name = path_helper.PathHelper.GetRelativePathForPathSpec(qcow_path_spec)
self.assertIsNone(display_name) | Tests the GetRelativePathForPathSpec function. | tests/engine/path_helper.py | testGetRelativePathForPathSpec | roshanmaskey/plaso | 1,253 | python | def testGetRelativePathForPathSpec(self):
test_path = self._GetTestFilePath(['syslog.gz'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(os_path_spec)
self.assertEqual(relative_path, test_path)
mount_path = self._GetTestFilePath([])
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(os_path_spec, mount_path=mount_path)
expected_relative_path = '{0:s}syslog.gz'.format(os.path.sep)
self.assertEqual(relative_path, expected_relative_path)
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(os_path_spec, mount_path='/bogus')
self.assertEqual(relative_path, test_path)
os_path_spec.data_stream = 'MYDATA'
expected_relative_path = '{0:s}:MYDATA'.format(test_path)
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(os_path_spec)
self.assertEqual(relative_path, expected_relative_path)
display_name = path_helper.PathHelper.GetRelativePathForPathSpec(None)
self.assertIsNone(display_name)
os_path_spec.location = None
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
display_name = path_helper.PathHelper.GetRelativePathForPathSpec(qcow_path_spec)
self.assertIsNone(display_name) | def testGetRelativePathForPathSpec(self):
test_path = self._GetTestFilePath(['syslog.gz'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(os_path_spec)
self.assertEqual(relative_path, test_path)
mount_path = self._GetTestFilePath([])
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(os_path_spec, mount_path=mount_path)
expected_relative_path = '{0:s}syslog.gz'.format(os.path.sep)
self.assertEqual(relative_path, expected_relative_path)
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(os_path_spec, mount_path='/bogus')
self.assertEqual(relative_path, test_path)
os_path_spec.data_stream = 'MYDATA'
expected_relative_path = '{0:s}:MYDATA'.format(test_path)
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(os_path_spec)
self.assertEqual(relative_path, expected_relative_path)
display_name = path_helper.PathHelper.GetRelativePathForPathSpec(None)
self.assertIsNone(display_name)
os_path_spec.location = None
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
display_name = path_helper.PathHelper.GetRelativePathForPathSpec(qcow_path_spec)
self.assertIsNone(display_name)<|docstring|>Tests the GetRelativePathForPathSpec function.<|endoftext|> |
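The relative-path behavior above reduces to three rules: strip a matching mount path, leave the path alone for a non-matching one, and suffix `:<data stream>` when a data stream name is set. A minimal sketch, with a plain location string standing in for the path spec:

```python
# Illustrative reduction of the relative-path rules exercised above;
# the function and parameter names mirror the test, not plaso's code.
def relative_path(location, mount_path=None, data_stream=None):
    if location is None:
        return None
    if mount_path and location.startswith(mount_path):
        location = location[len(mount_path):]
    if data_stream:
        location = '{0:s}:{1:s}'.format(location, data_stream)
    return location


print(relative_path('/data/test/syslog.gz', mount_path='/data/test'))
# /syslog.gz
print(relative_path('/data/test/syslog.gz', mount_path='/bogus'))
# /data/test/syslog.gz
print(relative_path('/data/test/syslog.gz', data_stream='MYDATA'))
# /data/test/syslog.gz:MYDATA
```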
1e89456f8ff1f61bdbd6f1b7a7b5858084bb0c8db76610403b60b6ec2ce1ca29 | def build_windows_subsystem(profile, make_program):
' The AutotoolsDeps can be used also in pure Makefiles, if the makefiles follow\n the Autotools conventions\n '
client = TestClient(path_with_spaces=False)
client.run('new hello/0.1 --template=cmake_lib')
os.rename(os.path.join(client.current_folder, 'test_package'), os.path.join(client.current_folder, 'test_package2'))
client.save({'profile': profile})
client.run('create . --profile=profile')
main = gen_function_cpp(name='main', includes=['hello'], calls=['hello'])
makefile = gen_makefile(apps=['app'])
conanfile = textwrap.dedent('\n from conans import ConanFile\n from conan.tools.gnu import AutotoolsToolchain, Autotools, AutotoolsDeps\n\n class TestConan(ConanFile):\n requires = "hello/0.1"\n settings = "os", "compiler", "arch", "build_type"\n exports_sources = "Makefile"\n generators = "AutotoolsDeps", "AutotoolsToolchain"\n\n def build(self):\n autotools = Autotools(self)\n autotools.make()\n ')
client.save({'app.cpp': main, 'Makefile': makefile, 'conanfile.py': conanfile, 'profile': profile}, clean_first=True)
client.run('install . --profile=profile')
cmd = environment_wrap_command(ConanFileMock(), ['conanbuildenv', 'conanautotoolstoolchain', 'conanautotoolsdeps'], make_program, cwd=client.current_folder)
client.run_command(cmd)
client.run_command('app')
check_exe_run(client.out, 'main', 'gcc', None, 'Release', 'x86_64', None)
assert ('hello/0.1: Hello World Release!' in client.out)
client.save({'app.cpp': gen_function_cpp(name='main', msg='main2', includes=['hello'], calls=['hello'])})
t = (time.time() + 1)
touch(os.path.join(client.current_folder, 'app.cpp'), (t, t))
client.run('build .')
client.run_command('app')
check_exe_run(client.out, 'main2', 'gcc', None, 'Release', 'x86_64', None, cxx11_abi=0)
assert ('hello/0.1: Hello World Release!' in client.out)
return client.out | The AutotoolsDeps can be used also in pure Makefiles, if the makefiles follow
the Autotools conventions | conans/test/functional/toolchains/gnu/autotools/test_basic.py | build_windows_subsystem | nmasseyKM/conan | 6,205 | python | def build_windows_subsystem(profile, make_program):
' The AutotoolsDeps can be used also in pure Makefiles, if the makefiles follow\n the Autotools conventions\n '
client = TestClient(path_with_spaces=False)
client.run('new hello/0.1 --template=cmake_lib')
os.rename(os.path.join(client.current_folder, 'test_package'), os.path.join(client.current_folder, 'test_package2'))
client.save({'profile': profile})
client.run('create . --profile=profile')
main = gen_function_cpp(name='main', includes=['hello'], calls=['hello'])
makefile = gen_makefile(apps=['app'])
conanfile = textwrap.dedent('\n from conans import ConanFile\n from conan.tools.gnu import AutotoolsToolchain, Autotools, AutotoolsDeps\n\n class TestConan(ConanFile):\n requires = "hello/0.1"\n settings = "os", "compiler", "arch", "build_type"\n exports_sources = "Makefile"\n generators = "AutotoolsDeps", "AutotoolsToolchain"\n\n def build(self):\n autotools = Autotools(self)\n autotools.make()\n ')
client.save({'app.cpp': main, 'Makefile': makefile, 'conanfile.py': conanfile, 'profile': profile}, clean_first=True)
client.run('install . --profile=profile')
cmd = environment_wrap_command(ConanFileMock(), ['conanbuildenv', 'conanautotoolstoolchain', 'conanautotoolsdeps'], make_program, cwd=client.current_folder)
client.run_command(cmd)
client.run_command('app')
check_exe_run(client.out, 'main', 'gcc', None, 'Release', 'x86_64', None)
assert ('hello/0.1: Hello World Release!' in client.out)
client.save({'app.cpp': gen_function_cpp(name='main', msg='main2', includes=['hello'], calls=['hello'])})
t = (time.time() + 1)
touch(os.path.join(client.current_folder, 'app.cpp'), (t, t))
client.run('build .')
client.run_command('app')
check_exe_run(client.out, 'main2', 'gcc', None, 'Release', 'x86_64', None, cxx11_abi=0)
assert ('hello/0.1: Hello World Release!' in client.out)
return client.out | def build_windows_subsystem(profile, make_program):
' The AutotoolsDeps can be used also in pure Makefiles, if the makefiles follow\n the Autotools conventions\n '
client = TestClient(path_with_spaces=False)
client.run('new hello/0.1 --template=cmake_lib')
os.rename(os.path.join(client.current_folder, 'test_package'), os.path.join(client.current_folder, 'test_package2'))
client.save({'profile': profile})
client.run('create . --profile=profile')
main = gen_function_cpp(name='main', includes=['hello'], calls=['hello'])
makefile = gen_makefile(apps=['app'])
conanfile = textwrap.dedent('\n from conans import ConanFile\n from conan.tools.gnu import AutotoolsToolchain, Autotools, AutotoolsDeps\n\n class TestConan(ConanFile):\n requires = "hello/0.1"\n settings = "os", "compiler", "arch", "build_type"\n exports_sources = "Makefile"\n generators = "AutotoolsDeps", "AutotoolsToolchain"\n\n def build(self):\n autotools = Autotools(self)\n autotools.make()\n ')
client.save({'app.cpp': main, 'Makefile': makefile, 'conanfile.py': conanfile, 'profile': profile}, clean_first=True)
client.run('install . --profile=profile')
cmd = environment_wrap_command(ConanFileMock(), ['conanbuildenv', 'conanautotoolstoolchain', 'conanautotoolsdeps'], make_program, cwd=client.current_folder)
client.run_command(cmd)
client.run_command('app')
check_exe_run(client.out, 'main', 'gcc', None, 'Release', 'x86_64', None)
assert ('hello/0.1: Hello World Release!' in client.out)
client.save({'app.cpp': gen_function_cpp(name='main', msg='main2', includes=['hello'], calls=['hello'])})
t = (time.time() + 1)
touch(os.path.join(client.current_folder, 'app.cpp'), (t, t))
client.run('build .')
client.run_command('app')
check_exe_run(client.out, 'main2', 'gcc', None, 'Release', 'x86_64', None, cxx11_abi=0)
assert ('hello/0.1: Hello World Release!' in client.out)
return client.out<|docstring|>The AutotoolsDeps can be used also in pure Makefiles, if the makefiles follow
the Autotools conventions<|endoftext|> |
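The wrapped command in this test sources the generated `conanbuildenv`, `conanautotoolstoolchain`, and `conanautotoolsdeps` scripts before running `make`, so a Makefile following the Autotools conventions can rely on the injected `CPPFLAGS`/`CXXFLAGS`/`LDFLAGS`/`LIBS`. A rough sketch of that composition; the `.sh` naming and `&&` chaining are assumptions about what `environment_wrap_command` amounts to on a Windows subsystem shell:

```python
# Hypothetical approximation of environment_wrap_command(); the real helper
# lives in the Conan test suite and handles more cases.
def wrap_command(env_scripts, command):
    sourced = ' && '.join('. ./{0}.sh'.format(name) for name in env_scripts)
    return '{0} && {1}'.format(sourced, command)


print(wrap_command(
    ['conanbuildenv', 'conanautotoolstoolchain', 'conanautotoolsdeps'],
    'make'))
```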
8632bb53528dc672fe60ac78b91258b68f0926c3eb645183583f43d9b32ceab5 | def __init__(self, parent=None):
'Constructor.'
super(InfoDialog, self).__init__(parent)
self.setupUi(self) | Constructor. | CanadianWebServices/info_dialog.py | __init__ | eswright/cgdi-qgis-services | 2 | python | def __init__(self, parent=None):
super(InfoDialog, self).__init__(parent)
self.setupUi(self) | def __init__(self, parent=None):
super(InfoDialog, self).__init__(parent)
self.setupUi(self)<|docstring|>Constructor.<|endoftext|> |
5b1611af6e6c823721effd9044aca11a1b65585123beaa2b377b196ff81b41ca | def __init__(__self__, *, authenticator: pulumi.Input[str], parent_flow_alias: pulumi.Input[str], realm_id: pulumi.Input[str], requirement: Optional[pulumi.Input[str]]=None):
"\n The set of arguments for constructing a Execution resource.\n :param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.\n :param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.\n :param pulumi.Input[str] realm_id: The realm the authentication execution exists in.\n :param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n "
pulumi.set(__self__, 'authenticator', authenticator)
pulumi.set(__self__, 'parent_flow_alias', parent_flow_alias)
pulumi.set(__self__, 'realm_id', realm_id)
if (requirement is not None):
pulumi.set(__self__, 'requirement', requirement) | The set of arguments for constructing a Execution resource.
:param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.
:param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.
:param pulumi.Input[str] realm_id: The realm the authentication execution exists in.
:param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`. | sdk/python/pulumi_keycloak/authentication/execution.py | __init__ | davide-talesco/pulumi-keycloak | 13 | python | def __init__(__self__, *, authenticator: pulumi.Input[str], parent_flow_alias: pulumi.Input[str], realm_id: pulumi.Input[str], requirement: Optional[pulumi.Input[str]]=None):
"\n The set of arguments for constructing a Execution resource.\n :param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.\n :param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.\n :param pulumi.Input[str] realm_id: The realm the authentication execution exists in.\n :param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n "
pulumi.set(__self__, 'authenticator', authenticator)
pulumi.set(__self__, 'parent_flow_alias', parent_flow_alias)
pulumi.set(__self__, 'realm_id', realm_id)
if (requirement is not None):
pulumi.set(__self__, 'requirement', requirement) | def __init__(__self__, *, authenticator: pulumi.Input[str], parent_flow_alias: pulumi.Input[str], realm_id: pulumi.Input[str], requirement: Optional[pulumi.Input[str]]=None):
"\n The set of arguments for constructing a Execution resource.\n :param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.\n :param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.\n :param pulumi.Input[str] realm_id: The realm the authentication execution exists in.\n :param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n "
pulumi.set(__self__, 'authenticator', authenticator)
pulumi.set(__self__, 'parent_flow_alias', parent_flow_alias)
pulumi.set(__self__, 'realm_id', realm_id)
if (requirement is not None):
pulumi.set(__self__, 'requirement', requirement)<|docstring|>The set of arguments for constructing a Execution resource.
:param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.
:param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.
:param pulumi.Input[str] realm_id: The realm the authentication execution exists in.
:param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.<|endoftext|> |
cae56140dfc1892345dfe8772abd4fa8a3211fbbdfb293ca6ccb34e05bb3a250 | @property
@pulumi.getter
def authenticator(self) -> pulumi.Input[str]:
"\n The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.\n "
return pulumi.get(self, 'authenticator') | The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools. | sdk/python/pulumi_keycloak/authentication/execution.py | authenticator | davide-talesco/pulumi-keycloak | 13 | python | @property
@pulumi.getter
def authenticator(self) -> pulumi.Input[str]:
"\n \n "
return pulumi.get(self, 'authenticator') | @property
@pulumi.getter
def authenticator(self) -> pulumi.Input[str]:
"\n \n "
return pulumi.get(self, 'authenticator')<|docstring|>The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.<|endoftext|> |
8422a4a8e4281814f81a39999abd81d19a2e7fca6b12e0a01a57a45c36b6d0aa | @property
@pulumi.getter(name='parentFlowAlias')
def parent_flow_alias(self) -> pulumi.Input[str]:
'\n The alias of the flow this execution is attached to.\n '
return pulumi.get(self, 'parent_flow_alias') | The alias of the flow this execution is attached to. | sdk/python/pulumi_keycloak/authentication/execution.py | parent_flow_alias | davide-talesco/pulumi-keycloak | 13 | python | @property
@pulumi.getter(name='parentFlowAlias')
def parent_flow_alias(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'parent_flow_alias') | @property
@pulumi.getter(name='parentFlowAlias')
def parent_flow_alias(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'parent_flow_alias')<|docstring|>The alias of the flow this execution is attached to.<|endoftext|> |
eb4e7546a684298120f4177db2e87ae4de4092269f0f4f4e8ea2fd95771094d6 | @property
@pulumi.getter(name='realmId')
def realm_id(self) -> pulumi.Input[str]:
'\n The realm the authentication execution exists in.\n '
return pulumi.get(self, 'realm_id') | The realm the authentication execution exists in. | sdk/python/pulumi_keycloak/authentication/execution.py | realm_id | davide-talesco/pulumi-keycloak | 13 | python | @property
@pulumi.getter(name='realmId')
def realm_id(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'realm_id') | @property
@pulumi.getter(name='realmId')
def realm_id(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'realm_id')<|docstring|>The realm the authentication execution exists in.<|endoftext|> |
6cbac8d37572bb72dd0bce6e7e1575458543eccdd87e60e391104543081fd981 | @property
@pulumi.getter
def requirement(self) -> Optional[pulumi.Input[str]]:
'\n The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n '
return pulumi.get(self, 'requirement') | The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`. | sdk/python/pulumi_keycloak/authentication/execution.py | requirement | davide-talesco/pulumi-keycloak | 13 | python | @property
@pulumi.getter
def requirement(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'requirement') | @property
@pulumi.getter
def requirement(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'requirement')<|docstring|>The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.<|endoftext|> |
7e691c011d598dcdcc737e5e38bcab19c66afc9d6ab1845ec53363f8554fd49f | def __init__(__self__, *, authenticator: Optional[pulumi.Input[str]]=None, parent_flow_alias: Optional[pulumi.Input[str]]=None, realm_id: Optional[pulumi.Input[str]]=None, requirement: Optional[pulumi.Input[str]]=None):
"\n Input properties used for looking up and filtering Execution resources.\n :param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.\n :param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.\n :param pulumi.Input[str] realm_id: The realm the authentication execution exists in.\n :param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n "
if (authenticator is not None):
pulumi.set(__self__, 'authenticator', authenticator)
if (parent_flow_alias is not None):
pulumi.set(__self__, 'parent_flow_alias', parent_flow_alias)
if (realm_id is not None):
pulumi.set(__self__, 'realm_id', realm_id)
if (requirement is not None):
pulumi.set(__self__, 'requirement', requirement) | Input properties used for looking up and filtering Execution resources.
:param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.
:param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.
:param pulumi.Input[str] realm_id: The realm the authentication execution exists in.
:param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`. | sdk/python/pulumi_keycloak/authentication/execution.py | __init__ | davide-talesco/pulumi-keycloak | 13 | python | def __init__(__self__, *, authenticator: Optional[pulumi.Input[str]]=None, parent_flow_alias: Optional[pulumi.Input[str]]=None, realm_id: Optional[pulumi.Input[str]]=None, requirement: Optional[pulumi.Input[str]]=None):
"\n Input properties used for looking up and filtering Execution resources.\n :param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.\n :param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.\n :param pulumi.Input[str] realm_id: The realm the authentication execution exists in.\n :param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n "
if (authenticator is not None):
pulumi.set(__self__, 'authenticator', authenticator)
if (parent_flow_alias is not None):
pulumi.set(__self__, 'parent_flow_alias', parent_flow_alias)
if (realm_id is not None):
pulumi.set(__self__, 'realm_id', realm_id)
if (requirement is not None):
pulumi.set(__self__, 'requirement', requirement) | def __init__(__self__, *, authenticator: Optional[pulumi.Input[str]]=None, parent_flow_alias: Optional[pulumi.Input[str]]=None, realm_id: Optional[pulumi.Input[str]]=None, requirement: Optional[pulumi.Input[str]]=None):
"\n Input properties used for looking up and filtering Execution resources.\n :param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.\n :param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.\n :param pulumi.Input[str] realm_id: The realm the authentication execution exists in.\n :param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n "
if (authenticator is not None):
pulumi.set(__self__, 'authenticator', authenticator)
if (parent_flow_alias is not None):
pulumi.set(__self__, 'parent_flow_alias', parent_flow_alias)
if (realm_id is not None):
pulumi.set(__self__, 'realm_id', realm_id)
if (requirement is not None):
pulumi.set(__self__, 'requirement', requirement)<|docstring|>Input properties used for looking up and filtering Execution resources.
:param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.
:param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.
:param pulumi.Input[str] realm_id: The realm the authentication execution exists in.
:param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.<|endoftext|> |
10316865e23bcb45fc9a88adb2c488f976f283542b790cc08eb37920b8dd2de6 | @property
@pulumi.getter
def authenticator(self) -> Optional[pulumi.Input[str]]:
"\n The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.\n "
return pulumi.get(self, 'authenticator') | The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools. | sdk/python/pulumi_keycloak/authentication/execution.py | authenticator | davide-talesco/pulumi-keycloak | 13 | python | @property
@pulumi.getter
def authenticator(self) -> Optional[pulumi.Input[str]]:
"\n \n "
return pulumi.get(self, 'authenticator') | @property
@pulumi.getter
def authenticator(self) -> Optional[pulumi.Input[str]]:
"\n \n "
return pulumi.get(self, 'authenticator')<|docstring|>The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.<|endoftext|> |
ad86299f0d5f282ba7b42ddfe05e096f06239b756b407c02b2aabac566e7efb9 | @property
@pulumi.getter(name='parentFlowAlias')
def parent_flow_alias(self) -> Optional[pulumi.Input[str]]:
'\n The alias of the flow this execution is attached to.\n '
return pulumi.get(self, 'parent_flow_alias') | The alias of the flow this execution is attached to. | sdk/python/pulumi_keycloak/authentication/execution.py | parent_flow_alias | davide-talesco/pulumi-keycloak | 13 | python | @property
@pulumi.getter(name='parentFlowAlias')
def parent_flow_alias(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'parent_flow_alias') | @property
@pulumi.getter(name='parentFlowAlias')
def parent_flow_alias(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'parent_flow_alias')<|docstring|>The alias of the flow this execution is attached to.<|endoftext|> |
0548940112a46d9548dc12a3ce9e4afcc2d5e76d458bda6bc3151164ecd1fc4a | @property
@pulumi.getter(name='realmId')
def realm_id(self) -> Optional[pulumi.Input[str]]:
'\n The realm the authentication execution exists in.\n '
return pulumi.get(self, 'realm_id') | The realm the authentication execution exists in. | sdk/python/pulumi_keycloak/authentication/execution.py | realm_id | davide-talesco/pulumi-keycloak | 13 | python | @property
@pulumi.getter(name='realmId')
def realm_id(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'realm_id') | @property
@pulumi.getter(name='realmId')
def realm_id(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'realm_id')<|docstring|>The realm the authentication execution exists in.<|endoftext|> |
6cbac8d37572bb72dd0bce6e7e1575458543eccdd87e60e391104543081fd981 | @property
@pulumi.getter
def requirement(self) -> Optional[pulumi.Input[str]]:
'\n The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n '
return pulumi.get(self, 'requirement') | The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`. | sdk/python/pulumi_keycloak/authentication/execution.py | requirement | davide-talesco/pulumi-keycloak | 13 | python | @property
@pulumi.getter
def requirement(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'requirement') | @property
@pulumi.getter
def requirement(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'requirement')<|docstring|>The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.<|endoftext|> |
325e1298f0a0b85d04b89071b13210698b2c3216c3ecb0614583784a8c709f60 | @overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, authenticator: Optional[pulumi.Input[str]]=None, parent_flow_alias: Optional[pulumi.Input[str]]=None, realm_id: Optional[pulumi.Input[str]]=None, requirement: Optional[pulumi.Input[str]]=None, __props__=None):
'\n Allows for creating and managing an authentication execution within Keycloak.\n\n An authentication execution is an action that the user or service may or may not take when authenticating through an authentication\n flow.\n\n > Due to limitations in the Keycloak API, the ordering of authentication executions within a flow must be specified using `depends_on`. Authentication executions that are created first will appear first within the flow.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_keycloak as keycloak\n\n realm = keycloak.Realm("realm",\n realm="my-realm",\n enabled=True)\n flow = keycloak.authentication.Flow("flow",\n realm_id=realm.id,\n alias="my-flow-alias")\n # first execution\n execution_one = keycloak.authentication.Execution("executionOne",\n realm_id=realm.id,\n parent_flow_alias=flow.alias,\n authenticator="auth-cookie",\n requirement="ALTERNATIVE")\n # second execution\n execution_two = keycloak.authentication.Execution("executionTwo",\n realm_id=realm.id,\n parent_flow_alias=flow.alias,\n authenticator="identity-provider-redirector",\n requirement="ALTERNATIVE",\n opts=pulumi.ResourceOptions(depends_on=[execution_one]))\n ```\n\n ## Import\n\n Authentication executions can be imported using the formats`{{realmId}}/{{parentFlowAlias}}/{{authenticationExecutionId}}`. Examplebash\n\n ```sh\n $ pulumi import keycloak:authentication/execution:Execution execution_one my-realm/my-flow-alias/30559fcf-6fb8-45ea-8c46-2b86f46ebc17\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser\'s development tools.\n :param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.\n :param pulumi.Input[str] realm_id: The realm the authentication execution exists in.\n :param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n '
... | Allows for creating and managing an authentication execution within Keycloak.
An authentication execution is an action that the user or service may or may not take when authenticating through an authentication
flow.
> Due to limitations in the Keycloak API, the ordering of authentication executions within a flow must be specified using `depends_on`. Authentication executions that are created first will appear first within the flow.
## Example Usage
```python
import pulumi
import pulumi_keycloak as keycloak
realm = keycloak.Realm("realm",
realm="my-realm",
enabled=True)
flow = keycloak.authentication.Flow("flow",
realm_id=realm.id,
alias="my-flow-alias")
# first execution
execution_one = keycloak.authentication.Execution("executionOne",
realm_id=realm.id,
parent_flow_alias=flow.alias,
authenticator="auth-cookie",
requirement="ALTERNATIVE")
# second execution
execution_two = keycloak.authentication.Execution("executionTwo",
realm_id=realm.id,
parent_flow_alias=flow.alias,
authenticator="identity-provider-redirector",
requirement="ALTERNATIVE",
opts=pulumi.ResourceOptions(depends_on=[execution_one]))
```
## Import
Authentication executions can be imported using the formats: `{{realmId}}/{{parentFlowAlias}}/{{authenticationExecutionId}}`. Example:
```sh
$ pulumi import keycloak:authentication/execution:Execution execution_one my-realm/my-flow-alias/30559fcf-6fb8-45ea-8c46-2b86f46ebc17
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.
:param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.
:param pulumi.Input[str] realm_id: The realm the authentication execution exists in.
:param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`. | sdk/python/pulumi_keycloak/authentication/execution.py | __init__ | davide-talesco/pulumi-keycloak | 13 | python | @overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, authenticator: Optional[pulumi.Input[str]]=None, parent_flow_alias: Optional[pulumi.Input[str]]=None, realm_id: Optional[pulumi.Input[str]]=None, requirement: Optional[pulumi.Input[str]]=None, __props__=None):
'\n Allows for creating and managing an authentication execution within Keycloak.\n\n An authentication execution is an action that the user or service may or may not take when authenticating through an authentication\n flow.\n\n > Due to limitations in the Keycloak API, the ordering of authentication executions within a flow must be specified using `depends_on`. Authentication executions that are created first will appear first within the flow.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_keycloak as keycloak\n\n realm = keycloak.Realm("realm",\n realm="my-realm",\n enabled=True)\n flow = keycloak.authentication.Flow("flow",\n realm_id=realm.id,\n alias="my-flow-alias")\n # first execution\n execution_one = keycloak.authentication.Execution("executionOne",\n realm_id=realm.id,\n parent_flow_alias=flow.alias,\n authenticator="auth-cookie",\n requirement="ALTERNATIVE")\n # second execution\n execution_two = keycloak.authentication.Execution("executionTwo",\n realm_id=realm.id,\n parent_flow_alias=flow.alias,\n authenticator="identity-provider-redirector",\n requirement="ALTERNATIVE",\n opts=pulumi.ResourceOptions(depends_on=[execution_one]))\n ```\n\n ## Import\n\n Authentication executions can be imported using the formats`{{realmId}}/{{parentFlowAlias}}/{{authenticationExecutionId}}`. Examplebash\n\n ```sh\n $ pulumi import keycloak:authentication/execution:Execution execution_one my-realm/my-flow-alias/30559fcf-6fb8-45ea-8c46-2b86f46ebc17\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser\'s development tools.\n :param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.\n :param pulumi.Input[str] realm_id: The realm the authentication execution exists in.\n :param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n '
... | @overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, authenticator: Optional[pulumi.Input[str]]=None, parent_flow_alias: Optional[pulumi.Input[str]]=None, realm_id: Optional[pulumi.Input[str]]=None, requirement: Optional[pulumi.Input[str]]=None, __props__=None):
'\n Allows for creating and managing an authentication execution within Keycloak.\n\n An authentication execution is an action that the user or service may or may not take when authenticating through an authentication\n flow.\n\n > Due to limitations in the Keycloak API, the ordering of authentication executions within a flow must be specified using `depends_on`. Authentication executions that are created first will appear first within the flow.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_keycloak as keycloak\n\n realm = keycloak.Realm("realm",\n realm="my-realm",\n enabled=True)\n flow = keycloak.authentication.Flow("flow",\n realm_id=realm.id,\n alias="my-flow-alias")\n # first execution\n execution_one = keycloak.authentication.Execution("executionOne",\n realm_id=realm.id,\n parent_flow_alias=flow.alias,\n authenticator="auth-cookie",\n requirement="ALTERNATIVE")\n # second execution\n execution_two = keycloak.authentication.Execution("executionTwo",\n realm_id=realm.id,\n parent_flow_alias=flow.alias,\n authenticator="identity-provider-redirector",\n requirement="ALTERNATIVE",\n opts=pulumi.ResourceOptions(depends_on=[execution_one]))\n ```\n\n ## Import\n\n Authentication executions can be imported using the formats`{{realmId}}/{{parentFlowAlias}}/{{authenticationExecutionId}}`. Examplebash\n\n ```sh\n $ pulumi import keycloak:authentication/execution:Execution execution_one my-realm/my-flow-alias/30559fcf-6fb8-45ea-8c46-2b86f46ebc17\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser\'s development tools.\n :param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.\n :param pulumi.Input[str] realm_id: The realm the authentication execution exists in.\n :param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n '
...<|docstring|>Allows for creating and managing an authentication execution within Keycloak.
An authentication execution is an action that the user or service may or may not take when authenticating through an authentication
flow.
> Due to limitations in the Keycloak API, the ordering of authentication executions within a flow must be specified using `depends_on`. Authentication executions that are created first will appear first within the flow.
## Example Usage
```python
import pulumi
import pulumi_keycloak as keycloak
realm = keycloak.Realm("realm",
realm="my-realm",
enabled=True)
flow = keycloak.authentication.Flow("flow",
realm_id=realm.id,
alias="my-flow-alias")
# first execution
execution_one = keycloak.authentication.Execution("executionOne",
realm_id=realm.id,
parent_flow_alias=flow.alias,
authenticator="auth-cookie",
requirement="ALTERNATIVE")
# second execution
execution_two = keycloak.authentication.Execution("executionTwo",
realm_id=realm.id,
parent_flow_alias=flow.alias,
authenticator="identity-provider-redirector",
requirement="ALTERNATIVE",
opts=pulumi.ResourceOptions(depends_on=[execution_one]))
```
## Import
Authentication executions can be imported using the formats: `{{realmId}}/{{parentFlowAlias}}/{{authenticationExecutionId}}`. Example:
```sh
$ pulumi import keycloak:authentication/execution:Execution execution_one my-realm/my-flow-alias/30559fcf-6fb8-45ea-8c46-2b86f46ebc17
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.
:param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.
:param pulumi.Input[str] realm_id: The realm the authentication execution exists in.
:param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.<|endoftext|> |
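The docstring above already shows two chained executions; extending the chain follows the same pattern. A sketch, assuming the `realm`, `flow`, and `execution_two` objects from the usage example (the authenticator id `auth-username-password-form` is an assumption for illustration):

```python
# Hypothetical third execution, ordered after execution_two via depends_on,
# since the Keycloak API exposes no explicit position argument.
execution_three = keycloak.authentication.Execution("executionThree",
    realm_id=realm.id,
    parent_flow_alias=flow.alias,
    authenticator="auth-username-password-form",  # assumed authenticator id
    requirement="ALTERNATIVE",
    opts=pulumi.ResourceOptions(depends_on=[execution_two]))
```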
1d7f97cb5d6e3212c7647beb5270668a1a45b5af9c53f11c4d772a9814effa52 | @overload
def __init__(__self__, resource_name: str, args: ExecutionArgs, opts: Optional[pulumi.ResourceOptions]=None):
'\n Allows for creating and managing an authentication execution within Keycloak.\n\n An authentication execution is an action that the user or service may or may not take when authenticating through an authentication\n flow.\n\n > Due to limitations in the Keycloak API, the ordering of authentication executions within a flow must be specified using `depends_on`. Authentication executions that are created first will appear first within the flow.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_keycloak as keycloak\n\n realm = keycloak.Realm("realm",\n realm="my-realm",\n enabled=True)\n flow = keycloak.authentication.Flow("flow",\n realm_id=realm.id,\n alias="my-flow-alias")\n # first execution\n execution_one = keycloak.authentication.Execution("executionOne",\n realm_id=realm.id,\n parent_flow_alias=flow.alias,\n authenticator="auth-cookie",\n requirement="ALTERNATIVE")\n # second execution\n execution_two = keycloak.authentication.Execution("executionTwo",\n realm_id=realm.id,\n parent_flow_alias=flow.alias,\n authenticator="identity-provider-redirector",\n requirement="ALTERNATIVE",\n opts=pulumi.ResourceOptions(depends_on=[execution_one]))\n ```\n\n ## Import\n\n Authentication executions can be imported using the formats`{{realmId}}/{{parentFlowAlias}}/{{authenticationExecutionId}}`. Examplebash\n\n ```sh\n $ pulumi import keycloak:authentication/execution:Execution execution_one my-realm/my-flow-alias/30559fcf-6fb8-45ea-8c46-2b86f46ebc17\n ```\n\n :param str resource_name: The name of the resource.\n :param ExecutionArgs args: The arguments to use to populate this resource\'s properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n '
... | Allows for creating and managing an authentication execution within Keycloak.
An authentication execution is an action that the user or service may or may not take when authenticating through an authentication
flow.
> Due to limitations in the Keycloak API, the ordering of authentication executions within a flow must be specified using `depends_on`. Authentication executions that are created first will appear first within the flow.
## Example Usage
```python
import pulumi
import pulumi_keycloak as keycloak
realm = keycloak.Realm("realm",
realm="my-realm",
enabled=True)
flow = keycloak.authentication.Flow("flow",
realm_id=realm.id,
alias="my-flow-alias")
# first execution
execution_one = keycloak.authentication.Execution("executionOne",
realm_id=realm.id,
parent_flow_alias=flow.alias,
authenticator="auth-cookie",
requirement="ALTERNATIVE")
# second execution
execution_two = keycloak.authentication.Execution("executionTwo",
realm_id=realm.id,
parent_flow_alias=flow.alias,
authenticator="identity-provider-redirector",
requirement="ALTERNATIVE",
opts=pulumi.ResourceOptions(depends_on=[execution_one]))
```
## Import
Authentication executions can be imported using the formats: `{{realmId}}/{{parentFlowAlias}}/{{authenticationExecutionId}}`. Example:
```sh
$ pulumi import keycloak:authentication/execution:Execution execution_one my-realm/my-flow-alias/30559fcf-6fb8-45ea-8c46-2b86f46ebc17
```
:param str resource_name: The name of the resource.
:param ExecutionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource. | sdk/python/pulumi_keycloak/authentication/execution.py | __init__ | davide-talesco/pulumi-keycloak | 13 | python | @overload
def __init__(__self__, resource_name: str, args: ExecutionArgs, opts: Optional[pulumi.ResourceOptions]=None):
'\n Allows for creating and managing an authentication execution within Keycloak.\n\n An authentication execution is an action that the user or service may or may not take when authenticating through an authentication\n flow.\n\n > Due to limitations in the Keycloak API, the ordering of authentication executions within a flow must be specified using `depends_on`. Authentication executions that are created first will appear first within the flow.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_keycloak as keycloak\n\n realm = keycloak.Realm("realm",\n realm="my-realm",\n enabled=True)\n flow = keycloak.authentication.Flow("flow",\n realm_id=realm.id,\n alias="my-flow-alias")\n # first execution\n execution_one = keycloak.authentication.Execution("executionOne",\n realm_id=realm.id,\n parent_flow_alias=flow.alias,\n authenticator="auth-cookie",\n requirement="ALTERNATIVE")\n # second execution\n execution_two = keycloak.authentication.Execution("executionTwo",\n realm_id=realm.id,\n parent_flow_alias=flow.alias,\n authenticator="identity-provider-redirector",\n requirement="ALTERNATIVE",\n opts=pulumi.ResourceOptions(depends_on=[execution_one]))\n ```\n\n ## Import\n\n Authentication executions can be imported using the formats`{{realmId}}/{{parentFlowAlias}}/{{authenticationExecutionId}}`. Examplebash\n\n ```sh\n $ pulumi import keycloak:authentication/execution:Execution execution_one my-realm/my-flow-alias/30559fcf-6fb8-45ea-8c46-2b86f46ebc17\n ```\n\n :param str resource_name: The name of the resource.\n :param ExecutionArgs args: The arguments to use to populate this resource\'s properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n '
... | @overload
def __init__(__self__, resource_name: str, args: ExecutionArgs, opts: Optional[pulumi.ResourceOptions]=None):
'\n Allows for creating and managing an authentication execution within Keycloak.\n\n An authentication execution is an action that the user or service may or may not take when authenticating through an authentication\n flow.\n\n > Due to limitations in the Keycloak API, the ordering of authentication executions within a flow must be specified using `depends_on`. Authentication executions that are created first will appear first within the flow.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_keycloak as keycloak\n\n realm = keycloak.Realm("realm",\n realm="my-realm",\n enabled=True)\n flow = keycloak.authentication.Flow("flow",\n realm_id=realm.id,\n alias="my-flow-alias")\n # first execution\n execution_one = keycloak.authentication.Execution("executionOne",\n realm_id=realm.id,\n parent_flow_alias=flow.alias,\n authenticator="auth-cookie",\n requirement="ALTERNATIVE")\n # second execution\n execution_two = keycloak.authentication.Execution("executionTwo",\n realm_id=realm.id,\n parent_flow_alias=flow.alias,\n authenticator="identity-provider-redirector",\n requirement="ALTERNATIVE",\n opts=pulumi.ResourceOptions(depends_on=[execution_one]))\n ```\n\n ## Import\n\n Authentication executions can be imported using the formats`{{realmId}}/{{parentFlowAlias}}/{{authenticationExecutionId}}`. Examplebash\n\n ```sh\n $ pulumi import keycloak:authentication/execution:Execution execution_one my-realm/my-flow-alias/30559fcf-6fb8-45ea-8c46-2b86f46ebc17\n ```\n\n :param str resource_name: The name of the resource.\n :param ExecutionArgs args: The arguments to use to populate this resource\'s properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n '
...<|docstring|>Allows for creating and managing an authentication execution within Keycloak.
An authentication execution is an action that the user or service may or may not take when authenticating through an authentication
flow.
> Due to limitations in the Keycloak API, the ordering of authentication executions within a flow must be specified using `depends_on`. Authentication executions that are created first will appear first within the flow.
## Example Usage
```python
import pulumi
import pulumi_keycloak as keycloak
realm = keycloak.Realm("realm",
realm="my-realm",
enabled=True)
flow = keycloak.authentication.Flow("flow",
realm_id=realm.id,
alias="my-flow-alias")
# first execution
execution_one = keycloak.authentication.Execution("executionOne",
realm_id=realm.id,
parent_flow_alias=flow.alias,
authenticator="auth-cookie",
requirement="ALTERNATIVE")
# second execution
execution_two = keycloak.authentication.Execution("executionTwo",
realm_id=realm.id,
parent_flow_alias=flow.alias,
authenticator="identity-provider-redirector",
requirement="ALTERNATIVE",
opts=pulumi.ResourceOptions(depends_on=[execution_one]))
```
## Import
Authentication executions can be imported using the formats: `{{realmId}}/{{parentFlowAlias}}/{{authenticationExecutionId}}`. Example:
```sh
$ pulumi import keycloak:authentication/execution:Execution execution_one my-realm/my-flow-alias/30559fcf-6fb8-45ea-8c46-2b86f46ebc17
```
:param str resource_name: The name of the resource.
:param ExecutionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.<|endoftext|> |
6ae7201a3bd86d14e60bed6bf8a0904177838f916f5f4b7e3a08391f3e995470 | @staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, authenticator: Optional[pulumi.Input[str]]=None, parent_flow_alias: Optional[pulumi.Input[str]]=None, realm_id: Optional[pulumi.Input[str]]=None, requirement: Optional[pulumi.Input[str]]=None) -> 'Execution':
"\n Get an existing Execution resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.\n :param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.\n :param pulumi.Input[str] realm_id: The realm the authentication execution exists in.\n :param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ExecutionState.__new__(_ExecutionState)
__props__.__dict__['authenticator'] = authenticator
__props__.__dict__['parent_flow_alias'] = parent_flow_alias
__props__.__dict__['realm_id'] = realm_id
__props__.__dict__['requirement'] = requirement
return Execution(resource_name, opts=opts, __props__=__props__) | Get an existing Execution resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.
:param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.
:param pulumi.Input[str] realm_id: The realm the authentication execution exists in.
:param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`. | sdk/python/pulumi_keycloak/authentication/execution.py | get | davide-talesco/pulumi-keycloak | 13 | python | @staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, authenticator: Optional[pulumi.Input[str]]=None, parent_flow_alias: Optional[pulumi.Input[str]]=None, realm_id: Optional[pulumi.Input[str]]=None, requirement: Optional[pulumi.Input[str]]=None) -> 'Execution':
"\n Get an existing Execution resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.\n :param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.\n :param pulumi.Input[str] realm_id: The realm the authentication execution exists in.\n :param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ExecutionState.__new__(_ExecutionState)
__props__.__dict__['authenticator'] = authenticator
__props__.__dict__['parent_flow_alias'] = parent_flow_alias
__props__.__dict__['realm_id'] = realm_id
__props__.__dict__['requirement'] = requirement
return Execution(resource_name, opts=opts, __props__=__props__) | @staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, authenticator: Optional[pulumi.Input[str]]=None, parent_flow_alias: Optional[pulumi.Input[str]]=None, realm_id: Optional[pulumi.Input[str]]=None, requirement: Optional[pulumi.Input[str]]=None) -> 'Execution':
"\n Get an existing Execution resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.\n :param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.\n :param pulumi.Input[str] realm_id: The realm the authentication execution exists in.\n :param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ExecutionState.__new__(_ExecutionState)
__props__.__dict__['authenticator'] = authenticator
__props__.__dict__['parent_flow_alias'] = parent_flow_alias
__props__.__dict__['realm_id'] = realm_id
__props__.__dict__['requirement'] = requirement
return Execution(resource_name, opts=opts, __props__=__props__)<|docstring|>Get an existing Execution resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authenticator: The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.
:param pulumi.Input[str] parent_flow_alias: The alias of the flow this execution is attached to.
:param pulumi.Input[str] realm_id: The realm the authentication execution exists in.
:param pulumi.Input[str] requirement: The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.<|endoftext|> |
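The `get` method pairs with the import id format documented in the docstrings above. A minimal sketch of adopting an existing execution's state (the id reuses the sample value from the import example and is not a real resource):

```python
import pulumi_keycloak as keycloak

# Look up an existing execution by its provider id
# ({{realmId}}/{{parentFlowAlias}}/{{authenticationExecutionId}}).
existing = keycloak.authentication.Execution.get(
    "existing-execution",
    id="my-realm/my-flow-alias/30559fcf-6fb8-45ea-8c46-2b86f46ebc17")
```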
e6406107bf503110c610b71e93910b5b250a4988519d57f9cd9cb2e5c0790224 | @property
@pulumi.getter
def authenticator(self) -> pulumi.Output[str]:
"\n The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.\n "
return pulumi.get(self, 'authenticator') | The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools. | sdk/python/pulumi_keycloak/authentication/execution.py | authenticator | davide-talesco/pulumi-keycloak | 13 | python | @property
@pulumi.getter
def authenticator(self) -> pulumi.Output[str]:
"\n \n "
return pulumi.get(self, 'authenticator') | @property
@pulumi.getter
def authenticator(self) -> pulumi.Output[str]:
"\n \n "
return pulumi.get(self, 'authenticator')<|docstring|>The name of the authenticator. This can be found by experimenting with the GUI and looking at HTTP requests within the network tab of your browser's development tools.<|endoftext|> |
ede50c82163bc55186f445db8955b07f394d8952b79ae00a934a027f27b5e6a9 | @property
@pulumi.getter(name='parentFlowAlias')
def parent_flow_alias(self) -> pulumi.Output[str]:
'\n The alias of the flow this execution is attached to.\n '
return pulumi.get(self, 'parent_flow_alias') | The alias of the flow this execution is attached to. | sdk/python/pulumi_keycloak/authentication/execution.py | parent_flow_alias | davide-talesco/pulumi-keycloak | 13 | python | @property
@pulumi.getter(name='parentFlowAlias')
def parent_flow_alias(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'parent_flow_alias') | @property
@pulumi.getter(name='parentFlowAlias')
def parent_flow_alias(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'parent_flow_alias')<|docstring|>The alias of the flow this execution is attached to.<|endoftext|> |
5611798e91e2e8800d51eea8442878602f9c3ac063184aa3a552feffb24d4297 | @property
@pulumi.getter(name='realmId')
def realm_id(self) -> pulumi.Output[str]:
'\n The realm the authentication execution exists in.\n '
return pulumi.get(self, 'realm_id') | The realm the authentication execution exists in. | sdk/python/pulumi_keycloak/authentication/execution.py | realm_id | davide-talesco/pulumi-keycloak | 13 | python | @property
@pulumi.getter(name='realmId')
def realm_id(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'realm_id') | @property
@pulumi.getter(name='realmId')
def realm_id(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'realm_id')<|docstring|>The realm the authentication execution exists in.<|endoftext|> |
dbe0268656aae7566175a34a65616970c923f89b0182c2ec85abdca4ce076200 | @property
@pulumi.getter
def requirement(self) -> pulumi.Output[Optional[str]]:
'\n The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.\n '
return pulumi.get(self, 'requirement') | The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`. | sdk/python/pulumi_keycloak/authentication/execution.py | requirement | davide-talesco/pulumi-keycloak | 13 | python | @property
@pulumi.getter
def requirement(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'requirement') | @property
@pulumi.getter
def requirement(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'requirement')<|docstring|>The requirement setting, which can be one of `REQUIRED`, `ALTERNATIVE`, `OPTIONAL`, `CONDITIONAL`, or `DISABLED`. Defaults to `DISABLED`.<|endoftext|> |
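The four getters above are the output-side mirrors of the resource inputs, each returning a `pulumi.Output` that resolves once the resource exists. A sketch, assuming the `execution_two` resource from the earlier usage example is in scope:

```python
import pulumi

# Export resolved properties; Outputs resolve after resource creation.
pulumi.export("requirement", execution_two.requirement)
pulumi.export("authenticator", execution_two.authenticator)
```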
60c1500cd7c94b339673503df57aefe16ed5fd508523faa3c39fd7b3c060b983 | def test_jvp_jit_cached(self):
'Bug in caching in presence of JVP and JIT.'
def func(x):
def inner(y):
return (y * x)
res1 = api.jit(inner)(4.0)
res2 = api.jit(inner)(5.0)
return (res1 + res2)
self.assertAllClose((45.0, 9.0), api.jvp(func, (5.0,), (1.0,))) | Bug in caching in presence of JVP and JIT. | tests/api_test.py | test_jvp_jit_cached | niklasschmitz/jax | 4 | python | def test_jvp_jit_cached(self):
def func(x):
def inner(y):
return (y * x)
res1 = api.jit(inner)(4.0)
res2 = api.jit(inner)(5.0)
return (res1 + res2)
self.assertAllClose((45.0, 9.0), api.jvp(func, (5.0,), (1.0,))) | def test_jvp_jit_cached(self):
def func(x):
def inner(y):
return (y * x)
res1 = api.jit(inner)(4.0)
res2 = api.jit(inner)(5.0)
return (res1 + res2)
self.assertAllClose((45.0, 9.0), api.jvp(func, (5.0,), (1.0,)))<|docstring|>Bug in caching in presence of JVP and JIT.<|endoftext|> |
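The expected pair `(45.0, 9.0)` can be verified by hand: the two jitted calls inside `func` compute `4*x` and `5*x`, so `func(x) = 9*x`; at `x = 5.0` the primal is `45.0` and the JVP with tangent seed `1.0` is the slope `9.0`. A plain-Python check of that arithmetic:

```python
# func(x) = 4*x + 5*x = 9*x, so jvp at x=5 with tangent 1 yields (45.0, 9.0).
def closed_form(x):
    return 4.0 * x + 5.0 * x

assert closed_form(5.0) == 45.0  # primal value
assert 9.0 * 1.0 == 9.0          # slope times tangent seed
```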
620e32265771221ce5d494c763d733fa2be3ee1eebe0cc964b94bdd89f8667bc | def f(x: int) -> int:
'docstring of f.'
return (x + 1) | docstring of f. | tests/api_test.py | f | niklasschmitz/jax | 4 | python | def f(x: int) -> int:
return (x + 1) | def f(x: int) -> int:
return (x + 1)<|docstring|>docstring of f.<|endoftext|> |
831b4260e50561829c5c4dd72a009348e7f4d3e96104c868468e00ca320526aa | def superfun(a):
'Does things with stuff.'
pass | Does things with stuff. | tests/api_test.py | superfun | niklasschmitz/jax | 4 | python | def superfun(a):
pass | def superfun(a):
pass<|docstring|>Does things with stuff.<|endoftext|> |
47cf0f35e75d8294bc422ab2b1abe284b7987740a23c37ed8bae780a40901330 | def prepare_folder_testcase(mocker, moctave, tmpdir):
'Create the necessary mocks.'
mocker.patch('matl_online.tasks.Task')
mktmp = mocker.patch('matl_online.tasks.tempfile.mkdtemp')
mktmp.return_value = tmpdir.strpath
gettmp = mocker.patch('matl_online.tasks.tempfile.gettempdir')
gettmp.return_value = tmpdir.strpath | Create the necessary mocks. | tests/test_tasks.py | prepare_folder_testcase | suever/MATL-Online | 5 | python | def prepare_folder_testcase(mocker, moctave, tmpdir):
mocker.patch('matl_online.tasks.Task')
mktmp = mocker.patch('matl_online.tasks.tempfile.mkdtemp')
mktmp.return_value = tmpdir.strpath
gettmp = mocker.patch('matl_online.tasks.tempfile.gettempdir')
gettmp.return_value = tmpdir.strpath | def prepare_folder_testcase(mocker, moctave, tmpdir):
mocker.patch('matl_online.tasks.Task')
mktmp = mocker.patch('matl_online.tasks.tempfile.mkdtemp')
mktmp.return_value = tmpdir.strpath
gettmp = mocker.patch('matl_online.tasks.tempfile.gettempdir')
gettmp.return_value = tmpdir.strpath<|docstring|>Create the necessary mocks.<|endoftext|> |
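The helper patches `tempfile` so anything the task writes lands in pytest's per-test directory rather than the real system temp dir. The same redirection can be sketched with only the standard library (the path below is illustrative):

```python
import tempfile
from unittest import mock

# Redirect tempfile lookups to a known sandbox directory, mirroring what
# the pytest-mock fixture above does.
with mock.patch('tempfile.gettempdir', return_value='/tmp/test-sandbox'):
    assert tempfile.gettempdir() == '/tmp/test-sandbox'
```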
1128a2c24224bc30345413e5edae46e5de05ba727bc930bda6aa32768dd6d453 | def test_octave_property(self, mocker, moctave, logger):
'Make sure that an instance is created only when requested.'
mocker.patch('matl_online.tasks.Task')
logger.setLevel(logging.ERROR)
moctave.logger = logger
task = OctaveTask()
assert (task._octave is None)
newoctave = task.octave
assert (task._octave == newoctave)
assert (newoctave == moctave)
return task | Make sure that an instance is created only when requested. | tests/test_tasks.py | test_octave_property | suever/MATL-Online | 5 | python | def test_octave_property(self, mocker, moctave, logger):
mocker.patch('matl_online.tasks.Task')
logger.setLevel(logging.ERROR)
moctave.logger = logger
task = OctaveTask()
assert (task._octave is None)
newoctave = task.octave
assert (task._octave == newoctave)
assert (newoctave == moctave)
return task | def test_octave_property(self, mocker, moctave, logger):
mocker.patch('matl_online.tasks.Task')
logger.setLevel(logging.ERROR)
moctave.logger = logger
task = OctaveTask()
assert (task._octave is None)
newoctave = task.octave
assert (task._octave == newoctave)
assert (newoctave == moctave)
return task<|docstring|>Make sure that an instance is created only when requested.<|endoftext|> |
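The behaviour under test is a lazily created, cached session: the private attribute starts as `None` and the property materializes it on first access, then reuses it. A minimal standalone sketch of that pattern (not the real `OctaveTask`):

```python
class LazySession:
    """Sketch of the cached lazy-property pattern the test exercises."""

    def __init__(self):
        self._octave = None          # nothing created yet

    @property
    def octave(self):
        if self._octave is None:     # first access builds the session
            self._octave = object()  # stand-in for a real Octave session
        return self._octave          # subsequent accesses reuse it


task = LazySession()
assert task._octave is None
first = task.octave
assert task.octave is first          # created exactly once
```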
d6ff549c2be9aee8ceb7ff64fea9ad18f5511ae3f8774feacc8d557e766aea57 | def test_octave_property_repeat(self, mocker, moctave, logger):
'Octave sessions are only created once per task.'
task = self.test_octave_property(mocker, moctave, logger)
assert (task.octave == moctave) | Octave sessions are only created once per task. | tests/test_tasks.py | test_octave_property_repeat | suever/MATL-Online | 5 | python | def test_octave_property_repeat(self, mocker, moctave, logger):
task = self.test_octave_property(mocker, moctave, logger)
assert (task.octave == moctave) | def test_octave_property_repeat(self, mocker, moctave, logger):
task = self.test_octave_property(mocker, moctave, logger)
assert (task.octave == moctave)<|docstring|>Octave sessions are only created once per task.<|endoftext|> |
13743721050a4d3195904f03edeedfb88ff4c11c26b7caf853d9ecf6013004dc | def test_folder_no_session(self, mocker, moctave, tmpdir):
'Test the dynamic folder property when there is no session.'
prepare_folder_testcase(mocker, moctave, tmpdir)
task = OctaveTask()
assert os.path.isdir(tmpdir.strpath)
assert (task.folder == tmpdir.strpath)
assert os.path.isdir(tmpdir.strpath) | Test the dynamic folder property when there is no session. | tests/test_tasks.py | test_folder_no_session | suever/MATL-Online | 5 | python | def test_folder_no_session(self, mocker, moctave, tmpdir):
prepare_folder_testcase(mocker, moctave, tmpdir)
task = OctaveTask()
assert os.path.isdir(tmpdir.strpath)
assert (task.folder == tmpdir.strpath)
assert os.path.isdir(tmpdir.strpath) | def test_folder_no_session(self, mocker, moctave, tmpdir):
prepare_folder_testcase(mocker, moctave, tmpdir)
task = OctaveTask()
assert os.path.isdir(tmpdir.strpath)
assert (task.folder == tmpdir.strpath)
assert os.path.isdir(tmpdir.strpath)<|docstring|>Test the dynamic folder property when there is no session.<|endoftext|> |
2a513be818cce3ea22893bb209ed8935f95b551403a1ca4845b553dcdd21bd41 | def test_folder_with_session(self, mocker, moctave, tmpdir):
'Test the folder property when we DO have a session.'
prepare_folder_testcase(mocker, moctave, tmpdir)
task = OctaveTask()
session_id = '123456'
task.session_id = session_id
outfolder = os.path.join(tmpdir.strpath, session_id)
assert (not os.path.isdir(outfolder))
assert (task.folder == outfolder)
assert os.path.isdir(outfolder) | Test the folder property when we DO have a session. | tests/test_tasks.py | test_folder_with_session | suever/MATL-Online | 5 | python | def test_folder_with_session(self, mocker, moctave, tmpdir):
prepare_folder_testcase(mocker, moctave, tmpdir)
task = OctaveTask()
session_id = '123456'
task.session_id = session_id
outfolder = os.path.join(tmpdir.strpath, session_id)
assert (not os.path.isdir(outfolder))
assert (task.folder == outfolder)
assert os.path.isdir(outfolder) | def test_folder_with_session(self, mocker, moctave, tmpdir):
prepare_folder_testcase(mocker, moctave, tmpdir)
task = OctaveTask()
session_id = '123456'
task.session_id = session_id
outfolder = os.path.join(tmpdir.strpath, session_id)
assert (not os.path.isdir(outfolder))
assert (task.folder == outfolder)
assert os.path.isdir(outfolder)<|docstring|>Test the folder property when we DO have a session.<|endoftext|> |
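Taken together, the two folder tests pin down the property's contract: with no session id it returns the shared temp directory, and with one it returns (creating on demand) a per-session subfolder. A standalone sketch of that contract:

```python
import os
import tempfile

def session_folder(session_id=None):
    """Sketch of the behaviour the two tests above verify."""
    base = tempfile.gettempdir()
    folder = os.path.join(base, session_id) if session_id else base
    os.makedirs(folder, exist_ok=True)  # created lazily, like the property
    return folder

assert session_folder() == tempfile.gettempdir()
assert session_folder('123456').endswith('123456')
```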
2d574a396d05449b2ae407b9709f9af7fa559d4fafeb79a3d5f7d11f1e2a7caf | def test_on_term(self, mocker, moctave):
'Ensure cleanup is performed as expected when a task is terminated.'
initialize = mocker.patch('matl_online.tasks._initialize_process')
task = OctaveTask()
task.on_term()
methods = [m[0] for m in moctave.method_calls]
assert ('restart' in methods)
assert (initialize.call_count == 1) | Ensure cleanup is performed as expected when a task is terminated. | tests/test_tasks.py | test_on_term | suever/MATL-Online | 5 | python | def test_on_term(self, mocker, moctave):
initialize = mocker.patch('matl_online.tasks._initialize_process')
task = OctaveTask()
task.on_term()
methods = [m[0] for m in moctave.method_calls]
assert ('restart' in methods)
assert (initialize.call_count == 1) | def test_on_term(self, mocker, moctave):
initialize = mocker.patch('matl_online.tasks._initialize_process')
task = OctaveTask()
task.on_term()
methods = [m[0] for m in moctave.method_calls]
assert ('restart' in methods)
assert (initialize.call_count == 1)<|docstring|>Ensure cleanup is performed as expected when a task is terminated.<|endoftext|> |
fccebc3550c7179e92175905d99634b7188faa6e5b98c4d4494a435bfec98643 | def test_normal(self, mocker, moctave, socketclient):
'Test that messages are received as expected in normal case.'
socketclient.get_received()
mocker.patch('matl_online.tasks.socket', new_callable=(lambda : socketclient.socketio))
matl_task('-ro', '1D', session=session_id_for_client(socketclient))
received = socketclient.get_received()
assert (received[(- 1)]['args'][0] == {'message': '', 'success': True}) | Test that messages are received as expected in normal case. | tests/test_tasks.py | test_normal | suever/MATL-Online | 5 | python | def test_normal(self, mocker, moctave, socketclient):
socketclient.get_received()
mocker.patch('matl_online.tasks.socket', new_callable=(lambda : socketclient.socketio))
matl_task('-ro', '1D', session=session_id_for_client(socketclient))
received = socketclient.get_received()
assert (received[(- 1)]['args'][0] == {'message': '', 'success': True}) | def test_normal(self, mocker, moctave, socketclient):
socketclient.get_received()
mocker.patch('matl_online.tasks.socket', new_callable=(lambda : socketclient.socketio))
matl_task('-ro', '1D', session=session_id_for_client(socketclient))
received = socketclient.get_received()
assert (received[(- 1)]['args'][0] == {'message': '', 'success': True})<|docstring|>Test that messages are received as expected in normal case.<|endoftext|>
4f6809b77256487ac2bdbe11124fbac4702b66976e23bedc3cb05839814479a7 | def test_keyboard_interupt(self, mocker, moctave, socketclient):
'Ensure proper handling of keyboard interrupt events.'
socketclient.get_received()
mocker.patch('matl_online.tasks.socket', new_callable=(lambda : socketclient.socketio))
ev = mocker.patch('matl_online.tasks.matl_task.octave.eval')
ev.side_effect = KeyboardInterrupt
with pytest.raises(KeyboardInterrupt):
matl_task('-ro', '1D', session=session_id_for_client(socketclient))
received = socketclient.get_received()
payload = received[0]['args'][0]
assert (payload.get('session') == session_id_for_client(socketclient))
assert (payload['data'][0]['type'] == 'stderr')
assert (payload['data'][0]['value'] == 'Job cancelled')
assert (received[(- 1)]['args'][0] == {'success': False}) | Ensure proper handling of keyboard interrupt events. | tests/test_tasks.py | test_keyboard_interupt | suever/MATL-Online | 5 | python | def test_keyboard_interupt(self, mocker, moctave, socketclient):
socketclient.get_received()
mocker.patch('matl_online.tasks.socket', new_callable=(lambda : socketclient.socketio))
ev = mocker.patch('matl_online.tasks.matl_task.octave.eval')
ev.side_effect = KeyboardInterrupt
with pytest.raises(KeyboardInterrupt):
matl_task('-ro', '1D', session=session_id_for_client(socketclient))
received = socketclient.get_received()
payload = received[0]['args'][0]
assert (payload.get('session') == session_id_for_client(socketclient))
assert (payload['data'][0]['type'] == 'stderr')
assert (payload['data'][0]['value'] == 'Job cancelled')
assert (received[(- 1)]['args'][0] == {'success': False}) | def test_keyboard_interupt(self, mocker, moctave, socketclient):
socketclient.get_received()
mocker.patch('matl_online.tasks.socket', new_callable=(lambda : socketclient.socketio))
ev = mocker.patch('matl_online.tasks.matl_task.octave.eval')
ev.side_effect = KeyboardInterrupt
with pytest.raises(KeyboardInterrupt):
matl_task('-ro', '1D', session=session_id_for_client(socketclient))
received = socketclient.get_received()
payload = received[0]['args'][0]
assert (payload.get('session') == session_id_for_client(socketclient))
assert (payload['data'][0]['type'] == 'stderr')
assert (payload['data'][0]['value'] == 'Job cancelled')
assert (received[(- 1)]['args'][0] == {'success': False})<|docstring|>Ensure proper handling of keyboard interrupt events.<|endoftext|> |
65f42a2a0b67d53249c3eeeac28ee50d3d6d45a115246bc1974fe944983830db | def test_time_limit(self, mocker, moctave, socketclient):
'Ensure tasks exceeding the time limit are dealt with properly.'
socketclient.get_received()
mocker.patch('matl_online.tasks.socket', new_callable=(lambda : socketclient.socketio))
ev = mocker.patch('matl_online.tasks.matl_task.octave.eval')
ev.side_effect = SoftTimeLimitExceeded
with pytest.raises(SoftTimeLimitExceeded):
matl_task('-ro', '1D', session=session_id_for_client(socketclient))
received = socketclient.get_received()
payload = received[0]['args'][0]
assert (payload.get('session') == session_id_for_client(socketclient))
assert (payload['data'][0]['type'] == 'stderr')
assert (payload['data'][0]['value'] == 'Operation timed out')
assert (received[(- 1)]['args'][0] == {'success': False}) | Ensure tasks exceeding the time limit are dealt with properly. | tests/test_tasks.py | test_time_limit | suever/MATL-Online | 5 | python | def test_time_limit(self, mocker, moctave, socketclient):
socketclient.get_received()
mocker.patch('matl_online.tasks.socket', new_callable=(lambda : socketclient.socketio))
ev = mocker.patch('matl_online.tasks.matl_task.octave.eval')
ev.side_effect = SoftTimeLimitExceeded
with pytest.raises(SoftTimeLimitExceeded):
matl_task('-ro', '1D', session=session_id_for_client(socketclient))
received = socketclient.get_received()
payload = received[0]['args'][0]
assert (payload.get('session') == session_id_for_client(socketclient))
assert (payload['data'][0]['type'] == 'stderr')
assert (payload['data'][0]['value'] == 'Operation timed out')
assert (received[(- 1)]['args'][0] == {'success': False}) | def test_time_limit(self, mocker, moctave, socketclient):
socketclient.get_received()
mocker.patch('matl_online.tasks.socket', new_callable=(lambda : socketclient.socketio))
ev = mocker.patch('matl_online.tasks.matl_task.octave.eval')
ev.side_effect = SoftTimeLimitExceeded
with pytest.raises(SoftTimeLimitExceeded):
matl_task('-ro', '1D', session=session_id_for_client(socketclient))
received = socketclient.get_received()
payload = received[0]['args'][0]
assert (payload.get('session') == session_id_for_client(socketclient))
assert (payload['data'][0]['type'] == 'stderr')
assert (payload['data'][0]['value'] == 'Operation timed out')
assert (received[(- 1)]['args'][0] == {'success': False})<|docstring|>Ensure tasks exceeding the time limit are dealt with properly.<|endoftext|>
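The two failure tests expect the same shape of behaviour: emit a `stderr` message and a `{'success': False}` payload over the socket, then re-raise so Celery still records the failure. A hypothetical sketch of that handler pattern (the wrapper name and `emit` callback are illustrative, not the real task's API; Celery is assumed installed, as the tests already require):

```python
from celery.exceptions import SoftTimeLimitExceeded

def run_with_reporting(run, emit):
    """Illustrative error-reporting wrapper matching the tests above."""
    try:
        run()
    except KeyboardInterrupt:
        emit({'data': [{'type': 'stderr', 'value': 'Job cancelled'}]})
        emit({'success': False})
        raise  # the task must still fail
    except SoftTimeLimitExceeded:
        emit({'data': [{'type': 'stderr', 'value': 'Operation timed out'}]})
        emit({'success': False})
        raise
```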
27cc479cca1841c545f7e89ecfcc7bb6f93d2073a9bb2e668698fcb778a51bde | def __init__(self, value=''):
'\n Params:\n bg: background color\n fg: foreground color\n icon: icon\n '
Widget.__init__(self)
self.value = value
self.bg = None
self.fg = colors['c_white']
self.icon = icons['clock'] | Params:
bg: background color
fg: foreground color
icon: icon | widgets/date.py | __init__ | alberand/lemonbar | 0 | python | def __init__(self, value=''):
'\n Params:\n bg: background color\n fg: foreground color\n icon: icon\n '
Widget.__init__(self)
self.value = value
self.bg = None
self.fg = colors['c_white']
self.icon = icons['clock'] | def __init__(self, value=''):
'\n Params:\n bg: background color\n fg: foreground color\n icon: icon\n '
Widget.__init__(self)
self.value = value
self.bg = None
self.fg = colors['c_white']
self.icon = icons['clock']<|docstring|>Params:
bg: background color
fg: foreground color
icon: icon<|endoftext|> |
b3a2fc5e6bd390af2b879871f08e2c492486614906d18e411dca641d2d51cccb | def update(self):
'\n\n TO IMPLEMENT.\n\n '
date = datetime.datetime.now()
self.value = date.strftime('%a %d.%m.%y %H:%M %p') | TO IMPLEMENT. | widgets/date.py | update | alberand/lemonbar | 0 | python | def update(self):
'\n\n \n\n '
date = datetime.datetime.now()
self.value = date.strftime('%a %d.%m.%y %H:%M %p') | def update(self):
'\n\n \n\n '
date = datetime.datetime.now()
self.value = date.strftime('%a %d.%m.%y %H:%M %p')<|docstring|>TO IMPLEMENT.<|endoftext|> |
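One quirk in the format string above: `%H` is the 24-hour field, so the `%p` AM/PM marker is redundant next to it; `%I` is the 12-hour field that pairs with `%p`. A quick comparison (AM/PM text is locale-dependent):

```python
import datetime

d = datetime.datetime(2024, 1, 5, 13, 7)
print(d.strftime('%a %d.%m.%y %H:%M %p'))  # Fri 05.01.24 13:07 PM (24-hour + AM/PM)
print(d.strftime('%a %d.%m.%y %I:%M %p'))  # Fri 05.01.24 01:07 PM (12-hour + AM/PM)
```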
710a122b65ae250418fa41f2a5729d0978aba0fad95b2ac63a2a785431cfbfe1 | def test_basic_dice_operations_ga():
'Test basic dice operations.'
d12 = Dice.from_dice(12)
assert ((d12 + 3) == Dice.from_full({4: Fraction(1, 12), 5: Fraction(1, 12), 6: Fraction(1, 12), 7: Fraction(1, 12), 8: Fraction(1, 12), 9: Fraction(1, 12), 10: Fraction(1, 12), 11: Fraction(1, 12), 12: Fraction(1, 12), 13: Fraction(1, 12), 14: Fraction(1, 12), 15: Fraction(1, 12)})) | Test basic dice operations. | tests/documentation/test_tutorial.py | test_basic_dice_operations_ga | sponsfreixes/dice_stats | 2 | python | def test_basic_dice_operations_ga():
d12 = Dice.from_dice(12)
assert ((d12 + 3) == Dice.from_full({4: Fraction(1, 12), 5: Fraction(1, 12), 6: Fraction(1, 12), 7: Fraction(1, 12), 8: Fraction(1, 12), 9: Fraction(1, 12), 10: Fraction(1, 12), 11: Fraction(1, 12), 12: Fraction(1, 12), 13: Fraction(1, 12), 14: Fraction(1, 12), 15: Fraction(1, 12)})) | def test_basic_dice_operations_ga():
d12 = Dice.from_dice(12)
assert ((d12 + 3) == Dice.from_full({4: Fraction(1, 12), 5: Fraction(1, 12), 6: Fraction(1, 12), 7: Fraction(1, 12), 8: Fraction(1, 12), 9: Fraction(1, 12), 10: Fraction(1, 12), 11: Fraction(1, 12), 12: Fraction(1, 12), 13: Fraction(1, 12), 14: Fraction(1, 12), 15: Fraction(1, 12)}))<|docstring|>Test basic dice operations.<|endoftext|> |
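The expected mapping follows from a plain probability argument: adding a constant to a fair die relabels outcomes without changing any probability, so `d12 + 3` puts mass `1/12` on each total from 4 through 15. A library-free check:

```python
from fractions import Fraction

# Shifting a fair d12 by +3 relabels faces 1..12 to 4..15, each still 1/12.
shifted = {face + 3: Fraction(1, 12) for face in range(1, 13)}
assert set(shifted) == set(range(4, 16))
assert sum(shifted.values()) == 1
```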
b626ae07eca2ff6eebf5c0541131e8d3a1ee00a2ebc3047aa9f0704eddca6e66 | def test_basic_dice_operations_gs():
'Test basic dice operations.'
d6 = Dice.from_dice(6)
gsw = Dice.from_full({5: Fraction(1, 36), 6: Fraction(2, 36), 7: Fraction(3, 36), 8: Fraction(4, 36), 9: Fraction(5, 36), 10: Fraction(6, 36), 11: Fraction(5, 36), 12: Fraction(4, 36), 13: Fraction(3, 36), 14: Fraction(2, 36), 15: Fraction(1, 36)})
assert (((2 * d6) + 3) == gsw)
assert (((d6 + d6) + 3) == gsw) | Test basic dice operations. | tests/documentation/test_tutorial.py | test_basic_dice_operations_gs | sponsfreixes/dice_stats | 2 | python | def test_basic_dice_operations_gs():
d6 = Dice.from_dice(6)
gsw = Dice.from_full({5: Fraction(1, 36), 6: Fraction(2, 36), 7: Fraction(3, 36), 8: Fraction(4, 36), 9: Fraction(5, 36), 10: Fraction(6, 36), 11: Fraction(5, 36), 12: Fraction(4, 36), 13: Fraction(3, 36), 14: Fraction(2, 36), 15: Fraction(1, 36)})
assert (((2 * d6) + 3) == gsw)
assert (((d6 + d6) + 3) == gsw) | def test_basic_dice_operations_gs():
d6 = Dice.from_dice(6)
gsw = Dice.from_full({5: Fraction(1, 36), 6: Fraction(2, 36), 7: Fraction(3, 36), 8: Fraction(4, 36), 9: Fraction(5, 36), 10: Fraction(6, 36), 11: Fraction(5, 36), 12: Fraction(4, 36), 13: Fraction(3, 36), 14: Fraction(2, 36), 15: Fraction(1, 36)})
assert (((2 * d6) + 3) == gsw)
assert (((d6 + d6) + 3) == gsw)<|docstring|>Test basic dice operations.<|endoftext|> |
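The `2*d6 + 3` distribution is the convolution of two uniform dice: a total `t` needs the faces to sum to `t - 3`, and the count of ordered pairs achieving each sum gives the numerators over 36. Counting directly:

```python
from collections import Counter
from fractions import Fraction
from itertools import product

# Enumerate ordered (d6, d6) pairs and tally each total of 2d6 + 3.
counts = Counter(a + b + 3 for a, b in product(range(1, 7), repeat=2))
dist = {t: Fraction(n, 36) for t, n in counts.items()}
assert dist[10] == Fraction(6, 36)  # six ordered ways for the dice to sum to 7
assert sum(dist.values()) == 1
```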
5753e54a6bb7e69b07306e3c6f30b0d2704a16740169c4ab1097312fca9f266e | def test_rerolling_reroll():
'Test reroll.'
d6 = Dice.from_dice(6)
assert (((2 * d6.reroll([1, 2])) + 3) == Dice.from_full({5: Fraction(1, 324), 6: Fraction(1, 162), 7: Fraction(1, 36), 8: Fraction(4, 81), 9: Fraction(8, 81), 10: Fraction(12, 81), 11: Fraction(14, 81), 12: Fraction(16, 81), 13: Fraction(12, 81), 14: Fraction(8, 81), 15: Fraction(4, 81)})) | Test reroll. | tests/documentation/test_tutorial.py | test_rerolling_reroll | sponsfreixes/dice_stats | 2 | python | def test_rerolling_reroll():
d6 = Dice.from_dice(6)
assert (((2 * d6.reroll([1, 2])) + 3) == Dice.from_full({5: Fraction(1, 324), 6: Fraction(1, 162), 7: Fraction(1, 36), 8: Fraction(4, 81), 9: Fraction(8, 81), 10: Fraction(12, 81), 11: Fraction(14, 81), 12: Fraction(16, 81), 13: Fraction(12, 81), 14: Fraction(8, 81), 15: Fraction(4, 81)})) | def test_rerolling_reroll():
d6 = Dice.from_dice(6)
assert (((2 * d6.reroll([1, 2])) + 3) == Dice.from_full({5: Fraction(1, 324), 6: Fraction(1, 162), 7: Fraction(1, 36), 8: Fraction(4, 81), 9: Fraction(8, 81), 10: Fraction(12, 81), 11: Fraction(14, 81), 12: Fraction(16, 81), 13: Fraction(12, 81), 14: Fraction(8, 81), 15: Fraction(4, 81)}))<|docstring|>Test reroll.<|endoftext|> |
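The `1/324` entry for a total of 5 can be derived by hand. Rerolling a 1 or 2 once means those faces survive only when the reroll repeats them, so `P(1) = P(2) = (2/6)*(1/6) = 1/18`, while each face 3 through 6 gets `1/6 + (2/6)*(1/6) = 2/9`. Two such dice both landing on 1 produce the minimum total `1 + 1 + 3 = 5`:

```python
from fractions import Fraction

p_low = Fraction(2, 6) * Fraction(1, 6)                    # faces 1 and 2: 1/18 each
p_high = Fraction(1, 6) + Fraction(2, 6) * Fraction(1, 6)  # faces 3..6: 2/9 each
assert 2 * p_low + 4 * p_high == 1                         # per-die mass sums to 1
assert p_low * p_low == Fraction(1, 324)                   # matches the entry for 5
```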
e5780132920d9741ae92466c75699f0b99e35c91268e420e871b2e164e798ed0 | def tokenize(self, text: str):
'\n Operates on a single line of text, returns all words in the line as a\n tuple in a list.\n\n input: "1. isz-pur-ram a-na"\n output: [("isz-pur-ram", "akkadian"), ("a-na", "akkadian")]\n\n :param: line: text string\n :return: list of tuples: (word, language)\n '
beginning_underscore = '_[^_]+(?!_)$'
ending_underscore = '^(?<!_)[^_]+_'
two_underscores = '_[^_]+_'
words = text.split()
language = 'akkadian'
output_words = []
for word in words:
if re.search(two_underscores, word):
output_words.append((word, 'sumerian'))
elif re.search(beginning_underscore, word):
language = 'sumerian'
output_words.append((word, language))
elif re.search(ending_underscore, word):
output_words.append((word, language))
language = 'akkadian'
else:
output_words.append((word, language))
return output_words | Operates on a single line of text, returns all words in the line as a
tuple in a list.
input: "1. isz-pur-ram a-na"
output: [("isz-pur-ram", "akkadian"), ("a-na", "akkadian")]
:param: line: text string
:return: list of tuples: (word, language) | src/cltk/tokenizers/akk.py | tokenize | GrantLS/cltk | 757 | python | def tokenize(self, text: str):
'\n Operates on a single line of text, returns all words in the line as a\n tuple in a list.\n\n input: "1. isz-pur-ram a-na"\n output: [("isz-pur-ram", "akkadian"), ("a-na", "akkadian")]\n\n :param: line: text string\n :return: list of tuples: (word, language)\n '
beginning_underscore = '_[^_]+(?!_)$'
ending_underscore = '^(?<!_)[^_]+_'
two_underscores = '_[^_]+_'
words = text.split()
language = 'akkadian'
output_words = []
for word in words:
if re.search(two_underscores, word):
output_words.append((word, 'sumerian'))
elif re.search(beginning_underscore, word):
language = 'sumerian'
output_words.append((word, language))
elif re.search(ending_underscore, word):
output_words.append((word, language))
language = 'akkadian'
else:
output_words.append((word, language))
return output_words | def tokenize(self, text: str):
'\n Operates on a single line of text, returns all words in the line as a\n tuple in a list.\n\n input: "1. isz-pur-ram a-na"\n output: [("isz-pur-ram", "akkadian"), ("a-na", "akkadian")]\n\n :param: line: text string\n :return: list of tuples: (word, language)\n '
beginning_underscore = '_[^_]+(?!_)$'
ending_underscore = '^(?<!_)[^_]+_'
two_underscores = '_[^_]+_'
words = text.split()
language = 'akkadian'
output_words = []
for word in words:
if re.search(two_underscores, word):
output_words.append((word, 'sumerian'))
elif re.search(beginning_underscore, word):
language = 'sumerian'
output_words.append((word, language))
elif re.search(ending_underscore, word):
output_words.append((word, language))
language = 'akkadian'
else:
output_words.append((word, language))
return output_words<|docstring|>Operates on a single line of text, returns all words in the line as a
tuple in a list.
input: "1. isz-pur-ram a-na"
output: [("isz-pur-ram", "akkadian"), ("a-na", "akkadian")]
:param: line: text string
:return: list of tuples: (word, language)<|endoftext|> |
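The underscore conventions are easiest to see on a concrete line. Assuming the enclosing tokenizer class can be instantiated (`AkkadianTokenizer` below is a placeholder name; only the `tokenize` method shown above is relied on):

```python
tokenizer = AkkadianTokenizer()  # placeholder name for the class above

tokenizer.tokenize("u2-wa-a-ru _e2_ a-na")
# -> [('u2-wa-a-ru', 'akkadian'), ('_e2_', 'sumerian'), ('a-na', 'akkadian')]
```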
0274c831e01a6ca297c8686f7ae66d065326c82c9e2ddac2033b50ca471a176e | def tokenize_sign(self, word: str):
'\n Takes tuple (word, language) and splits the word up into individual\n sign tuples (sign, language) in a list.\n\n input: ("{gisz}isz-pur-ram", "akkadian")\n output: [("gisz", "determinative"), ("isz", "akkadian"),\n ("pur", "akkadian"), ("ram", "akkadian")]\n\n :param: tuple created by word_tokenizer2\n :return: list of tuples: (sign, function or language)\n '
word_signs = []
sign = ''
language = word[1]
determinative = False
for char in word[0]:
if (determinative is True):
if (char == '}'):
determinative = False
if (len(sign) > 0):
word_signs.append((sign, 'determinative'))
sign = ''
language = word[1]
continue
else:
sign += char
continue
elif (language == 'akkadian'):
if (char == '{'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
determinative = True
continue
elif (char == '_'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
language = 'sumerian'
continue
elif (char == '-'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
language = word[1]
continue
else:
sign += char
elif (language == 'sumerian'):
if (char == '{'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
determinative = True
continue
elif (char == '_'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
language = word[1]
continue
elif (char == '-'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
language = word[1]
continue
else:
sign += char
if (len(sign) > 0):
word_signs.append((sign, language))
return word_signs | Takes tuple (word, language) and splits the word up into individual
sign tuples (sign, language) in a list.
input: ("{gisz}isz-pur-ram", "akkadian")
output: [("gisz", "determinative"), ("isz", "akkadian"),
("pur", "akkadian"), ("ram", "akkadian")]
:param: tuple created by word_tokenizer2
:return: list of tuples: (sign, function or language) | src/cltk/tokenizers/akk.py | tokenize_sign | GrantLS/cltk | 757 | python | def tokenize_sign(self, word: str):
'\n Takes tuple (word, language) and splits the word up into individual\n sign tuples (sign, language) in a list.\n\n input: ("{gisz}isz-pur-ram", "akkadian")\n output: [("gisz", "determinative"), ("isz", "akkadian"),\n ("pur", "akkadian"), ("ram", "akkadian")]\n\n :param: tuple created by word_tokenizer2\n :return: list of tuples: (sign, function or language)\n '
word_signs = []
sign = ''
language = word[1]
determinative = False
for char in word[0]:
if (determinative is True):
if (char == '}'):
determinative = False
if (len(sign) > 0):
word_signs.append((sign, 'determinative'))
sign = ''
language = word[1]
continue
else:
sign += char
continue
elif (language == 'akkadian'):
if (char == '{'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
determinative = True
continue
elif (char == '_'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
language = 'sumerian'
continue
elif (char == '-'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
language = word[1]
continue
else:
sign += char
elif (language == 'sumerian'):
if (char == '{'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
determinative = True
continue
elif (char == '_'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
language = word[1]
continue
elif (char == '-'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
language = word[1]
continue
else:
sign += char
if (len(sign) > 0):
word_signs.append((sign, language))
return word_signs | def tokenize_sign(self, word: str):
'\n Takes tuple (word, language) and splits the word up into individual\n sign tuples (sign, language) in a list.\n\n input: ("{gisz}isz-pur-ram", "akkadian")\n output: [("gisz", "determinative"), ("isz", "akkadian"),\n ("pur", "akkadian"), ("ram", "akkadian")]\n\n :param: tuple created by word_tokenizer2\n :return: list of tuples: (sign, function or language)\n '
word_signs = []
sign = ''
language = word[1]
determinative = False
for char in word[0]:
if (determinative is True):
if (char == '}'):
determinative = False
if (len(sign) > 0):
word_signs.append((sign, 'determinative'))
sign = ''
language = word[1]
continue
else:
sign += char
continue
elif (language == 'akkadian'):
if (char == '{'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
determinative = True
continue
elif (char == '_'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
language = 'sumerian'
continue
elif (char == '-'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
language = word[1]
continue
else:
sign += char
elif (language == 'sumerian'):
if (char == '{'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
determinative = True
continue
elif (char == '_'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
language = word[1]
continue
elif (char == '-'):
if (len(sign) > 0):
word_signs.append((sign, language))
sign = ''
language = word[1]
continue
else:
sign += char
if (len(sign) > 0):
word_signs.append((sign, language))
return word_signs<|docstring|>Takes tuple (word, language) and splits the word up into individual
sign tuples (sign, language) in a list.
input: ("{gisz}isz-pur-ram", "akkadian")
output: [("gisz", "determinative"), ("isz", "akkadian"),
("pur", "akkadian"), ("ram", "akkadian")]
:param: tuple created by word_tokenizer2
:return: list of tuples: (sign, function or language)<|endoftext|> |
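A compact, behaviorally similar sketch of the splitting logic in the record above. This is not the cltk implementation; the regex-based approach and the function name split_signs are assumptions of this sketch. Determinatives wrapped in {...} are tagged 'determinative', '-' separates signs, and '_' toggles between the word's base language and Sumerian.

import re

def split_signs(word, base_language='akkadian'):
    # Tokens: a {determinative}, a run of sign characters, or a '_' toggle;
    # '-' separators fall outside every alternative and are skipped.
    signs = []
    language = base_language
    for token in re.findall(r'\{[^}]*\}|[^{}_-]+|_', word):
        if token == '_':
            # '_' flips between the base language and Sumerian.
            language = 'sumerian' if language == base_language else base_language
        elif token.startswith('{'):
            signs.append((token[1:-1], 'determinative'))
        else:
            signs.append((token, language))
    return signs

print(split_signs('{gisz}isz-pur-ram'))
# [('gisz', 'determinative'), ('isz', 'akkadian'),
#  ('pur', 'akkadian'), ('ram', 'akkadian')]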
696ba9ed442d75baea0b5c328faaae0fd71da6cff0f64e913a3381c076573ffe | def __init__(self, debug):
'Initialize the parser\n :debug: provide debug output if set\n '
HTMLParser.__init__(self)
self._debug = debug
self._index = (- 1)
self._dindex = (- 1)
self._th = 0
self._entry = None
self._start = False | Initialize the parser
:debug: provide debug output if set | tools/kronos-util.py | __init__ | ArdanaCLM/logging-ansible | 0 | python | def __init__(self, debug):
'Initialize the parser\n :debug: provide debug output if set\n '
HTMLParser.__init__(self)
self._debug = debug
self._index = (- 1)
self._dindex = (- 1)
self._th = 0
self._entry = None
self._start = False | def __init__(self, debug):
'Initialize the parser\n :debug: provide debug output if set\n '
HTMLParser.__init__(self)
self._debug = debug
self._index = (- 1)
self._dindex = (- 1)
self._th = 0
self._entry = None
self._start = False<|docstring|>Initialize the parser
:debug: provide debug output if set<|endoftext|> |
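The __init__ above only sets up parser state; the tag and data handlers live elsewhere in kronos-util.py. For orientation, a self-contained sketch of the same HTMLParser-subclass pattern, using the Python 3 module path; the handler logic below is hypothetical, not the tool's.

from html.parser import HTMLParser

class TableParser(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        self.cells = []

    def handle_data(self, data):
        # Collect non-empty text nodes, e.g. wiki table cells.
        if data.strip():
            self.cells.append(data.strip())

p = TableParser()
p.feed('<table><tr><td>log</td><td>daily</td></tr></table>')
print(p.cells)  # ['log', 'daily']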
0bc9094660088d989ee4d6ecf8c597bc4e499e6cf4f448f7969eb084df83a684 | def __init__(self, debug, wiki, ardana_files):
' Initialize\n :debug: provide debug output if set\n :wiki: html file containing wiki data\n :ardana_files: list of files to validate data against\n '
self._debug = debug
self.openstack_entries = []
self.wiki_entries = None
with open(wiki, 'r') as strm:
html = strm.read()
parser = Parser(self._debug)
parser.feed(html)
self.wiki_entries = parser.entries
hostgroup_mappings = {}
hostgroup_file = os.path.join((os.path.dirname(ardana_files[0]) + '../../../kronos-producer-configure.yml'))
with open(hostgroup_file, 'r') as f:
for entry in [x['include'] for x in yaml.load(f) if ('include' in x)]:
pieces = entry.split(' ')
target_hosts = pieces[1][(pieces[1].find('=') + 1):]
vars_file = pieces[2][(pieces[2].find('=') + 1):pieces[2].find('-clr.yml')]
if (vars_file not in hostgroup_mappings):
hostgroup_mappings[vars_file] = []
hostgroup_mappings[vars_file].append(target_hosts)
for ardana_file in ardana_files:
with open(ardana_file, 'r') as strm:
sub_service = yaml.load(strm)['sub_service']
for option in sub_service['logging_options']:
for logfile in option['files']:
entry = Entry(sub_service['service'])
entry.sub_service = sub_service['name']
if (sub_service['name'] in hostgroup_mappings):
entry.hostgroups = hostgroup_mappings[sub_service['name']]
entry.logfile = logfile
if ('centralized_logging' in option):
entry.logged = option['centralized_logging']['enabled']
entry.format = option['centralized_logging']['format']
if ('log_rotate' in option):
log_rotate = option['log_rotate']
if ('daily' in log_rotate):
entry.frequency = 'daily'
if ('weekly' in log_rotate):
entry.frequency = 'weekly'
if ('monthly' in log_rotate):
entry.frequency = 'monthly'
try:
entry.maxsize = next((x[(x.find('maxsize') + 8):] for x in log_rotate if ('maxsize' in x)))
except:
pass
try:
entry.retention = next((x[(x.find('rotate') + 7):] for x in log_rotate if ('rotate' in x)))
except:
pass
try:
entry.create_user = next((x.split(' ')[2] for x in log_rotate if ('create' in x)))
except:
pass
try:
entry.create_group = next((x.split(' ')[3] for x in log_rotate if ('create' in x)))
except:
pass
try:
entry.su_user = next((x.split(' ')[2] for x in log_rotate if ('su' in x)))
except:
pass
try:
entry.su_group = next((x.split(' ')[3] for x in log_rotate if ('su' in x)))
except:
pass
if (len(list(filter((lambda x: (x == 'notifempty')), log_rotate))) > 1):
print("{:.<60} multiple notifempty in '{}'".format(entry.logfile, entry.sub_service))
sys.exit()
if ('notifempty' in log_rotate):
entry.notifempty = True
self.openstack_entries.append(entry) | Initialize
:debug: provide debug output if set
:wiki: html file containing wiki data
:ardana_files: list of files to validate data against | tools/kronos-util.py | __init__ | ArdanaCLM/logging-ansible | 0 | python | def __init__(self, debug, wiki, ardana_files):
' Initialize\n :debug: provide debug output if set\n :wiki: html file containing wiki data\n :ardana_files: list of files to validate data against\n '
self._debug = debug
self.openstack_entries = []
self.wiki_entries = None
with open(wiki, 'r') as strm:
html = strm.read()
parser = Parser(self._debug)
parser.feed(html)
self.wiki_entries = parser.entries
hostgroup_mappings = {}
hostgroup_file = os.path.join((os.path.dirname(ardana_files[0]) + '../../../kronos-producer-configure.yml'))
with open(hostgroup_file, 'r') as f:
for entry in [x['include'] for x in yaml.load(f) if ('include' in x)]:
pieces = entry.split(' ')
target_hosts = pieces[1][(pieces[1].find('=') + 1):]
vars_file = pieces[2][(pieces[2].find('=') + 1):pieces[2].find('-clr.yml')]
if (vars_file not in hostgroup_mappings):
hostgroup_mappings[vars_file] = []
hostgroup_mappings[vars_file].append(target_hosts)
for ardana_file in ardana_files:
with open(ardana_file, 'r') as strm:
sub_service = yaml.load(strm)['sub_service']
for option in sub_service['logging_options']:
for logfile in option['files']:
entry = Entry(sub_service['service'])
entry.sub_service = sub_service['name']
if (sub_service['name'] in hostgroup_mappings):
entry.hostgroups = hostgroup_mappings[sub_service['name']]
entry.logfile = logfile
if ('centralized_logging' in option):
entry.logged = option['centralized_logging']['enabled']
entry.format = option['centralized_logging']['format']
if ('log_rotate' in option):
log_rotate = option['log_rotate']
if ('daily' in log_rotate):
entry.frequency = 'daily'
if ('weekly' in log_rotate):
entry.frequency = 'weekly'
if ('monthly' in log_rotate):
entry.frequency = 'monthly'
try:
entry.maxsize = next((x[(x.find('maxsize') + 8):] for x in log_rotate if ('maxsize' in x)))
except:
pass
try:
entry.retention = next((x[(x.find('rotate') + 7):] for x in log_rotate if ('rotate' in x)))
except:
pass
try:
entry.create_user = next((x.split(' ')[2] for x in log_rotate if ('create' in x)))
except:
pass
try:
entry.create_group = next((x.split(' ')[3] for x in log_rotate if ('create' in x)))
except:
pass
try:
entry.su_user = next((x.split(' ')[2] for x in log_rotate if ('su' in x)))
except:
pass
try:
entry.su_group = next((x.split(' ')[3] for x in log_rotate if ('su' in x)))
except:
pass
if (len(list(filter((lambda x: (x == 'notifempty')), log_rotate))) > 1):
print("{:.<60} multiple notifempty in '{}'".format(entry.logfile, entry.sub_service))
sys.exit()
if ('notifempty' in log_rotate):
entry.notifempty = True
self.openstack_entries.append(entry) | def __init__(self, debug, wiki, ardana_files):
' Initialize\n :debug: provide debug output if set\n :wiki: html file containing wiki data\n :ardana_files: list of files to validate data against\n '
self._debug = debug
self.openstack_entries = []
self.wiki_entries = None
with open(wiki, 'r') as strm:
html = strm.read()
parser = Parser(self._debug)
parser.feed(html)
self.wiki_entries = parser.entries
hostgroup_mappings = {}
hostgroup_file = os.path.join((os.path.dirname(ardana_files[0]) + '../../../kronos-producer-configure.yml'))
with open(hostgroup_file, 'r') as f:
for entry in [x['include'] for x in yaml.load(f) if ('include' in x)]:
pieces = entry.split(' ')
target_hosts = pieces[1][(pieces[1].find('=') + 1):]
vars_file = pieces[2][(pieces[2].find('=') + 1):pieces[2].find('-clr.yml')]
if (vars_file not in hostgroup_mappings):
hostgroup_mappings[vars_file] = []
hostgroup_mappings[vars_file].append(target_hosts)
for ardana_file in ardana_files:
with open(ardana_file, 'r') as strm:
sub_service = yaml.load(strm)['sub_service']
for option in sub_service['logging_options']:
for logfile in option['files']:
entry = Entry(sub_service['service'])
entry.sub_service = sub_service['name']
if (sub_service['name'] in hostgroup_mappings):
entry.hostgroups = hostgroup_mappings[sub_service['name']]
entry.logfile = logfile
if ('centralized_logging' in option):
entry.logged = option['centralized_logging']['enabled']
entry.format = option['centralized_logging']['format']
if ('log_rotate' in option):
log_rotate = option['log_rotate']
if ('daily' in log_rotate):
entry.frequency = 'daily'
if ('weekly' in log_rotate):
entry.frequency = 'weekly'
if ('monthly' in log_rotate):
entry.frequency = 'monthly'
try:
entry.maxsize = next((x[(x.find('maxsize') + 8):] for x in log_rotate if ('maxsize' in x)))
except:
pass
try:
entry.retention = next((x[(x.find('rotate') + 7):] for x in log_rotate if ('rotate' in x)))
except:
pass
try:
entry.create_user = next((x.split(' ')[2] for x in log_rotate if ('create' in x)))
except:
pass
try:
entry.create_group = next((x.split(' ')[3] for x in log_rotate if ('create' in x)))
except:
pass
try:
entry.su_user = next((x.split(' ')[2] for x in log_rotate if ('su' in x)))
except:
pass
try:
entry.su_group = next((x.split(' ')[3] for x in log_rotate if ('su' in x)))
except:
pass
if (len(list(filter((lambda x: (x == 'notifempty')), log_rotate))) > 1):
print("{:.<60} multiple notifempty in '{}'".format(entry.logfile, entry.sub_service))
sys.exit()
if ('notifempty' in log_rotate):
entry.notifempty = True
self.openstack_entries.append(entry)<|docstring|>Initialize
:debug: provide debug output if set
:wiki: html file containing wiki data
:ardana_files: list of files to validate data against<|endoftext|> |
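The include-line parsing in the record above relies on positional fields; a worked example with a made-up entry string (the playbook, hostgroup, and vars-file names are hypothetical):

# Each include line looks like:
#   <playbook> target_hosts=<hostgroup> vars_file=<name>-clr.yml
entry = 'kronos-producer.yml target_hosts=NEU-SVR vars_file=neutron-server-clr.yml'
pieces = entry.split(' ')
# Take everything after the '=' in the second field.
target_hosts = pieces[1][pieces[1].find('=') + 1:]
# Take the span between '=' and the '-clr.yml' suffix in the third field.
vars_file = pieces[2][pieces[2].find('=') + 1:pieces[2].find('-clr.yml')]
print(target_hosts, vars_file)  # NEU-SVR neutron-server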
2b3388af47bcd9b36de160b660ea2a97932f8c39a34c2cd14869a73fe9482cd3 | def first(self, func, iterable):
'Get the first item that matches the func or return None\n :returns: first matching item else None\n '
result = None
if (type(iterable) == dict):
for (k, v) in iterable.items():
if func(k, v):
result = iterable[k]
break
else:
for x in iterable:
if func(x):
result = x
break
return result | Get the first item that matches the func or return None
:returns: first matching item else None | tools/kronos-util.py | first | ArdanaCLM/logging-ansible | 0 | python | def first(self, func, iterable):
'Get the first item that matches the func or return None\n :returns: first matching item else None\n '
result = None
if (type(iterable) == dict):
for (k, v) in iterable.items():
if func(k, v):
result = iterable[k]
break
else:
for x in iterable:
if func(x):
result = x
break
return result | def first(self, func, iterable):
'Get the first item that matches the func or return None\n :returns: first matching item else None\n '
result = None
if (type(iterable) == dict):
for (k, v) in iterable.items():
if func(k, v):
result = iterable[k]
break
else:
for x in iterable:
if func(x):
result = x
break
return result<|docstring|>Get the first item that matches the func or return None
:returns: first matching item else None<|endoftext|> |
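The helper above dispatches on the iterable's type: for dicts the predicate receives (key, value) and the matching value is returned, while for sequences it receives the item itself. A standalone restatement for illustration (module-level rather than a method, toy data):

def first(func, iterable):
    # Dict: predicate over (key, value), return the matching value.
    if isinstance(iterable, dict):
        for k, v in iterable.items():
            if func(k, v):
                return v
        return None
    # Sequence: predicate over the item, return the matching item.
    for x in iterable:
        if func(x):
            return x
    return None

print(first(lambda x: x > 2, [1, 2, 3, 4]))            # 3
print(first(lambda k, v: k == 'b', {'a': 1, 'b': 2}))  # 2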
63d524dc48332cf550530b6153b585c47c6ebe0097a25287f0e7db37fd7e8584 | def validate(self, all, service, service_names, hostgroups, users, rotate_freq, rotate_maxsize, rotate_retention):
"Validate the given paramters against OpenStack using Wiki data\n :all: validate all\n :service: validate the given service only\n :service_names: validate the service name\n :hostgroups: validate the service's hostgroups are correct\n :users: validate the logfiles have the correct users and groups set\n :rotate_freq: validate that all logs have correct frequency\n :rotate_maxsize: validate that all logs have correct maxsize\n :rotate_retention: validate that all logs have correct retention\n "
openstack_entries = self.openstack_entries
wiki_entries = self.wiki_entries
if service:
blue('<[={:-^85}=]> '.format(service))
openstack_entries = self.first((lambda x, y: (service in x)), self.group_by_service(self.openstack_entries))
wiki_entries = self.first((lambda x, y: (service in x)), self.group_by_service(self.wiki_entries))
if (all or service or service_names):
result = True
yellow('<[={:-^85}=]> '.format('Validating service names'), end='')
for wiki_entry in wiki_entries:
openstack_entry = self.first((lambda x: (x.sub_service == wiki_entry.sub_service)), openstack_entries)
if (not openstack_entry):
if result:
red('[failure]')
print("{:.<60}doesn't exist".format(wiki_entry.sub_service))
result = False
for openstack_entry in openstack_entries:
wiki_entry = self.first((lambda x: (x.sub_service == openstack_entry.sub_service)), wiki_entries)
if (not wiki_entry):
if result:
red('[failure]')
print("{:.<60}shouldn't exist".format(openstack_entry.sub_service))
result = False
if result:
green('[success]')
if (all or service or hostgroups):
result = True
yellow('<[={:-^85}=]> '.format('Validating service hostgroups'), end='')
for wiki_entry in wiki_entries:
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
for x in wiki_entry.hostgroups:
if (openstack_entry and (x not in openstack_entry.hostgroups)):
if result:
red('[failure]')
print('{:.<60}hostgroups {} not found'.format(wiki_entry.logfile, str(wiki_entry.hostgroups)))
result = False
if openstack_entry:
for x in openstack_entry.hostgroups:
if (x not in wiki_entry.hostgroups):
if result:
red('[failure]')
print("{:.<60}hostgroups {} shouldn't exist".format(wiki_entry.logfile, str(x)))
result = False
if result:
green('[success]')
if (all or service):
result = True
yellow('<[={:-^85}=]> '.format('Validating logging'), end='')
for wiki_entry in wiki_entries:
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (not openstack_entry):
if result:
red('[failure]')
print("{:.<60}doesn't exist".format(wiki_entry.logfile))
result = False
elif ((not openstack_entry.logged) and wiki_entry.logged):
if result:
red('[failure]')
print('{:.<60}not logged'.format(wiki_entry.logfile))
result = False
for openstack_entry in openstack_entries:
if openstack_entry.logged:
wiki_entry = self.first((lambda x: (x.logfile == openstack_entry.logfile)), wiki_entries)
if ((not wiki_entry) or (not wiki_entry.logged)):
if result:
red('[failure]')
print("{:.<60}being logged and shouldn't be".format(openstack_entry.logfile))
result = False
if result:
green('[success]')
if (all or service or users):
result = True
yellow('<[={:-^85}=]> '.format('Validating create users and groups'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (openstack_entry and (openstack_entry.create_user != wiki_entry.create_user)):
if result:
red('[failure]')
print("{:.<60}user '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.create_user), str(wiki_entry.create_user)))
result = False
if (openstack_entry and (openstack_entry.create_group != wiki_entry.create_group)):
if result:
red('[failure]')
print("{:.<60}group '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.create_group), str(wiki_entry.create_group)))
result = False
if result:
green('[success]')
if (all or service or rotate_freq or rotate_maxsize or rotate_retention):
result = True
yellow('<[={:-^85}=]> '.format('Validating all logs are being rotated'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (not openstack_entry):
if result:
red('[failure]')
print('{:.<60}not rotated'.format(wiki_entry.logfile))
result = False
sys.exit()
for openstack_entry in openstack_entries:
wiki_entry = self.first((lambda x: (x.logfile == openstack_entry.logfile)), wiki_entries)
if (not wiki_entry):
if result:
red('[failure]')
print('{:.<60}rotated but not on Wiki'.format(openstack_entry.logfile))
result = False
sys.exit()
elif ((Entry.ex in wiki_entry.status) and any((openstack_entry.frequency, openstack_entry.retention, openstack_entry.maxsize))):
if result:
red('[failure]')
print('{:.<60}rotated but external on Wiki'.format(openstack_entry.logfile))
result = False
sys.exit()
if result:
green('[success]')
if (all or service or rotate_freq):
result = True
yellow('<[={:-^85}=]> '.format('Validating logging rotate frequency'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (openstack_entry.frequency != wiki_entry.frequency):
if result:
red('[failure]')
print("{:.<60}frequency '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.frequency), str(wiki_entry.frequency)))
result = False
if result:
green('[success]')
if (all or service or rotate_retention):
result = True
yellow('<[={:-^85}=]> '.format('Validating logging rotate retention'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (openstack_entry.retention != wiki_entry.retention):
if result:
red('[failure]')
print("{:.<60}retention '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.retention), str(wiki_entry.retention)))
result = False
if result:
green('[success]')
if (all or service or rotate_maxsize):
result = True
yellow('<[={:-^85}=]> '.format('Validating logging rotate maxsize'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (openstack_entry.maxsize != wiki_entry.maxsize):
if result:
red('[failure]')
print("{:.<60}maxsize '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.maxsize), str(wiki_entry.maxsize)))
result = False
if result:
green('[success]') | Validate the given parameters against OpenStack using Wiki data
:all: validate all
:service: validate the given service only
:service_names: validate the service name
:hostgroups: validate the service's hostgroups are correct
:users: validate the logfiles have the correct users and groups set
:rotate_freq: validate that all logs have correct frequency
:rotate_maxsize: validate that all logs have correct maxsize
:rotate_retention: validate that all logs have correct retention | tools/kronos-util.py | validate | ArdanaCLM/logging-ansible | 0 | python | def validate(self, all, service, service_names, hostgroups, users, rotate_freq, rotate_maxsize, rotate_retention):
"Validate the given paramters against OpenStack using Wiki data\n :all: validate all\n :service: validate the given service only\n :service_names: validate the service name\n :hostgroups: validate the service's hostgroups are correct\n :users: validate the logfiles have the correct users and groups set\n :rotate_freq: validate that all logs have correct frequency\n :rotate_maxsize: validate that all logs have correct maxsize\n :rotate_retention: validate that all logs have correct retention\n "
openstack_entries = self.openstack_entries
wiki_entries = self.wiki_entries
if service:
blue('<[={:-^85}=]> '.format(service))
openstack_entries = self.first((lambda x, y: (service in x)), self.group_by_service(self.openstack_entries))
wiki_entries = self.first((lambda x, y: (service in x)), self.group_by_service(self.wiki_entries))
if (all or service or service_names):
result = True
yellow('<[={:-^85}=]> '.format('Validating service names'), end='')
for wiki_entry in wiki_entries:
openstack_entry = self.first((lambda x: (x.sub_service == wiki_entry.sub_service)), openstack_entries)
if (not openstack_entry):
if result:
red('[failure]')
print("{:.<60}doesn't exist".format(wiki_entry.sub_service))
result = False
for openstack_entry in openstack_entries:
wiki_entry = self.first((lambda x: (x.sub_service == openstack_entry.sub_service)), wiki_entries)
if (not wiki_entry):
if result:
red('[failure]')
print("{:.<60}shouldn't exist".format(openstack_entry.sub_service))
result = False
if result:
green('[success]')
if (all or service or hostgroups):
result = True
yellow('<[={:-^85}=]> '.format('Validating service hostgroups'), end='')
for wiki_entry in wiki_entries:
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
for x in wiki_entry.hostgroups:
if (openstack_entry and (x not in openstack_entry.hostgroups)):
if result:
red('[failure]')
print('{:.<60}hostgroups {} not found'.format(wiki_entry.logfile, str(wiki_entry.hostgroups)))
result = False
if openstack_entry:
for x in openstack_entry.hostgroups:
if (x not in wiki_entry.hostgroups):
if result:
red('[failure]')
print("{:.<60}hostgroups {} shouldn't exist".format(wiki_entry.logfile, str(x)))
result = False
if result:
green('[success]')
if (all or service):
result = True
yellow('<[={:-^85}=]> '.format('Validating logging'), end='')
for wiki_entry in wiki_entries:
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (not openstack_entry):
if result:
red('[failure]')
print("{:.<60}doesn't exist".format(wiki_entry.logfile))
result = False
elif ((not openstack_entry.logged) and wiki_entry.logged):
if result:
red('[failure]')
print('{:.<60}not logged'.format(wiki_entry.logfile))
result = False
for openstack_entry in openstack_entries:
if openstack_entry.logged:
wiki_entry = self.first((lambda x: (x.logfile == openstack_entry.logfile)), wiki_entries)
if ((not wiki_entry) or (not wiki_entry.logged)):
if result:
red('[failure]')
print("{:.<60}being logged and shouldn't be".format(openstack_entry.logfile))
result = False
if result:
green('[success]')
if (all or service or users):
result = True
yellow('<[={:-^85}=]> '.format('Validating create users and groups'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (openstack_entry and (openstack_entry.create_user != wiki_entry.create_user)):
if result:
red('[failure]')
print("{:.<60}user '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.create_user), str(wiki_entry.create_user)))
result = False
if (openstack_entry and (openstack_entry.create_group != wiki_entry.create_group)):
if result:
red('[failure]')
print("{:.<60}group '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.create_group), str(wiki_entry.create_group)))
result = False
if result:
green('[success]')
if (all or service or rotate_freq or rotate_maxsize or rotate_retention):
result = True
yellow('<[={:-^85}=]> '.format('Validating all logs are being rotated'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (not openstack_entry):
if result:
red('[failure]')
print('{:.<60}not rotated'.format(wiki_entry.logfile))
result = False
sys.exit()
for openstack_entry in openstack_entries:
wiki_entry = self.first((lambda x: (x.logfile == openstack_entry.logfile)), wiki_entries)
if (not wiki_entry):
if result:
red('[failure]')
print('{:.<60}rotated but not on Wiki'.format(openstack_entry.logfile))
result = False
sys.exit()
elif ((Entry.ex in wiki_entry.status) and any((openstack_entry.frequency, openstack_entry.retention, openstack_entry.maxsize))):
if result:
red('[failure]')
print('{:.<60}rotated but external on Wiki'.format(openstack_entry.logfile))
result = False
sys.exit()
if result:
green('[success]')
if (all or service or rotate_freq):
result = True
yellow('<[={:-^85}=]> '.format('Validating logging rotate frequency'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (openstack_entry.frequency != wiki_entry.frequency):
if result:
red('[failure]')
print("{:.<60}frequency '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.frequency), str(wiki_entry.frequency)))
result = False
if result:
green('[success]')
if (all or service or rotate_retention):
result = True
yellow('<[={:-^85}=]> '.format('Validating logging rotate retention'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (openstack_entry.retention != wiki_entry.retention):
if result:
red('[failure]')
print("{:.<60}retention '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.retention), str(wiki_entry.retention)))
result = False
if result:
green('[success]')
if (all or service or rotate_maxsize):
result = True
yellow('<[={:-^85}=]> '.format('Validating logging rotate maxsize'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (openstack_entry.maxsize != wiki_entry.maxsize):
if result:
red('[failure]')
print("{:.<60}maxsize '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.maxsize), str(wiki_entry.maxsize)))
result = False
if result:
green('[success]') | def validate(self, all, service, service_names, hostgroups, users, rotate_freq, rotate_maxsize, rotate_retention):
"Validate the given paramters against OpenStack using Wiki data\n :all: validate all\n :service: validate the given service only\n :service_names: validate the service name\n :hostgroups: validate the service's hostgroups are correct\n :users: validate the logfiles have the correct users and groups set\n :rotate_freq: validate that all logs have correct frequency\n :rotate_maxsize: validate that all logs have correct maxsize\n :rotate_retention: validate that all logs have correct retention\n "
openstack_entries = self.openstack_entries
wiki_entries = self.wiki_entries
if service:
blue('<[={:-^85}=]> '.format(service))
openstack_entries = self.first((lambda x, y: (service in x)), self.group_by_service(self.openstack_entries))
wiki_entries = self.first((lambda x, y: (service in x)), self.group_by_service(self.wiki_entries))
if (all or service or service_names):
result = True
yellow('<[={:-^85}=]> '.format('Validating service names'), end='')
for wiki_entry in wiki_entries:
openstack_entry = self.first((lambda x: (x.sub_service == wiki_entry.sub_service)), openstack_entries)
if (not openstack_entry):
if result:
red('[failure]')
print("{:.<60}doesn't exist".format(wiki_entry.sub_service))
result = False
for openstack_entry in openstack_entries:
wiki_entry = self.first((lambda x: (x.sub_service == openstack_entry.sub_service)), wiki_entries)
if (not wiki_entry):
if result:
red('[failure]')
print("{:.<60}shouldn't exist".format(openstack_entry.sub_service))
result = False
if result:
green('[success]')
if (all or service or hostgroups):
result = True
yellow('<[={:-^85}=]> '.format('Validating service hostgroups'), end='')
for wiki_entry in wiki_entries:
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
for x in wiki_entry.hostgroups:
if (openstack_entry and (x not in openstack_entry.hostgroups)):
if result:
red('[failure]')
print('{:.<60}hostgroups {} not found'.format(wiki_entry.logfile, str(wiki_entry.hostgroups)))
result = False
if openstack_entry:
for x in openstack_entry.hostgroups:
if (x not in wiki_entry.hostgroups):
if result:
red('[failure]')
print("{:.<60}hostgroups {} shouldn't exist".format(wiki_entry.logfile, str(x)))
result = False
if result:
green('[success]')
if (all or service):
result = True
yellow('<[={:-^85}=]> '.format('Validating logging'), end='')
for wiki_entry in wiki_entries:
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (not openstack_entry):
if result:
red('[failure]')
print("{:.<60}doesn't exist".format(wiki_entry.logfile))
result = False
elif ((not openstack_entry.logged) and wiki_entry.logged):
if result:
red('[failure]')
print('{:.<60}not logged'.format(wiki_entry.logfile))
result = False
for openstack_entry in openstack_entries:
if openstack_entry.logged:
wiki_entry = self.first((lambda x: (x.logfile == openstack_entry.logfile)), wiki_entries)
if ((not wiki_entry) or (not wiki_entry.logged)):
if result:
red('[failure]')
print("{:.<60}being logged and shouldn't be".format(openstack_entry.logfile))
result = False
if result:
green('[success]')
if (all or service or users):
result = True
yellow('<[={:-^85}=]> '.format('Validating create users and groups'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (openstack_entry and (openstack_entry.create_user != wiki_entry.create_user)):
if result:
red('[failure]')
print("{:.<60}user '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.create_user), str(wiki_entry.create_user)))
result = False
if (openstack_entry and (openstack_entry.create_group != wiki_entry.create_group)):
if result:
red('[failure]')
print("{:.<60}group '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.create_group), str(wiki_entry.create_group)))
result = False
if result:
green('[success]')
if (all or service or rotate_freq or rotate_maxsize or rotate_retention):
result = True
yellow('<[={:-^85}=]> '.format('Validating all logs are being rotated'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (not openstack_entry):
if result:
red('[failure]')
print('{:.<60}not rotated'.format(wiki_entry.logfile))
result = False
sys.exit()
for openstack_entry in openstack_entries:
wiki_entry = self.first((lambda x: (x.logfile == openstack_entry.logfile)), wiki_entries)
if (not wiki_entry):
if result:
red('[failure]')
print('{:.<60}rotated but not on Wiki'.format(openstack_entry.logfile))
result = False
sys.exit()
elif ((Entry.ex in wiki_entry.status) and any((openstack_entry.frequency, openstack_entry.retention, openstack_entry.maxsize))):
if result:
red('[failure]')
print('{:.<60}rotated but external on Wiki'.format(openstack_entry.logfile))
result = False
sys.exit()
if result:
green('[success]')
if (all or service or rotate_freq):
result = True
yellow('<[={:-^85}=]> '.format('Validating logging rotate frequency'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (openstack_entry.frequency != wiki_entry.frequency):
if result:
red('[failure]')
print("{:.<60}frequency '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.frequency), str(wiki_entry.frequency)))
result = False
if result:
green('[success]')
if (all or service or rotate_retention):
result = True
yellow('<[={:-^85}=]> '.format('Validating logging rotate retention'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (openstack_entry.retention != wiki_entry.retention):
if result:
red('[failure]')
print("{:.<60}retention '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.retention), str(wiki_entry.retention)))
result = False
if result:
green('[success]')
if (all or service or rotate_maxsize):
result = True
yellow('<[={:-^85}=]> '.format('Validating logging rotate maxsize'), end='')
for wiki_entry in wiki_entries:
if (Entry.ex in wiki_entry.status):
continue
openstack_entry = self.first((lambda x: (x.logfile == wiki_entry.logfile)), openstack_entries)
if (openstack_entry.maxsize != wiki_entry.maxsize):
if result:
red('[failure]')
print("{:.<60}maxsize '{}' should be '{}'".format(wiki_entry.logfile, str(openstack_entry.maxsize), str(wiki_entry.maxsize)))
result = False
if result:
green('[success]')<|docstring|>Validate the given parameters against OpenStack using Wiki data
:all: validate all
:service: validate the given service only
:service_names: validate the service name
:hostgroups: validate the service's hostgroups are correct
:users: validate the logfiles have the correct users and groups set
:rotate_freq: validate that all logs have correct frequency
:rotate_maxsize: validate that all logs have correct maxsize
:rotate_retention: validate that all logs have correct retention<|endoftext|> |
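Every check in validate above applies the same two-way comparison, keyed on a different attribute each time (sub_service, logfile, hostgroups, users, rotation settings). The core pattern reduces to a symmetric set difference; a toy restatement with invented service names:

wiki = {'nova-compute', 'ceilometer-api'}       # entries parsed from the wiki
deployed = {'nova-compute', 'stale-service'}    # entries found in the Ardana files
print(sorted(wiki - deployed))   # on the wiki but missing: ['ceilometer-api']
print(sorted(deployed - wiki))   # present but shouldn't exist: ['stale-service']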
364207496133c060cf45bd976b93ad1b6315c73604b3b2a69992bf9a73943f2b | def disk_usage(self, service=None, openstack=False):
'Calculate disk usage for all services\n :service: validate the given service only\n :openstack: base calculations off openstack entries\n '
size_fd = 0
size_se = 0
max_disk = 65000
reserve = 3250
alloc_fd = 6500
services = 55000
alloc_per_se = 2500
compression = (1 - 0.8)
openstack_entries = self.openstack_entries
wiki_entries = self.wiki_entries
if service:
openstack_entries = self.first((lambda x, y: (service in x)), self.group_by_service(self.openstack_entries))
wiki_entries = self.first((lambda x, y: (service in x)), self.group_by_service(self.wiki_entries))
entries = wiki_entries
if openstack:
entries = openstack_entries
for entry in entries:
maxsize = int(entry.maxsize[:(- 1)])
if (entry.maxsize[(- 1)] == 'K'):
maxsize = (maxsize / 1024)
retention = int(entry.retention)
val = int((maxsize + ((maxsize * retention) * compression)))
if (entry.service == 'ceph'):
val *= 6
if (not openstack):
if (Entry.fd in entry.status):
size_fd += val
elif (Entry.se in entry.status):
size_se += val
else:
size_se += val
if (not service):
yellow('<[={:-^85}=]> '.format('Validating foundation disk quotas'), end='')
if (size_fd > alloc_fd):
red('[failure]')
else:
green('[success]')
for entry in [x for x in wiki_entries if (Entry.fd in x.status)]:
maxsize = int(entry.maxsize[:(- 1)])
if (entry.maxsize[(- 1)] == 'K'):
maxsize = (maxsize / 1024)
retention = int(entry.retention)
val = int((maxsize + ((maxsize * retention) * compression)))
if (size_fd > alloc_fd):
print('{:.<60}Maxsize: {}, Retention: {} = {}M'.format(entry.logfile, str(entry.maxsize), str(entry.retention), str(val)))
print('{:.<60}'.format("Results for ['{}']".format('foundation')), end='')
if (size_fd > alloc_fd):
red('Total: {}m, Quota: {}m'.format(str(size_fd), str(alloc_fd)))
else:
green('Total: {}M, Quota: {}M'.format(str(size_fd), str(alloc_fd)))
yellow('<[={:-^85}=]> '.format('Validating service disk quotas'), end='')
if (size_se > services):
red('[failure]')
else:
green('[success]')
entries = wiki_entries
if openstack:
entries = openstack_entries
grouped_by_service = self.group_by_service(entries)
for (service_name, entries) in sorted(grouped_by_service.items()):
if ((not openstack) and (not any([x for x in entries if (Entry.se in x.status)]))):
continue
msg = ''
size_per_se = 0
if service:
msg += '<[={:-^85}=]> \n'.format('{} - disk usage'.format(service_name))
else:
msg += '<[={:-^85}=]> \n'.format(service_name)
for entry in entries:
maxsize = int(entry.maxsize[:(- 1)])
if (entry.maxsize[(- 1)] == 'K'):
maxsize = (maxsize / 1024)
retention = int(entry.retention)
val = int((maxsize + ((maxsize * retention) * compression)))
if (service_name == 'ceph'):
val *= 6
size_per_se += val
msg += '{:.<60}Maxsize: {}, Retention: {} = {}M\n'.format(entry.logfile, str(entry.maxsize), str(entry.retention), str(val))
if (size_per_se > alloc_per_se):
print(msg)
print('{:.<60}'.format("Results for ['{}']".format(entry.service)), end='')
if (size_per_se > alloc_per_se):
red('Total: {}M, Quota: {}M'.format(str(size_per_se), str(alloc_per_se)))
else:
green('Total: {}M, Quota: {}M'.format(str(size_per_se), str(alloc_per_se)))
print('{0:-<91}\n{0:-<91}\n{1:.<60}'.format('', 'Services total results'), end='')
if (size_se > services):
red('Total: {}M, Quota: {}M'.format(str(size_se), str(services)))
else:
green('Total: {}M, Quota: {}M'.format(str(size_se), str(services))) | Calculate disk usage for all services
:service: validate the given service only
:openstack: base calculations off openstack entries | tools/kronos-util.py | disk_usage | ArdanaCLM/logging-ansible | 0 | python | def disk_usage(self, service=None, openstack=False):
'Calculate disk usage for all services\n :service: validate the given service only\n :openstack: base calculations off openstack entries\n '
size_fd = 0
size_se = 0
max_disk = 65000
reserve = 3250
alloc_fd = 6500
services = 55000
alloc_per_se = 2500
compression = (1 - 0.8)
openstack_entries = self.openstack_entries
wiki_entries = self.wiki_entries
if service:
openstack_entries = self.first((lambda x, y: (service in x)), self.group_by_service(self.openstack_entries))
wiki_entries = self.first((lambda x, y: (service in x)), self.group_by_service(self.wiki_entries))
entries = wiki_entries
if openstack:
entries = openstack_entries
for entry in entries:
maxsize = int(entry.maxsize[:(- 1)])
if (entry.maxsize[(- 1)] == 'K'):
maxsize = (maxsize / 1024)
retention = int(entry.retention)
val = int((maxsize + ((maxsize * retention) * compression)))
if (entry.service == 'ceph'):
val *= 6
if (not openstack):
if (Entry.fd in entry.status):
size_fd += val
elif (Entry.se in entry.status):
size_se += val
else:
size_se += val
if (not service):
yellow('<[={:-^85}=]> '.format('Validating foundation disk quotas'), end='')
if (size_fd > alloc_fd):
red('[failure]')
else:
green('[success]')
for entry in [x for x in wiki_entries if (Entry.fd in x.status)]:
maxsize = int(entry.maxsize[:(- 1)])
if (entry.maxsize[(- 1)] == 'K'):
maxsize = (maxsize / 1024)
retention = int(entry.retention)
val = int((maxsize + ((maxsize * retention) * compression)))
if (size_fd > alloc_fd):
print('{:.<60}Maxsize: {}, Retention: {} = {}M'.format(entry.logfile, str(entry.maxsize), str(entry.retention), str(val)))
print('{:.<60}'.format("Results for ['{}']".format('foundation')), end='')
if (size_fd > alloc_fd):
red('Total: {}m, Quota: {}m'.format(str(size_fd), str(alloc_fd)))
else:
green('Total: {}M, Quota: {}M'.format(str(size_fd), str(alloc_fd)))
yellow('<[={:-^85}=]> '.format('Validating service disk quotas'), end='')
if (size_se > services):
red('[failure]')
else:
green('[success]')
entries = wiki_entries
if openstack:
entries = openstack_entries
grouped_by_service = self.group_by_service(entries)
for (service_name, entries) in sorted(grouped_by_service.items()):
if ((not openstack) and (not any([x for x in entries if (Entry.se in x.status)]))):
continue
msg = ''
size_per_se = 0
if service:
msg += '<[={:-^85}=]> \n'.format('{} - disk usage'.format(service_name))
else:
msg += '<[={:-^85}=]> \n'.format(service_name)
for entry in entries:
maxsize = int(entry.maxsize[:(- 1)])
if (entry.maxsize[(- 1)] == 'K'):
maxsize = (maxsize / 1024)
retention = int(entry.retention)
val = int((maxsize + ((maxsize * retention) * compression)))
if (service_name == 'ceph'):
val *= 6
size_per_se += val
msg += '{:.<60}Maxsize: {}, Retention: {} = {}M\n'.format(entry.logfile, str(entry.maxsize), str(entry.retention), str(val))
if (size_per_se > alloc_per_se):
print(msg)
print('{:.<60}'.format("Results for ['{}']".format(entry.service)), end='')
if (size_per_se > alloc_per_se):
red('Total: {}M, Quota: {}M'.format(str(size_per_se), str(alloc_per_se)))
else:
green('Total: {}M, Quota: {}M'.format(str(size_per_se), str(alloc_per_se)))
print('{0:-<91}\n{0:-<91}\n{1:.<60}'.format('', 'Services total results'), end='')
if (size_se > services):
red('Total: {}M, Quota: {}M'.format(str(size_se), str(services)))
else:
green('Total: {}M, Quota: {}M'.format(str(size_se), str(services))) | def disk_usage(self, service=None, openstack=False):
'Calculate disk usage for all services\n :service: validate the given service only\n :openstack: base calculations off openstack entries\n '
size_fd = 0
size_se = 0
max_disk = 65000
reserve = 3250
alloc_fd = 6500
services = 55000
alloc_per_se = 2500
compression = (1 - 0.8)
openstack_entries = self.openstack_entries
wiki_entries = self.wiki_entries
if service:
openstack_entries = self.first((lambda x, y: (service in x)), self.group_by_service(self.openstack_entries))
wiki_entries = self.first((lambda x, y: (service in x)), self.group_by_service(self.wiki_entries))
entries = wiki_entries
if openstack:
entries = openstack_entries
for entry in entries:
maxsize = int(entry.maxsize[:(- 1)])
if (entry.maxsize[(- 1)] == 'K'):
maxsize = (maxsize / 1024)
retention = int(entry.retention)
val = int((maxsize + ((maxsize * retention) * compression)))
if (entry.service == 'ceph'):
val *= 6
if (not openstack):
if (Entry.fd in entry.status):
size_fd += val
elif (Entry.se in entry.status):
size_se += val
else:
size_se += val
if (not service):
yellow('<[={:-^85}=]> '.format('Validating foundation disk quotas'), end='')
if (size_fd > alloc_fd):
red('[failure]')
else:
green('[success]')
for entry in [x for x in wiki_entries if (Entry.fd in x.status)]:
maxsize = int(entry.maxsize[:(- 1)])
if (entry.maxsize[(- 1)] == 'K'):
maxsize = (maxsize / 1024)
retention = int(entry.retention)
val = int((maxsize + ((maxsize * retention) * compression)))
if (size_fd > alloc_fd):
print('{:.<60}Maxsize: {}, Retention: {} = {}M'.format(entry.logfile, str(entry.maxsize), str(entry.retention), str(val)))
print('{:.<60}'.format("Results for ['{}']".format('foundation')), end='')
if (size_fd > alloc_fd):
red('Total: {}m, Quota: {}m'.format(str(size_fd), str(alloc_fd)))
else:
green('Total: {}M, Quota: {}M'.format(str(size_fd), str(alloc_fd)))
yellow('<[={:-^85}=]> '.format('Validating service disk quotas'), end='')
if (size_se > services):
red('[failure]')
else:
green('[success]')
entries = wiki_entries
if openstack:
entries = openstack_entries
grouped_by_service = self.group_by_service(entries)
for (service_name, entries) in sorted(grouped_by_service.items()):
if ((not openstack) and (not any([x for x in entries if (Entry.se in x.status)]))):
continue
msg = ''
size_per_se = 0
if service:
msg += '<[={:-^85}=]> \n'.format('{} - disk usage'.format(service_name))
else:
msg += '<[={:-^85}=]> \n'.format(service_name)
for entry in entries:
maxsize = int(entry.maxsize[:(- 1)])
if (entry.maxsize[(- 1)] == 'K'):
maxsize = (maxsize / 1024)
retention = int(entry.retention)
val = int((maxsize + ((maxsize * retention) * compression)))
if (service_name == 'ceph'):
val *= 6
size_per_se += val
msg += '{:.<60}Maxsize: {}, Retention: {} = {}M\n'.format(entry.logfile, str(entry.maxsize), str(entry.retention), str(val))
if (size_per_se > alloc_per_se):
print(msg)
print('{:.<60}'.format("Results for ['{}']".format(entry.service)), end='')
if (size_per_se > alloc_per_se):
red('Total: {}M, Quota: {}M'.format(str(size_per_se), str(alloc_per_se)))
else:
green('Total: {}M, Quota: {}M'.format(str(size_per_se), str(alloc_per_se)))
print('{0:-<91}\n{0:-<91}\n{1:.<60}'.format('', 'Services total results'), end='')
if (size_se > services):
red('Total: {}M, Quota: {}M'.format(str(size_se), str(services)))
else:
green('Total: {}M, Quota: {}M'.format(str(size_se), str(services)))<|docstring|>Calculate disk usage for all services
:service: validate the given service only
:openstack: base calculations off openstack entries<|endoftext|> |
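A worked example of the per-logfile estimate in disk_usage above: the live file plus `retention` rotated copies, where `compression = (1 - 0.8)` models rotated copies shrinking to roughly 20% of raw size. The sample numbers below are made up:

compression = 1 - 0.8        # rotated copies assumed to compress to ~20%
maxsize, retention = 100, 5  # 100M live file, keep 5 rotated copies
val = int(maxsize + (maxsize * retention) * compression)
print(val)  # 200 -> 100M live + 5 copies * 20M compressed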
64db61c206c0c67cc8f967dc705b48da48e4afa6c94ff1b2f3895ac41997e4a4 | def group_by_service(self, entries):
'Sort log files by service first (e.g. /var/log/neutron)\n :entries: entries to sort by service\n :results: sorted entries by service\n '
grouped_by_service = {}
for entry in entries:
if (entry.service not in grouped_by_service):
grouped_by_service[entry.service] = []
grouped_by_service[entry.service].append(entry)
return grouped_by_service | Sort log files by service first (e.g. /var/log/neutron)
:entries: entries to sort by service
:results: sorted entries by service | tools/kronos-util.py | group_by_service | ArdanaCLM/logging-ansible | 0 | python | def group_by_service(self, entries):
'Sort log files by service first (e.g. /var/log/neutron)\n :entries: entries to sort by service\n :results: sorted entries by service\n '
grouped_by_service = {}
for entry in entries:
if (entry.service not in grouped_by_service):
grouped_by_service[entry.service] = []
grouped_by_service[entry.service].append(entry)
return grouped_by_service | def group_by_service(self, entries):
'Sort log files by service first (e.g. /var/log/neutron)\n :entries: entries to sort by service\n :results: sorted entries by service\n '
grouped_by_service = {}
for entry in entries:
if (entry.service not in grouped_by_service):
grouped_by_service[entry.service] = []
grouped_by_service[entry.service].append(entry)
return grouped_by_service<|docstring|>Sort log files by service first (e.g. /var/log/neutron)
:entries: entries to sort by service
:results: sorted entries by service<|endoftext|> |
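The grouping above can also be expressed with the standard library; a self-contained restatement using a toy stand-in for the tool's Entry class (the real one is defined elsewhere in kronos-util.py):

from collections import defaultdict, namedtuple

Entry = namedtuple('Entry', 'service logfile')  # toy stand-in only
entries = [Entry('nova', 'a.log'), Entry('nova', 'b.log'), Entry('ceph', 'c.log')]
grouped = defaultdict(list)
for e in entries:
    grouped[e.service].append(e)
print({s: [e.logfile for e in v] for s, v in grouped.items()})
# {'nova': ['a.log', 'b.log'], 'ceph': ['c.log']}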
cbf681daafd0ffd15bf00fd65af6850ffc8a1d8e861de40374ba3ce8fe94bb60 | def DisableCrosBeeps(chromeos_root, remote, log_level):
'Disable annoying Chromebook beeps after reboots.'
cmd_executer = command_executer.GetCommandExecuter(log_level=log_level)
command = '/usr/share/vboot/bin/set_gbb_flags.sh 0x1'
logger.GetLogger().LogOutput('Trying to disable beeping.')
(ret, o, _) = cmd_executer.CrosRunCommandWOutput(command, chromeos_root=chromeos_root, machine=remote)
if (ret != 0):
logger.GetLogger().LogOutput(o)
logger.GetLogger().LogOutput('Failed to disable beeps.') | Disable annoying Chromebook beeps after reboots. | image_chromeos.py | DisableCrosBeeps | TinkerBoard2-Android/external-toolchain-utils | 0 | python | def DisableCrosBeeps(chromeos_root, remote, log_level):
cmd_executer = command_executer.GetCommandExecuter(log_level=log_level)
command = '/usr/share/vboot/bin/set_gbb_flags.sh 0x1'
logger.GetLogger().LogOutput('Trying to disable beeping.')
(ret, o, _) = cmd_executer.CrosRunCommandWOutput(command, chromeos_root=chromeos_root, machine=remote)
if (ret != 0):
logger.GetLogger().LogOutput(o)
logger.GetLogger().LogOutput('Failed to disable beeps.') | def DisableCrosBeeps(chromeos_root, remote, log_level):
cmd_executer = command_executer.GetCommandExecuter(log_level=log_level)
command = '/usr/share/vboot/bin/set_gbb_flags.sh 0x1'
logger.GetLogger().LogOutput('Trying to disable beeping.')
(ret, o, _) = cmd_executer.CrosRunCommandWOutput(command, chromeos_root=chromeos_root, machine=remote)
if (ret != 0):
logger.GetLogger().LogOutput(o)
logger.GetLogger().LogOutput('Failed to disable beeps.')<|docstring|>Disable annoying Chromebook beeps after reboots.<|endoftext|>
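For context on the record above: the 0x1 passed to set_gbb_flags.sh corresponds to GBB_FLAG_DEV_SCREEN_SHORT_DELAY in vboot, which shortens the developer-screen delay that produces the boot beep. A small sketch of composing the same command string:

GBB_FLAG_DEV_SCREEN_SHORT_DELAY = 0x1  # per vboot_reference gbb_header.h
command = ('/usr/share/vboot/bin/set_gbb_flags.sh %#x'
           % GBB_FLAG_DEV_SCREEN_SHORT_DELAY)
print(command)  # /usr/share/vboot/bin/set_gbb_flags.sh 0x1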
0da7a353d4cb55848f8460873945065208cb4013ad5983d31c0a6e8acace191f | def FindChromeOSImage(image_file, chromeos_root):
'Find path for ChromeOS image inside chroot.\n\n    This function could be called with image paths that are either inside\n    or outside the chroot. In either case the path needs to be translated\n    to a real/absolute path inside the chroot.\n    Example input paths:\n    /usr/local/google/home/uname/chromeos/chroot/tmp/my-test-images/image\n    ~/trunk/src/build/images/board/latest/image\n    /tmp/peppy-release/R67-1235.0.0/image\n\n    Corresponding example output paths:\n    /tmp/my-test-images/image\n    /home/uname/trunk/src/build/images/board/latest/image\n    /tmp/peppy-release/R67-1235.0.0/image\n    '
whoami = getpass.getuser()
real_chroot_dir = os.path.join(os.path.realpath(chromeos_root), 'chroot')
real_chromeos_root = os.path.realpath(chromeos_root)
if (image_file.find(real_chroot_dir) != (- 1)):
chroot_image = image_file[len(real_chroot_dir):]
elif (image_file.find(real_chromeos_root) != (- 1)):
chroot_image = image_file[len(real_chromeos_root):]
chroot_image = ('/home/%s/trunk%s' % (whoami, chroot_image))
else:
chroot_image = image_file
return chroot_image | Find path for ChromeOS image inside chroot.
This function could be called with image paths that are either inside
or outside the chroot. In either case the path needs to be translated
to a real/absolute path inside the chroot.
Example input paths:
/usr/local/google/home/uname/chromeos/chroot/tmp/my-test-images/image
~/trunk/src/build/images/board/latest/image
/tmp/peppy-release/R67-1235.0.0/image
Corresponding example output paths:
/tmp/my-test-images/image
/home/uname/trunk/src/build/images/board/latest/image
/tmp/peppy-release/R67-1235.0.0/image | image_chromeos.py | FindChromeOSImage | TinkerBoard2-Android/external-toolchain-utils | 0 | python | def FindChromeOSImage(image_file, chromeos_root):
'Find path for ChromeOS image inside chroot.\n\n This function could be called with image paths that are either inside\n or outside the chroot. In either case the path needs to be translated\n to an real/absolute path inside the chroot.\n Example input paths:\n /usr/local/google/home/uname/chromeos/chroot/tmp/my-test-images/image\n ~/trunk/src/build/images/board/latest/image\n /tmp/peppy-release/R67-1235.0.0/image\n\n Corresponding example output paths:\n /tmp/my-test-images/image\n /home/uname/trunk/src/build/images/board/latest/image\n /tmp/peppy-release/R67-1235.0,0/image\n '
whoami = getpass.getuser()
real_chroot_dir = os.path.join(os.path.realpath(chromeos_root), 'chroot')
real_chromeos_root = os.path.realpath(chromeos_root)
if (image_file.find(real_chroot_dir) != (- 1)):
chroot_image = image_file[len(real_chroot_dir):]
elif (image_file.find(real_chromeos_root) != (- 1)):
chroot_image = image_file[len(real_chromeos_root):]
chroot_image = ('/home/%s/trunk%s' % (whoami, chroot_image))
else:
chroot_image = image_file
return chroot_image | def FindChromeOSImage(image_file, chromeos_root):
'Find path for ChromeOS image inside chroot.\n\n This function could be called with image paths that are either inside\n or outside the chroot. In either case the path needs to be translated\n to an real/absolute path inside the chroot.\n Example input paths:\n /usr/local/google/home/uname/chromeos/chroot/tmp/my-test-images/image\n ~/trunk/src/build/images/board/latest/image\n /tmp/peppy-release/R67-1235.0.0/image\n\n Corresponding example output paths:\n /tmp/my-test-images/image\n /home/uname/trunk/src/build/images/board/latest/image\n /tmp/peppy-release/R67-1235.0,0/image\n '
whoami = getpass.getuser()
real_chroot_dir = os.path.join(os.path.realpath(chromeos_root), 'chroot')
real_chromeos_root = os.path.realpath(chromeos_root)
if (image_file.find(real_chroot_dir) != (- 1)):
chroot_image = image_file[len(real_chroot_dir):]
elif (image_file.find(real_chromeos_root) != (- 1)):
chroot_image = image_file[len(real_chromeos_root):]
chroot_image = ('/home/%s/trunk%s' % (whoami, chroot_image))
else:
chroot_image = image_file
return chroot_image<|docstring|>Find path for ChromeOS image inside chroot.
This function could be called with image paths that are either inside
or outside the chroot. In either case the path needs to be translated
to a real/absolute path inside the chroot.
Example input paths:
/usr/local/google/home/uname/chromeos/chroot/tmp/my-test-images/image
~/trunk/src/build/images/board/latest/image
/tmp/peppy-release/R67-1235.0.0/image
Corresponding example output paths:
/tmp/my-test-images/image
/home/uname/trunk/src/build/images/board/latest/image
/tmp/peppy-release/R67-1235.0.0/image<|endoftext|>
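The first two branches in FindChromeOSImage above can be traced with a hypothetical layout (paths and username invented for the example; the third branch passes the path through unchanged):

import os

chromeos_root = '/home/uname/chromeos'
real_chroot_dir = os.path.join(chromeos_root, 'chroot')

# Case 1: path under the chroot directory -> strip the chroot prefix.
inside = real_chroot_dir + '/tmp/my-test-images/image'
print(inside[len(real_chroot_dir):])  # /tmp/my-test-images/image

# Case 2: path under the source tree -> remap through ~/trunk inside the chroot.
in_tree = chromeos_root + '/src/build/images/board/latest/image'
print('/home/%s/trunk%s' % ('uname', in_tree[len(chromeos_root):]))
# /home/uname/trunk/src/build/images/board/latest/image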
232aa0df19985db2233ee6eaf4aa16e8a6a23fbd9f309bebd32fa3606db1db1f | def DoImage(argv):
'Image ChromeOS.'
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--chromeos_root', dest='chromeos_root', help='Target directory for ChromeOS installation.')
parser.add_argument('-r', '--remote', dest='remote', help='Target device.')
parser.add_argument('-i', '--image', dest='image', help='Image binary file.')
parser.add_argument('-b', '--board', dest='board', help='Target board override.')
parser.add_argument('-f', '--force', dest='force', action='store_true', default=False, help='Force an image even if it is non-test.')
parser.add_argument('-n', '--no_lock', dest='no_lock', default=False, action='store_true', help='Do not attempt to lock remote before imaging. This option should only be used in cases where the exclusive lock has already been acquired (e.g. in a script that calls this one).')
parser.add_argument('-l', '--logging_level', dest='log_level', default='verbose', help="Amount of logging to be used. Valid levels are 'quiet', 'average', and 'verbose'.")
parser.add_argument('-a', '--image_args', dest='image_args')
options = parser.parse_args(argv[1:])
if (not (options.log_level in command_executer.LOG_LEVEL)):
Usage(parser, "--logging_level must be 'quiet', 'average' or 'verbose'")
else:
log_level = options.log_level
cmd_executer = command_executer.GetCommandExecuter(log_level=log_level)
l = logger.GetLogger()
if (options.chromeos_root is None):
Usage(parser, '--chromeos_root must be set')
if (options.remote is None):
Usage(parser, '--remote must be set')
options.chromeos_root = os.path.expanduser(options.chromeos_root)
if (options.board is None):
board = cmd_executer.CrosLearnBoard(options.chromeos_root, options.remote)
else:
board = options.board
if (options.image is None):
images_dir = misc.GetImageDir(options.chromeos_root, board)
image = os.path.join(images_dir, 'latest', 'chromiumos_test_image.bin')
if (not os.path.exists(image)):
image = os.path.join(images_dir, 'latest', 'chromiumos_image.bin')
is_xbuddy_image = False
else:
image = options.image
is_xbuddy_image = image.startswith('xbuddy://')
if (not is_xbuddy_image):
image = os.path.expanduser(image)
if (not is_xbuddy_image):
image = os.path.realpath(image)
if ((not os.path.exists(image)) and (not is_xbuddy_image)):
Usage(parser, (('Image file: ' + image) + ' does not exist!'))
try:
should_unlock = False
if (not options.no_lock):
try:
_ = locks.AcquireLock(list(options.remote.split()), options.chromeos_root)
should_unlock = True
except Exception as e:
raise RuntimeError(('Error acquiring machine: %s' % str(e)))
reimage = False
local_image = False
if (not is_xbuddy_image):
local_image = True
image_checksum = FileUtils().Md5File(image, log_level=log_level)
command = ('cat ' + checksum_file)
(ret, device_checksum, _) = cmd_executer.CrosRunCommandWOutput(command, chromeos_root=options.chromeos_root, machine=options.remote)
device_checksum = device_checksum.strip()
image_checksum = str(image_checksum)
l.LogOutput(('Image checksum: ' + image_checksum))
l.LogOutput(('Device checksum: ' + device_checksum))
if (image_checksum != device_checksum):
[found, located_image] = LocateOrCopyImage(options.chromeos_root, image, board=board)
reimage = True
l.LogOutput('Checksums do not match. Re-imaging...')
chroot_image = FindChromeOSImage(located_image, options.chromeos_root)
is_test_image = IsImageModdedForTest(options.chromeos_root, chroot_image, log_level)
if ((not is_test_image) and (not options.force)):
logger.GetLogger().LogFatal('Have to pass --force to image a non-test image!')
else:
reimage = True
found = True
l.LogOutput('Using non-local image; Re-imaging...')
if reimage:
command = 'mount -o remount,rw,exec /tmp'
cmd_executer.CrosRunCommand(command, chromeos_root=options.chromeos_root, machine=options.remote)
CheckForCrosFlash(options.chromeos_root, options.remote, log_level)
DisableCrosBeeps(options.chromeos_root, options.remote, log_level)
cros_flash_args = ['cros', 'flash', ('--board=%s' % board), '--clobber-stateful', options.remote]
if local_image:
cros_flash_args.append(chroot_image)
else:
cros_flash_args.append(image)
command = ' '.join(cros_flash_args)
os.chmod(misc.GetChromeOSKeyFile(options.chromeos_root), 384)
if (log_level == 'average'):
cmd_executer.SetLogLevel('verbose')
retries = 0
while True:
if (log_level == 'quiet'):
l.LogOutput(('CMD : %s' % command))
ret = cmd_executer.ChrootRunCommand(options.chromeos_root, command, command_timeout=1800)
if ((ret == 0) or (retries >= 2)):
break
retries += 1
if (log_level == 'quiet'):
l.LogOutput(('Imaging failed. Retry # %d.' % retries))
if (log_level == 'average'):
cmd_executer.SetLogLevel(log_level)
logger.GetLogger().LogFatalIf(ret, 'Image command failed')
ret = EnsureMachineUp(options.chromeos_root, options.remote, log_level)
if ((not local_image) and ret):
ret = 0
else:
ret = 1
if local_image:
if (log_level == 'average'):
l.LogOutput('Verifying image.')
command = ('echo %s > %s && chmod -w %s' % (image_checksum, checksum_file, checksum_file))
ret = cmd_executer.CrosRunCommand(command, chromeos_root=options.chromeos_root, machine=options.remote)
logger.GetLogger().LogFatalIf(ret, 'Writing checksum failed.')
successfully_imaged = VerifyChromeChecksum(options.chromeos_root, chroot_image, options.remote, log_level)
logger.GetLogger().LogFatalIf((not successfully_imaged), 'Image verification failed!')
TryRemountPartitionAsRW(options.chromeos_root, options.remote, log_level)
if (not found):
temp_dir = os.path.dirname(located_image)
l.LogOutput(('Deleting temp image dir: %s' % temp_dir))
shutil.rmtree(temp_dir)
l.LogOutput('Image updated.')
else:
l.LogOutput('Checksums match, skip image update and reboot.')
command = 'reboot && exit'
_ = cmd_executer.CrosRunCommand(command, chromeos_root=options.chromeos_root, machine=options.remote)
time.sleep(30)
finally:
if should_unlock:
locks.ReleaseLock(list(options.remote.split()), options.chromeos_root)
return ret | Image ChromeOS. | image_chromeos.py | DoImage | TinkerBoard2-Android/external-toolchain-utils | 0 | python | def DoImage(argv):
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--chromeos_root', dest='chromeos_root', help='Target directory for ChromeOS installation.')
parser.add_argument('-r', '--remote', dest='remote', help='Target device.')
parser.add_argument('-i', '--image', dest='image', help='Image binary file.')
parser.add_argument('-b', '--board', dest='board', help='Target board override.')
parser.add_argument('-f', '--force', dest='force', action='store_true', default=False, help='Force an image even if it is non-test.')
parser.add_argument('-n', '--no_lock', dest='no_lock', default=False, action='store_true', help='Do not attempt to lock remote before imaging. This option should only be used in cases where the exclusive lock has already been acquired (e.g. in a script that calls this one).')
parser.add_argument('-l', '--logging_level', dest='log_level', default='verbose', help="Amount of logging to be used. Valid levels are 'quiet', 'average', and 'verbose'.")
parser.add_argument('-a', '--image_args', dest='image_args')
options = parser.parse_args(argv[1:])
if (not (options.log_level in command_executer.LOG_LEVEL)):
Usage(parser, "--logging_level must be 'quiet', 'average' or 'verbose'")
else:
log_level = options.log_level
cmd_executer = command_executer.GetCommandExecuter(log_level=log_level)
l = logger.GetLogger()
if (options.chromeos_root is None):
Usage(parser, '--chromeos_root must be set')
if (options.remote is None):
Usage(parser, '--remote must be set')
options.chromeos_root = os.path.expanduser(options.chromeos_root)
if (options.board is None):
board = cmd_executer.CrosLearnBoard(options.chromeos_root, options.remote)
else:
board = options.board
if (options.image is None):
images_dir = misc.GetImageDir(options.chromeos_root, board)
image = os.path.join(images_dir, 'latest', 'chromiumos_test_image.bin')
if (not os.path.exists(image)):
image = os.path.join(images_dir, 'latest', 'chromiumos_image.bin')
is_xbuddy_image = False
else:
image = options.image
is_xbuddy_image = image.startswith('xbuddy://')
if (not is_xbuddy_image):
image = os.path.expanduser(image)
if (not is_xbuddy_image):
image = os.path.realpath(image)
if ((not os.path.exists(image)) and (not is_xbuddy_image)):
Usage(parser, (('Image file: ' + image) + ' does not exist!'))
try:
should_unlock = False
if (not options.no_lock):
try:
_ = locks.AcquireLock(list(options.remote.split()), options.chromeos_root)
should_unlock = True
except Exception as e:
raise RuntimeError(('Error acquiring machine: %s' % str(e)))
reimage = False
local_image = False
if (not is_xbuddy_image):
local_image = True
image_checksum = FileUtils().Md5File(image, log_level=log_level)
command = ('cat ' + checksum_file)
(ret, device_checksum, _) = cmd_executer.CrosRunCommandWOutput(command, chromeos_root=options.chromeos_root, machine=options.remote)
device_checksum = device_checksum.strip()
image_checksum = str(image_checksum)
l.LogOutput(('Image checksum: ' + image_checksum))
l.LogOutput(('Device checksum: ' + device_checksum))
if (image_checksum != device_checksum):
[found, located_image] = LocateOrCopyImage(options.chromeos_root, image, board=board)
reimage = True
l.LogOutput('Checksums do not match. Re-imaging...')
chroot_image = FindChromeOSImage(located_image, options.chromeos_root)
is_test_image = IsImageModdedForTest(options.chromeos_root, chroot_image, log_level)
if ((not is_test_image) and (not options.force)):
logger.GetLogger().LogFatal('Have to pass --force to image a non-test image!')
else:
reimage = True
found = True
l.LogOutput('Using non-local image; Re-imaging...')
if reimage:
command = 'mount -o remount,rw,exec /tmp'
cmd_executer.CrosRunCommand(command, chromeos_root=options.chromeos_root, machine=options.remote)
CheckForCrosFlash(options.chromeos_root, options.remote, log_level)
DisableCrosBeeps(options.chromeos_root, options.remote, log_level)
cros_flash_args = ['cros', 'flash', ('--board=%s' % board), '--clobber-stateful', options.remote]
if local_image:
cros_flash_args.append(chroot_image)
else:
cros_flash_args.append(image)
command = ' '.join(cros_flash_args)
os.chmod(misc.GetChromeOSKeyFile(options.chromeos_root), 384)
if (log_level == 'average'):
cmd_executer.SetLogLevel('verbose')
retries = 0
while True:
if (log_level == 'quiet'):
l.LogOutput(('CMD : %s' % command))
ret = cmd_executer.ChrootRunCommand(options.chromeos_root, command, command_timeout=1800)
if ((ret == 0) or (retries >= 2)):
break
retries += 1
if (log_level == 'quiet'):
l.LogOutput(('Imaging failed. Retry # %d.' % retries))
if (log_level == 'average'):
cmd_executer.SetLogLevel(log_level)
logger.GetLogger().LogFatalIf(ret, 'Image command failed')
ret = EnsureMachineUp(options.chromeos_root, options.remote, log_level)
if ((not local_image) and ret):
ret = 0
else:
ret = 1
if local_image:
if (log_level == 'average'):
l.LogOutput('Verifying image.')
command = ('echo %s > %s && chmod -w %s' % (image_checksum, checksum_file, checksum_file))
ret = cmd_executer.CrosRunCommand(command, chromeos_root=options.chromeos_root, machine=options.remote)
logger.GetLogger().LogFatalIf(ret, 'Writing checksum failed.')
successfully_imaged = VerifyChromeChecksum(options.chromeos_root, chroot_image, options.remote, log_level)
logger.GetLogger().LogFatalIf((not successfully_imaged), 'Image verification failed!')
TryRemountPartitionAsRW(options.chromeos_root, options.remote, log_level)
if (not found):
temp_dir = os.path.dirname(located_image)
l.LogOutput(('Deleting temp image dir: %s' % temp_dir))
shutil.rmtree(temp_dir)
l.LogOutput('Image updated.')
else:
l.LogOutput('Checksums match, skip image update and reboot.')
command = 'reboot && exit'
_ = cmd_executer.CrosRunCommand(command, chromeos_root=options.chromeos_root, machine=options.remote)
time.sleep(30)
finally:
if should_unlock:
locks.ReleaseLock(list(options.remote.split()), options.chromeos_root)
return ret | def DoImage(argv):
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--chromeos_root', dest='chromeos_root', help='Target directory for ChromeOS installation.')
parser.add_argument('-r', '--remote', dest='remote', help='Target device.')
parser.add_argument('-i', '--image', dest='image', help='Image binary file.')
parser.add_argument('-b', '--board', dest='board', help='Target board override.')
parser.add_argument('-f', '--force', dest='force', action='store_true', default=False, help='Force an image even if it is non-test.')
parser.add_argument('-n', '--no_lock', dest='no_lock', default=False, action='store_true', help='Do not attempt to lock remote before imaging. This option should only be used in cases where the exclusive lock has already been acquired (e.g. in a script that calls this one).')
parser.add_argument('-l', '--logging_level', dest='log_level', default='verbose', help="Amount of logging to be used. Valid levels are 'quiet', 'average', and 'verbose'.")
parser.add_argument('-a', '--image_args', dest='image_args')
options = parser.parse_args(argv[1:])
if (not (options.log_level in command_executer.LOG_LEVEL)):
Usage(parser, "--logging_level must be 'quiet', 'average' or 'verbose'")
else:
log_level = options.log_level
cmd_executer = command_executer.GetCommandExecuter(log_level=log_level)
l = logger.GetLogger()
if (options.chromeos_root is None):
Usage(parser, '--chromeos_root must be set')
if (options.remote is None):
Usage(parser, '--remote must be set')
options.chromeos_root = os.path.expanduser(options.chromeos_root)
if (options.board is None):
board = cmd_executer.CrosLearnBoard(options.chromeos_root, options.remote)
else:
board = options.board
if (options.image is None):
images_dir = misc.GetImageDir(options.chromeos_root, board)
image = os.path.join(images_dir, 'latest', 'chromiumos_test_image.bin')
if (not os.path.exists(image)):
image = os.path.join(images_dir, 'latest', 'chromiumos_image.bin')
is_xbuddy_image = False
else:
image = options.image
is_xbuddy_image = image.startswith('xbuddy://')
if (not is_xbuddy_image):
image = os.path.expanduser(image)
if (not is_xbuddy_image):
image = os.path.realpath(image)
if ((not os.path.exists(image)) and (not is_xbuddy_image)):
Usage(parser, (('Image file: ' + image) + ' does not exist!'))
try:
should_unlock = False
if (not options.no_lock):
try:
_ = locks.AcquireLock(list(options.remote.split()), options.chromeos_root)
should_unlock = True
except Exception as e:
raise RuntimeError(('Error acquiring machine: %s' % str(e)))
reimage = False
local_image = False
if (not is_xbuddy_image):
local_image = True
image_checksum = FileUtils().Md5File(image, log_level=log_level)
command = ('cat ' + checksum_file)
(ret, device_checksum, _) = cmd_executer.CrosRunCommandWOutput(command, chromeos_root=options.chromeos_root, machine=options.remote)
device_checksum = device_checksum.strip()
image_checksum = str(image_checksum)
l.LogOutput(('Image checksum: ' + image_checksum))
l.LogOutput(('Device checksum: ' + device_checksum))
if (image_checksum != device_checksum):
[found, located_image] = LocateOrCopyImage(options.chromeos_root, image, board=board)
reimage = True
l.LogOutput('Checksums do not match. Re-imaging...')
chroot_image = FindChromeOSImage(located_image, options.chromeos_root)
is_test_image = IsImageModdedForTest(options.chromeos_root, chroot_image, log_level)
if ((not is_test_image) and (not options.force)):
logger.GetLogger().LogFatal('Have to pass --force to image a non-test image!')
else:
reimage = True
found = True
l.LogOutput('Using non-local image; Re-imaging...')
if reimage:
command = 'mount -o remount,rw,exec /tmp'
cmd_executer.CrosRunCommand(command, chromeos_root=options.chromeos_root, machine=options.remote)
CheckForCrosFlash(options.chromeos_root, options.remote, log_level)
DisableCrosBeeps(options.chromeos_root, options.remote, log_level)
cros_flash_args = ['cros', 'flash', ('--board=%s' % board), '--clobber-stateful', options.remote]
if local_image:
cros_flash_args.append(chroot_image)
else:
cros_flash_args.append(image)
command = ' '.join(cros_flash_args)
os.chmod(misc.GetChromeOSKeyFile(options.chromeos_root), 384)
if (log_level == 'average'):
cmd_executer.SetLogLevel('verbose')
retries = 0
while True:
if (log_level == 'quiet'):
l.LogOutput(('CMD : %s' % command))
ret = cmd_executer.ChrootRunCommand(options.chromeos_root, command, command_timeout=1800)
if ((ret == 0) or (retries >= 2)):
break
retries += 1
if (log_level == 'quiet'):
l.LogOutput(('Imaging failed. Retry # %d.' % retries))
if (log_level == 'average'):
cmd_executer.SetLogLevel(log_level)
logger.GetLogger().LogFatalIf(ret, 'Image command failed')
ret = EnsureMachineUp(options.chromeos_root, options.remote, log_level)
if ((not local_image) and ret):
ret = 0
else:
ret = 1
if local_image:
if (log_level == 'average'):
l.LogOutput('Verifying image.')
command = ('echo %s > %s && chmod -w %s' % (image_checksum, checksum_file, checksum_file))
ret = cmd_executer.CrosRunCommand(command, chromeos_root=options.chromeos_root, machine=options.remote)
logger.GetLogger().LogFatalIf(ret, 'Writing checksum failed.')
successfully_imaged = VerifyChromeChecksum(options.chromeos_root, chroot_image, options.remote, log_level)
logger.GetLogger().LogFatalIf((not successfully_imaged), 'Image verification failed!')
TryRemountPartitionAsRW(options.chromeos_root, options.remote, log_level)
if (not found):
temp_dir = os.path.dirname(located_image)
l.LogOutput(('Deleting temp image dir: %s' % temp_dir))
shutil.rmtree(temp_dir)
l.LogOutput('Image updated.')
else:
l.LogOutput('Checksums match, skip image update and reboot.')
command = 'reboot && exit'
_ = cmd_executer.CrosRunCommand(command, chromeos_root=options.chromeos_root, machine=options.remote)
time.sleep(30)
finally:
if should_unlock:
locks.ReleaseLock(list(options.remote.split()), options.chromeos_root)
return ret<|docstring|>Image ChromeOS.<|endoftext|> |
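A minimal sketch of driving DoImage from a wrapper script. The chroot path, device address, and image filename below are placeholders, not values taken from this record; the flags mirror the argparse definitions in the body above.
import sys

argv = [
    'image_chromeos',  # argv[0]; DoImage parses argv[1:]
    '--chromeos_root', '~/chromiumos',  # expanded via os.path.expanduser inside DoImage
    '--remote', '172.16.0.42',          # hypothetical DUT address
    '--image', '~/chromiumos/src/build/images/lumpy/latest/chromiumos_test_image.bin',
    '--logging_level', 'average',
]
sys.exit(DoImage(argv))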
a7d35dcc36cdf4345aaba218459921f20f6e1bf2239f267d4c7a1d3e0ad25e75 | def event_lists_to_counts_image(header, table_of_files, logger=None):
"Make count image from event lists (like gtbin).\n\n TODO: what's a good API and location for this?\n\n Parameters\n ----------\n header : `~astropy.io.fits.Header`\n FITS header\n table_of_files : `~astropy.table.Table`\n Table of event list filenames\n logger : `logging.Logger` or None\n Logger to use\n\n Returns\n -------\n image : `~astropy.io.fits.ImageHDU`\n Count image\n "
shape = (header['NAXIS2'], header['NAXIS1'])
data = np.zeros(shape, dtype='int')
for row in table_of_files:
if (row['filetype'] != 'events'):
continue
ds = EventListDataset.read(row['filename'])
if logger:
logger.info('Processing OBS_ID = {:06d} with {:6d} events.'.format(row['OBS_ID'], len(ds.event_list)))
return fits.ImageHDU(data=data, header=header) | Make count image from event lists (like gtbin).
TODO: what's a good API and location for this?
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header
table_of_files : `~astropy.table.Table`
Table of event list filenames
logger : `logging.Logger` or None
Logger to use
Returns
-------
image : `~astropy.io.fits.ImageHDU`
Count image | gammapy/data/event_list.py | event_lists_to_counts_image | joleroi/gammapy | 0 | python | def event_lists_to_counts_image(header, table_of_files, logger=None):
"Make count image from event lists (like gtbin).\n\n TODO: what's a good API and location for this?\n\n Parameters\n ----------\n header : `~astropy.io.fits.Header`\n FITS header\n table_of_files : `~astropy.table.Table`\n Table of event list filenames\n logger : `logging.Logger` or None\n Logger to use\n\n Returns\n -------\n image : `~astropy.io.fits.ImageHDU`\n Count image\n "
shape = (header['NAXIS2'], header['NAXIS1'])
data = np.zeros(shape, dtype='int')
for row in table_of_files:
if (row['filetype'] != 'events'):
continue
ds = EventListDataset.read(row['filename'])
if logger:
logger.info('Processing OBS_ID = {:06d} with {:6d} events.'.format(row['OBS_ID'], len(ds.event_list)))
return fits.ImageHDU(data=data, header=header) | def event_lists_to_counts_image(header, table_of_files, logger=None):
"Make count image from event lists (like gtbin).\n\n TODO: what's a good API and location for this?\n\n Parameters\n ----------\n header : `~astropy.io.fits.Header`\n FITS header\n table_of_files : `~astropy.table.Table`\n Table of event list filenames\n logger : `logging.Logger` or None\n Logger to use\n\n Returns\n -------\n image : `~astropy.io.fits.ImageHDU`\n Count image\n "
shape = (header['NAXIS2'], header['NAXIS1'])
data = np.zeros(shape, dtype='int')
for row in table_of_files:
if (row['filetype'] != 'events'):
continue
ds = EventListDataset.read(row['filename'])
if logger:
logger.info('Processing OBS_ID = {:06d} with {:6d} events.'.format(row['OBS_ID'], len(ds.event_list)))
return fits.ImageHDU(data=data, header=header)<|docstring|>Make count image from event lists (like gtbin).
TODO: what's a good API and location for this?
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header
table_of_files : `~astropy.table.Table`
Table of event list filenames
logger : `logging.Logger` or None
Logger to use
Returns
-------
image : `~astropy.io.fits.ImageHDU`
Count image<|endoftext|> |
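A sketch of the inputs this helper expects, assuming a header that carries the image shape and a file table with 'filetype', 'filename', and 'OBS_ID' columns; the filename is hypothetical. Note that, as dumped, the loop only reads and logs each event list, so the returned image data stays all-zero.
from astropy.io import fits
from astropy.table import Table

header = fits.Header()
header['NAXIS1'], header['NAXIS2'] = 200, 100  # image shape (x, y)
table_of_files = Table({
    'filetype': ['events'],
    'filename': ['run_000123_events.fits'],  # hypothetical event list file
    'OBS_ID': [123],
})
image_hdu = event_lists_to_counts_image(header, table_of_files)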
34b66c3024c0cbc5bea86551744e1308dcc8d764a19f6c36c281a1feb6e76143 | @property
def info(self):
'Summary info string.'
s = '---> Event list info:\n'
s += '- Observation duration: {}\n'.format(self.observation_time_duration)
s += '- Dead-time fraction: {:5.3f} %\n'.format((100 * self.observation_dead_time_fraction))
s += '-- Event info:\n'
s += '- Number of events: {}\n'.format(len(self))
s += '- Median energy: {}\n'.format(np.median(self.energy))
s += '- Median azimuth: {}\n'.format(np.median(self['AZ']))
s += '- Median altitude: {}\n'.format(np.median(self['ALT']))
return s | Summary info string. | gammapy/data/event_list.py | info | joleroi/gammapy | 0 | python | @property
def info(self):
s = '---> Event list info:\n'
s += '- Observation duration: {}\n'.format(self.observation_time_duration)
s += '- Dead-time fraction: {:5.3f} %\n'.format((100 * self.observation_dead_time_fraction))
s += '-- Event info:\n'
s += '- Number of events: {}\n'.format(len(self))
s += '- Median energy: {}\n'.format(np.median(self.energy))
s += '- Median azimuth: {}\n'.format(np.median(self['AZ']))
s += '- Median altitude: {}\n'.format(np.median(self['ALT']))
return s | @property
def info(self):
s = '---> Event list info:\n'
s += '- Observation duration: {}\n'.format(self.observation_time_duration)
s += '- Dead-time fraction: {:5.3f} %\n'.format((100 * self.observation_dead_time_fraction))
s += '-- Event info:\n'
s += '- Number of events: {}\n'.format(len(self))
s += '- Median energy: {}\n'.format(np.median(self.energy))
s += '- Median azimuth: {}\n'.format(np.median(self['AZ']))
s += '- Median altitude: {}\n'.format(np.median(self['ALT']))
return s<|docstring|>Summary info string.<|endoftext|> |
ec20bf2fe03d281325d3679acd76a7e0713d4883c4579d71b2f3504cdfd9e5b1 | @property
def time(self):
'Event times (`~astropy.time.Time`).\n\n Notes\n -----\n Times are automatically converted to 64-bit floats.\n With 32-bit floats times will be incorrect by a few seconds\n when e.g. adding them to the reference time.\n '
met_ref = utils._time_ref_from_dict(self.meta)
met = Quantity(self['TIME'].astype('float64'), 'second')
time = (met_ref + met)
return time | Event times (`~astropy.time.Time`).
Notes
-----
Times are automatically converted to 64-bit floats.
With 32-bit floats times will be incorrect by a few seconds
when e.g. adding them to the reference time. | gammapy/data/event_list.py | time | joleroi/gammapy | 0 | python | @property
def time(self):
'Event times (`~astropy.time.Time`).\n\n Notes\n -----\n Times are automatically converted to 64-bit floats.\n With 32-bit floats times will be incorrect by a few seconds\n when e.g. adding them to the reference time.\n '
met_ref = utils._time_ref_from_dict(self.meta)
met = Quantity(self['TIME'].astype('float64'), 'second')
time = (met_ref + met)
return time | @property
def time(self):
'Event times (`~astropy.time.Time`).\n\n Notes\n -----\n Times are automatically converted to 64-bit floats.\n With 32-bit floats times will be incorrect by a few seconds\n when e.g. adding them to the reference time.\n '
met_ref = utils._time_ref_from_dict(self.meta)
met = Quantity(self['TIME'].astype('float64'), 'second')
time = (met_ref + met)
return time<|docstring|>Event times (`~astropy.time.Time`).
Notes
-----
Times are automatically converted to 64-bit floats.
With 32-bit floats times will be incorrect by a few seconds
when e.g. adding them to the reference time.<|endoftext|> |
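A quick illustration of the float64 caveat from the docstring, with a made-up reference time and offset: near 1.2e8 seconds the float32 grid spacing is 8 s, so a 32-bit MET offset gets rounded by whole seconds.
import numpy as np
from astropy.time import Time
from astropy.units import Quantity

met_ref = Time('2004-01-01T00:00:00', scale='tt')  # hypothetical reference time
met = 123456789.5                                  # seconds since reference
t32 = met_ref + Quantity(np.float64(np.float32(met)), 'second')  # float32 round-trip
t64 = met_ref + Quantity(np.float64(met), 'second')
print((t64 - t32).sec)  # ~ -2.5 s error introduced by float32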
8fd6a1421581d01040fd6585f2bcb5458710c4fa3c9eb381e97b37f5ad3f858f | @property
def radec(self):
'Event RA / DEC sky coordinates (`~astropy.coordinates.SkyCoord`)\n\n TODO: the `radec` and `galactic` properties should be cached as table columns\n '
(lon, lat) = (self['RA'], self['DEC'])
return SkyCoord(lon, lat, unit='deg', frame='fk5') | Event RA / DEC sky coordinates (`~astropy.coordinates.SkyCoord`)
TODO: the `radec` and `galactic` properties should be cached as table columns | gammapy/data/event_list.py | radec | joleroi/gammapy | 0 | python | @property
def radec(self):
'Event RA / DEC sky coordinates (`~astropy.coordinates.SkyCoord`)\n\n TODO: the `radec` and `galactic` properties should be cached as table columns\n '
(lon, lat) = (self['RA'], self['DEC'])
return SkyCoord(lon, lat, unit='deg', frame='fk5') | @property
def radec(self):
'Event RA / DEC sky coordinates (`~astropy.coordinates.SkyCoord`)\n\n TODO: the `radec` and `galactic` properties should be cached as table columns\n '
(lon, lat) = (self['RA'], self['DEC'])
return SkyCoord(lon, lat, unit='deg', frame='fk5')<|docstring|>Event RA / DEC sky coordinates (`~astropy.coordinates.SkyCoord`)
TODO: the `radec` and `galactic` properties should be cached as table columns<|endoftext|> |
fea13b179fef6334d4ff84840416e9584a66764b5b2249dd39a9016bf6a029c1 | @property
def galactic(self):
"Event Galactic sky coordinates (`~astropy.coordinates.SkyCoord`).\n\n Note: uses the ``GLON`` and ``GLAT`` columns.\n If only ``RA`` and ``DEC`` are present use the explicit\n ``event_list.radec.to('galactic')`` instead.\n "
self.add_galactic_columns()
(lon, lat) = (self['GLON'], self['GLAT'])
return SkyCoord(lon, lat, unit='deg', frame='galactic') | Event Galactic sky coordinates (`~astropy.coordinates.SkyCoord`).
Note: uses the ``GLON`` and ``GLAT`` columns.
If only ``RA`` and ``DEC`` are present use the explicit
``event_list.radec.galactic`` instead. | gammapy/data/event_list.py | galactic | joleroi/gammapy | 0 | python | @property
def galactic(self):
"Event Galactic sky coordinates (`~astropy.coordinates.SkyCoord`).\n\n Note: uses the ``GLON`` and ``GLAT`` columns.\n If only ``RA`` and ``DEC`` are present use the explicit\n ``event_list.radec.to('galactic')`` instead.\n "
self.add_galactic_columns()
(lon, lat) = (self['GLON'], self['GLAT'])
return SkyCoord(lon, lat, unit='deg', frame='galactic') | @property
def galactic(self):
"Event Galactic sky coordinates (`~astropy.coordinates.SkyCoord`).\n\n Note: uses the ``GLON`` and ``GLAT`` columns.\n If only ``RA`` and ``DEC`` are present use the explicit\n ``event_list.radec.to('galactic')`` instead.\n "
self.add_galactic_columns()
(lon, lat) = (self['GLON'], self['GLAT'])
return SkyCoord(lon, lat, unit='deg', frame='galactic')<|docstring|>Event Galactic sky coordinates (`~astropy.coordinates.SkyCoord`).
Note: uses the ``GLON`` and ``GLAT`` columns.
If only ``RA`` and ``DEC`` are present use the explicit
``event_list.radec.galactic`` instead.<|endoftext|>
fd1282ac22ea732cb3e3a1699a8299b2ca2e6d9db830bb454c6cbb85de7a9011 | def add_galactic_columns(self):
'Add Galactic coordinate columns to the table.\n\n Adds the following columns to the table if not already present:\n - "GLON" - Galactic longitude (deg)\n - "GLAT" - Galactic latitude (deg)\n '
if set(['GLON', 'GLAT']).issubset(self.colnames):
return
galactic = self.radec.galactic
self['GLON'] = galactic.l.degree
self['GLAT'] = galactic.b.degree | Add Galactic coordinate columns to the table.
Adds the following columns to the table if not already present:
- "GLON" - Galactic longitude (deg)
- "GLAT" - Galactic latitude (deg) | gammapy/data/event_list.py | add_galactic_columns | joleroi/gammapy | 0 | python | def add_galactic_columns(self):
'Add Galactic coordinate columns to the table.\n\n Adds the following columns to the table if not already present:\n - "GLON" - Galactic longitude (deg)\n - "GLAT" - Galactic latitude (deg)\n '
if set(['GLON', 'GLAT']).issubset(self.colnames):
return
galactic = self.radec.galactic
self['GLON'] = galactic.l.degree
self['GLAT'] = galactic.b.degree | def add_galactic_columns(self):
'Add Galactic coordinate columns to the table.\n\n Adds the following columns to the table if not already present:\n - "GLON" - Galactic longitude (deg)\n - "GLAT" - Galactic latitude (deg)\n '
if set(['GLON', 'GLAT']).issubset(self.colnames):
return
galactic = self.radec.galactic
self['GLON'] = galactic.l.degree
self['GLAT'] = galactic.b.degree<|docstring|>Add Galactic coordinate columns to the table.
Adds the following columns to the table if not already present:
- "GLON" - Galactic longitude (deg)
- "GLAT" - Galactic latitude (deg)<|endoftext|> |
57ba5f9e0b275a6170495a80782901464ce1c5191c4aa48d0b2e873a505cd724 | @property
def altaz(self):
'Event horizontal sky coordinates (`~astropy.coordinates.SkyCoord`)'
time = self.time
location = self.observatory_earth_location
altaz_frame = AltAz(obstime=time, location=location)
(lon, lat) = (self['AZ'], self['ALT'])
return SkyCoord(lon, lat, unit='deg', frame=altaz_frame) | Event horizontal sky coordinates (`~astropy.coordinates.SkyCoord`) | gammapy/data/event_list.py | altaz | joleroi/gammapy | 0 | python | @property
def altaz(self):
time = self.time
location = self.observatory_earth_location
altaz_frame = AltAz(obstime=time, location=location)
(lon, lat) = (self['AZ'], self['ALT'])
return SkyCoord(lon, lat, unit='deg', frame=altaz_frame) | @property
def altaz(self):
time = self.time
location = self.observatory_earth_location
altaz_frame = AltAz(obstime=time, location=location)
(lon, lat) = (self['AZ'], self['ALT'])
return SkyCoord(lon, lat, unit='deg', frame=altaz_frame)<|docstring|>Event horizontal sky coordinates (`~astropy.coordinates.SkyCoord`)<|endoftext|> |
179555edfb391dc56e90b57f3819c544e90df95b44ba9ba9161a97e104b862eb | @property
def energy(self):
'Event energies (`~astropy.units.Quantity`).'
energy = self['ENERGY']
return Quantity(energy, self.meta['EUNIT']) | Event energies (`~astropy.units.Quantity`). | gammapy/data/event_list.py | energy | joleroi/gammapy | 0 | python | @property
def energy(self):
energy = self['ENERGY']
return Quantity(energy, self.meta['EUNIT']) | @property
def energy(self):
energy = self['ENERGY']
return Quantity(energy, self.meta['EUNIT'])<|docstring|>Event energies (`~astropy.units.Quantity`).<|endoftext|> |
429fe94a331725a5e96a6c35bc3027011ed9d72d3244bec4f25e53dc25bd8ab9 | @property
def observatory_earth_location(self):
'Observatory location (`~astropy.coordinates.EarthLocation`)'
return utils._earth_location_from_dict(self.meta) | Observatory location (`~astropy.coordinates.EarthLocation`) | gammapy/data/event_list.py | observatory_earth_location | joleroi/gammapy | 0 | python | @property
def observatory_earth_location(self):
return utils._earth_location_from_dict(self.meta) | @property
def observatory_earth_location(self):
return utils._earth_location_from_dict(self.meta)<|docstring|>Observatory location (`~astropy.coordinates.EarthLocation`)<|endoftext|> |
a571f4d7e0f2d2c6dd5a6ab0462769f0dc8484a3a7c6966a2a98e0c0e498f87f | @property
def observation_time_duration(self):
'Observation time duration in seconds (`~astropy.units.Quantity`).\n\n The wall time, including dead-time.\n '
return Quantity(self.meta['ONTIME'], 'second') | Observation time duration in seconds (`~astropy.units.Quantity`).
The wall time, including dead-time. | gammapy/data/event_list.py | observation_time_duration | joleroi/gammapy | 0 | python | @property
def observation_time_duration(self):
'Observation time duration in seconds (`~astropy.units.Quantity`).\n\n The wall time, including dead-time.\n '
return Quantity(self.meta['ONTIME'], 'second') | @property
def observation_time_duration(self):
'Observation time duration in seconds (`~astropy.units.Quantity`).\n\n The wall time, including dead-time.\n '
return Quantity(self.meta['ONTIME'], 'second')<|docstring|>Observation time duration in seconds (`~astropy.units.Quantity`).
The wall time, including dead-time.<|endoftext|> |
af2068d39e6b7aadf12699f38d47c8c058d7fac294469eb3f66e3001fc5c0634 | @property
def observation_live_time_duration(self):
'Live-time duration in seconds (`~astropy.units.Quantity`).\n\n The dead-time-corrected observation time.\n\n Computed as ``t_live = t_observation * (1 - f_dead)``\n where ``f_dead`` is the dead-time fraction.\n '
return Quantity(self.meta['LIVETIME'], 'second') | Live-time duration in seconds (`~astropy.units.Quantity`).
The dead-time-corrected observation time.
Computed as ``t_live = t_observation * (1 - f_dead)``
where ``f_dead`` is the dead-time fraction. | gammapy/data/event_list.py | observation_live_time_duration | joleroi/gammapy | 0 | python | @property
def observation_live_time_duration(self):
'Live-time duration in seconds (`~astropy.units.Quantity`).\n\n The dead-time-corrected observation time.\n\n Computed as ``t_live = t_observation * (1 - f_dead)``\n where ``f_dead`` is the dead-time fraction.\n '
return Quantity(self.meta['LIVETIME'], 'second') | @property
def observation_live_time_duration(self):
'Live-time duration in seconds (`~astropy.units.Quantity`).\n\n The dead-time-corrected observation time.\n\n Computed as ``t_live = t_observation * (1 - f_dead)``\n where ``f_dead`` is the dead-time fraction.\n '
return Quantity(self.meta['LIVETIME'], 'second')<|docstring|>Live-time duration in seconds (`~astropy.units.Quantity`).
The dead-time-corrected observation time.
Computed as ``t_live = t_observation * (1 - f_dead)``
where ``f_dead`` is the dead-time fraction.<|endoftext|> |
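Writing the relation between the header keywords out explicitly: DEADC stores the live fraction, so the two duration properties and the dead-time fraction connect as below (values illustrative).
from astropy.units import Quantity

ontime = Quantity(1800.0, 'second')  # ONTIME: wall-clock duration
deadc = 0.95                         # DEADC: live-time fraction
livetime = ontime * deadc            # should equal the LIVETIME keyword
dead_time_fraction = 1 - deadc       # what observation_dead_time_fraction returns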
d54e52e654a79f2d63bbdd7c05d00dca244d5059f38e0ef0bf105852610f10be | @property
def observation_dead_time_fraction(self):
"Dead-time fraction.\n\n Defined as dead-time over observation time.\n\n Dead-time is defined as the time during the observation\n where the detector didn't record events:\n http://en.wikipedia.org/wiki/Dead_time\n http://adsabs.harvard.edu/abs/2004APh....22..285F\n\n The dead-time fraction is used in the live-time computation,\n which in turn is used in the exposure and flux computation.\n "
return (1 - self.meta['DEADC']) | Dead-time fraction.
Defined as dead-time over observation time.
Dead-time is defined as the time during the observation
where the detector didn't record events:
http://en.wikipedia.org/wiki/Dead_time
http://adsabs.harvard.edu/abs/2004APh....22..285F
The dead-time fraction is used in the live-time computation,
which in turn is used in the exposure and flux computation. | gammapy/data/event_list.py | observation_dead_time_fraction | joleroi/gammapy | 0 | python | @property
def observation_dead_time_fraction(self):
"Dead-time fraction.\n\n Defined as dead-time over observation time.\n\n Dead-time is defined as the time during the observation\n where the detector didn't record events:\n http://en.wikipedia.org/wiki/Dead_time\n http://adsabs.harvard.edu/abs/2004APh....22..285F\n\n The dead-time fraction is used in the live-time computation,\n which in turn is used in the exposure and flux computation.\n "
return (1 - self.meta['DEADC']) | @property
def observation_dead_time_fraction(self):
"Dead-time fraction.\n\n Defined as dead-time over observation time.\n\n Dead-time is defined as the time during the observation\n where the detector didn't record events:\n http://en.wikipedia.org/wiki/Dead_time\n http://adsabs.harvard.edu/abs/2004APh....22..285F\n\n The dead-time fraction is used in the live-time computation,\n which in turn is used in the exposure and flux computation.\n "
return (1 - self.meta['DEADC'])<|docstring|>Dead-time fraction.
Defined as dead-time over observation time.
Dead-time is defined as the time during the observation
where the detector didn't record events:
http://en.wikipedia.org/wiki/Dead_time
http://adsabs.harvard.edu/abs/2004APh....22..285F
The dead-time fraction is used in the live-time computation,
which in turn is used in the exposure and flux computation.<|endoftext|> |
731d31cb14943211e44a785dbc480964b788f6abe970f1a89261a21e18b81a31 | def select_energy(self, energy_band):
"Select events in energy band.\n\n Parameters\n ----------\n energy_band : `~astropy.units.Quantity`\n Energy band ``[energy_min, energy_max)``\n\n Returns\n -------\n event_list : `EventList`\n Copy of event list with selection applied.\n\n Examples\n --------\n >>> from astropy.units import Quantity\n >>> from gammapy.data import EventList\n >>> event_list = EventList.read('events.fits')\n >>> energy_band = Quantity([1, 20], 'TeV')\n >>> event_list = event_list.select_energy()\n "
energy = self.energy
mask = (energy_band[0] <= energy)
mask &= (energy < energy_band[1])
return self[mask] | Select events in energy band.
Parameters
----------
energy_band : `~astropy.units.Quantity`
Energy band ``[energy_min, energy_max)``
Returns
-------
event_list : `EventList`
Copy of event list with selection applied.
Examples
--------
>>> from astropy.units import Quantity
>>> from gammapy.data import EventList
>>> event_list = EventList.read('events.fits')
>>> energy_band = Quantity([1, 20], 'TeV')
>>> event_list = event_list.select_energy(energy_band) | gammapy/data/event_list.py | select_energy | joleroi/gammapy | 0 | python | def select_energy(self, energy_band):
"Select events in energy band.\n\n Parameters\n ----------\n energy_band : `~astropy.units.Quantity`\n Energy band ``[energy_min, energy_max)``\n\n Returns\n -------\n event_list : `EventList`\n Copy of event list with selection applied.\n\n Examples\n --------\n >>> from astropy.units import Quantity\n >>> from gammapy.data import EventList\n >>> event_list = EventList.read('events.fits')\n >>> energy_band = Quantity([1, 20], 'TeV')\n >>> event_list = event_list.select_energy()\n "
energy = self.energy
mask = (energy_band[0] <= energy)
mask &= (energy < energy_band[1])
return self[mask] | def select_energy(self, energy_band):
"Select events in energy band.\n\n Parameters\n ----------\n energy_band : `~astropy.units.Quantity`\n Energy band ``[energy_min, energy_max)``\n\n Returns\n -------\n event_list : `EventList`\n Copy of event list with selection applied.\n\n Examples\n --------\n >>> from astropy.units import Quantity\n >>> from gammapy.data import EventList\n >>> event_list = EventList.read('events.fits')\n >>> energy_band = Quantity([1, 20], 'TeV')\n >>> event_list = event_list.select_energy()\n "
energy = self.energy
mask = (energy_band[0] <= energy)
mask &= (energy < energy_band[1])
return self[mask]<|docstring|>Select events in energy band.
Parameters
----------
energy_band : `~astropy.units.Quantity`
Energy band ``[energy_min, energy_max)``
Returns
-------
event_list : `EventList`
Copy of event list with selection applied.
Examples
--------
>>> from astropy.units import Quantity
>>> from gammapy.data import EventList
>>> event_list = EventList.read('events.fits')
>>> energy_band = Quantity([1, 20], 'TeV')
>>> event_list = event_list.select_energy(energy_band)<|endoftext|>
d37ba3ef172ae66ddae7ac77712525b6a5a4e1c4711e21086a9a6db9d173a2bb | def select_time(self, time_interval):
'Select events in a time interval.\n '
time = self.time
mask = (time_interval[0] <= time)
mask &= (time < time_interval[1])
return self[mask] | Select events in a time interval. | gammapy/data/event_list.py | select_time | joleroi/gammapy | 0 | python | def select_time(self, time_interval):
'\n '
time = self.time
mask = (time_interval[0] <= time)
mask &= (time < time_interval[1])
return self[mask] | def select_time(self, time_interval):
'\n '
time = self.time
mask = (time_interval[0] <= time)
mask &= (time < time_interval[1])
return self[mask]<|docstring|>Select events in a time interval.<|endoftext|>
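A sketch of calling select_time with an interval built from astropy Time objects; the start time is a placeholder, and the mask above makes the interval half-open, [t0, t1).
from astropy.time import Time, TimeDelta

t0 = Time('2004-10-14T12:00:00')                         # hypothetical start
time_interval = [t0, t0 + TimeDelta(600, format='sec')]  # 10-minute window
selected = event_list.select_time(time_interval)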
bb470f3075c9ef4243aa7319ff06b8c8fb53de2b3f319a8fc398bd6d4bd230a4 | def select_sky_cone(self, center, radius):
'Select events in sky circle.\n\n Parameters\n ----------\n center : `~astropy.coordinates.SkyCoord`\n Sky circle center\n radius : `~astropy.coordinates.Angle`\n Sky circle radius\n\n Returns\n -------\n event_list : `EventList`\n Copy of event list with selection applied.\n '
position = self.radec
separation = center.separation(position)
mask = (separation < radius)
return self[mask] | Select events in sky circle.
Parameters
----------
center : `~astropy.coordinates.SkyCoord`
Sky circle center
radius : `~astropy.coordinates.Angle`
Sky circle radius
Returns
-------
event_list : `EventList`
Copy of event list with selection applied. | gammapy/data/event_list.py | select_sky_cone | joleroi/gammapy | 0 | python | def select_sky_cone(self, center, radius):
'Select events in sky circle.\n\n Parameters\n ----------\n center : `~astropy.coordinates.SkyCoord`\n Sky circle center\n radius : `~astropy.coordinates.Angle`\n Sky circle radius\n\n Returns\n -------\n event_list : `EventList`\n Copy of event list with selection applied.\n '
position = self.radec
separation = center.separation(position)
mask = (separation < radius)
return self[mask] | def select_sky_cone(self, center, radius):
'Select events in sky circle.\n\n Parameters\n ----------\n center : `~astropy.coordinates.SkyCoord`\n Sky circle center\n radius : `~astropy.coordinates.Angle`\n Sky circle radius\n\n Returns\n -------\n event_list : `EventList`\n Copy of event list with selection applied.\n '
position = self.radec
separation = center.separation(position)
mask = (separation < radius)
return self[mask]<|docstring|>Select events in sky circle.
Parameters
----------
center : `~astropy.coordinates.SkyCoord`
Sky circle center
radius : `~astropy.coordinates.Angle`
Sky circle radius
Returns
-------
event_list : `EventList`
Copy of event list with selection applied.<|endoftext|> |
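With the comparison fixed to keep events inside the cone, usage looks like this; the pointing and radius are placeholders:
from astropy.coordinates import Angle, SkyCoord

center = SkyCoord(83.63, 22.01, unit='deg', frame='fk5')  # e.g. a source position
radius = Angle(0.3, 'deg')
on_events = event_list.select_sky_cone(center, radius)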
f0fd80f61fc570d693df143ba118e275163f09ec703b3e55e53cef8d958b541e | def select_sky_box(self, lon_lim, lat_lim, frame='icrs'):
'Select events in sky box.\n\n TODO: move `gammapy.catalog.select_sky_box` to `gammapy.utils`.\n '
from ..catalog import select_sky_box
return select_sky_box(self, lon_lim, lat_lim, frame) | Select events in sky box.
TODO: move `gammapy.catalog.select_sky_box` to `gammapy.utils`. | gammapy/data/event_list.py | select_sky_box | joleroi/gammapy | 0 | python | def select_sky_box(self, lon_lim, lat_lim, frame='icrs'):
'Select events in sky box.\n\n TODO: move `gammapy.catalog.select_sky_box` to `gammapy.utils`.\n '
from ..catalog import select_sky_box
return select_sky_box(self, lon_lim, lat_lim, frame) | def select_sky_box(self, lon_lim, lat_lim, frame='icrs'):
'Select events in sky box.\n\n TODO: move `gammapy.catalog.select_sky_box` to `gammapy.utils`.\n '
from ..catalog import select_sky_box
return select_sky_box(self, lon_lim, lat_lim, frame)<|docstring|>Select events in sky box.
TODO: move `gammapy.catalog.select_sky_box` to `gammapy.utils`.<|endoftext|> |
3531c93246ed0699dc1506bc3705d2e49e20c7d38016690944652d1bcba17fe1 | def fill_counts_image(self, image):
"Fill events in counts image.\n\n TODO: what's a good API here to support ImageHDU and Header as input?\n\n Parameters\n ----------\n image : `~astropy.io.fits.ImageHDU`\n Image HDU\n\n Returns\n -------\n image : `~astropy.io.fits.ImageHDU`\n Input image with changed data (event count added)\n\n See also\n --------\n EventList.fill_counts_header\n "
header = image.header
(lon, lat) = self._get_lon_lat(header)
counts_image = wcs_histogram2d(header, lon, lat)
image.data += counts_image.data
return image | Fill events in counts image.
TODO: what's a good API here to support ImageHDU and Header as input?
Parameters
----------
image : `~astropy.io.fits.ImageHDU`
Image HDU
Returns
-------
image : `~astropy.io.fits.ImageHDU`
Input image with changed data (event count added)
See also
--------
EventList.fill_counts_header | gammapy/data/event_list.py | fill_counts_image | joleroi/gammapy | 0 | python | def fill_counts_image(self, image):
"Fill events in counts image.\n\n TODO: what's a good API here to support ImageHDU and Header as input?\n\n Parameters\n ----------\n image : `~astropy.io.fits.ImageHDU`\n Image HDU\n\n Returns\n -------\n image : `~astropy.io.fits.ImageHDU`\n Input image with changed data (event count added)\n\n See also\n --------\n EventList.fill_counts_header\n "
header = image.header
(lon, lat) = self._get_lon_lat(header)
counts_image = wcs_histogram2d(header, lon, lat)
image.data += counts_image.data
return image | def fill_counts_image(self, image):
"Fill events in counts image.\n\n TODO: what's a good API here to support ImageHDU and Header as input?\n\n Parameters\n ----------\n image : `~astropy.io.fits.ImageHDU`\n Image HDU\n\n Returns\n -------\n image : `~astropy.io.fits.ImageHDU`\n Input image with changed data (event count added)\n\n See also\n --------\n EventList.fill_counts_header\n "
header = image.header
(lon, lat) = self._get_lon_lat(header)
counts_image = wcs_histogram2d(header, lon, lat)
image.data += counts_image.data
return image<|docstring|>Fill events in counts image.
TODO: what's a good API here to support ImageHDU and Header as input?
Parameters
----------
image : `~astropy.io.fits.ImageHDU`
Image HDU
Returns
-------
image : `~astropy.io.fits.ImageHDU`
Input image with changed data (event count added)
See also
--------
EventList.fill_counts_header<|endoftext|> |
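A sketch of filling a counts image in place. The header below is a hypothetical plate-carree celestial WCS; any valid celestial WCS that _get_lon_lat can interpret should work the same way.
import numpy as np
from astropy.io import fits

header = fits.Header()
header['CTYPE1'], header['CTYPE2'] = 'RA---CAR', 'DEC--CAR'
header['CRPIX1'], header['CRPIX2'] = 100.5, 100.5
header['CRVAL1'], header['CRVAL2'] = 83.63, 22.01
header['CDELT1'], header['CDELT2'] = -0.02, 0.02
image = fits.ImageHDU(data=np.zeros((200, 200)), header=header)
image = event_list.fill_counts_image(image)  # event counts added to image.data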
e4ad10578cb5e58f9c763f52e7412cb95ee2e37f0b82ab9b2aebcc21564263e3 | def fill_counts_header(self, header):
'Fill events in counts image specified by a FITS header.\n\n TODO: document. Is this a good API?\n\n See also\n --------\n EventList.fill_counts_image\n '
(lon, lat) = self._get_lon_lat(header)
counts_image = wcs_histogram2d(header, lon, lat)
return counts_image | Fill events in counts image specified by a FITS header.
TODO: document. Is this a good API?
See also
--------
EventList.fill_counts_image | gammapy/data/event_list.py | fill_counts_header | joleroi/gammapy | 0 | python | def fill_counts_header(self, header):
'Fill events in counts image specified by a FITS header.\n\n TODO: document. Is this a good API?\n\n See also\n --------\n EventList.fill_counts_image\n '
(lon, lat) = self._get_lon_lat(header)
counts_image = wcs_histogram2d(header, lon, lat)
return counts_image | def fill_counts_header(self, header):
'Fill events in counts image specified by a FITS header.\n\n TODO: document. Is this a good API?\n\n See also\n --------\n EventList.fill_counts_image\n '
(lon, lat) = self._get_lon_lat(header)
counts_image = wcs_histogram2d(header, lon, lat)
return counts_image<|docstring|>Fill events in counts image specified by a FITS header.
TODO: document. Is this a good API?
See also
--------
EventList.fill_counts_image<|endoftext|> |
9564c73a068e89cf4e0bf33d31c73728c87cbaa7d7894a4ca4fe292f5f293a6b | @staticmethod
def from_hdu_list(hdu_list):
'Create `EventListDataset` from a `~astropy.io.fits.HDUList`.\n '
raise NotImplementedError
event_list = EventList.from_hdu(hdu_list['EVENTS'])
telescope_array = TelescopeArray.from_hdu(hdu_list['TELARRAY'])
good_time_intervals = GoodTimeIntervals.from_hdu(hdu_list['GTI'])
return EventListDataset(event_list, telescope_array, good_time_intervals) | Create `EventListDataset` from a `~astropy.io.fits.HDUList`. | gammapy/data/event_list.py | from_hdu_list | joleroi/gammapy | 0 | python | @staticmethod
def from_hdu_list(hdu_list):
'\n '
raise NotImplementedError
event_list = EventList.from_hdu(hdu_list['EVENTS'])
telescope_array = TelescopeArray.from_hdu(hdu_list['TELARRAY'])
good_time_intervals = GoodTimeIntervals.from_hdu(hdu_list['GTI'])
return EventListDataset(event_list, telescope_array, good_time_intervals) | @staticmethod
def from_hdu_list(hdu_list):
'\n '
raise NotImplementedError
event_list = EventList.from_hdu(hdu_list['EVENTS'])
telescope_array = TelescopeArray.from_hdu(hdu_list['TELARRAY'])
good_time_intervals = GoodTimeIntervals.from_hdu(hdu_list['GTI'])
return EventListDataset(event_list, telescope_array, good_time_intervals)<|docstring|>Create `EventListDataset` from a `~astropy.io.fits.HDUList`.<|endoftext|>
7536b0bbe81f9daf32b9f4ab4cf84cec2e399d092799ee0759720f2b21e846f9 | @staticmethod
def read(filename):
'Read event list from FITS file.\n '
event_list = EventList.read(filename, hdu='EVENTS')
try:
telescope_array = TelescopeArray.read(filename, hdu='TELARRAY')
except KeyError:
telescope_array = None
try:
good_time_intervals = GoodTimeIntervals.read(filename, hdu='GTI')
except KeyError:
good_time_intervals = None
return EventListDataset(event_list, telescope_array, good_time_intervals) | Read event list from FITS file. | gammapy/data/event_list.py | read | joleroi/gammapy | 0 | python | @staticmethod
def read(filename):
'\n '
event_list = EventList.read(filename, hdu='EVENTS')
try:
telescope_array = TelescopeArray.read(filename, hdu='TELARRAY')
except KeyError:
telescope_array = None
try:
good_time_intervals = GoodTimeIntervals.read(filename, hdu='GTI')
except KeyError:
good_time_intervals = None
return EventListDataset(event_list, telescope_array, good_time_intervals) | @staticmethod
def read(filename):
'\n '
event_list = EventList.read(filename, hdu='EVENTS')
try:
telescope_array = TelescopeArray.read(filename, hdu='TELARRAY')
except KeyError:
telescope_array = None
try:
good_time_intervals = GoodTimeIntervals.read(filename, hdu='GTI')
except KeyError:
good_time_intervals = None
return EventListDataset(event_list, telescope_array, good_time_intervals)<|docstring|>Read event list from FITS file.<|endoftext|> |
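Reading a file and checking the optional extensions; the filename is a placeholder. TELARRAY and GTI may be missing, in which case the corresponding attributes are None:
ds = EventListDataset.read('run_000123_events.fits')  # hypothetical file
if ds.telescope_array is None:
    print('no TELARRAY extension found')
if ds.good_time_intervals is None:
    print('no GTI extension found')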
e661a3ac5e269f6d2dcde11050b24dce8ef481caf96f09e40e89540304755504 | @staticmethod
def vstack_from_files(filenames, logger=None):
'Stack event lists vertically (combine events and GTIs).\n\n This function stacks (a.k.a. concatenates) event lists.\n E.g. if you have one event list with 100 events (i.e. 100 rows)\n and another with 42 events, the output event list will have 142 events.\n\n It also stacks the GTIs so that exposure computations are still\n possible using the stacked event list.\n\n\n At the moment this can require a lot of memory.\n All event lists are loaded into memory at the same time.\n\n TODO: implement and benchmark different a more efficient method:\n Get number of rows from headers, pre-allocate a large table,\n open files one by one and fill correct rows.\n\n TODO: handle header keywords "correctly".\n At the moment the output event list header keywords are copies of\n the values from the first observation, i.e. meaningless.\n Here\'s a (probably incomplete) list of values we should handle\n (usually by computing the min, max or mean or removing it):\n - OBS_ID\n - DATE_OBS, DATE_END\n - TIME_OBS, TIME_END\n - TSTART, TSTOP\n - LIVETIME, DEADC\n - RA_PNT, DEC_PNT\n - ALT_PNT, AZ_PNT\n\n\n Parameters\n ----------\n filenames : list of str\n List of event list filenames\n\n Returns\n -------\n event_list_dataset : `~gammapy.data.EventListDataset`\n\n '
total_filesize = 0
for filename in filenames:
total_filesize += os.path.getsize(filename)
if logger:
logger.info('Number of files to stack: {}'.format(len(filenames)))
logger.info('Total filesize: {:.2f} MB'.format((total_filesize / (1024.0 ** 2))))
logger.info('Reading event list files ...')
event_lists = []
gtis = []
from astropy.utils.console import ProgressBar
for filename in ProgressBar(filenames):
event_list = Table.read(filename, hdu='EVENTS')
meta_del = ['OBS_ID', 'OBJECT']
meta_mod = ['DATE_OBS', 'DATE_END', 'TIME_OBS', 'TIME_END']
gti = Table.read(filename, hdu='GTI')
event_lists.append(event_list)
gtis.append(gti)
from astropy.table import vstack as vstack_tables
total_event_list = vstack_tables(event_lists, metadata_conflicts='silent')
total_gti = vstack_tables(gtis, metadata_conflicts='silent')
total_event_list.meta['EVTSTACK'] = 'yes'
total_gti.meta['EVTSTACK'] = 'yes'
return EventListDataset(event_list=total_event_list, good_time_intervals=total_gti) | Stack event lists vertically (combine events and GTIs).
This function stacks (a.k.a. concatenates) event lists.
E.g. if you have one event list with 100 events (i.e. 100 rows)
and another with 42 events, the output event list will have 142 events.
It also stacks the GTIs so that exposure computations are still
possible using the stacked event list.
At the moment this can require a lot of memory.
All event lists are loaded into memory at the same time.
TODO: implement and benchmark a different, more efficient method:
Get number of rows from headers, pre-allocate a large table,
open files one by one and fill correct rows.
TODO: handle header keywords "correctly".
At the moment the output event list header keywords are copies of
the values from the first observation, i.e. meaningless.
Here's a (probably incomplete) list of values we should handle
(usually by computing the min, max or mean or removing it):
- OBS_ID
- DATE_OBS, DATE_END
- TIME_OBS, TIME_END
- TSTART, TSTOP
- LIVETIME, DEADC
- RA_PNT, DEC_PNT
- ALT_PNT, AZ_PNT
Parameters
----------
filenames : list of str
List of event list filenames
Returns
-------
event_list_dataset : `~gammapy.data.EventListDataset` | gammapy/data/event_list.py | vstack_from_files | joleroi/gammapy | 0 | python | @staticmethod
def vstack_from_files(filenames, logger=None):
'Stack event lists vertically (combine events and GTIs).\n\n This function stacks (a.k.a. concatenates) event lists.\n E.g. if you have one event list with 100 events (i.e. 100 rows)\n and another with 42 events, the output event list will have 142 events.\n\n It also stacks the GTIs so that exposure computations are still\n possible using the stacked event list.\n\n\n At the moment this can require a lot of memory.\n All event lists are loaded into memory at the same time.\n\n TODO: implement and benchmark different a more efficient method:\n Get number of rows from headers, pre-allocate a large table,\n open files one by one and fill correct rows.\n\n TODO: handle header keywords "correctly".\n At the moment the output event list header keywords are copies of\n the values from the first observation, i.e. meaningless.\n Here\'s a (probably incomplete) list of values we should handle\n (usually by computing the min, max or mean or removing it):\n - OBS_ID\n - DATE_OBS, DATE_END\n - TIME_OBS, TIME_END\n - TSTART, TSTOP\n - LIVETIME, DEADC\n - RA_PNT, DEC_PNT\n - ALT_PNT, AZ_PNT\n\n\n Parameters\n ----------\n filenames : list of str\n List of event list filenames\n\n Returns\n -------\n event_list_dataset : `~gammapy.data.EventListDataset`\n\n '
total_filesize = 0
for filename in filenames:
total_filesize += os.path.getsize(filename)
if logger:
logger.info('Number of files to stack: {}'.format(len(filenames)))
logger.info('Total filesize: {:.2f} MB'.format((total_filesize / (1024.0 ** 2))))
logger.info('Reading event list files ...')
event_lists = []
gtis = []
from astropy.utils.console import ProgressBar
for filename in ProgressBar(filenames):
event_list = Table.read(filename, hdu='EVENTS')
meta_del = ['OBS_ID', 'OBJECT']
meta_mod = ['DATE_OBS', 'DATE_END', 'TIME_OBS', 'TIME_END']
gti = Table.read(filename, hdu='GTI')
event_lists.append(event_list)
gtis.append(gti)
from astropy.table import vstack as vstack_tables
total_event_list = vstack_tables(event_lists, metadata_conflicts='silent')
total_gti = vstack_tables(gtis, metadata_conflicts='silent')
total_event_list.meta['EVTSTACK'] = 'yes'
total_gti.meta['EVTSTACK'] = 'yes'
return EventListDataset(event_list=total_event_list, good_time_intervals=total_gti) | @staticmethod
def vstack_from_files(filenames, logger=None):
'Stack event lists vertically (combine events and GTIs).\n\n This function stacks (a.k.a. concatenates) event lists.\n E.g. if you have one event list with 100 events (i.e. 100 rows)\n and another with 42 events, the output event list will have 142 events.\n\n It also stacks the GTIs so that exposure computations are still\n possible using the stacked event list.\n\n\n At the moment this can require a lot of memory.\n All event lists are loaded into memory at the same time.\n\n TODO: implement and benchmark different a more efficient method:\n Get number of rows from headers, pre-allocate a large table,\n open files one by one and fill correct rows.\n\n TODO: handle header keywords "correctly".\n At the moment the output event list header keywords are copies of\n the values from the first observation, i.e. meaningless.\n Here\'s a (probably incomplete) list of values we should handle\n (usually by computing the min, max or mean or removing it):\n - OBS_ID\n - DATE_OBS, DATE_END\n - TIME_OBS, TIME_END\n - TSTART, TSTOP\n - LIVETIME, DEADC\n - RA_PNT, DEC_PNT\n - ALT_PNT, AZ_PNT\n\n\n Parameters\n ----------\n filenames : list of str\n List of event list filenames\n\n Returns\n -------\n event_list_dataset : `~gammapy.data.EventListDataset`\n\n '
total_filesize = 0
for filename in filenames:
total_filesize += os.path.getsize(filename)
if logger:
logger.info('Number of files to stack: {}'.format(len(filenames)))
logger.info('Total filesize: {:.2f} MB'.format((total_filesize / (1024.0 ** 2))))
logger.info('Reading event list files ...')
event_lists = []
gtis = []
from astropy.utils.console import ProgressBar
for filename in ProgressBar(filenames):
event_list = Table.read(filename, hdu='EVENTS')
meta_del = ['OBS_ID', 'OBJECT']
meta_mod = ['DATE_OBS', 'DATE_END', 'TIME_OBS', 'TIME_END']
gti = Table.read(filename, hdu='GTI')
event_lists.append(event_list)
gtis.append(gti)
from astropy.table import vstack as vstack_tables
total_event_list = vstack_tables(event_lists, metadata_conflicts='silent')
total_gti = vstack_tables(gtis, metadata_conflicts='silent')
total_event_list.meta['EVTSTACK'] = 'yes'
total_gti.meta['EVTSTACK'] = 'yes'
return EventListDataset(event_list=total_event_list, good_time_intervals=total_gti)<|docstring|>Stack event lists vertically (combine events and GTIs).
This function stacks (a.k.a. concatenates) event lists.
E.g. if you have one event list with 100 events (i.e. 100 rows)
and another with 42 events, the output event list will have 142 events.
It also stacks the GTIs so that exposure computations are still
possible using the stacked event list.
At the moment this can require a lot of memory.
All event lists are loaded into memory at the same time.
TODO: implement and benchmark a different, more efficient method:
Get number of rows from headers, pre-allocate a large table,
open files one by one and fill correct rows.
TODO: handle header keywords "correctly".
At the moment the output event list header keywords are copies of
the values from the first observation, i.e. meaningless.
Here's a (probably incomplete) list of values we should handle
(usually by computing the min, max or mean or removing it):
- OBS_ID
- DATE_OBS, DATE_END
- TIME_OBS, TIME_END
- TSTART, TSTOP
- LIVETIME, DEADC
- RA_PNT, DEC_PNT
- ALT_PNT, AZ_PNT
Parameters
----------
filenames : list of str
List of event list filenames
Returns
-------
event_list_dataset : `~gammapy.data.EventListDataset`<|endoftext|> |
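Stacking a directory of per-run event lists; the glob pattern is a placeholder and the logger is optional:
import logging
from glob import glob

logging.basicConfig(level=logging.INFO)
filenames = sorted(glob('run_*_events.fits'))  # hypothetical per-run files
stacked = EventListDataset.vstack_from_files(filenames, logger=logging.getLogger(__name__))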
73dfbcf84325b2890e0296e1a4721df979a9a2c47f311b71e82dee81b2920fe0 | def write(self, *args, **kwargs):
'Write to FITS file.\n\n Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.\n '
self.to_fits().writeto(*args, **kwargs) | Write to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments. | gammapy/data/event_list.py | write | joleroi/gammapy | 0 | python | def write(self, *args, **kwargs):
'Write to FITS file.\n\n Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.\n '
self.to_fits().writeto(*args, **kwargs) | def write(self, *args, **kwargs):
'Write to FITS file.\n\n Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.\n '
self.to_fits().writeto(*args, **kwargs)<|docstring|>Write to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.<|endoftext|> |
0df831e1d5d4cdaf67c30fdac20172815e5c945f5a05692697dac23251abcec6 | def to_fits(self):
'Convert to FITS HDU list format.\n\n Returns\n -------\n hdu_list : `~astropy.io.fits.HDUList`\n HDU list with EVENTS and GTI extension.\n '
hdu_list = fits.HDUList()
del self.event_list['TELMASK']
data = self.event_list.as_array()
header = fits.Header()
header.update(self.event_list.meta)
hdu_list.append(fits.BinTableHDU(data=data, header=header, name='EVENTS'))
data = self.good_time_intervals.as_array()
header = fits.Header()
header.update(self.good_time_intervals.meta)
hdu_list.append(fits.BinTableHDU(data, header=header, name='GTI'))
return hdu_list | Convert to FITS HDU list format.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
HDU list with EVENTS and GTI extensions. | gammapy/data/event_list.py | to_fits | joleroi/gammapy | 0 | python | def to_fits(self):
'Convert to FITS HDU list format.\n\n Returns\n -------\n hdu_list : `~astropy.io.fits.HDUList`\n HDU list with EVENTS and GTI extension.\n '
hdu_list = fits.HDUList()
del self.event_list['TELMASK']
data = self.event_list.as_array()
header = fits.Header()
header.update(self.event_list.meta)
hdu_list.append(fits.BinTableHDU(data=data, header=header, name='EVENTS'))
data = self.good_time_intervals.as_array()
header = fits.Header()
header.update(self.good_time_intervals.meta)
hdu_list.append(fits.BinTableHDU(data, header=header, name='GTI'))
return hdu_list | def to_fits(self):
'Convert to FITS HDU list format.\n\n Returns\n -------\n hdu_list : `~astropy.io.fits.HDUList`\n HDU list with EVENTS and GTI extension.\n '
hdu_list = fits.HDUList()
del self.event_list['TELMASK']
data = self.event_list.as_array()
header = fits.Header()
header.update(self.event_list.meta)
hdu_list.append(fits.BinTableHDU(data=data, header=header, name='EVENTS'))
data = self.good_time_intervals.as_array()
header = fits.Header()
header.update(self.good_time_intervals.meta)
hdu_list.append(fits.BinTableHDU(data, header=header, name='GTI'))
return hdu_list<|docstring|>Convert to FITS HDU list format.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
HDU list with EVENTS and GTI extensions.<|endoftext|>
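Round-tripping through to_fits and write; note that to_fits deletes the TELMASK column from the in-memory event list as a side effect. The keyword for overwriting an existing file depends on the astropy version (overwrite in recent releases, clobber in old ones):
hdu_list = ds.to_fits()                          # HDUList with EVENTS and GTI extensions
ds.write('stacked_events.fits', overwrite=True)  # forwarded to HDUList.writeto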
fa50ef6f1e77896e5a18b94260585cf085128d24228f0453d25428f41aeebdc3 | @property
def info(self):
'Summary info string.'
s = '===> Event list dataset information:\n'
s += self.event_list.info
s += self.telescope_array.info
s += self.good_time_intervals.info
s += '- telescopes: {}\n'.format(len(self.telescope_array))
s += '- good time intervals: {}\n'.format(len(self.good_time_intervals))
return s | Summary info string. | gammapy/data/event_list.py | info | joleroi/gammapy | 0 | python | @property
def info(self):
s = '===> Event list dataset information:\n'
s += self.event_list.info
s += self.telescope_array.info
s += self.good_time_intervals.info
s += '- telescopes: {}\n'.format(len(self.telescope_array))
s += '- good time intervals: {}\n'.format(len(self.good_time_intervals))
return s | @property
def info(self):
s = '===> Event list dataset information:\n'
s += self.event_list.info
s += self.telescope_array.info
s += self.good_time_intervals.info
s += '- telescopes: {}\n'.format(len(self.telescope_array))
s += '- good time intervals: {}\n'.format(len(self.good_time_intervals))
return s<|docstring|>Summary info string.<|endoftext|> |
ea34ab595433ac69e2d34454eca82988bca8db1fe5a5c5192c0ff773cd3877b9 | def check(self, checks='all'):
"Check if format and content is ok.\n\n This is a convenience method that instantiates\n and runs a `~gammapy.data.EventListDatasetChecker` ...\n if you want more options use this way to use it:\n\n >>> from gammapy.data import EventListDatasetChecker\n >>> checker = EventListDatasetChecker(event_list, ...)\n >>> checker.run(which, ...) #\n\n Parameters\n ----------\n checks : list of str or 'all'\n Which checks to run (see list in\n `~gammapy.data.EventListDatasetChecker.run` docstring).\n\n Returns\n -------\n ok : bool\n Everything ok?\n "
checker = EventListDatasetChecker(self)
return checker.run(checks) | Check if format and content are ok.
This is a convenience method that instantiates
and runs a `~gammapy.data.EventListDatasetChecker`.
If you want more options, use the checker directly:
>>> from gammapy.data import EventListDatasetChecker
>>> checker = EventListDatasetChecker(event_list, ...)
>>> checker.run(which, ...) #
Parameters
----------
checks : list of str or 'all'
Which checks to run (see list in
`~gammapy.data.EventListDatasetChecker.run` docstring).
Returns
-------
ok : bool
Everything ok? | gammapy/data/event_list.py | check | joleroi/gammapy | 0 | python | def check(self, checks='all'):
"Check if format and content is ok.\n\n This is a convenience method that instantiates\n and runs a `~gammapy.data.EventListDatasetChecker` ...\n if you want more options use this way to use it:\n\n >>> from gammapy.data import EventListDatasetChecker\n >>> checker = EventListDatasetChecker(event_list, ...)\n >>> checker.run(which, ...) #\n\n Parameters\n ----------\n checks : list of str or 'all'\n Which checks to run (see list in\n `~gammapy.data.EventListDatasetChecker.run` docstring).\n\n Returns\n -------\n ok : bool\n Everything ok?\n "
checker = EventListDatasetChecker(self)
return checker.run(checks) | def check(self, checks='all'):
"Check if format and content is ok.\n\n This is a convenience method that instantiates\n and runs a `~gammapy.data.EventListDatasetChecker` ...\n if you want more options use this way to use it:\n\n >>> from gammapy.data import EventListDatasetChecker\n >>> checker = EventListDatasetChecker(event_list, ...)\n >>> checker.run(which, ...) #\n\n Parameters\n ----------\n checks : list of str or 'all'\n Which checks to run (see list in\n `~gammapy.data.EventListDatasetChecker.run` docstring).\n\n Returns\n -------\n ok : bool\n Everything ok?\n "
checker = EventListDatasetChecker(self)
return checker.run(checks)<|docstring|>Check if format and content are ok.
This is a convenience method that instantiates
and runs a `~gammapy.data.EventListDatasetChecker`.
If you want more options, use the checker directly:
>>> from gammapy.data import EventListDatasetChecker
>>> checker = EventListDatasetChecker(event_list, ...)
>>> checker.run(which, ...) #
Parameters
----------
checks : list of str or 'all'
Which checks to run (see list in
`~gammapy.data.EventListDatasetChecker.run` docstring).
Returns
-------
ok : bool
Everything ok?<|endoftext|> |
db1f6ae24cd2fc8cb739936dd624c4d3563e9ecd591e93fd5545a97a416a31c9 | def run(self, checks='all'):
'Run checks.\n\n Available checks: {...}\n\n Parameters\n ----------\n checks : list of str or "all"\n Which checks to run\n\n Returns\n -------\n ok : bool\n Everything ok?\n '
if (checks == 'all'):
checks = self._AVAILABLE_CHECKS.keys()
unknown_checks = set(checks).difference(self._AVAILABLE_CHECKS.keys())
if unknown_checks:
raise ValueError('Unknown checks: {}'.format(unknown_checks))
ok = True
for check in checks:
check_method = getattr(self, self._AVAILABLE_CHECKS[check])
ok &= check_method()
return ok | Run checks.
Available checks: {...}
Parameters
----------
checks : list of str or "all"
Which checks to run
Returns
-------
ok : bool
Everything ok? | gammapy/data/event_list.py | run | joleroi/gammapy | 0 | python | def run(self, checks='all'):
'Run checks.\n\n Available checks: {...}\n\n Parameters\n ----------\n checks : list of str or "all"\n Which checks to run\n\n Returns\n -------\n ok : bool\n Everything ok?\n '
if (checks == 'all'):
checks = self._AVAILABLE_CHECKS.keys()
unknown_checks = set(checks).difference(self._AVAILABLE_CHECKS.keys())
if unknown_checks:
raise ValueError('Unknown checks: {}'.format(unknown_checks))
ok = True
for check in checks:
check_method = getattr(self, self._AVAILABLE_CHECKS[check])
ok &= check_method()
return ok | def run(self, checks='all'):
'Run checks.\n\n Available checks: {...}\n\n Parameters\n ----------\n checks : list of str or "all"\n Which checks to run\n\n Returns\n -------\n ok : bool\n Everything ok?\n '
if (checks == 'all'):
checks = self._AVAILABLE_CHECKS.keys()
unknown_checks = set(checks).difference(self._AVAILABLE_CHECKS.keys())
if unknown_checks:
raise ValueError('Unknown checks: {}'.format(unknown_checks))
ok = True
for check in checks:
check_method = getattr(self, self._AVAILABLE_CHECKS[check])
ok &= check_method()
return ok<|docstring|>Run checks.
Available checks: {...}
Parameters
----------
checks : list of str or "all"
Which checks to run
Returns
-------
ok : bool
Everything ok?<|endoftext|> |
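The name-to-method dispatch in run can be reduced to a standalone sketch; the check names and methods below are invented for illustration:

class Checker:
    _AVAILABLE_CHECKS = {'misc': 'check_misc', 'times': 'check_times'}

    def check_misc(self):
        return True

    def check_times(self):
        return False

    def run(self, checks='all'):
        if checks == 'all':
            checks = self._AVAILABLE_CHECKS.keys()
        unknown = set(checks) - set(self._AVAILABLE_CHECKS)
        if unknown:
            raise ValueError('Unknown checks: {}'.format(unknown))
        ok = True
        for check in checks:
            ok &= getattr(self, self._AVAILABLE_CHECKS[check])()  # look up the method by name
        return ok

print(Checker().run())           # False: the 'times' check fails
print(Checker().run(['misc']))   # True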
3956b1be8361289935446d8a05d5cb13be47a5c7fd7ad2b412449f4d458b6979 | def check_misc(self):
'Check misc basic stuff.'
ok = True
required_meta = ['TELESCOP', 'OBS_ID']
missing_meta = (set(required_meta) - set(self.dset.event_list.meta))
if missing_meta:
ok = False
logging.error('Missing meta info: {}'.format(missing_meta))
return ok | Check misc basic stuff. | gammapy/data/event_list.py | check_misc | joleroi/gammapy | 0 | python | def check_misc(self):
ok = True
required_meta = ['TELESCOP', 'OBS_ID']
missing_meta = (set(required_meta) - set(self.dset.event_list.meta))
if missing_meta:
ok = False
logging.error('Missing meta info: {}'.format(missing_meta))
return ok | def check_misc(self):
ok = True
required_meta = ['TELESCOP', 'OBS_ID']
missing_meta = (set(required_meta) - set(self.dset.event_list.meta))
if missing_meta:
ok = False
logging.error('Missing meta info: {}'.format(missing_meta))
return ok<|docstring|>Check misc basic stuff.<|endoftext|> |
ed12698b82a03da95bfeb43f8bbc7fba67d63b9a81194b9c4f6987e8d7bee3ca | def _check_times_gtis(self):
'Check GTI info'
for colname in ['START', 'STOP']:
if (colname not in self.colnames):
raise InvalidDataError('GTI missing column: {}'.format(colname))
for key in ['TSTART', 'TSTOP', 'MJDREFI', 'MJDREFF']:
if (key not in self.meta):
raise InvalidDataError('GTI missing header keyword: {}'.format(key))
times = np.ravel(np.column_stack([self['START'], self['STOP']]))
if (not np.all((np.diff(times) >= 0))):
raise InvalidDataError('GTIs are not consecutive or sorted.') | Check GTI info | gammapy/data/event_list.py | _check_times_gtis | joleroi/gammapy | 0 | python | def _check_times_gtis(self):
for colname in ['START', 'STOP']:
if (colname not in self.colnames):
raise InvalidDataError('GTI missing column: {}'.format(colname))
for key in ['TSTART', 'TSTOP', 'MJDREFI', 'MJDREFF']:
if (key not in self.meta):
raise InvalidDataError('GTI missing header keyword: {}'.format(key))
times = np.ravel(np.column_stack([self['START'], self['STOP']]))
if (not np.all((np.diff(times) >= 0))):
raise InvalidDataError('GTIs are not consecutive or sorted.') | def _check_times_gtis(self):
for colname in ['START', 'STOP']:
if (colname not in self.colnames):
raise InvalidDataError('GTI missing column: {}'.format(colname))
for key in ['TSTART', 'TSTOP', 'MJDREFI', 'MJDREFF']:
if (key not in self.meta):
raise InvalidDataError('GTI missing header keyword: {}'.format(key))
times = np.ravel(np.column_stack([self['START'], self['STOP']]))
if (not np.all((np.diff(times) >= 0))):
raise InvalidDataError('GTIs are not consecutive or sorted.')<|docstring|>Check GTI info<|endoftext|> |
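A toy run of the interleave-and-check step; the corrected line above builds [start0, stop0, start1, stop1, ...], so one monotonicity test covers both ordering and overlap:

import numpy as np

start = np.array([0.0, 10.0, 25.0])
stop = np.array([5.0, 20.0, 30.0])
times = np.ravel(np.column_stack([start, stop]))  # interleave START/STOP
print(times)                        # [ 0.  5. 10. 20. 25. 30.]
print(np.all(np.diff(times) >= 0))  # True: sorted and non-overlapping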
1bfa255b615a776af363431f2c979317d9c88edb029fe13e6363957056e3733d | def check_times(self):
'Check if various times are consistent.\n\n The headers and tables of the FITS EVENTS and GTI extension\n contain various observation and event time information.\n '
ok = True
telescope_met_refs = OrderedDict(FERMI=Time('2001-01-01 00:00:00', scale='utc'), HESS=Time('2000-01-01 12:00:00.000', scale='utc'))
telescope = self.dset.event_list.meta['TELESCOP']
met_ref = utils._time_ref_from_dict(self.dset.event_list.meta)
if (telescope in telescope_met_refs.keys()):
dt = (met_ref - telescope_met_refs[telescope])
if (dt > self.accuracy['time']):
ok = False
logging.error('MET reference is incorrect.')
else:
logging.debug('Skipping MET reference check ... not known for this telescope.')
return ok | Check if various times are consistent.
The headers and tables of the FITS EVENTS and GTI extension
contain various observation and event time information. | gammapy/data/event_list.py | check_times | joleroi/gammapy | 0 | python | def check_times(self):
'Check if various times are consistent.\n\n The headers and tables of the FITS EVENTS and GTI extension\n contain various observation and event time information.\n '
ok = True
telescope_met_refs = OrderedDict(FERMI=Time('2001-01-01 00:00:00', scale='utc'), HESS=Time('2000-01-01 12:00:00.000', scale='utc'))
telescope = self.dset.event_list.meta['TELESCOP']
met_ref = utils._time_ref_from_dict(self.dset.event_list.meta)
if (telescope in telescope_met_refs.keys()):
dt = (met_ref - telescope_met_refs[telescope])
if (dt > self.accuracy['time']):
ok = False
logging.error('MET reference is incorrect.')
else:
logging.debug('Skipping MET reference check ... not known for this telescope.')
return ok | def check_times(self):
'Check if various times are consistent.\n\n The headers and tables of the FITS EVENTS and GTI extension\n contain various observation and event time information.\n '
ok = True
telescope_met_refs = OrderedDict(FERMI=Time('2001-01-01 00:00:00', scale='utc'), HESS=Time('2000-01-01 12:00:00.000', scale='utc'))
telescope = self.dset.event_list.meta['TELESCOP']
met_ref = utils._time_ref_from_dict(self.dset.event_list.meta)
if (telescope in telescope_met_refs.keys()):
dt = (met_ref - telescope_met_refs[telescope])
if (dt > self.accuracy['time']):
ok = False
logging.error('MET reference is incorrect.')
else:
logging.debug('Skipping MET reference check ... not known for this telescope.')
return ok<|docstring|>Check if various times are consistent.
The headers and tables of the FITS EVENTS and GTI extension
contain various observation and event time information.<|endoftext|> |
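The MET reference comparison relies on astropy Time arithmetic; a minimal sketch with made-up values:

from astropy.time import Time, TimeDelta

met_ref = Time('2001-01-01 00:00:01', scale='utc')   # e.g. derived from MJDREFI / MJDREFF
expected = Time('2001-01-01 00:00:00', scale='utc')  # FERMI reference from the table above
dt = met_ref - expected                              # a positive TimeDelta here
print(dt > TimeDelta(1e-6, format='sec'))            # True: off by one second, so the check fails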
2dd3841211824b116ebe636cf4f727957b22ae168383e3b76d220b22a592b2ee | def check_coordinates(self):
'Check if various event list coordinates are consistent.\n\n Parameters\n ----------\n event_list_dataset : `~gammapy.data.EventListDataset`\n Event list dataset\n accuracy : `~astropy.coordinates.Angle`\n Required accuracy.\n\n Returns\n -------\n status : bool\n All coordinates consistent?\n '
ok = True
ok &= self._check_coordinates_header()
ok &= self._check_coordinates_galactic()
ok &= self._check_coordinates_altaz()
ok &= self._check_coordinates_field_of_view()
return ok | Check if various event list coordinates are consistent.
Parameters
----------
event_list_dataset : `~gammapy.data.EventListDataset`
Event list dataset
accuracy : `~astropy.coordinates.Angle`
Required accuracy.
Returns
-------
status : bool
All coordinates consistent? | gammapy/data/event_list.py | check_coordinates | joleroi/gammapy | 0 | python | def check_coordinates(self):
'Check if various event list coordinates are consistent.\n\n Parameters\n ----------\n event_list_dataset : `~gammapy.data.EventListDataset`\n Event list dataset\n accuracy : `~astropy.coordinates.Angle`\n Required accuracy.\n\n Returns\n -------\n status : bool\n All coordinates consistent?\n '
ok = True
ok &= self._check_coordinates_header()
ok &= self._check_coordinates_galactic()
ok &= self._check_coordinates_altaz()
ok &= self._check_coordinates_field_of_view()
return ok | def check_coordinates(self):
'Check if various event list coordinates are consistent.\n\n Parameters\n ----------\n event_list_dataset : `~gammapy.data.EventListDataset`\n Event list dataset\n accuracy : `~astropy.coordinates.Angle`\n Required accuracy.\n\n Returns\n -------\n status : bool\n All coordinates consistent?\n '
ok = True
ok &= self._check_coordinates_header()
ok &= self._check_coordinates_galactic()
ok &= self._check_coordinates_altaz()
ok &= self._check_coordinates_field_of_view()
return ok<|docstring|>Check if various event list coordinates are consistent.
Parameters
----------
event_list_dataset : `~gammapy.data.EventListDataset`
Event list dataset
accuracy : `~astropy.coordinates.Angle`
Required accuracy.
Returns
-------
status : bool
All coordinates consistent?<|endoftext|> |
ce984d1af56f463f5f57e7b963e75c0bd0e56264fc46c9b10f816dd988de7ec6 | def _check_coordinates_header(self):
'Check TODO'
return True | Check TODO | gammapy/data/event_list.py | _check_coordinates_header | joleroi/gammapy | 0 | python | def _check_coordinates_header(self):
return True | def _check_coordinates_header(self):
return True<|docstring|>Check TODO<|endoftext|> |
ea2d020511d69265c4eb0671d1ae95ad9f58a222d23b8a631edd456ec3641bba | def _check_coordinates_galactic(self):
'Check if RA / DEC matches GLON / GLAT.'
event_list = self.dset.event_list
for colname in ['RA', 'DEC', 'GLON', 'GLAT']:
if (colname not in event_list.colnames):
self.logger.info('Skipping Galactic coordinate check. Missing column: "{}".'.format(colname))
return True
radec = event_list.radec
galactic = event_list.galactic
separation = radec.separation(galactic).to('arcsec')
return self._check_separation(separation, 'GLON / GLAT', 'RA / DEC') | Check if RA / DEC matches GLON / GLAT. | gammapy/data/event_list.py | _check_coordinates_galactic | joleroi/gammapy | 0 | python | def _check_coordinates_galactic(self):
event_list = self.dset.event_list
for colname in ['RA', 'DEC', 'GLON', 'GLAT']:
if (colname not in event_list.colnames):
self.logger.info('Skipping Galactic coordinate check. Missing column: "{}".'.format(colname))
return True
radec = event_list.radec
galactic = event_list.galactic
separation = radec.separation(galactic).to('arcsec')
return self._check_separation(separation, 'GLON / GLAT', 'RA / DEC') | def _check_coordinates_galactic(self):
event_list = self.dset.event_list
for colname in ['RA', 'DEC', 'GLON', 'GLAT']:
if (colname not in event_list.colnames):
self.logger.info('Skipping Galactic coordinate check. Missing column: "{}".'.format(colname))
return True
radec = event_list.radec
galactic = event_list.galactic
separation = radec.separation(galactic).to('arcsec')
return self._check_separation(separation, 'GLON / GLAT', 'RA / DEC')<|docstring|>Check if RA / DEC matches GLON / GLAT.<|endoftext|> |
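The same consistency test in bare astropy, where a coordinate and its Galactic transform should be numerically the same point on the sky:

from astropy.coordinates import SkyCoord
import astropy.units as u

radec = SkyCoord(ra=83.63 * u.deg, dec=22.01 * u.deg, frame='icrs')
galactic = radec.galactic                             # derived GLON / GLAT
separation = radec.separation(galactic).to('arcsec')  # frame conversion happens internally
print(separation < 1 * u.arcsec)                      # True: the frames agree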
eb22345cb45098fa30cc0f70ecb576378e8183f704de7bfe7196e43c20805f4e | def _check_coordinates_altaz(self):
'Check if ALT / AZ matches RA / DEC.'
event_list = self.dset.event_list
telescope_array = self.dset.telescope_array
for colname in ['RA', 'DEC', 'AZ', 'ALT']:
if (colname not in event_list.colnames):
self.logger.info('Skipping AltAz coordinate check. Missing column: "{}".'.format(colname))
return True
radec = event_list.radec
altaz_expected = event_list.altaz
altaz_actual = radec.transform_to(altaz_expected)
separation = altaz_actual.separation(altaz_expected).to('arcsec')
return self._check_separation(separation, 'ALT / AZ', 'RA / DEC') | Check if ALT / AZ matches RA / DEC. | gammapy/data/event_list.py | _check_coordinates_altaz | joleroi/gammapy | 0 | python | def _check_coordinates_altaz(self):
event_list = self.dset.event_list
telescope_array = self.dset.telescope_array
for colname in ['RA', 'DEC', 'AZ', 'ALT']:
if (colname not in event_list.colnames):
self.logger.info('Skipping AltAz coordinate check. Missing column: "{}".'.format(colname))
return True
radec = event_list.radec
altaz_expected = event_list.altaz
altaz_actual = radec.transform_to(altaz_expected)
separation = altaz_actual.separation(altaz_expected).to('arcsec')
return self._check_separation(separation, 'ALT / AZ', 'RA / DEC') | def _check_coordinates_altaz(self):
event_list = self.dset.event_list
telescope_array = self.dset.telescope_array
for colname in ['RA', 'DEC', 'AZ', 'ALT']:
if (colname not in event_list.colnames):
self.logger.info('Skipping AltAz coordinate check. Missing column: "{}".'.format(colname))
return True
radec = event_list.radec
altaz_expected = event_list.altaz
altaz_actual = radec.transform_to(altaz_expected)
separation = altaz_actual.separation(altaz_expected).to('arcsec')
return self._check_separation(separation, 'ALT / AZ', 'RA / DEC')<|docstring|>Check if ALT / AZ matches RA / DEC.<|endoftext|> |
636597df5a284e2b8561b559955bcb28b809f048d15df1da03b0cab184a455e5 | def _check_coordinates_field_of_view(self):
'Check if DETX / DETY matches ALT / AZ'
return True | Check if DETX / DETY matches ALT / AZ | gammapy/data/event_list.py | _check_coordinates_field_of_view | joleroi/gammapy | 0 | python | def _check_coordinates_field_of_view(self):
return True | def _check_coordinates_field_of_view(self):
return True<|docstring|>Check if DETX / DETY matches ALT / AZ<|endoftext|> |
4fa903129920765bb7b15de57237c301db3a0137e4b96b2e21b1fa31ec9feee4 | @classmethod
def make(clazz, root_dir, revision, setup_filename, untracked=False, debug=False):
'Make an egg from a git root_dir. setup_filename is relative to that root'
git.check_is_repo(root_dir)
base_name = path.basename(root_dir)
tmp_archive_filename = temp_file.make_temp_file(delete=(not debug), prefix=('%s.egg.' % base_name), suffix='.tar.gz')
if debug:
print(('tmp_archive_filename: %s' % tmp_archive_filename))
git.archive(root_dir, revision, base_name, tmp_archive_filename, untracked=untracked)
tmp_extract_dir = temp_file.make_temp_dir(delete=(not debug))
if debug:
print(('tmp_extract_dir: %s' % tmp_extract_dir))
archiver.extract_all(tmp_archive_filename, tmp_extract_dir, strip_common_ancestor=True)
cmd = ['python', setup_filename, 'bdist_egg']
env = copy.deepcopy(os.environ)
env['PYTHONDONTWRITEBYTECODE'] = '1'
execute.execute(cmd, shell=False, cwd=tmp_extract_dir, env=env, non_blocking=debug)
eggs = glob.glob(('%s/dist/*.egg' % tmp_extract_dir))
if (len(eggs) == 0):
raise RuntimeError(('no egg got laid: %s - %s' % (root_dir, setup_filename)))
if (len(eggs) > 1):
raise RuntimeError(('too many eggs got laid (probably downloaded requirements): %s - %s' % (root_dir, setup_filename)))
return eggs[0] | Make an egg from a git root_dir. setup_filename is relative to that root | lib/bes/egg/egg.py | make | reconstruir/bes | 0 | python | @classmethod
def make(clazz, root_dir, revision, setup_filename, untracked=False, debug=False):
git.check_is_repo(root_dir)
base_name = path.basename(root_dir)
tmp_archive_filename = temp_file.make_temp_file(delete=(not debug), prefix=('%s.egg.' % base_name), suffix='.tar.gz')
if debug:
print(('tmp_archive_filename: %s' % tmp_archive_filename))
git.archive(root_dir, revision, base_name, tmp_archive_filename, untracked=untracked)
tmp_extract_dir = temp_file.make_temp_dir(delete=(not debug))
if debug:
print(('tmp_extract_dir: %s' % tmp_extract_dir))
archiver.extract_all(tmp_archive_filename, tmp_extract_dir, strip_common_ancestor=True)
cmd = ['python', setup_filename, 'bdist_egg']
env = copy.deepcopy(os.environ)
env['PYTHONDONTWRITEBYTECODE'] = '1'
execute.execute(cmd, shell=False, cwd=tmp_extract_dir, env=env, non_blocking=debug)
eggs = glob.glob(('%s/dist/*.egg' % tmp_extract_dir))
if (len(eggs) == 0):
raise RuntimeError(('no egg got laid: %s - %s' % (root_dir, setup_filename)))
if (len(eggs) > 1):
raise RuntimeError(('too many eggs got laid (probably downloaded requirements): %s - %s' % (root_dir, setup_filename)))
return eggs[0] | @classmethod
def make(clazz, root_dir, revision, setup_filename, untracked=False, debug=False):
git.check_is_repo(root_dir)
base_name = path.basename(root_dir)
tmp_archive_filename = temp_file.make_temp_file(delete=(not debug), prefix=('%s.egg.' % base_name), suffix='.tar.gz')
if debug:
print(('tmp_archive_filename: %s' % tmp_archive_filename))
git.archive(root_dir, revision, base_name, tmp_archive_filename, untracked=untracked)
tmp_extract_dir = temp_file.make_temp_dir(delete=(not debug))
if debug:
print(('tmp_extract_dir: %s' % tmp_extract_dir))
archiver.extract_all(tmp_archive_filename, tmp_extract_dir, strip_common_ancestor=True)
cmd = ['python', setup_filename, 'bdist_egg']
env = copy.deepcopy(os.environ)
env['PYTHONDONTWRITEBYTECODE'] = '1'
execute.execute(cmd, shell=False, cwd=tmp_extract_dir, env=env, non_blocking=debug)
eggs = glob.glob(('%s/dist/*.egg' % tmp_extract_dir))
if (len(eggs) == 0):
raise RuntimeError(('no egg got laid: %s - %s' % (root_dir, setup_filename)))
if (len(eggs) > 1):
raise RuntimeError(('too many eggs got laid (probably downloaded requirements): %s - %s' % (root_dir, setup_filename)))
return eggs[0]<|docstring|>Make an egg from a git root_dir. setup_filename is relative to that root<|endoftext|> |
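A hedged usage sketch; the import path is inferred from the record's file path, and the repository path and revision are placeholders:

from bes.egg.egg import egg  # assumed import path

# Builds the egg in a temp dir from an archived checkout of HEAD
egg_path = egg.make('/path/to/my_repo', 'HEAD', 'setup.py', untracked=False, debug=True)
print(egg_path)  # something like .../dist/my_repo-1.0-py3.8.egg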
7388048a806cc98dd9b09aa8f64ee2475e7ff0a440fb31219ad6d6f1f8e8b479 | def get_char(prompt='', valid=None, echo=True, newline=True):
'reads a single character'
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
while True:
sys.stdout.write(prompt)
sys.stdout.flush()
tty.setraw(fd)
char = sys.stdin.read(1)
if (char == '\x03'):
sys.exit(1)
if echo:
sys.stdout.write(char)
sys.stdout.flush()
if ((valid is None) or (char in valid)):
return char
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
if newline:
sys.stdout.write('\n')
sys.stdout.flush() | reads a single character | tools/memtool.py | get_char | unhold/game-and-watch-patch | 48 | python | def get_char(prompt='', valid=None, echo=True, newline=True):
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
while True:
sys.stdout.write(prompt)
sys.stdout.flush()
tty.setraw(fd)
char = sys.stdin.read(1)
if (char == '\x03'):
sys.exit(1)
if echo:
sys.stdout.write(char)
sys.stdout.flush()
if ((valid is None) or (char in valid)):
return char
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
if newline:
sys.stdout.write('\n')
sys.stdout.flush() | def get_char(prompt='', valid=None, echo=True, newline=True):
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
while True:
sys.stdout.write(prompt)
sys.stdout.flush()
tty.setraw(fd)
char = sys.stdin.read(1)
if (char == '\x03'):
sys.exit(1)
if echo:
sys.stdout.write(char)
sys.stdout.flush()
if ((valid is None) or (char in valid)):
return char
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
if newline:
sys.stdout.write('\n')
sys.stdout.flush()<|docstring|>reads a single character<|endoftext|> |
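Typical use is a constrained prompt that blocks until one of the valid keys is pressed (POSIX terminals only, since it relies on termios/tty):

answer = get_char('Overwrite? [y/n] ', valid='yn')
if answer == 'y':
    print('overwriting')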
3c29af1b79d47b3186be9236763628beb9cf3de9001ccb397841c1425b59f23e | def zero_runs(a):
'\n Source: https://stackoverflow.com/a/24892274\n '
iszero = np.concatenate(([0], np.equal(a, 0).view(np.int8), [0]))
absdiff = np.abs(np.diff(iszero))
ranges = np.where((absdiff == 1))[0].reshape((- 1), 2)
return ranges | Source: https://stackoverflow.com/a/24892274 | tools/memtool.py | zero_runs | unhold/game-and-watch-patch | 48 | python | def zero_runs(a):
'\n \n '
iszero = np.concatenate(([0], np.equal(a, 0).view(np.int8), [0]))
absdiff = np.abs(np.diff(iszero))
ranges = np.where((absdiff == 1))[0].reshape((- 1), 2)
return ranges | def zero_runs(a):
'\n \n '
iszero = np.concatenate(([0], np.equal(a, 0).view(np.int8), [0]))
absdiff = np.abs(np.diff(iszero))
ranges = np.where((absdiff == 1))[0].reshape((- 1), 2)
return ranges<|docstring|>Source: https://stackoverflow.com/a/24892274<|endoftext|> |
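A worked example makes the output shape clear; each row is a half-open [start, stop) index range of consecutive zeros:

import numpy as np

a = np.array([1, 0, 0, 3, 0, 0, 0, 5])
print(zero_runs(a))
# [[1 3]
#  [4 7]]   i.e. zeros at indices 1-2 and 4-6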
4ed6cd5555a18a5eebaec75dc32cf029c9707cd3cc2af1ffda9e3958c47b992e | def flow(self, argv):
'This command is not yet in a minimum working state.'
parser = argparse.ArgumentParser(description='Capture PC data from device.')
parser.add_argument('cmd_start', type=str, help='GDB command ')
parser.add_argument('cmd_end', type=str, help='GDB command ')
args = parser.parse_args(argv)
def gdb_parser(cmd):
if isinstance(cmd, str):
cmd = cmd.split(' ')
if (cmd[0] == 'break'):
self.target.set_breakpoint(auto_int(cmd[1]))
elif (cmd[0] == 'rwatch'):
self.target.set_watchpoint(auto_int(cmd[1]), 1, Target.WatchpointType.READ)
elif (cmd[0] == 'watch'):
self.target.set_watchpoint(auto_int(cmd[1]), 1, Target.WatchpointType.WRITE)
elif (cmd[0] == 'awatch'):
self.target.set_watchpoint(auto_int(cmd[1]), 1, Target.WatchpointType.READ_WRITE)
else:
raise ValueError(f'Unknown gdb command {cmd[0]}')
self.target.halt()
gdb_parser(args.cmd_start)
self.target.resume()
print('Waiting until start condition is met')
self._wait_until_halt()
print('Start condition is met!')
gdb_parser(args.cmd_end)
for _ in tqdm(inf_generator()):
self.target.step()
if (self.target.get_halt_reason() != Target.HaltReason.DEBUG):
break | This command is not yet in a minimum working state. | tools/memtool.py | flow | unhold/game-and-watch-patch | 48 | python | def flow(self, argv):
parser = argparse.ArgumentParser(description='Capture PC data from device.')
parser.add_argument('cmd_start', type=str, help='GDB command ')
parser.add_argument('cmd_end', type=str, help='GDB command ')
args = parser.parse_args(argv)
def gdb_parser(cmd):
if isinstance(cmd, str):
cmd = cmd.split(' ')
if (cmd[0] == 'break'):
self.target.set_breakpoint(auto_int(cmd[1]))
elif (cmd[0] == 'rwatch'):
self.target.set_watchpoint(auto_int(cmd[1]), 1, Target.WatchpointType.READ)
elif (cmd[0] == 'watch'):
self.target.set_watchpoint(auto_int(cmd[1]), 1, Target.WatchpointType.WRITE)
elif (cmd[0] == 'awatch'):
self.target.set_watchpoint(auto_int(cmd[1]), 1, Target.WatchpointType.READ_WRITE)
else:
raise ValueError(f'Unknown gdb command {cmd[0]}')
self.target.halt()
gdb_parser(args.cmd_start)
self.target.resume()
print('Waiting until start condition is met')
self._wait_until_halt()
print('Start condition is met!')
gdb_parser(args.cmd_end)
for _ in tqdm(inf_generator()):
self.target.step()
if (self.target.get_halt_reason() != Target.HaltReason.DEBUG):
break | def flow(self, argv):
parser = argparse.ArgumentParser(description='Capture PC data from device.')
parser.add_argument('cmd_start', type=str, help='GDB command ')
parser.add_argument('cmd_end', type=str, help='GDB command ')
args = parser.parse_args(argv)
def gdb_parser(cmd):
if isinstance(cmd, str):
cmd = cmd.split(' ')
if (cmd[0] == 'break'):
self.target.set_breakpoint(auto_int(cmd[1]))
elif (cmd[0] == 'rwatch'):
self.target.set_watchpoint(auto_int(cmd[1]), 1, Target.WatchpointType.READ)
elif (cmd[0] == 'watch'):
self.target.set_watchpoint(auto_int(cmd[1]), 1, Target.WatchpointType.WRITE)
elif (cmd[0] == 'awatch'):
self.target.set_watchpoint(auto_int(cmd[1]), 1, Target.WatchpointType.READ_WRITE)
else:
raise ValueError(f'Unknown gdb command {cmd[0]}')
self.target.halt()
gdb_parser(args.cmd_start)
self.target.resume()
print('Waiting until start condition is met')
self._wait_until_halt()
print('Start condition is met!')
gdb_parser(args.cmd_end)
for _ in tqdm(inf_generator()):
self.target.step()
if (self.target.get_halt_reason() != Target.HaltReason.DEBUG):
break<|docstring|>This command is not yet in a minimum working state.<|endoftext|> |
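Intended invocation, mirroring GDB's break/watch syntax; the addresses and the tool instance are hypothetical:

# Halt at a start breakpoint, then single-step until a read watchpoint fires
tool.flow(['break 0x08004000', 'rwatch 0x24000000'])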
e2a44eaf069f0f30237399025e1a8ab0eea7b862e1dc8f6a4f552e7b6c7305e7 | def run_same_policy():
'Use the same policy for both agents (trivial case).'
tune.run('PG', config={'env': RockPaperScissorsEnv}) | Use the same policy for both agents (trivial case). | rllib/examples/rock_paper_scissors_multiagent.py | run_same_policy | jorenretel/ray | 1 | python | def run_same_policy():
tune.run('PG', config={'env': RockPaperScissorsEnv}) | def run_same_policy():
tune.run('PG', config={'env': RockPaperScissorsEnv})<|docstring|>Use the same policy for both agents (trivial case).<|endoftext|> |
f1b0f3652773433d8711e3cafeffe51c8fff9c82c44e470502fe0557974dc0cc | def run_heuristic_vs_learned(use_lstm=False, trainer='PG'):
'Run heuristic policies vs a learned agent.\n\n The learned agent should eventually reach a reward of ~5 with\n use_lstm=False, and ~7 with use_lstm=True. The reason the LSTM policy\n can perform better is that it can distinguish between the always_same and\n beat_last heuristics.\n '
def select_policy(agent_id):
if (agent_id == 'player1'):
return 'learned'
else:
return random.choice(['always_same', 'beat_last'])
args = parser.parse_args()
tune.run(trainer, stop={'timesteps_total': args.stop}, config={'env': RockPaperScissorsEnv, 'gamma': 0.9, 'num_workers': 0, 'num_envs_per_worker': 4, 'sample_batch_size': 10, 'train_batch_size': 200, 'multiagent': {'policies_to_train': ['learned'], 'policies': {'always_same': (AlwaysSameHeuristic, Discrete(3), Discrete(3), {}), 'beat_last': (BeatLastHeuristic, Discrete(3), Discrete(3), {}), 'learned': (None, Discrete(3), Discrete(3), {'model': {'use_lstm': use_lstm}})}, 'policy_mapping_fn': select_policy}}) | Run heuristic policies vs a learned agent.
The learned agent should eventually reach a reward of ~5 with
use_lstm=False, and ~7 with use_lstm=True. The reason the LSTM policy
can perform better is that it can distinguish between the always_same and
beat_last heuristics. | rllib/examples/rock_paper_scissors_multiagent.py | run_heuristic_vs_learned | jorenretel/ray | 1 | python | def run_heuristic_vs_learned(use_lstm=False, trainer='PG'):
'Run heuristic policies vs a learned agent.\n\n The learned agent should eventually reach a reward of ~5 with\n use_lstm=False, and ~7 with use_lstm=True. The reason the LSTM policy\n can perform better is that it can distinguish between the always_same and\n beat_last heuristics.\n '
def select_policy(agent_id):
if (agent_id == 'player1'):
return 'learned'
else:
return random.choice(['always_same', 'beat_last'])
args = parser.parse_args()
tune.run(trainer, stop={'timesteps_total': args.stop}, config={'env': RockPaperScissorsEnv, 'gamma': 0.9, 'num_workers': 0, 'num_envs_per_worker': 4, 'sample_batch_size': 10, 'train_batch_size': 200, 'multiagent': {'policies_to_train': ['learned'], 'policies': {'always_same': (AlwaysSameHeuristic, Discrete(3), Discrete(3), {}), 'beat_last': (BeatLastHeuristic, Discrete(3), Discrete(3), {}), 'learned': (None, Discrete(3), Discrete(3), {'model': {'use_lstm': use_lstm}})}, 'policy_mapping_fn': select_policy}}) | def run_heuristic_vs_learned(use_lstm=False, trainer='PG'):
'Run heuristic policies vs a learned agent.\n\n The learned agent should eventually reach a reward of ~5 with\n use_lstm=False, and ~7 with use_lstm=True. The reason the LSTM policy\n can perform better is that it can distinguish between the always_same and\n beat_last heuristics.\n '
def select_policy(agent_id):
if (agent_id == 'player1'):
return 'learned'
else:
return random.choice(['always_same', 'beat_last'])
args = parser.parse_args()
tune.run(trainer, stop={'timesteps_total': args.stop}, config={'env': RockPaperScissorsEnv, 'gamma': 0.9, 'num_workers': 0, 'num_envs_per_worker': 4, 'sample_batch_size': 10, 'train_batch_size': 200, 'multiagent': {'policies_to_train': ['learned'], 'policies': {'always_same': (AlwaysSameHeuristic, Discrete(3), Discrete(3), {}), 'beat_last': (BeatLastHeuristic, Discrete(3), Discrete(3), {}), 'learned': (None, Discrete(3), Discrete(3), {'model': {'use_lstm': use_lstm}})}, 'policy_mapping_fn': select_policy}})<|docstring|>Run heuristic policies vs a learned agent.
The learned agent should eventually reach a reward of ~5 with
use_lstm=False, and ~7 with use_lstm=True. The reason the LSTM policy
can perform better is that it can distinguish between the always_same and
beat_last heuristics.<|endoftext|> |
d483047d5de30a5270f386a6ba90fb0456ef7266e9e0bd0c670ca4fef7ebd28b | def run_with_custom_entropy_loss():
'Example of customizing the loss function of an existing policy.\n\n This performs about the same as the default loss does.'
def entropy_policy_gradient_loss(policy, model, dist_class, train_batch):
(logits, _) = model.from_batch(train_batch)
action_dist = dist_class(logits, model)
return (((- 0.1) * action_dist.entropy()) - tf.reduce_mean((action_dist.logp(train_batch['actions']) * train_batch['advantages'])))
EntropyPolicy = PGTFPolicy.with_updates(loss_fn=entropy_policy_gradient_loss)
EntropyLossPG = PGTrainer.with_updates(name='EntropyPG', get_policy_class=(lambda _: EntropyPolicy))
run_heuristic_vs_learned(use_lstm=True, trainer=EntropyLossPG) | Example of customizing the loss function of an existing policy.
This performs about the same as the default loss does. | rllib/examples/rock_paper_scissors_multiagent.py | run_with_custom_entropy_loss | jorenretel/ray | 1 | python | def run_with_custom_entropy_loss():
'Example of customizing the loss function of an existing policy.\n\n This performs about the same as the default loss does.'
def entropy_policy_gradient_loss(policy, model, dist_class, train_batch):
(logits, _) = model.from_batch(train_batch)
action_dist = dist_class(logits, model)
return (((- 0.1) * action_dist.entropy()) - tf.reduce_mean((action_dist.logp(train_batch['actions']) * train_batch['advantages'])))
EntropyPolicy = PGTFPolicy.with_updates(loss_fn=entropy_policy_gradient_loss)
EntropyLossPG = PGTrainer.with_updates(name='EntropyPG', get_policy_class=(lambda _: EntropyPolicy))
run_heuristic_vs_learned(use_lstm=True, trainer=EntropyLossPG) | def run_with_custom_entropy_loss():
'Example of customizing the loss function of an existing policy.\n\n This performs about the same as the default loss does.'
def entropy_policy_gradient_loss(policy, model, dist_class, train_batch):
(logits, _) = model.from_batch(train_batch)
action_dist = dist_class(logits, model)
return (((- 0.1) * action_dist.entropy()) - tf.reduce_mean((action_dist.logp(train_batch['actions']) * train_batch['advantages'])))
EntropyPolicy = PGTFPolicy.with_updates(loss_fn=entropy_policy_gradient_loss)
EntropyLossPG = PGTrainer.with_updates(name='EntropyPG', get_policy_class=(lambda _: EntropyPolicy))
run_heuristic_vs_learned(use_lstm=True, trainer=EntropyLossPG)<|docstring|>Example of customizing the loss function of an existing policy.
This performs about the same as the default loss does.<|endoftext|> |
a8730e014182971268c29c49b8e31314cecefd4bdc06d27270c9279bf4f4db9a | def prepare_data(self):
'Called to initialize data. Use the call to construct features'
args = self.hparams
processor = processors[args.task]()
self.labels = processor.get_labels()
for mode in ['train', 'dev']:
cached_features_file = self._feature_file(mode)
if ((not os.path.exists(cached_features_file)) or args.overwrite_cache):
logger.info('Creating features from dataset file at %s', args.data_dir)
examples = (processor.get_dev_examples(args.data_dir) if (mode == 'dev') else processor.get_train_examples(args.data_dir))
features = convert_examples_to_features(examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode)
logger.info('Saving features into cached file %s', cached_features_file)
torch.save(features, cached_features_file) | Called to initialize data. Use the call to construct features | methods/T5/transformer_local/examples/glue/run_pl_glue.py | prepare_data | Guaguago/CommonGen | 480 | python | def prepare_data(self):
args = self.hparams
processor = processors[args.task]()
self.labels = processor.get_labels()
for mode in ['train', 'dev']:
cached_features_file = self._feature_file(mode)
if ((not os.path.exists(cached_features_file)) or args.overwrite_cache):
logger.info('Creating features from dataset file at %s', args.data_dir)
examples = (processor.get_dev_examples(args.data_dir) if (mode == 'dev') else processor.get_train_examples(args.data_dir))
features = convert_examples_to_features(examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode)
logger.info('Saving features into cached file %s', cached_features_file)
torch.save(features, cached_features_file) | def prepare_data(self):
args = self.hparams
processor = processors[args.task]()
self.labels = processor.get_labels()
for mode in ['train', 'dev']:
cached_features_file = self._feature_file(mode)
if ((not os.path.exists(cached_features_file)) or args.overwrite_cache):
logger.info('Creating features from dataset file at %s', args.data_dir)
examples = (processor.get_dev_examples(args.data_dir) if (mode == 'dev') else processor.get_train_examples(args.data_dir))
features = convert_examples_to_features(examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode)
logger.info('Saving features into cached file %s', cached_features_file)
torch.save(features, cached_features_file)<|docstring|>Called to initialize data. Use the call to construct features<|endoftext|> |
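The cache-or-build pattern used here, reduced to its core; the file name and builder are illustrative:

import os
import torch

def cached(path, build, overwrite=False):
    # Rebuild when the cache is missing or explicitly invalidated
    if not os.path.exists(path) or overwrite:
        torch.save(build(), path)
    return torch.load(path)

features = cached('features.pt', build=lambda: [1, 2, 3])
print(features)  # [1, 2, 3]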
299f1b1dd4cc9808f5fbc794175cd748c07aa1a7c4be43549797580fd965193e | def load_dataset(self, mode, batch_size):
'Load datasets. Called after prepare_data.'
mode = ('dev' if (mode == 'test') else mode)
cached_features_file = self._feature_file(mode)
logger.info('Loading features from cached file %s', cached_features_file)
features = torch.load(cached_features_file)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if (self.hparams.glue_output_mode == 'classification'):
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif (self.hparams.glue_output_mode == 'regression'):
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
return DataLoader(TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=True) | Load datasets. Called after prepare_data. | methods/T5/transformer_local/examples/glue/run_pl_glue.py | load_dataset | Guaguago/CommonGen | 480 | python | def load_dataset(self, mode, batch_size):
mode = ('dev' if (mode == 'test') else mode)
cached_features_file = self._feature_file(mode)
logger.info('Loading features from cached file %s', cached_features_file)
features = torch.load(cached_features_file)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if (self.hparams.glue_output_mode == 'classification'):
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif (self.hparams.glue_output_mode == 'regression'):
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
return DataLoader(TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=True) | def load_dataset(self, mode, batch_size):
mode = ('dev' if (mode == 'test') else mode)
cached_features_file = self._feature_file(mode)
logger.info('Loading features from cached file %s', cached_features_file)
features = torch.load(cached_features_file)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if (self.hparams.glue_output_mode == 'classification'):
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif (self.hparams.glue_output_mode == 'regression'):
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
return DataLoader(TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=True)<|docstring|>Load datasets. Called after prepare_data.<|endoftext|>
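The TensorDataset/DataLoader assembly in isolation, on toy tensors:

import torch
from torch.utils.data import DataLoader, TensorDataset

input_ids = torch.randint(0, 100, (8, 16))   # 8 examples, sequence length 16
attention_mask = torch.ones_like(input_ids)
labels = torch.randint(0, 2, (8,))

loader = DataLoader(TensorDataset(input_ids, attention_mask, labels),
                    batch_size=4, shuffle=True)
for ids, mask, y in loader:
    print(ids.shape, y.shape)  # torch.Size([4, 16]) torch.Size([4])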
bc0f289a82360828122f1962d2cc12a2efdcad86b8a824ec02fe60c036a44573 | def __init__(self, mol: Chem.Mol, atoms_in_bridge_cutoff: int=2, valence_correction: str='element', debug: bool=False):
'\n Instantiates but does not call ``fix`` (or its specific methods).\n\n :param mol: Does not get edited. ``.mol`` does (but is a ``Chem.RWMol``, so use ``mol.GetMol()``)\n :param atoms_in_bridge_cutoff: how many bridge atoms can be deleted? (0 = preserves norbornane, 1 = preserves monsterantane)\n\n :param valence_correction:\n :param debug:\n '
self.debug = bool(debug)
if (valence_correction not in ('charge', 'element')):
raise ValueError(f'valence_correction "{valence_correction}" is not charge/element')
self.valence_correction = str(valence_correction)
self.atoms_in_bridge_cutoff = int(atoms_in_bridge_cutoff)
self.rwmol = Chem.RWMol(mol)
self.modifications = []
self._valence_mode = 'max'
self._iterations_done = 0
self._subiterations_done = 0 | Instantiates but does not call ``fix`` (or its specific methods).
:param mol: Does not get edited. ``.mol`` does (but is a ``Chem.RWMol``, so use ``mol.GetMol()``)
:param atoms_in_bridge_cutoff: how many bridge atoms can be deleted? (0 = preserves norbornane, 1 = preserves monsterantane)
:param valence_correction:
:param debug: | molecular_rectifier/_base.py | __init__ | matteoferla/molecular_rectifier | 16 | python | def __init__(self, mol: Chem.Mol, atoms_in_bridge_cutoff: int=2, valence_correction: str='element', debug: bool=False):
'\n Instantiates but does not call ``fix`` (or its specific methods).\n\n :param mol: Does not get edited. ``.mol`` does (but is a ``Chem.RWMol``, so use ``mol.GetMol()``)\n :param atoms_in_bridge_cutoff: how many bridge atoms can be deleted? (0 = preserves norbornane, 1 = preserves monsterantane)\n\n :param valence_correction:\n :param debug:\n '
self.debug = bool(debug)
if (valence_correction not in ('charge', 'element')):
raise ValueError(f'valence_correction "{valence_correction}" is not charge/element')
self.valence_correction = str(valence_correction)
self.atoms_in_bridge_cutoff = int(atoms_in_bridge_cutoff)
self.rwmol = Chem.RWMol(mol)
self.modifications = []
self._valence_mode = 'max'
self._iterations_done = 0
self._subiterations_done = 0 | def __init__(self, mol: Chem.Mol, atoms_in_bridge_cutoff: int=2, valence_correction: str='element', debug: bool=False):
'\n Instantiates but does not call ``fix`` (or its specific methods).\n\n :param mol: Does not get edited. ``.mol`` does (but is a ``Chem.RWMol``, so use ``mol.GetMol()``)\n :param atoms_in_bridge_cutoff: how many bridge atoms can be deleted? (0 = preserves norbornane, 1 = preserves monsterantane)\n\n :param valence_correction:\n :param debug:\n '
self.debug = bool(debug)
if (valence_correction not in ('charge', 'element')):
raise ValueError(f'valence_correction "{valence_correction}" is not charge/element')
self.valence_correction = str(valence_correction)
self.atoms_in_bridge_cutoff = int(atoms_in_bridge_cutoff)
self.rwmol = Chem.RWMol(mol)
self.modifications = []
self._valence_mode = 'max'
self._iterations_done = 0
self._subiterations_done = 0<|docstring|>Instantiates but does not call ``fix`` (or its specific methods).
:param mol: Does not get edited. ``.mol`` does (but is a ``Chem.RWMol``, so use ``mol.GetMol()``)
:param atoms_in_bridge_cutoff: how many bridge atoms can be deleted? (0 = preserves norbornane, 1 = preserves monsterantane)
:param valence_correction:
:param debug:<|endoftext|> |
a1efb163d94a99cd88ef9aab6406b7ef646fadd6bcea7bf3cff624717b2c442c | def _get_ring_info(self, mode='atom') -> Tuple[Tuple[int]]:
'\n you cannot get ring info on an unsanitized mol. Ironically I need ring info for sanitization\n\n :param mode: bond|atom\n :return: same as mol.GetRingInfo().AtomRings() or .BondRings()\n '
mol2 = Chem.Mol(self.mol)
for bond in mol2.GetBonds():
bond.SetBondType(Chem.BondType.UNSPECIFIED)
for atom in mol2.GetAtoms():
atom.SetIsAromatic(False)
atom.SetAtomicNum(0)
Chem.SanitizeMol(mol2)
if (mode == 'atom'):
return mol2.GetRingInfo().AtomRings()
elif (mode == 'bond'):
return mol2.GetRingInfo().BondRings()
else:
raise ValueError(f'Unknown mode {mode}') | you cannot get ring info on an unsanitized mol. Ironically I need ring info for sanitization
:param mode: bond|atom
:return: same as mol.GetRingInfo().AtomRings() or .BondRings() | molecular_rectifier/_base.py | _get_ring_info | matteoferla/molecular_rectifier | 16 | python | def _get_ring_info(self, mode='atom') -> Tuple[Tuple[int]]:
'\n you cannot get ring info on an unsanitized mol. Ironically I need ring info for sanitization\n\n :param mode: bond|atom\n :return: same as mol.GetRingInfo().AtomRings() or .BondRings()\n '
mol2 = Chem.Mol(self.mol)
for bond in mol2.GetBonds():
bond.SetBondType(Chem.BondType.UNSPECIFIED)
for atom in mol2.GetAtoms():
atom.SetIsAromatic(False)
atom.SetAtomicNum(0)
Chem.SanitizeMol(mol2)
if (mode == 'atom'):
return mol2.GetRingInfo().AtomRings()
elif (mode == 'bond'):
return mol2.GetRingInfo().BondRings()
else:
raise ValueError(f'Unknown mode {mode}') | def _get_ring_info(self, mode='atom') -> Tuple[Tuple[int]]:
'\n you cannot get ring info on an unsanitized mol. Ironically I need ring info for sanitization\n\n :param mode: bond|atom\n :return: same as mol.GetRingInfo().AtomRings() or .BondRings()\n '
mol2 = Chem.Mol(self.mol)
for bond in mol2.GetBonds():
bond.SetBondType(Chem.BondType.UNSPECIFIED)
for atom in mol2.GetAtoms():
atom.SetIsAromatic(False)
atom.SetAtomicNum(0)
Chem.SanitizeMol(mol2)
if (mode == 'atom'):
return mol2.GetRingInfo().AtomRings()
elif (mode == 'bond'):
return mol2.GetRingInfo().BondRings()
else:
raise ValueError(f'Unknown mode {mode}')<|docstring|>you cannot get ring info on an unsanitized mol. Ironically I need ring info for sanitization
:param mode: bond|atom
:return: same as mol.GetRingInfo().AtomRings() or .BondRings()<|endoftext|> |
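The underlying trick, blanking bond orders and elements so that SanitizeMol succeeds and ring perception can run, in a standalone RDKit sketch (a sanitizable input is used here for brevity):

from rdkit import Chem

mol = Chem.MolFromSmiles('c1ccccc1CC1CC1')  # benzene plus a cyclopropane
mol2 = Chem.Mol(mol)
for bond in mol2.GetBonds():
    bond.SetBondType(Chem.BondType.UNSPECIFIED)
    bond.SetIsAromatic(False)  # extra safeguard, not in the record's code
for atom in mol2.GetAtoms():
    atom.SetIsAromatic(False)
    atom.SetAtomicNum(0)  # '*' dummy atoms always pass valence checks
Chem.SanitizeMol(mol2)
print(mol2.GetRingInfo().AtomRings())  # the six-membered and three-membered rings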
1169001a4322ee59e19cf97f34f7b7bd52db535b9c673d3e02fd5874368030bd | def task(*, name, waiter=None, exception_handler=None):
'Returns a decorator that creates a `Task` with the given options.'
def decorator(func):
return Task(name, func, waiter, exception_handler, instance=None)
return decorator | Returns a decorator that creates a `Task` with the given options. | tle/util/tasks.py | task | tle-alt/TLE | 367 | python | def task(*, name, waiter=None, exception_handler=None):
def decorator(func):
return Task(name, func, waiter, exception_handler, instance=None)
return decorator | def task(*, name, waiter=None, exception_handler=None):
def decorator(func):
return Task(name, func, waiter, exception_handler, instance=None)
return decorator<|docstring|>Returns a decorator that creates a `Task` with the given options.<|endoftext|> |
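Intended use, assuming the Task machinery from the same module:

@task(name='RefreshCache')
async def refresh_cache():
    ...  # the coroutine body becomes the Task's func

# refresh_cache is now a Task named 'RefreshCache' with no waiter
# and no exception handler attached.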
24dc7d6f8e63c6a4afc2a583d21bfc3bd7409c1f22a563ab31785ba9ebf2c3c1 | def task_spec(*, name, waiter=None, exception_handler=None):
'Returns a decorator that creates a `TaskSpec` descriptor with the given options.'
def decorator(func):
return TaskSpec(name, func, waiter, exception_handler)
return decorator | Returns a decorator that creates a `TaskSpec` descriptor with the given options. | tle/util/tasks.py | task_spec | tle-alt/TLE | 367 | python | def task_spec(*, name, waiter=None, exception_handler=None):
def decorator(func):
return TaskSpec(name, func, waiter, exception_handler)
return decorator | def task_spec(*, name, waiter=None, exception_handler=None):
def decorator(func):
return TaskSpec(name, func, waiter, exception_handler)
return decorator<|docstring|>Returns a decorator that creates a `TaskSpec` descriptor with the given options.<|endoftext|> |
fbb4d34e5a030d8df52b151e18b273917368025fd0b116831807b1ef19ce4d7c | def __init__(self, func, *, run_first=False, needs_instance=False):
"`run_first` denotes whether this waiter should be run before the task's `func` when\n run for the first time. `needs_instance` indicates whether a self argument is required by\n the `func`.\n "
_ensure_coroutine_func(func)
self.func = func
self.run_first = run_first
self.needs_instance = needs_instance | `run_first` denotes whether this waiter should be run before the task's `func` when
run for the first time. `needs_instance` indicates whether a self argument is required by
the `func`. | tle/util/tasks.py | __init__ | tle-alt/TLE | 367 | python | def __init__(self, func, *, run_first=False, needs_instance=False):
"`run_first` denotes whether this waiter should be run before the task's `func` when\n run for the first time. `needs_instance` indicates whether a self argument is required by\n the `func`.\n "
_ensure_coroutine_func(func)
self.func = func
self.run_first = run_first
self.needs_instance = needs_instance | def __init__(self, func, *, run_first=False, needs_instance=False):
"`run_first` denotes whether this waiter should be run before the task's `func` when\n run for the first time. `needs_instance` indicates whether a self argument is required by\n the `func`.\n "
_ensure_coroutine_func(func)
self.func = func
self.run_first = run_first
self.needs_instance = needs_instance<|docstring|>`run_first` denotes whether this waiter should be run before the task's `func` when
run for the first time. `needs_instance` indicates whether a self argument is required by
the `func`.<|endoftext|> |