# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
# Define the common dependencies that contain all the actual
# Chromium functionality. This list gets pulled in below by
# the link of the actual chrome (or chromium) executable on
# Linux or Mac, and into chrome.dll on Windows.
# NOTE: Most new includes should go in the OS!="ios" condition below.
'chromium_browser_dependencies': [
'common',
'browser',
'../sync/sync.gyp:sync',
],
'chromium_child_dependencies': [
'common',
'../sync/sync.gyp:sync',
],
'allocator_target': '../base/allocator/allocator.gyp:allocator',
'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/chrome',
'protoc_out_dir': '<(SHARED_INTERMEDIATE_DIR)/protoc_out',
'repack_locales_cmd': ['python', 'tools/build/repack_locales.py'],
'conditions': [
['OS!="ios"', {
'chromium_browser_dependencies': [
'../ppapi/ppapi_internal.gyp:ppapi_host',
],
'chromium_child_dependencies': [
'debugger',
'plugin',
'renderer',
'utility',
'../content/content.gyp:content_gpu',
'../content/content.gyp:content_ppapi_plugin',
'../content/content.gyp:content_worker',
'../third_party/WebKit/public/blink_devtools.gyp:blink_devtools_frontend_resources',
],
}],
['enable_printing!=0', {
'chromium_browser_dependencies': [
'../printing/printing.gyp:printing',
],
}],
['OS=="win"', {
'platform_locale_settings_grd':
'app/resources/locale_settings_win.grd',
},],
['enable_printing==1', {
'chromium_browser_dependencies': [
'service',
],
}],
['OS=="linux"', {
'conditions': [
['chromeos==1', {
'conditions': [
['branding=="Chrome"', {
'platform_locale_settings_grd':
'app/resources/locale_settings_google_chromeos.grd',
}, { # branding!=Chrome
'platform_locale_settings_grd':
'app/resources/locale_settings_chromiumos.grd',
}],
]
}, { # chromeos==0
'platform_locale_settings_grd':
'app/resources/locale_settings_linux.grd',
}],
],
},],
['os_posix == 1 and OS != "mac" and OS != "ios" and OS != "linux"', {
'platform_locale_settings_grd':
'app/resources/locale_settings_linux.grd',
},],
['OS=="mac"', {
'tweak_info_plist_path': '../build/mac/tweak_info_plist.py',
'platform_locale_settings_grd':
'app/resources/locale_settings_mac.grd',
}], # OS=="mac"
], # conditions
}, # variables
'includes': [
# Place some targets in gypi files to reduce contention on this file.
# By using an include, we keep everything in a single xcodeproj file.
# Note on Win64 targets: targets that end with win64 should be used
# on 64-bit Windows only. Targets that end with nacl_win64 should be used
# by Native Client only.
# NOTE: Most new includes should go in the OS!="ios" condition below.
'../build/chrome_settings.gypi',
'../build/util/version.gypi',
'../build/win_precompile.gypi',
'chrome_browser.gypi',
'chrome_browser_ui.gypi',
'chrome_common.gypi',
'chrome_installer_util.gypi',
'../components/nacl/nacl_defines.gypi',
],
'conditions': [
['OS!="ios"', {
'includes': [
'chrome_browser_extensions.gypi',
'chrome_dll.gypi',
'chrome_exe.gypi',
'chrome_installer.gypi',
'chrome_renderer.gypi',
'chrome_tests.gypi',
'chrome_tests_unit.gypi',
'policy_templates.gypi',
'../apps/apps.gypi',
],
'targets': [
{
'target_name': 'default_extensions',
'type': 'none',
'conditions': [
['OS=="win"', {
'copies': [
{
'destination': '<(PRODUCT_DIR)/extensions',
'files': [
'browser/extensions/default_extensions/external_extensions.json'
]
}
],
}]
],
},
{
'target_name': 'debugger',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'chrome_resources.gyp:chrome_extra_resources',
'chrome_resources.gyp:chrome_resources',
'chrome_resources.gyp:chrome_strings',
'chrome_resources.gyp:theme_resources',
'common/extensions/api/api.gyp:chrome_api',
'../base/base.gyp:base',
'../content/content.gyp:content_browser',
'../net/net.gyp:http_server',
'../net/net.gyp:net',
'../skia/skia.gyp:skia',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
'../third_party/leveldatabase/leveldatabase.gyp:leveldatabase',
'../third_party/libusb/libusb.gyp:libusb',
],
'include_dirs': [
'..',
],
'sources': [
'browser/devtools/adb/android_rsa.cc',
'browser/devtools/adb/android_rsa.h',
'browser/devtools/adb/android_usb_device.cc',
'browser/devtools/adb/android_usb_device.h',
'browser/devtools/adb/android_usb_socket.cc',
'browser/devtools/adb/android_usb_socket.h',
'browser/devtools/adb_client_socket.cc',
'browser/devtools/adb_client_socket.h',
'browser/devtools/adb_web_socket.cc',
'browser/devtools/adb_web_socket.h',
'browser/devtools/android_device.cc',
'browser/devtools/android_device.h',
'browser/devtools/browser_list_tabcontents_provider.cc',
'browser/devtools/browser_list_tabcontents_provider.h',
'browser/devtools/devtools_adb_bridge.cc',
'browser/devtools/devtools_adb_bridge.h',
'browser/devtools/devtools_contents_resizing_strategy.cc',
'browser/devtools/devtools_contents_resizing_strategy.h',
'browser/devtools/devtools_embedder_message_dispatcher.cc',
'browser/devtools/devtools_embedder_message_dispatcher.h',
'browser/devtools/devtools_file_helper.cc',
'browser/devtools/devtools_file_helper.h',
'browser/devtools/devtools_file_system_indexer.cc',
'browser/devtools/devtools_file_system_indexer.h',
'browser/devtools/devtools_protocol.cc',
'browser/devtools/devtools_protocol.h',
'browser/devtools/devtools_target_impl.cc',
'browser/devtools/devtools_target_impl.h',
'browser/devtools/devtools_targets_ui.cc',
'browser/devtools/devtools_targets_ui.h',
'browser/devtools/devtools_toggle_action.cc',
'browser/devtools/devtools_toggle_action.h',
'browser/devtools/devtools_window.cc',
'browser/devtools/devtools_window.h',
'browser/devtools/port_forwarding_controller.cc',
'browser/devtools/port_forwarding_controller.h',
'browser/devtools/refcounted_adb_thread.cc',
'browser/devtools/refcounted_adb_thread.h',
'browser/devtools/remote_debugging_server.cc',
'browser/devtools/remote_debugging_server.h',
],
'conditions': [
['toolkit_uses_gtk == 1', {
'dependencies': [
'../build/linux/system.gyp:gtk',
],
}],
['OS=="android"', {
'dependencies!': [
'../third_party/libusb/libusb.gyp:libusb',
],
'sources!': [
'browser/devtools/adb/android_rsa.cc',
'browser/devtools/browser_list_tabcontents_provider.cc',
'browser/devtools/devtools_file_system_indexer.cc',
'browser/devtools/devtools_target_impl.cc',
'browser/devtools/devtools_window.cc',
'browser/devtools/remote_debugging_server.cc',
],
}],
['debug_devtools==1', {
'defines': [
'DEBUG_DEVTOOLS=1',
],
}],
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
},
{
'target_name': 'plugin',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'chrome_resources.gyp:chrome_strings',
'../base/base.gyp:base',
'../content/content.gyp:content_plugin',
],
'sources': [
'plugin/chrome_content_plugin_client.cc',
'plugin/chrome_content_plugin_client.h',
],
'include_dirs': [
'..',
'<(grit_out_dir)',
],
},
{
'target_name': 'utility',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'common/extensions/api/api.gyp:chrome_api',
'../base/base.gyp:base',
'../content/content.gyp:content_utility',
'../media/media.gyp:media',
'../skia/skia.gyp:skia',
'../third_party/libxml/libxml.gyp:libxml',
'common',
'<(DEPTH)/chrome/chrome_resources.gyp:chrome_resources',
'<(DEPTH)/chrome/chrome_resources.gyp:chrome_strings',
],
'sources': [
'utility/chrome_content_utility_client.cc',
'utility/chrome_content_utility_client.h',
'utility/chrome_content_utility_ipc_whitelist.cc',
'utility/chrome_content_utility_ipc_whitelist.h',
'utility/cloud_print/bitmap_image.cc',
'utility/cloud_print/bitmap_image.h',
'utility/cloud_print/pwg_encoder.cc',
'utility/cloud_print/pwg_encoder.h',
'utility/extensions/unpacker.cc',
'utility/extensions/unpacker.h',
'utility/image_writer/error_messages.cc',
'utility/image_writer/error_messages.h',
'utility/image_writer/image_writer.cc',
'utility/image_writer/image_writer.h',
'utility/image_writer/image_writer_handler.cc',
'utility/image_writer/image_writer_handler.h',
'utility/importer/bookmark_html_reader.cc',
'utility/importer/bookmark_html_reader.h',
'utility/importer/bookmarks_file_importer.cc',
'utility/importer/bookmarks_file_importer.h',
'utility/importer/external_process_importer_bridge.cc',
'utility/importer/external_process_importer_bridge.h',
'utility/importer/favicon_reencode.cc',
'utility/importer/favicon_reencode.h',
'utility/importer/firefox_importer.cc',
'utility/importer/firefox_importer.h',
'utility/importer/ie_importer_win.cc',
'utility/importer/ie_importer_win.h',
'utility/importer/importer.cc',
'utility/importer/importer.h',
'utility/importer/importer_creator.cc',
'utility/importer/importer_creator.h',
'utility/importer/nss_decryptor.cc',
'utility/importer/nss_decryptor.h',
'utility/importer/nss_decryptor_mac.h',
'utility/importer/nss_decryptor_mac.mm',
'utility/importer/nss_decryptor_win.cc',
'utility/importer/nss_decryptor_win.h',
'utility/importer/safari_importer.h',
'utility/importer/safari_importer.mm',
'utility/media_galleries/ipc_data_source.cc',
'utility/media_galleries/ipc_data_source.h',
'utility/media_galleries/itunes_pref_parser_win.cc',
'utility/media_galleries/itunes_pref_parser_win.h',
'utility/media_galleries/media_metadata_parser.cc',
'utility/media_galleries/media_metadata_parser.h',
'utility/profile_import_handler.cc',
'utility/profile_import_handler.h',
'utility/utility_message_handler.h',
'utility/web_resource_unpacker.cc',
'utility/web_resource_unpacker.h',
],
'include_dirs': [
'..',
'<(grit_out_dir)',
],
'conditions': [
['toolkit_uses_gtk == 1', {
'dependencies': [
'../build/linux/system.gyp:gtk',
],
}],
['OS=="win" or OS=="mac"', {
'sources': [
'utility/media_galleries/iapps_xml_utils.cc',
'utility/media_galleries/iapps_xml_utils.h',
'utility/media_galleries/itunes_library_parser.cc',
'utility/media_galleries/itunes_library_parser.h',
'utility/media_galleries/picasa_album_table_reader.cc',
'utility/media_galleries/picasa_album_table_reader.h',
'utility/media_galleries/picasa_albums_indexer.cc',
'utility/media_galleries/picasa_albums_indexer.h',
'utility/media_galleries/pmp_column_reader.cc',
'utility/media_galleries/pmp_column_reader.h',
],
}],
['OS=="mac"', {
'sources': [
'utility/media_galleries/iphoto_library_parser.cc',
'utility/media_galleries/iphoto_library_parser.h',
],
}],
['use_openssl==1', {
'sources!': [
'utility/importer/nss_decryptor.cc',
]
}],
['OS!="win" and OS!="mac" and use_openssl==0', {
'dependencies': [
'../crypto/crypto.gyp:crypto',
],
'sources': [
'utility/importer/nss_decryptor_system_nss.cc',
'utility/importer/nss_decryptor_system_nss.h',
],
}],
['OS=="android"', {
'sources/': [
['exclude', '^utility/importer/'],
['exclude', '^utility/media_galleries/'],
['exclude', '^utility/profile_import_handler\.cc'],
],
}],
['enable_mdns == 1', {
'sources': [
'utility/local_discovery/service_discovery_client_impl.cc',
'utility/local_discovery/service_discovery_client_impl.h',
'utility/local_discovery/service_discovery_message_handler.cc',
'utility/local_discovery/service_discovery_message_handler.h',
]
}],
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
},
],
}], # OS!="ios"
['OS=="mac"', {
'includes': [
'../apps/app_shim/app_shim.gypi',
],
'targets': [
{
'target_name': 'helper_app',
'type': 'executable',
'variables': { 'enable_wexit_time_destructors': 1, },
'product_name': '<(mac_product_name) Helper',
'mac_bundle': 1,
'dependencies': [
'chrome_dll',
'infoplist_strings_tool',
],
'sources': [
# chrome_exe_main_mac.cc's main() is the entry point for
# the "chrome" (browser app) target. All it does is jump
# to chrome_dll's ChromeMain. This is appropriate for
# helper processes too, because the logic to discriminate
# between process types at run time is actually directed
# by the --type command line argument processed by
# ChromeMain. Sharing chrome_exe_main_mac.cc with the
# browser app will suffice for now.
'app/chrome_exe_main_mac.cc',
'app/helper-Info.plist',
],
# TODO(mark): Come up with a fancier way to do this. It should only
# be necessary to list helper-Info.plist once, not the three times it
# is listed here.
'mac_bundle_resources!': [
'app/helper-Info.plist',
],
# TODO(mark): For now, don't put any resources into this app. Its
# resources directory will be a symbolic link to the browser app's
# resources directory.
'mac_bundle_resources/': [
['exclude', '.*'],
],
'xcode_settings': {
'CHROMIUM_BUNDLE_ID': '<(mac_bundle_id)',
'CHROMIUM_SHORT_NAME': '<(branding)',
'CHROMIUM_STRIP_SAVE_FILE': 'app/app.saves',
'INFOPLIST_FILE': 'app/helper-Info.plist',
},
'postbuilds': [
{
# The helper doesn't have real localizations, it just has
# empty .lproj directories, which is enough to convince Cocoa
# that anything running out of the helper .app supports those
# languages.
'postbuild_name': 'Make Empty Localizations',
'variables': {
'locale_dirs': [
'>!@(<(apply_locales_cmd) -d ZZLOCALE.lproj <(locales))',
],
},
'action': [
'tools/build/mac/make_locale_dirs.sh',
'<@(locale_dirs)',
],
},
{
# The framework (chrome_dll) defines its load-time path
# (DYLIB_INSTALL_NAME_BASE) relative to the main executable
# (chrome). A different relative path needs to be used in
# helper_app.
'postbuild_name': 'Fix Framework Link',
'action': [
'install_name_tool',
'-change',
'@executable_path/../Versions/<(version_full)/<(mac_product_name) Framework.framework/<(mac_product_name) Framework',
'@executable_path/../../../<(mac_product_name) Framework.framework/<(mac_product_name) Framework',
'${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}'
],
},
{
# Modify the Info.plist as needed. The script explains why this
# is needed. This is also done in the chrome and chrome_dll
# targets. In this case, --breakpad=0, --keystone=0, and --scm=0
# are used because Breakpad, Keystone, and SCM keys are
# never placed into the helper.
'postbuild_name': 'Tweak Info.plist',
'action': ['<(tweak_info_plist_path)',
'--breakpad=0',
'--keystone=0',
'--scm=0'],
},
{
# Make sure there isn't any Objective-C in the helper app's
# executable.
'postbuild_name': 'Verify No Objective-C',
'action': [
'../build/mac/verify_no_objc.sh',
],
},
],
'conditions': [
['mac_breakpad==1', {
'variables': {
# A real .dSYM is needed for dump_syms to operate on.
'mac_real_dsym': 1,
},
'xcode_settings': {
# With mac_real_dsym set, strip_from_xcode won't be used.
# Specify CHROMIUM_STRIP_SAVE_FILE directly to Xcode.
'STRIPFLAGS': '-s $(CHROMIUM_STRIP_SAVE_FILE)',
},
}],
['asan==1', {
'xcode_settings': {
# Override the outer definition of CHROMIUM_STRIP_SAVE_FILE.
'CHROMIUM_STRIP_SAVE_FILE': 'app/app_asan.saves',
},
}],
['component=="shared_library"', {
'xcode_settings': {
'LD_RUNPATH_SEARCH_PATHS': [
# Get back from Chromium.app/Contents/Versions/V/
# Helper.app/Contents/MacOS
'@loader_path/../../../../../../..',
],
},
}],
],
}, # target helper_app
{
# A library containing the actual code for the app mode app, shared
# by unit tests.
'target_name': 'app_mode_app_support',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'product_name': 'app_mode_app_support',
'dependencies': [
'../base/base.gyp:base',
'common_constants.gyp:common_constants',
],
'sources': [
'common/mac/app_mode_chrome_locator.h',
'common/mac/app_mode_chrome_locator.mm',
'common/mac/app_mode_common.h',
'common/mac/app_mode_common.mm',
],
'include_dirs': [
'..',
],
}, # target app_mode_app_support
{
# This produces the template for app mode loader bundles. It's a
# template in the sense that parts of it need to be "filled in" by
# Chrome before it can be executed.
'target_name': 'app_mode_app',
'type': 'executable',
'mac_bundle' : 1,
'variables': { 'enable_wexit_time_destructors': 1, },
'product_name': 'app_mode_loader',
'dependencies': [
'app_mode_app_support',
'infoplist_strings_tool',
],
'sources': [
'app/app_mode_loader_mac.mm',
'app/app_mode-Info.plist',
],
'include_dirs': [
'..',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
],
},
'mac_bundle_resources!': [
'app/app_mode-Info.plist',
],
'mac_bundle_resources/': [
['exclude', '.*'],
],
'xcode_settings': {
'INFOPLIST_FILE': 'app/app_mode-Info.plist',
'APP_MODE_APP_BUNDLE_ID': '<(mac_bundle_id).app.@APP_MODE_SHORTCUT_ID@',
},
'postbuilds' : [
{
# Modify the Info.plist as needed. The script explains why this
# is needed. This is also done in the chrome and chrome_dll
# targets. In this case, --breakpad=0, --keystone=0, and --scm=0
# are used because Breakpad, Keystone, and SCM keys are
# never placed into the helper.
#!/usr/bin/env python3
'''
Tools for standardized saving/loading of a class or dictionary to a .hdf5 file.
Strings are saved as attributes of the file; lists of strings are saved as tab
delimited strings; arrays are saved as datasets. Dicts are saved as a new group,
with their contents saved as numpy datasets. Other objects are saved as pickle dumps.
Usage:
* listing objects in an hdf5 file:
f = hdf5manager(mypath)
f.print()
* saving data to file:
f = hdf5manager(mypath)
f.save(mydict)
OR:
f.save(myClass)
* loading data from file:
f = hdf5manager(mypath)
data = f.load()
Authors: <NAME>
Date: 2017-01-13
'''
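# Illustrative usage sketch (added for clarity, not part of the original module);
# it assumes a writable path 'example.hdf5' and that numpy is installed:
#
#     import numpy as np
#     f = hdf5manager('example.hdf5', verbose=True)
#     f.save({'traces': np.zeros((10, 100)), 'label': 'session1'})
#     everything = f.load()        # dict with keys 'traces' and 'label'
#     traces = f.load('traces')    # a single-key target returns the value itself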
import h5py
import numpy as np
import os
import pickle
class hdf5manager:
def __init__(self, path, verbose=False, create=True):
assert path.endswith('.hdf5') or path.endswith('.mat')
path = os.path.abspath(path)
if not os.path.isfile(path) and create:
# Create the file
print('Creating file at:', path)
f = h5py.File(path, 'w')
f.close()
else:
assert os.path.isfile(path), 'File does not exist'
self.path = path
self.verbose = verbose
if verbose:
self.print()
def print(self):
path = self.path
print()
# If not saving or loading, open the file to read it
if not hasattr(self, 'f'):
print('Opening File to read...')
f = h5py.File(path, 'r')
else:
f = self.f
if len(list(f.keys())) > 0:
print('{0} has the following keys:'.format(path))
for file in f.keys():
print('\t-', file)
else:
print('{0} has no keys.'.format(path))
if len(list(f.attrs)) > 0:
print('{0} has the following attributes:'.format(path))
for attribute in f.attrs:
print('\t-', attribute)
else:
print('{0} has no attributes.'.format(path))
# If not saving or loading, close the file after finished
if not hasattr(self, 'f'):
print('Closing file...')
f.close()
print()
def keys(self):
# If not saving or loading, open the file to read it
if not hasattr(self, 'f'):
f = h5py.File(self.path, 'r')
else:
f = self.f
keys = [key for key in f.attrs]
keys.extend([key for key in f.keys()])
if not hasattr(self, 'f'):
f.close()
return keys
def open(self):
path = self.path
verbose = self.verbose
f = h5py.File(path, 'a')
self.f = f
self.print() # print all variables
if verbose:
print(
'File is now open for manual accessing.\n'
'To access a file handle, assign hdf5manager.f.[key] to a handle'
' and pull slices: \n'
'\t slice = np.array(handle[0,:,1:6])\n'
'It is also possible to write to a file this way\n'
'\t handle[0,:,1:6] = np.zeros(x,y,z)\n')
def close(self):
self.f.close()
del self.f
def load(self, target=None, ignore=None):
path = self.path
verbose = self.verbose
def loadDict(f, key):
# Load dict to key from its folder
if verbose:
print('\t\t-', 'loading', key, 'from file...')
g = f[key]
if verbose:
print('\t\t-', key, 'has the following keys:')
if verbose:
print('\t\t ', ', '.join([gkey for gkey in g.keys()]))
data = {}
if len(g.keys()) > 0:
for gkey in g.keys():
if type(g[gkey]) is h5py.Group:
data[gkey] = loadDict(g, gkey)
elif type(g[gkey]) is h5py.Dataset:
if verbose:
print('\t\t-', 'loading', key, 'from file...')
data[gkey] = np.array(g[gkey])
else:
if verbose:
print('key was of unknown type', type(gkey))
if verbose:
print('\t\t-', key, 'has the following attributes:')
if verbose:
print('\t\t ', ', '.join([gkey for gkey in g.attrs]))
for gkey in g.attrs:
if verbose:
print('\t\t\t', gkey + ';', type(g.attrs[gkey]).__name__)
if verbose:
print('\t\t\t-', 'loading', gkey, 'from file...')
if type(g.attrs[gkey]) is str:
data[gkey] = g.attrs[gkey]
elif type(g.attrs[gkey]) is np.void:
out = g.attrs[gkey]
data[gkey] = pickle.loads(out.tobytes())
else:
print('INVALID TYPE:', type(g.attrs[gkey]))
return data
f = h5py.File(path, 'a') # Open file for access
self.f = f # set to variable so other functions know file is open
if target is None:
if verbose:
print('No target key specified; loading all datasets')
keys = f.keys()
attrs = f.attrs
else:
assert (type(target) is str) or (type(target) is
list), 'invalid target'
if type(target) is str:
target = [target]
keys = []
attrs = []
for item in target:
if (type(item) is str) & (item in f.keys()):
if verbose:
print('Target key found:', item)
keys.append(item)
elif (type(item) is str) & (item in f.attrs):
if verbose:
print('Target attribute found:', item)
attrs.append(item)
else:
print('Target was not valid:', item)
if verbose:
print('\nLoading datasets from hdf5 file:')
data = {}
for key in keys:
if verbose:
print('\t', key + ';', type(f[key]).__name__)
if key == ignore:
if verbose:
print('\t\t- ignoring key:', key)
else:
if type(f[key]) is h5py.Group:
data[key] = loadDict(f, key)
elif type(f[key]) is h5py.Dataset:
if verbose:
print('\t\t-', 'loading', key, 'from file...')
if f[key].dtype.type is np.void:
data[key] = pickle.loads(np.array(f[key]).tobytes())
else:
data[key] = np.array(f[key])
else:
if verbose:
print('\t\t- attribute was unsupported type:',
type(f[key]).__name__)
for key in attrs:
if verbose:
print('\t', key + ';', type(f.attrs[key]).__name__)
if key == ignore:
if verbose:
print('ignoring attribute:', key)
else:
if verbose:
print('\t\t-', 'loading', key, 'from file...')
if type(f.attrs[key]) is str:
data[key] = f.attrs[key]
elif type(f.attrs[key]) is np.void:
out = f.attrs[key]
data[key] = pickle.loads(out.tobytes())
if verbose:
print('Keys extracted from file:')
if verbose:
print('\t', ', '.join([key for key in data.keys()]))
if verbose:
print('\n\n')
del self.f
f.close()
if (type(target) is list) and (len(target) == 1):
data = data[target[0]]
return data
def delete(self, target):
if type(target) is not list:
target = [target]
f = h5py.File(self.path, 'a') # Open file for access
self.f = f # set to variable so other functions know file is open
verbose = self.verbose
for key in target:
if key in self.keys():
if verbose:
print('key found:', key)
try:
del f[key]
except:
del f.attrs[key]
else:
if verbose:
print('key not found:', key)
del self.f
f.close()
def save(self, data):
# data is a class file or dict of keys/data
path = self.path
verbose = self.verbose
'''
Saves a class or dict to hdf5 file.
Note that lists of numbers are not supported, only np arrays or
lists of strings.
'''
# Functions to save each type of data:
# --------------------------------------------------------------
def saveDict(f, fdict, key):
# Write dict to key as its own folder
if verbose:
print('\t\t-', 'writing', key, 'to file...')
# Delete if it exists
if key in f:
if verbose:
print('\t\t-', 'Removing', key, 'from file')
del f[key]
g = f.create_group(key)
data_d = fdict
for dkey in fdict:
if (type(fdict[dkey]) is str):
saveString(g, fdict[dkey], dkey)
elif type(fdict[dkey]) is np.ndarray:
saveArray(g, fdict[dkey], dkey)
elif type(fdict[dkey]) is dict:
saveDict(g, fdict[dkey], dkey)
else:
if verbose:
print('\t\t- attribute was unsupported type:',
type(fdict[dkey]).__name__)
if verbose:
print('\t\tAttempting to save pickle dump of object')
try:
saveOther(g, fdict[dkey], dkey)
if verbose:
print('\t\tSaved successfully!')
except:
if verbose:
print('\t\tFailed..')
if verbose:
print('\t\t-', key, 'has the following keys:')
if verbose:
print('\t\t ', ', '.join([dkey for dkey in g.keys()]))
if verbose:
print('\t\t-', key, 'has the following attributes:')
if verbose:
print('\t\t ', ', '.join([dkey for dkey in g.attrs]))
def saveString(f, string, key):
# Write all strings as attributes of the dataset
if verbose:
print('\t\t-', 'writing', key, 'to file...')
f.attrs[key] = string
def saveArray(f, array, key):
# Check if key exists, and if entry is the same as existing value
if key in f.keys():
if (not np.array_equal(array, f[key])):
if verbose:
print(
'\t\t-', key, 'in saved file is inconsistent '
'with current version')
if verbose:
print('\t\t-', 'deleting', key, 'from file')
del f[key]
if verbose:
print('\t\t-', 'writing', key, 'to file...')
f.create_dataset(key, data=array, chunks=None)
else:
if verbose:
print(
'\t\t-', key, 'in saved file is the same as '
'the current version')
else:
if verbose:
print('\t\t-', 'writing', key, 'to file...')
f.create_dataset(key, data=array, chunks=None)
def saveOther(f, obj, key):
# Compress to bytestring using pickle, save similar to string
# Write all strings as attributes of the dataset
if verbose:
print('\t\t-', 'writing', key, 'to file...')
bstring = np.void(pickle.dumps(obj))
try:
f.attrs[key] = bstring
except RuntimeError:
if verbose:
print('\t\t\tEncountered RuntimeError')
if verbose:
print('\t\t\tSaving pickle dump as data...')
if key in f.keys():
if verbose:
print('Deleting previous copy of', key)
del f[key]
f[key] = bstring
# Check input data type, open file:
# --------------------------------------------------------------
# If data is not a dictionary, assume it is a class instance and use its __dict__
if type(data) is not dict:
# Get dictionary of all keys in class type
data = data.__dict__
if verbose:
print('Attributes found in data file:')
for key in data.keys():
print('\t', key, ':', type(data[key]))
f = h5py.File(path, 'a')
self.f = f
if verbose:
self.print()
# Loop through keys and save them in hdf5 file:
# --------------------------------------------------------------
if verbose:
print('\nSaving class attributes:')
for key in data.keys():
if verbose:
print('\t', key + ';', type(data[key]).__name__)
if (type(data[key]) is str):
saveString(f, data[key], key)
experiment Ids taken from csv file
kappaEmt = [i.split('.')[0] for i in kappaEmt]
kappaFrame_Cluster_1['Experiment_id'] = kappaEmt
kappaFrame_Cluster_1.set_index('Experiment_id', drop=True, inplace=True)
kappaEmt = [i.split('/')[-1] for i in kappaFrame_Cluster_2.index.values] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split(' ')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split("'")) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split('(')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split(')')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split('&')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = [i.split('.')[0] for i in kappaEmt]
kappaFrame_Cluster_2['Experiment_id'] = kappaEmt
kappaFrame_Cluster_2.set_index('Experiment_id', drop=True, inplace=True)
for vidStim in summary_data_frame.index.values:
try:
"kappa", 'stats', "p_val", 'subjects', 'raters'
summary_data_frame.loc[vidStim, 'K_kappa'] = kappaFrame.loc[vidStim, 'kappa']
summary_data_frame.loc[vidStim, 'K_stats'] = kappaFrame.loc[vidStim, 'stats']
summary_data_frame.loc[vidStim, 'K_pVal'] = kappaFrame.loc[vidStim, 'p_val']
summary_data_frame.loc[vidStim, 'K_subjects'] = kappaFrame.loc[vidStim, 'subjects']
summary_data_frame.loc[vidStim, 'K_raters'] = kappaFrame.loc[vidStim, 'raters']
summary_data_frame.loc[vidStim, 'K_kappa1'] = kappaFrame_Cluster_1.loc[vidStim, 'kappa']
summary_data_frame.loc[vidStim, 'K_stats1'] = kappaFrame_Cluster_1.loc[vidStim, 'stats']
summary_data_frame.loc[vidStim, 'K_pVal1'] = kappaFrame_Cluster_1.loc[vidStim, 'p_val']
summary_data_frame.loc[vidStim, 'K_subjects1'] = kappaFrame_Cluster_1.loc[vidStim, 'subjects']
summary_data_frame.loc[vidStim, 'K_raters1'] = kappaFrame_Cluster_1.loc[vidStim, 'raters']
summary_data_frame.loc[vidStim, 'K_kappa2'] = kappaFrame_Cluster_2.loc[vidStim, 'kappa']
summary_data_frame.loc[vidStim, 'K_stats2'] = kappaFrame_Cluster_2.loc[vidStim, 'stats']
summary_data_frame.loc[vidStim, 'K_pVal2'] = kappaFrame_Cluster_2.loc[vidStim, 'p_val']
summary_data_frame.loc[vidStim, 'K_subjects2'] = kappaFrame_Cluster_2.loc[vidStim, 'subjects']
summary_data_frame.loc[vidStim, 'K_raters2'] = kappaFrame_Cluster_2.loc[vidStim, 'raters']
except:
continue'''
####################### No Dominance: Concordance Results
## This file is created using R Program: /mnt/7CBFA0EC210FC340/ExperimentRelatedData/FromUbuntuAcerSystem/Experiment/Survey/IRRTest_KendallVegan_StimulationWise.R
'''CCCFile = 'NoDominance_AllStimuli_CCC_Test_Result_%s.csv' %(date.split('2018_')[1])
CCCFile_Cluster_1 = 'NoDominance_Cluster-1_CCC_Test_Result_%s.csv' %(date.split('2018_')[1])
CCCFile_Cluster_2 = 'NoDominance_Cluster-2_CCC_Test_Result_%s.csv' %(date.split('2018_')[1])
CCCFrame = pd.read_csv(os.path.join(_thisDir, 'NewTarget', videoPrefix, CCCFile), index_col = 0)
CCCFrame_Cluster_1 = pd.read_csv(os.path.join(_thisDir, 'NewTarget', videoPrefix, CCCFile_Cluster_1), index_col = 0)
CCCFrame_Cluster_2 = pd.read_csv(os.path.join(_thisDir, 'NewTarget', videoPrefix, CCCFile_Cluster_2), index_col = 0)
CCCEmt = CCCFrame.index.values
CCCEmt = [i.split('/')[-1] for i in CCCFrame.index.values] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split(' ')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split("'")) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split('(')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split(')')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split('&')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = [i.split('.')[0] for i in CCCEmt]
CCCFrame['Experiment_id'] = CCCEmt
CCCFrame.set_index('Experiment_id', drop=True, inplace=True)
CCCEmt = [i.split('/')[-1] for i in CCCFrame_Cluster_1.index.values] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split(' ')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split("'")) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split('(')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split(')')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split('&')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = [i.split('.')[0] for i in CCCEmt]
CCCFrame_Cluster_1['Experiment_id'] = CCCEmt
CCCFrame_Cluster_1.set_index('Experiment_id', drop=True, inplace=True)
CCCEmt = [i.split('/')[-1] for i in CCCFrame_Cluster_2.index.values] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split(' ')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split("'")) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split('(')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split(')')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split('&')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = [i.split('.')[0] for i in CCCEmt]
CCCFrame_Cluster_2['Experiment_id'] = CCCEmt
CCCFrame_Cluster_2.set_index('Experiment_id', drop=True, inplace=True)
for vidStim in summary_data_frame.index.values:
try:
summary_data_frame.loc[vidStim, 'NpDom_W'] = CCCFrame.loc[vidStim, 'Concord_W']
summary_data_frame.loc[vidStim, 'NpDom_F'] = CCCFrame.loc[vidStim, 'Concord_F']
summary_data_frame.loc[vidStim, 'NpDom_Prob.F'] = CCCFrame.loc[vidStim, 'Concord_Prob.F']
summary_data_frame.loc[vidStim, 'NpDom_Chi2'] = CCCFrame.loc[vidStim, 'Concord_Chi2']
summary_data_frame.loc[vidStim, 'NpDom_Prob.perm'] = CCCFrame.loc[vidStim, 'Concord_Prob.perm']
summary_data_frame.loc[vidStim, 'NpDom_Dimension'] = CCCFrame.loc[vidStim, 'Concord_Dimension']
summary_data_frame.loc[vidStim, 'NpDom_Categ'] = CCCFrame.loc[vidStim, 'ConcordCateg']
summary_data_frame.loc[vidStim, 'NpDom_W1'] = CCCFrame_Cluster_1.loc[vidStim, 'Concord_W']
summary_data_frame.loc[vidStim, 'NpDom_F1'] = CCCFrame_Cluster_1.loc[vidStim, 'Concord_F']
summary_data_frame.loc[vidStim, 'NpDom_Prob.F1'] = CCCFrame_Cluster_1.loc[vidStim, 'Concord_Prob.F']
summary_data_frame.loc[vidStim, 'NpDom_Chi21'] = CCCFrame_Cluster_1.loc[vidStim, 'Concord_Chi2']
summary_data_frame.loc[vidStim, 'NpDom_Prob.perm1'] = CCCFrame_Cluster_1.loc[vidStim, 'Concord_Prob.perm']
summary_data_frame.loc[vidStim, 'NpDom_Dimension1'] = CCCFrame_Cluster_1.loc[vidStim, 'Concord_Dimension']
summary_data_frame.loc[vidStim, 'NpDomCateg1'] = CCCFrame_Cluster_1.loc[vidStim, 'ConcordCateg']
summary_data_frame.loc[vidStim, 'NpDom_W2'] = CCCFrame_Cluster_2.loc[vidStim, 'Concord_W']
summary_data_frame.loc[vidStim, 'NpDom_F2'] = CCCFrame_Cluster_2.loc[vidStim, 'Concord_F']
summary_data_frame.loc[vidStim, 'NpDom_Prob.F2'] = CCCFrame_Cluster_2.loc[vidStim, 'Concord_Prob.F']
summary_data_frame.loc[vidStim, 'NpDom_Chi22'] = CCCFrame_Cluster_2.loc[vidStim, 'Concord_Chi2']
summary_data_frame.loc[vidStim, 'NpDom_Prob.perm2'] = CCCFrame_Cluster_2.loc[vidStim, 'Concord_Prob.perm']
summary_data_frame.loc[vidStim, 'NpDom_Dimension2'] = CCCFrame_Cluster_2.loc[vidStim, 'Concord_Dimension']
summary_data_frame.loc[vidStim, 'NpDomCateg2'] = CCCFrame_Cluster_2.loc[vidStim, 'ConcordCateg']
except:
continue
####################### No Dominance: kappam.fleiss Statistics
## This file is created using R Program: /mnt/7CBFA0EC210FC340/ExperimentRelatedData/FromUbuntuAcerSystem/Experiment/Survey/IRRTest_KendallVegan_StimulationWise.R
kappaFile = 'NoDominance_AllStimuli_Kappa_Test_Result_%s.csv' %(date.split('2018_')[1])
kappaFile_Cluster_1 = 'NoDominance_Cluster-1_Kappa_Test_Result_%s.csv' %(date.split('2018_')[1])
kappaFile_Cluster_2 = 'NoDominance_Cluster-2_Kappa_Test_Result_%s.csv' %(date.split('2018_')[1])
kappaFrame = pd.read_csv(os.path.join(_thisDir, 'NewTarget', videoPrefix, kappaFile), index_col = 0)
kappaFrame_Cluster_1 = pd.read_csv(os.path.join(_thisDir, 'NewTarget', videoPrefix, kappaFile_Cluster_1), index_col = 0)
kappaFrame_Cluster_2 = pd.read_csv(os.path.join(_thisDir, 'NewTarget', videoPrefix, kappaFile_Cluster_2), index_col = 0)
kappaEmt = kappaFrame.index.values
kappaEmt = [i.split('/')[-1] for i in kappaFrame.index.values] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split(' ')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split("'")) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split('(')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split(')')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split('&')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = [i.split('.')[0] for i in kappaEmt]
kappaFrame['Experiment_id'] = kappaEmt
kappaFrame.set_index('Experiment_id', drop=True, inplace=True)
kappaEmt = [i.split('/')[-1] for i in kappaFrame_Cluster_1.index.values] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split(' ')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split("'")) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split('(')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split(')')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split('&')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = [i.split('.')[0] for i in kappaEmt]
kappaFrame_Cluster_1['Experiment_id'] = kappaEmt
kappaFrame_Cluster_1.set_index('Experiment_id', drop=True, inplace=True)
kappaEmt = [i.split('/')[-1] for i in kappaFrame_Cluster_2.index.values] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split(' ')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split("'")) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split('(')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split(')')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = ['_'.join(i.split('&')) for i in kappaEmt] ### Renaming the experiment Ids taken from csv file
kappaEmt = [i.split('.')[0] for i in kappaEmt]
kappaFrame_Cluster_2['Experiment_id'] = kappaEmt
kappaFrame_Cluster_2.set_index('Experiment_id', drop=True, inplace=True)
for vidStim in summary_data_frame.index.values:
try:
#"kappa", 'stats', "p_val", 'subjects', 'raters'
summary_data_frame.loc[vidStim, 'NpDom_kappa'] = kappaFrame.loc[vidStim, 'kappa']
summary_data_frame.loc[vidStim, 'NpDom_stats'] = kappaFrame.loc[vidStim, 'stats']
summary_data_frame.loc[vidStim, 'NpDom_pVal'] = kappaFrame.loc[vidStim, 'p_val']
summary_data_frame.loc[vidStim, 'NpDom_subjects'] = kappaFrame.loc[vidStim, 'subjects']
summary_data_frame.loc[vidStim, 'NpDom_raters'] = kappaFrame.loc[vidStim, 'raters']
summary_data_frame.loc[vidStim, 'NpDom_kappa1'] = kappaFrame_Cluster_1.loc[vidStim, 'kappa']
summary_data_frame.loc[vidStim, 'NpDom_stats1'] = kappaFrame_Cluster_1.loc[vidStim, 'stats']
summary_data_frame.loc[vidStim, 'NpDom_pVal1'] = kappaFrame_Cluster_1.loc[vidStim, 'p_val']
summary_data_frame.loc[vidStim, 'NpDom_subjects1'] = kappaFrame_Cluster_1.loc[vidStim, 'subjects']
summary_data_frame.loc[vidStim, 'NpDom_raters1'] = kappaFrame_Cluster_1.loc[vidStim, 'raters']
summary_data_frame.loc[vidStim, 'NpDom_kappa2'] = kappaFrame_Cluster_2.loc[vidStim, 'kappa']
summary_data_frame.loc[vidStim, 'NpDom_stats2'] = kappaFrame_Cluster_2.loc[vidStim, 'stats']
summary_data_frame.loc[vidStim, 'NpDom_pVal2'] = kappaFrame_Cluster_2.loc[vidStim, 'p_val']
summary_data_frame.loc[vidStim, 'NpDom_subjects2'] = kappaFrame_Cluster_2.loc[vidStim, 'subjects']
summary_data_frame.loc[vidStim, 'NpDom_raters2'] = kappaFrame_Cluster_2.loc[vidStim, 'raters']
except:
continue'''
pdb.set_trace()
summary_data_frame.to_csv(os.path.join(_thisDir, 'NewTarget', 'WithCCC_kappa_WithClustering_summary_data_frame_'+cleaning_flag+date+'_withClusterInformation.csv'))
'''for vidStim in VideosWithEmotions.keys():
emts = np.unique(VideosWithEmotions[vidStim])
tmp = 0
for _emt in emts:
factor = np.sum(np.array(VideosWithEmotions[vidStim])==_emt)/len(VideosWithEmotions[vidStim])
if tmp < factor:
tmp = factor
overallStats.loc[vidStim, 'emtFactor'] = round(factor,2)
overallStats.loc[vidStim, 'MostRated'] = _emt
overallStats.loc[vidStim, 'NRatedEmotions'] = len(VideosWithEmotions[vidStim])
if len(MADFrame.loc[vidStim, ['VMAD', 'AMAD', 'DMAD', 'LMAD', 'FMAD']]) == 1:
overallStats.loc[vidStim, ['VMAD', 'AMAD', 'DMAD', 'LMAD', 'FMAD']] = MADFrame.loc[vidStim, ['VMAD', 'AMAD', 'DMAD', 'LMAD', 'FMAD']]
else:
overallStats.loc[vidStim, ['VMAD', 'AMAD', 'DMAD', 'LMAD', 'FMAD']] = MADFrame.loc[vidStim, ['VMAD', 'AMAD', 'DMAD', 'LMAD', 'FMAD']].values[0]'''
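# The blocks above repeat the same split/join chain to sanitise experiment ids
# read from csv files. A hypothetical helper (not referenced by the original
# code) that performs the equivalent cleanup in one pass:
def sanitize_experiment_id(raw_id):
    """Keep the file name, replace space ' ( ) & with underscores, drop the extension."""
    name = raw_id.split('/')[-1]
    for ch in (' ', "'", '(', ')', '&'):
        name = '_'.join(name.split(ch))
    return name.split('.')[0]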
def VAD_Plotting(file_name =
a bundle of transcripts, find intervals matching retained
introns. A retained intron is defined as an interval from an exon/intron
boundary to the next where both boundaries are in the same exon of another
transcript'''
intron_intervals = [GTF.toIntronIntervals(transcript)
for transcript in gene]
intron_intervals = list(set(
itertools.chain.from_iterable(intron_intervals)))
intron_intervals.sort()
for transcript in gene:
exons = iter(sorted(GTF.asRanges(transcript)))
introns = iter(intron_intervals)
retained_introns = []
try:
intron = introns.next()
exon = exons.next()
while True:
if exon[1] < intron[0]:
exon = exons.next()
continue
if intron[0] >= exon[0] and intron[1] <= exon[1]:
E.debug("exon %s of transcript %s contains intron %s" %
(exon, transcript[0].transcript_id, intron))
retained_introns.append(intron)
intron = introns.next()
except StopIteration:
pass
retained_introns = Intervals.combine(retained_introns)
for intron in retained_introns:
entry = GTF.Entry()
entry = entry.copy(transcript[0])
entry.start = intron[0]
entry.end = intron[1]
yield entry
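# Minimal standalone sketch (not part of the original script) of the containment
# test used above: an intron is "retained" when some exon of another transcript
# spans it completely. Intervals are plain (start, end) tuples here, with no
# dependency on the GTF module.
def _intron_retained_in(intron, exons):
    """Return True if any (start, end) exon fully contains the intron."""
    return any(exon[0] <= intron[0] and intron[1] <= exon[1] for exon in exons)
# e.g. _intron_retained_in((120, 180), [(100, 200), (250, 300)]) -> True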
def gene_to_blocks(gene):
'''Given a bundle of all exons in a gene, create a separate exon
for each unique part of an exon, as well as one for introns. '''
exons = [(e.start, e.end)
for e in gene if e.feature == "exon"]
exons = list(set(sum(exons, ())))
exons.sort()
entry = GTF.Entry()
entry = entry.copy(gene[0])
entry.transcript_id = "merged"
entry.feature = "exon"
entry.source = "merged"
for i in range(len(exons)-1):
entry.start = exons[i]
entry.end = exons[i+1]
entry.attributes["exon_id"] = str(i + 1)
yield entry
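# Illustrative sketch (not used by main()) of the partitioning idea in
# gene_to_blocks: collect every exon boundary and emit the interval between each
# consecutive pair, giving one block per unique sub-exonic region.
def _boundaries_to_blocks(exons):
    """e.g. [(0, 10), (5, 20)] -> [(0, 5), (5, 10), (10, 20)]"""
    boundaries = sorted(set(sum([tuple(e) for e in exons], ())))
    return [(boundaries[i], boundaries[i + 1])
            for i in range(len(boundaries) - 1)]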
def main(argv=None):
if not argv:
argv = sys.argv
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("--merge-exons-distance",
dest="merge_exons_distance",
type="int",
help="distance in nucleotides between "
"exons to be merged [%default].")
parser.add_option("--pattern-identifier", dest="pattern", type="string",
help="pattern to use for renaming genes/transcripts. "
"The pattern should contain a %i, for example "
"--pattern-identifier=ENSG%010i [%default].")
parser.add_option("--sort-order",
dest="sort_order",
type="choice",
choices=("gene",
"gene+transcript",
"transcript",
"position",
"contig+gene",
"position+gene",
"gene+position",
"gene+exon"),
help="sort input data [%default].")
parser.add_option("--mark-utr",
dest="mark_utr",
action="store_true",
help="mark utr for method --merge-exons. "
"[%default].")
parser.add_option(
"--without-utr",
dest="with_utr",
action="store_false",
help="exclude UTR in methods --merge-exons, merge-transcripts "
"and intersect-transripts. Setting this option will remove "
"non-coding transcripts. "
"[%default].")
parser.add_option(
"--filter-method", dest="filter_method",
type="choice",
choices=("gene",
"transcript",
"longest-gene",
"longest-transcript",
"representative-transcript",
"proteincoding",
"lincrna"),
help="Filter method to apply. Available filters are: "
"'gene': filter by gene_id given in ``--map-tsv-file``, "
"'transcript': filter by transcript_id given in ``--map-tsv-file``, "
"'longest-gene': output the longest gene for overlapping genes ,"
"'longest-transcript': output the longest transcript per gene,"
"'representative-transcript': output the representative transcript "
"per gene. The representative transcript is the transcript "
"that shares most exons with other transcripts in a gene. "
"The input needs to be sorted by gene. "
"'proteincoding': only output protein coding features. "
"'lincrna': only output lincRNA features. "
"[%default].")
parser.add_option("-a", "--map-tsv-file", dest="filename_filter",
type="string",
metavar="tsv",
help="filename of ids to map/filter [%default].")
parser.add_option(
"--gff-file", dest="filename_gff", type="string",
metavar="GFF",
help="second filename of features (see --remove-overlapping) "
"[%default]")
parser.add_option("--invert-filter",
dest="invert_filter",
action="store_true",
help="when using --filter, invert selection "
"(like grep -v). "
"[%default].")
parser.add_option("--sample-size", dest="sample_size", type="int",
help="extract a random sample of size # if the option "
"'--method=filter --filter-method' is set "
"[%default].")
parser.add_option(
"--intron-min-length",
dest="intron_min_length", type="int",
help="minimum length for introns (for --exons-file2introns) "
"[%default].")
parser.add_option("--min-exons-length",
dest="min_exons_length",
type="int",
help="minimum length for gene (sum of exons) "
"(--sam-fileple-size) [%default].")
parser.add_option(
"--intron-border",
dest="intron_border",
type="int",
help="number of residues to exclude at intron at either end "
"(--exons-file2introns) [%default].")
parser.add_option("--ignore-strand",
dest="ignore_strand",
action="store_true",
help="remove strandedness of features (set to '.') when "
"using ``transcripts2genes`` or ``filter``"
"[%default].")
parser.add_option("--permit-duplicates", dest="strict",
action="store_false",
help="permit duplicate genes. "
"[%default]")
parser.add_option(
"--duplicate-feature",
dest="duplicate_feature",
type="choice",
choices=("gene", "transcript", "both", "ucsc", "coordinates"),
help="remove duplicates by gene/transcript. "
"If ``ucsc`` is chosen, transcripts ending on _dup# are "
"removed. This is necessary to remove duplicate entries "
"that are next to each other in the sort order "
"[%default]")
parser.add_option("--use-gene-id", dest="use_geneid", action="store_true",
help="when merging transcripts, exons or introns, use "
"the parent gene_id as the transcript id.")
parser.add_option("-m", "--method", dest="method", type="choice",
action="append",
choices=(
"add-protein-id",
"exons2introns",
"filter",
"find-retained-introns",
"genes-to-unique-chunks",
"intersect-transcripts",
"join-exons",
"merge-exons",
"merge-transcripts",
"merge-genes",
"merge-introns",
"remove-overlapping",
"remove-duplicates",
"rename-genes",
"rename-transcripts",
"rename-duplicates",
"renumber-genes",
"renumber-transcripts",
"set-transcript-to-gene",
"set-gene-to-transcript",
"set-protein-to-transcript",
"set-score-to-distance",
"set-gene_biotype-to-source",
"set-source-to-transcript_biotype",
"sort",
"transcript2genes",
"unset-genes"),
help="Method to apply [%default]."
"Please only select one.")
parser.set_defaults(
sort_order="gene",
filter_method="gene",
pattern="%i",
merge_exons_distance=0,
filename_filter=None,
intron_border=None,
intron_min_length=None,
sample_size=0,
min_exons_length=0,
ignore_strand=False,
mark_utr=False,
with_utr=True,
invert_filter=False,
duplicate_feature=None,
strict=True,
method=None,
use_geneid=False,
)
(options, args) = E.Start(parser, argv=argv)
ninput, noutput, nfeatures, ndiscarded = 0, 0, 0, 0
if options.method is None:
raise ValueError("please specify a --method")
if len(options.method) > 1:
raise ValueError("multiple --method arguements specified")
else:
options.method = options.method[0]
if options.method == "set-transcript-to-gene":
for gff in GTF.iterator(options.stdin):
ninput += 1
gff.setAttribute("transcript_id", gff.gene_id)
options.stdout.write("%s\n" % str(gff))
noutput += 1
nfeatures += 1
elif options.method == "set-gene_biotype-to-source":
for gff in GTF.iterator(options.stdin):
ninput += 1
if "gene_biotype" not in gff.attributes:
gff.setAttribute("gene_biotype", gff.source)
options.stdout.write("%s\n" % str(gff))
noutput += 1
nfeatures += 1
elif options.method == "set-source-to-transcript_biotype":
for gff in GTF.iterator(options.stdin):
ninput += 1
try:
gff.source = gff.transcript_biotype
except AttributeError:
pass
options.stdout.write("%s\n" % str(gff))
noutput += 1
nfeatures += 1
elif options.method == "remove-duplicates":
counts = collections.defaultdict(int)
if options.duplicate_feature == "ucsc":
store = []
remove = set()
f = lambda x: x[0].transcript_id
gffs = GTF.transcript_iterator(
GTF.iterator(options.stdin), strict=False)
outf = lambda x: "\n".join([str(y) for y in x])
for entry in gffs:
ninput += 1
store.append(entry)
id = f(entry)
if "_dup" in id:
remove.add(re.sub("_dup\d+", "", id))
remove.add(id)
for entry in store:
id = f(entry)
if id not in remove:
options.stdout.write(outf(entry) + "\n")
noutput += 1
else:
ndiscarded += 1
E.info("discarded duplicates for %s" % (id))
else:
if options.duplicate_feature == "gene":
gffs = GTF.gene_iterator(
GTF.iterator(options.stdin), strict=False)
f = lambda x: x[0][0].gene_id
outf = lambda x: "\n".join(
["\n".join([str(y) for y in xx]) for xx in x])
elif options.duplicate_feature == "transcript":
gffs = GTF.transcript_iterator(
GTF.iterator(options.stdin), strict=False)
f = lambda x: x[0].transcript_id
outf = lambda x: "\n".join([str(y) for y in x])
elif options.duplicate_feature == "coordinates":
gffs = GTF.chunk_iterator(GTF.iterator(options.stdin))
f = lambda x: x[0].contig + "_" + \
str(x[0].start) + "-" + str(x[0].end)
outf = lambda x: "\n".join([str(y) for y in x])
store = []
for entry in gffs:
ninput += 1
store.append(entry)
id = f(entry)
counts[id] += 1
# Assumes GTF file sorted by contig then start
last_id = ""
if options.duplicate_feature == "coordinates":
for entry in store:
id = f(entry)
if id == last_id:
ndiscarded += 1
E.info("discarded duplicates for %s: %i" %
(id, counts[id]))
else:
options.stdout.write(outf(entry) + "\n")
noutput += 1
last_id = id
else:
for entry in store:
id = f(entry)
if counts[id] == 1:
options.stdout.write(outf(entry) + "\n")
noutput += 1
else:
ndiscarded += 1
E.info("discarded duplicates for %s: %i" %
(id, counts[id]))
elif "sort" == options.method:
for gff in GTF.iterator_sorted(GTF.iterator(options.stdin),
sort_order=options.sort_order):
ninput += 1
options.stdout.write("%s\n" % str(gff))
noutput += 1
nfeatures += 1
elif "set-gene-to-transcript" == options.method:
for gff in GTF.iterator(options.stdin):
ninput += 1
gff.setAttribute("gene_id", gff.transcript_id)
options.stdout.write("%s\n" % str(gff))
noutput += 1
nfeatures += 1
elif "set-protein-to-transcript" == options.method:
for gff in GTF.iterator(options.stdin):
ninput += 1
gff.setAttribute("protein_id", gff.transcript_id)
options.stdout.write("%s\n" % str(gff))
noutput += 1
nfeatures += 1
elif "add-protein-id" == options.method:
transcript2protein = IOTools.readMap(
IOTools.openFile(options.filename_filter, "r"))
missing = set()
for gff in GTF.iterator(options.stdin):
ninput += 1
if gff.transcript_id not in transcript2protein:
if gff.transcript_id not in missing:
E.debug(
("removing transcript '%s' due to "
"missing protein id") % gff.transcript_id)
missing.add(gff.transcript_id)
ndiscarded += 1
continue
gff.setAttribute(
"protein_id", transcript2protein[gff.transcript_id])
options.stdout.write("%s\n" % str(gff))
noutput += 1
nfeatures += 1
E.info("transcripts removed due to missing protein ids: %i" %
len(missing))
elif "join-exons" == options.method:
for exons in GTF.transcript_iterator(GTF.iterator(options.stdin)):
ninput += 1
strand = Genomics.convertStrand(exons[0].strand)
contig = exons[0].contig
transid = exons[0].transcript_id
geneid = exons[0].gene_id
biotype = exons[0].source
all_start, all_end = min([x.start for x in exons]), max(
[x.end for x in exons])
y = GTF.Entry()
y.contig = contig
y.source = biotype
y.feature = "transcript"
y.start = all_start
y.end = all_end
y.strand = strand
y.transcript_id = transid
y.gene_id = geneid
options.stdout.write("%s\n" % str(y))
elif "merge-genes" == options.method:
# merges overlapping genes
#
gffs = GTF.iterator_sorted_chunks(
GTF.flat_gene_iterator(GTF.iterator(options.stdin)),
sort_by="contig-strand-start")
def iterate_chunks(gff_chunks):
last = gff_chunks.next()
to_join = [last]
for gffs in gff_chunks:
d = gffs[0].start - last[-1].end
if gffs[0].contig == last[0].contig and \
gffs[0].strand == last[0].strand:
assert gffs[0].start >= last[0].start, \
("input file should be sorted by contig, strand "
"and position: d=%i:\nlast=\n%s\nthis=\n%s\n") % \
(d,
# Repository: rockychen-dpaw/resource_tracking
import traceback
import os
import logging
import requests
import itertools
import json
from datetime import timedelta
from django.utils import timezone
from django.conf import settings
from django.contrib.gis.db import models
from django.contrib.auth.models import User
from django.core import validators
from django.db import connection
from django.core.exceptions import ObjectDoesNotExist
from django.dispatch import receiver
from django.db.models.signals import post_save,pre_save,post_delete
from django.contrib.postgres.fields import ArrayField,JSONField
from django.core.files.storage import FileSystemStorage
logger = logging.getLogger(__name__)
_programmatic_user = None
def get_user_program():
"""
"""
global _programmatic_user
if not _programmatic_user:
try:
_programmatic_user = User.objects.get(username='Programmatic')
except ObjectDoesNotExist as ex:
with connection.cursor() as cursor:
try:
cursor.execute("""
INSERT INTO auth_user
(username,first_name,last_name,email,is_staff,is_active,is_superuser,password,date_joined)
VALUES
('Programmatic','Programmatic','System','<EMAIL>',false,true,false,'','{}')
""".format(timezone.now().strftime('%Y-%m-%d %H:%M:%S %Z')))
except:
pass
_programmatic_user = User.objects.get(username='Programmatic')
return _programmatic_user
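# Hypothetical alternative (not used above, shown only for comparison): Django's
# get_or_create covers the same "fetch the Programmatic user, creating it on
# first use" pattern without raw SQL.
def _get_or_create_programmatic_user():
    user, _created = User.objects.get_or_create(
        username='Programmatic',
        defaults={'first_name': 'Programmatic', 'last_name': 'System',
                  'is_staff': False, 'is_active': True, 'is_superuser': False})
    return user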
# Create your models here.
class Option(models.Model):
_tvalue = None
name = models.CharField(max_length=32,unique=True)
comments = models.TextField(max_length=512,null=True,blank=True)
value = models.CharField(max_length=64,null=True,blank=True)
@property
def tvalue(self):
"""
typed value
"""
if self.value is None or self.value == "":
return None
if self._tvalue is None:
try:
self._tvalue = json.loads(self.value)
except:
self._tvalue = self.value
return self._tvalue
@classmethod
def get_option(cls,key,default=None):
try:
return Option.objects.get(name=key).tvalue
except:
return default
def __str__(self):
return "Option({})".format(self.name)
class Meta:
ordering = ["name"]
class District(models.Model):
name = models.CharField(max_length=64,unique=True,null=False,editable=False)
def clean(self):
if self.name:
    self.name = self.name.strip()
def __str__(self):
return self.name
class Meta:
ordering = ["name"]
class Network(models.Model):
name = models.CharField(max_length=64,unique=True,null=False)
comments = models.CharField(max_length=512,null=True,blank=True)
creator = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT,
related_name='+', editable=False)
modifier = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT,
related_name='+', editable=False)
created = models.DateTimeField(default=timezone.now, editable=False)
modified = models.DateTimeField(auto_now=True, editable=False)
def __str__(self):
return self.name
class Meta:
ordering = ["name"]
class Repeater(models.Model):
site_name = models.CharField(max_length=128,unique=True)
network = models.ForeignKey(Network,on_delete=models.SET_NULL,null=True,blank=True,editable=False)
last_inspected = models.DateField(default=timezone.now, null=True,blank=True)
sss_display = models.BooleanField(default=True,verbose_name="SSS Display Enabled")
sss_description = models.CharField(max_length=512,null=True,blank=True)
district = models.ForeignKey(District,on_delete=models.PROTECT,null=True,blank=True)
channel_number = models.SmallIntegerField(null=True,blank=True)
point = models.PointField(null=True,blank=True)
link_description = models.CharField(max_length=512,null=True,blank=True)
link_point = models.PointField(null=True,blank=True)
tx_frequency = models.DecimalField(max_digits=16,decimal_places=8,null=True,blank=True,verbose_name="TX Frequency (mHz)")
rx_frequency = models.DecimalField(max_digits=16,decimal_places=8,null=True,blank=True,verbose_name="RX Frequency (mHz)")
ctcss_tx = models.DecimalField(max_digits=16,decimal_places=8,null=True,blank=True,verbose_name="CTCSS TX (Hz)")
ctcss_rx = models.DecimalField(max_digits=16,decimal_places=8,null=True,blank=True,verbose_name="CTCSS RX (Hz)")
nac_tx = models.CharField(max_length=32,null=True,blank=True,verbose_name="NAC TX (P25)")
nac_rx = models.CharField(max_length=32,null=True,blank=True,verbose_name="NAC RX (P25)")
tx_antenna_height = models.FloatField(null=True,blank=True,verbose_name="TX Antenna Height (M)")
rx_antenna_height = models.FloatField(null=True,blank=True,verbose_name="RX Antenna Height (M)")
tx_power = models.FloatField(null=True,blank=True,verbose_name="TX Transmitter RF power in Watts,20dBm=0.1w")
rx_power = models.FloatField(null=True,blank=True,verbose_name="RX Transmitter RF power in Watts,20dBm=0.1w")
tx_antenna_gain = models.FloatField(null=True,blank=True,verbose_name="TX Transmitter antenna gain in dBi")
rx_antenna_gain = models.FloatField(null=True,blank=True,verbose_name="RX Transmitter antenna gain in dBi")
output_color = models.CharField(max_length=32,null=True,blank=True)
output_radius = models.FloatField(null=True,blank=True,verbose_name="Output Radius (Km)")
output_clutter = models.FloatField(null=True,blank=True,verbose_name="Output Clutter (M)")
creator = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT,
related_name='+', editable=False)
modifier = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT,
related_name='+', editable=False)
created = models.DateTimeField(default=timezone.now, editable=False)
modified = models.DateTimeField(auto_now=True, editable=False)
@property
def longitude(self):
return self.point.x if self.point else None
@property
def latitude(self):
return self.point.y if self.point else None
@property
def link_longitude(self):
return self.link_point.x if self.link_point else None
@property
def link_latitude(self):
        return self.link_point.y if self.link_point else None
@property
def is_complete(self):
        return bool(self.point and self.channel_number and self.tx_frequency and self.rx_frequency and self.tx_antenna_height and self.rx_antenna_height)
def __str__(self):
return "{} - {}".format(self.district,self.site_name)
class Meta:
ordering = ["district__name","site_name"]
class OverwriteStorage(FileSystemStorage):
def get_available_name(self, name):
"""Returns a filename that's free on the target storage system, and
available for new content to be written to.
Found at http://djangosnippets.org/snippets/976/
This file storage solves overwrite on upload problem. Another
proposed solution was to override the save method on the model
like so (from https://code.djangoproject.com/ticket/11663):
def save(self, *args, **kwargs):
try:
this = MyModelName.objects.get(id=self.id)
if this.MyImageFieldName != self.MyImageFieldName:
this.MyImageFieldName.delete()
except: pass
super(MyModelName, self).save(*args, **kwargs)
"""
# If the filename already exists, remove it as if it was a true file system
if self.exists(name):
os.remove(os.path.join(settings.MEDIA_ROOT, name))
return name
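# OverwriteStorage is used by the FileField declarations further below (storage=OverwriteStorage())
# so that a re-uploaded raster replaces the existing file at the same path instead of being saved
# under an altered name.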
def network_file_path(instance,filename,suffix=None):
if isinstance(instance,NetworkTXAnalysis):
folder = "tx"
else:
folder = "rx"
ext = os.path.splitext(filename)[1]
name = instance.network.name.lower().replace(" ","_")
if suffix :
return os.path.join("radio","networks",name,folder,"{}_{}_{}{}".format(name,folder,suffix,ext))
else:
return os.path.join("radio","networks",name,folder,"{}_{}{}".format(name,folder,ext))
def network_4326_file_path(instance,filename):
return network_file_path(instance,filename,suffix="4326")
def network_mercator_file_path(instance,filename):
return network_file_path(instance,filename,suffix="mercator")
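# Illustrative example (hypothetical network name): for a NetworkTXAnalysis attached to a network
# named "Metro North", network_4326_file_path(instance, "coverage.tif") returns
# "radio/networks/metro_north/tx/metro_north_tx_4326.tif".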
class CoverageAnalysis(models.Model):
IDLE = 0
WAITING = 110
WAITING_TO_DELETE = 120
DELETING = 121
DELETE_FAILED = -122
DELETED = 129
WAITING_TO_ANALYSE = 130
ANALYSING = 131
ANALYSE_FAILED = -132
ANALYSED = 139
WAITING_TO_DOWNLOAD = 140
DOWNLOADING = 141
DOWNLOAD_FAILED = -142
DOWNLOADED = 149
WAITING_TO_EXTRACT = 150
EXTRACTING = 151
EXTRACT_FAILED = -152
EXTRACT_REQUIRED = -153
EXTRACTED = 159
#Failed statuses
TIMEOUT = -9998
FAILED = -9999
PROCESS_STATUS_CHOICES = (
(IDLE,"Idle"),
(WAITING,"Waiting to process"),
(DELETE_FAILED ,"Delete Failed"),
(WAITING_TO_DELETE ,"Waiting to delete"),
(DELETING,"Deleting Calculation"),
(DELETED,"Deleted Calculation"),
(ANALYSE_FAILED,"Analyse Failed"),
(WAITING_TO_ANALYSE ,"Waiting to analyse"),
(ANALYSING,"Analysing"),
(ANALYSED,"Analysed"),
(DOWNLOAD_FAILED,"Download Failed"),
(WAITING_TO_DOWNLOAD ,"Waiting to download"),
(DOWNLOADING,"Downloading"),
(DOWNLOADED,"Downloaded"),
(EXTRACT_FAILED,"Extracting Failed"),
(EXTRACT_REQUIRED,"Extract Required"),
(WAITING_TO_EXTRACT ,"Waiting to extract"),
(EXTRACTING,"Extrating Spatial Data"),
(TIMEOUT,"Timeout"),
(FAILED,"Failed"),
)
PROCESS_STATUS_MAPPING = dict(PROCESS_STATUS_CHOICES)
WAITING_STATUS_MAPPING = {
IDLE:WAITING,
WAITING:WAITING,
WAITING_TO_DELETE:WAITING_TO_DELETE,
DELETING:WAITING_TO_DELETE,
DELETE_FAILED:WAITING_TO_DELETE,
DELETED:WAITING_TO_ANALYSE,
WAITING_TO_ANALYSE:WAITING_TO_ANALYSE,
ANALYSING:WAITING_TO_ANALYSE,
ANALYSE_FAILED:WAITING_TO_ANALYSE,
ANALYSED:WAITING_TO_DOWNLOAD,
WAITING_TO_DOWNLOAD:WAITING_TO_DOWNLOAD,
DOWNLOADING:WAITING_TO_DOWNLOAD,
DOWNLOAD_FAILED:WAITING_TO_DOWNLOAD,
DOWNLOADED:WAITING_TO_EXTRACT,
WAITING_TO_EXTRACT:WAITING_TO_EXTRACT,
EXTRACTING:WAITING_TO_EXTRACT,
#EXTRACT_FAILED:WAITING_TO_DOWNLOAD,
EXTRACT_FAILED:WAITING_TO_EXTRACT,
EXTRACT_REQUIRED:WAITING_TO_EXTRACT,
TIMEOUT:WAITING,
FAILED:WAITING
}
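    # WAITING_STATUS_MAPPING gives the "waiting" state to queue next for a given current state:
    # a failed or in-progress stage re-queues at the same stage (e.g. DOWNLOAD_FAILED ->
    # WAITING_TO_DOWNLOAD), while a completed stage queues the following one (e.g. DOWNLOADED ->
    # WAITING_TO_EXTRACT).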
PROCESS_TIMEOUT = timedelta(hours=6)
process_status = models.SmallIntegerField(default=IDLE,choices=PROCESS_STATUS_CHOICES,editable=False)
process_msg = models.TextField(editable=False,null=True)
process_start = models.DateTimeField(editable=False,null=True)
process_end = models.DateTimeField(editable=False,null=True)
@property
def process_status_name(self):
if self.process_status > self.IDLE and timezone.now() - self.process_start > self.PROCESS_TIMEOUT:
return "Timeout"
else:
            return self.PROCESS_STATUS_MAPPING[self.process_status]
@property
def status_name(self):
status = self.process_status
if self.process_status > self.IDLE and timezone.now() - self.process_start > self.PROCESS_TIMEOUT:
status = self.IDLE
if status == self.IDLE:
if not self.last_analysed:
return "Outdated"
elif self.last_analysed < self.analyse_requested:
return "Outdated"
else:
return "Latest"
else:
return self.PROCESS_STATUS_MAPPING[status]
@property
def is_outdated(self):
if not self.last_analysed:
return True
elif self.last_analysed < self.analyse_requested:
return True
else:
return False
class Meta:
abstract = True
class NetworkAnalysis(CoverageAnalysis):
analyse_requested = models.DateTimeField(editable=False)
last_analysed = models.DateTimeField(editable=False,null=True)
analyse_result = JSONField(null=True,editable=False)
raster_4326 = models.FileField(max_length=512,null=True,editable=False,upload_to=network_4326_file_path, storage=OverwriteStorage())
world_file_4326 = models.FileField(max_length=512,null=True,editable=False,upload_to=network_4326_file_path, storage=OverwriteStorage())
#raster_mercator = models.FileField(max_length=512,null=True,editable=False,upload_to=network_mercator_file_path, storage=OverwriteStorage())
#world_file_mercator = models.FileField(max_length=512,null=True,editable=False,upload_to=network_mercator_file_path, storage=OverwriteStorage())
    bbox = ArrayField(base_field=models.FloatField(),size=4,null=True,editable=False) #left bottom lon,left bottom lat,upper right lon,upper right lat
class Meta:
abstract = True
class NetworkTXAnalysis(NetworkAnalysis):
network = models.OneToOneField(Network,on_delete=models.CASCADE,primary_key=True,editable=False,related_name="tx_analysis")
class NetworkRXAnalysis(NetworkAnalysis):
network = models.OneToOneField(Network,on_delete=models.CASCADE,primary_key=True,editable=False,related_name="rx_analysis")
def repeater_file_path(instance,filename,suffix=None,ext=None):
if isinstance(instance,RepeaterTXAnalysis):
folder = "tx"
else:
folder = "rx"
if not ext:
ext = os.path.splitext(filename)[1]
site_name = instance.repeater.site_name.lower().replace(" ","_")
if suffix :
return os.path.join("radio","repeaters",site_name,folder,"{}_{}_{}{}".format(site_name,folder,suffix,ext))
else:
return os.path.join("radio","repeaters",site_name,folder,"{}_{}{}".format(site_name,folder,ext))
def repeater_4326_file_path(instance,filename):
return repeater_file_path(instance,filename,suffix="4326",ext=".tiff")
def repeater_mercator_file_path(instance,filename):
return repeater_file_path(instance,filename,suffix="mercator")
def repeater_shp_file_path(instance,filename):
return repeater_file_path(instance,filename,ext=".shp.zip")
COMPRESS_FILE_SETTINGS = [
(".7z",lambda f,output:["7za","-y","x",f,"-o{}".format(output)]),
(".zip",lambda f,output:["unzip","-o","-q",f,"-d",output]),
(".tar",lambda f,output:["tar","--overwrite","-x","-f",f,"-C",output]),
(".tar.gz",lambda f,output:["tar","--overwrite","-x","-z","-f",f,"-C",output]),
(".tgz",lambda f,output:["tar","--overwrite","-x","-z","-f",f,"-C",output]),
(".tar.xz",lambda f,output:["tar","--overwrite","-x","-J","-f",f,"-C",output]),
(".tar.bz2",lambda f,output:["tar","--overwrite","-x","-j","-f",f,"-C",output]),
(".tar.bz",lambda f,output:["tar","--overwrite","-x","-j","-f",f,"-C",output])
]
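# Illustrative sketch (not part of the original module): COMPRESS_FILE_SETTINGS pairs an archive
# suffix with a callable that builds the external extraction command, so a consumer could use it
# roughly as follows (assumes the listed CLI tools are installed):
def _example_extract_archive(archive_path, output_dir):
    """Run the first matching extraction command for archive_path into output_dir."""
    import subprocess
    for suffix, build_cmd in COMPRESS_FILE_SETTINGS:
        if archive_path.lower().endswith(suffix):
            subprocess.check_call(build_cmd(archive_path, output_dir))
            return True
    return False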
class RepeaterAnalysis(CoverageAnalysis):
network = models.CharField(max_length=64,null=True,editable=False)
analyse_requested = models.DateTimeField(editable=False)
last_analysed = models.DateTimeField(editable=False,null=True)
last_merged = models.DateTimeField(editable=False,null=True)
last_resolved = models.DateTimeField(editable=False,null=True)
last_simplified = models.DateTimeField(editable=False,null=True)
analyse_result = JSONField(null=True,editable=False)
raster_4326 = models.FileField(max_length=512,null=True,editable=False,upload_to=repeater_4326_file_path, storage=OverwriteStorage())
world_file_4326 = models.FileField(max_length=512,null=True,editable=False,upload_to=repeater_4326_file_path, storage=OverwriteStorage())
#raster_mercator = models.FileField(max_length=512,null=True,editable=False,upload_to=repeater_mercator_file_path, storage=OverwriteStorage())
#world_file_mercator = models.FileField(max_length=512,null=True,editable=False,upload_to=repeater_mercator_file_path, storage=OverwriteStorage())
shp_file = models.FileField(max_length=512,null=True,editable=False,upload_to=repeater_shp_file_path, storage=OverwriteStorage())
    bbox = ArrayField(base_field=models.FloatField(),size=4,null=True,editable=False) #left bottom lon,left bottom lat,upper right lon,upper right lat
@property
def raster_4326_path(self):
return os.path.join(settings.MEDIA_ROOT,self.raster_4326.name) if self.raster_4326 else None
@property
def raster_4326_filename(self):
return os.path.split(self.raster_4326.name)[1] if self.raster_4326 else None
@property
def raster_4326_basename(self):
return os.path.splitext(os.path.split(self.raster_4326.name)[1])[0] if self.raster_4326 else None
class Meta:
abstract = True
class RepeaterTXAnalysis(RepeaterAnalysis):
repeater = models.OneToOneField(Repeater,on_delete=models.CASCADE,primary_key=True,editable=False,related_name="tx_analysis")
class RepeaterRXAnalysis(RepeaterAnalysis):
repeater = models.OneToOneField(Repeater,on_delete=models.CASCADE,primary_key=True,editable=False,related_name="rx_analysis")
class RepeaterCoverage(models.Model):
repeater = models.ForeignKey(Repeater,on_delete=models.CASCADE,editable=False,related_name="+")
site_name = models.CharField(max_length=128)
district = models.CharField(max_length=64,null=False,editable=False)
dn= models.IntegerField(null=True,editable=False)
geom = models.MultiPolygonField(null=True,editable=False)
class Meta:
abstract = True
class RepeaterTXCoverage(RepeaterCoverage):
pass
class RepeaterRXCoverage(RepeaterCoverage):
pass
class RepeaterTXCoverageSimplified(RepeaterCoverage):
color = models.CharField(max_length=16,null=True,editable=False)
pass
class RepeaterRXCoverageSimplified(RepeaterCoverage):
color = models.CharField(max_length=16,null=True,editable=False)
pass
class AnalysisListener(object):
@staticmethod
@receiver(pre_save, sender=Repeater)
def update_analysis_4_existing_repeater(sender,instance,**kwargs):
if instance.pk is None:
#new repeater,process in post_save
return
existing_repeater = Repeater.objects.get(pk=instance.pk)
AnalysisListener._update_analysis(existing_repeater,instance)
@staticmethod
@receiver(post_save, sender=Repeater)
def update_analysis_4_new_repeater(sender,instance,created,**kwargs):
if created:
#new repeater
AnalysisListener._update_analysis(None,instance)
@staticmethod
@receiver(post_delete, sender=Repeater)
def update_analysis_4_deleted_repeater(sender,instance,**kwargs):
AnalysisListener._update_analysis(instance,None)
@staticmethod
@receiver(post_save, sender=Network)
def create_network_analysis(sender,instance,created,**kwargs):
if created:
#new network,create related network analysis object
now = timezone.now()
NetworkTXAnalysis(network=instance,analyse_requested=now).save()
NetworkRXAnalysis(network=instance,analyse_requested=now).save()
@staticmethod
@receiver(pre_save, sender=Network)
    def update_analysis_4_renamed_network(sender,instance,**kwargs):
if not instance.pk:
#new network
return
existing_network = Network.objects.get(pk=instance.pk)
if instance.name != existing_network.name:
#network name changed.
now = timezone.now()
for rep in Repeater.objects.filter(network=instance):
                RepeaterTXAnalysis.objects.update_or_create(repeater=rep,defaults={"analyse_requested":now})
                RepeaterRXAnalysis.objects.update_or_create(repeater=rep,defaults={"analyse_requested":now})
@staticmethod
def _update_analysis(existing_repeater,repeater):
#update repeater analysis data
tx_changed = False
rx_changed = False
now = timezone.now()
if repeater:
#update or create repeater
if existing_repeater:
try:
tx_analysis = RepeaterTXAnalysis.objects.get(repeater=existing_repeater)
except ObjectDoesNotExist as ex:
tx_analysis = None
try:
rx_analysis = RepeaterRXAnalysis.objects.get(repeater=existing_repeater)
except ObjectDoesNotExist as ex:
rx_analysis = None
else:
tx_analysis = None
rx_analysis = None
if tx_analysis is None:
RepeaterTXAnalysis(repeater=repeater,analyse_requested=now).save()
tx_changed = True
else:
for key in ["network","point","output_radius","output_clutter","tx_frequency","tx_antenna_height"]:
if getattr(existing_repeater,key) != getattr(repeater,key):
tx_analysis.analyse_requested = now
tx_analysis.save(update_fields=["analyse_requested"])
tx_changed = True
break
if rx_analysis is None:
RepeaterRXAnalysis(repeater=repeater,analyse_requested=now).save()
                rx_changed = True
else:
for key in ["network","point","output_radius","output_clutter","rx_frequency","rx_antenna_height"]:
if getattr(existing_repeater,key) != getattr(repeater,key):
rx_analysis.analyse_requested = now
rx_analysis.save(update_fields=["analyse_requested"])
rx_changed = True
break
#update network analysis data
previous_network_tx_changed = False
previous_network_rx_changed = False
network_tx_changed = False
network_rx_changed = False
if existing_repeater:
previous_network = existing_repeater.network
else:
previous_network = None
if repeater:
network = repeater.network
else:
network = None
if previous_network != network:
if previous_network:
previous_network_tx_changed = True
previous_network_rx_changed = True
if network:
network_tx_changed = True
network_rx_changed = True
elif network:
network_tx_changed = tx_changed
network_rx_changed = rx_changed
if previous_network_tx_changed:
"""Code for extracting geometry data from ETABS text files (*.E2K & *.$ET).
Functions exist for pushing the data out to a GSA text file.
TODO:
- openings in floor diaphragms
- revise default parsing so that two keys at the beginning become nested keys
of nested dictionaries. Also, single key and value become a dictionary
but that when parsing a subsequent line, a check is carried out for a
dictionary with a single value as well as looking for sub-keys.
- calculate floor area for frame without slabs (99%) (add strategy for choice)
- create a list of floor slabs that have the `DIAPH` parameter.
- set up analyses and combinations (50%)
- coordinate systems (25%)
- grid layouts (0%)
- functions (response spectra, ground motions etc) (0%)
- wind loads (0%)
- seismic loads (0%)
- logs (0%)
- add filled steel tubes / pipes (MATERIAL & FILLMATERIAL props & quantities) (10%)
- add section pools (20%)
- add buckling restrained beams (10%)
- embedded sections (80%)
- check deck properties
- add logging: At the moment there is no log kept of elements that do not "make sense".
This could be useful for identifying how complete the record is.
- LINECURVEDATA - just found it and I didn't know what it was...
- sort out situation where embed is an SD section
DONE:
- split beams at intersections (100%)
- add point and area loads (including point loads on beams) (100%)
- NONE sections -> dummy
"""
#from os import listdir
from os.path import exists, isfile, join, basename, splitext
import pickle
from eng_utilities.general_utilities import is_numeric, try_numeric
from eng_utilities.geometry_utilities import *
from eng_utilities.section_utilities import build_section_dict
from eng_utilities.E2K_postprocessing import *
from collections import namedtuple
LoadKey = namedtuple('LoadKey', 'MEMBER STORY LOADCASE')
def is_key(string):
"""Checking whether element functions as a 'key' in the file
>>> [is_key(x) for x in ['RELEASE', '"PINNED"', 'CARDINALPT', '8', 8, 8.8, '"32"']]
[True, False, True, False, False, False, False]
"""
return (not(is_numeric(string)) and (string.find('"') == -1))
def line_split(line):
"""Fastest line-splitter (should be fast for IronPython)"""
line_list = []
[line_list.append('"' + chunk + '"') if i%2==1 else line_list.extend(chunk.split()) for i, chunk in enumerate(line.split('"'))]
return line_list
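# Illustrative example (hypothetical E2K-style line):
#   line_split('MAT  "Steel S355"  E 210000') -> ['MAT', '"Steel S355"', 'E', '210000']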
def gather(data):
"""Does a reverse generation of tuples for an E2K line
"""
# This could be the location to sort out the POINT data format
data_list = []
temp_list = []
for datum in data[::-1]:
if is_key(datum):
if len(temp_list) < 1:
out = None
elif len(temp_list) == 1:
out = temp_list[0]
else:
out = tuple(temp_list)
data_list.append((datum, out))
temp_list = []
else:
#print(try_numeric(datum.replace('"','')))
temp_list = [try_numeric(datum.replace('"',''))] + temp_list
return data_list[::-1]
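# Illustrative example (using the line_split output above):
#   gather(['MAT', '"Steel S355"', 'E', '210000']) -> [('MAT', 'Steel S355'), ('E', 210000)]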
# If top-level dict matches first item and sub-dict doesn't, then
def try_branch(a_dict, data_coll, debug=False, keyword=''):
"""When provided with a bunched line, it sets up
dictionaries and subdictionaries in a tree structure
based on branching.
If this is not appropriate, it merges the entries into
a single dictionary"""
# pick first pair
a, b = data_coll[0]
try_1 = a_dict.get(a)
if try_1 is None: # if there isn't an entry already
#print(f'{a} Not found, add {a}:, {b}: & {data_coll[1:]}')
a_dict[a] = {b:{k:v for k, v in data_coll[1:]}}
elif a_dict[a].get(b) is not None: # try_merge
#print(f'{a} found')
#print(f'OK : {a_dict[a]} {b} -> {a_dict[a].get(b)} therefore, try_merge')
b_dict = a_dict[a][b]
if b_dict == dict(data_coll[1:]): # ignore repeated lines
pass
else:
try_merge(b_dict, data_coll[1:], debug=debug, keyword=keyword)
a_dict[a][b] = b_dict.copy()
else: # try_branch (tested)
#print(f'{a} found')
#print(f'Not: {a_dict[a]} {b} -> {a_dict[a].get(b)}')
a_dict[a][b] = {k:v for k, v in data_coll[1:]}
def try_merge(a_dict, data_coll, debug=False, keyword=''):
"""When the line of data has a key that matches an existing one
it merges the data into the dictionary under the existing key"""
try:
## - Snip start
if not isinstance(data_coll, (list, tuple)):
if debug:
print(f'In try_merge ({keyword}), data_coll is {data_coll} (type: {type(data_coll)})')
elif not isinstance(data_coll[0], (list, tuple)):
if debug:
print(f'In try_merge ({keyword}), data_coll[0] is {data_coll} (type: {type(data_coll[0])})')
elif data_coll[0][0] == 'SHAPE':
# c_dict = a_dict.copy()
try_branch(a_dict, data_coll, debug=debug, keyword=keyword)
return
## - Snip end
except:
print(f'WARNING: ** In try_merge ({keyword}), data_coll is {data_coll} (type: {type(data_coll)})')
print('WARNING: (cont\'d)) Possibly a case of "IndexError: tuple index out of range" **')
for data in data_coll:
try_1 = a_dict.get(data[0], None)
if try_1 is not None:
# ---
if isinstance(try_1, list):
try_1.append(data[1])
else:
try_1 = [try_1] + [data[1]]
a_dict[data[0]] = try_1
# ---
# --- the following has been removed from the corresponding location just above
"""# if try_1 == data[1]: # data is two levels deep
#print('data is two levels deep')
pass
else:
if isinstance(try_1, list):
try_1.append(data[1])
else:
try_1 = [try_1] + [data[1]]
a_dict[data[0]] = try_1"""
# ---
else:
a_dict[data[0]] = data[1]
def load_func(the_dict, line, debug=False): # the_dict maps each load class (e.g. 'LINELOAD') to its dict of loads
loadclass = line[0][0]
member, story = line[0][1]
line_dict = dict(line)
key = tuple([member, story, line_dict.get('LC')])
if the_dict.get(loadclass) is None:
the_dict[loadclass] = dict()
if debug:
print(f'Starting to parse {loadclass}')
a_dict = the_dict[loadclass]
#print('a_dict', a_dict)
a_dict[key] = a_dict.get(key, []) + list(load_parser(line_dict))
def load_parser(d):
"""
For loadclass = 'POINTLOAD', 'LINELOAD' or 'AREALOAD'
LINELOAD "B2141" "5F" TYPE "POINTF" DIR "GRAV" LC "LL_0.5" FVAL 15 RDIST 0.4
LINELOAD "B2109" "6F" TYPE "UNIFF" DIR "GRAV" LC "DL" FVAL 0.7
LINELOAD "C7" "GF" TYPE "TEMP" LC "THN" T -10
AREALOAD "A1" "MEZZ" TYPE "TEMP" LC "THP" T 10'
AREALOAD "F1" "G/F" TYPE "UNIFF" DIR "GRAV" LC "LL" FVAL 0.005'
AREALOAD "F34" "L1" TYPE "UNIFLOADSET" "BOH"
"""
ltype = d.get('TYPE')
direction = d.get('DIR', None)
ldict = {'TYPE': ltype, 'DIR': direction}
if ltype == 'TRAPF':
load_values = [try_numeric(d.get(item)) for item in ('FSTART', 'FEND', 'RDSTART', 'RDEND')]
load_data = (load_values[2], load_values[0]), (load_values[3], load_values[1])
ave_load = 0.5 * (load_values[0] + load_values[1]) * (load_values[3] - load_values[2])
ldict.update({'DATA': load_data, 'AVE_LOAD': ave_load})
elif ltype == 'UNIFF':
load_value = try_numeric(d.get('FVAL'))
load_data = ((0, load_value), (1, load_value))
ave_load = load_value
ldict.update({'DATA': load_data, 'AVE_LOAD': ave_load})
elif ltype == 'POINTF': # This is for beams
load_data = [try_numeric(d.get(item)) for item in ('FVAL', 'RDIST')][::-1]
ldict.update({'DATA': tuple(load_data)})
elif ltype == 'FORCE': # This is for nodes
forces = ('FX', 'FY', 'FZ', 'MX', 'MY', 'MZ')
load_data = [try_numeric(d.get(item)) for item in forces]
ldict.update({'DATA': tuple(load_data)})
elif ltype == 'TEMP': # This is for lines and shells with uniform temperature load
temp_data = try_numeric(d.get('T',0))
ldict.update({'DATA': temp_data})
#return {key:[ldict]}
return [ldict]
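# Illustrative example: for a parsed line dict like
#   {'TYPE': 'UNIFF', 'DIR': 'GRAV', 'LC': 'DL', 'FVAL': 0.7}
# load_parser returns
#   [{'TYPE': 'UNIFF', 'DIR': 'GRAV', 'DATA': ((0, 0.7), (1, 0.7)), 'AVE_LOAD': 0.7}]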
def combo_func(the_dict, line):
line_key = line[0][0]
combo_name = line[0][1]
data_type = line[1][0] # e.g. 'LOAD', 'SPEC', 'COMBO', 'LOADCASE', 'DESIGN'
line_dict = dict(line[1:])
# if the line key is not already in the dictionary, add it
if not the_dict.get(line_key):
the_dict[line_key] = dict() # the_dict['COMBO']
#print(f'...adding {line_key} to COMBO_dict')
# make the line key the current reference
a_dict = the_dict[line_key] # a_dict is the_dict['COMBO']
# if the combination is not already in the dictionary, add it
if not a_dict.get(combo_name):
a_dict[combo_name] = dict()
#print(f'...adding {combo_name} to {line_key} to COMBO_dict')
# make the combination name the current reference
b_dict = a_dict[combo_name] # b_dict is the_dict['COMBO']['COMBO1']
# if the combination name is not already in the dictionary, add it
if data_type == 'TYPE': # add type to the combination dictionary
b_dict['TYPE'] = line_dict['TYPE']
elif data_type == 'DESIGN': # add type to the combination dictionary
# b_dict['DESIGN'] = line_dict['DESIGN']
b_dict.update({k:v for k,v in line_dict.items()})
else: # add the different load cases with their load factor for each datatype
#c_dict.get(data_type, []) + list(tuple([line_dict[data_type], line_dict['SF']]))
if not b_dict.get(data_type): # if there is no datatype 'SPEC'
b_dict[data_type] = [tuple([line_dict[data_type], line_dict['SF']])]
else: # add the new data to the existing
the_list = b_dict[data_type]
the_list.append(tuple([line_dict[data_type], line_dict['SF']]))
b_dict[data_type] = the_list
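# Illustrative example (assumed E2K combo lines): feeding the gathered lines
#   [('COMBO', 'COMB1'), ('TYPE', 'Linear Add')]
#   [('COMBO', 'COMB1'), ('LOAD', 'DEAD'), ('SF', 1.4)]
# into combo_func builds the_dict['COMBO']['COMB1'] == {'TYPE': 'Linear Add', 'LOAD': [('DEAD', 1.4)]}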
def add_to_dict_list(the_dict, key, value):
value_list = the_dict.get(key,[])
value_list.append(value)
the_dict[key] = value_list
def story_func(the_dict, line, debug=False):
"""
    One of the challenges is that if a Tower has been defined, it needs to be
    carried over from previous lines (it is defined on one line only and then
    applies to all following story lines).
    NB `current_tower` needs to be defined in the current namespace"""
# Keep a list of stories
if not the_dict.get('Story_Lists'):
the_dict['Story_Lists'] = dict()
line_key = line[0][0] # 'STORY'
story_basic_name = str(line[0][1])
story_type = line[1][0] # e.g. 'HEIGHT', 'ELEV'
    line_dict = dict(line) # NB STORY is retained as a key-value pair
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2014 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Simple example that connects to the first Crazyflie found, logs the Stabilizer
and prints it to the console. After 10s the application disconnects and exits.
"""
from __future__ import division
import logging
import threading
import cflib.crtp # noqa
from drone_quaternion import Quadcopter
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.log import LogConfig
# Only output errors from the logging framework
logging.basicConfig(level=logging.ERROR)
# Keypress
import pygame, sys, time
from pygame.locals import *
SCREEN_WIDTH = 400
SCREEN_HEIGHT = 310
FONT_SIZE = 30
from numpy import *
from math import *
stop = False
logger = False
crazyflie = False
positionIndex = 0
ONLINE_CONTROLLER = True
MANUAL_TRAJECTORY = False
# Landing variables
land = False
land_altitude = False
land_altitude_step = 0.1 # 10cm
uri = 'radio://0/120/2M'
# uri = 'radio://0/40/250K'
x = 1.5
y = 1.4
z = 1.6
# x y z YAW
sequence = [(x, y, z, 0),
(1.5*x, y, z, 0),
(1.5*x, 0.5*y, z, 0),
(x, 0.5*y, z, 0),
(x, y, z, 0),
(x, y, 0.5, 0)
]
# Manual Trajectory
trajectoryStep = 0.10 # 10cm
manual = {'x': x, 'y': y, 'z': z}
positionLogger = False
quadcopter = Quadcopter(0.01)
def start_sequence(cf):
pygame.event.pump()
keys = pygame.key.get_pressed()
if keys[K_RETURN]:
print 'Sequence Started!'
cf.commander.send_setpoint(0, 0, 0, 0)
#for i in range(0, 500):
#cf.param.set_value('lqrCtrl.tM', '{}'.format(1.01))
#cf.param.set_value('lqrCtrl.rM', '{}'.format(-1))
#cf.param.set_value('lqrCtrl.pM', '{}'.format(-1))
# cf.param.set_value('flightmode.posSet', '1')
# cf.param.set_value('flightmode.poshold', '1')
return True
return False
def emergency_stop(cf):
pygame.event.pump()
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]:
print 'Emergency Stop!'
cf.commander.send_setpoint(0, 0, 0, 0)
#for i in range(0, 500):
#cf.param.set_value('lqrCtrl.tM', '{}'.format(1.01))
#cf.param.set_value('lqrCtrl.rM', '{}'.format(-1))
#cf.param.set_value('lqrCtrl.pM', '{}'.format(-1))
# cf.param.set_value('flightmode.posSet', '1')
# cf.param.set_value('flightmode.poshold', '1')
return True
return False
def text_to_screen(screen, text, x, y, size=50, color=(200, 000, 000)):
try:
basicfont = pygame.font.SysFont(None, size)
text = basicfont.render(text, True, color, (255, 255, 255))
text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, y+10))
screen.blit(text, text_rect)
pygame.display.update()
except Exception, e:
print 'Font Error, saw it coming'
raise e
class LoggingExample:
"""
Simple logging example class that logs the Stabilizer from a supplied
link uri and disconnects after 5s.
"""
def __init__(self, link_uri, maxLen):
# Attitude
self.q0 = 1.0
self.q1 = 0.0
self.q2 = 0.0
self.q3 = 0.0
self.wx = 0.0
self.wy = 0.0
self.wz = 0.0
# Linear
self.px = 0.0
self.py = 0.0
self.pz = 0.0
self.vx = 0.0
self.vy = 0.0
self.vz = 0.0
self.maxLen = maxLen
self.data = []
# Create a Crazyflie object without specifying any cache dirs
self._cf = Crazyflie()
# Initialize the low-level drivers (don't list the debug drivers)
cflib.crtp.init_drivers(enable_debug_driver=False)
# Scan for Crazyflies and use the first one found
print('Scanning interfaces for Crazyflies...')
available = cflib.crtp.scan_interfaces()
print('Crazyflies found:')
for i in available:
print i[0]
if len(available) <= 0:
print('No Crazyflies found, cannot run example')
# Connect some callbacks from the Crazyflie API
self._cf.connected.add_callback(self._connected)
self._cf.disconnected.add_callback(self._disconnected)
self._cf.connection_failed.add_callback(self._connection_failed)
self._cf.connection_lost.add_callback(self._connection_lost)
print('Connecting to %s' % link_uri)
# Try to connect to the Crazyflie
self._cf.open_link(link_uri)
# Variable used to keep main loop occupied until disconnect
self.is_connected = True
self.status = False
self.landing = False
def crazyflie(self):
return self._cf
def close_link(self):
self._cf.close_link()
def getData(self):
return self.data
def log_status(self):
return self.status
def state(self):
return self.q0, self.q1, self.q2, self.q3, self.wx, self.wy, self.wz, self.px, self.py, self.pz, self.vx, self.vy, self.vz
def getLanding(self):
return self.landing
def enable_landing(self):
self.landing = True
def _connected(self, link_uri):
""" This callback is called form the Crazyflie API when a Crazyflie
has been connected and the TOCs have been downloaded."""
print('Connected to %s' % link_uri)
self.log_config1 = LogConfig(name='StateAngular1', period_in_ms=10)
self.log_config1.add_variable('state.q0', 'float')
self.log_config1.add_variable('state.q1', 'float')
self.log_config1.add_variable('state.q2', 'float')
self.log_config1.add_variable('state.q3', 'float')
self.log_config2 = LogConfig(name='StateAngular2', period_in_ms=10)
self.log_config2.add_variable('state.wx', 'float')
self.log_config2.add_variable('state.wy', 'float')
self.log_config2.add_variable('state.wz', 'float')
self.log_config3 = LogConfig(name='StateLinear', period_in_ms=10)
#self.log_config3.add_variable('state.px', 'float')
#self.log_config3.add_variable('state.py', 'float')
#self.log_config3.add_variable('state.pz', 'float')
#self.log_config3.add_variable('state.vx', 'float')
#self.log_config3.add_variable('state.vy', 'float')
#self.log_config3.add_variable('state.vz', 'float')
#self.log_config3.add_variable('ctrltarget.px', 'float')
#self.log_config3.add_variable('ctrltarget.py', 'float')
#self.log_config3.add_variable('ctrltarget.pz', 'float')
#self.log_config3.add_variable('ctrltarget.vx', 'float')
#self.log_config3.add_variable('ctrltarget.vy', 'float')
#self.log_config3.add_variable('ctrltarget.vz', 'float')
#self.log_config3.add_variable('ctrltarget.ax', 'float')
#self.log_config3.add_variable('ctrltarget.ay', 'float')
#self.log_config3.add_variable('ctrltarget.az', 'float')
self.log_config3.add_variable('Bks.dx', 'float')
self.log_config3.add_variable('Bks.dy', 'float')
self.log_config3.add_variable('Bks.dz', 'float')
self.log_config3.add_variable('Bks.dwx', 'float')
self.log_config3.add_variable('Bks.dwy', 'float')
self.log_config3.add_variable('Bks.dwz', 'float')
# Adding the configuration cannot be done until a Crazyflie is
# connected, since we need to check that the variables we
# would like to log are in the TOC.
try:
self._cf.log.add_config(self.log_config1)
self._cf.log.add_config(self.log_config2)
self._cf.log.add_config(self.log_config3)
# This callback will receive the data
self.log_config1.data_received_cb.add_callback(self._log_data_quaternion)
self.log_config2.data_received_cb.add_callback(self._log_data_angular)
self.log_config3.data_received_cb.add_callback(self._log_data_linear)
# This callback will be called on errors
self.log_config1.error_cb.add_callback(self._log_error)
self.log_config2.error_cb.add_callback(self._log_error)
self.log_config3.error_cb.add_callback(self._log_error)
# Start the logging
if not ONLINE_CONTROLLER:
self.log_config1.start()
self.log_config2.start()
self.log_config3.start()
print "Log succesfully started!"
self.status = True
except KeyError as e:
print('Could not start log configuration, {} not found in TOC'.format(str(e)))
except AttributeError:
print('Could not add Stabilizer log config, bad configuration.')
def _log_error(self, logconf, msg):
"""Callback from the log API when an error occurs"""
print('Error when logging %s: %s' % (logconf.name, msg))
def _log_data_quaternion(self, timestamp, data, logconf):
"""Callback froma the log API when data arrives"""
#self.plot.drawNow(np.random.random(self.plot.X.shape))
self.q0 = data['state.q0']
self.q1 = data['state.q1']
self.q2 = data['state.q2']
self.q3 = data['state.q3']
def _log_data_angular(self, timestamp, data, logconf):
"""Callback froma the log API when data arrives"""
self.wx = data['state.wx']
self.wy = data['state.wy']
self.wz = data['state.wz']
def _log_data_linear(self, timestamp, data, logconf):
"""Callback froma the log API when data arrives"""
#self.px = data['state.px']
#self.py = data['state.py']
#self.pz = data['state.pz']
#self.vx = data['state.vx']
#self.vy = data['state.vy']
#self.vz = data['state.vz']
self.data = data
print data
def _connection_failed(self, link_uri, msg):
"""Callback when connection initial connection fails (i.e no Crazyflie
at the speficied address)"""
print('Connection to %s failed: %s' % (link_uri, msg))
self.is_connected = False
def _connection_lost(self, link_uri, msg):
"""Callback when disconnected after a connection has been made (i.e
Crazyflie moves out of range)"""
print('Connection to %s lost: %s' % (link_uri, msg))
self._cf.open_link(link_uri)
def _disconnected(self, link_uri):
"""Callback when the Crazyflie is disconnected (called in all cases)"""
print('Disconnected from %s' % link_uri)
self.is_connected = False
# Control main loop
def control(interval, function, iterations=0):
global stop
if not stop:
if iterations != 1:
threading.Timer(interval, control, [interval, function, 0 if iterations == 0 else iterations-1]).start()
function()
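# Example usage: control(0.01, set_point) re-arms a threading.Timer every 10 ms and keeps calling
# set_point() until the global `stop` flag is set (or the iteration count runs out).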
t = 0
X = False
Xd = 0
Xdd = 0
Y = False
Yd = 0
Ydd = 0
step = 0
def set_point():
global positionIndex
global sequence
global quadcopter
global crazyflie
global logger
global stop
global land
global land_altitude
global land_altitude_step
global ONLINE_CONTROLLER
global t
global X
global Xd
global Xdd
global Y
global Yd
global Ydd
global step
if not logger.getLanding():
if not stop:
position = sequence[0]
'''
if not X:
X = position[0]
if not Y:
Y = position[0]
if t >= 10:
step = step + 1
if step == 3:
step = 0
t = 0
dt = 0.0001
if step == 0:
                    # X trajectory
if X < position[0]*1.5:
X = X + dt
Xd = dt
Xdd = 0
else:
X = position[0] * 1.5
Xd = 0
                    # Y trajectory
if Y > position[1] * 3 / 4:
Y = Y - dt
Yd = -dt
Ydd = 0
else:
Y = position[1] * 3 / 4
Yd = 0
elif step == 1:
if X > position[0]:
X = X - dt
Xd = -dt
Xdd = 0
else:
X = position[0]
Xd = 0
if Y >= position[1] * 0.5:
Y = Y - dt
Yd = -dt
Ydd = 0
else:
Y = position[1] * 0.5
Yd = 0
else:
X = position[0]
Xd = 0
Y = Y + dt
Yd = dt
                Ydd = 0
is None or existing_policy.window is None or
(existing_policy.window.dailyMaintenanceWindow is None and
existing_policy.window.recurringWindow is None)):
raise util.Error(NOTHING_TO_UPDATE_ERROR_MSG)
existing_policy.window.dailyMaintenanceWindow = None
existing_policy.window.recurringWindow = None
return self._SendMaintenancePolicyRequest(cluster_ref, existing_policy)
def _NormalizeMaintenanceExclusionsForPolicy(self, policy):
"""Given a maintenance policy (can be None), return a normalized form.
This makes it easier to add and remove blackouts because the blackouts
list will definitely exist.
Args:
policy: The policy to normalize.
Returns:
The modified policy (note: modifies in place, but there might not have
even been an existing policy).
"""
empty_excl = self.messages.MaintenanceWindow.MaintenanceExclusionsValue()
if policy is None:
policy = self.messages.MaintenancePolicy(
window=self.messages.MaintenanceWindow(
maintenanceExclusions=empty_excl))
elif policy.window is None:
# Shouldn't happen due to defaulting on the server, but easy enough to
# handle.
policy.window = self.messages.MaintenanceWindow(
maintenanceExclusions=empty_excl)
elif policy.window.maintenanceExclusions is None:
policy.window.maintenanceExclusions = empty_excl
return policy
def _GetMaintenanceExclusionNames(self, maintenance_policy):
"""Returns a list of maintenance exclusion names from the policy."""
return [
p.key for p in
maintenance_policy.window.maintenanceExclusions.additionalProperties
]
def AddMaintenanceExclusion(self, cluster_ref, existing_policy, window_name,
window_start, window_end):
"""Adds a maintenance exclusion to the cluster's maintenance policy.
Args:
cluster_ref: The cluster to update.
existing_policy: The existing maintenance policy, if any.
window_name: Unique name for the exclusion. Can be None (will be
autogenerated if so).
window_start: Start time of the window as a datetime.datetime. Can be
None.
window_end: End time of the window as a datetime.datetime.
Returns:
Operation from this cluster update.
Raises:
Error if a maintenance exclusion of that name already exists.
"""
existing_policy = self._NormalizeMaintenanceExclusionsForPolicy(
existing_policy)
if window_start is None:
window_start = times.Now(times.UTC)
if window_name is None:
# Collisions from this shouldn't be an issue because this has millisecond
# resolution.
window_name = 'generated-exclusion-' + times.Now(times.UTC).isoformat()
if window_name in self._GetMaintenanceExclusionNames(existing_policy):
raise util.Error(
'A maintenance exclusion named {0} already exists.'.format(
window_name))
# Note: we're using external/python/gcloud_deps/apitools/base/protorpclite
# which does *not* handle maps very nicely. We actually have a
# MaintenanceExclusionsValue field that has a repeated additionalProperties
# field that has key and value fields. See
# third_party/apis/container/v1alpha1/container_v1alpha1_messages.py.
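    # Illustrative shape of what the code below builds (derived from this code, not the API docs):
    #   maintenanceExclusions.additionalProperties == [
    #       AdditionalProperty(key='my-exclusion', value=TimeWindow(startTime=..., endTime=...)), ...]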
exclusions = existing_policy.window.maintenanceExclusions
window = self.messages.TimeWindow(
startTime=window_start.isoformat(), endTime=window_end.isoformat())
exclusions.additionalProperties.append(
exclusions.AdditionalProperty(key=window_name, value=window))
return self._SendMaintenancePolicyRequest(cluster_ref, existing_policy)
def RemoveMaintenanceExclusion(self, cluster_ref, existing_policy,
exclusion_name):
"""Removes a maintenance exclusion from the maintenance policy by name."""
existing_policy = self._NormalizeMaintenanceExclusionsForPolicy(
existing_policy)
existing_exclusions = self._GetMaintenanceExclusionNames(existing_policy)
if exclusion_name not in existing_exclusions:
message = ('No maintenance exclusion with name {0} exists. Existing '
'exclusions: {1}.').format(exclusion_name,
', '.join(existing_exclusions))
raise util.Error(message)
props = []
for ex in existing_policy.window.maintenanceExclusions.additionalProperties:
if ex.key != exclusion_name:
props.append(ex)
existing_policy.window.maintenanceExclusions.additionalProperties = props
return self._SendMaintenancePolicyRequest(cluster_ref, existing_policy)
def ListUsableSubnets(self, project_ref, network_project, filter_arg):
"""List usable subnets for a given project.
Args:
project_ref: project where clusters will be created.
network_project: project ID where clusters will be created.
filter_arg: value of filter flag.
Returns:
Response containing the list of subnetworks and a next page token.
"""
filters = []
if network_project is not None:
filters.append('networkProjectId=' + network_project)
if filter_arg is not None:
filters.append(filter_arg)
filters = ' AND '.join(filters)
req = self.messages.ContainerProjectsAggregatedUsableSubnetworksListRequest(
# parent example: 'projects/abc'
parent=project_ref.RelativeName(),
# max pageSize accepted by GKE
pageSize=500,
filter=filters)
return self.client.projects_aggregated_usableSubnetworks.List(req)
class V1Adapter(APIAdapter):
"""APIAdapter for v1."""
class V1Beta1Adapter(V1Adapter):
"""APIAdapter for v1beta1."""
def CreateCluster(self, cluster_ref, options):
cluster = self.CreateClusterCommon(cluster_ref, options)
if options.addons:
# CloudRun is disabled by default.
if CLOUDRUN in options.addons:
if not options.enable_stackdriver_kubernetes:
raise util.Error(CLOUDRUN_STACKDRIVER_KUBERNETES_DISABLED_ERROR_MSG)
if INGRESS not in options.addons:
raise util.Error(CLOUDRUN_INGRESS_KUBERNETES_DISABLED_ERROR_MSG)
cluster.addonsConfig.cloudRunConfig = self.messages.CloudRunConfig(
disabled=False)
# CloudBuild is disabled by default.
if CLOUDBUILD in options.addons:
cluster.addonsConfig.cloudBuildConfig = self.messages.CloudBuildConfig(
enabled=True)
# Istio is disabled by default.
if ISTIO in options.addons:
istio_auth = self.messages.IstioConfig.AuthValueValuesEnum.AUTH_NONE
mtls = self.messages.IstioConfig.AuthValueValuesEnum.AUTH_MUTUAL_TLS
istio_config = options.istio_config
if istio_config is not None:
auth_config = istio_config.get('auth')
if auth_config is not None:
if auth_config == 'MTLS_STRICT':
istio_auth = mtls
cluster.addonsConfig.istioConfig = self.messages.IstioConfig(
disabled=False, auth=istio_auth)
if (options.enable_autoprovisioning is not None or
options.autoscaling_profile is not None):
cluster.autoscaling = self.CreateClusterAutoscalingCommon(
None, options, False)
if options.boot_disk_kms_key:
for pool in cluster.nodePools:
pool.config.bootDiskKmsKey = options.boot_disk_kms_key
if options.identity_namespace is not None:
cluster.workloadIdentityConfig = self.messages.WorkloadIdentityConfig(
identityNamespace=options.identity_namespace)
_AddReleaseChannelToCluster(cluster, options, self.messages)
req = self.messages.CreateClusterRequest(
parent=ProjectLocation(cluster_ref.projectId, cluster_ref.zone),
cluster=cluster)
operation = self.client.projects_locations_clusters.Create(req)
return self.ParseOperation(operation.name, cluster_ref.zone)
def UpdateCluster(self, cluster_ref, options):
update = self.UpdateClusterCommon(cluster_ref, options)
if options.identity_namespace:
update = self.messages.ClusterUpdate(
desiredWorkloadIdentityConfig=self.messages.WorkloadIdentityConfig(
identityNamespace=options.identity_namespace))
elif options.disable_workload_identity:
update = self.messages.ClusterUpdate(
desiredWorkloadIdentityConfig=self.messages.WorkloadIdentityConfig(
identityNamespace=''))
if options.enable_shielded_nodes is not None:
update = self.messages.ClusterUpdate(
desiredShieldedNodes=self.messages.ShieldedNodes(
enabled=options.enable_shielded_nodes))
if options.release_channel is not None:
update = self.messages.ClusterUpdate(
desiredReleaseChannel=_GetReleaseChannelForClusterUpdate(
options, self.messages))
if not update:
      # If we reach this point, either an update flag was added without being
      # handled above, or none of the update flags were specified on the
      # command line, so raise a readable "Nothing to update" error.
raise util.Error(NOTHING_TO_UPDATE_ERROR_MSG)
if options.disable_addons is not None:
if options.disable_addons.get(ISTIO) is not None:
istio_auth = self.messages.IstioConfig.AuthValueValuesEnum.AUTH_NONE
mtls = self.messages.IstioConfig.AuthValueValuesEnum.AUTH_MUTUAL_TLS
istio_config = options.istio_config
if istio_config is not None:
auth_config = istio_config.get('auth')
if auth_config is not None:
if auth_config == 'MTLS_STRICT':
istio_auth = mtls
update.desiredAddonsConfig.istioConfig = self.messages.IstioConfig(
disabled=options.disable_addons.get(ISTIO), auth=istio_auth)
if options.disable_addons.get(CLOUDRUN) is not None:
update.desiredAddonsConfig.cloudRunConfig = (
self.messages.CloudRunConfig(
disabled=options.disable_addons.get(CLOUDRUN)))
if options.disable_addons.get(APPLICATIONMANAGER) is not None:
update.desiredAddonsConfig.kalmConfig = (
self.messages.KalmConfig(
enabled=(not options.disable_addons.get(APPLICATIONMANAGER))))
if options.disable_addons.get(CLOUDBUILD) is not None:
update.desiredAddonsConfig.cloudBuildConfig = (
self.messages.CloudBuildConfig(
enabled=(not options.disable_addons.get(CLOUDBUILD))))
op = self.client.projects_locations_clusters.Update(
self.messages.UpdateClusterRequest(
name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
cluster_ref.clusterId),
update=update))
return self.ParseOperation(op.name, cluster_ref.zone)
def CreateClusterAutoscalingCommon(self, cluster_ref, options, for_update):
"""Create cluster's autoscaling configuration.
Args:
cluster_ref: Cluster reference.
options: Either CreateClusterOptions or UpdateClusterOptions.
for_update: Is function executed for update operation.
Returns:
Cluster's autoscaling configuration.
"""
# Patch cluster autoscaling if cluster_ref is provided.
autoscaling = self.messages.ClusterAutoscaling()
cluster = self.GetCluster(cluster_ref) if cluster_ref else None
if cluster and cluster.autoscaling:
autoscaling.enableNodeAutoprovisioning = \
cluster.autoscaling.enableNodeAutoprovisioning
resource_limits = []
if options.autoprovisioning_config_file is not None:
# Create using config file only.
config = yaml.load(options.autoprovisioning_config_file)
resource_limits = config.get(RESOURCE_LIMITS)
service_account = config.get(SERVICE_ACCOUNT)
scopes = config.get(SCOPES)
max_surge_upgrade = None
max_unavailable_upgrade = None
upgrade_settings = config.get(UPGRADE_SETTINGS)
if upgrade_settings:
max_surge_upgrade = upgrade_settings.get(MAX_SURGE_UPGRADE)
max_unavailable_upgrade = upgrade_settings.get(MAX_UNAVAILABLE_UPGRADE)
management_settings = config.get(NODE_MANAGEMENT)
enable_autoupgrade = None
enable_autorepair = None
if management_settings:
enable_autoupgrade = management_settings.get(ENABLE_AUTO_UPGRADE)
enable_autorepair = management_settings.get(ENABLE_AUTO_REPAIR)
autoprovisioning_locations = \
config.get(AUTOPROVISIONING_LOCATIONS)
else:
resource_limits = self.ResourceLimitsFromFlags(options)
service_account = options.autoprovisioning_service_account
scopes = options.autoprovisioning_scopes
autoprovisioning_locations = options.autoprovisioning_locations
max_surge_upgrade = options.autoprovisioning_max_surge_upgrade
max_unavailable_upgrade = options.autoprovisioning_max_unavailable_upgrade
enable_autoupgrade = options.enable_autoprovisioning_autoupgrade
enable_autorepair = options.enable_autoprovisioning_autorepair
if options.enable_autoprovisioning is not None:
autoscaling.enableNodeAutoprovisioning = options.enable_autoprovisioning
autoscaling.resourceLimits = resource_limits or []
if scopes is None:
scopes = []
management = None
upgrade_settings = None
if max_surge_upgrade is not None or max_unavailable_upgrade is not None:
upgrade_settings = self.messages.UpgradeSettings()
upgrade_settings.maxUnavailable = max_unavailable_upgrade
upgrade_settings.maxSurge = max_surge_upgrade
if enable_autorepair is not None or enable_autoupgrade is not None:
management = (self.messages.NodeManagement(
autoUpgrade=enable_autoupgrade, autoRepair=enable_autorepair))
autoscaling.autoprovisioningNodePoolDefaults = self.messages \
.AutoprovisioningNodePoolDefaults(serviceAccount=service_account,
oauthScopes=scopes,
upgradeSettings=upgrade_settings,
management=management)
if autoprovisioning_locations:
autoscaling.autoprovisioningLocations = \
sorted(autoprovisioning_locations)
if options.autoscaling_profile is not None:
autoscaling.autoscalingProfile = \
self.CreateAutoscalingProfileCommon(options)
self.ValidateClusterAutoscaling(autoscaling, for_update)
return autoscaling
def CreateAutoscalingProfileCommon(self, options):
"""Create and validate cluster's autoscaling profile configuration.
Args:
options: Either CreateClusterOptions or UpdateClusterOptions.
Returns:
Cluster's autoscaling profile configuration.
"""
profiles_enum = \
self.messages.ClusterAutoscaling.AutoscalingProfileValueValuesEnum
valid_choices = [arg_utils.EnumNameToChoice(n)
for n in profiles_enum.names()
if n != 'profile-unspecified']
return arg_utils.ChoiceToEnum(
choice=arg_utils.EnumNameToChoice(options.autoscaling_profile),
enum_type=profiles_enum,
valid_choices=valid_choices)
def ValidateClusterAutoscaling(self, autoscaling, for_update):
"""Validate cluster autoscaling configuration.
Args:
autoscaling: autoscaling configuration to be validated.
for_update: Is function executed for update operation.
Raises:
Error if the new configuration is invalid.
"""
if autoscaling.enableNodeAutoprovisioning:
if not for_update or autoscaling.resourceLimits:
cpu_found = any(
limit.resourceType == 'cpu' for limit in autoscaling.resourceLimits)
mem_found = any(limit.resourceType == 'memory'
for limit in autoscaling.resourceLimits)
if not cpu_found or not mem_found:
raise util.Error(NO_AUTOPROVISIONING_LIMITS_ERROR_MSG)
defaults = autoscaling.autoprovisioningNodePoolDefaults
if defaults:
if defaults.upgradeSettings:
max_surge_found = defaults.upgradeSettings.maxSurge is not None
max_unavailable_found = defaults.upgradeSettings.maxUnavailable is not None
if max_unavailable_found != max_surge_found:
raise util.Error(BOTH_AUTOPROVISIONING_UPGRADE_SETTINGS_ERROR_MSG)
if defaults.management:
auto_upgrade_found = defaults.management.autoUpgrade is not None
auto_repair_found = defaults.management.autoRepair is not None
if auto_repair_found != auto_upgrade_found:
raise util.Error(
BOTH_AUTOPROVISIONING_MANAGEMENT_SETTINGS_ERROR_MSG)
elif autoscaling.resourceLimits:
raise util.Error(LIMITS_WITHOUT_AUTOPROVISIONING_MSG)
elif autoscaling.autoprovisioningNodePoolDefaults and \
(autoscaling.autoprovisioningNodePoolDefaults.serviceAccount or
autoscaling.autoprovisioningNodePoolDefaults.oauthScopes or
autoscaling.autoprovisioningNodePoolDefaults.management or
autoscaling.autoprovisioningNodePoolDefaults.upgradeSettings):
raise util.Error(DEFAULTS_WITHOUT_AUTOPROVISIONING_MSG)
def UpdateNodePoolRequest(self, node_pool_ref, options):
"""Creates an UpdateNodePoolRequest from the provided options.
Arguments:
node_pool_ref: The node pool to act on.
options: UpdateNodePoolOptions with the user-specified options.
Returns:
An UpdateNodePoolRequest.
"""
update_request = self.messages.UpdateNodePoolRequest(
name=ProjectLocationClusterNodePool(
node_pool_ref.projectId,
node_pool_ref.zone,
node_pool_ref.clusterId,
node_pool_ref.nodePoolId,
))
if options.workload_metadata_from_node is not None:
_AddWorkloadMetadataToNodeConfig(update_request, options, self.messages)
elif options.node_locations is not None:
update_request.locations = sorted(options.node_locations)
elif (options.max_surge_upgrade is not None or
options.max_unavailable_upgrade is not None):
update_request.upgradeSettings = self.UpdateUpgradeSettings(
node_pool_ref, options)
return update_request
def UpdateNodePool(self, node_pool_ref, options):
# Repository: 100kimch/ros_galapagos
#!/usr/bin/env python3
# from lib_line_tracing # ! Deprecated
# from lib_signal_recognition # ! Deprecated
# NOTE: using the rospy library is not recommended in processor.py
import rospy
# NOTE: python 3.5^ needed to use asyncio
import asyncio
from lib_frontcam import *
from lib_fishcam import *
from lib_lidar import *
from lib_eye import *
from lib_parking import *
from turtlebot import TURTLE
from constants import IS_DEBUG_MODE, SELECTED_STATE
# NOTE: to check time:
import timeit
import math
# * Variables
################ Variables by minsoo #######################
CURRENT_STATE = 'inter_curving'
#CURRENT_STATE = 'intersection'
MOVING_POSITION = False
SEEN_PARKING_SIGN = False
SEEN_TUNNEL_SIGN = False
IS_IN_TUNNEL = False
#
# HAS_OBJECT_IN_50 = False
# HAS_OBJECT_IN_20 = False
straight_cnt = 0
curving_cnt = 0
LIDAR_FLAG = False
############################################################
GALAPAGOS_STATE = "view"
'''
if SELECTED_STATE == '':
CURRENT_STATE = 'traffic_light'
else:
CURRENT_STATE = SELECTED_STATE
'''
LINE_BASE = 'both'
MOVING_POSITION = False # ! Deprecated
DIRECTION = 'left'
SEEN_LEFT_SIGN = False
SEEN_PARKING_SIGN = False # ! Deprecated
SEEN_TUNNEL_SIGN = False # ! Deprecated
IS_IN_TUNNEL = False
SEEN_STOPPING_SIGN = False
SIGN_CORNER = None
# DISTANCE_FRONT = 0.00 # ! Deprecated
# HAS_OBJECT_IN_50 = False
# HAS_OBJECT_IN_20 = False
HAS_BOTH_LINES = False
IS_TURNING = False
TRACKING = "fish"
TURNING_TO = False
TEST_ANGULAR = 0
TEST_ONCE = True
# STATE_CONSTRUCTION = "start"
STATE_CONSTRUCTION = "searching"
STATE_TUNNEL = "inside"
TURN_COUNT = 0
# * Methods
def initialize():
EYE.calibrate()
TURTLE.set_speed('normal')
def reset_front_image_flags():
global LINE_BASE
global MOVING_POSITION
global SEEN_PARKING_SIGN
global SEEN_TUNNEL_SIGN
global SEEN_STOPPING_SIGN
LINE_BASE = 'both'
MOVING_POSITION = False
SEEN_PARKING_SIGN = False
SEEN_TUNNEL_SIGN = False
SEEN_STOPPING_SIGN = False
def reverse_direction():
global DIRECTION
if DIRECTION == 'right':
DIRECTION = 'left'
else:
DIRECTION = 'right'
rospy.loginfo('\n[PROC] direction changed to ' + DIRECTION)
def track_front(event=None):
global TRACKING
EYE.reset_state()
TRACKING = "front"
# rospy.loginfo("\n[PROC] tracking changed to " + TRACKING)
return
def track_fish(event=None):
global TRACKING
TRACKING = "fish"
# rospy.loginfo("\n[PROC] tracking changed to " + TRACKING)
return
def process_frontcam(image):
""" process the image of raspicam """
global CURRENT_STATE
global MOVING_POSITION
global SEEN_PARKING_SIGN
global SEEN_TUNNEL_SIGN
global LIDAR_FLAG
raw_data = np.fromstring(image.data, np.uint8)
cv_img = cv2.imdecode(raw_data, cv2.IMREAD_COLOR)
#### ROI SETTING ######
blob_ROI = cv_img[100:, :]
#######################
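    # State flow implemented below (as coded): traffic_light -> intersection -> left_or_right
    # -> inter_curving -> construction; construct_recog is a separate entry that also switches
    # to construction.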
if CURRENT_STATE == 'traffic_light':
if is_light_green(cv_img):
TURTLE.enable()
#TURTLE.set_speed('fast')
print("detected green")
CURRENT_STATE = 'intersection'
#TURTLE.set_weight(0.8)
return
else:
print("no green")
TURTLE.enable()
#TURTLE.set_speed('fast')
print("detected green")
CURRENT_STATE = 'intersection'
#TURTLE.set_weight(0.8)
return
if CURRENT_STATE == 'intersection':
cv2.imshow("blob_ROI",blob_ROI)
# cv2.waitKey(1)
print("intersection state")
if is_intersection(cv_img):
TURTLE.set_weight(0.8)
CURRENT_STATE = 'left_or_right'
print("intersection detected!!")
return
else:
return
if CURRENT_STATE == 'left_or_right':
print("left or right state")
cv2.imshow("blob_ROI",blob_ROI)
# cv2.waitKey(1)
tmp_state = check_left_right_sign(blob_ROI)
print("tmp state: ",tmp_state)
if tmp_state == 'right':
#print("11tmp state: ",tmp_state, ", right cnt: ",inter_right_cnt)
TURTLE.LINE_BASE = 2
#print("11tmp state: ",tmp_state, ", right cnt: ",inter_right_cnt)
CURRENT_STATE = 'inter_curving'
TURTLE.set_weight(1.0)
elif tmp_state == 'left':
#print("11tmp state: ",tmp_state, ", left cnt: ",inter_left_cnt)
TURTLE.LINE_BASE = 1
#print("22tmp state: ",tmp_state, ", left cnt: ",inter_left_cnt)
CURRENT_STATE = 'inter_curving'
TURTLE.set_weight(1.0)
elif tmp_state == 'none':
return
if CURRENT_STATE == 'inter_curving':
print("#################################################")
print("########### inter_curving state #################")
print("#################################################")
global straight_cnt
if abs(TURTLE.weight*TURTLE._angular) < 0.1:
straight_cnt += 1
print("straight counting : ",straight_cnt," is counted")
if straight_cnt > 5:
straight_cnt = 0
TURTLE.LINE_BASE = 2
CURRENT_STATE = 'construction'
return
else:
return
else:
straight_cnt = 0
return
if CURRENT_STATE == 'construct_recog':
tmp_state = is_construction(blob_ROI)
print(tmp_state)
if tmp_state is True:
TURTLE.LINE_BASE = 2
CURRENT_STATE = 'construction'
#LIDAR_FLAG = True
else:
return
if CURRENT_STATE == 'construction':
return
# if CURRENT_STATE == 'construction':
# '''
# task for Ji-hyung
# '''
# TURTLE.LINE_BASE = 1
# CURRENT_STATE = 'parking'
# pass
'''
if CURRENT_STATE == 'stop_sign':
if stop_sign_flag == False:
sign_stop = is_stopping_sign(image)
if sign_stop == True:
stop_sign_flag = True
return
else:
return
else:
sign_stop = is_stopping_sign(image)
if sign_stop == False:
stop_false_cnt = stop_false_cnt + 1
if stop_false_cnt > 6 :
CURRENT_STATE = 'construction'
return
else:
return
else:
stop_false_cnt = 0
return
# if CURRENT_STATE == 'construction':
# TURTLE.set_speed("slow")
# if HAS_OBJECT_IN_20:
TURTLE.turn(MOVING_POSITION, 15, 3)
if MOVING_POSITION == 'left':
MOVING_POSITION = 'right'
else:
MOVING_POSITION = 'left'
return
else:
if has_crossing_line(image):
if MOVING_POSITION == 'left':
moving_to = 'right'
else:
moving_to = 'left'
TURTLE.turn(moving_to, 15, 3)
else:
# TODO: combine trace_line() + trace_blocking()
if get_num_of_lines(image) == 2:
CURRENT_STATE = 'normal'
else:
return
if CURRENT_STATE == 'parking':
# TODO: finish code of parking state
TURTLE.set_speed('normal')
return
if CURRENT_STATE == 'blocking_bar':
# TODO: finish code of blocking_bar state
TURTLE.set_speed('normal')
return
if CURRENT_STATE == 'tunnel':
# TODO: finish code of tunnel state
TURTLE.set_speed('normal')
return
# ending the normal state:
if CURRENT_STATE == 'normal':
reset_front_image_flags()
TURTLE.set_speed('fast')
return
'''
'''
def process_frontcam(image):
""" process the image of raspicam """
global CURRENT_STATE
global MOVING_POSITION
global SEEN_PARKING_SIGN
global SEEN_TUNNEL_SIGN
global LINE_BASE
global SEEN_STOPPING_SIGN
if CURRENT_STATE == 'traffic_light':
if is_light_green(image):
if IS_DEBUG_MODE == True:
TURTLE.set_speed('fast')
rospy.loginfo('\n[PROC] Current state: normal')
CURRENT_STATE = 'normal'
else:
rospy.logdebug('Debug mode finished')
raise rospy.ROSInterruptException
else:
return
if CURRENT_STATE == 'normal':
LINE_BASE = 'both'
sign = check_sign(image)
if sign == 'intersection':
TURTLE.set_speed('normal')
rospy.loginfo('\n[PROC] Current state: intersection')
CURRENT_STATE = 'intersection'
elif sign == 'construction':
TURTLE.set_speed('slow')
rospy.loginfo('\n[PROC] Current state: construction')
DIRECTION = 'right'
LINE_BASE = 'left'
CURRENT_STATE = 'construction'
elif sign == 'parking':
# SEEN_PARKING_SIGN = True # ! Deprecated
TURTLE.set_speed('normal')
rospy.loginfo('\n[PROC] Current state: parking')
CURRENT_STATE = 'parking'
# return
elif HAS_OBJECT_IN_50:
TURTLE.set_speed('slow')
rospy.loginfo('\n[PROC] Current state: blocking_bar')
CURRENT_STATE = 'blocking_bar'
elif sign == 'tunnel':
# SEEN_TUNNEL_SIGN = True # ! Deprecated
TURTLE.set_speed('normal')
rospy.loginfo('\n[PROC] Current state: tunnel')
TURTLE.turn_by_degree(45, 0) # TODO: support integer deg value
# return
elif is_straight_in(10, image):
if is_straight_in(50, image):
TURTLE.increase_speed()
else:
TURTLE.decrease_speed()
return
else:
TURTLE.set_speed('normal')
return
if CURRENT_STATE == 'intersection':
sign_corner = check_sign(image)
if sign_corner == None:
if SEEN_STOPPING_SIGN:
TURTLE
if sign_corner == None:
LINE_BASE = 'both'
else:
if sign_corner == 'left':
LINE_BASE = 'left'
elif sign_corner == 'right':
LINE_BASE = 'right'
if is_stopping_sign(image):
SEEN_STOPPING_SIGN = True
else:
if SEEN_STOPPING_SIGN:
CURRENT_STATE = 'normal'
else:
return
# if CURRENT_STATE == 'construction':
# TURTLE.set_speed('slow')
# if HAS_OBJECT_IN_20:
TURTLE.turn(MOVING_POSITION, 15, 3)
if MOVING_POSITION == 'left':
MOVING_POSITION = 'right'
else:
MOVING_POSITION = 'left'
return
else:
if has_crossing_line(image):
if MOVING_POSITION == 'left':
moving_to = 'right'
else:
moving_to = 'left'
TURTLE.turn(moving_to, 15, 3)
else:
# TODO: combine trace_line() + trace_blocking()
if get_num_of_lines(image) == 2:
CURRENT_STATE = 'normal'
else:
return
if CURRENT_STATE == 'parking':
# TODO: finish code of parking state
TURTLE.set_speed('normal')
return
if CURRENT_STATE == 'blocking_bar':
# TODO: finish code of blocking_bar state
TURTLE.set_speed('normal')
return
if CURRENT_STATE == 'tunnel':
# TODO: finish code of tunnel state
TURTLE.set_speed('normal')
return
# ending the normal state:
if CURRENT_STATE == 'normal':
if IS_DEBUG_MODE == True:
rospy.logdebug('Debug mode finished')
raise rospy.ROSInterruptException
else:
rospy.loginfo('\n[PROC] Current state: normal')
reset_front_image_flags()
TURTLE.set_speed('fast')
return
'''
def idle(event=None):
""" idle process """
global TRACKING
rospy.loginfo("\n[PROC] idle executed")
TRACKING = "idle"
# TURTLE.disable()
rospy.spin()
# while True:
# rospy.loginfo('tracking:' + TRACKING)
return
def process_fishcam(image):
""" trace side lines by base
if base is 'left': trace left line
if base is 'both': trace both lines
if base is 'right': trace right line
return value is not needed.
"""
if TURTLE._enable_running is False:
return
if TURTLE.enable_fish is False:
return
if TURTLE.LINE_BASE == 3:
trace_line(image)
elif TURTLE.LINE_BASE == 1 or TURTLE.LINE_BASE == 2:
# rospy.loginfo("[LINE] trace_one_line(" + str(LINE_BASE) + ")")
trace_one_line(image, TURTLE.LINE_BASE)
# use TURTLE.set_
# use TURTLE.set_angular(angular)
return
'''
def process_fishcam(image):
""" process the fisheye lens image """
start = timeit.default_timer()
global LINE_BASE
global TRACKING
global TURNING_TO
global TEST_ANGULAR
global TEST_ONCE
if TRACKING is not "fish":
return
if not EYE.is_fish_occupied():
# if True:
info = EYE.see_bottom(image)
# rospy.Timer(rospy.Duration(0.04), EYE.release_fish_occupied, oneshot=True, reset=True)
if info is None:
print("NO INFO!")
# TURTLE.set_angular_smooth(0.12)
# pass
else:
# rospy.loginfo("\n[PROC] info: " + str(info))
# TURTLE.set_speed('slow')
# if info["slope"]:
# TURTLE.set_speed_by_percentage(-abs(info["slope"] / 6))
# else:
TURTLE.set_speed('normal')
if info["right"] < 640:
rospy.loginfo("\n[PROC] info: " + str(info))
TURTLE.set_angular(0.75 + abs(info["slope"]) * 1.8)
TURTLE.set_angular_smooth(-0.1)
else:
if TEST_ANGULAR is not 1:
if info["left"] is 0:
if TEST_ONCE:
TURTLE.set_angular(0.12)
TURTLE.set_angular_smooth(0.05)
else:
TURTLE.set_angular(0.12)
TURTLE.set_angular_smooth(0.05)
TEST_ONCE = False
# elif info["left"] < 7 and info["left"] > 0:
# TURTLE.set_angular_smooth(0.1)
TEST_ANGULAR = 1
if info["left"] > 0 and info["left"] <= 10:
TURTLE.set_angular(0)
TEST_ANGULAR = 0
# if TEST_ANGULAR is not -1:
if info["left"] > 10:
TURTLE.set_angular(-0.75 + -abs(info["slope"]) * 1.8)
TURTLE.set_angular_smooth(-0.1)
TEST_ANGULAR = -1
rospy.Timer(rospy.Duration(0.05), EYE.release_fish_occupied, oneshot=True, reset=True)
# EYE.release_fish_occupied()
end = timeit.default_timer()
print("l: {:d}".format(info["left"]) + " s: {:.01f}".format(info["slope"])
+ " time: {:.02f}".format(end - start))
# print(end - start)
# print("turning to: " + str(TURNING_TO))
# if TURNING_TO:
# TURTLE.turn(TURNING_TO, 2.3)
# TURNING_TO = None
# rospy.Timer(rospy.Duration(2.3), track_front,
at the moment because it is optimized for power!'
# arm extension during toss
if user.get_average_data('shoulder2wrist_left')[1] < (playerleft_arm.get_average_data()[1] * 0.80):
arm_tip_load_left_first = 'Non-dominant arm extension throughout the toss is significantly inconsistent. Try to keep your left arm a lot straighter on the takeback and load.'
arm_tip_load_left_second = 'the extension of your non-dominant arm throughout the toss is significantly inconsistent. Try to keep your left arm a lot straighter on the takeback and load.'
score += 7
elif user.get_average_data('shoulder2wrist_left')[1] < (playerleft_arm.get_average_data()[1] * 0.90):
arm_tip_load_left_first = 'Non-dominant arm extension throughout the toss is inconsistent. Try to keep your left arm straighter on the takeback and load.'
arm_tip_load_left_second = 'the extension of your non-dominant arm throughout the toss is inconsistent. Try to keep your left arm straighter on the takeback and load.'
score += 4
elif user.get_average_data('shoulder2wrist_left')[1] < (playerleft_arm.get_average_data()[1] * 0.95):
arm_tip_load_left_first = 'Non-dominant arm extension throughout the toss is slightly inconsistent. Try to keep your left arm a bit straighter on the takeback and load.'
arm_tip_load_left_second = 'the extension of your non-dominant arm throughout the toss is slightly inconsistent. Try to keep your left arm a bit straighter on the takeback and load.'
score += 2
else:
arm_tip_load_left_first = 'Your tossing arm looks very fluid and consistent!'
arm_tip_load_left_second = 'Great work on the tossing arm in the load! It looks very fluid and consistent.'
pick_from_this_right = [arm_tip_load_right_first, arm_tip_load_right_second]
pick_from_this_left = [arm_tip_load_left_first, arm_tip_load_left_second]
arm_tip_load_right = random.choice(pick_from_this_right)
arm_tip_load_left = random.choice(pick_from_this_left)
arm_tips_load = [arm_tip_load_right, arm_tip_load_left, score]
return arm_tips_load
def arms_tips_extend(user, playerright_arm):
arm_tip_extend_right_first = 'None'
arm_tip_extend_right_second = 'None'
score = 0
if user.get_max_data('shoulder2wrist_right')[2] < (playerright_arm.get_max_data()[2] * 0.80):
arm_tip_extend_right_first = 'Dominant arm is not extending nearly enough during contact. Make sure to either toss the ball a lot higher and/or make contact at its apex.'
arm_tip_extend_right_second = 'When you make contact at the apex of the ball toss, make sure that your arm is extended out significantly more.'
score += 7
elif user.get_max_data('shoulder2wrist_right')[2] < (playerright_arm.get_max_data()[2] * 0.90):
arm_tip_extend_right_first = 'Dominant arm is not extending enough during contact. Make sure to either toss the ball higher and/or make contact at its apex.'
arm_tip_extend_right_second = 'When you make contact at the apex of the ball toss, make sure that your arm is extended out more.'
score += 4
elif user.get_max_data('shoulder2wrist_right')[2] < (playerright_arm.get_max_data()[2] * 0.95):
arm_tip_extend_right_first = 'Dominant arm is not extending quite enough during contact. Make sure to either toss the ball a bit higher and/or make contact at its apex.'
arm_tip_extend_right_second = 'When you make contact at the apex of the ball toss, make sure that your arm is extended out slightly more.'
score += 2
else:
arm_tip_extend_right_first = 'Good job, your dominant arm is optimally extended on contact!'
arm_tip_extend_right_second = 'Nice, your dominant arm is extended the perfect amount!'
pick_from_this_right = [arm_tip_extend_right_first, arm_tip_extend_right_second]
arm_tip_extend_right = random.choice(pick_from_this_right)
arm_tips_extend = [arm_tip_extend_right, score]
return arm_tips_extend
def arms_tips_finish(user, playerleft_arm):
arm_tip_finish_left_first = 'None'
arm_tip_finish_left_second = 'None'
score = 0
if user.get_min_data('shoulder2wrist_left')[3] > (playerleft_arm.get_min_data()[3] * 1.20):
arm_tip_finish_left_first = 'Your non-dominant arm should be significantly closer to your body in preparation for an easier recovery of the racquet.'
arm_tip_finish_left_second = 'Recovery is much easier if your non-dominant arm is closer to your body. Try bringing your non-dominant arm significantly closer to your body on the finish.'
score += 7
elif user.get_min_data('shoulder2wrist_left')[3] > (playerleft_arm.get_min_data()[3] * 1.10):
arm_tip_finish_left_first = 'Your non-dominant arm should be closer to your body in preparation for an easier recovery of the racquet.'
arm_tip_finish_left_second = 'Recovery is much easier if your non-dominant arm is closer to your body. Try bringing your non-dominant arm closer to your body on the finish.'
score += 4
elif user.get_min_data('shoulder2wrist_left')[3] > (playerleft_arm.get_min_data()[3] * 1.05):
arm_tip_finish_left_first = 'Your non-dominant arm should be slightly closer to your body in preparation for an easier recovery of the racquet.'
arm_tip_finish_left_second = 'Recovery is much easier if your non-dominant arm is closer to your body. Try bringing your non-dominant arm slightly closer to your body on the finish.'
score += 2
else:
arm_tip_finish_left_first = 'Nice, your arms seem to be positioned correctly on the finish! Racquet recovery is much easier with optimal arm placement.'
arm_tip_finish_left_second = 'Good job, your arms seem to be positioned correctly on the finish! Racquet recovery is much easier with optimal arm placement.'
pick_from_this_left = [arm_tip_finish_left_first, arm_tip_finish_left_second]
arm_tip_finish_left = random.choice(pick_from_this_left)
arm_tips_finish = [arm_tip_finish_left, score]
return arm_tips_finish
def arm_tip_summary(user, playerright_arm, playerleft_arm):
full_arm_list = []
arm_start = arms_tips_start(user, playerright_arm, playerleft_arm)
arm_load = arms_tips_load(user, playerright_arm, playerleft_arm)
arm_extend = arms_tips_extend(user, playerright_arm)
arm_finish = arms_tips_finish(user, playerleft_arm)
arm_tip_list = [arm_start, arm_load, arm_extend, arm_finish]
for i in arm_tip_list:
for j in i:
if type(j) != int:
full_arm_list.append(j)
return full_arm_list
def arm_score_quant(user, playerright_arm, playerleft_arm):
arm_start = arms_tips_start(user, playerright_arm, playerleft_arm)
arm_load = arms_tips_load(user, playerright_arm, playerleft_arm)
arm_extend = arms_tips_extend(user, playerright_arm)
arm_finish = arms_tips_finish(user, playerleft_arm)
arm_tip_list = [arm_start, arm_load, arm_extend, arm_finish]
score = []
for i in arm_tip_list:
score.append(i[-1])
return score
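# Illustrative combination of the two helpers above (a sketch, not part of the
# original file; `user` and the pro reference-arm objects are built elsewhere
# in this project, so the names below are hypothetical):
#   tips = arm_tip_summary(user, pro_right_arm, pro_left_arm)       # list of feedback strings
#   penalty = sum(arm_score_quant(user, pro_right_arm, pro_left_arm))  # penalty points (2/4/7 per failed check)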
# body
def body_tips_start(user, playerright_body):
body_tip_start_first = 'None'
body_tip_start_second = 'None'
score_lost = 0
# less than the pro angles
if user.get_min_data('elbow2hip_right')[0] < (playerright_body.get_min_data()[0] * 0.80):
body_tip_start_first = 'Your dominant arm is hanging significantly too low on the starting position. Try raising your arm by a large amount.'
body_tip_start_second = 'Raise your dominant arm by a significant amount because it is hanging too low on the starting position'
score_lost += 7
elif user.get_min_data('elbow2hip_right')[0] < (playerright_body.get_min_data()[0] * 0.90):
body_tip_start_first = 'Your dominant arm is hanging too low on the starting position. Try raising your arm.'
body_tip_start_second = 'Raise your dominant arm because it is hanging too low on the starting position'
score_lost += 4
elif user.get_min_data('elbow2hip_right')[0] < (playerright_body.get_min_data()[0] * 0.95):
body_tip_start_first = 'Your dominant arm is hanging slightly too low on the starting position. Try raising your arm by a small amount.'
body_tip_start_second = 'Raise your dominant arm by a slight amount because it is hanging too low on the starting position'
score_lost += 2
# greater than the pro angles
elif user.get_min_data('elbow2hip_right')[0] > (playerright_body.get_max_data()[0] * 1.20):
body_tip_start_first = 'Your dominant arm is raised significantly too high on the starting position. Try lowering your arm by a large amount.'
body_tip_start_second = 'Lower your dominant arm by a large amount because it is raised significantly too high on the starting position'
score_lost += 7
elif user.get_min_data('elbow2hip_right')[0] > (playerright_body.get_max_data()[0] * 1.10):
body_tip_start_first = 'Your dominant arm is raised too high on the starting position. Try lowering your arm.'
body_tip_start_second = 'Lower your dominant arm because it is raised too high on the starting position'
score_lost += 4
elif user.get_min_data('elbow2hip_right')[0] > (playerright_body.get_max_data()[0] * 1.05):
body_tip_start_first = 'Your dominant arm is raised slightly too high on the starting position. Try lowering your arm by a small amount.'
body_tip_start_second = 'Lower your dominant arm by a small amount because it is raised slightly too high on the starting position'
score_lost += 2
else:
body_tip_start_first = 'Your upper arms are the perfect distance from your body!'
body_tip_start_second = 'Nice job, your upper arms are the perfect distance from your body!'
pick_from_this_body = [body_tip_start_first, body_tip_start_second]
body_tip_start = random.choice(pick_from_this_body)
return [body_tip_start, score_lost]
def body_tips_load(user, playerleft_body):
score_lost = 0
body_tip_load_left_first = 'None'
body_tip_load_left_second = 'None'
if user.get_max_data('elbow2hip_left')[1] < (playerleft_body.get_max_data()[1] * 0.80):
body_tip_load_left_first = 'The tossing side of your body is significantly under-stretching during the load. Try to reach up with your tossing arm a lot more.'
body_tip_load_left_second = 'Make sure to reach up with your tossing arm a lot more during the load because the tossing side of your body is significantly under-stretching.'
score_lost += 7
elif user.get_min_data('elbow2hip_left')[1] < (playerleft_body.get_min_data()[1] * 0.90):
body_tip_load_left_first = 'The tossing side of your body is under-stretching during the load. Try to reach up with your tossing arm more.'
body_tip_load_left_second = 'Make sure to reach up with your tossing arm more during the load because the tossing side of your body is under-stretching.'
score_lost += 4
elif user.get_min_data('elbow2hip_left')[1] < (playerleft_body.get_min_data()[1] * 0.95):
body_tip_load_left_first = 'The tossing side of your
<filename>tools/utils/losses.py
"""
source code:
x
https://github.com/chtaal/pystoi
"""
import tensorflow.keras.backend as K
import numpy as np
import tensorflow as tf
import functools
from TrialsOfNeuralVocalRecon.tools.utils.OBM import OBM as oooooo
import TrialsOfNeuralVocalRecon.tools.utils.pmsqe as pmsqe
import TrialsOfNeuralVocalRecon.tools.utils.perceptual_constants as perceptual_constants
tf.keras.backend.set_floatx('float32')
if tf.__version__[:2] == '1.':
window_fn = functools.partial(tf.signal.hann_window, periodic=True)
tf_signal = tf.signal
tf_log = tf.log
elif tf.__version__[:2] == '2.':
window_fn = functools.partial(tf.signal.hann_window, periodic=True)
tf_signal = tf.signal
tf_log = tf.math.log
else:
raise NotImplementedError
def thirdoct(fs, nfft, num_bands, min_freq):
""" Returns the 1/3 octave band matrix and its center frequencies
# Arguments :
fs : sampling rate
nfft : FFT size
num_bands : number of 1/3 octave bands
min_freq : center frequency of the lowest 1/3 octave band
# Returns :
obm : Octave Band Matrix
cf : center frequencies
"""
f = np.linspace(0, fs, nfft + 1)
f = f[:int(nfft / 2 + 1)]
k = np.array(range(num_bands)).astype(float)
cf = np.power(2. ** (1. / 3), k) * min_freq
freq_low = min_freq * np.power(2., (2 * k - 1) / 6)
freq_high = min_freq * np.power(2., (2 * k + 1) / 6)
obm = np.zeros((num_bands, len(f))) # to be verified
for i in range(len(cf)):
# Match 1/3 oct band freq with fft frequency bin
f_bin = np.argmin(np.square(f - freq_low[i]))
freq_low[i] = f[f_bin]
fl_ii = f_bin
f_bin = np.argmin(np.square(f - freq_high[i]))
freq_high[i] = f[f_bin]
fh_ii = f_bin
# Assign to the octave band matrix
obm[i, fl_ii:fh_ii] = 1
return obm.astype(np.float32), cf
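# Illustrative usage (added note, not part of the original file), with the
# STOI/ESTOI settings used further below in this module:
#   obm, cf = thirdoct(fs=10000, nfft=512, num_bands=15, min_freq=150)
#   # obm has shape (num_bands, nfft // 2 + 1) = (15, 257); cf holds the 15
#   # one-third-octave band centre frequencies.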
def log10(x):
numerator = tf_log(x)
denominator = tf_log(tf.constant(10, dtype=numerator.dtype))
return numerator / denominator
'''
""" This is Luca's version. it was wrong?"""
def random_segSNR_loss(fs=15000):
# proper definition: Speech enhancement using super-Gaussian speech models and noncausal a priori SNR estimation
# half window of 32ms, N in the paper, eq (27)
w = tf.cast(32/1000*fs/2, tf.int32)
def rsSNR(y_true, y_pred):
sound_len = tf.shape(y_true)[1]
nw = tf.cast(sound_len/w, tf.int32)
random_downsampling = tf.random.uniform(shape=[], minval=1, maxval=nw, dtype=tf.int32)
print(random_downsampling)
ds_true = y_true[:, ::random_downsampling*w]
ds_pred = y_pred[:, ::random_downsampling*w]
print(y_pred.shape)
print(ds_pred.shape)
num = tf.reduce_sum(tf.square(ds_pred), axis=1)
den = tf.reduce_sum(tf.square(ds_pred - ds_true), axis=1)
loss = 10 * log10(num) - 10 * log10(den)
return tf.reduce_mean(loss)
return rsSNR
'''
def threshold(x):
x_max=tf.math.maximum(x,-20)
return tf.math.minimum(x_max,35)
def segSNR_loss(fs=15000):
# proper definition: Speech enhancement using super-Gaussian speech models and noncausal a priori SNR estimation
# half window of 32ms, N in the paper, eq (27)
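# Added note: the loop below computes a clamped segmental SNR. For each 32 ms
# window l it evaluates 10*log10(sum(x_l^2) / sum((x_l - x_hat_l)^2)), clips
# the result to [-20, 35] dB via threshold(), and returns the negated
# per-example average over windows so it can be minimised as a loss.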
w = tf.cast(32/1000*fs, tf.int32) #windows of 32ms
#shift=tf.cast(16/1000*fs, tf.int32) #shift half a window
def rsSNR(y_true, y_pred): ##both have to be the same type of float
sound_len = tf.shape(y_true)[1]
nw = tf.cast((sound_len/w), tf.int32)#/shift, tf.int32)
y_true = tf.squeeze(y_true, axis=-1)
y_pred = tf.squeeze(y_pred, axis=-1)
loss=0.0
for l in range(0,nw):
num = tf.reduce_sum(tf.square(tf.slice(y_true,[0,int(l*w)],[-1,w-1])), axis=1)#int(l*w/2)],[-1,w-1])), axis=1)
den = tf.reduce_sum(tf.square(tf.slice(y_true,[0,int(l*w)],[-1,w-1]) - tf.slice(y_pred,[0,int(l*w)],[-1,w-1])), axis=1)
loss_i= 10 * log10(num) - 10 * log10(den)
loss_i=threshold(loss_i)
loss=loss+loss_i
return -loss/tf.cast(nw,tf.float32)
return rsSNR
def si_sdr_loss(y_true, y_pred):
# print("######## SI-SDR LOSS ########")
x = tf.squeeze(y_true, axis=-1)
y = tf.squeeze(y_pred, axis=-1)
smallVal = 1e-9 # To avoid divide by zero
a = K.sum(y * x, axis=-1, keepdims=True) / (K.sum(x * x, axis=-1, keepdims=True) + smallVal)
xa = a * x
xay = xa - y
d = K.sum(xa * xa, axis=-1, keepdims=True) / (K.sum(xay * xay, axis=-1, keepdims=True) + smallVal)
# d1=tf.zeros(d.shape)
d1 = d == 0
d1 = 1 - tf.cast(d1, tf.float32)
d = -K.mean(10 * d1 * log10(d + smallVal))
return d
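# Illustrative usage (a sketch, not from the original file): these losses plug
# straight into Keras and expect tensors shaped (batch, samples, 1).
#   model.compile(optimizer='adam', loss=si_sdr_loss)
#   # factory-style losses return the actual loss function:
#   model.compile(optimizer='adam', loss=segSNR_loss(fs=15000))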
def calc_sdr(estimation, origin):
"""
batch-wise SDR calculation for one audio file.
estimation: (batch, nsample)
origin: (batch, nsample)
"""
origin_power = tf.reduce_sum(origin ** 2, 1, keepdims=True) + 1e-12 # (batch, 1)
scale = tf.reduce_sum(origin * estimation, 1, keepdims=True) / origin_power # (batch, 1)
est_true = scale * origin # (batch, nsample)
est_res = estimation - est_true # (batch, nsample)
# est_true = est_true.T
# est_res = est_res.T
true_power = tf.reduce_sum(est_true ** 2, 1)
res_power = tf.reduce_sum(est_res ** 2, 1)
return 10 * log10(true_power) - 10 * log10(res_power) # (batch, 1)
def estoi_sisdr_loss(batch_size=8, nbf=200, fs=10000, nfft=512, N=30, J=15, min_freq=150):
estoi = estoi_loss(batch_size=batch_size, nbf=nbf, fs=fs, nfft=nfft, N=N, J=J, min_freq=min_freq)
def esloss(y_true, y_pred):
loss = si_sdr_loss(y_true, y_pred) + 50 * estoi(y_true, y_pred)
return tf.reduce_mean(loss)
return esloss
def audio_to_mfcc(audio, sample_rate, frame_length=1024, frame_step=256, fft_length=1024):
# from https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms
stfts = tf.signal.stft(audio, frame_length=frame_length, frame_step=frame_step, fft_length=fft_length)
spectrograms = tf.abs(stfts)
# Warp the linear scale spectrograms into the mel-scale.
num_spectrogram_bins = stfts.shape[-1] # .value
lower_edge_hertz, upper_edge_hertz, num_mel_bins = 80.0, 7350.0, 80
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz, upper_edge_hertz)
mel_spectrograms = tf.tensordot(spectrograms, linear_to_mel_weight_matrix, 1)
mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6)
# Compute MFCCs from log_mel_spectrograms and take the first 13.
mfccs = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrograms)[..., 1:13]
return mfccs
def mfcc_loss(sample_rate, frame_length=1024, frame_step=256, fft_length=1024):
def mfccloss(y_true, y_pred):
# to make sure the signal is a tensor of [batch_size, num_samples]
y_true = tf.reduce_mean(y_true, axis=2)
y_pred = tf.reduce_mean(y_pred, axis=2)
mfcc_true = audio_to_mfcc(y_true, sample_rate, frame_length=frame_length,
frame_step=frame_step, fft_length=fft_length)
mfcc_pred = audio_to_mfcc(y_pred, sample_rate, frame_length=frame_length,
frame_step=frame_step, fft_length=fft_length)
mse = tf.reduce_mean(tf.square(mfcc_true - mfcc_pred))
return mse
return mfccloss
def estoi_loss(batch_size=8, nbf=200, fs=10000, nfft=512,
N=30, # 30 # length of temporal envelope vectors
J=15, # Number of one-third octave bands (cannot be varied)
min_freq=150 # 1050
):
def estoi_loss_inner(y_true, y_pred):
# print("######## ESTOI LOSS ########")
M = int(nbf - (N - 1)) # number of temporal envelope vectors
epsilon = 1e-9 # To avoid divide by zero
OBM, _ = thirdoct(fs, nfft, J, min_freq)
y_true = tf.squeeze(y_true, axis=-1)
y_pred = tf.squeeze(y_pred, axis=-1)
y_pred_shape = K.shape(y_pred)
stft_true = tf_signal.stft(y_true, 256, 128, 512, window_fn, pad_end=False)
stft_pred = tf_signal.stft(y_pred, 256, 128, 512, window_fn, pad_end=False)
OBM1 = tf.convert_to_tensor(OBM) # oooooo Luca
# OBM1 = tf.convert_to_tensor(oooooo) # Maryam
OBM1 = K.tile(OBM1, [y_pred_shape[0], 1, ])
# OBM1 = K.reshape(OBM1, [y_pred_shape[0], J, -1, ])
OBM1 = K.reshape(OBM1, [y_pred_shape[0], J, 257, ])
OCT_pred = K.sqrt(tf.matmul(OBM1, K.square(K.abs(tf.transpose(stft_pred, perm=[0, 2, 1])))))
OCT_true = K.sqrt(tf.matmul(OBM1, K.square(K.abs(tf.transpose(stft_true, perm=[0, 2, 1])))))
d = 0.0 # K.variable(0.0, 'float32')
for i in range(0, batch_size):
for m in range(0, M):
x = K.squeeze(tf.slice(OCT_true, [i, 0, m], [1, J, N]), axis=0)
y = K.squeeze(tf.slice(OCT_pred, [i, 0, m], [1, J, N]), axis=0)
xn = x - K.mean(x, axis=-1, keepdims=True)
yn = y - K.mean(y, axis=-1, keepdims=True)
xn = xn / (K.sqrt(K.sum(xn * xn, axis=-1, keepdims=True)) + epsilon)
yn = yn / (K.sqrt(K.sum(yn * yn, axis=-1, keepdims=True)) + epsilon)
xn = xn - K.tile(K.mean(xn, axis=-2, keepdims=True), [J, 1, ])
yn = yn - K.tile(K.mean(yn, axis=-2, keepdims=True), [J, 1, ])
xn = xn / (K.sqrt(K.sum(xn * xn, axis=-2, keepdims=True)) + epsilon)
yn = yn / (K.sqrt(K.sum(yn * yn, axis=-2, keepdims=True)) + epsilon)
di = K.sum(xn * yn, axis=-1, keepdims=True)
di = 1 / N * K.sum(di, axis=0, keepdims=False)
d = d + di
return 1 - (d / K.cast(batch_size * M, dtype='float'))
return estoi_loss_inner
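# Illustrative usage (an assumption, not from the original file): batch_size
# must match the actual batch size, and nbf the number of STFT frames produced
# by tf.signal.stft(..., frame_length=256, frame_step=128) for the input length.
#   model.compile(optimizer='adam', loss=estoi_loss(batch_size=8, nbf=200))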
def stoi_loss(batch_size=8, nbf=200):
def stoi_loss_inner(y_true, y_pred):
# print("######## STOI LOSS ########")
y_true = K.squeeze(y_true, axis=-1)
y_pred = K.squeeze(y_pred, axis=-1)
y_pred_shape = K.shape(y_pred)
stft_true = tf_signal.stft(y_true, 256, 128, 512, window_fn, pad_end=False)
stft_pred = tf_signal.stft(y_pred, 256, 128, 512, window_fn, pad_end=False)
N = 44 # 230 # 30 # length of temporal envelope vectors
J = 15 # Number of one-third octave bands (cannot be varied)
M = int(nbf - (N - 1)) # number of temporal envelope vectors
smallVal = 1e-9 # To avoid divide by zero
fs = 10000 # 97656.25
nfft = 512 # 256
min_freq = 150 # 1150 # 1050
print(oooooo.shape)
OBM, _ = thirdoct(fs, nfft, J, min_freq)
print(OBM.shape)
OBM1 = tf.convert_to_tensor(oooooo)
OBM1 = K.tile(OBM1, [y_pred_shape[0], 1, ])
OBM1 = K.reshape(OBM1, [y_pred_shape[0], 15, 257, ])
OCT_pred = K.sqrt(tf.matmul(OBM1, K.square(K.abs(tf.transpose(stft_pred, perm=[0, 2, 1])))))
OCT_true = K.sqrt(tf.matmul(OBM1, K.square(K.abs(tf.transpose(stft_true, perm=[0, 2, 1])))))
doNorm = True
c = K.constant(5.62341325, 'float') # 10^(-Beta/20) with Beta = -15
d = K.variable(0.0, 'float')
for i in range(0, batch_size): # Run over mini-batches
for m in range(0, M): # Run over temporal envelope vectors
x = K.squeeze(tf.slice(OCT_true, [i, 0, m], [1, J, N]), axis=0)
y = K.squeeze(tf.slice(OCT_pred, [i, 0, m], [1, J, N]), axis=0)
if doNorm:
alpha = K.sqrt(K.sum(K.square(x), axis=-1, keepdims=True) / (
K.sum(K.square(y), axis=-1, keepdims=True)) + smallVal)
alpha = K.tile(alpha, [1, N, ])
ay = y * alpha
y = K.minimum(ay, x + x * c)
<filename>sphinx/ext/autosummary/__init__.py
# -*- coding: utf-8 -*-
"""
sphinx.ext.autosummary
~~~~~~~~~~~~~~~~~~~~~~
Sphinx extension that adds an autosummary:: directive, which can be
used to generate function/method/attribute/etc. summary lists, similar
to those output e.g. by Epydoc and other API doc generation tools.
An :autolink: role is also provided.
autosummary directive
---------------------
The autosummary directive has the form::
.. autosummary::
:nosignatures:
:toctree: generated/
module.function_1
module.function_2
...
and it generates an output table (containing signatures, optionally)
======================== =============================================
module.function_1(args) Summary line from the docstring of function_1
module.function_2(args) Summary line from the docstring
...
======================== =============================================
If the :toctree: option is specified, files matching the function names
are inserted to the toctree with the given prefix:
generated/module.function_1
generated/module.function_2
...
Note: The file names contain the module:: or currentmodule:: prefixes.
.. seealso:: autosummary_generate.py
autolink role
-------------
The autolink role functions as ``:obj:`` when the name referred can be
resolved to a Python object, and otherwise it becomes simple emphasis.
This can be used as the default role to make links 'smart'.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import inspect
import os
import posixpath
import re
import sys
import warnings
from types import ModuleType
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.states import RSTStateMachine, state_classes
from docutils.statemachine import ViewList
from six import string_types
from six import text_type
import sphinx
from sphinx import addnodes
from sphinx.deprecation import RemovedInSphinx20Warning
from sphinx.environment.adapters.toctree import TocTree
from sphinx.ext.autodoc import get_documenters
from sphinx.ext.autodoc.directive import DocumenterBridge, Options
from sphinx.ext.autodoc.importer import import_module
from sphinx.locale import __
from sphinx.pycode import ModuleAnalyzer, PycodeError
from sphinx.util import import_object, rst, logging
from sphinx.util.docutils import (
NullReporter, SphinxDirective, new_document, switch_source_input
)
if False:
# For type annotation
from typing import Any, Dict, List, Tuple, Type, Union # NOQA
from docutils.utils import Inliner # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc import Documenter # NOQA
logger = logging.getLogger(__name__)
periods_re = re.compile(r'\.(?:\s+)')
literal_re = re.compile(r'::\s*$')
# -- autosummary_toc node ------------------------------------------------------
class autosummary_toc(nodes.comment):
pass
def process_autosummary_toc(app, doctree):
# type: (Sphinx, nodes.Node) -> None
"""Insert items described in autosummary:: to the TOC tree, but do
not generate the toctree:: list.
"""
env = app.builder.env
crawled = {}
def crawl_toc(node, depth=1):
# type: (nodes.Node, int) -> None
crawled[node] = True
for j, subnode in enumerate(node):
try:
if (isinstance(subnode, autosummary_toc) and
isinstance(subnode[0], addnodes.toctree)):
TocTree(env).note(env.docname, subnode[0])
continue
except IndexError:
continue
if not isinstance(subnode, nodes.section):
continue
if subnode not in crawled:
crawl_toc(subnode, depth + 1)
crawl_toc(doctree)
def autosummary_toc_visit_html(self, node):
# type: (nodes.NodeVisitor, autosummary_toc) -> None
"""Hide autosummary toctree list in HTML output."""
raise nodes.SkipNode
def autosummary_noop(self, node):
# type: (nodes.NodeVisitor, nodes.Node) -> None
pass
# -- autosummary_table node ----------------------------------------------------
class autosummary_table(nodes.comment):
pass
def autosummary_table_visit_html(self, node):
# type: (nodes.NodeVisitor, autosummary_table) -> None
"""Make the first column of the table non-breaking."""
try:
tbody = node[0][0][-1]
for row in tbody:
col1_entry = row[0]
par = col1_entry[0]
for j, subnode in enumerate(list(par)):
if isinstance(subnode, nodes.Text):
new_text = text_type(subnode.astext())
new_text = new_text.replace(u" ", u"\u00a0")
par[j] = nodes.Text(new_text)
except IndexError:
pass
# -- autodoc integration -------------------------------------------------------
# current application object (used in `get_documenter()`).
_app = None # type: Sphinx
class FakeDirective(DocumenterBridge):
def __init__(self):
# type: () -> None
super(FakeDirective, self).__init__({}, None, Options(), 0) # type: ignore
def get_documenter(*args):
# type: (Any) -> Type[Documenter]
"""Get an autodoc.Documenter class suitable for documenting the given
object.
*obj* is the Python object to be documented, and *parent* is an
another Python object (e.g. a module or a class) to which *obj*
belongs to.
"""
from sphinx.ext.autodoc import DataDocumenter, ModuleDocumenter
if len(args) == 3:
# new style arguments: (app, obj, parent)
app, obj, parent = args
else:
# old style arguments: (obj, parent)
app = _app
obj, parent = args
warnings.warn('the interface of get_documenter() has been changed. '
'Please give application object as first argument.',
RemovedInSphinx20Warning)
if inspect.ismodule(obj):
# ModuleDocumenter.can_document_member always returns False
return ModuleDocumenter
# Construct a fake documenter for *parent*
if parent is not None:
parent_doc_cls = get_documenter(app, parent, None)
else:
parent_doc_cls = ModuleDocumenter
if hasattr(parent, '__name__'):
parent_doc = parent_doc_cls(FakeDirective(), parent.__name__)
else:
parent_doc = parent_doc_cls(FakeDirective(), "")
# Get the correct documenter class for *obj*
classes = [cls for cls in get_documenters(app).values()
if cls.can_document_member(obj, '', False, parent_doc)]
if classes:
classes.sort(key=lambda cls: cls.priority)
return classes[-1]
else:
return DataDocumenter
# -- .. autosummary:: ----------------------------------------------------------
class Autosummary(SphinxDirective):
"""
Pretty table containing short signatures and summaries of functions etc.
autosummary can also optionally generate a hidden toctree:: node.
"""
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
has_content = True
option_spec = {
'toctree': directives.unchanged,
'nosignatures': directives.flag,
'template': directives.unchanged,
}
def warn(self, msg):
# type: (unicode) -> None
self.warnings.append(self.state.document.reporter.warning(
msg, line=self.lineno))
def run(self):
# type: () -> List[nodes.Node]
self.genopt = Options()
self.warnings = [] # type: List[nodes.Node]
self.result = ViewList()
names = [x.strip().split()[0] for x in self.content
if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
items = self.get_items(names)
nodes = self.get_table(items)
if 'toctree' in self.options:
dirname = posixpath.dirname(self.env.docname)
tree_prefix = self.options['toctree'].strip()
docnames = []
for name, sig, summary, real_name in items:
docname = posixpath.join(tree_prefix, real_name)
docname = posixpath.normpath(posixpath.join(dirname, docname))
if docname not in self.env.found_docs:
self.warn('toctree references unknown document %r'
% docname)
docnames.append(docname)
tocnode = addnodes.toctree()
tocnode['includefiles'] = docnames
tocnode['entries'] = [(None, docn) for docn in docnames]
tocnode['maxdepth'] = -1
tocnode['glob'] = None
tocnode = autosummary_toc('', '', tocnode)
nodes.append(tocnode)
return self.warnings + nodes
def get_items(self, names):
# type: (List[unicode]) -> List[Tuple[unicode, unicode, unicode, unicode]]
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
prefixes = get_import_prefixes_from_env(self.env)
items = [] # type: List[Tuple[unicode, unicode, unicode, unicode]]
max_item_chars = 50
for name in names:
display_name = name
if name.startswith('~'):
name = name[1:]
display_name = name.split('.')[-1]
try:
real_name, obj, parent, modname = import_by_name(name, prefixes=prefixes)
except ImportError:
self.warn('failed to import %s' % name)
items.append((name, '', '', name))
continue
self.result = ViewList() # initialize for each documenter
full_name = real_name
if not isinstance(obj, ModuleType):
# give explicitly separated module name, so that members
# of inner classes can be documented
full_name = modname + '::' + full_name[len(modname) + 1:]
# NB. using full_name here is important, since Documenters
# handle module prefixes slightly differently
documenter = get_documenter(self.env.app, obj, parent)(self, full_name)
if not documenter.parse_name():
self.warn('failed to parse name %s' % real_name)
items.append((display_name, '', '', real_name))
continue
if not documenter.import_object():
self.warn('failed to import object %s' % real_name)
items.append((display_name, '', '', real_name))
continue
if documenter.options.members and not documenter.check_module():
continue
# try to also get a source code analyzer for attribute docs
try:
documenter.analyzer = ModuleAnalyzer.for_module(
documenter.get_real_modname())
# parse right now, to get PycodeErrors on parsing (results will
# be cached anyway)
documenter.analyzer.find_attr_docs()
except PycodeError as err:
logger.debug('[autodoc] module analyzer failed: %s', err)
# no source file -- e.g. for builtin and C modules
documenter.analyzer = None
# -- Grab the signature
sig = documenter.format_signature()
if not sig:
sig = ''
else:
max_chars = max(10, max_item_chars - len(display_name))
sig = mangle_signature(sig, max_chars=max_chars)
# -- Grab the summary
documenter.add_content(None)
summary = extract_summary(self.result.data[:], self.state.document)
items.append((display_name, sig, summary, real_name))
return items
def get_table(self, items):
# type: (List[Tuple[unicode, unicode, unicode, unicode]]) -> List[Union[addnodes.tabular_col_spec, autosummary_table]] # NOQA
"""Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
"""
table_spec = addnodes.tabular_col_spec()
table_spec['spec'] = r'\X{1}{2}\X{1}{2}'
table = autosummary_table('')
real_table = nodes.table('', classes=['longtable'])
table.append(real_table)
group = nodes.tgroup('', cols=2)
real_table.append(group)
group.append(nodes.colspec('', colwidth=10))
group.append(nodes.colspec('', colwidth=90))
body = nodes.tbody('')
group.append(body)
def append_row(*column_texts):
# type: (unicode) -> None
row = nodes.row('')
source, line = self.state_machine.get_source_and_line()
for text in column_texts:
node = nodes.paragraph('')
vl = ViewList()
vl.append(text, '%s:%d:<autosummary>' % (source, line))
with switch_source_input(self.state, vl):
self.state.nested_parse(vl, 0, node)
try:
if isinstance(node[0], nodes.paragraph):
node = node[0]
except IndexError:
pass
row.append(nodes.entry('', node))
body.append(row)
for name, sig, summary, real_name in items:
qualifier = 'obj'
if 'nosignatures' not in self.options:
col1 = ':%s:`%s <%s>`\\ %s' % (qualifier, name, real_name, rst.escape(sig)) # type: unicode # NOQA
else:
col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name)
col2 = summary
append_row(col1, col2)
return [table_spec, table]
def strip_arg_typehint(s):
# type: (unicode) -> unicode
"""Strip a type hint from argument definition."""
return s.split(':')[0].strip()
def mangle_signature(sig, max_chars=30):
# type: (unicode, int) -> unicode
"""Reformat a function signature to a more compact form."""
#
<filename>cltwit/main.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Cltwit is a command line twitter utility
Author : <NAME>
Date : 2013
"""
import os
import sys
import re
import getopt
import gettext
import sqlite3
import webbrowser
import ConfigParser
from sqlite2csv import sqlite2csv
from cltwitdb import cltwitdb
from utils import LocalTimezone
from cltwitreport import TweetsReport
APP_NAME = 'cltwit'
LOC_PATH = os.path.dirname(__file__) + '/locale'
gettext.find(APP_NAME, LOC_PATH)
gettext.install(APP_NAME, LOC_PATH, True)
try:
import tweepy
except ImportError:
print(_("Veuillez installer tweetpy https://github.com/tweepy/tweepy"))
sys.exit()
# Directory for config and database
__cltwitdir__ = os.path.expanduser("~/.config/cltwit")
# Configuration file
__configfile__ = __cltwitdir__ + "/cltwit.conf"
# sqlite database and table
__dblocation__ = __cltwitdir__ + '/data.db'
__tablename__ = 'twitter'
__Local__ = LocalTimezone()
# terminal colour handling
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
def has_colours(stream):
"""Vérifier la prise en charge des couleurs par le terminal"""
if not hasattr(stream, "isatty"):
return False
if not stream.isatty():
return False # auto colours only on a TTY
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
# On error, assume no colour support
return False
__has_colours__ = has_colours(sys.stdout)
def printout(text, colour=WHITE):
"""Print en couleur"""
if __has_colours__:
seq = "\x1b[1;%dm" % (30 + colour) + text + "\x1b[0m"
sys.stdout.write(seq)
else:
sys.stdout.write(text.encode("Utf-8"))
def checkdb():
""" Vérifier la présence de la bdd sqlite et la créer si absente """
if (not os.path.exists(__dblocation__)):
printout(_(u"Vous devez d'abord lancer la commande --database create \
pour créer une base de données de vos tweets."), RED)
sys.exit()
def checkconfig():
"""Récupérer la configuration ou la créer"""
# On ouvre le fichier de conf
config = ConfigParser.RawConfigParser()
try:
config.read(__configfile__)
if config.has_option('twitterapi', 'access_token'):
access_token = config.get('twitterapi', 'access_token')
if config.has_option('twitterapi', 'access_password'):
access_password = config.get('twitterapi', 'access_password')
except:
pass
auth = tweepy.OAuthHandler("Jus1rnqM6S0WojJfOH1kQ",
"AHQ5sTC8YYArHilXmqnsstOivY6ygQ2N27L1zBwk")
# If there is no config yet, authorize the Twitter connection via OAuth
if not(config.has_option('twitterapi', 'access_token') and
config.has_option('twitterapi', 'access_password')):
# Open the web browser to retrieve the authorization code
while True:
try:
webbrowser.open(auth.get_authorization_url())
var = raw_input(_("Entrez le token !\n"))
auth.get_access_token(var)
except Exception, e:
print(str(e))
continue
break
var = auth.access_token
# Extract the token and the password
access_password = str(var).split("&")[0].split("=")[1]
access_token = str(var).split("&")[1].split("=")[1]
# Write the config file with the retrieved credentials
try:
cfgfile = open(__configfile__, 'w')
if not(config.has_section('twitterapi')):
config.add_section('twitterapi')
config.set('twitterapi', 'access_token', access_token)
config.set('twitterapi', 'access_password', access_password)
config.write(cfgfile)
except IOError:
pass
finally:
cfgfile.close()
else: # A config file already existed
auth.set_access_token(access_token, access_password)
return auth
def login():
""" Se connecter à l'api twitter via tweepy """
auth = checkconfig()
api = tweepy.API(auth)
# Verify the API connection by fetching the user name
try:
twittername = api.me().screen_name
except Exception, e:
if 'Unable to get username' in (str(e)):
printout(_(u"Impossible de s'authentifier avec l'api Twitter.\
Fonctionne en mode déconnecté"), RED)
print("\n")
twittername = "offline_mode"
printout(_(u"Authentifié avec le user twitter {0}").format(twittername.decode('utf-8')), GREEN)
print("\n")
return api, auth, twittername
def get_friends_followers(api):
"""Renvoie la liste des id des friends et followers"""
friend_id = []
follower_id = []
printout(_(u"Récupération des Followers..."), YELLOW)
print("\n")
for follower in tweepy.Cursor(api.followers).items():
follower_id.append(follower.id)
printout((u"Récupération des Friends..."), YELLOW)
print("\n")
for friend in tweepy.Cursor(api.friends).items():
friend_id.append(friend.id)
return friend_id, follower_id
def get_diff(liste1, liste2):
"""Renvoie les objets de liste1 qui ne sont pas dans liste2"""
return list(set(liste1).difference(set(liste2)))
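# Added example: get_diff([1, 2, 3], [3]) -> [1, 2] (order not guaranteed,
# since the comparison goes through sets).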
def follow_users(api, user):
"""Suivre une personne"""
try:
api.create_friendship(user)
printout(_(u"Vous suivez maintenant {0}").format(api.get_user(user).screen_name.decode('utf-8')), GREEN)
except Exception, e:
print(e)
def unfollow_user(api, user):
"""Cesser de suivre une personne"""
try:
api.destroy_friendship(user)
printout(_(u"Vous ne suivez plus {0}").format(api.get_user(user).screen_name.decode('utf-8')), GREEN)
except Exception, e:
print(e)
def main(argv=None):
""" Point d'entrée """
# Create the config and database directory if it does not exist
if not os.path.exists(__cltwitdir__):
os.makedirs(__cltwitdir__)
#~ twittername = "offline_mode"
# Argument handling
if argv is None:
argv = sys.argv
if len(argv) == 1:
help()
try:
opts, args = getopt.getopt(sys.argv[1:], "r:ahfut:o:s:d:",
["report", "api", "help", "follow", "unfollow", "tweet=", "output=", "search=", "database="])
except getopt.GetoptError, err:
print(err)
help()
sys.exit()
# Option handling
for option, value in opts:
if option in ('-a', '--api'):
api, auth, twittername = login()
res = api.rate_limit_status()
rtime = res['reset_time']
rhits = res['remaining_hits']
hlimit = res['hourly_limit']
from dateutil.parser import parse
drtime = parse(rtime)
printout(_("Informations sur l'utilisation de l'api Twitter"), YELLOW)
print("\n")
# Convert the time returned by the Twitter API
# to the local timezone
rlocaltime = drtime.astimezone(__Local__)
printout(_("Maximum d'appels par heure: "), BLUE)
print hlimit
printout(_("Nombre d'appels restants: "), BLUE)
print rhits
printout(_("Heure du prochain reset: "), BLUE)
print rlocaltime.strftime("%H:%M %Y-%m-%d")
if option in ('-r', '--report'):
api, auth, twittername = login()
checkdb()
conn = sqlite3.connect(__dblocation__)
c = conn.cursor()
c.execute("select substr(date, 1,4) from twitter order by date asc limit 1")
dmois = c.fetchone()[0]
c.execute("select substr(date, 1,4) from twitter order by date desc limit 1")
fmois = c.fetchone()[0]
# Query the data to export
dd = dict()
for a in range(int(dmois), int(fmois) + 1):
result = []
for m in range(1, 13):
mois = ('{num:02d}'.format(num=m))
c.execute("select count(*) from twitter where substr(date, 1,4) = '{0}' and substr(date, 6,2) = '{1}'".format(a, mois))
result.append(c.fetchone()[0])
dd[a] = result
c.close()
conn.close()
treport = TweetsReport(value)
# twittername = "offline"
treport.ecrireTitre(twittername)
nb = 0
for annee, donnees in dd.items():
nb += 1
if nb == 4:
treport.NextPage()
nb = 1
saut = 0
if nb == 1:
saut = 0
if nb == 2:
saut = 200
if nb == 3:
saut = 400
treport.ecrireLegende(saut, annee, donnees)
treport.addPie(saut, donnees)
treport.save()
printout(_(u"Report {0} créé !").format(value), GREEN)
print("\n")
sys.exit(0)
if option in ('-d', '--database'):
if value in ('u', 'update'):
# Log in to the Twitter API
api, auth, twittername = login()
# Update the database
db = cltwitdb(__dblocation__, __tablename__)
printout(_(u"Mise à jour de la base de données de {0}").format(twittername.decode('utf-8')), YELLOW)
print("\n")
nb = db.update(api, twittername)
printout(_(u"Ajout de {0} tweet(s) dans la base de données.").format(nb), GREEN)
if value in ('c', 'create'):
# Log in to the Twitter API
api, auth, twittername = login()
# Create the database
db = cltwitdb(__dblocation__, __tablename__)
printout(_(u"Création de la liste des tweets de ") + twittername.decode('utf-8'), YELLOW)
db.create(api, twittername)
printout(_(u"Base de données crée"), GREEN)
sys.exit()
#~ database_create(api,twittername)
if option in ("-o", "--output"):
# Export to csv
checkdb()
conn = sqlite3.connect(__dblocation__)
c = conn.cursor()
# Query the data to export
c.execute('select date, tweet, url from {0} order by date desc'.format(__tablename__))
# Use the sqlite2csv class to handle the export
export = sqlite2csv(open(value, "wb"))
# csv file header
export.writerow(["Date", "Tweet", "URL"])
# csv file rows
export.writerows(c)
# Close the sqlite connection and cursor
c.close()
conn.close()
printout(_(u"Fichier csv {0} créé.").format(value.decode('utf-8')), GREEN)
sys.exit()
if option in ("-s", "--search"):
# Search for a pattern in the tweet database
checkdb()
printout(_(u"Recherche de {0} dans vos anciens tweets...")
.format(value.decode('utf-8')), YELLOW)
print("\n")
# the search method returns the rows whose fields
# contain the pattern
db = cltwitdb(__dblocation__, __tablename__)
results = db.search(value, "tweet")
for result in results:
print((u"{0} -> {1}\n{2}\n\n").format(result[1].decode('utf-8'), result[4].decode('utf-8'), result[2].decode('utf-8')))
if option in ("-u", "--unfollow"):
# Log in to the Twitter API
api, auth, twittername = login()
# Build the friend and follower lists (by id)
friend_id, follower_id = get_friends_followers(api)
# Build the follow and unfollow lists
follow_liste = get_diff(follower_id, friend_id)
unfollow_liste = get_diff(friend_id, follower_id)
# Un-follow
printout(_("Vous suivez {0} personnes qui ne vous suivent pas.")
.format(len(unfollow_liste)), YELLOW)
print("\n")
printout(_("Voulez changer cela ? (o/N)"), BLUE)
print("\n")
reponse = raw_input("> ")
if (reponse.lower() == 'o' or reponse.lower() == 'y'):
for user in unfollow_liste:
printout(_("Voulez-vous cesser de suivre {0} ? (o/N)")
.format(api.get_user(user).screen_name), BLUE)
print("\n")
reponse = raw_input("> ")
if (reponse.lower() == 'o' or reponse.lower() == 'y'):
unfollow_user(api, user)
if option in ("-f", "--follow"):
# Log in to the Twitter API
api, auth, twittername = login()
# Build the friend and follower lists (by id)
friend_id, follower_id = get_friends_followers(api)
# Build the follow and unfollow lists
follow_liste = get_diff(follower_id, friend_id)
unfollow_liste = get_diff(friend_id, follower_id)
# follow
printout(_("{0} personnes vous suivent alors que vous ne les suivez pas.")
.format(len(follow_liste)), YELLOW)
print("\n")
printout(_("Voulez changer cela ? (o/N)"), BLUE)
print("\n")
reponse = raw_input("> ")
if (reponse.lower() == 'o' or reponse.lower() == 'y'):
for user in follow_liste:
printout(_("Voulez-vous suivre {0} ? (o/N)"
.format(api.get_user(user).screen_name)), BLUE)
print("\n")
reponse = raw_input("> ")
if (reponse.lower() == 'o' or reponse.lower() == 'y'):
follow_users(api, user)
if option in ("-t", "--tweet"):
# Log in to the Twitter API
api, auth, twittername = login()
#
#!/usr/bin/env python3
# This software was developed at the National Institute of Standards
# and Technology in whole or in part by employees of the Federal
# Government in the course of their official duties. Pursuant to
# title 17 Section 105 of the United States Code portions of this
# software authored by NIST employees are not subject to copyright
# protection and are in the public domain. For portions not authored
# by NIST employees, NIST has been granted unlimited rights. NIST
# assumes no responsibility whatsoever for its use by other parties,
# and makes no guarantees, expressed or implied, about its quality,
# reliability, or any other characteristic.
#
# We would appreciate acknowledgement if the software is used.
"""
make_differential_dfxml.py
Takes two DFXML files as input.
Produces a differential DFXML file as output.
This program's main purpose is matching files correctly. It only performs enough analysis to determine that a fileobject has changed at all. (This is half of the work done by idifference.py.)
"""
__version__ = "0.12.1"
import dfxml.objects as Objects
import logging
import xml.etree.ElementTree as ET
import os
import sys
import collections
import dfxml
_logger = logging.getLogger(os.path.basename(__file__))
def _lower_ftype_str(vo):
"""The string labels of file system names might differ by something small like the casing. Normalize the labels by lower-casing them."""
Objects._typecheck(vo, Objects.VolumeObject)
f = vo.ftype_str
if isinstance(f, str): f = f.lower()
return f
def ignorable_name(fn):
"""Filter out recognized pseudo-file names."""
if fn is None:
return False
return os.path.basename(fn) in [".", "..", "$FAT1", "$FAT2", "$OrphanFiles"]
def make_differential_dfxml(pre, post, **kwargs):
"""
Takes as input two paths to DFXML files. Returns a DFXMLObject.
@param pre String.
@param post String.
@param diff_mode Optional. One of "all" or "idifference".
@param retain_unchanged Optional. Boolean.
@param ignore_properties Optional. Set.
@param annotate_matches Optional. Boolean. True -> matched file objects get a "delta:matched='1'" attribute.
@param rename_requires_hash Optional. Boolean. True -> all matches require matching SHA-1's, if present.
@param ignore_filename_function Optional. Function, string -> Boolean. Returns True if a file name (which can be null) should be ignored.
@param glom_byte_runs Optional. Boolean. Joins contiguous-region byte runs together in FileObject byte run lists.
"""
diff_mode = kwargs.get("diff_mode", "all")
retain_unchanged = kwargs.get("retain_unchanged", False)
ignore_properties = kwargs.get("ignore_properties", set())
annotate_matches = kwargs.get("annotate_matches", False)
rename_requires_hash = kwargs.get("rename_requires_hash", False)
ignore_filename_function = kwargs.get("ignore_filename_function", ignorable_name)
glom_byte_runs = kwargs.get("glom_byte_runs", False)
_expected_diff_modes = ["all", "idifference"]
if diff_mode not in _expected_diff_modes:
raise ValueError("Differencing mode should be in: %r." % _expected_diff_modes)
diff_mask_set = set()
if diff_mode == "idifference":
diff_mask_set |= set([
"atime",
"byte_runs",
"crtime",
"ctime",
"filename",
"filesize",
"md5",
"mtime",
"sha1"
])
_logger.debug("diff_mask_set = " + repr(diff_mask_set))
#d: The container DFXMLObject, ultimately returned.
d = Objects.DFXMLObject(version="1.2.0")
if sys.argv[0] == os.path.basename(__file__):
d.program = sys.argv[0]
d.program_version = __version__
d.command_line = " ".join(sys.argv)
d.add_namespace("delta", dfxml.XMLNS_DELTA)
d.dc["type"] = "Disk image difference set"
d.add_creator_library("Python", ".".join(map(str, sys.version_info[0:3]))) #A bit of a bend, but gets the major version information out.
d.add_creator_library("Objects.py", Objects.__version__)
d.add_creator_library("dfxml.py", Objects.dfxml.__version__)
d.diff_file_ignores |= ignore_properties
_logger.debug("d.diff_file_ignores = " + repr(d.diff_file_ignores))
#The list most of this function is spent on building
fileobjects_changed = []
#Unmodified files; only retained if requested.
fileobjects_unchanged = []
#Key: (partition, inode, filename); value: FileObject
old_fis = None
new_fis = None
#Key: (partition, inode, filename); value: FileObject list
old_fis_unalloc = None
new_fis_unalloc = None
#Key: Partition byte offset within the disk image, paired with the file system type
#Value: VolumeObject
old_volumes = None
new_volumes = None
matched_volumes = dict()
#Populated in distinct (offset, file system type as string) encounter order
volumes_encounter_order = dict()
for infile in [pre, post]:
_logger.debug("infile = %r" % infile)
old_fis = new_fis
new_fis = dict()
old_volumes = new_volumes
new_volumes = dict()
#Fold in the matched volumes - we're just discarding the deleted volumes
for k in matched_volumes:
old_volumes[k] = matched_volumes[k]
matched_volumes = dict()
old_fis_unalloc = new_fis_unalloc
new_fis_unalloc = collections.defaultdict(list)
d.sources.append(infile)
for (i, (event, new_obj)) in enumerate(Objects.iterparse(infile)):
if isinstance(new_obj, Objects.DFXMLObject):
#Inherit desired properties from the source DFXMLObject.
#Inherit namespaces
for (prefix, url) in new_obj.iter_namespaces():
d.add_namespace(prefix, url)
continue
elif isinstance(new_obj, Objects.VolumeObject):
if event == "end":
#This algorithm doesn't yet need to know when a volume is concluded. On to the next object.
continue
offset = new_obj.partition_offset
if offset is None:
raise AttributeError("To perform differencing with volumes, the <volume> elements must have a <partition_offset>. Either re-generate your DFXML with partition offsets, or run this program again with the --ignore-volumes flag.")
#Use the lower-case volume spelling
ftype_str = _lower_ftype_str(new_obj)
#Re-capping the general differential analysis algorithm:
#0. If the volume is in the new list, something's gone wrong.
if (offset, ftype_str) in new_volumes:
_logger.debug("new_obj.partition_offset = %r." % offset)
_logger.warning("Encountered a volume that starts at an offset as another volume, in the same disk image. This analysis is based on the assumption that that doesn't happen. Check results that depend on partition mappings.")
#1. If the volume is in the old list, pop it out of the old list - it's matched.
if old_volumes and (offset, ftype_str) in old_volumes:
_logger.debug("Found a volume in post image, at offset %r." % offset)
old_obj = old_volumes.pop((offset, ftype_str))
new_obj.original_volume = old_obj
new_obj.compare_to_original()
matched_volumes[(offset, ftype_str)] = new_obj
#2. If the volume is NOT in the old list, add it to the new list.
else:
_logger.debug("Found a new volume, at offset %r." % offset)
new_volumes[(offset, ftype_str)] = new_obj
volumes_encounter_order[(offset, ftype_str)] = len(new_volumes) + ((old_volumes and len(old_volumes)) or 0) + len(matched_volumes)
#3. Afterwards, the old list contains deleted volumes.
#Record the ID
new_obj.id = volumes_encounter_order[(offset, ftype_str)]
#Move on to the next object
continue
elif not isinstance(new_obj, Objects.FileObject):
#The rest of this loop compares only file objects.
continue
if ignore_filename_function(new_obj.filename):
continue
#Simplify byte runs if requested
if glom_byte_runs:
if new_obj.byte_runs:
temp_byte_runs = Objects.ByteRuns()
for run in new_obj.byte_runs:
temp_byte_runs.glom(run)
new_obj.byte_runs = temp_byte_runs
#Normalize the partition number
if new_obj.volume_object is None:
new_obj.partition = None
else:
vo = new_obj.volume_object
fts = _lower_ftype_str(vo)
new_obj.partition = volumes_encounter_order[(vo.partition_offset, fts)]
#Define the identity key of this file -- affected by the --ignore argument
_key_partition = None if "partition" in ignore_properties else new_obj.partition
_key_inode = None if "inode" in ignore_properties else new_obj.inode
_key_filename = None if "filename" in ignore_properties else new_obj.filename
key = (_key_partition, _key_inode, _key_filename)
#Ignore unallocated content comparisons until a later loop. The unique identification of deleted files needs a little more to work.
if not new_obj.alloc:
new_fis_unalloc[key].append(new_obj)
continue
#The rest of this loop is irrelevant until the second DFXML file.
if old_fis is None:
new_fis[key] = new_obj
continue
if key in old_fis:
#Extract the old fileobject and check for changes
old_obj = old_fis.pop(key)
new_obj.original_fileobject = old_obj
new_obj.compare_to_original(file_ignores=d.diff_file_ignores)
#_logger.debug("Diffs: %r." % _diffs)
_diffs = new_obj.diffs - d.diff_file_ignores
#_logger.debug("Diffs after ignore-set: %r." % _diffs)
if diff_mask_set:
_diffs &= diff_mask_set
#_logger.debug("Diffs after mask-set: %r." % _diffs)
if len(_diffs) > 0:
#_logger.debug("Remaining diffs: " + repr(_diffs))
fileobjects_changed.append(new_obj)
else:
#Unmodified file; only keep if requested.
if retain_unchanged:
fileobjects_unchanged.append(new_obj)
else:
#Store the new object
new_fis[key] = new_obj
#The rest of the files loop is irrelevant until the second file.
if old_fis is None:
continue
_logger.debug("len(old_fis) = %d" % len(old_fis))
_logger.debug("len(old_fis_unalloc) = %d" % len(old_fis_unalloc))
_logger.debug("len(new_fis) = %d" % len(new_fis))
_logger.debug("len(new_fis_unalloc) = %d" % len(new_fis_unalloc))
_logger.debug("len(fileobjects_changed) = %d" % len(fileobjects_changed))
#Identify renames - only possible if 1-to-1. Many-to-many renames are just left as new and deleted files.
_logger.debug("Detecting renames...")
fileobjects_renamed = []
def _make_name_map(d):
"""Returns a dictionary, mapping (partition, inode) -> {filename}."""
retdict = collections.defaultdict(lambda: set())
for (partition, inode, filename) in d.keys():
retdict[(partition, inode)].add(filename)
return retdict
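#Illustration (hypothetical values): if old_fis held keys (1, 5042, "report.txt") and
#(1, 7001, "notes.txt"), _make_name_map would return
#{(1, 5042): {"report.txt"}, (1, 7001): {"notes.txt"}}.  An inode with exactly one old
#name and exactly one new name is then a rename candidate.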
old_inode_names = _make_name_map(old_fis)
new_inode_names = _make_name_map(new_fis)
for key in new_inode_names.keys():
(partition, inode) = key
if len(new_inode_names[key]) != 1:
continue
if not key in old_inode_names:
continue
if len(old_inode_names[key]) != 1:
continue
if rename_requires_hash:
#Peek at the set elements by doing a quite-ephemeral list cast
old_obj = old_fis[(partition, inode, list(old_inode_names[key])[0])]
new_obj = new_fis[(partition, inode, list(new_inode_names[key])[0])]
if old_obj.sha1 != new_obj.sha1:
continue
#Found a match if we're at this point in the loop
old_name = old_inode_names[key].pop()
new_name = new_inode_names[key].pop()
old_obj = old_fis.pop((partition, inode, old_name))
new_obj = new_fis.pop((partition, inode, new_name))
new_obj.original_fileobject = old_obj
new_obj.compare_to_original(file_ignores=d.diff_file_ignores)
fileobjects_renamed.append(new_obj)
_logger.debug("len(old_fis) -> %d" % len(old_fis))
_logger.debug("len(new_fis) -> %d" % len(new_fis))
_logger.debug("len(fileobjects_changed) -> %d" % len(fileobjects_changed))
_logger.debug("len(fileobjects_renamed) = %d" % len(fileobjects_renamed))
#Identify files that just
#!/usr/bin/env python
import inspect
import re
from pprint import pprint
# requires: pip install --upgrade pandas
import pandas as pd
# ██╗ ██╗███████╗██████╗ ██████╗ ██████╗ ███████╗██╗████████╗██╗ ██╗
# ██║ ██║██╔════╝██╔══██╗██╔══██╗██╔═══██╗██╔════╝██║╚══██╔══╝╚██╗ ██╔╝
# ██║ ██║█████╗ ██████╔╝██████╔╝██║ ██║███████╗██║ ██║ ╚████╔╝
# ╚██╗ ██╔╝██╔══╝ ██╔══██╗██╔══██╗██║ ██║╚════██║██║ ██║ ╚██╔╝
# ╚████╔╝ ███████╗██║ ██║██████╔╝╚██████╔╝███████║██║ ██║ ██║
# ╚═══╝ ╚══════╝╚═╝ ╚═╝╚═════╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝ ╚═╝
class Verbosity:
''' class Verbosity
Provide colorful output to make messages easier to read '''
# Private variables
__red = '\033[31m'
__green = '\033[32m'
__orange = '\033[33m'
__blue = '\033[34m'
__purple = '\033[35m'
__cyan = '\033[36m'
__lightgrey = '\033[37m'
__darkgrey = '\033[90m'
__lightred = '\033[91m'
__lightgreen = '\033[92m'
__yellow = '\033[93m'
__lightblue = '\033[94m'
__pink = '\033[95m'
__lightcyan = '\033[96m'
__reset = '\033[0m'
def __init__(self, **args):
''' Constructor
__init__ is a reserved method in Python classes.
It is called when an object is created from the class and
allows the class to initialize the attributes of the instance '''
self.message = None
self.error = False
super(Verbosity, self).__init__()
''' End of def __init__(self, **args): '''
def v_debug(self, message):
''' Display a debug message '''
print(self.__purple + '[DEBUG] ' + self.__reset + message)
def v_error(self, message):
''' Display an error message '''
print(self.__red + '[ERROR] ' + self.__reset + message)
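# Usage sketch (illustrative only; the class is used internally by the
# FilterModule below for colored console output):
#
#   vb = Verbosity()
#   vb.v_debug("parsing model string")    # prints "[DEBUG] parsing model string"
#   vb.v_error("unexpected model type")   # prints "[ERROR] unexpected model type"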
# ███████╗██╗██╗ ████████╗███████╗██████╗
# ██╔════╝██║██║ ╚══██╔══╝██╔════╝██╔══██╗
# █████╗ ██║██║ ██║ █████╗ ██████╔╝
# ██╔══╝ ██║██║ ██║ ██╔══╝ ██╔══██╗
# ██║ ██║███████╗██║ ███████╗██║ ██║
# ╚═╝ ╚═╝╚══════╝╚═╝ ╚══════╝╚═╝ ╚═╝
# ███╗ ███╗ ██████╗ ██████╗ ██╗ ██╗██╗ ███████╗
# ████╗ ████║██╔═══██╗██╔══██╗██║ ██║██║ ██╔════╝
# ██╔████╔██║██║ ██║██║ ██║██║ ██║██║ █████╗
# ██║╚██╔╝██║██║ ██║██║ ██║██║ ██║██║ ██╔══╝
# ██║ ╚═╝ ██║╚██████╔╝██████╔╝╚██████╔╝███████╗███████╗
# ╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝╚══════╝
class FilterModule(object):
def __init__(self):
self.dp = Verbosity()
self.debug = False
self.error = False
def filters(self):
return {
'filter_model': self.filter_model,
'find_commands': self.find_commands,
'find_image_file': self.find_image_file,
'find_image_folder': self.find_image_folder,
'parse_data_for_deletion': self.parse_data_for_deletion,
'parse_filesystem_list': self.parse_filesystem_list,
'parse_show_switch': self.parse_show_switch,
'parse_show_version': self.parse_show_version,
}
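# Sketch of how the mapping above is resolved (plain Python, outside Ansible):
# Ansible calls filters() once and then looks up each filter callable by name,
# so the equivalent direct call would be:
#
#   fm = FilterModule()
#   fm.filters()['filter_model']('WS-C2960X-48TS-L')   # -> 'WS-C2960X'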
# -------------------------------------------------------------------------
# ___ _ _ ___ _ ___ ___ __ __ ___ _____ _ _ ___ ___ ___
# | _ \ | | | _ ) | |_ _/ __| | \/ | __|_ _| || |/ _ \| \/ __|
# | _/ |_| | _ \ |__ | | (__ | |\/| | _| | | | __ | (_) | |) \__ \
# |_| \___/|___/____|___\___| |_| |_|___| |_| |_||_|\___/|___/|___/
#
# -------------------------------------------------------------------------
def filter_model(self, model_arg, debug=False):
''' filter_model
Reduce a Cisco model string to its base model: keep the 'WS-<family>'
prefix for switches, or strip the '/K9'-style suffix for routers.
Args:
model_arg: Cisco model string
debug: passing Boolean to enable/disable debug
'''
model = ""
self.debug = debug
if self.debug:
self.dp.v_debug(">>> Start of " + self.__who_am_i())
# End of if self.debug:
if isinstance(model_arg, str):
# The passed argument is a string
if self.debug:
message = "(" + self.__who_am_i() + ") The argument " + \
"model_arg is a string."
self.dp.v_debug(message)
# End of if self.debug:
# Split a string into a list using character minus (-) as separator
# but only keep the first 2 elements of the list.
stripped_model = model_arg.split('-')[0:2]
if stripped_model[0].upper() == 'WS':
# The first element has the string 'WS', which stands for
# Workgroup Switch.
if self.debug:
message = "(" + self.__who_am_i() + ") Detected that " + \
"the string starts with the string 'WS'"
self.dp.v_debug(message)
# End of if self.debug:
model = '-'.join(stripped_model)
# End of if stripped_model[0].upper() == 'WS':
else:
# The first element doesn't have the string 'WS'.
# Most routers will start with the string CISCO or ISR but most
# likely have the character slash '/'.
if self.debug:
message = "(" + self.__who_am_i() + ") Detected that " + \
"the string doesn't start with string 'WS'"
self.dp.v_debug(message)
# End of if self.debug:
# Split string into a list using the character slash '/' as
# separator and return with the first element of the list.
model = stripped_model[0].split('/')[0]
# End of else:
# End of if isinstance(model_arg, str):
else:
# The passed argument is NOT a string
message = "(" + self.__who_am_i() + ") The passed model " + \
"argument isn't a string."
self.dp.v_error(message)
return model
# End of else:
if self.debug:
message = "(" + self.__who_am_i() + ") model = '" + model + "'"
self.dp.v_debug(message)
self.dp.v_debug("<<< End of " + self.__who_am_i())
# End of if self.debug:
return model.upper()
''' End of def filter_model(self,model_arg, debug=False): '''
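# Worked examples (hypothetical model strings, traced from the logic above,
# shown as filter input -> output):
#
#   'WS-C2960X-48TS-L'  -> 'WS-C2960X'   (switch: keep the 'WS-<family>' prefix)
#   'CISCO2911/K9'      -> 'CISCO2911'   (router: drop the '/K9' suffix)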
def find_commands(self, data, command, debug=False):
''' find_commands
Check whether the given upgrade command exists in the output of the
exec command '?'. Returns True when the command is found.
Args:
data: contains the output from exec command '?'
command: command string
debug: passing Boolean to enable/disable debug
'''
flag = False
self.debug = debug
if self.debug:
self.dp.v_debug(">>> Start of " + self.__who_am_i())
# End of if self.debug:
raw_list = self.__stdout(data, 'find_commands')
if self.error:
return False
# End of if error:
if not isinstance(command, str):
message = "(" + self.__who_am_i() + \
") The passed command isn't a string."
self.dp.v_error(message)
return False
# End of if not isinstance(command, str):
# Split string into a list.
command_list = command.split()
if len(command_list) == 0:
message = "(" + self.__who_am_i() + \
") The passed command is empty."
self.dp.v_error(message)
return False
# End of if len(command_list) == 0:
elif len(command_list) == 1:
set_cmd = command_list[0].strip()
# End of elif len(command_list) == 1:
else:
set_cmd = command_list[-1].strip()
# End of else:
for line in raw_list:
# Loop through each output line
line = line.strip() # Remove leading and trailing spaces
cond = re.match('^(' + re.escape(set_cmd) + ')\\s+', line)
if cond:
if self.debug:
message = "(" + self.__who_am_i() + ") Found line: " + \
line
self.dp.v_debug(message)
# End of if self.debug:
flag = True
# End of if cond:
# End of for line in raw_list:
if self.debug:
message = "(" + self.__who_am_i() + ") flag = " + \
str(flag) + " <" + str(type(flag))
message += ">"
self.dp.v_debug(message)
self.dp.v_debug("<<< End of def " + self.__who_am_i())
# End of if self.debug:
return flag
''' End of def find_commands(self, data, command, debug=False): '''
def find_image_file(self, data, alist, binfile, debug=False):
''' find_image_file
Args:
data: contains the output from the running IOS command
alist: passing list to append data into it
binfile: passing string with name of binary file
debug: passing Boolean to enable/disable debug
'''
data_list = []
self.debug = debug
if self.debug:
self.dp.v_debug(">>> Start of " + self.__who_am_i())
message = "(" + self.__who_am_i() + ") The passed argument " + \
"'data' is a " + str(type(data))
self.dp.v_debug(message)
message = "(" + self.__who_am_i() + ") The passed argument " + \
"'alist' is a " + str(type(alist))
self.dp.v_debug(message)
message = "(" + self.__who_am_i() + ") The passed argument " + \
"'binfile' is a " + str(type(binfile))
self.dp.v_debug(message)
# End of if self.debug:
cond = (not isinstance(data, list) or not isinstance(
alist, list) or not isinstance(binfile, str))
if cond:
message = "(" + self.__who_am_i() + ") One or more passed " + \
"arguments has the wrong type. Expected " + \
"'data' = <<list>>, 'alist' = <<list>>, 'binfile' = <<string>>"
self.dp.v_error(message)
return data_list
# End of if cond:
for item in data:
new_dict = {'filesystem': None, 'filename': None}
if "item" in item:
key = "item"
label = "filesystem"
new_dict['filesystem'] = self.__parse_find_file(
item, key, label)
if "stdout" in item:
key = "stdout"
label = None
if self.debug:
message = "(" + self.__who_am_i() + ") Found " + \
"dictionary key 'stdout'."
self.dp.v_debug(message)
lines = self.__parse_find_file(item, key, label)
new_dict['filename'] = self.__parse_filefolder(
lines, binfile, '-rw', self.__who_am_i())
# End of if "item" in item:
data_list.append(new_dict)
# End of for item in data:
df1 = pd.DataFrame(alist).set_index('filesystem')
df2 = pd.DataFrame(data_list).set_index('filesystem')
df = df1.merge(df2, left_index=True, right_index=True)
data_list = df.T.to_dict()
if self.debug:
self.dp.v_debug("<<< End of " + self.__who_am_i())
# End of if self.debug:
return data_list
''' End of def find_image_file(data, alist, binfile, debug=False): '''
def find_image_folder(self, data, folder, debug=False):
''' find_image_folder
Args:
data: contains the output from the running IOS command
folder: passing string with name of folder
debug: passing Boolean to enable/disable debug
'''
data_list = []
import gc
import os
import sys
import utime
from uhttp_signature.authed_urequests import make_validated_request, RequestError, SignatureException
from pozetron_config import *
from credentials import KEY_ID, HMAC_SECRET
from logger import log, exc_logline
if isinstance(KEY_ID, bytes):
KEY_ID = KEY_ID.decode('utf-8')
debug = False
# Module name for references
pozetron = sys.modules[__name__]
# Was POST checkin successful?
# See also post_checkin
_on_startup_checkin_done = False
last_refreshed_scripts = None
last_checked_commands = None
forget_network_time = None
# Some commands must be executed before others (when processed as a batch)
COMMAND_ORDER = {
'log_mode': 0,
'forget_network': 1,
'reboot': 2
}
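# Illustration (hypothetical batch): check_commands() below sorts a batch such as
# [{'type': 'reboot'}, {'type': 'log_mode', ...}, {'type': 'forget_network'}] into
# log_mode (0) -> forget_network (1) -> reboot (2), so the reboot always runs last
# and remains usable as a failsafe.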
one_second = const(1000)
one_minute = const(60000)
five_minutes = const(300000)
one_day = const(24*60*60*1000)
def epilog():
# We're trying to be cooperative here.
utime.sleep_ms(0)
now = utime.ticks_ms()
# In the case of a signature error (client or server), try to update the system time in
# case the clock has skewed significantly
for attempt in range(3):
try:
# Check commands before refreshing scripts so reboot remains a viable failsafe
if pozetron.last_checked_commands is None or utime.ticks_diff(now, pozetron.last_checked_commands) > one_minute:
try:
check_commands()
pozetron.last_checked_commands = now
flush_logs()
except SignatureException as ex:
raise ex
except RequestError:
pass
if pozetron.last_refreshed_scripts is None or utime.ticks_diff(now, pozetron.last_refreshed_scripts) > one_minute:
try:
refresh_scripts()
pozetron.last_refreshed_scripts = now
flush_logs()
except SignatureException as ex:
raise ex
except RequestError:
pass
except SignatureException:
# Try to set the time through NTP, but not too hard
try:
import ntptime
ntptime.settime()
except:
pass
finally:
ntptime = None
del(ntptime)
else:
break
# Wrapper for make_validated_request, for brevity
def request(url, **kw):
result = make_validated_request(url, KEY_ID, HMAC_SECRET, debug=debug, **kw)
# Successful request -> we can disable backoff
reset_backoff()
return result
# We must write "Failure sending logs..." message only once
# until the next flush_logs success
_logs_lost = False
# This function does not raise
def flush_logs():
global _logs_lost
import logger
url = API_BASE + '/logs/'
try:
if should_backoff():
return
# If log is empty, then we don't have to flush anything
if not logger.file_size:
logs = logger._logs
if len(logs) == 0:
return
else:
try:
if os.stat(logger._LOG_FILE)[6] == 0: # size == 0
return
except OSError: # No file = no logs = no problem
return
# Post logs to API
try:
try:
# NOTE: when API is available again after failure, this will not execute
# if log is empty, until some other message is logged.
if _logs_lost:
request(url, method='POST', json=[{'text': 'Failure sending logs, truncated logs have been lost'}])
_logs_lost = False
# TODO: In the future, find a way to compute the HMAC on the file piece by piece rather
# than after loading the list into memory.
if not logger.file_size:
json = [{'text': x} for x in logger._logs]
if logger._overflow_errors:
json.append({'text': '{} failed writes to log file due to logger.file_size={}'.format(logger._overflow_errors, logger.file_size)})
request(url, method='POST', json=json)
del json
else:
# If there are overflow errors we send them separately to make sure they get there.
if logger._overflow_errors:
json = [{'text': '{} failed writes to log file due to logger.file_size={}'.format(logger._overflow_errors, logger.file_size)}]
request(url, method='POST', json=json)
del json
logger._overflow_errors = 0
# If there were no overflow errors we just send the log file.
request(url, method='POST', in_file=logger._LOG_FILE)
# Success - clear logs
logger._overflow_errors = 0
logger._send_fails = 0
if not logger.file_size:
logger._logs.clear()
else:
_truncate_file(logger._LOG_FILE)
except RequestError as ex:
# NOTE: API will accept logs even if they are corrupted
print(ex)
set_backoff()
raise ex
#finally:
# Delete variable
# The follow call to del causes:
# MemoryError: memory allocation failed, allocating 1073672184 bytes
#del logs
except Exception as ex:
log(exc_logline.format('send logs', ex))
logger._send_fails += 1
# If too many fails, reset log
if logger._send_fails >= 3:
clear_logs()
_logs_lost = True
except Exception as o:
sys.print_exception(o)
finally:
del url
del logger
# No truncate() so we use a workaround.
# Don't physically write if file is already empty, to save flash.
def _truncate_file(filename):
try:
if os.stat(filename)[6] > 0:
with open(filename, 'w'):
pass
except OSError:
pass
def clear_logs():
import logger
logger._overflow_errors = 0
logger._send_fails = 0
logger._logs.clear()
_truncate_file(logger._LOG_FILE)
del logger
# Exponential backoff: don't do requests to API for some time,
# increasing that time after every subsequent failure.
_backoff_until = None
_backoff_factor = 0
_backoff_factor_max = 7 # max wait depends on this and on timeout below
_backoff_timeout_ms = const(10000) # max wait is about 20 minutes
def set_backoff(timeout=None):
global _backoff_until, _backoff_factor
if not timeout:
# We randomize the backoff period by up to +/-25% to avoid the "thundering herd" problem
import urandom
timeout = (_backoff_timeout_ms * 2**_backoff_factor) * (1024 + (256 - urandom.getrandbits(9))) // 1024
del urandom
# Increase backoff factor
_backoff_factor = min(_backoff_factor + 1, _backoff_factor_max)
# next try at (now + timeout)
_backoff_until = utime.ticks_add(utime.ticks_ms(), timeout)
# Return True if backoff is in effect and we should come later
def should_backoff():
global _backoff_until
if _backoff_until is None:
return False
diff = utime.ticks_diff(_backoff_until, utime.ticks_ms())
# Check for wrap around
if not (-one_day < diff < one_day):
diff = 0
# Are we still waiting?
if diff > 0:
return True
# Reset wait, but don't reset factor. Factor is reset on first successful request.
_backoff_until = None
return False
# Reset back-off mechanism after successful request.
def reset_backoff():
global _backoff_factor
_backoff_factor = 0
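# Standalone sketch (not called by the device code) of how the schedule above
# grows before jitter: roughly 10 s, 20 s, 40 s, ... up to about 21 minutes
# once _backoff_factor reaches _backoff_factor_max.
def _example_backoff_schedule(base_ms=10000, max_factor=7):
    # [10000, 20000, 40000, ..., 1280000] milliseconds
    return [base_ms * 2 ** factor for factor in range(max_factor + 1)]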
def makesubdir(path, subdir):
# Replacement for os.makedirs
if path[-1] != '/':
path += '/'
items = subdir.strip('/').split('/')
for x in items:
path += x + '/'
try:
os.mkdir(path)
except OSError:
pass
del x, items
def autocollect(function):
def autocollect_decorator(*args, **kwargs):
try:
return function(*args, **kwargs)
finally:
gc.collect()
return autocollect_decorator
def post_checkin():
# Returns True if checkin is successful, False otherwise.
global _on_startup_checkin_done
if should_backoff():
return False
try:
request(API_BASE + '/checkin/', method='POST', data=' ')
except RequestError as ex:
print(exc_logline.format('post checkin', ex))
set_backoff()
return False
_on_startup_checkin_done = True
return True
@autocollect
def on_startup():
# This function MUST be called once on device startup.
post_checkin()
#log('on_startup completed')
def _reboot():
log('Rebooting')
flush_logs()
import machine
machine.reset()
@autocollect
def check_commands(debug=pozetron.debug):
# Get list of commands from server and execute them.
global _on_startup_checkin_done, forget_network_time
if not _on_startup_checkin_done:
if not post_checkin():
return
if should_backoff():
return
try:
commands = request(API_BASE + '/checkin/')
# Because SignatureException is a type of RequestError we have to
# catch it here and raise it explicitly.
except SignatureException as ex:
raise ex
except RequestError as ex:
print(ex)
set_backoff()
return
commands = commands.json()
import logger
try:
# Commands must be executed in a particular order
if len(commands) > 1:
commands.sort(key=lambda x: COMMAND_ORDER.get(x['type'], 0))
for command in commands:
# set error=<str> and it will be reported to server
error = ''
if command['type'] == 'log_mode':
# Log enable/disable
old_send_logs = logger._send_logs
logger._send_logs = command['data']['enable']
# If being disabled, flush remaining lines
if old_send_logs and not logger._send_logs:
flush_logs()
# Change log mode
new_size = None
if command['data'].get('mode') == 'memory':
new_size = 0
elif command['data'].get('mode') == 'file' and 'file_size' in command['data']:
new_size = command['data']['file_size']
if new_size is not None and new_size != logger.file_size:
# Flush unconditionally, to keep it simple.
flush_logs()
# Flush failed? Force clear.
# (this is not relevant if mode is still "file")
if logger._send_fails and (logger.file_size == 0 or new_size == 0): # memory <-> file
clear_logs()
logger.file_size = new_size # so that following line goes to new destination
global _logs_lost
_logs_lost = True
logger.file_size = new_size
del new_size
if logger._send_logs != old_send_logs:
log('Log mode enabled' if logger._send_logs else 'Log mode disabled')
# Save log mode to file, to be used on next reboot
_save_log_mode()
elif command['type'] == 'reboot':
# Make sure there is 1 second delay between forget-network and reboot
if forget_network_time is not None:
utime.sleep_ms(one_second - utime.ticks_diff(utime.ticks_ms(), forget_network_time))
_reboot()
continue # reboot is special, we send confirmation AFTER reboot
elif command['type'] == 'forget_network' and forget_network_time is None:
try:
os.remove('/network_config.py')
forget_network_time = utime.ticks_ms()
log('Removed network config')
except OSError: # no file = do nothing
print('forget-network is a no-op')
else:
error = 'Unknown command'
# Confirm command execution
request(API_BASE + '/command/', method='POST',
json={
'command': command,
'success': error == '',
'error': error
})
finally:
del logger
def check_file_signature(in_file, signature, secret):
try:
from ubinascii import unhexlify
import uhmac
try:
# Try and just read the entire file into memory to HMAC it
hmac_instance = uhmac.new(unhexlify(secret), digestmod="sha256")
content = in_file.read()
hmac_instance.update(content)
except MemoryError:
try:
del(hmac_instance)
del(content)
except NameError:
pass
hmac_instance = uhmac.new(unhexlify(secret), digestmod="sha256")
# If we don't have enough memory to
an array or a tuple containing an
array and a mask, depending on the value of ``filtered``.
"""
ids = origin['pore._id'][pores]
return self._map(element='pore', ids=ids, filtered=filtered)
def map_throats(self, throats, origin, filtered=True):
r"""
Given a list of throats on a target object, finds indices of
those throats on the calling object
Parameters
----------
throats : array_like
The indices of the throats on the object specified in ``origin``
origin : OpenPNM Base object
The object corresponding to the indices given in ``throats``
filtered : boolean (default is ``True``)
If ``True`` then an ND-array of indices is returned with missing
indices removed, otherwise a named-tuple containing both the
``indices`` and a boolean ``mask`` with ``False`` indicating
which locations were not found.
Returns
-------
Throat indices on the calling object corresponding to the same throats
on the target object. Can be an array or a tuple containing an array
and a mask, depending on the value of ``filtered``.
"""
ids = origin['throat._id'][throats]
return self._map(element='throat', ids=ids, filtered=filtered)
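# Usage sketch (assuming a project with a network ``pn`` and a geometry ``geo``
# assigned to a subset of its pores and throats):
#
#   Ts_on_net = pn.find_neighbor_throats(pores=[0])
#   Ts_on_geo = geo.map_throats(throats=Ts_on_net, origin=pn)
#
# ``Ts_on_geo`` holds the positions of those throats in ``geo``'s own numbering;
# with ``filtered=False`` a named-tuple with a ``mask`` is returned instead,
# flagging throats that do not exist on ``geo``.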
def _tomask(self, indices, element):
r"""
This is a generalized version of tomask that accepts a string of
'pore' or 'throat' for programmatic access.
"""
element = self._parse_element(element, single=True)
indices = self._parse_indices(indices)
N = sp.shape(self[element + '.all'])[0]
ind = sp.array(indices, ndmin=1)
mask = sp.zeros((N, ), dtype=bool)
mask[ind] = True
return mask
def tomask(self, pores=None, throats=None):
r"""
Convert a list of pore or throat indices into a boolean mask of the
correct length
Parameters
----------
pores or throats : array_like
List of pore or throat indices. Only one of these can be specified
at a time, and the returned result will be of the corresponding
length.
Returns
-------
A boolean mask of length Np or Nt with True in the specified pore or
throat locations.
See Also
--------
toindices
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> mask = pn.tomask(pores=[0, 10, 20])
>>> sum(mask) # 3 non-zero elements exist in the mask (0, 10 and 20)
3
>>> len(mask) # Mask size is equal to the number of pores in network
125
>>> mask = pn.tomask(throats=[0, 10, 20])
>>> len(mask) # Mask is now equal to number of throats in network
300
"""
if (pores is not None) and (throats is None):
mask = self._tomask(element='pore', indices=pores)
elif (throats is not None) and (pores is None):
mask = self._tomask(element='throat', indices=throats)
else:
raise Exception('Cannot specify both pores and throats')
return mask
def toindices(self, mask):
r"""
Convert a boolean mask to a list of pore or throat indices
Parameters
----------
mask : array_like booleans
A boolean array with True at locations where indices are desired.
The appropriate indices are returned based on the length of mask,
which must be either Np or Nt long.
Returns
-------
A list of pore or throat indices corresponding to the locations where
the received mask was True.
See Also
--------
tomask
Notes
-----
This behavior could just as easily be accomplished by using the mask
in ``pn.pores()[mask]`` or ``pn.throats()[mask]``. This method is
just a convenience function and is a complement to ``tomask``.
"""
if sp.amax(mask) > 1:
raise Exception('Received mask is invalid, with values above 1')
mask = sp.array(mask, dtype=bool)
indices = self._parse_indices(mask)
return indices
def interleave_data(self, prop):
r"""
Retrieves requested property from associated objects, to produce a full
Np or Nt length array.
Parameters
----------
prop : string
The property name to be retrieved
Returns
-------
A full length (Np or Nt) array of requested property values.
Notes
-----
This makes an effort to maintain the data 'type' when possible; however
when data are missing this can be tricky. Data can be missing in two
different ways: a set of pores is not assigned to a geometry, or the
network contains multiple geometries and data does not exist on all.
Float and boolean data is fine, but missing ints are converted to float
when nans are inserted.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[2, 2, 2])
>>> Ps = pn['pore.top']
>>> Ts = pn.find_neighbor_throats(pores=Ps)
>>> g1 = op.geometry.GenericGeometry(network=pn, pores=Ps, throats=Ts)
>>> Ts = ~pn.tomask(throats=Ts)
>>> g2 = op.geometry.GenericGeometry(network=pn, pores=~Ps, throats=Ts)
>>> g1['pore.value'] = 1
>>> print(g1['pore.value'])
[1 1 1 1]
>>> print(g2['pore.value']) # 'pore.value' is defined on g1, not g2
[nan nan nan nan]
>>> print(pn['pore.value'])
[nan 1. nan 1. nan 1. nan 1.]
>>> g2['pore.value'] = 20
>>> print(pn['pore.value'])
[20 1 20 1 20 1 20 1]
>>> pn['pore.label'] = False
>>> print(g1['pore.label']) # 'pore.label' is defined on pn, not g1
[False False False False]
"""
element = self._parse_element(prop.split('.')[0], single=True)
N = self.project.network._count(element)
# Fetch sources list depending on object type?
proj = self.project
if self._isa() in ['network', 'geometry']:
sources = list(proj.geometries().values())
elif self._isa() in ['phase', 'physics']:
sources = list(proj.find_physics(phase=self))
elif self._isa() in ['algorithm', 'base']:
sources = [self]
else:
raise Exception('Unrecognized object type, cannot find dependents')
# Attempt to fetch the requested array from each object
arrs = [item.get(prop, None) for item in sources]
locs = [self._get_indices(element, item.name) for item in sources]
sizes = [sp.size(a) for a in arrs]
if sp.all([item is None for item in arrs]): # prop not found anywhere
raise KeyError(prop)
# Check the general type of each array
atype = []
for a in arrs:
if a is not None:
t = a.dtype.name
if t.startswith('int') or t.startswith('float'):
atype.append('numeric')
elif t.startswith('bool'):
atype.append('boolean')
else:
atype.append('other')
if not all([item == atype[0] for item in atype]):
raise Exception('The array types are not compatible')
else:
dummy_val = {'numeric': sp.nan, 'boolean': False, 'other': None}
# Create an empty array of the right type and shape
for item in arrs:
if item is not None:
if len(item.shape) == 1:
temp_arr = sp.zeros((N, ), dtype=item.dtype)
else:
temp_arr = sp.zeros((N, item.shape[1]), dtype=item.dtype)
temp_arr.fill(dummy_val[atype[0]])
# Convert int arrays to float IF NaNs are expected
if temp_arr.dtype.name.startswith('int') and \
(sp.any([i is None for i in arrs]) or sp.sum(sizes) != N):
temp_arr = temp_arr.astype(float)
temp_arr.fill(sp.nan)
# Fill new array with values in the corresponding locations
for vals, inds in zip(arrs, locs):
if vals is not None:
temp_arr[inds] = vals
else:
temp_arr[inds] = dummy_val[atype[0]]
# Check if any arrays have units, if so then apply them to result
if any([hasattr(a, 'units') for a in arrs]):
[a.convert_to_mks() for a in arrs if hasattr(a, 'units')]
units = [a.units.__str__() for a in arrs if hasattr(a, 'units')]
if len(units) > 0:
if len(set(units)) == 1:
temp_arr *= sp.array([1])*getattr(unyt, units[0])
else:
raise Exception('Units on the interleaved array are not equal')
return temp_arr
def interpolate_data(self, propname):
r"""
Determines a pore (or throat) property as the average of its
neighboring throats (or pores)
Parameters
----------
propname: string
The dictionary key to the values to be interpolated.
Returns
-------
An array containing interpolated pore (or throat) data
Notes
-----
This uses an unweighted average, without attempting to account for
distances or sizes of pores and throats.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[3, 1, 1])
>>> pn['pore.value'] = [1, 2, 3]
>>> pn.interpolate_data('pore.value')
array([1.5, 2.5])
"""
boss = self.project.find_full_domain(self)
net = self.project.network
if boss is self:
Ts = boss.throats()
Ps = boss.pores()
label = 'all'
else:
Ts = boss.throats(self.name)
Ps = boss.pores(self.name)
label = self.name
if propname.startswith('throat'):
# Upcast data to full network size
temp = sp.ones((boss.Nt,))*sp.nan
temp[Ts] = self[propname]
data = temp
temp = sp.ones((boss.Np,))*sp.nan
for pore in Ps:
neighborTs = net.find_neighbor_throats(pore)
neighborTs = net.filter_by_label(throats=neighborTs,
labels=label)
temp[pore] = sp.mean(data[neighborTs])
values = temp[Ps]
elif propname.startswith('pore'):
# Upcast data to full network size
data = sp.ones((net.Np, ))*sp.nan
data[Ps] = self[propname]
Ps12 = net['throat.conns'][Ts]
values = sp.mean(data[Ps12], axis=1)
if hasattr(self[propname], 'units'):
values *= self[propname].units
return values
def filter_by_label(self, pores=[], throats=[], labels=None, mode='or'):
r"""
Returns which of the supplied pores (or throats) has the specified
label
Parameters
----------
pores, or throats : array_like
List of pores or throats to be filtered
labels : list of strings
The labels to apply as a filter
from ..util import make_list
from ..optimizers import LayerOptimizer
from ..graphs import Link
import numpy as np
class Input(Link):
def __init__(self, shape, name=None):
self.Shape = shape # tensor shape without the minibatch dimension
self.Values = None
self.XGradSum = None
self.Inputs = []
self.Layer = None
self.Name = name
def __str__(self):
name = self.Name or "(unnamed)"
return f"[Input {name} {self.Shape}]"
def set(self, values):
assert len(values.shape)-1 == len(self.Shape) # remove the minibatch dimension
assert all(s is None or s == n for s, n in zip(self.Shape, values.shape[1:])), "Incompatible shape for input layer. Expected %s, got %s" % (('*',)+self.Shape, values.shape)
self.Values = values
self.XGradSum = np.zeros(values.shape)
def compute(self):
return self.Values
def backprop(self, grads):
self.XGradSum[...] += grads
def reset_gradients(self):
self.XGradSum = None
class Constant(Link):
def __init__(self, value=None, name=None):
self.Values = np.ones((1,)) if value is None else value
self.Inputs = []
self.Layer = None
self.Name = name
def __str__(self):
name = self.Name or "(unnamed)"
return f"[Constant {name} {self.Values}]"
def compute(self):
return self.Values
def backprop(self, grads):
pass
def reset_gradients(self):
pass
class Layer(object):
params = []
def __init__(self, name=None, activation=None):
self.Name = name
self.PGradSum = None
self.NSamples = 0
self.Configured = False
self.Params = []
self.StateInGradients = self.XGradients = self.WeightsGradients = None # saved for information purposes. Not used by the Layer itself
if isinstance(activation, str):
from ..activations import get_activation
self.Activation = get_activation(activation)
else:
self.Activation = activation
def __str__(self):
return "[Layer %s %s]" % (self.__class__.__name__, self.Name or "")
def link(self, *inputs):
from ..graphs import Link
if len(inputs) == 1:
if isinstance(inputs[0], (list, tuple)):
inputs = list(inputs[0])
else:
inputs = [inputs[0]]
else:
inputs = list(inputs)
#print(self, ".link(): inputs:", inputs)
if not self.Configured:
shape = self.configure(inputs)
#print(" self.Shape -> ", self.Shape)
self.Configured = True
else:
shape = self.check_configuration(inputs)
lnk = Link(self, shape, inputs)
if self.Activation is not None:
assert isinstance(self.Activation, Layer)
lnk = self.Activation.link([lnk])
return lnk
__call__ = link
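# Linking sketch (hypothetical layer classes; none are defined in this file):
#
#   x = Input((16,))
#   h = SomeDenseLayer(8, activation="relu")(x)   # Layer.__call__ is link(); returns a Link
#   y = SomeDenseLayer(1)(h)
#
# The first link() call configures the layer from its input shapes; later calls
# only check that new inputs are compatible with that configuration.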
def set_optimizer(self, param_optimizer):
assert self.Configured, f"Layer {self}: Can not set layer optimizer before it is configured"
self.Optimizer = LayerOptimizer(self.params, param_optimizer)
def reset_gradients(self):
#print("Layer.reset_gradients:", self)
self.PGradSum = None
self.NSamples = 0
def ____compute(self, xs, in_state):
y, out_state, context = self.call(xs, in_state)
if self.Activation is not None:
z, _, a_context = self.Activation.call(y, None) # assume activation is stateless
context = (context, y, a_context)
y = z
return y, out_state, context
def ____backprop(self, ygrads, sgrads, xs, y, context):
if self.Activation is not None:
z = y
y_context, y, a_context = context
ygrads, _, _ = self.Activation.grads(ygrads, None, [y], z, a_context) # assumes activation is a stateless and param-less
ygrads = ygrads[0]
x_grads, p_grads, s_in_grads = self.grads(ygrads, sgrads, xs, y, y_context)
else:
x_grads, p_grads, s_in_grads = self.grads(ygrads, sgrads, xs, y, context)
#print(self,".backprop: ygrads:", ygrads.shape, " pgrads:", [g.shape for g in p_grads] if p_grads else "-")
nsamples = len(ygrads)
if self.PGradSum is None:
self.PGradSum = p_grads
self.NSamples = nsamples
else:
for g, g1 in zip(self.PGradSum, p_grads):
g[...] += g1
self.NSamples += nsamples
return x_grads, s_in_grads
def backprop(self, ygrads, sgrads, xs, y, context):
x_grads, p_grads, s_in_grads = self.grads(ygrads, sgrads, xs, y, context)
#print(self,".backprop: ygrads:", ygrads.shape, " pgrads:", [g.shape for g in p_grads] if p_grads else "-")
nsamples = len(ygrads)
if self.PGradSum is None:
self.PGradSum = p_grads
self.NSamples = nsamples
else:
for g, g1 in zip(self.PGradSum, p_grads):
g[...] += g1
self.NSamples += nsamples
#print(self, ".backprop: pgardsum:", self.PGradSum)
#print(self, ".backprop: ygrads:", ygrads, " -> x_grads:", x_grads)
self.XGradients = x_grads # saved for information purposes. Not used by the Layer itself
self.StateInGradients = s_in_grads # saved for information purposes. Not used by the Layer itself
return x_grads, s_in_grads
def apply_deltas(self):
deltas = None
self.WeightsGradients = None if self.PGradSum is None else [g.copy() for g in self.PGradSum] # saved for information purposes. Not used by the Layer itself
if self.PGradSum is not None and self.NSamples > 0:
#grads = [g/self.NSamples for g in self.PGradSum]
deltas = self.Optimizer.apply_deltas(self.PGradSum, self.params)
self.reset_gradients()
self.Deltas = deltas
return deltas
def set_weights(self, weights):
if not self.Configured:
raise RuntimeError("Layer is not configured")
self._set_weights(weights)
# overridables
def _set_weights(self, weights):
raise NotImplementedError()
def get_weights(self):
return [w.copy() for w in self.params]
def configure(self, inputs):
raise NotImplementedError()
pass
# return shape
def check_configuration(self, inputs):
pass
# return shape
def compute(self, inputs, in_state):
raise NotImplementedError(f"Layer {self.__class__.__name__} does not implement compute() method")
return y, out_state, context
def grads(self, y_grads, s_out_grads, xs, y, context):
# y_grads: [mb, ...] - dL/dy[j]
# returns:
# x_grads : [mb, ...] - grad per sample
# p_grad : [...] - sum of grad over the minibatch
# s_in_grads : [mb, ...] - grad per sample
raise NotImplementedError()
return x_grads, p_grads, s_in_grads
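# Minimal standalone sketch of a concrete layer honoring the compute()/grads()
# contract documented above (hypothetical; not part of this library, shown inline
# here for reference). It scales its single input by one learned scalar, so the
# parameter and gradient bookkeeping stays easy to follow.
class _ScaleExample(Layer):
    def configure(self, inputs):
        self.W = np.array([1.0])      # one learnable scalar
        self.params = [self.W]
        return inputs[0].Shape        # output shape equals input shape

    def check_configuration(self, inputs):
        return inputs[0].Shape

    def _set_weights(self, weights):
        self.W[...] = weights[0]

    def compute(self, xs, in_state):
        y = xs[0] * self.W[0]
        return y, None, None          # (y, out_state, context); stateless layer

    def grads(self, y_grads, s_out_grads, xs, y, context):
        x_grads = [y_grads * self.W[0]]                   # dL/dx, per sample
        p_grads = [np.array([np.sum(y_grads * xs[0])])]   # dL/dW, summed over the minibatch
        return x_grads, p_grads, None                     # no input-state gradients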
def check_gradients(self, input_shapes, minibatch=1, attempts=1000,
xs = None, state_in="init",
include_state=True, include_value=True,
tolerance=0.001,
relative_tolerance = 0.01,
delta = 1.0e-4):
import numpy as np
import random
def tolerated(g1, g2):
g12 = (g1+g2)/2
return abs(g1-g2) < tolerance or \
g12 != 0 and abs(g1-g2)/g12 < relative_tolerance
def loss_y_and_s(y, s):
if s is None:
return np.sum(y), np.ones(y.shape), None
else:
s_is_list = isinstance(s, (list, tuple))
if s_is_list:
ssum = sum(np.sum(si) for si in s)
sgrads = [np.ones(si.shape) for si in s]
else:
ssum = np.sum(s)
sgrads = np.ones(s.shape)
return np.sum(y) + ssum, np.ones(y.shape), sgrads
def loss_s_only(y, s):
s_is_list = isinstance(s, (list, tuple))
if s_is_list:
ssum = sum(np.sum(si) for si in s)
sgrads = [np.ones(si.shape) for si in s]
else:
ssum = np.sum(s)
sgrads = np.ones(s.shape)
return ssum, np.zeros(y.shape), sgrads
def loss_y_only(y, s):
return np.sum(y), np.ones(y.shape), None
from gradnet import Input
# input shapes are shapes without the minibatch dimension
input_shapes = make_list(input_shapes)
inputs = [Input(s) for s in input_shapes]
link = self.link(inputs)
out_shape = link.Shape
weights = self.get_weights()
#print("check_grads: weights:", weights)
xs = make_list(xs)
if xs is None:
x0s = [np.random.random((minibatch,)+s)*2-1 for s in input_shapes]
else:
x0s = xs
#x0s = [np.ones((1,)+s) for s in input_shapes]
w0s = [w.copy() for w in weights]
if state_in == "init":
_, s_in_0, _ = self.compute(x0s, None) # just to get a sample of a possible input state
else:
s_in_0 = state_in
#s_in_0[...] = 0.0
y0, s_out_0, context = self.compute(x0s, s_in_0)
#print("y0=", y0)
#print("s_out_0=", s_out_0)
loss = loss_y_only if not include_state else \
(loss_s_only if not include_value else loss_y_and_s)
l0, dldy, dlds = loss(y0, s_out_0)
#print("check_gradients: dldy:", dldy.shape)
#print("check_gradients: x0s:", [x.shape for x in x0s])
#
# have the layer calculate gradients
#
x_grads, w_grads, s_in_grads = self.grads(dldy, dlds, x0s, y0, context)
#print("check_grads: point gradients:")
#print(" x:", x_grads)
#print(" w:", w_grads)
#print(" s:", s_in_grads)
x_errors = w_errors = s_errors = 0
#
# X gradients
#
for t in range(attempts):
x1s = [x.copy() for x in x0s]
i = random.randint(0, len(x1s)-1)
xi = x1s[i]
xif = xi.reshape((-1,))
n = xif.shape[-1]
j = random.randint(0, n-1)
inx = np.unravel_index(j, xi.shape)
xif[j] += delta
y1, s_out, _ = self.compute(x1s, s_in_0)
l1, _, _ = loss(y1, s_out)
#print("y1:", y1)
#print("x1s:", x1s)
#print("Loss values", l0, l1)
dldx = (l1-l0)/delta
# compare to the gradients returned by the layer
grad_i = x_grads[i]
dldx_l = grad_i[inx]
if not tolerated(dldx_l,dldx):
print(f"==== Detected difference in dL/dx[{i}][{inx}]: computed:{dldx_l}, numerically calculated:{dldx}")
x_errors += 1
#
# Weights gradients
#
if w_grads:
for t in range(attempts):
w1s = [w.copy() for w in w0s]
i = random.randint(0, len(w1s)-1)
wi = w1s[i]
wif = wi.reshape((-1,))
n = wif.shape[-1]
j = random.randint(0, n-1)
inx = np.unravel_index(j, wi.shape)
wif[j] += delta
self.set_weights(w1s)
y1, s_out, _ = self.compute(x0s, s_in_0)
l1, _, _ = loss(y1, s_out)
self.set_weights(w0s)
dldw = (l1-l0)/delta
# compare to the gradients returned by the layer
grad_i = w_grads[i]
#print("check_grads: i=",i," grad_i=", grad_i.shape, " inx=", inx)
dldw_l = grad_i[inx]
if not tolerated(dldw_l, dldw):
print(f"==== Detected difference in dL/dw[{i}][{inx}]: computed:{dldw_l}, numerically calculated:{dldw}")
w_errors += 1
#
# Input state gradients
#
if s_in_0 is not None and s_in_grads
from __future__ import annotations
import logging
import os
import pytest
zict = pytest.importorskip("zict")
from packaging.version import parse as parse_version
from dask.sizeof import sizeof
from distributed.compatibility import WINDOWS
from distributed.protocol import serialize_bytelist
from distributed.spill import SpillBuffer
from distributed.utils_test import captured_logger
def psize(*objs) -> tuple[int, int]:
"""Return (in-memory size per sizeof(), serialized size on disk) for the given objects."""
return (
sum(sizeof(o) for o in objs),
sum(len(frame) for obj in objs for frame in serialize_bytelist(obj)),
)
def test_spillbuffer(tmpdir):
buf = SpillBuffer(str(tmpdir), target=300)
# Convenience aliases
assert buf.memory is buf.fast
assert buf.disk is buf.slow
assert not buf.slow.weight_by_key
assert buf.slow.total_weight == (0, 0)
assert buf.spilled_total == (0, 0)
a, b, c, d = "a" * 100, "b" * 99, "c" * 98, "d" * 97
# Test assumption made by this test, mostly for non-CPython implementations
assert 100 < sizeof(a) < 200
assert psize(a)[0] != psize(a)[1]
buf["a"] = a
assert not buf.slow
assert buf.fast.weights == {"a": sizeof(a)}
assert buf.fast.total_weight == sizeof(a)
assert buf.slow.weight_by_key == {}
assert buf.slow.total_weight == (0, 0)
assert buf["a"] == a
buf["b"] = b
assert not buf.slow
assert not buf.slow.weight_by_key
assert buf.slow.total_weight == (0, 0)
buf["c"] = c
assert set(buf.slow) == {"a"}
assert buf.slow.weight_by_key == {"a": psize(a)}
assert buf.slow.total_weight == psize(a)
assert buf["a"] == a
assert set(buf.slow) == {"b"}
assert buf.slow.weight_by_key == {"b": psize(b)}
assert buf.slow.total_weight == psize(b)
buf["d"] = d
assert set(buf.slow) == {"b", "c"}
assert buf.slow.weight_by_key == {"b": psize(b), "c": psize(c)}
assert buf.slow.total_weight == psize(b, c)
# Deleting an in-memory key does not automatically move spilled keys back to memory
del buf["a"]
assert set(buf.slow) == {"b", "c"}
assert buf.slow.weight_by_key == {"b": psize(b), "c": psize(c)}
assert buf.slow.total_weight == psize(b, c)
with pytest.raises(KeyError):
buf["a"]
# Deleting a spilled key updates the metadata
del buf["b"]
assert set(buf.slow) == {"c"}
assert buf.slow.weight_by_key == {"c": psize(c)}
assert buf.slow.total_weight == psize(c)
with pytest.raises(KeyError):
buf["b"]
# Updating a spilled key moves it to the top of the LRU and to memory
buf["c"] = c * 2
assert set(buf.slow) == {"d"}
assert buf.slow.weight_by_key == {"d": psize(d)}
assert buf.slow.total_weight == psize(d)
# Single key is larger than target and goes directly into slow
e = "e" * 500
buf["e"] = e
assert set(buf.slow) == {"d", "e"}
assert buf.slow.weight_by_key == {"d": psize(d), "e": psize(e)}
assert buf.slow.total_weight == psize(d, e)
# Updating a spilled key with another larger than target updates slow directly
d = "d" * 500
buf["d"] = d
assert set(buf.slow) == {"d", "e"}
assert buf.slow.weight_by_key == {"d": psize(d), "e": psize(e)}
assert buf.slow.total_weight == psize(d, e)
requires_zict_210 = pytest.mark.skipif(
parse_version(zict.__version__) <= parse_version("2.0.0"),
reason="requires zict version > 2.0.0",
)
@requires_zict_210
def test_spillbuffer_maxlim(tmpdir):
buf = SpillBuffer(str(tmpdir), target=200, max_spill=600, min_log_interval=0)
a, b, c, d, e = "a" * 200, "b" * 100, "c" * 99, "d" * 199, "e" * 98
# size of a is bigger than target and is smaller than max_spill;
# key should be in slow
buf["a"] = a
assert not buf.fast
assert not buf.fast.weights
assert set(buf.slow) == {"a"}
assert buf.slow.weight_by_key == {"a": psize(a)}
assert buf.slow.total_weight == psize(a)
assert buf["a"] == a
# size of b is smaller than target; key should be in fast
buf["b"] = b
assert set(buf.fast) == {"b"}
assert buf.fast.weights == {"b": sizeof(b)}
assert buf["b"] == b
assert buf.fast.total_weight == sizeof(b)
# size of c is smaller than target but b+c > target, c should stay in fast and b
# move to slow since the max_spill limit has not been reached yet
buf["c"] = c
assert set(buf.fast) == {"c"}
assert buf.fast.weights == {"c": sizeof(c)}
assert buf["c"] == c
assert buf.fast.total_weight == sizeof(c)
assert set(buf.slow) == {"a", "b"}
assert buf.slow.weight_by_key == {"a": psize(a), "b": psize(b)}
assert buf.slow.total_weight == psize(a, b)
# size of e < target but e+c > target; this will trigger movement of c to slow,
# but the max_spill limit prevents it, resulting in e remaining in fast
with captured_logger(logging.getLogger("distributed.spill")) as logs_e:
buf["e"] = e
assert "disk reached capacity" in logs_e.getvalue()
assert set(buf.fast) == {"c", "e"}
assert buf.fast.weights == {"c": sizeof(c), "e": sizeof(e)}
assert buf["e"] == e
assert buf.fast.total_weight == sizeof(c) + sizeof(e)
assert set(buf.slow) == {"a", "b"}
assert buf.slow.weight_by_key == {"a": psize(a), "b": psize(b)}
assert buf.slow.total_weight == psize(a, b)
# size of d > target, d should go to slow but slow reached the max_spill limit, so
# d will end up on fast with c (which can't be moved to slow because it won't fit
# either)
with captured_logger(logging.getLogger("distributed.spill")) as logs_d:
buf["d"] = d
assert "disk reached capacity" in logs_d.getvalue()
assert set(buf.fast) == {"c", "d", "e"}
assert buf.fast.weights == {"c": sizeof(c), "d": sizeof(d), "e": sizeof(e)}
assert buf["d"] == d
assert buf.fast.total_weight == sizeof(c) + sizeof(d) + sizeof(e)
assert set(buf.slow) == {"a", "b"}
assert buf.slow.weight_by_key == {"a": psize(a), "b": psize(b)}
assert buf.slow.total_weight == psize(a, b)
# Overwrite a key that was in slow, but the size of the new key is larger than
# max_spill
a_large = "a" * 500
assert psize(a_large)[1] > 600 # size of max_spill
with captured_logger(logging.getLogger("distributed.spill")) as logs_alarge:
buf["a"] = a_large
assert "disk reached capacity" in logs_alarge.getvalue()
assert set(buf.fast) == {"a", "d", "e"}
assert set(buf.slow) == {"b", "c"}
assert buf.fast.total_weight == sizeof(d) + sizeof(a_large) + sizeof(e)
assert buf.slow.total_weight == psize(b, c)
# Overwrite a key that was in fast, but the size of the new key is larger than
# max_spill
d_large = "d" * 501
with captured_logger(logging.getLogger("distributed.spill")) as logs_dlarge:
buf["d"] = d_large
assert "disk reached capacity" in logs_dlarge.getvalue()
assert set(buf.fast) == {"a", "d", "e"}
assert set(buf.slow) == {"b", "c"}
assert buf.fast.total_weight == sizeof(a_large) + sizeof(d_large) + sizeof(e)
assert buf.slow.total_weight == psize(b, c)
class MyError(Exception):
pass
class Bad:
def __init__(self, size):
self.size = size
def __getstate__(self):
raise MyError()
def __sizeof__(self):
return self.size
@requires_zict_210
def test_spillbuffer_fail_to_serialize(tmpdir):
buf = SpillBuffer(str(tmpdir), target=200, max_spill=600, min_log_interval=0)
# bad data individually larger than spill threshold target 200
a = Bad(size=201)
# Exception caught in the worker
with pytest.raises(TypeError, match="Could not serialize"):
with captured_logger(logging.getLogger("distributed.spill")) as logs_bad_key:
buf["a"] = a
# spill.py must remain silent because we're already logging in worker.py
assert not logs_bad_key.getvalue()
assert not set(buf.fast)
assert not set(buf.slow)
b = Bad(size=100) # this is small enough to fit in memory/fast
buf["b"] = b
assert set(buf.fast) == {"b"}
c = "c" * 100
with captured_logger(logging.getLogger("distributed.spill")) as logs_bad_key_mem:
# This will go to fast and try to kick b out,
# but keep b in fast since it's not picklable
buf["c"] = c
# worker.py won't intercept the exception here, so spill.py must dump the traceback
logs_value = logs_bad_key_mem.getvalue()
assert "Failed to pickle" in logs_value # from distributed.spill
assert "Traceback" in logs_value # from distributed.spill
assert set(buf.fast) == {"b", "c"}
assert buf.fast.total_weight == sizeof(b) + sizeof(c)
assert not set(buf.slow)
@requires_zict_210
@pytest.mark.skipif(WINDOWS, reason="Needs chmod")
def test_spillbuffer_oserror(tmpdir):
buf = SpillBuffer(str(tmpdir), target=200, max_spill=800, min_log_interval=0)
a, b, c, d = (
"a" * 200,
"b" * 100,
"c" * 201,
"d" * 101,
)
# let's have something in fast and something in slow
buf["a"] = a
buf["b"] = b
assert set(buf.fast) == {"b"}
assert set(buf.slow) == {"a"}
# modify permissions of disk to be read only.
# This causes writes to raise OSError, just like in case of disk full.
os.chmod(tmpdir, 0o555)
# Add key > than target
with captured_logger(logging.getLogger("distributed.spill")) as logs_oserror_slow:
buf["c"] = c
assert "Spill to disk failed" in logs_oserror_slow.getvalue()
assert set(buf.fast) == {"b", "c"}
assert set(buf.slow) == {"a"}
assert buf.slow.weight_by_key == {"a": psize(a)}
assert buf.fast.weights == {"b": sizeof(b), "c": sizeof(c)}
del buf["c"]
assert set(buf.fast) == {"b"}
assert set(buf.slow) == {"a"}
# add key to fast which is smaller than target but when added it triggers spill,
# which triggers OSError
with captured_logger(logging.getLogger("distributed.spill")) as logs_oserror_evict:
buf["d"] = d
assert "Spill to disk failed" in logs_oserror_evict.getvalue()
assert set(buf.fast) == {"b", "d"}
assert set(buf.slow) == {"a"}
assert buf.slow.weight_by_key == {"a": psize(a)}
assert buf.fast.weights == {"b": sizeof(b), "d": sizeof(d)}
@requires_zict_210
def test_spillbuffer_evict(tmpdir):
buf = SpillBuffer(str(tmpdir), target=300, min_log_interval=0)
a_bad = Bad(size=100)
a = "a" * 100
buf["a"] = a
assert set(buf.fast) == {"a"}
assert not set(buf.slow)
assert buf.fast.weights == {"a": sizeof(a)}
# successful eviction
weight = buf.evict()
assert weight == sizeof(a)
assert not buf.fast
# tests/staticfiles_tests/test_storage.py
import json
import os
import shutil
import sys
import tempfile
import unittest
from io import StringIO
from pathlib import Path
from unittest import mock
from django.conf import settings
from django.contrib.staticfiles import finders, storage
from django.contrib.staticfiles.management.commands.collectstatic import (
Command as CollectstaticCommand,
)
from django.core.management import call_command
from django.test import SimpleTestCase, override_settings
from .cases import CollectionTestCase
from .settings import TEST_ROOT
def hashed_file_path(test, path):
fullpath = test.render_template(test.static_template_snippet(path))
return fullpath.replace(settings.STATIC_URL, '')
class TestHashedFiles:
hashed_file_path = hashed_file_path
def tearDown(self):
# Clear hashed files to avoid side effects among tests.
storage.staticfiles_storage.hashed_files.clear()
def assertPostCondition(self):
"""
Assert post conditions for a test are met. Must be manually called at
the end of each test.
"""
pass
def test_template_tag_return(self):
self.assertStaticRaises(ValueError, "does/not/exist.png", "/static/does/not/exist.png")
self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt")
self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt", asvar=True)
self.assertStaticRenders("cached/styles.css", "/static/cached/styles.5e0040571e1a.css")
self.assertStaticRenders("path/", "/static/path/")
self.assertStaticRenders("path/?query", "/static/path/?query")
self.assertPostCondition()
def test_template_tag_simple_content(self):
relpath = self.hashed_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.5e0040571e1a.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
self.assertPostCondition()
def test_path_ignored_completely(self):
relpath = self.hashed_file_path("cached/css/ignored.css")
self.assertEqual(relpath, "cached/css/ignored.554da52152af.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b'#foobar', content)
self.assertIn(b'http:foobar', content)
self.assertIn(b'https:foobar', content)
self.assertIn(b'data:foobar', content)
self.assertIn(b'chrome:foobar', content)
self.assertIn(b'//foobar', content)
self.assertPostCondition()
def test_path_with_querystring(self):
relpath = self.hashed_file_path("cached/styles.css?spam=eggs")
self.assertEqual(relpath, "cached/styles.5e0040571e1a.css?spam=eggs")
with storage.staticfiles_storage.open("cached/styles.5e0040571e1a.css") as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
self.assertPostCondition()
def test_path_with_fragment(self):
relpath = self.hashed_file_path("cached/styles.css#eggs")
self.assertEqual(relpath, "cached/styles.5e0040571e1a.css#eggs")
with storage.staticfiles_storage.open("cached/styles.5e0040571e1a.css") as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
self.assertPostCondition()
def test_path_with_querystring_and_fragment(self):
relpath = self.hashed_file_path("cached/css/fragments.css")
self.assertEqual(relpath, "cached/css/fragments.a60c0e74834f.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b'fonts/font.b9b105392eb8.eot?#iefix', content)
self.assertIn(b'fonts/font.b8d603e42714.svg#webfontIyfZbseF', content)
self.assertIn(b'fonts/font.b8d603e42714.svg#path/to/../../fonts/font.svg', content)
self.assertIn(b'data:font/woff;charset=utf-8;base64,d09GRgABAAAAADJoAA0AAAAAR2QAAQAAAAAAAAAAAAA', content)
self.assertIn(b'#default#VML', content)
self.assertPostCondition()
def test_template_tag_absolute(self):
relpath = self.hashed_file_path("cached/absolute.css")
self.assertEqual(relpath, "cached/absolute.eb04def9f9a4.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"/static/cached/styles.css", content)
self.assertIn(b"/static/cached/styles.5e0040571e1a.css", content)
self.assertNotIn(b"/static/styles_root.css", content)
self.assertIn(b"/static/styles_root.401f2509a628.css", content)
self.assertIn(b'/static/cached/img/relative.acae32e4532b.png', content)
self.assertPostCondition()
def test_template_tag_absolute_root(self):
"""
Like test_template_tag_absolute, but for a file in STATIC_ROOT (#26249).
"""
relpath = self.hashed_file_path("absolute_root.css")
self.assertEqual(relpath, "absolute_root.f821df1b64f7.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"/static/styles_root.css", content)
self.assertIn(b"/static/styles_root.401f2509a628.css", content)
self.assertPostCondition()
def test_template_tag_relative(self):
relpath = self.hashed_file_path("cached/relative.css")
self.assertEqual(relpath, "cached/relative.c3e9e1ea6f2e.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"../cached/styles.css", content)
self.assertNotIn(b'@import "styles.css"', content)
self.assertNotIn(b'url(img/relative.png)', content)
self.assertIn(b'url("img/relative.acae32e4532b.png")', content)
self.assertIn(b"../cached/styles.5e0040571e1a.css", content)
self.assertPostCondition()
def test_import_replacement(self):
"See #18050"
relpath = self.hashed_file_path("cached/import.css")
self.assertEqual(relpath, "cached/import.f53576679e5a.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b"""import url("styles.5e0040571e1a.css")""", relfile.read())
self.assertPostCondition()
def test_template_tag_deep_relative(self):
relpath = self.hashed_file_path("cached/css/window.css")
self.assertEqual(relpath, "cached/css/window.5d5c10836967.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b'url(img/window.png)', content)
self.assertIn(b'url("img/window.acae32e4532b.png")', content)
self.assertPostCondition()
def test_template_tag_url(self):
relpath = self.hashed_file_path("cached/url.css")
self.assertEqual(relpath, "cached/url.902310b73412.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b"https://", relfile.read())
self.assertPostCondition()
def test_module_import(self):
relpath = self.hashed_file_path('cached/module.js')
self.assertEqual(relpath, 'cached/module.91b9cf9935da.js')
tests = [
# Relative imports.
b'import testConst from "./module_test.d489af3cf882.js";',
b'import relativeModule from "../nested/js/nested.866475c46bb4.js";',
b'import { firstConst, secondConst } from "./module_test.d489af3cf882.js";',
# Absolute import.
b'import rootConst from "/static/absolute_root.5586327fe78c.js";',
# Dynamic import.
b'const dynamicModule = import("./module_test.d489af3cf882.js");',
# Creating a module object.
b'import * as NewModule from "./module_test.d489af3cf882.js";',
# Aliases.
b'import { testConst as alias } from "./module_test.d489af3cf882.js";',
b'import {\n'
b' firstVar as firstVarAlias,\n'
b' secondVar as secondVarAlias\n'
b'} from "./module_test.d489af3cf882.js";',
]
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
for module_import in tests:
with self.subTest(module_import=module_import):
self.assertIn(module_import, content)
self.assertPostCondition()
def test_aggregating_modules(self):
relpath = self.hashed_file_path('cached/module.js')
self.assertEqual(relpath, 'cached/module.91b9cf9935da.js')
tests = [
b'export * from "./module_test.d489af3cf882.js";',
b'export { testConst } from "./module_test.d489af3cf882.js";',
b'export {\n'
b' firstVar as firstVarAlias,\n'
b' secondVar as secondVarAlias\n'
b'} from "./module_test.d489af3cf882.js";',
]
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
for module_import in tests:
with self.subTest(module_import=module_import):
self.assertIn(module_import, content)
self.assertPostCondition()
@override_settings(
STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'loop')],
STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
)
def test_import_loop(self):
finders.get_finder.cache_clear()
err = StringIO()
with self.assertRaisesMessage(RuntimeError, 'Max post-process passes exceeded'):
call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
self.assertEqual("Post-processing 'All' failed!\n\n", err.getvalue())
self.assertPostCondition()
def test_post_processing(self):
"""
post_processing behaves correctly.
Files that are alterable should always be post-processed; files that
aren't should be skipped.
collectstatic has already been called once in setUp() for this testcase,
therefore we check by verifying behavior on a second run.
"""
collectstatic_args = {
'interactive': False,
'verbosity': 0,
'link': False,
'clear': False,
'dry_run': False,
'post_process': True,
'use_default_ignore_patterns': True,
'ignore_patterns': ['*.ignoreme'],
}
collectstatic_cmd = CollectstaticCommand()
collectstatic_cmd.set_options(**collectstatic_args)
stats = collectstatic_cmd.collect()
self.assertIn(os.path.join('cached', 'css', 'window.css'), stats['post_processed'])
self.assertIn(os.path.join('cached', 'css', 'img', 'window.png'), stats['unmodified'])
self.assertIn(os.path.join('test', 'nonascii.css'), stats['post_processed'])
# No file should be yielded twice.
self.assertCountEqual(stats['post_processed'], set(stats['post_processed']))
self.assertPostCondition()
def test_css_import_case_insensitive(self):
relpath = self.hashed_file_path("cached/styles_insensitive.css")
self.assertEqual(relpath, "cached/styles_insensitive.3fa427592a53.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
self.assertPostCondition()
def test_js_source_map(self):
relpath = self.hashed_file_path('cached/source_map.js')
self.assertEqual(relpath, 'cached/source_map.9371cbb02a26.js')
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b'//# sourceMappingURL=source_map.js.map', content)
self.assertIn(
b'//# sourceMappingURL=source_map.js.99914b932bd3.map',
content,
)
self.assertPostCondition()
def test_js_source_map_sensitive(self):
relpath = self.hashed_file_path('cached/source_map_sensitive.js')
self.assertEqual(relpath, 'cached/source_map_sensitive.5da96fdd3cb3.js')
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b'//# sOuRcEMaPpInGURL=source_map.js.map', content)
self.assertNotIn(
b'//# sourceMappingURL=source_map.js.99914b932bd3.map',
content,
)
self.assertPostCondition()
@override_settings(
STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'faulty')],
STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
)
def test_post_processing_failure(self):
"""
post_processing indicates the origin of the error when it fails.
"""
finders.get_finder.cache_clear()
err = StringIO()
with self.assertRaises(Exception):
call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
self.assertEqual("Post-processing 'faulty.css' failed!\n\n", err.getvalue())
self.assertPostCondition()
@override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.ExtraPatternsStorage')
class TestExtraPatternsStorage(CollectionTestCase):
def setUp(self):
storage.staticfiles_storage.hashed_files.clear() # avoid cache interference
super().setUp()
def cached_file_path(self, path):
fullpath = self.render_template(self.static_template_snippet(path))
return fullpath.replace(settings.STATIC_URL, '')
def test_multi_extension_patterns(self):
"""
With storage classes having several file extension patterns, only the
files matching a specific file pattern should be affected by the
substitution (#19670).
"""
# CSS files shouldn't be touched by JS patterns.
relpath = self.cached_file_path("cached/import.css")
self.assertEqual(relpath, "cached/import.f53576679e5a.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b'import url("styles.5e0040571e1a.css")', relfile.read())
# Confirm JS patterns have been applied to JS files.
relpath = self.cached_file_path("cached/test.js")
self.assertEqual(relpath, "cached/test.388d7a790d46.js")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b'JS_URL("import.f53576679e5a.css")', relfile.read())
@override_settings(
STATICFILES_STORAGE='django.contrib.staticfiles.storage.ManifestStaticFilesStorage',
)
class TestCollectionManifestStorage(TestHashedFiles, CollectionTestCase):
"""
Tests for the cache-busting storage.
"""
def setUp(self):
super().setUp()
temp_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(temp_dir, 'test'))
self._clear_filename = os.path.join(temp_dir, 'test', 'cleared.txt')
with open(self._clear_filename, 'w') as f:
f.write('to be deleted in one test')
self.patched_settings = self.settings(
STATICFILES_DIRS=settings.STATICFILES_DIRS + [temp_dir],
)
self.patched_settings.enable()
self.addCleanup(shutil.rmtree, temp_dir)
self._manifest_strict = storage.staticfiles_storage.manifest_strict
def tearDown(self):
self.patched_settings.disable()
if os.path.exists(self._clear_filename):
os.unlink(self._clear_filename)
storage.staticfiles_storage.manifest_strict = self._manifest_strict
super().tearDown()
def assertPostCondition(self):
hashed_files = storage.staticfiles_storage.hashed_files
# The in-memory version of the manifest matches the one on disk
# since a properly created manifest should cover all filenames.
if hashed_files:
manifest = storage.staticfiles_storage.load_manifest()
self.assertEqual(hashed_files, manifest)
def test_manifest_exists(self):
filename = storage.staticfiles_storage.manifest_name
path = storage.staticfiles_storage.path(filename)
self.assertTrue(os.path.exists(path))
def test_manifest_does_not_exist(self):
storage.staticfiles_storage.manifest_name = 'does.not.exist.json'
self.assertIsNone(storage.staticfiles_storage.read_manifest())
def test_manifest_does_not_ignore_permission_error(self):
with mock.patch('builtins.open', side_effect=PermissionError):
with self.assertRaises(PermissionError):
storage.staticfiles_storage.read_manifest()
def test_loaded_cache(self):
self.assertNotEqual(storage.staticfiles_storage.hashed_files, {})
manifest_content = storage.staticfiles_storage.read_manifest()
self.assertIn(
'"version": "%s"' % storage.staticfiles_storage.manifest_version,
manifest_content
)
def test_parse_cache(self):
hashed_files = storage.staticfiles_storage.hashed_files
manifest = storage.staticfiles_storage.load_manifest()
self.assertEqual(hashed_files, manifest)
def test_clear_empties_manifest(self):
cleared_file_name = storage.staticfiles_storage.clean_name(os.path.join('test', 'cleared.txt'))
# collect the additional file
self.run_collectstatic()
hashed_files = storage.staticfiles_storage.hashed_files
self.assertIn(cleared_file_name, hashed_files)
manifest_content = storage.staticfiles_storage.load_manifest()
self.assertIn(cleared_file_name, manifest_content)
original_path = storage.staticfiles_storage.path(cleared_file_name)
self.assertTrue(os.path.exists(original_path))
# delete the original file from the app, collect with clear
os.unlink(self._clear_filename)
self.run_collectstatic(clear=True)
self.assertFileNotFound(original_path)
hashed_files = storage.staticfiles_storage.hashed_files
self.assertNotIn(cleared_file_name, hashed_files)
manifest_content = storage.staticfiles_storage.load_manifest()
self.assertNotIn(cleared_file_name, manifest_content)
def test_missing_entry(self):
missing_file_name = 'cached/missing.css'
configured_storage = storage.staticfiles_storage
self.assertNotIn(missing_file_name, configured_storage.hashed_files)
# File name not found in manifest
with self.assertRaisesMessage(ValueError, "Missing staticfiles manifest entry for '%s'" % missing_file_name):
self.hashed_file_path(missing_file_name)
configured_storage.manifest_strict = False
# File doesn't exist on disk
err_msg = "The file '%s' could not be found with %r." % (missing_file_name, configured_storage._wrapped)
with self.assertRaisesMessage(ValueError, err_msg):
self.hashed_file_path(missing_file_name)
content = StringIO()
content.write('Found')
configured_storage.save(missing_file_name, content)
# File exists on disk
self.hashed_file_path(missing_file_name)
def test_intermediate_files(self):
cached_files = os.listdir(os.path.join(settings.STATIC_ROOT, 'cached'))
# Intermediate files shouldn't be created for reference.
self.assertEqual(
len([
cached_file
for cached_file in cached_files
if cached_file.startswith('relative.')
]),
2,
)
@override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.NoneHashStorage')
class TestCollectionNoneHashStorage(CollectionTestCase):
hashed_file_path = hashed_file_path
def test_hashed_name(self):
relpath = self.hashed_file_path('cached/styles.css')
self.assertEqual(relpath, 'cached/styles.css')
@override_settings(
STATICFILES_STORAGE='staticfiles_tests.storage.NoPostProcessReplacedPathStorage'
)
class TestCollectionNoPostProcessReplacedPaths(CollectionTestCase):
run_collectstatic_in_setUp = False
def test_collectstatistic_no_post_process_replaced_paths(self):
stdout = StringIO()
self.run_collectstatic(verbosity=1, stdout=stdout)
self.assertIn('post-processed', stdout.getvalue())
@override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.SimpleStorage')
class TestCollectionSimpleStorage(CollectionTestCase):
hashed_file_path = hashed_file_path
def setUp(self):
storage.staticfiles_storage.hashed_files.clear() # avoid cache interference
super().setUp()
def test_template_tag_return(self):
self.assertStaticRaises(ValueError, "does/not/exist.png", "/static/does/not/exist.png")
self.assertStaticRenders("test/file.txt", "/static/test/file.deploy12345.txt")
self.assertStaticRenders("cached/styles.css", "/static/cached/styles.deploy12345.css")
self.assertStaticRenders("path/", "/static/path/")
self.assertStaticRenders("path/?query", "/static/path/?query")
def test_template_tag_simple_content(self):
relpath = self.hashed_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.deploy12345.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.deploy12345.css", content)
class CustomManifestStorage(storage.ManifestStaticFilesStorage):
def __init__(self, *args, manifest_storage=None, **kwargs):
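# Descriptive note (added): the manifest is written through its own
# StaticFilesStorage rooted at the 'manifest_location' keyword, which is
# consumed here so it is not forwarded to the parent constructor.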
manifest_storage = storage.StaticFilesStorage(
location=kwargs.pop('manifest_location'),
)
super().__init__(*args, manifest_storage=manifest_storage, **kwargs)
class TestCustomManifestStorage(SimpleTestCase):
def setUp(self):
self.manifest_path = Path(tempfile.mkdtemp())
self.addCleanup(shutil.rmtree, self.manifest_path)
self.staticfiles_storage = CustomManifestStorage(
manifest_location=self.manifest_path,
)
self.manifest_file = self.manifest_path / self.staticfiles_storage.manifest_name
# Manifest without paths.
self.manifest = {'version': self.staticfiles_storage.manifest_version}
with self.manifest_file.open('w') as manifest_file:
json.dump(self.manifest, manifest_file)
def test_read_manifest(self):
self.assertEqual(
self.staticfiles_storage.read_manifest(),
json.dumps(self.manifest),
)
def test_read_manifest_nonexistent(self):
os.remove(self.manifest_file)
self.assertIsNone(self.staticfiles_storage.read_manifest())
def test_save_manifest_override(self):
self.assertIs(self.manifest_file.exists(), True)
self.staticfiles_storage.save_manifest()
self.assertIs(self.manifest_file.exists(), True)
new_manifest = json.loads(self.staticfiles_storage.read_manifest())
self.assertIn('paths', new_manifest)
self.assertNotEqual(new_manifest, self.manifest)
def test_save_manifest_create(self):
os.remove(self.manifest_file)
self.staticfiles_storage.save_manifest()
self.assertIs(self.manifest_file.exists(), True)
new_manifest = json.loads(self.staticfiles_storage.read_manifest())
self.assertIn('paths', new_manifest)
self.assertNotEqual(new_manifest, self.manifest)
class CustomStaticFilesStorage(storage.StaticFilesStorage):
"""
Used in TestStaticFilePermissions
"""
def __init__(self, *args, **kwargs):
kwargs['file_permissions_mode'] = 0o640
kwargs['directory_permissions_mode'] = 0o740
super().__init__(*args, **kwargs)
@unittest.skipIf(sys.platform == 'win32', "Windows only partially supports chmod.")
class TestStaticFilePermissions(CollectionTestCase):
command_params = {
'interactive': False,
'verbosity': 0,
'ignore_patterns': ['*.ignoreme'],
}
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
super().setUp()
def tearDown(self):
os.umask(self.old_umask)
super().tearDown()
# Don't run collectstatic command in this test class.
self.neck_rot, self.trunk_rot = self.axial_data
neck_rot_vert, neck_rot_horz = self.neck_rot
truk_rot_vert, truk_rot_horz = self.trunk_rot
center = self.MapVar.center
lumbar = center
sacrum = lumbar - FNS.vert_up(truk_rot_horz, truk_rot_vert, 10)
thorax = lumbar + FNS.vert_up(truk_rot_horz, truk_rot_vert, 30)
cervic = thorax + FNS.vert_up(truk_rot_horz, truk_rot_vert, 10)
base = cervic + FNS.vert_up(truk_rot_horz + neck_rot_horz, truk_rot_vert + neck_rot_vert, 5)
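# Descriptive note (added): the axial skeleton is modelled as a chain anchored
# at the lumbar centre -- sacrum 10 units below, thorax 30 above, cervic 10
# above the thorax and the head base 5 above the cervic, each offset rotated
# by the accumulated trunk (and neck) angles via FNS.vert_up().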
left_pectoral = thorax + FNS.latr_left(truk_rot_horz, 0, 10)
right_pectoral = thorax + FNS.latr_right(truk_rot_horz, 0, 10)
left_pelvic = sacrum + FNS.latr_left(0, 0, 5)
right_pelvic = sacrum + FNS.latr_right(0, 0, 5)
column = np.transpose(np.array((base, cervic, thorax, lumbar, sacrum)), (1, 0))
pectoral = np.transpose(np.array((left_pectoral, thorax, right_pectoral)), (1, 0))
pelvic = np.transpose(np.array((left_pelvic, sacrum, right_pelvic)), (1, 0))
return column, pectoral, pelvic
# draw positions of column segments
def column_plt(self, column, pectoral, pelvic):
self.MapVar.column.set_data(column[0], column[1])
self.MapVar.column.set_3d_properties(column[2])
self.MapVar.pectoral.set_data(pectoral[0], pectoral[1])
self.MapVar.pectoral.set_3d_properties(pectoral[2])
self.MapVar.pelvic.set_data(pelvic[0], pelvic[1])
self.MapVar.pelvic.set_3d_properties(pelvic[2])
cervic = (column[0][1], column[1][1], column[2][1])
sacrum = (column[0][4], column[1][4], column[2][4])
CoM = (column[0][3], column[1][3], column[2][3])
column_jnt = np.transpose(np.array((cervic, sacrum)), (1, 0))
self.MapVar.column_jnt.set_data(column_jnt[0], column_jnt[1])
self.MapVar.column_jnt.set_3d_properties(column_jnt[2])
self.MapVar.CoM.set_data(CoM[0], CoM[1])
self.MapVar.CoM.set_3d_properties(CoM[2])
# compute positions of shoulders, elbows and wrists of upper limbs
def uplimb_cpt(self, shift):
FNS = self.FNS
pectoral = self.column_cpt(shift)[1]
left_shoulder = np.array((pectoral[0][0], pectoral[1][0], pectoral[2][0]))
right_shoulder = np.array((pectoral[0][2], pectoral[1][2], pectoral[2][2]))
if shift == 0:
(left_shoul_rot_vert, left_shoul_rot_horz), (left_elbow_rot_vert, left_elbow_rot_horz), \
(left_wrist_rot_vert, left_wrist_rot_horz) = self.uplimb_rot[0]
(right_shoul_rot_vert, right_shoul_rot_horz), (right_elbow_rot_vert, right_elbow_rot_horz), \
(right_wrist_rot_vert, right_wrist_rot_horz) = self.uplimb_rot[1]
else:
self.uplimb_rot = self.append_data[0]
(left_shoul_rot_vert, left_shoul_rot_horz), (left_elbow_rot_vert, left_elbow_rot_horz), \
(left_wrist_rot_vert, left_wrist_rot_horz) = self.uplimb_rot[0]
(right_shoul_rot_vert, right_shoul_rot_horz), (right_elbow_rot_vert, right_elbow_rot_horz), \
(right_wrist_rot_vert, right_wrist_rot_horz) = self.uplimb_rot[1]
left_elbow = left_shoulder + FNS.vert_down(left_shoul_rot_horz, left_shoul_rot_vert, 15)
left_wrist = left_elbow + FNS.vert_down(left_shoul_rot_horz, left_shoul_rot_vert + left_elbow_rot_vert, 10)
left_hand = left_wrist + FNS.vert_down(left_shoul_rot_horz, left_shoul_rot_vert + left_elbow_rot_vert +
left_wrist_rot_vert, 5)
right_elbow = right_shoulder + FNS.vert_down(right_shoul_rot_horz, right_shoul_rot_vert, 15)
right_wrist = right_elbow + FNS.vert_down(right_shoul_rot_horz, right_shoul_rot_vert + right_elbow_rot_vert, 10)
right_hand = right_wrist + FNS.vert_down(right_shoul_rot_horz, right_shoul_rot_vert + right_elbow_rot_vert +
right_wrist_rot_vert, 5)
left_limb = np.transpose(np.array((left_shoulder, left_elbow, left_wrist, left_hand)), (1, 0))
right_limb = np.transpose(np.array((right_shoulder, right_elbow, right_wrist, right_hand)), (1, 0))
return left_limb, right_limb
# draw positions of upper limbs
def uplimb_plt(self, left_uplimb, right_uplimb):
self.MapVar.left_uplimb.set_data(left_uplimb[0], left_uplimb[1])
self.MapVar.left_uplimb.set_3d_properties(left_uplimb[2])
left_shoul = (left_uplimb[0][0], left_uplimb[1][0], left_uplimb[2][0])
left_elbow = (left_uplimb[0][1], left_uplimb[1][1], left_uplimb[2][1])
left_wrist = (left_uplimb[0][2], left_uplimb[1][2], left_uplimb[2][2])
left_uplimb_jnt = np.transpose(np.array((left_shoul, left_elbow, left_wrist)), (1, 0))
self.MapVar.left_uplimb_jnt.set_data(left_uplimb_jnt[0], left_uplimb_jnt[1])
self.MapVar.left_uplimb_jnt.set_3d_properties(left_uplimb_jnt[2])
self.MapVar.right_uplimb.set_data(right_uplimb[0], right_uplimb[1])
self.MapVar.right_uplimb.set_3d_properties(right_uplimb[2])
right_shoul = (right_uplimb[0][0], right_uplimb[1][0], right_uplimb[2][0])
right_elbow = (right_uplimb[0][1], right_uplimb[1][1], right_uplimb[2][1])
right_wrist = (right_uplimb[0][2], right_uplimb[1][2], right_uplimb[2][2])
right_uplimb_jnt = np.transpose(np.array((right_shoul, right_elbow, right_wrist)), (1, 0))
self.MapVar.right_uplimb_jnt.set_data(right_uplimb_jnt[0], right_uplimb_jnt[1])
self.MapVar.right_uplimb_jnt.set_3d_properties(right_uplimb_jnt[2])
# compute positions of hips, knees and ankles of lower limbs
def lowlimb_cpt(self, shift):
FNS = self.FNS
pelvic = self.column_cpt(shift)[2]
left_hip = np.array((pelvic[0][0], pelvic[1][0], pelvic[2][0]))
right_hip = np.array((pelvic[0][2], pelvic[1][2], pelvic[2][2]))
if shift == 0:
(left_hip_rot_vert, left_hip_rot_horz), (left_knee_rot_vert, left_knee_rot_horz), \
(left_ankle_rot_vert, left_ankle_rot_horz) = self.lowlimb_rot[0]
(right_hip_rot_vert, right_hip_rot_horz), (right_knee_rot_vert, right_knee_rot_horz), \
(right_ankle_rot_vert, right_ankle_rot_horz) = self.lowlimb_rot[1]
else:
self.lowlimb_rot = self.append_data[1]
(left_hip_rot_vert, left_hip_rot_horz), (left_knee_rot_vert, left_knee_rot_horz), \
(left_ankle_rot_vert, left_ankle_rot_horz) = self.lowlimb_rot[0]
(right_hip_rot_vert, right_hip_rot_horz), (right_knee_rot_vert, right_knee_rot_horz), \
(right_ankle_rot_vert, right_ankle_rot_horz) = self.lowlimb_rot[1]
left_knee = left_hip + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert, 20)
left_ankle = left_knee + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert + left_knee_rot_vert, 15)
left_foot = left_ankle + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert + left_knee_rot_vert +
left_ankle_rot_vert + np.pi / 2, 5)
left_limb = np.transpose(np.array((left_hip, left_knee, left_ankle, left_foot)), (1, 0))
right_knee = right_hip + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert, 20)
right_ankle = right_knee + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert + right_knee_rot_vert, 15)
right_foot = right_ankle + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert + right_knee_rot_vert +
right_ankle_rot_vert + np.pi / 2, 5)
right_limb = np.transpose(np.array((right_hip, right_knee, right_ankle, right_foot)), (1, 0))
return left_limb, right_limb
# draw positions of lower limbs
def lowlimb_plt(self, left_lowlimb, right_lowlimb):
self.MapVar.left_lowlimb.set_data(left_lowlimb[0], left_lowlimb[1])
self.MapVar.left_lowlimb.set_3d_properties(left_lowlimb[2])
left_hip = (left_lowlimb[0][0], left_lowlimb[1][0], left_lowlimb[2][0])
left_knee = (left_lowlimb[0][1], left_lowlimb[1][1], left_lowlimb[2][1])
left_ankle = (left_lowlimb[0][2], left_lowlimb[1][2], left_lowlimb[2][2])
left_lowlimb_jnt = np.transpose(np.array((left_hip, left_knee, left_ankle)), (1, 0))
self.MapVar.left_lowlimb_jnt.set_data(left_lowlimb_jnt[0], left_lowlimb_jnt[1])
self.MapVar.left_lowlimb_jnt.set_3d_properties(left_lowlimb_jnt[2])
self.MapVar.right_lowlimb.set_data(right_lowlimb[0], right_lowlimb[1])
self.MapVar.right_lowlimb.set_3d_properties(right_lowlimb[2])
right_hip = (right_lowlimb[0][0], right_lowlimb[1][0], right_lowlimb[2][0])
right_knee = (right_lowlimb[0][1], right_lowlimb[1][1], right_lowlimb[2][1])
right_ankle = (right_lowlimb[0][2], right_lowlimb[1][2], right_lowlimb[2][2])
right_lowlimb_jnt = np.transpose(np.array((right_hip, right_knee, right_ankle)), (1, 0))
self.MapVar.right_lowlimb_jnt.set_data(right_lowlimb_jnt[0], right_lowlimb_jnt[1])
self.MapVar.right_lowlimb_jnt.set_3d_properties(right_lowlimb_jnt[2])
# test if a shift of the CoM would push either foot into the ground
def lowlimb_tst(self, shift):
FNS = self.FNS
neck_rot_vert, neck_rot_horz = self.axial_data[0]
truk_rot_vert, truk_rot_horz = self.axial_data[1]
center = self.MapVar.origin - shift
sacrum = center - FNS.vert_up(truk_rot_horz, truk_rot_vert, 10)
lumbar = center
thorax = center + FNS.vert_up(truk_rot_horz, truk_rot_vert, 30)
cervic = thorax + FNS.vert_up(truk_rot_horz, truk_rot_vert, 10)
base = cervic + FNS.vert_up(truk_rot_horz + neck_rot_horz, truk_rot_vert + neck_rot_vert, 5)
left_hip = sacrum + FNS.latr_left(0, 0, 5)
right_hip = sacrum + FNS.latr_right(0, 0, 5)
(left_hip_rot_vert, left_hip_rot_horz), (left_knee_rot_vert, left_knee_rot_horz), \
(left_ankle_rot_vert, left_ankle_rot_horz) = self.append_data[1][0]
(right_hip_rot_vert, right_hip_rot_horz), (right_knee_rot_vert, right_knee_rot_horz), \
(right_ankle_rot_vert, right_ankle_rot_horz) = self.append_data[1][1]
left_knee = left_hip + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert, 20)
left_ankle = left_knee + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert + left_knee_rot_vert, 15)
left_foot = left_ankle + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert + left_knee_rot_vert +
left_ankle_rot_vert + np.pi / 2, 5)
right_knee = right_hip + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert, 20)
right_ankle = right_knee + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert + right_knee_rot_vert, 15)
right_foot = right_ankle + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert + right_knee_rot_vert +
right_ankle_rot_vert + np.pi / 2, 5)
return left_foot, right_foot
# compute external torques for the force of gravity and the ground reaction force from the joint positions of the lower limbs
def ext_forc(self, shift):
FNS = self.FNS
dep = 0.2 * self.default
fof = np.zeros((2, 3, 2))
grf = np.zeros((2, 3, 2))
base, cervic, thorax, lumbar, sacrum = np.transpose(self.column_cpt(shift)[0], (1, 0))
left_hip, left_knee, left_ankle, left_foot = np.transpose(self.lowlimb_cpt(shift)[0], (1, 0))
right_hip, right_knee, right_ankle, right_foot = np.transpose(self.lowlimb_cpt(shift)[1], (1, 0))
# magnitude of external force
mass = (50 + 5 + 20) * 0.001
# moment arm of force of gravity
CoM = np.array((lumbar[0], lumbar[1], left_hip[2]))
moment = np.linalg.norm(left_hip - CoM)
fof[0][0][0] = moment * mass
CoM = np.array((lumbar[0], lumbar[1], left_knee[2]))
moment = np.linalg.norm(left_knee - CoM)
fof[0][1][0] = moment * mass
CoM = np.array((lumbar[0], lumbar[1], left_ankle[2]))
moment = np.linalg.norm(left_ankle - CoM)
fof[0][2][0] = moment * mass
CoM = np.array((lumbar[0], lumbar[1], right_hip[2]))
moment = np.linalg.norm(right_hip - CoM)
fof[1][0][0] = moment * mass
CoM = np.array((lumbar[0], lumbar[1], right_knee[2]))
moment = np.linalg.norm(right_knee - CoM)
fof[1][1][0] = moment * mass
CoM = np.array((lumbar[0], lumbar[1], right_ankle[2]))
moment = np.linalg.norm(right_ankle - CoM)
fof[1][2][0] = moment * mass
self.MapVar.fof = fof
# moment arm of ground reaction force
left_cond = FNS.delta_fn(FNS.cond_fn(left_ankle[2], -dep), 1)
right_cond = FNS.delta_fn(FNS.cond_fn(right_ankle[2], -dep), 1)
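# Descriptive note (added, assuming cond_fn/delta_fn flag threshold crossings):
# a foot is treated as being in ground contact when its ankle height has
# dropped to -dep (20% of self.default) or below.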
# both feet on ground
if left_cond == 1 and right_cond == 1:
mid_dist = np.linalg.norm(left_ankle - right_ankle) / 2
cent = left_ankle + 0.5 * (right_ankle - left_ankle)
CoP = np.array((cent[0], cent[1], left_ankle[2]))
moment = np.linalg.norm(left_ankle - CoP)
grf[0][2][0] = moment * mass
CoP = np.array((cent[0], cent[1], left_knee[2]))
moment = np.linalg.norm(left_knee - CoP)
grf[0][1][0] = moment * mass
CoP = np.array((cent[0], cent[1], left_hip[2]))
moment = np.linalg.norm(left_hip - CoP)
grf[0][0][0] = moment * mass
CoP = np.array((cent[0], cent[1], right_ankle[2]))
moment = np.linalg.norm(right_ankle - CoP)
grf[1][2][0] = moment * mass
CoP = np.array((cent[0], cent[1], right_knee[2]))
moment = np.linalg.norm(right_knee - CoP)
grf[1][1][0] = moment * mass
CoP = np.array((cent[0], cent[1], right_hip[2]))
moment = np.linalg.norm(right_hip - CoP)
grf[1][0][0] = moment * mass
# only left foot on ground
if left_cond == 1 and right_cond != 1:
CoP = np.array((left_ankle[0], left_ankle[1], left_ankle[2]))
moment = np.linalg.norm(left_ankle - CoP)
grf[0][2][0] = moment * mass
CoP = np.array((left_ankle[0], left_ankle[1], left_knee[2]))
moment = np.linalg.norm(left_knee - CoP)
grf[0][1][0] = moment * mass
CoP = np.array((left_ankle[0], left_ankle[1], left_hip[2]))
moment = np.linalg.norm(left_hip - CoP)
grf[0][0][0] = moment * mass
# only right foot on ground
if left_cond != 1 and right_cond == 1:
CoP = np.array((right_ankle[0], right_ankle[1], right_ankle[2]))
moment = np.linalg.norm(right_ankle - CoP)
grf[1][2][0] = moment * mass
CoP = np.array((right_ankle[0], right_ankle[1], right_knee[2]))
moment = np.linalg.norm(right_knee - CoP)
grf[1][1][0] = moment * mass
CoP = np.array((right_ankle[0], right_ankle[1], right_hip[2]))
moment = np.linalg.norm(right_hip - CoP)
grf[1][0][0] = moment * mass
self.MapVar.grf = grf
# compute muscle origins and insertions of column muscles
def col_cpt(self, shift):
FNS = self.FNS
if shift == 0:
neck_rot_vert, neck_rot_horz = self.neck_rot
truk_rot_vert, truk_rot_horz = self.trunk_rot
else:
self.neck_rot, self.trunk_rot = self.axial_data
neck_rot_vert, neck_rot_horz = self.neck_rot
truk_rot_vert, truk_rot_horz = self.trunk_rot
base, cervic, thorax, lumbar, sacrum = np.transpose(self.column_cpt(shift)[0], (1, 0))
# compute muscle origins of neck muscles
base_plane = FNS.transv_plane(base, truk_rot_horz + neck_rot_horz, truk_rot_vert + neck_rot_vert, 2)
base_left = base + FNS.latr_left(truk_rot_horz + neck_rot_horz, 0, 2)
base_right = base + FNS.latr_right(truk_rot_horz + neck_rot_horz, 0, 2)
base_front = base + FNS.latr_front(truk_rot_horz + neck_rot_horz, truk_rot_vert + neck_rot_vert, 2)
base_back = base
vbox.Add( self._test_script_management, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.Add( self._test_arg, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.Add( self._fetch_data, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.Add( self._example_data, CC.FLAGS_EXPAND_BOTH_WAYS )
vbox.Add( self._test_parsing, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.Add( self._results, CC.FLAGS_EXPAND_BOTH_WAYS )
test_panel.SetSizer( vbox )
#
vbox = wx.BoxSizer( wx.VERTICAL )
vbox.Add( info_st, CC.FLAGS_EXPAND_BOTH_WAYS )
info_panel.SetSizer( vbox )
#
notebook.AddPage( edit_panel, 'edit', select = True )
notebook.AddPage( test_panel, 'test', select = False )
notebook.AddPage( info_panel, 'info', select = False )
vbox = wx.BoxSizer( wx.VERTICAL )
vbox.Add( notebook, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
self.SetSizer( vbox )
def EventFetchData( self, event ):
script = self.GetValue()
test_arg = self._test_arg.GetValue()
file_identifier_type = self._file_identifier_type.GetChoice()
if file_identifier_type == ClientParsing.FILE_IDENTIFIER_TYPE_FILE:
if not os.path.exists( test_arg ):
wx.MessageBox( 'That file does not exist!' )
return
file_identifier = test_arg
elif file_identifier_type == ClientParsing.FILE_IDENTIFIER_TYPE_USER_INPUT:
file_identifier = test_arg
else:
file_identifier = test_arg.decode( 'hex' )
try:
stop_time = HydrusData.GetNow() + 30
job_key = ClientThreading.JobKey( cancellable = True, stop_time = stop_time )
self._test_script_management.SetJobKey( job_key )
example_data = script.FetchData( job_key, file_identifier )
try:
self._example_data.SetValue( example_data )
except UnicodeDecodeError:
self._example_data.SetValue( 'The fetched data, which had length ' + HydrusData.ConvertIntToBytes( len( example_data ) ) + ', did not appear to be displayable text.' )
except Exception as e:
HydrusData.ShowException( e )
message = 'Could not fetch data!'
message += os.linesep * 2
message += HydrusData.ToUnicode( e )
wx.MessageBox( message )
finally:
job_key.Finish()
def EventTestParse( self, event ):
def wx_code( results ):
if not self:
return
result_lines = [ '*** ' + HydrusData.ConvertIntToPrettyString( len( results ) ) + ' RESULTS BEGIN ***' ]
result_lines.extend( ( ClientParsing.ConvertParseResultToPrettyString( result ) for result in results ) )
result_lines.append( '*** RESULTS END ***' )
results_text = os.linesep.join( result_lines )
self._results.SetValue( results_text )
def do_it( script, job_key, data ):
try:
results = script.Parse( job_key, data )
wx.CallAfter( wx_code, results )
except Exception as e:
HydrusData.ShowException( e )
message = 'Could not parse!'
wx.CallAfter( wx.MessageBox, message )
finally:
job_key.Finish()
script = self.GetValue()
stop_time = HydrusData.GetNow() + 30
job_key = ClientThreading.JobKey( cancellable = True, stop_time = stop_time )
self._test_script_management.SetJobKey( job_key )
data = self._example_data.GetValue()
HG.client_controller.CallToThread( do_it, script, job_key, data )
def GetExampleData( self ):
return self._example_data.GetValue()
def GetExampleURL( self ):
return self._url.GetValue()
def GetValue( self ):
name = self._name.GetValue()
url = self._url.GetValue()
query_type = self._query_type.GetChoice()
file_identifier_type = self._file_identifier_type.GetChoice()
file_identifier_string_converter = self._file_identifier_string_converter.GetValue()
file_identifier_arg_name = self._file_identifier_arg_name.GetValue()
static_args = self._static_args.GetValue()
children = self._children.GetValue()
script = ClientParsing.ParseRootFileLookup( name, url = url, query_type = query_type, file_identifier_type = file_identifier_type, file_identifier_string_converter = file_identifier_string_converter, file_identifier_arg_name = file_identifier_arg_name, static_args = static_args, children = children )
return script
class EditStringConverterPanel( ClientGUIScrolledPanels.EditPanel ):
def __init__( self, parent, string_converter, example_string_override = None ):
ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
transformations_panel = ClientGUIListCtrl.BetterListCtrlPanel( self )
self._transformations = ClientGUIListCtrl.BetterListCtrl( transformations_panel, 'string_converter_transformations', 7, 35, [ ( '#', 3 ), ( 'transformation', 30 ), ( 'result', -1 ) ], self._ConvertTransformationToListCtrlTuple, delete_key_callback = self._DeleteTransformation, activation_callback = self._EditTransformation )
transformations_panel.SetListCtrl( self._transformations )
transformations_panel.AddButton( 'add', self._AddTransformation )
transformations_panel.AddButton( 'edit', self._EditTransformation, enabled_only_on_selection = True )
transformations_panel.AddButton( 'delete', self._DeleteTransformation, enabled_only_on_selection = True )
transformations_panel.AddSeparator()
transformations_panel.AddButton( 'move up', self._MoveUp, enabled_check_func = self._CanMoveUp )
transformations_panel.AddButton( 'move down', self._MoveDown, enabled_check_func = self._CanMoveDown )
self._example_string = wx.TextCtrl( self )
#
self._transformations.AddDatas( [ ( i + 1, transformation_type, data ) for ( i, ( transformation_type, data ) ) in enumerate( string_converter.transformations ) ] )
if example_string_override is None:
self._example_string.SetValue( string_converter.example_string )
else:
self._example_string.SetValue( example_string_override )
self._transformations.UpdateDatas() # to refresh, now they are all in the list
self._transformations.Sort( 0 )
#
rows = []
rows.append( ( 'example string: ', self._example_string ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
vbox = wx.BoxSizer( wx.VERTICAL )
vbox.Add( transformations_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
vbox.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
self.SetSizer( vbox )
#
self._example_string.Bind( wx.EVT_TEXT, self.EventUpdate )
def _AddTransformation( self ):
transformation_type = ClientParsing.STRING_TRANSFORMATION_APPEND_TEXT
data = ' extra text'
with ClientGUITopLevelWindows.DialogEdit( self, 'edit transformation', frame_key = 'deeply_nested_dialog' ) as dlg:
panel = self._TransformationPanel( dlg, transformation_type, data )
dlg.SetPanel( panel )
if dlg.ShowModal() == wx.ID_OK:
number = self._transformations.GetItemCount() + 1
( transformation_type, data ) = panel.GetValue()
enumerated_transformation = ( number, transformation_type, data )
self._transformations.AddDatas( ( enumerated_transformation, ) )
self._transformations.UpdateDatas() # need to refresh string after the insertion, so the new row can be included in the parsing calcs
self._transformations.Sort()
def _CanMoveDown( self ):
selected_data = self._transformations.GetData( only_selected = True )
if len( selected_data ) == 1:
( number, transformation_type, data ) = selected_data[0]
if number < self._transformations.GetItemCount():
return True
return False
def _CanMoveUp( self ):
selected_data = self._transformations.GetData( only_selected = True )
if len( selected_data ) == 1:
( number, transformation_type, data ) = selected_data[0]
if number > 1:
return True
return False
def _ConvertTransformationToListCtrlTuple( self, transformation ):
( number, transformation_type, data ) = transformation
pretty_number = HydrusData.ConvertIntToPrettyString( number )
pretty_transformation = ClientParsing.StringConverter.TransformationToUnicode( ( transformation_type, data ) )
string_converter = self._GetValue()
try:
pretty_result = ClientParsing.MakeParsedTextPretty( string_converter.Convert( self._example_string.GetValue(), number ) )
except HydrusExceptions.StringConvertException as e:
pretty_result = str( e )
display_tuple = ( pretty_number, pretty_transformation, pretty_result )
sort_tuple = ( number, number, number )
return ( display_tuple, sort_tuple )
def _DeleteTransformation( self ):
if len( self._transformations.GetData( only_selected = True ) ) > 0:
with ClientGUIDialogs.DialogYesNo( self, 'Delete all selected?' ) as dlg:
if dlg.ShowModal() == wx.ID_YES:
self._transformations.DeleteSelected()
# now we need to renumber the remaining transformations to close any gaps
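# e.g. if rows numbered (1, 2, 3, 4) lose row 2, the loop below renumbers
# the survivors to (1, 2, 3) while preserving their relative order.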
num_rows = self._transformations.GetItemCount()
i = 1
search_i = i
while i <= num_rows:
try:
transformation = self._GetTransformation( search_i )
if search_i != i:
self._transformations.DeleteDatas( ( transformation, ) )
( search_i, transformation_type, data ) = transformation
transformation = ( i, transformation_type, data )
self._transformations.AddDatas( ( transformation, ) )
i += 1
search_i = i
except HydrusExceptions.DataMissing:
search_i += 1
self._transformations.UpdateDatas()
self._transformations.Sort()
def _EditTransformation( self ):
selected_data = self._transformations.GetData( only_selected = True )
for enumerated_transformation in selected_data:
( number, transformation_type, data ) = enumerated_transformation
with ClientGUITopLevelWindows.DialogEdit( self, 'edit transformation', frame_key = 'deeply_nested_dialog' ) as dlg:
panel = self._TransformationPanel( dlg, transformation_type, data )
dlg.SetPanel( panel )
if dlg.ShowModal() == wx.ID_OK:
self._transformations.DeleteDatas( ( enumerated_transformation, ) )
( transformation_type, data ) = panel.GetValue()
enumerated_transformation = ( number, transformation_type, data )
self._transformations.AddDatas( ( enumerated_transformation, ) )
else:
break
self._transformations.UpdateDatas()
self._transformations.Sort()
def _GetTransformation( self, desired_number ):
for transformation in self._transformations.GetData():
( number, transformation_type, data ) = transformation
if number == desired_number:
return transformation
raise HydrusExceptions.DataMissing()
def _GetValue( self ):
enumerated_transformations = list( self._transformations.GetData() )
enumerated_transformations.sort()
transformations = [ ( transformation_type, data ) for ( number, transformation_type, data ) in enumerated_transformations ]
example_string = self._example_string.GetValue()
string_converter = ClientParsing.StringConverter( transformations, example_string )
return string_converter
def _MoveDown( self ):
selected_transformation = self._transformations.GetData( only_selected = True )[0]
( number, transformation_type, data ) = selected_transformation
swap_transformation = self._GetTransformation( number + 1 )
self._SwapTransformations( selected_transformation, swap_transformation )
self._transformations.UpdateDatas()
self._transformations.Sort()
| |
nodes2stop = []
for topic in topic_names:
topic_prefix = '/rostopic_pub%s_' % topic
node_names = self.master_info.node_names
for n in node_names:
if n.startswith(topic_prefix):
nodes2stop.append(n)
self.stop_nodes_by_name(nodes2stop)
def _show_topic_output(self, show_hz_only, use_ssh=False, topics=[]):
'''
Shows the output of the topic in a terminal.
'''
selected_topics = topics
if not selected_topics:
selected_topics = self.topicsFromIndexes(self.ui.topicsView.selectionModel().selectedIndexes())
ret = True
if len(selected_topics) > 5:
ret = MessageBox.question(self, "Show echo", "You are going to open the echo of " + utf8(len(selected_topics)) + " topics at once\nContinue?", buttons=MessageBox.Ok | MessageBox.Cancel)
ret = (ret == MessageBox.Ok)
if ret:
for topic in selected_topics:
self._add_topic_output2queue(topic, show_hz_only, use_ssh)
def show_topic_output(self, topic_name, show_hz_only, use_ssh=False):
'''
Shows the topic output in a new window.
'''
if self.master_info is not None:
topic = self.master_info.getTopic("%s" % topic_name)
if topic is not None:
self._add_topic_output2queue(topic, show_hz_only, use_ssh)
else:
rospy.logwarn("topic not found: %s" % topic_name)
def _add_topic_output2queue(self, topic, show_hz_only, use_ssh=False):
try:
namespace = rospy.names.namespace(topic.name)
nodename = os.path.basename(topic.name)
namespace = '/echo_%s%s%s%s' % ('hz_' if show_hz_only else '', 'ssh_' if use_ssh else '', utf8(get_hostname(self.masteruri)), namespace)
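# Illustrative example (added, assumed host name): for topic '/scan' on a
# master host 'pc1' with show_hz_only=True, the namespace becomes
# '/echo_hz_pc1/' and the temporary echo node below runs as '/echo_hz_pc1/scan'.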
args = []
# subscription parameter
args.append("--echo %s %s %s %s" % (topic.name, topic.type, '--hz' if show_hz_only else '', '--ssh' if use_ssh else ''))
args.append("__ns:=%s" % namespace)
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'start subscriber for %s' % topic.name,
nm.starter().runNodeWithoutConfig,
{'host': 'localhost',
'package': 'fkie_node_manager',
'binary': 'node_manager',
'name': nodename,
'args': args,
'masteruri': self.masteruri,
'use_nmd': False,
'auto_pw_request': False,
'user': self.current_user
})
self._start_queue(self._progress_queue)
self.__echo_topics_dialogs.add(rospy.names.ns_join(namespace, nodename))
except Exception as e:
rospy.logwarn("Echo topic '%s' failed: %s" % (topic.name, utf8(e)))
MessageBox.warning(self, "Echo of topic error",
'Echo of topic %s failed!' % topic.name,
'%s' % utf8(e))
def on_service_call_clicked(self, services=[]):
'''
calls a service.
'''
selected_services = services
if not selected_services:
selected_services = self.servicesFromIndexes(self.ui.servicesView.selectionModel().selectedIndexes())
try:
for service in selected_services:
param = ServiceDialog(service, self)
param.show()
except Exception as e:
rospy.logwarn("Call service '%s' failed: %s" % (service.name, utf8(e)))
MessageBox.warning(self, "Call service error",
'Call service %s failed!' % service.name,
'%s' % utf8(e))
def service_call(self, service_name):
service = self.master_info.getService(utf8(service_name))
if service is not None:
try:
param = ServiceDialog(service, self)
param.show()
except Exception as e:
rospy.logwarn("Call service '%s' failed: %s" % (service.name, utf8(e)))
MessageBox.warning(self, "Call service error",
'Call service %s failed!' % service.name,
'%s' % utf8(e))
def _restore_expand_state(self, tree_view, proxy_model):
'''
Expand the first item and all selected items.
'''
tree_view.collapseAll()
for selected in tree_view.selectionModel().selectedIndexes():
index = selected
while index is not None and index.isValid():
item = proxy_model.sourceModel().itemFromIndex(index)
tree_view.setExpanded(index, True)
index = index.parent()
# expand the root items. When synced, the NodesView also contains other hosts; in that case only the local host is expanded.
for root_idx in range(proxy_model.sourceModel().rowCount()):
source_index = proxy_model.sourceModel().index(root_idx, 0)
item = proxy_model.sourceModel().itemFromIndex(source_index)
if type(item) in [HostItem] and not item._local:
continue
mapped_index = proxy_model.mapFromSource(source_index)
tree_view.setExpanded(mapped_index, True)
def on_node_filter_changed(self, text):
'''
Filter the displayed nodes
'''
self.node_proxy_model.setFilterRegExp(QRegExp(text, Qt.CaseInsensitive, QRegExp.Wildcard))
if text:
self.ui.nodeTreeView.expandAll()
else:
self._restore_expand_state(self.ui.nodeTreeView, self.node_proxy_model)
def on_topic_filter_changed(self, text):
'''
Filter the displayed topics
'''
self.topic_proxyModel.setFilterRegExp(QRegExp(text, Qt.CaseInsensitive, QRegExp.Wildcard))
if text:
self.ui.topicsView.expandAll()
else:
self._restore_expand_state(self.ui.topicsView, self.topic_proxyModel)
def on_service_filter_changed(self, text):
'''
Filter the displayed services
'''
self.service_proxyModel.setFilterRegExp(QRegExp(text, Qt.CaseInsensitive, QRegExp.Wildcard))
if text:
self.ui.servicesView.expandAll()
else:
self._restore_expand_state(self.ui.servicesView, self.service_proxyModel)
def on_parameter_filter_changed(self, text):
'''
Filter the displayed parameter
'''
self.parameter_proxyModel.setFilterRegExp(QRegExp(text, Qt.CaseInsensitive, QRegExp.Wildcard))
def on_get_parameter_clicked(self):
'''
Requests parameter list from the ROS parameter server.
'''
self.parameterHandler.requestParameterList(self.masteruri)
def on_add_parameter_clicked(self):
'''
Adds a parameter to the ROS parameter server.
'''
selectedParameter = self.parameterFromIndexes(self.ui.parameterView.selectionModel().selectedIndexes())
ns = '/'
if selectedParameter:
ns = roslib.names.namespace(selectedParameter[0][0])
fields = {'name': {':value': ns}, 'type': {':type': 'string', ':value': ['string', 'int', 'float', 'bool', 'list']}, 'value': {':value': ''}}
newparamDia = ParameterDialog(fields, parent=self, store_geometry="add_parameter_dialog")
newparamDia.setWindowTitle('Add new parameter')
newparamDia.setFilterVisible(False)
newparamDia.accepted.connect(self._on_add_parameter_accepted)
newparamDia.setFocusField('name')
newparamDia.show()
newparamDia.raise_()
newparamDia.activateWindow()
def _on_add_parameter_accepted(self):
if isinstance(self.sender(), ParameterDialog):
params = self.sender().getKeywords()
try:
if params['type'] == 'int':
value = int(params['value'])
elif params['type'] == 'float':
value = float(params['value'])
elif params['type'] == 'bool':
value = bool(params['value'].lower() in ("yes", "true", "t", "1"))
elif params['type'] == 'list':
try:
value = [ruamel.yaml.load(params['value'], Loader=ruamel.yaml.Loader)]
# if there is no YAML, load() returns None. We want an
# empty list instead as our representation of empty.
if value is None:
value = []
except ruamel.yaml.MarkedYAMLError as e:
MessageBox.warning(self, self.tr("Warning"), "yaml error: %s" % utf8(e), buttons=MessageBox.Ok)
return
else:
value = params['value']
self.parameterHandler.deliverParameter(self.masteruri, {params['name']: value})
self.parameterHandler.requestParameterList(self.masteruri)
self.sender().close()
except (KeyError, ValueError) as e:
MessageBox.warning(self, "Warning",
'Error while adding a parameter to the ROS parameter server',
utf8(e))
def on_delete_parameter_clicked(self):
'''
Deletes the parameter from the ROS parameter server.
'''
selectedParameter = self.parameterFromIndexes(self.ui.parameterView.selectionModel().selectedIndexes())
try:
socket.setdefaulttimeout(15)
name = rospy.get_name()
master = xmlrpcclient.ServerProxy(self.masteruri)
master_multi = xmlrpcclient.MultiCall(master)
for (key, _) in selectedParameter: # _ := value
master_multi.deleteParam(name, key)
r = master_multi()
for code, msg, parameter in r:
if code != 1:
rospy.logwarn("Error on delete parameter '%s': %s", parameter, msg)
except Exception:
rospy.logwarn("Error on delete parameter: %s", utf8(traceback.format_exc(1)))
MessageBox.warning(self, "Warning",
'Error while deleting a parameter from the ROS parameter server',
utf8(traceback.format_exc(1)))
else:
self.on_get_parameter_clicked()
finally:
socket.setdefaulttimeout(None)
def on_save_parameter_clicked(self):
'''
Stores selected parameter to a file.
'''
selectedParameter = self.parameterFromIndexes(self.ui.parameterView.selectionModel().selectedIndexes())
if selectedParameter:
# (fileName, filter)
(fileName, _) = QFileDialog.getSaveFileName(self,
"Save parameter",
nm.settings().current_dialog_path,
"YAML files (*.yaml);;All files (*)")
if fileName:
nm.settings().current_dialog_path = os.path.dirname(fileName)
try:
with open(fileName, 'w+') as f:
values = dict()
# convert ROS namespaces of parameters to YAML namespaces
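# e.g. a parameter '/ns/sub/param' becomes the nested mapping
# {'ns': {'sub': {'param': value}}} before being dumped as YAML below.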
for (key, value) in selectedParameter:
keys = key.strip(rospy.names.SEP).split(rospy.names.SEP)
curr_v = values
for k in keys:
if k in curr_v:
curr_v = curr_v[k]
elif k != keys[-1]:
curr_v[k] = dict()
curr_v = curr_v[k]
else:
curr_v[k] = value
buf = ruamel.yaml.compat.StringIO()
ruamel.yaml.dump(values, buf, Dumper=ruamel.yaml.RoundTripDumper)
f.write(buf.getvalue())
except Exception as e:
print(utf8(traceback.format_exc(1)))
MessageBox.warning(self, "Save parameter Error",
'Error while saving the parameter',
utf8(e))
def on_transfer_parameter_clicked(self):
'''
Copy selected parameter to local ROS-Master.
'''
selectedParameter = self.parameterFromIndexes(self.ui.parameterView.selectionModel().selectedIndexes())
if selectedParameter:
try:
params = {}
for (key, value) in selectedParameter:
params[key] = value
if params:
dia_params = {'master': {':value': masteruri_from_ros()}}
dia = ParameterDialog(dia_params, store_geometry="transfer_param_dialog")
dia.setFilterVisible(False)
dia.setWindowTitle('Copy parameter')
dia.setFocusField('master')
if dia.exec_():
dia_params = dia.getKeywords()
url = dia_params['master']
rospy.loginfo("Copy %d parameter to %s" % (len(params), url))
self.parameterHandler.deliverParameter(url, params)
except Exception as e:
MessageBox.warning(self, "Copy parameter Error",
'Error while transferring the parameter',
utf8(e))
def _replaceDoubleSlash(self, liste):
'''
Replaces literal "\n" escape sequences with real newlines in the strings of a (possibly nested) list, to avoid the accumulation of extra backslashes.
'''
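# Illustrative example (added): _replaceDoubleSlash(["a\\nb", ["c\\nd"]])
# returns ["a\nb", ["c\nd"]] -- literal backslash-n sequences become real
# newlines, and nested lists are processed recursively.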
if liste and isinstance(liste, list):
result = []
for l in liste:
val = l
if isstring(l):
val = l.replace("\\n", "\n")
# result.append("".join([val]))
elif isinstance(l, list):
val = self._replaceDoubleSlash(l)
result.append(val)
return result
return liste
def _on_parameter_item_changed(self, item):
'''
add changes to the ROS parameter server
'''
if isinstance(item, ParameterValueItem):
try:
if isinstance(item.value, bool):
value = bool(item.text().lower() in ("yes", "true", "t", "1"))
elif isinstance(item.value, int):
value = int(item.text())
elif isinstance(item.value, float):
value = float(item.text())
elif isinstance(item.value, list):
try:
value = ruamel.yaml.load(item.text(), Loader=ruamel.yaml.Loader)
# if there is no YAML, load() returns None. We want an
# empty list instead as our representation of empty.
if value is None:
value = []
value = self._replaceDoubleSlash(value)
except ruamel.yaml.MarkedYAMLError as e:
MessageBox.warning(self, self.tr("Warning"), "yaml error: %s" % utf8(e), buttons=MessageBox.Ok)
item.setText(utf8(item.value))
return
else:
value = item.text()
self.parameterHandler.deliverParameter(self.masteruri, {item.name: value})
item.value = value
except ValueError as e:
MessageBox.warning(self, "Warning",
'Error while adding changes to the ROS parameter server',
utf8(e))
item.setText(item.value)
def _on_param_list(self, masteruri, code, msg, params):
'''
:param str masteruri: The URI of the ROS parameter server
:param int code: The return code of the request. If not 1, the message is set and the list can be ignored.
:param str msg: The message of the result.
:param params: The list of parameter names.
:type params: list(str)
'''
if code == 1:
params.sort()
self.parameterHandler.requestParameterValues(masteruri, params)
def _on_param_values(self, masteruri, code, msg, params):
'''
:param str masteruri: The URI of the ROS parameter server
:param int code: The return code of the request. If not 1, the message is set and the list can be ignored.
:param str msg: The message of the result.
:param params: The dictionary of parameter names and request results.
:type params: dict(paramName : (code, statusMessage, parameterValue))
'''
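# Illustrative shape (added, assumed values): params could look like
# {'/use_sim_time': (1, '', False), '/robot/name': (1, '', 'my_bot')},
# where each tuple is (return code, status message, parameter value).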
if code == 1:
result = {}
for p, (code_n, _, val) in params.items(): # _ := msg_n
if code_n == 1:
result[p] = val
else:
result[p] = ''
if p == '/use_sim_time':
self.__use_sim_time = (code_n == 1 and val)
},
{
"value": 24,
"color": "#6ee100"
},
{
"value": 28,
"color": "#39a500"
},
{
"value": 30,
"color": "#026900",
"legend": {
"prefix": ">"
}
}
],
"legend": {
"radix_point": 0,
"scale_by": 1,
"major_ticks": 10,
"axes_position": [0.05, 0.5, 0.89, 0.15]
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "annual_clear_observations",
},
{
# Included as a keyword for the layer
"label": "WOfS Annual Statistics",
# Included as a keyword for the layer
"type": "Water Summary",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_annual_summary_statistics",
# The Datacube name for the associated data product
"product_name": "wofs_annual_summary",
"abstract": """
Water Observations from Space - Annual Statistics is a set of annual statistical summaries of the water observations contained in WOfS. The layers available are: the count of clear observations; the count of wet observations; the percentage of wet observations over time.
This product is Water Observations from Space - Annual Statistics, a set of annual statistical summaries of the WOfS product that combines the many years of WOfS observations into summary products that help the understanding of surface water across Australia. As no confidence filtering is applied to this product, it is affected by noise where misclassifications have occurred in the WOfS water classifications, and hence can be difficult to interpret on its own.
The confidence layer and filtered summary are contained in the Water Observations from Space Statistics - Filtered Summary product, which provide a noise-reduced view of the water summary.
This layer contains Water Summary: what percentage of clear observations were detected as wet (ie. the ratio of wet to clear as a percentage). No clear observations of water causes an area to appear transparent, few clear observations of water correlate with red and yellow colours, deep blue and purple correspond to an area being wet through 90%-100% of clear observations.
For more information please see: https://data.dea.ga.gov.au/WOfS/annual_summary/v2.1.5/Product%20Description.pdf
For service status information, see https://status.dea.ga.gov.au""",
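# Note (added, not part of the original config): the "frequency" band styled
# below is the wet/clear ratio described in the abstract, i.e. roughly
# frequency = count_wet / count_clear (e.g. 18 wet out of 20 clear -> 0.9),
# which is why the legends scale the 0-1 values by 100 and label them in %.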
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time zone, in hours added to UTC (may be negative).
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portion of the dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["annual_WOfS_frequency",
"annual_WOfS_frequency_blues_transparent"]
},
"wcs_default_bands": ["frequency"],
"styles": [
{
"name": "annual_WOfS_frequency",
"title": " Water Summary",
"abstract": "WOfS annual summary showing the frequency of Wetness",
"needed_bands": ["frequency"],
"color_ramp": [
{
"value": 0.0,
"color": "#000000",
"alpha": 0.0
},
{
"value": 0.02,
"color": "#000000",
"alpha": 0.0
},
{
"value": 0.05,
"color": "#8e0101",
"alpha": 0.25
},
{
"value": 0.1,
"color": "#cf2200",
"alpha": 0.75
},
{
"value": 0.2,
"color": "#e38400"
},
{
"value": 0.3,
"color": "#e3df00"
},
{
"value": 0.4,
"color": "#62e300"
},
{
"value": 0.5,
"color": "#00e32d"
},
{
"value": 0.6,
"color": "#00e3c8"
},
{
"value": 0.7,
"color": "#0097e3"
},
{
"value": 0.8,
"color": "#005fe3"
},
{
"value": 0.9,
"color": "#000fe3"
},
{
"value": 1.0,
"color": "#5700e3"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.1
}
},
{
"name": "annual_WOfS_frequency_blues_transparent",
"title": "Water Summary (Blue)",
"abstract": "WOfS annual summary showing the frequency of Wetness",
"needed_bands": ["frequency"],
"color_ramp": [
{
"value": 0.0,
"color": "#ffffff",
"alpha": 0.0,
},
{
"value": 0.001,
"color": "#d5fef9",
"alpha": 0.0,
},
{
"value": 0.02,
"color": "#d5fef9",
},
{
"value": 0.2,
"color": "#71e3ff"
},
{
"value": 0.4,
"color": "#01ccff"
},
{
"value": 0.6,
"color": "#0178ff"
},
{
"value": 0.8,
"color": "#2701ff"
},
{
"value": 1.0,
"color": "#5700e3"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.1
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "annual_WOfS_frequency",
},
{
# Included as a keyword for the layer
"label": "WOfS April - October Statistics",
# Included as a keyword for the layer
"type": "Wet Count",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_apr_oct_summary_wet",
# The Datacube name for the associated data product
"product_name": "wofs_apr_oct_summary",
"abstract": """
Water Observations from Space - April to October Statistics is a set of seasonal statistical summaries of the water observations contained in WOfS. The layers available are: the count of clear observations; the count of wet observations; the percentage of wet observations over time.
This product is Water Observations from Space - April to October Statistics, a set of seasonal statistical summaries of the WOfS product that combines the many years of WOfS observations into summary products that help the understanding of surface water across Australia. As no confidence filtering is applied to this product, it is affected by noise where misclassifications have occurred in the WOfS water classifications, and hence can be difficult to interpret on its own.
The confidence layer and filtered summary are contained in the Water Observations from Space Statistics - Filtered Summary product, which provide a noise-reduced view of the water summary.
This layer contains Water Summary: what percentage of clear observations were detected as wet (ie. the ratio of wet to clear as a percentage). No clear observations of water causes an area to appear transparent, 1-50 total clear observations of water correlate with red and yellow colours, 100 clear observations of water correlate with green, 200 clear observations of water correlates with light blue, 300 clear observations of water correlates to deep blue and 400 and over observations of clear water correlate to purple.
For service status information, see https://status.dea.ga.gov.au""",
"min_zoom_factor": 15.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (maybe negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["seasonal_water_observations"]
},
"wcs_default_bands": ["count_wet"],
"styles": [
{
"name": "seasonal_water_observations",
"title": "Wet Count",
"abstract": "WOfS seasonal summary showing the count of water observations",
"needed_bands": ["count_wet"],
"color_ramp": [
{
"value": 0,
"color": "#666666",
"alpha": 0
},
{
# purely for legend display
# we should not get fractional
# values in this styles
"value": 0.2,
"color": "#990000",
"alpha": 1
},
{
"value": 2,
"color": "#990000"
},
{
"value": 4,
"color": "#E38400"
},
{
"value": 6,
"color": "#E3DF00"
},
{
"value": 8,
"color": "#00E32D"
},
{
"value": 10,
"color": "#00E3C8"
},
{
"value": 12,
"color": "#0097E3"
},
{
"value": 14,
"color": "#005FE3"
},
{
"value": 16,
"color": "#000FE3"
},
{
"value": 18,
"color": "#000EA9"
},
{
"value": 20,
"color": "#5700E3",
"legend": {
"prefix": ">"
}
}
],
"legend": {
"radix_point": 0,
"scale_by": 1,
"major_ticks": 10
}
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "seasonal_water_observations",
},
{
# Included as a keyword for | |
buyPointsOn='%.4f CMN'%(random.uniform(100000.0000, 200000.0000)))
community.voteLeader(point, bob, leader2, 7000, providebw=bob+'/tech', keys=[bobKey,techKey])
self.accounts[bob] = 'Bob'
alicePoints = community.getPointBalance(point, alice)
bobPoints = community.getPointBalance(point, bob)
transferPoints = alicePoints * random.uniform(0.1, 0.9)
feePoints = max(transferPoints*pointParam['transfer_fee']//10000, Asset(pointParam['min_transfer_fee_points'], pointParam['max_supply'].symbol))
alicePointsAfter = alicePoints - transferPoints - feePoints
bobPointsAfter = bobPoints + transferPoints
self.assertGreaterEqual(alicePointsAfter.amount, 0)
leaders = self.getLeadersWeights(point)
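        # Expected leader weights: each leader's weight moves by that voter's vote share (expressed in tenths below) of the voter's balance change.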
leaders[leader1] += 3*alicePointsAfter.amount//10 - 3*alicePoints.amount//10
leaders[leader2] += 2*alicePointsAfter.amount//10 - 2*alicePoints.amount//10
leader2AfterAlice = leaders[leader2]
leaders[leader2] += 7*bobPointsAfter.amount//10 - 7*bobPoints.amount//10
with log_action("Alice transfer to Bob some points"):
args = {'from':alice, 'to':bob, 'quantity':str(transferPoints), 'memo':''}
trxResult = testnet.pushAction('c.point', 'transfer', alice, args,
providebw=alice+'/tech', keys=[aliceKey,techKey], output=True)
trxId = trxResult['transaction_id']
trxBlock = trxResult['processed']['block_num']
trxTrace = [
{
'receiver': 'c.point', 'code': 'c.point', 'action': 'transfer',
'auth': [{'actor': alice, 'permission': 'active'}],
'args': args,
},{
'receiver': 'c.ctrl', 'code': 'c.ctrl', 'action': 'changepoints',
'auth': [{'actor': 'c.ctrl', 'permission': 'changepoints'}],
'args': {'who': alice, 'diff': str(-transferPoints-feePoints)},
'events': ee.AllItems(
{
'code': 'c.ctrl', 'event': 'leaderstate',
'args': {'commun_code': point, 'leader': leader1, 'weight': leaders[leader1]},
},{
'code': 'c.ctrl', 'event': 'leaderstate',
'args': {'commun_code': point, 'leader': leader2, 'weight': leader2AfterAlice},
}
),
},{
'receiver': 'c.ctrl', 'code': 'c.ctrl', 'action': 'changepoints',
'args': {'who': bob, 'diff': str(transferPoints)},
'auth': [{'actor': 'c.ctrl', 'permission': 'changepoints'}],
'events': ee.AllItems(
{
'code': 'c.ctrl', 'event': 'leaderstate',
'args': {'commun_code': point, 'leader': leader2, 'weight': leaders[leader2]},
}
),
},
]
self.eeHelper.waitEvents(
[ ({'msg_type':'ApplyTrx', 'id':trxId}, {'block_num':trxBlock, 'actions':trxTrace, 'except':ee.Missing()}),
], trxBlock)
self.assertEqual(leaders, self.getLeadersWeights(point))
def test_voterBuyPoints(self):
(point, leader) = (self.point, self.leader1)
(alice, aliceKey) = community.createCommunityUser(
creator='tech', creatorKey=techKey, clientKey=clientKey,
community=point, buyPointsOn='%.4f CMN'%(random.uniform(100000.0000, 200000.0000)))
community.voteLeader(point, alice, leader, 3000, providebw=alice+'/tech', keys=[aliceKey,techKey])
self.accounts[alice] = 'Alice'
pointParam = community.getPointParam(self.point)
pointStat = community.getPointStat(self.point)
alicePoints = community.getPointBalance(self.point, alice)
tokenQuantity = Asset.fromstr('%.4f CMN'%(random.uniform(100000.0000, 200000.0000)))
if pointParam['fee'] != 0:
totalQuantity = tokenQuantity
tokenQuantity = totalQuantity * (10000-pointParam['fee']) // 10000
feeQuantity = totalQuantity - tokenQuantity
else:
feeQuantity = Asset(0, CMN)
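        # Expected issuance from the buy: the test reproduces the constant-weight conversion, new_supply = supply * (1 + deposit/reserve)^(cw/10000).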
newReserve = pointStat['reserve'] + tokenQuantity
q = pow(1.0 + tokenQuantity.amount/pointStat['reserve'].amount, pointParam['cw']/10000)
newSupply = pointStat['supply'] * q
pointQuantity = newSupply - pointStat['supply']
leaders = self.getLeadersWeights(point)
leaders[leader] += 3*(alicePoints+pointQuantity).amount//10 - 3*alicePoints.amount//10
appLeaders = self.getLeadersWeights('')
appLeaders[self.appLeader2] += 4*(pointStat['reserve']+tokenQuantity).amount//10 - 4*pointStat['reserve'].amount//10
with log_action("Alice vote for leader then buy some point"):
trxResult = community.buyCommunityPoints(alice, tokenQuantity+feeQuantity, point, aliceKey, clientKey, output=True)
trxId = trxResult['transaction_id']
trxBlock = trxResult['processed']['block_num']
args = {'from': alice, 'to': 'c.point', 'quantity': str(tokenQuantity+feeQuantity), 'memo': point}
trxTrace = [
{
'receiver': 'cyber.token', 'code': 'cyber.token', 'action': 'transfer',
'auth': [{'actor': alice, 'permission': 'active'}],
'args': args,
},{
'receiver': 'c.point', 'code': 'cyber.token', 'action': 'transfer',
'auth': [{'actor': alice,'permission': 'active'}],
'args': args,
},{
'receiver': 'c.ctrl', 'action': 'changepoints', 'code': 'c.ctrl',
'auth': [{'actor': 'c.ctrl', 'permission': 'changepoints'}],
'args': {'who': alice, 'diff': str(pointQuantity)},
'events': ee.AllItems(
{
'code': 'c.ctrl','event': 'leaderstate',
'args': {'commun_code': point, 'leader': leader, 'weight': leaders[leader]},
}
),
},{
'receiver': 'c.ctrl', 'action': 'changepoints', 'code': 'c.ctrl',
'auth': [{'actor': 'c.ctrl','permission': 'changepoints'}],
'args': {'who': self.owner, 'diff': '%d '%tokenQuantity.amount},
'events': ee.AllItems(
{
'code': 'c.ctrl','event': 'leaderstate',
'args': {'commun_code': '', 'leader': self.appLeader2, 'weight': str(appLeaders[self.appLeader2])},
}
),
},
]
self.eeHelper.waitEvents(
[ ({'msg_type':'ApplyTrx', 'id':trxId}, {'block_num':trxBlock, 'actions':trxTrace, 'except':ee.Missing()}),
], trxBlock)
self.assertEqual(leaders, self.getLeadersWeights(point))
self.assertEqual(appLeaders, self.getLeadersWeights(''))
def test_voterSellPoints(self):
(point, leader) = (self.point, self.leader1)
(alice, aliceKey) = community.createCommunityUser(
creator='tech', creatorKey=techKey, clientKey=clientKey,
community=self.point, buyPointsOn='%.4f CMN'%(random.uniform(1000.0000, 2000.0000)))
community.voteLeader(point, alice, leader, 3000, providebw=alice+'/tech', keys=[aliceKey,techKey])
self.accounts[alice] = 'Alice'
# Calculate CT/CP-quantities after transaction
pointParam = community.getPointParam(self.point)
pointStat = community.getPointStat(self.point)
alicePoints = community.getPointBalance(self.point, alice)
sellPoints = alicePoints * random.uniform(0.1, 0.9)
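        # Expected payout from the sell: the inverse constant-weight conversion, reserve_out = reserve * (1 - (1 - sold/supply)^(10000/cw)), with the fee deducted afterwards.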
q = 1.0 - pow(1.0 - sellPoints.amount/pointStat['supply'].amount, 10000/pointParam['cw'])
totalQuantity = pointStat['reserve'] * q
tokenQuantity = totalQuantity * (10000-pointParam['fee']) // 10000
feeQuantity = totalQuantity - tokenQuantity
leaders = self.getLeadersWeights(point)
leaders[self.leader1] += 3*(alicePoints-sellPoints).amount//10 - 3*alicePoints.amount//10
appLeaders = self.getLeadersWeights('')
appLeaders[self.appLeader2] += 4*(pointStat['reserve']-totalQuantity).amount//10 - 4*pointStat['reserve'].amount//10
with log_action("Sell community points through transfer them to 'c.point' account"):
trxArgs = {'from':alice, 'to':'c.point', 'quantity':str(sellPoints), 'memo':''}
trxResult = testnet.pushAction('c.point', 'transfer', alice, trxArgs,
providebw=alice+'/c@providebw', keys=[aliceKey, clientKey], output=True)
trxTrx = trxResult['transaction_id']
trxBlock = trxResult['processed']['block_num']
trxTrace = [
{
'receiver': 'c.point', 'code': 'c.point', 'action': 'transfer',
'auth': [{'actor': alice, 'permission': 'active'}],
'args': trxArgs,
}, {
'receiver': 'c.ctrl', 'code': 'c.ctrl', 'action': 'changepoints',
'auth': [{'actor': 'c.ctrl', 'permission': 'changepoints'}],
'args': {'who': alice, 'diff': str(-sellPoints)},
'events': ee.AllItems(
{
'code': 'c.ctrl','event': 'leaderstate',
'args': {'commun_code': point, 'leader': leader, 'weight': leaders[leader]},
}
)
}, {
'receiver': 'c.ctrl', 'code': 'c.ctrl', 'action': 'changepoints',
'auth': [{'actor': 'c.ctrl', 'permission': 'changepoints'}],
'args': {'who': pointParam['issuer'], 'diff': '-%d '%(tokenQuantity.amount+feeQuantity.amount)},
'events': ee.AllItems(
{
'code': 'c.ctrl','event': 'leaderstate',
'args': {'commun_code': '', 'leader': self.appLeader2, 'weight': str(appLeaders[self.appLeader2])},
}
),
},
]
self.eeHelper.waitEvents(
[ ({'msg_type':'ApplyTrx', 'id':trxTrx}, {'block_num':trxBlock, 'actions':trxTrace, 'except':ee.Missing()}),
], trxBlock)
def test_voterStakeTokens(self):
appLeader = self.appLeader1
(alice, aliceKey) = community.createCommunityUser(
creator='tech', creatorKey=techKey, clientKey=clientKey,
community='', buyPointsOn='%.4f CMN'%(random.uniform(100000.0000, 200000.0000)))
community.voteLeader('', alice, appLeader, 3000, providebw=alice+'/tech', keys=[aliceKey,techKey])
self.accounts[alice] = 'Alice'
aliceTokens = community.getPointBalance('', alice)
tokenQuantity = Asset.fromstr('%.4f CMN'%(random.uniform(1000.0000, 10000.0000)))
print("Alice: %s, %s"%(aliceTokens,tokenQuantity))
appLeaders = self.getLeadersWeights('')
appLeaders[appLeader] += 3*(aliceTokens.amount + tokenQuantity.amount)//10 - 3*aliceTokens.amount//10
with log_action("Voter stake some CMN tokens"):
trxResult = community.buyCommunityPoints(alice, tokenQuantity, '', aliceKey, clientKey, output=True)
trxId = trxResult['transaction_id']
trxBlock = trxResult['processed']['block_num']
args = {'from': alice, 'to': 'c.point', 'quantity': str(tokenQuantity), 'memo': ''}
trxTrace = [
{
'receiver': 'cyber.token', 'code': 'cyber.token', 'action': 'transfer',
'auth': [{'actor': alice, 'permission': 'active'}],
'args': args,
},{
'receiver': 'c.point', 'code': 'cyber.token', 'action': 'transfer',
'auth': [{'actor': alice,'permission': 'active'}],
'args': args,
'events': ee.AllItems(
{
'code': 'c.point', 'event': 'balance',
'args': {'account': alice, 'balance': '%d '%(aliceTokens.amount + tokenQuantity.amount)}
}
)
},{
'receiver': 'c.ctrl', 'action': 'changepoints', 'code': 'c.ctrl',
'auth': [{'actor': 'c.ctrl', 'permission': 'changepoints'}],
'args': {'who': alice, 'diff': '%d '%tokenQuantity.amount},
'events': ee.AllItems(
{
'code': 'c.ctrl','event': 'leaderstate',
'args': {'commun_code': '', 'leader': appLeader, 'weight': appLeaders[appLeader]},
}
),
},
]
self.eeHelper.waitEvents(
[ ({'msg_type':'ApplyTrx', 'id':trxId}, {'block_num':trxBlock, 'actions':trxTrace, 'except':ee.Missing()}),
], trxBlock)
self.assertEqual(appLeaders, self.getLeadersWeights(''))
def test_voterWithdrawTokens(self):
appLeader = self.appLeader1
(alice, aliceKey) = community.createCommunityUser(
creator='tech', creatorKey=techKey, clientKey=clientKey,
community='', buyPointsOn='%.4f CMN'%(random.uniform(100000.0000, 200000.0000)))
community.voteLeader('', alice, appLeader, 3000, providebw=alice+'/tech', keys=[aliceKey,techKey])
self.accounts[alice] = 'Alice'
aliceTokens = community.getPointBalance('', alice)
tokenQuantity = Asset.fromstr('%.4f CMN'%(aliceTokens.amount/10000*random.uniform(0.3, 0.7)))
print("Alice: %s, %s"%(aliceTokens,tokenQuantity))
appLeaders = self.getLeadersWeights('')
appLeaders[appLeader] += 3*(aliceTokens.amount - tokenQuantity.amount)//10 - 3*aliceTokens.amount//10
with log_action("Alice withdraw some staked CMN-tokens"):
args = {'owner': alice, 'quantity': str(tokenQuantity)}
trxResult = testnet.pushAction('c.point', 'withdraw', alice, args,
providebw=alice+'/tech', keys=[aliceKey,techKey], output=True)
trxId = trxResult['transaction_id']
trxBlock = trxResult['processed']['block_num']
trxTrace = [
{
'receiver': 'c.point', 'code': 'c.point', 'action': 'withdraw',
'auth': [{'actor': alice,'permission': 'active'}],
'args': args,
'events': ee.AllItems(
{
'code': 'c.point', 'event': 'balance',
'args': {'account': alice, 'balance': '%d '%(aliceTokens.amount - tokenQuantity.amount)}
}
)
},{
'receiver': 'c.ctrl', 'action': 'changepoints', 'code': 'c.ctrl',
'auth': [{'actor': 'c.ctrl', 'permission': 'changepoints'}],
'args': {'who': alice, 'diff': '-%d '%tokenQuantity.amount},
'events': ee.AllItems(
{
'code': 'c.ctrl','event': 'leaderstate',
'args': {'commun_code': '', 'leader': appLeader, 'weight': appLeaders[appLeader]},
}
),
},
]
self.eeHelper.waitEvents(
[ ({'msg_type':'ApplyTrx', 'id':trxId}, {'block_num':trxBlock, 'actions':trxTrace, 'except':ee.Missing()}),
], trxBlock)
self.assertEqual(appLeaders, self.getLeadersWeights(''))
class RecoverTestCase(TestCase):
@classmethod
def setUpClass(self):
super().setUpClass()
with log_action("Initialize RecoverTestCase"):
self.recover_delay = 3
result = json.loads(testnet.cleos('get table c.recover c.recover params'))
if len(result['rows']) == 0 or result['rows'][0]['recover_delay'] != self.recover_delay:
testnet.pushAction('c.recover', 'setparams', 'c.recover', {
'recover_delay': self.recover_delay
}, providebw='c.recover/tech', keys=[techKey])
testnet.updateAuth('c.recover', 'recover', 'active', [recoverPublic], [],
providebw='c.recover/tech', keys=[techKey])
self.point = community.getUnusedPointSymbol()
self.owner = community.createCommunity(
community_name = self.point,
creator_auth = client,
creator_key = clientKey,
maximum_supply = Asset.fromstr('100000000.000 %s'%self.point),
reserve_amount = Asset.fromstr('1000000.0000 CMN'))
self.accounts[self.owner] = 'Owner'
def setUp(self):
super().setUp()
with log_action("Initialize RecoverTestCase test environment"):
(private, public) = testnet.createKey()
(self.alice, self.aliceKey) = community.createCommunityUser(
creator='tech', creatorKey=techKey, clientKey=clientKey,
community=self.point, buyPointsOn='%.4f CMN'%(random.uniform(10000.0000, 200000.0000)))
testnet.updateAuth(self.alice, 'owner', '', [public], ['<EMAIL>'],
providebw=self.alice+'/tech', keys=[techKey, self.aliceKey])
self.aliceOwner = private
self.accounts[self.alice] = 'Alice'
def test_unavailableRecover(self):
"Recovery is not available for accounts that did not allow it"
(bobKey, bobPublic) = testnet.createKey()
bob = testnet.createRandomAccount(bobPublic, keys=[techKey])
(private2, public2) = testnet.createKey()
with self.assertRaisesRegex(Exception, 'Key recovery for this account is not available'):
community.recover(bob, active_key=public2, provider='tech', keys=[recoverKey,techKey], output=True)
def test_recoverActiveAuthority(self):
"Recovery active authority"
globalLockItems = DataItems('scope', 'rev').add('unlocks').others(hide=True)
(alice, aliceKey) = (self.alice, self.aliceKey)
globalLock = community.getPointGlobalLock(alice)
self.assertEqual(None, globalLock)
print("Alice global lock:", self.jsonPrinter.format(globalLockItems, globalLock))
with log_action("Set new active key using recovery"):
(private2, public2) = testnet.createKey()
result = community.recover(alice, active_key=public2, provider='tech', keys=[recoverKey,techKey], output=True)
recover_time = from_time_point_sec(result['processed']['block_time'])
with log_action("Check new active key with `checkwin` action"):
testnet.pushAction('cyber', 'checkwin', alice, {},
providebw=alice+'/tech', keys=[techKey, private2], output=True)
globalLock = community.getPointGlobalLock(alice)
globalLockTo = from_time_point_sec(globalLock['unlocks'])
self.assertEqual(globalLockTo, recover_time + timedelta(seconds=self.recover_delay))
print("Alice global lock:", self.jsonPrinter.format(globalLockItems, globalLock))
def test_applyOwnerRecover(self):
"Recovery and apply owner authority"
(alice, aliceKey) = (self.alice, self.aliceKey)
with log_action("Set new owner key using recover"):
(private2, public2) = testnet.createKey()
community.recover(alice, owner_key=public2, provider='tech', keys=[recoverKey,techKey], output=True)
with log_action("Wait for 4 sec and apply owner key recovery"):
time.sleep(4)
community.applyOwner(alice, providebw=alice+'/tech', keys=[aliceKey,techKey], output=True)
with log_action("Check new owner key with `checkwin` action"):
testnet.pushAction('cyber', 'checkwin', alice+'@owner', {},
providebw=alice+'/tech', keys=[techKey, private2], output=True)
def test_cancelOwnerRecover(self):
"Recovery and cancel owner authority"
(alice, aliceKey) = (self.alice, self.aliceKey)
with log_action("Set | |
load step data at tInd = {0} in file {1}'.format(tInd,get_outfile_path(options,'stats.p'))
if not options.mpi:
print '\n\ntInd = {0}, t = {2}, obst = {1}'.format(tInd,yt.T,t)
# Read the posterior
if baseData['mpi']:
for (frameRef,pfile) in izip(stepData['post_pos_ref'],post_pkl_files):
if not firstReadDone:
pfile.seek(frameRef)
post.update(pickle.load(pfile))
else:
if not firstReadDone:
post_pkl_file.seek(stepData['post_pos_ref'][0])
post = pickle.load(post_pkl_file)
firstReadDone = True
# Compute the modes
topModes = post.roadSegmentModes(mapgraph,mapdynamics,nmsNumModes,nmsSegmentLength,nmsRadius,nmsCutoffPct)
# Dump the modes
pickle.dump(topModes,modes_pkl_file)
modes_pkl_file.close()
stats_pkl_file.close()
if baseData['mpi']:
for pfile in post_pkl_files:
pfile.close()
else:
post_pkl_file.close()
def do_display_gt(options,args):
assert(len(args) == 1)
load_config_file(options,args[0])
import matplotlib
if options.img_format == 'pdf':
matplotlib.use('pdf')
else:
matplotlib.use('agg')
import matplotlib.pyplot as plt
mapgraph,mapVersion = load_mapgraph(options)
mapSz = mapgraph.posMax - mapgraph.posMin
dpi = 100
sortedStreets = mapgraph.nodes()
sortedStreets.sort()
assert(options.gps_data != None)
if options.gps_data != None:
stateIter = open(options.gps_data,'rt')
projScale = mapgraph.graph['mercator_scale']
ptCoordOrigin = mapgraph.graph['position_offset']
latlon = np.empty(2)
fig = plt.figure()
fig.set_dpi(dpi)
fig.set_size_inches(10,10)
ax = fig.add_subplot(111)
for (tInd,statet) in enumerate(stateIter):
lineParts = statet.split()
latlon[0] = float(lineParts[0])
latlon[1] = float(lineParts[1])
gtPos = mercatorProj(latlon,projScale) - ptCoordOrigin
fig.clf()
ax = fig.add_subplot(111)
ax.set_xlim(gtPos[0] - 0.125,gtPos[0] + 0.125)
ax.set_ylim(gtPos[1] - 0.125,gtPos[1] + 0.125)
mapgraph.display(fig, ax, gtPos = gtPos)
fname = get_outfile_path(options,'gtDisplay{0:04}.{1}'.format(tInd+1,options.img_format))
extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
fig.savefig(fname,bbox_inches=extent.expanded(1.1, 1.1),dpi=dpi)
# fig.savefig(fname,dpi=dpi)
def do_plot_data(options):
import matplotlib.pyplot as plt
baseData = pickle.load(open(get_datafile_path(options),'rb'))
states = baseData['states']
mapdynamics = baseData['mapdynamics']
Astate_vel = mapdynamics.get_state_vel_matrix()
time_mat = np.zeros((len(states)))
states_mat = np.zeros((len(states),4))
streets_mat = np.zeros((len(states)))
vtranslp_mat = np.zeros((len(states)-1))
prevStreet = None
for i in range(len(states)):
states_mat[i,:] = np.dot(Astate_vel,states[i]['state']).T
streets_mat[i] = states[i]['street'] == prevStreet
time_mat[i] = states[i]['time']
prevStreet = states[i]['street']
if i > 0:
vtranslp_mat[i-1] = mapdynamics.street_transition_logprob(mapgraph,prevStreet,states[i-1]['street'],states[i-1]['state'])
    labels = ('$d$',r'$\dot{d}$',r'$\theta$',r'$\dot{\theta}$')
scales = (1,1,180.0/np.pi,180.0/np.pi)
plt.figure(1)
for i in range(4):
plt.subplot(2,2,i+1)
plt.plot(time_mat,scales[i]*states_mat[:,i])
plt.title(labels[i])
plt.figure(2)
plt.subplot(2,1,1)
plt.plot(time_mat,streets_mat,'--')
plt.subplot(2,1,2)
plt.plot(time_mat[1:],np.exp(vtranslp_mat))
plt.show()
def convert_gps_trajectory(mapgraph,mapdynamics,statefile,dt,data_skip):
debugPrint = False
roadPosThresh = 1.0/1000.0
nbhdThresh = 10.0/1000.0
dirThresh = np.pi/2.0
transAngleThresh = np.pi
transDistThresh = 30.0/1000.0
errAngW = 1.0/(np.pi/8.0)
errDistW = 1.0
projScale = mapgraph.graph['mercator_scale']
ptCoordOrigin = mapgraph.graph['position_offset']
latlon = np.empty(2)
instVel = np.empty(2)
avgInstVel = np.zeros(2)
pos = None
roadSetSeq = {}
posInfos = {}
#print 'Parsing GPS data and finding nearest streets...',
stateFile = open(statefile,'rt')
for (tInd,line) in enumerate(stateFile):
lineParts = line.split()
latlon[0] = float(lineParts[0])
latlon[1] = float(lineParts[1])
instVF = float(lineParts[9])/1000.0
instVel[0] = float(lineParts[7])/1000.0
instVel[1] = float(lineParts[6])/1000.0
pos = mercatorProj(latlon,projScale) - ptCoordOrigin
heading = float(lineParts[5])
headingDir = np.array([np.cos(heading),np.sin(heading)])
if tInd == 0:
currRoads = None
nbhdRdSet = None
else:
currRoads = roadSetSeq[tInd-1]
nbhdRdSet = set()
for prevStreet in currRoads.iterkeys():
for nextStreet in mapgraph.successors_iter(prevStreet):
# if nextStreet == SINK_ROAD or np.abs(mapgraph[prevStreet][nextStreet]['transition_angle']) > transAngleThresh or mapgraph[prevStreet][nextStreet]['transition_distance'] > mapgraph.node[prevStreet]['length'] + transDistThresh:
if nextStreet == SINK_ROAD or (mapgraph[prevStreet][nextStreet]['transition_distance'] - mapgraph.node[prevStreet]['length']) > transDistThresh:
# if nextStreet == SINK_ROAD:
continue
nbhdRdSet.add(nextStreet)
if len(nbhdRdSet) == 0:
print '\nERROR: no roads found in neighbourhood set at tInd = {0}'.format(tInd)
print ' Transition Angle Threshold: {0}'.format((180.0/np.pi)*transAngleThresh)
print 'Previous Streets:'
for prevStreet in currRoads.iterkeys():
print '{0}:'.format(prevStreet)
for nextStreet in mapgraph.successors_iter(prevStreet):
if nextStreet == SINK_ROAD:
continue
print ' -> {0}:\n transAngle = {1}, transDistExcess = {2} '.format(nextStreet,(180.0/np.pi)*mapgraph[prevStreet][nextStreet]['transition_angle'],mapgraph[prevStreet][nextStreet]['transition_distance'] - mapgraph.node[prevStreet]['length'])
assert(len(nbhdRdSet) > 0)
(nbhdRds,distDataFoo) = mapgraph.getNearbyRoads(pos,headingDir,nbhdThresh,dirThresh,roadPosThresh,nbhdRdSet)
if debugPrint:
print '\n\ntInd = {0}'.format(tInd)
print ' Distance Threshold: {0}, Direction Threshold: {1}, Alpha Threshold: {2}'.format(nbhdThresh,(180.0/np.pi)*dirThresh,roadPosThresh)
if len(nbhdRds) == 0:
print '\nERROR: no nearby roads found in neighbourhood set at tInd = {0}'.format(tInd)
if (debugPrint or len(nbhdRds) == 0) and currRoads != None:
print '\nCandidate Roads at previous frame:'
for (prevRd,distInfo) in currRoads.iteritems():
print ' {0}: dist = {1}, relAngle = {2}, alpha = {3} (len = {4})'.format(prevRd,distInfo[0],(180.0/np.pi)*distInfo[1],distInfo[3],mapgraph.node[prevRd]['length'])
mapgraph.printNode(prevRd)
print '\nNeighbourhood roads:'
for nbRd in nbhdRdSet:
distInfo = mapgraph.distanceToRoad(nbRd,pos,headingDir)
print ' {0}: dist = {1}, relAngle = {2}, alpha = {3} (len = {4})'.format(nbRd,distInfo[0],(180.0/np.pi)*distInfo[1],distInfo[3],mapgraph.node[nbRd]['length'])
if debugPrint:
print '\nCurrent candidate roads:'
for (prevRd,distInfo) in nbhdRds.iteritems():
print ' {0}: dist = {1}, relAngle = {2}, alpha = {3} (len = {4})'.format(prevRd,distInfo[0],(180.0/np.pi)*distInfo[1],distInfo[3],mapgraph.node[prevRd]['length'])
assert(len(nbhdRds) > 0)
roadSetSeq[tInd] = deepcopy(nbhdRds)
posInfos[tInd] = (pos,np.copy(latlon),np.copy(instVF),np.copy(instVel),heading)
# Prune roads at previous time steps which don't connect to the current set of road possibilities
for ctInd in range(tInd,0,-1):
if len(roadSetSeq[ctInd-1]) == 1:
break
rmRoads = set()
for prevStreet in roadSetSeq[ctInd-1].iterkeys():
connected = False
for nextStreet in roadSetSeq[ctInd].iterkeys():
if mapgraph.has_edge(prevStreet,nextStreet) and (mapgraph[prevStreet][nextStreet]['transition_distance'] - mapgraph.node[prevStreet]['length']) <= transDistThresh:
connected = True
break
if not connected:
rmRoads.add(prevStreet)
# print 'back pruning {1} at {0}'.format(ctInd-1,len(rmRoads))
for delStreet in rmRoads:
del (roadSetSeq[ctInd-1])[delStreet]
if len(rmRoads) == 0:
break
stateFile.close()
#print 'done.'
#print 'Disambiguating nearby roads...',
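    # Greedy disambiguation: repeatedly commit to the candidate road with the
    # smallest weighted distance/heading error, then prune candidates at other
    # time steps that are no longer reachable from the committed choice.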
while True:
# Find least ambiguous street.
minErr = np.inf
minErrInd = None
minErrRoad = None
for (tInd,currRoads) in roadSetSeq.iteritems():
if len(currRoads) == 1:
continue
for (currStreet,distInfo) in currRoads.iteritems():
currErr = errDistW*distInfo[0] + errAngW*np.abs(distInfo[1])
if currErr < minErr:
minErr = currErr
minErrInd = tInd
minErrRoad = currStreet
minErrDistInfo = distInfo
# No more ambiguous roads.
if minErrInd == None:
break
# Prune other possibilities
#print 'Selecting {0} at {1}'.format(minErrRoad,minErrInd)
roadSetSeq[minErrInd] = {minErrRoad:minErrDistInfo}
# Prune forwards
for tInd in range(minErrInd,len(roadSetSeq)-1,1):
prevRoads = roadSetSeq[tInd]
nextRoads = roadSetSeq[tInd+1]
if len(nextRoads) == 1:
break
rmRoads = set()
for nextStreet in nextRoads.iterkeys():
connected = False
for prevStreet in prevRoads.iterkeys():
if mapgraph.has_edge(prevStreet,nextStreet) and (mapgraph[prevStreet][nextStreet]['transition_distance'] - mapgraph.node[prevStreet]['length']) <= transDistThresh:
connected = True
break
if not connected:
rmRoads.add(nextStreet)
# print 'forward pruning {1} at {0}'.format(tInd+1,len(rmRoads))
for delStreet in rmRoads:
del roadSetSeq[tInd+1][delStreet]
if len(rmRoads) == 0:
break
# Prune backwards
for tInd in range(minErrInd,0,-1):
prevRoads = roadSetSeq[tInd-1]
nextRoads = roadSetSeq[tInd]
if len(prevRoads) == 1:
break
rmRoads = set()
for prevStreet in prevRoads.iterkeys():
connected = False
for nextStreet in nextRoads.iterkeys():
if mapgraph.has_edge(prevStreet,nextStreet) and (mapgraph[prevStreet][nextStreet]['transition_distance'] - mapgraph.node[prevStreet]['length']) <= transDistThresh:
connected = True
break
if not connected:
rmRoads.add(prevStreet)
# print 'back pruning {1} at {0}'.format(tInd-1,len(rmRoads))
for delStreet in rmRoads:
del roadSetSeq[tInd-1][delStreet]
if len(rmRoads) == 0:
break
#print 'done.'
prevStreet = None
prevPosition = None
prevOrient = None
currStreet = None
currPosition = None
currOrient = None
if data_skip == None or data_skip < 1:
data_skip = 1
else:
mapdynamics.setParameters({'dt':dt*data_skip,'dt_1':dt*data_skip})
debugStreet = None
states = []
headingErrSum = 0
posErrSum = 0
numErrs = 0
#print 'Converting to street state...'
for tInd in range(len(roadSetSeq)):
# if debugStreet != currRoads.keys()[0]:
# print '{0}: -> {1}'.format(tInd,debugStreet)
# debugStreet = currRoads.keys()[0]
if (tInd+1)%data_skip != 0:
continue
# print tInd
currRoads = roadSetSeq[tInd]
(gpsPos,latlon,instVF,instVel,heading) = posInfos[tInd]
assert(len(currRoads) == 1)
prev2Position = prevPosition
prev2Orient = prevOrient
prev2Street = prevStreet
prevStreet = currStreet
prevPosition = currPosition
prevOrient = currOrient
currStreet = currRoads.keys()[0]
currOrient = currRoads[currStreet][2]
currPosition = currRoads[currStreet][3]
projPos = mapgraph.get_road_position(currStreet,np.array([currPosition]))
posErrSum += np.sqrt(np.sum(np.power(projPos.reshape(2) - gpsPos.reshape(2),2.0)))
numErrs += 1
if prevStreet != None and not mapgraph.has_edge(prevStreet,currStreet):
print 'ERROR: missing link at tInd = {0}'.format(tInd)
print 'prevStreet:'
mapgraph.printNode(prevStreet)
print '\ncurrStreet:'
mapgraph.printNode(currStreet)
assert(prevStreet == None or mapgraph.has_edge(prevStreet,currStreet))
if currStreet != prevStreet:
if prevStreet != None:
prevOrient = prevOrient - mapgraph[prevStreet][currStreet]['angle']
prevPosition = prevPosition - mapgraph.node[prevStreet]['length']
prev2Orient = prev2Orient - mapgraph[prevStreet][currStreet]['angle']
prev2Position = prev2Position - mapgraph.node[prevStreet]['length']
else:
prevOrient = currOrient
prevPosition = currPosition - mapdynamics.dt*instVF
prev2Orient = currOrient
prev2Position = currPosition - 2*mapdynamics.dt*instVF
stateVec = mapdynamics.convert_posorient_sequence(mapgraph,[(prev2Street,prev2Position,prev2Orient),(prevStreet,prevPosition,prevOrient),(currStreet,currPosition,currOrient)])
states.append({'street':currStreet,'state':stateVec[-1],'gps_position':gpsPos,'gps_heading':heading,'map_position':projPos,'latlon':latlon})
# print 'Map projection error: {0}m'.format(1000.0*posErrSum/numErrs)
return states
def do_convert_gps_data(options,cfgFile):
load_config_file(options,cfgFile)
mapgraph,mapVersion = load_mapgraph(options)
mapdynamics = load_mapdynamics(options)
states = convert_gps_trajectory(mapgraph,mapdynamics,options.gps_data,options.dt,options.data_skip)
outName = get_mapsequencedyn_outfile_path(options,'gt_states.p',includeCrop = False)
pickle.dump(states, open(outName,'wb'))
def do_synthesize_odometry(options,args):
if options.mpi:
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
else:
size = 1
rank = 0
if size > 1:
if rank == 0:
print 'Warning: synthesize_odometry does not exploit MPI parallelization'
else:
return
assert len(args) == 4, 'Wrong number of arguments for synthesize_odometry.'
obsFile = args[0]
assert obsFile.endswith('.obs')
cfgFile = args[1]
assert cfgFile.endswith('.dcfg')
posSigma = float(args[2])
angSigma = float(args[3])
load_config_file(options,cfgFile)
mapgraph,mapVersion = load_mapgraph(options)
mapdynamics = load_mapdynamics(options)
# These signal parameters are specific to the data we're currently using
signalMag = (0.01067254, 0.16930674)
# obs = np.array([float(splitLine[0])/options.dt, float(splitLine[1])/options.dt]).reshape(2,1)
# int_pos = np.array([float(splitLine[2]), float(splitLine[3])]).reshape(2,1)
# int_ang = splitLine[4]
projScale = mapgraph.graph['mercator_scale']
ptCoordOrigin = mapgraph.graph['position_offset']
latlon = np.empty(2)
obsFile = open(obsFile,'wt')
gpsFile = open(options.gps_data,'rt')
for (tInd,line) in enumerate(gpsFile):
lineParts = line.split()
latlon[0] = float(lineParts[0])
latlon[1] = float(lineParts[1])
pos = mercatorProj(latlon,projScale) - ptCoordOrigin
heading = float(lineParts[5])
if tInd == 0:
basePos = copy(pos)
baseHeading = copy(heading)
baseR = np.array([[np.cos(heading - np.pi/2.0), | |
# Repository: njes9701/poe-archnemesis-scanner
import configparser
from email.mime import image
from re import I, T
import sys
from dataclasses import dataclass
from configparser import ConfigParser
import win32gui
from win32clipboard import *
import tkinter as tk
from tkinter import messagebox
from typing import Callable, Any, Tuple, List, Dict
import cv2
import numpy as np
from PIL import ImageTk, Image, ImageGrab
COLOR_BG = 'grey19'
COLOR_FG_WHITE = 'snow'
COLOR_FG_GREEN = 'green3'
COLOR_FG_LIGHT_GREEN = 'DarkOliveGreen3'
COLOR_FG_ORANGE = 'orange2'
FONT_BIG = ('Consolas', '14')
FONT_SMALL = ('Consolas', '9')
@dataclass
class RecipeItemNode:
item: str
components: list
class ArchnemesisItemsMap:
"""
    Holds the information about all archnemesis items, recipes and images, and maps them together
"""
def __init__(self, scale: float):
# Put everything into the list so we could maintain the display order
zh_tw = [
('奇塔弗之觸', ['圖克哈瑪之觸', '艾貝拉斯之觸', '腐化者', '陰屍爆破']),
('善之觸', ['月神之觸', '日神之觸', '鏡像幻影', '魔靈吸取']),
('夏卡莉之觸', ['尾隨魔', '嗜魂者', '乾旱先鋒']),
('艾貝拉斯之觸', ['炎行者', '喪心病狂', '振興']),
('圖克哈瑪之觸', ['裂骨者', '劊子手', '熔岩屏障']),
('海洋王之觸', ['冰牢', '風行者', '先鋒召喚物']),
('艾爾卡莉之觸', ['陰屍爆破', '尾隨魔', '刺客']),
('日神之觸', ['刀槍不入', '熔岩屏障', '增幅召喚物']),
('月神之觸', ['刀槍不入', '霜行者', '增幅召喚物']),
('雕像', ['咒術師', '憎惡', '腐化者']),
('強化元素', ['招魂師', '鑄鋼', '混沌編織']),
('晶瑩剔透', ['永凍土', '振興', '狂戰士']),
('刀槍不入', ['哨兵', '勇士', '奉獻使徒']),
('腐化者', ['放血者', '混沌編織']),
('魔靈吸取', ['奉獻使徒', '發電機']),
('風行者', ['風暴編織', '急速']),
('鏡像幻影', ['回聲者', '魂靈牽引']),
('熔岩屏障', ['縱火', '裂骨者']),
('招魂師', ['烈焰編織', '冰霜編織', '風暴編織']),
('陰屍爆破', ['死靈師', '縱火']),
('炎行者', ['烈焰編織', '急速']),
('嗜魂者', ['魂靈牽引', '死靈師', '龐然大物']),
('冰牢', ['永凍土', '哨兵']),
('霜行者', ['冰霜編織', '急速']),
('樹人部落', ['毒素', '哨兵', '鑄鋼']),
('短暫幻想', ['勇士', '咒術師', '奧術緩衝']),
('尾隨魔', ['毒素', '放血者']),
('乾旱先鋒', ['憎惡', '銳眼']),
('咒術師', ['混沌編織', '回聲者']),
('劊子手', ['喪心病狂', '狂戰士']),
('振興', ['龐然大物', '吸血魔']),
('死靈師', ['投彈手', '超負荷']),
('詐欺師', ['超負荷', '刺客', '回聲者']),
('刺客', ['銳眼', '吸血魔']),
('增幅召喚物', ['死靈師', '劊子手', '龐然大物']),
('先鋒召喚物', ['發電機', '奧術緩衝']),
('奧術緩衝', []),
('狂戰士', []),
('放血者', []),
('投彈手', []),
('裂骨者', []),
('混沌編織', []),
('奉獻使徒', []),
('銳眼', []),
('發電機', []),
('回聲者', []),
('烈焰編織', []),
('喪心病狂', []),
('冰霜編織', []),
('龐然大物', []),
('急速', []),
('縱火', []),
('勇士', []),
('憎惡', []),
('豐饒', []),
('超負荷', []),
('永凍土', []),
('哨兵', []),
('魂靈牽引', []),
('鑄鋼', []),
('風暴編織', []),
('毒素', []),
('吸血魔', [])
]
eng = [
('Kitava-Touched', ['Tukohama-Touched', 'Abberath-Touched', 'Corrupter', 'Corpse Detonator']),
('Innocence-Touched', ['Lunaris-Touched', 'Solaris-Touched', 'Mirror Image', 'Mana Siphoner']),
('Shakari-Touched', ['Entangler', 'Soul Eater', 'Drought Bringer']),
('Abberath-Touched', ['Flame Strider', 'Frenzied', 'Rejuvenating']),
('Tukohama-Touched', ['Bonebreaker', 'Executioner', 'Magma Barrier']),
('Brine King-Touched', ['Ice Prison', 'Storm Strider', 'Heralding Minions']),
('Arakaali-Touched', ['Corpse Detonator', 'Entangler', 'Assassin']),
('Solaris-Touched', ['Invulnerable', 'Magma Barrier', 'Empowered Minions']),
('Lunaris-Touched', ['Invulnerable', 'Frost Strider', 'Empowered Minions']),
('Effigy', ['Hexer', 'Malediction', 'Corrupter']),
('Empowered Elements', ['Evocationist', 'Steel-Infused', 'Chaosweaver']),
('Crystal-Skinned', ['Permafrost', 'Rejuvenating', 'Berserker']),
('Invulnerable', ['Sentinel', 'Juggernaut', 'Consecrator']),
('Corrupter', ['Bloodletter', 'Chaosweaver']),
('Mana Siphoner', ['Consecrator', 'Dynamo']),
('Storm Strider', ['Stormweaver', 'Hasted']),
('Mirror Image', ['Echoist', 'Soul Conduit']),
('Magma Barrier', ['Incendiary', 'Bonebreaker']),
('Evocationist', ['Flameweaver', 'Frostweaver', 'Stormweaver']),
('Corpse Detonator', ['Necromancer', 'Incendiary']),
('Flame Strider', ['Flameweaver', 'Hasted']),
('Soul Eater', ['Soul Conduit', 'Necromancer', 'Gargantuan']),
('Ice Prison', ['Permafrost', 'Sentinel']),
('Frost Strider', ['Frostweaver', 'Hasted']),
('Treant Horder', ['Toxic', 'Sentinel', 'Steel-Infused']),
('Temporal Bubble', ['Juggernaut', 'Hexer', 'Arcane Buffer']),
('Entangler', ['Toxic', 'Bloodletter']),
('Drought Bringer', ['Malediction', 'Deadeye']),
('Hexer', ['Chaosweaver', 'Echoist']),
('Executioner', ['Frenzied', 'Berserker']),
('Rejuvenating', ['Gargantuan', 'Vampiric']),
('Necromancer', ['Bombardier', 'Overcharged']),
('Trickster', ['Overcharged', 'Assassin', 'Echoist']),
('Assassin', ['Deadeye', 'Vampiric']),
('Empowered Minions', ['Necromancer', 'Executioner', 'Gargantuan']),
('Heralding Minions', ['Dynamo', 'Arcane Buffer']),
('Arcane Buffer', []),
('Berserker', []),
('Bloodletter', []),
('Bombardier', []),
('Bonebreaker', []),
('Chaosweaver', []),
('Consecrator', []),
('Deadeye', []),
('Dynamo', []),
('Echoist', []),
('Flameweaver', []),
('Frenzied', []),
('Frostweaver', []),
('Gargantuan', []),
('Hasted', []),
('Incendiary', []),
('Juggernaut', []),
('Malediction', []),
('Opulent', []),
('Overcharged', []),
('Permafrost', []),
('Sentinel', []),
('Soul Conduit', []),
('Steel-Infused', []),
('Stormweaver', []),
('Toxic', []),
('Vampiric', [])
]
langs = configparser.ConfigParser()
langs.read('settings.ini')
langs_bool = langs['settings'].get('change_to_chinese')
langs_bool = True if langs_bool is not None and langs_bool == 'True' else False
if langs_bool:
self._arch_items = zh_tw
else:
self._arch_items = eng
self._images = dict()
self._small_image_size = 30
self._update_images(scale)
def _update_images(self, scale):
self._scale = scale
for item, _ in self._arch_items:
self._images[item] = dict()
image = self._load_image(item, scale)
self._image_size = image.size
self._images[item]['scan-image'] = self._create_scan_image(image)
# Convert the image to Tk image because we're going to display it
self._images[item]['display-image'] = ImageTk.PhotoImage(image=image)
image = image.resize((self._small_image_size, self._small_image_size))
self._images[item]['display-small-image'] = ImageTk.PhotoImage(image=image)
def _load_image(self, item: str, scale: float):
langs = configparser.ConfigParser()
langs.read('settings.ini')
langs_bool = langs['settings'].get('change_to_chinese')
langs_bool = True if langs_bool is not None and langs_bool == 'True' else False
if langs_bool:
image = Image.open(f'pictures_zh_tw/{item}.png')
else:
image = Image.open(f'pictures_eng/{item}.png')
# Scale the image according to the input parameter
return image.resize((int(image.width * scale), int(image.height * scale)))
def _create_scan_image(self, image):
# Remove alpha channel and replace it with predefined background color
background = Image.new('RGBA', image.size, (10, 10, 32))
image_without_alpha = Image.alpha_composite(background, image)
scan_template = cv2.cvtColor(np.array(image_without_alpha), cv2.COLOR_RGB2BGR)
w, h, _ = scan_template.shape
# Crop the image to help with scanning
return scan_template[int(h * 1.0 / 10):int(h * 2.3 / 3), int(w * 1.0 / 6):int(w * 5.5 / 6)]
def get_scan_image(self, item):
return self._images[item]['scan-image']
def get_display_image(self, item):
return self._images[item]['display-image']
def get_display_small_image(self, item):
return self._images[item]['display-small-image']
def items(self):
for item, _ in self._arch_items:
yield item
def recipes(self):
for item, recipe in self._arch_items:
if recipe:
yield (item, recipe)
def get_subtree_for(self, item: str):
tree = RecipeItemNode(item, [])
nodes = [tree]
while len(nodes) > 0:
node = nodes.pop(0)
children = self._get_item_components(node.item)
if len(children) > 0:
node.components = [RecipeItemNode(c, []) for c in children]
nodes.extend(node.components)
return tree
    def get_parent_recipes_for(self, item: str) -> List[str]:
parents = list()
for parent, components in self._arch_items:
if item in components:
parents.append(parent)
return parents
def _get_item_components(self, item) -> List[str]:
return next(l for x, l in self._arch_items if x == item)
@property
def image_size(self):
return self._image_size
@property
def scale(self) -> float:
return self._scale
@scale.setter
def scale(self, scale: float) -> None:
self._update_images(scale)
@property
def small_image_size(self):
return self._small_image_size
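# Illustrative sketch (not part of the original scanner): one way to use
# ArchnemesisItemsMap.get_subtree_for() to list the base (non-craftable)
# components behind a final recipe. The helper name and the traversal are
# our own; only the items-map API above is assumed.
def collect_base_components(items_map: ArchnemesisItemsMap, item: str) -> List[str]:
    """Return the leaf components (items with no recipe) needed to build `item`."""
    base = []
    stack = [items_map.get_subtree_for(item)]
    while stack:
        node = stack.pop()
        if node.components:
            # Intermediate recipe: descend into its components.
            stack.extend(node.components)
        else:
            # Leaf item: it has to be collected directly.
            base.append(node.item)
    return base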
@dataclass
class PoeWindowInfo:
x: int = 0
y: int = 0
width: int = 0
height: int = 0
client_width: int = 0
client_height: int = 0
title_bar_height: int = 0
class ImageScanner:
"""
    Implements the scanning algorithm with OpenCV. Maintains the scanning window to speed up the scanning.
"""
def __init__(self, info: PoeWindowInfo, items_map: ArchnemesisItemsMap):
self._scanner_window_size = (
info.x,
info.y + int(info.client_height / 4),
int(info.client_width / 3),
int(info.client_height * 2 / 3)
)
self._items_map = items_map
self._confidence_threshold = 0.94
def scan(self) -> Dict[str, List[Tuple[int, int]]]:
bbox = (
self._scanner_window_size[0],
self._scanner_window_size[1],
self._scanner_window_size[0] + self._scanner_window_size[2],
self._scanner_window_size[1] + self._scanner_window_size[3]
)
screen = ImageGrab.grab(bbox=bbox)
screen = np.array(screen)
screen = cv2.cvtColor(screen, cv2.COLOR_RGB2BGR)
results = dict()
for item in self._items_map.items():
template = self._items_map.get_scan_image(item)
heat_map = cv2.matchTemplate(screen, template, cv2.TM_CCOEFF_NORMED)
_, confidence, _, (x, y) = cv2.minMaxLoc(heat_map)
print(f'Best match for {item}: x={x}, y={y} confidence={confidence}', 'too low' if confidence < self._confidence_threshold else '')
findings = np.where(heat_map >= self._confidence_threshold)
if len(findings[0]) > 0:
rectangles = []
ht, wt = template.shape[0], template.shape[1]
for (x, y) in zip(findings[1], findings[0]):
# Add every box to the list twice in order to retain single (non-overlapping) boxes
rectangles.append([int(x), int(y), int(wt), int(ht)])
rectangles.append([int(x), int(y), int(wt), int(ht)])
rectangles, _ = cv2.groupRectangles(rectangles, 1, 0.1)
results[item] = [(rect[0], rect[1]) for rect in rectangles]
print(results)
return results
@property
def scanner_window_size(self) -> Tuple[int, int, int, int]:
return self._scanner_window_size
@scanner_window_size.setter
def scanner_window_size(self, value: Tuple[int, int, int, int]) -> None:
self._scanner_window_size = value
@property
def confidence_threshold(self) -> float:
return self._confidence_threshold
@confidence_threshold.setter
def confidence_threshold(self, value) -> None:
self._confidence_threshold = value
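# Illustrative sketch (ours, not from the project): running a scan without the
# tkinter overlay. The window geometry below is made up, and constructing
# ArchnemesisItemsMap assumes settings.ini and the pictures_* folders are present.
def run_headless_scan() -> Dict[str, List[Tuple[int, int]]]:
    info = PoeWindowInfo(x=0, y=0, width=1920, height=1080,
                         client_width=1920, client_height=1040, title_bar_height=40)
    items_map = ArchnemesisItemsMap(scale=1.0)
    scanner = ImageScanner(info, items_map)
    # Returns a mapping from item name to the (x, y) positions found on screen.
    return scanner.scan()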
class UIOverlay:
"""
Overlay window using tkinter '-topmost' property
"""
def __init__(self, root, info: PoeWindowInfo, items_map: ArchnemesisItemsMap, image_scanner: ImageScanner):
self._window_info = info
self._items_map = items_map
self._image_scanner = image_scanner
self._root = root
self._scan_results_window = None
self._recipe_browser_window = None
self._recipe_browser_current_root = ''
self._tooltip_window = None
self._highlight_windows_to_show = list()
self._scan_results_window_saved_position = (-1, 0)
self._settings = Settings(root, items_map, image_scanner)
self._create_controls()
self._root.configure(bg='')
self._root.overrideredirect(True)
self._root.geometry(f'+{info.x + 5}+{info.y + info.title_bar_height + 5}')
self._root.wm_attributes('-topmost', True)
self._root.deiconify()
@staticmethod
def create_toplevel_window(bg=''):
w = tk.Toplevel()
w.configure(bg=bg)
# Hide window outline/controls
w.overrideredirect(True)
# Make sure the window is always on top
w.wm_attributes('-topmost', True)
return w
def _create_controls(self) -> None:
l = tk.Button(self._root, text='[X]', fg=COLOR_FG_GREEN, bg=COLOR_BG, font=FONT_SMALL)
l.bind('<Button-1>', sys.exit)
l.bind('<B3-Motion>', lambda event: self._drag(self._root, -5, -5, event))
l.grid(row=0, column=0)
settings = tk.Button(self._root, text='Settings', fg=COLOR_FG_GREEN, bg=COLOR_BG, font=FONT_SMALL)
settings.bind('<Button-1>', lambda _: self._settings.show())
settings.bind('<B3-Motion>', lambda event: self._drag(self._root, -5, -5, event))
settings.grid(row=0, column=1)
self._scan_label_text = tk.StringVar(self._root, value='Scan')
self._scan_label = tk.Button(self._root, textvariable=self._scan_label_text, fg=COLOR_FG_GREEN, bg=COLOR_BG, font=FONT_SMALL)
self._scan_label.bind("<Button-1>", self._scan)
self._scan_label.bind('<B3-Motion>', lambda event: self._drag(self._root, -5, -5, event))
self._scan_label.grid(row=0, column=3)
def _drag(self, window, offset_x: int, offset_y: int, event) -> Tuple[int, int]:
x = offset_x + event.x + window.winfo_x()
y = offset_y + event.y + window.winfo_y()
window.geometry(f'+{x}+{y}')
return (x, y)
def _scan(self, _) -> None:
self._scan_label_text.set('Scanning...')
self._root.update()
results = self._image_scanner.scan()
if len(results) > 0:
| |
#
# Stripped version of "hookenv.py" (By <NAME>, 2020 <EMAIL>)
#
#
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Interactions with the Juju environment"
# Copyright 2013 Canonical Ltd.
#
# Authors:
# <NAME> <<EMAIL>>
from __future__ import print_function
from enum import Enum
from functools import wraps
import os
import json
import yaml
import subprocess
import sys
import errno
import tempfile
from subprocess import CalledProcessError
import six
if not six.PY3:
from UserDict import UserDict
else:
from collections import UserDict
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
INFO = "INFO"
DEBUG = "DEBUG"
TRACE = "TRACE"
MARKER = object()
SH_MAX_ARG = 131071
class WORKLOAD_STATES(Enum):
ACTIVE = 'active'
BLOCKED = 'blocked'
MAINTENANCE = 'maintenance'
WAITING = 'waiting'
cache = {}
def cached(func):
"""Cache return values for multiple executions of func + args
For example::
@cached
def unit_get(attribute):
pass
unit_get('test')
will cache the result of unit_get + 'test' for future calls.
"""
@wraps(func)
def wrapper(*args, **kwargs):
global cache
key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
try:
return cache[key]
except KeyError:
pass # Drop out of the exception handler scope.
res = func(*args, **kwargs)
cache[key] = res
return res
wrapper._wrapped = func
return wrapper
def flush(key):
"""Flushes any entries from function cache where the
key is found in the function+args """
flush_list = []
for item in cache:
if key in item:
flush_list.append(item)
for item in flush_list:
del cache[item]
def log(message, level=None):
"""Write a message to the juju log"""
command = ['juju-log']
if level:
command += ['-l', level]
if not isinstance(message, six.string_types):
message = repr(message)
command += [message[:SH_MAX_ARG]]
# Missing juju-log should not cause failures in unit tests
# Send log output to stderr
try:
subprocess.call(command)
except OSError as e:
if e.errno == errno.ENOENT:
if level:
message = "{}: {}".format(level, message)
message = "juju-log: {}".format(message)
print(message, file=sys.stderr)
else:
raise
def relation_type():
"""The scope for the current relation hook"""
return os.environ.get('JUJU_RELATION', None)
@cached
def relation_id(relation_name=None, service_or_unit=None):
"""The relation ID for the current or a specified relation"""
if not relation_name and not service_or_unit:
return os.environ.get('JUJU_RELATION_ID', None)
elif relation_name and service_or_unit:
service_name = service_or_unit.split('/')[0]
for relid in relation_ids(relation_name):
remote_service = remote_service_name(relid)
if remote_service == service_name:
return relid
else:
raise ValueError('Must specify neither or both of relation_name and service_or_unit')
def local_unit():
"""Local unit ID"""
return os.environ['JUJU_UNIT_NAME']
def remote_unit():
"""The remote unit for the current relation hook"""
return os.environ.get('JUJU_REMOTE_UNIT', None)
@cached
def remote_service_name(relid=None):
"""The remote service name for a given relation-id (or the current relation)"""
if relid is None:
unit = remote_unit()
else:
units = related_units(relid)
unit = units[0] if units else None
return unit.split('/')[0] if unit else None
@cached
def relation_get(attribute=None, unit=None, rid=None):
"""Get relation information"""
_args = ['relation-get', '--format=json']
if rid:
_args.append('-r')
_args.append(rid)
_args.append(attribute or '-')
if unit:
_args.append(unit)
try:
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
except ValueError:
return None
except CalledProcessError as e:
if e.returncode == 2:
return None
raise
def relation_set(relation_id=None, relation_settings=None, **kwargs):
"""Set relation information for the current unit"""
relation_settings = relation_settings if relation_settings else {}
relation_cmd_line = ['relation-set']
accepts_file = "--file" in subprocess.check_output(
relation_cmd_line + ["--help"], universal_newlines=True)
if relation_id is not None:
relation_cmd_line.extend(('-r', relation_id))
settings = relation_settings.copy()
settings.update(kwargs)
for key, value in settings.items():
# Force value to be a string: it always should, but some call
# sites pass in things like dicts or numbers.
if value is not None:
settings[key] = "{}".format(value)
if accepts_file:
# --file was introduced in Juju 1.23.2. Use it by default if
# available, since otherwise we'll break if the relation data is
# too big. Ideally we should tell relation-set to read the data from
# stdin, but that feature is broken in 1.23.2: Bug #1454678.
with tempfile.NamedTemporaryFile(delete=False) as settings_file:
settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
subprocess.check_call(
relation_cmd_line + ["--file", settings_file.name])
os.remove(settings_file.name)
else:
for key, value in settings.items():
if value is None:
relation_cmd_line.append('{}='.format(key))
else:
relation_cmd_line.append('{}={}'.format(key, value))
subprocess.check_call(relation_cmd_line)
# Flush cache of any relation-gets for local unit
flush(local_unit())
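# Illustrative helper (ours, not part of the stripped hookenv module): how a
# charm hook might publish its endpoint on the current relation using
# relation_set(). The 'hostname'/'port' key names are arbitrary examples.
def _example_publish_endpoint(port):
    relation_set(relation_settings={
        'hostname': unit_get('private-address'),
        'port': str(port),
    })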
def relation_clear(r_id=None):
''' Clears any relation data already set on relation r_id '''
settings = relation_get(rid=r_id,
unit=local_unit())
for setting in settings:
if setting not in ['public-address', 'private-address']:
settings[setting] = None
relation_set(relation_id=r_id,
**settings)
@cached
def relation_ids(reltype=None):
"""A list of relation_ids"""
reltype = reltype or relation_type()
relid_cmd_line = ['relation-ids', '--format=json']
if reltype is not None:
relid_cmd_line.append(reltype)
        return json.loads(
            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
    return []
@cached
def related_units(relid=None):
"""A list of related units"""
relid = relid or relation_id()
units_cmd_line = ['relation-list', '--format=json']
if relid is not None:
units_cmd_line.extend(('-r', relid))
return json.loads(
subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
def function_get(key=None):
"""Gets the value of an action parameter, or all key/value param pairs"""
cmd = ['function-get']
# Fallback for older charms.
if not cmd_exists('function-get'):
cmd = ['action-get']
if key is not None:
cmd.append(key)
cmd.append('--format=json')
function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
return function_data
def expected_related_units(reltype=None):
"""Get a generator for units we expect to join relation based on
goal-state.
Note that you can not use this function for the peer relation, take a look
at expected_peer_units() for that.
This function will raise KeyError if you request information for a
relation type for which juju goal-state does not have information. It will
raise NotImplementedError if used with juju versions without goal-state
support.
Example usage:
log('participant {} of {} joined relation {}'
.format(len(related_units()),
len(list(expected_related_units())),
relation_type()))
:param reltype: Relation type to list data for, default is to list data for
        the relation type we are currently executing a hook for.
:type reltype: str
:returns: iterator
:rtype: types.GeneratorType
:raises: KeyError, NotImplementedError
"""
if not has_juju_version("2.4.4"):
# goal-state existed in 2.4.0, but did not list individual units to
# join a relation in 2.4.1 through 2.4.3. (LP: #1794739)
raise NotImplementedError("goal-state relation unit count")
reltype = reltype or relation_type()
_goal_state = goal_state()
return (key for key in _goal_state['relations'][reltype] if '/' in key)
@cached
def relation_for_unit(unit=None, rid=None):
"""Get the json represenation of a unit's relation"""
unit = unit or remote_unit()
relation = relation_get(unit=unit, rid=rid)
for key in relation:
if key.endswith('-list'):
relation[key] = relation[key].split()
relation['__unit__'] = unit
return relation
@cached
def relations_for_id(relid=None):
"""Get relations of a specific relation ID"""
relation_data = []
relid = relid or relation_ids()
for unit in related_units(relid):
unit_data = relation_for_unit(unit, relid)
unit_data['__relid__'] = relid
relation_data.append(unit_data)
return relation_data
@cached
def metadata():
"""Get the current charm metadata.yaml contents as a python object"""
with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
return yaml.safe_load(md)
@cached
def relation_types():
"""Get a list of relation types supported by this charm"""
rel_types = []
md = metadata()
for key in ('provides', 'requires', 'peers'):
section = md.get(key)
if section:
rel_types.extend(section.keys())
return rel_types
@cached
def role_and_interface_to_relations(role, interface_name):
"""
Given a role and interface name, return a list of relation names for the
current charm that use that interface under that role (where role is one
of ``provides``, ``requires``, or ``peers``).
:returns: A list of relation names.
"""
_metadata = metadata()
results = []
for relation_name, relation in _metadata.get(role, {}).items():
if relation['interface'] == interface_name:
results.append(relation_name)
return results
@cached
def interface_to_relations(interface_name):
"""
Given an interface, return a list of relation names for the current
charm that use that interface.
:returns: A list of relation names.
"""
results = []
for role in ('provides', 'requires', 'peers'):
results.extend(role_and_interface_to_relations(role, interface_name))
return results
@cached
def unit_get(attribute):
"""Get the unit ID for the remote unit"""
_args = ['unit-get', '--format=json', attribute]
try:
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
except ValueError:
return None
def charm_dir():
"""Return the root directory of the current charm"""
d = os.environ.get('JUJU_CHARM_DIR')
if d is not None:
return d
return os.environ.get('CHARM_DIR')
def cmd_exists(cmd):
"""Return True if the specified cmd exists in the path"""
return any(
os.access(os.path.join(path, cmd), os.X_OK)
for path in os.environ["PATH"].split(os.pathsep)
)
def status_set(workload_state, message, application=False):
"""Set the workload state with a message
Use status-set to set the workload state with a message which is visible
to the user via juju status. If the status-set command is not found then
assume this is juju < 1.23 and juju-log the message instead.
workload_state -- valid juju workload state. str or WORKLOAD_STATES
message -- status update message
application -- Whether this is | |
# Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
import logging
import unittest
import cla
from cla.controllers.github import get_org_name_from_installation_event, get_github_activity_action
class TestGitHubController(unittest.TestCase):
example_1 = {
'action': 'created',
'installation': {
'id': 2,
'account': {
'login': 'Linux Foundation',
'id': 1,
'node_id': 'MDQ6VXNlcjE=',
'avatar_url': 'https://github.com/images/error/octocat_happy.gif',
'gravatar_id': '',
'url': 'https://api.github.com/users/octocat',
'html_url': 'https://github.com/octocat',
'followers_url': 'https://api.github.com/users/octocat/followers',
'following_url': 'https://api.github.com/users/octocat/following{/other_user}',
'gists_url': 'https://api.github.com/users/octocat/gists{/gist_id}',
'starred_url': 'https://api.github.com/users/octocat/starred{/owner}{/repo}',
'subscriptions_url': 'https://api.github.com/users/octocat/subscriptions',
'organizations_url': 'https://api.github.com/users/octocat/orgs',
'repos_url': 'https://api.github.com/users/octocat/repos',
'events_url': 'https://api.github.com/users/octocat/events{/privacy}',
'received_events_url': 'https://api.github.com/users/octocat/received_events',
'type': 'User',
'site_admin': False
},
'repository_selection': 'selected',
'access_tokens_url': 'https://api.github.com/installations/2/access_tokens',
'repositories_url': 'https://api.github.com/installation/repositories',
'html_url': 'https://github.com/settings/installations/2',
'app_id': 5725,
'target_id': 3880403,
'target_type': 'User',
'permissions': {
'metadata': 'read',
'contents': 'read',
'issues': 'write'
},
'events': [
'push',
'pull_request'
],
'created_at': 1525109898,
'updated_at': 1525109899,
'single_file_name': 'config.yml'
}
}
example_2 = {
"action": "created",
"comment": {
"url": "https://api.github.com/repos/grpc/grpc/pulls/comments/134346",
"pull_request_review_id": 134346,
"id": 134346,
"node_id": "MDI0OlB1bGxSZXF1ZXN0UmVredacted==",
"path": "setup.py",
"position": 16,
"original_position": 17,
"commit_id": "4bc9820redacted",
"original_commit_id": "d5515redacted",
"user": {
"login": "redacted",
"id": 134566,
"node_id": "MDQ6VXNlcjI3OTMyODI=",
"avatar_url": "https://avatars3.githubusercontent.com/u/2793282?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/veblush",
"html_url": "https://github.com/veblush",
"followers_url": "https://api.github.com/users/veblush/followers",
"following_url": "https://api.github.com/users/veblush/following{/other_user}",
"gists_url": "https://api.github.com/users/veblush/gists{/gist_id}",
"starred_url": "https://api.github.com/users/veblush/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/veblush/subscriptions",
"organizations_url": "https://api.github.com/users/veblush/orgs",
"repos_url": "https://api.github.com/users/veblush/repos",
"events_url": "https://api.github.com/users/veblush/events{/privacy}",
"received_events_url": "https://api.github.com/users/veblush/received_events",
"type": "User",
"site_admin": False
},
"pull_request_url": "https://api.github.com/repos/grpc/grpc/pulls/134566",
"author_association": "CONTRIBUTOR",
"_links": {
"self": {
"href": "https://api.github.com/repos/grpc/grpc/pulls/comments/134566"
},
"html": {
"href": "https://github.com/grpc/grpc/pull/20414#discussion_r134566"
},
"pull_request": {
"href": "https://api.github.com/repos/grpc/grpc/pulls/134566"
}
},
"in_reply_to_id": 1345667
},
"pull_request": {
"url": "https://api.github.com/repos/grpc/grpc/pulls/20414",
"id": 134566,
"node_id": "MDExxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
"html_url": "https://github.com/grpc/grpc/pull/20414",
"diff_url": "https://github.com/grpc/grpc/pull/20414.diff",
"patch_url": "https://github.com/grpc/grpc/pull/20414.patch",
"issue_url": "https://api.github.com/repos/grpc/grpc/issues/20414",
"number": 134566,
"state": "open",
"locked": False,
"title": "Added lib to gRPC python",
"user": {
"login": "redacted",
"id": 12345677,
"node_id": "MDQ6666666666666666=",
"avatar_url": "https://avatars3.githubusercontent.com/u/2793282?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/veblush",
"html_url": "https://github.com/veblush",
"followers_url": "https://api.github.com/users/veblush/followers",
"following_url": "https://api.github.com/users/veblush/following{/other_user}",
"gists_url": "https://api.github.com/users/veblush/gists{/gist_id}",
"starred_url": "https://api.github.com/users/veblush/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/veblush/subscriptions",
"organizations_url": "https://api.github.com/users/veblush/orgs",
"repos_url": "https://api.github.com/users/veblush/repos",
"events_url": "https://api.github.com/users/veblush/events{/privacy}",
"received_events_url": "https://api.github.com/users/veblush/received_events",
"type": "User",
"site_admin": False
},
"body": "Try to fix #20400 and #20174",
"created_at": "2019-10-01T06:08:53Z",
"updated_at": "2019-10-07T18:19:12Z",
"closed_at": None,
"merged_at": None,
"merge_commit_sha": "5bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
"assignee": None,
"assignees": [],
"requested_reviewers": [],
"requested_teams": [],
"labels": [
{
"id": 12345,
"node_id": "MDU66llllllllllllllllll=",
"url": "https://api.github.com/repos/grpc/grpc/labels/area/build",
"name": "area/build",
"color": "efdb40",
"default": False
},
{
"id": 12345,
"node_id": "MDU66666666666666666666=",
"url": "https://api.github.com/repos/grpc/grpc/labels/lang/Python",
"name": "lang/Python",
"color": "fad8c7",
"default": False
},
{
"id": 12345677,
"node_id": "MDUuuuuuuuuuuuuuuuuuuuu=",
"url": "https://api.github.com/repos/grpc/grpc/labels/release%20notes:%20no",
"name": "release notes: no",
"color": "0f5f75",
"default": False
}
],
"milestone": None,
"commits_url": "https://api.github.com/repos/grpc/grpc/pulls/1234/commits",
"review_comments_url": "https://api.github.com/repos/grpc/grpc/pulls/12345/comments",
"review_comment_url": "https://api.github.com/repos/grpc/grpc/pulls/comments{/number}",
"comments_url": "https://api.github.com/repos/grpc/grpc/issues/12345/comments",
"statuses_url": "https://api.github.com/repos/grpc/grpc/statuses/4444444444444444444444444444444444444444",
"head": {
"label": "redacted:fix-xyz",
"ref": "fix-xyz",
"sha": "4444444444444444444444444444444444444444",
"user": {
"login": "redacted",
"id": 1234556,
"node_id": "MDQ66llllllllllllll=",
"avatar_url": "https://avatars3.githubusercontent.com/u/2793282?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/veblush",
"html_url": "https://github.com/veblush",
"followers_url": "https://api.github.com/users/veblush/followers",
"following_url": "https://api.github.com/users/veblush/following{/other_user}",
"gists_url": "https://api.github.com/users/veblush/gists{/gist_id}",
"starred_url": "https://api.github.com/users/veblush/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/veblush/subscriptions",
"organizations_url": "https://api.github.com/users/veblush/orgs",
"repos_url": "https://api.github.com/users/veblush/repos",
"events_url": "https://api.github.com/users/veblush/events{/privacy}",
"received_events_url": "https://api.github.com/users/veblush/received_events",
"type": "User",
"site_admin": False
},
"repo": {
"id": 123456789,
"node_id": "MDEwwwwwwwwwwwwwwwwwwwwwwwwwwww=",
"name": "grpc",
"full_name": "redacted/grpc",
"private": False,
"owner": {
"login": "redacted",
"id": 1234567,
"node_id": "MDQ6666666666666666=",
"avatar_url": "https://avatars3.githubusercontent.com/u/2793282?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/veblush",
"html_url": "https://github.com/veblush",
"followers_url": "https://api.github.com/users/veblush/followers",
"following_url": "https://api.github.com/users/veblush/following{/other_user}",
"gists_url": "https://api.github.com/users/veblush/gists{/gist_id}",
"starred_url": "https://api.github.com/users/veblush/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/veblush/subscriptions",
"organizations_url": "https://api.github.com/users/veblush/orgs",
"repos_url": "https://api.github.com/users/veblush/repos",
"events_url": "https://api.github.com/users/veblush/events{/privacy}",
"received_events_url": "https://api.github.com/users/veblush/received_events",
"type": "User",
"site_admin": False
},
"html_url": "https://github.com/veblush/grpc",
"description": "The C based gRPC (C++, Python, Ruby, Objective-C, PHP, C#)",
"fork": True,
"url": "https://api.github.com/repos/veblush/grpc",
"forks_url": "https://api.github.com/repos/veblush/grpc/forks",
"keys_url": "https://api.github.com/repos/veblush/grpc/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/veblush/grpc/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/veblush/grpc/teams",
"hooks_url": "https://api.github.com/repos/veblush/grpc/hooks",
"issue_events_url": "https://api.github.com/repos/veblush/grpc/issues/events{/number}",
"events_url": "https://api.github.com/repos/veblush/grpc/events",
"assignees_url": "https://api.github.com/repos/veblush/grpc/assignees{/user}",
"branches_url": "https://api.github.com/repos/veblush/grpc/branches{/branch}",
"tags_url": "https://api.github.com/repos/veblush/grpc/tags",
"blobs_url": "https://api.github.com/repos/veblush/grpc/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/veblush/grpc/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/veblush/grpc/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/veblush/grpc/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/veblush/grpc/statuses/{sha}",
"languages_url": "https://api.github.com/repos/veblush/grpc/languages",
"stargazers_url": "https://api.github.com/repos/veblush/grpc/stargazers",
"contributors_url": "https://api.github.com/repos/veblush/grpc/contributors",
"subscribers_url": "https://api.github.com/repos/veblush/grpc/subscribers",
"subscription_url": "https://api.github.com/repos/veblush/grpc/subscription",
"commits_url": "https://api.github.com/repos/veblush/grpc/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/veblush/grpc/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/veblush/grpc/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/veblush/grpc/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/veblush/grpc/contents/{+path}",
"compare_url": "https://api.github.com/repos/veblush/grpc/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/veblush/grpc/merges",
"archive_url": "https://api.github.com/repos/veblush/grpc/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/veblush/grpc/downloads",
"issues_url": "https://api.github.com/repos/veblush/grpc/issues{/number}",
"pulls_url": "https://api.github.com/repos/veblush/grpc/pulls{/number}",
"milestones_url": "https://api.github.com/repos/veblush/grpc/milestones{/number}",
"notifications_url": "https://api.github.com/repos/veblush/grpc/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/veblush/grpc/labels{/name}",
"releases_url": "https://api.github.com/repos/veblush/grpc/releases{/id}",
"deployments_url": "https://api.github.com/repos/veblush/grpc/deployments",
"created_at": "2019-04-12T16:55:24Z",
"updated_at": "2019-10-03T17:32:29Z",
"pushed_at": "2019-10-05T03:41:45Z",
"git_url": "git://github.com/veblush/grpc.git",
"ssh_url": "[email protected]:veblush/grpc.git",
"clone_url": "https://github.com/veblush/grpc.git",
"svn_url": "https://github.com/veblush/grpc",
"homepage": "https://grpc.io",
"size": 218962,
"stargazers_count": 0,
"watchers_count": 0,
"language": "C++",
"has_issues": False,
"has_projects": True,
"has_downloads": False,
"has_wiki": True,
"has_pages": False,
"forks_count": 0,
"mirror_url": None,
"archived": False,
"disabled": False,
"open_issues_count": 0,
"license": {
"key": "apache-2.0",
"name": "Apache License 2.0",
"spdx_id": "Apache-2.0",
"url": "https://api.github.com/licenses/apache-2.0",
"node_id": "MDccccccccccccc="
},
"forks": 0,
"open_issues": 0,
"watchers": 0,
"default_branch": "master"
}
},
"base": {
"label": "grpc:master",
"ref": "master",
"sha": "9999999999999999999999999999999999999999",
"user": {
"login": "grpc",
"id": 7802525,
"node_id": "MDEyyyyyyyyyyyyyyyyyyyyyyyyyyyy=",
"avatar_url": "https://avatars1.githubusercontent.com/u/7802525?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/grpc",
"html_url": "https://github.com/grpc",
"followers_url": "https://api.github.com/users/grpc/followers",
"following_url": "https://api.github.com/users/grpc/following{/other_user}",
"gists_url": "https://api.github.com/users/grpc/gists{/gist_id}",
"starred_url": "https://api.github.com/users/grpc/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/grpc/subscriptions",
"organizations_url": "https://api.github.com/users/grpc/orgs",
"repos_url": "https://api.github.com/users/grpc/repos",
"events_url": "https://api.github.com/users/grpc/events{/privacy}",
"received_events_url": "https://api.github.com/users/grpc/received_events",
"type": "Organization",
"site_admin": False
},
"repo": {
"id": 27729880,
"node_id": "MDEwwwwwwwwwwwwwwwwwwwwwwwwwww==",
"name": "grpc",
"full_name": "grpc/grpc",
"private": False,
"owner": {
"login": "grpc",
"id": 7802525,
"node_id": "MDEyyyyyyyyyyyyyyyyyyyyyyyyyyyy=",
"avatar_url": "https://avatars1.githubusercontent.com/u/7802525?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/grpc",
"html_url": "https://github.com/grpc",
"followers_url": "https://api.github.com/users/grpc/followers",
"following_url": "https://api.github.com/users/grpc/following{/other_user}",
"gists_url": "https://api.github.com/users/grpc/gists{/gist_id}",
"starred_url": "https://api.github.com/users/grpc/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/grpc/subscriptions",
"organizations_url": "https://api.github.com/users/grpc/orgs",
"repos_url": "https://api.github.com/users/grpc/repos",
"events_url": "https://api.github.com/users/grpc/events{/privacy}",
"received_events_url": "https://api.github.com/users/grpc/received_events",
"type": "Organization",
"site_admin": False
},
"html_url": "https://github.com/grpc/grpc",
"description": "The C based gRPC (C++, Python, Ruby, Objective-C, PHP, C#)",
"fork": False,
"url": "https://api.github.com/repos/grpc/grpc",
"forks_url": "https://api.github.com/repos/grpc/grpc/forks",
"keys_url": "https://api.github.com/repos/grpc/grpc/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/grpc/grpc/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/grpc/grpc/teams",
"hooks_url": "https://api.github.com/repos/grpc/grpc/hooks",
"issue_events_url": "https://api.github.com/repos/grpc/grpc/issues/events{/number}",
"events_url": "https://api.github.com/repos/grpc/grpc/events",
"assignees_url": "https://api.github.com/repos/grpc/grpc/assignees{/user}",
"branches_url": "https://api.github.com/repos/grpc/grpc/branches{/branch}",
"tags_url": "https://api.github.com/repos/grpc/grpc/tags",
"blobs_url": "https://api.github.com/repos/grpc/grpc/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/grpc/grpc/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/grpc/grpc/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/grpc/grpc/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/grpc/grpc/statuses/{sha}",
"languages_url": "https://api.github.com/repos/grpc/grpc/languages",
"stargazers_url": "https://api.github.com/repos/grpc/grpc/stargazers",
"contributors_url": "https://api.github.com/repos/grpc/grpc/contributors",
"subscribers_url": "https://api.github.com/repos/grpc/grpc/subscribers",
"subscription_url": "https://api.github.com/repos/grpc/grpc/subscription",
"commits_url": "https://api.github.com/repos/grpc/grpc/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/grpc/grpc/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/grpc/grpc/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/grpc/grpc/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/grpc/grpc/contents/{+path}",
"compare_url": "https://api.github.com/repos/grpc/grpc/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/grpc/grpc/merges",
"archive_url": "https://api.github.com/repos/grpc/grpc/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/grpc/grpc/downloads",
"issues_url": "https://api.github.com/repos/grpc/grpc/issues{/number}",
"pulls_url": "https://api.github.com/repos/grpc/grpc/pulls{/number}",
"milestones_url": "https://api.github.com/repos/grpc/grpc/milestones{/number}",
"notifications_url": "https://api.github.com/repos/grpc/grpc/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/grpc/grpc/labels{/name}",
"releases_url": "https://api.github.com/repos/grpc/grpc/releases{/id}",
"deployments_url": "https://api.github.com/repos/grpc/grpc/deployments",
"created_at": "2014-12-08T18:58:53Z",
"updated_at": "2019-10-07T16:10:54Z",
"pushed_at": "2019-10-07T17:24:21Z",
"git_url": "git://github.com/grpc/grpc.git",
"ssh_url": "<EMAIL>:grpc/grpc.git",
"clone_url": "https://github.com/grpc/grpc.git",
"svn_url": "https://github.com/grpc/grpc",
"homepage": "https://grpc.io",
"size": 240231,
"stargazers_count": 23364,
"watchers_count": 23364,
"language": "C++",
"has_issues": True,
"has_projects": True,
"has_downloads": False,
"has_wiki": True,
"has_pages": True,
"forks_count": 5530,
"mirror_url": None,
"archived": False,
"disabled": False,
"open_issues_count": 886,
"license": {
"key": "apache-2.0",
"name": "Apache License 2.0",
"spdx_id": "Apache-2.0",
"url": "https://api.github.com/licenses/apache-2.0",
"node_id": "MDccccccccccccc="
},
"forks": 5530,
"open_issues": 886,
"watchers": 23364,
"default_branch": "master"
}
},
"_links": {
"self": {
"href": "https://api.github.com/repos/grpc/grpc/pulls/20414"
},
"html": {
"href": "https://github.com/grpc/grpc/pull/20414"
},
"issue": {
"href": "https://api.github.com/repos/grpc/grpc/issues/20414"
},
"comments": {
"href": "https://api.github.com/repos/grpc/grpc/issues/20414/comments"
},
"review_comments": {
"href": "https://api.github.com/repos/grpc/grpc/pulls/20414/comments"
},
"review_comment": {
"href": "https://api.github.com/repos/grpc/grpc/pulls/comments{/number}"
},
"commits": {
"href": "https://api.github.com/repos/grpc/grpc/pulls/20414/commits"
},
"statuses": {
"href": "https://api.github.com/repos/grpc/grpc/statuses/4bc982024113a7c2ced7d19af23c913adcb6bf08"
}
},
"author_association": "CONTRIBUTOR"
},
"repository": {
"id": 27729880,
"node_id": "MDEwwwwwwwwwwwwwwwwwwwwwwwwwww==",
"name": "grpc",
"full_name": "grpc/grpc",
"private": False,
"owner": {
"login": "grpc",
"id": 7802525,
"node_id": "MDEyyyyyyyyyyyyyyyyyyyyyyyyyyyy=",
"avatar_url": "https://avatars1.githubusercontent.com/u/7802525?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/grpc",
"html_url": "https://github.com/grpc",
"followers_url": "https://api.github.com/users/grpc/followers",
"following_url": "https://api.github.com/users/grpc/following{/other_user}",
"gists_url": "https://api.github.com/users/grpc/gists{/gist_id}",
"starred_url": "https://api.github.com/users/grpc/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/grpc/subscriptions",
"organizations_url": "https://api.github.com/users/grpc/orgs",
"repos_url": "https://api.github.com/users/grpc/repos",
"events_url": "https://api.github.com/users/grpc/events{/privacy}",
"received_events_url": "https://api.github.com/users/grpc/received_events",
"type": "Organization",
"site_admin": False
},
"html_url": "https://github.com/grpc/grpc",
"description": "The C based gRPC (C++, Python, Ruby, Objective-C, PHP, C#)",
"fork": False,
"url": "https://api.github.com/repos/grpc/grpc",
"forks_url": "https://api.github.com/repos/grpc/grpc/forks",
"keys_url": "https://api.github.com/repos/grpc/grpc/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/grpc/grpc/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/grpc/grpc/teams",
"hooks_url": "https://api.github.com/repos/grpc/grpc/hooks",
"issue_events_url": "https://api.github.com/repos/grpc/grpc/issues/events{/number}",
"events_url": "https://api.github.com/repos/grpc/grpc/events",
"assignees_url": "https://api.github.com/repos/grpc/grpc/assignees{/user}",
"branches_url": "https://api.github.com/repos/grpc/grpc/branches{/branch}",
"tags_url": "https://api.github.com/repos/grpc/grpc/tags",
"blobs_url": "https://api.github.com/repos/grpc/grpc/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/grpc/grpc/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/grpc/grpc/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/grpc/grpc/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/grpc/grpc/statuses/{sha}",
"languages_url": "https://api.github.com/repos/grpc/grpc/languages",
"stargazers_url": "https://api.github.com/repos/grpc/grpc/stargazers",
"contributors_url": "https://api.github.com/repos/grpc/grpc/contributors",
"subscribers_url": "https://api.github.com/repos/grpc/grpc/subscribers",
"subscription_url": "https://api.github.com/repos/grpc/grpc/subscription",
"commits_url": "https://api.github.com/repos/grpc/grpc/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/grpc/grpc/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/grpc/grpc/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/grpc/grpc/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/grpc/grpc/contents/{+path}",
"compare_url": "https://api.github.com/repos/grpc/grpc/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/grpc/grpc/merges",
"archive_url": "https://api.github.com/repos/grpc/grpc/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/grpc/grpc/downloads",
"issues_url": "https://api.github.com/repos/grpc/grpc/issues{/number}",
"pulls_url": "https://api.github.com/repos/grpc/grpc/pulls{/number}",
"milestones_url": "https://api.github.com/repos/grpc/grpc/milestones{/number}",
"notifications_url": "https://api.github.com/repos/grpc/grpc/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/grpc/grpc/labels{/name}",
"releases_url": "https://api.github.com/repos/grpc/grpc/releases{/id}",
"deployments_url": "https://api.github.com/repos/grpc/grpc/deployments",
"created_at": "2014-12-08T18:58:53Z",
"updated_at": "2019-10-07T16:10:54Z",
"pushed_at": "2019-10-07T17:24:21Z",
"git_url": "git://github.com/grpc/grpc.git",
"ssh_url": "[email protected]:grpc/grpc.git",
"clone_url": "https://github.com/grpc/grpc.git",
"svn_url": "https://github.com/grpc/grpc",
"homepage": "https://grpc.io",
"size": 240231,
"stargazers_count": 23364,
"watchers_count": 23364,
"language": "C++",
"has_issues": True,
"has_projects": True,
"has_downloads": False,
"has_wiki": True,
"has_pages": True,
"forks_count": 5530,
"mirror_url": None,
"archived": False,
"disabled": False,
"open_issues_count": 886,
"license": {
"key": "apache-2.0",
"name": "Apache License 2.0",
"spdx_id": "Apache-2.0",
"url": "https://api.github.com/licenses/apache-2.0",
"node_id": "MDccccccccccccc="
},
"forks": 5530,
"open_issues": 886,
"watchers": 23364,
"default_branch": "master"
},
"organization": {
"login": "grpc",
"id": 7802525,
"node_id": "MDEEEEEEEEEEEEEEEEEEEEEEEEEEEEE=",
"url": "https://api.github.com/orgs/grpc",
"repos_url": "https://api.github.com/orgs/grpc/repos",
"events_url": "https://api.github.com/orgs/grpc/events",
"hooks_url": "https://api.github.com/orgs/grpc/hooks",
"issues_url": "https://api.github.com/orgs/grpc/issues",
"members_url": "https://api.github.com/orgs/grpc/members{/member}",
"public_members_url": "https://api.github.com/orgs/grpc/public_members{/member}",
"avatar_url": "https://avatars1.githubusercontent.com/u/7802525?v=4",
"description": "A high performance, open source, general-purpose RPC framework"
},
"sender": {
"login": "redacted",
"id": 12345692,
"node_id": "MDQVVVVVVVVVVVVVVVV=",
"avatar_url": "https://avatars3.githubusercontent.com/u/2793282?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/veblush",
"html_url": "https://github.com/veblush",
"followers_url": "https://api.github.com/users/veblush/followers",
"following_url": "https://api.github.com/users/veblush/following{/other_user}",
"gists_url": "https://api.github.com/users/veblush/gists{/gist_id}",
"starred_url": "https://api.github.com/users/veblush/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/veblush/subscriptions",
"organizations_url": "https://api.github.com/users/veblush/orgs",
"repos_url": "https://api.github.com/users/veblush/repos",
"events_url": "https://api.github.com/users/veblush/events{/privacy}",
"received_events_url": "https://api.github.com/users/veblush/received_events",
"type": "User",
"site_admin": False
},
"installation": {
"id": 1656153,
"node_id": "zzzzzzzzzzzzzz=="
}
}
example_3 = {
"action": "deleted",
"comment": {
"url": "https://api.github.com/repos/grpc/grpc/pulls/comments/134346",
"pull_request_review_id": 134346,
"id": 134346,
"node_id": "MDI0OlB1bGxSZXF1ZXN0UmVredacted==",
"path": "setup.py",
"position": 16,
"original_position": 17,
"commit_id": "4bc9820redacted",
"original_commit_id": "d5515redacted",
"user": {
"login": "redacted",
"id": 134566,
"node_id": "MDQ6VXNlcjI3OTMyODI=",
"avatar_url": "https://avatars3.githubusercontent.com/u/2793282?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/veblush",
"html_url": "https://github.com/veblush",
"followers_url": "https://api.github.com/users/veblush/followers",
"following_url": "https://api.github.com/users/veblush/following{/other_user}",
"gists_url": "https://api.github.com/users/veblush/gists{/gist_id}",
"starred_url": "https://api.github.com/users/veblush/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/veblush/subscriptions",
"organizations_url": "https://api.github.com/users/veblush/orgs",
"repos_url": "https://api.github.com/users/veblush/repos",
"events_url": "https://api.github.com/users/veblush/events{/privacy}",
"received_events_url": "https://api.github.com/users/veblush/received_events",
"type": "User",
"site_admin": False
}
}
}
@classmethod
def setUpClass(cls) -> None:
pass
@classmethod
def tearDownClass(cls) -> None:
pass
def setUp(self) -> None:
# Only show critical logging stuff
        logging.getLogger().setLevel(logging.CRITICAL)
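    # Tests along these lines could exercise the fixtures above; the expected
    # values are read off the example payloads and are assumptions about the
    # controller helpers, not verified output:
    #
    #   def test_installation_event_org_name(self):
    #       self.assertEqual(
    #           get_org_name_from_installation_event(self.example_1),
    #           'Linux Foundation')
    #
    #   def test_activity_action(self):
    #       self.assertEqual(get_github_activity_action(self.example_2), 'created')
    #       self.assertEqual(get_github_activity_action(self.example_3), 'deleted')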
-4 as might be expected
- binary.gt where left=N/A and right=4 yields True
- binary.gt where left=N/A and right=0 yields False
The behavior is caused by grabbing the non-empty value and using it directly without
performing any operation. In the case of `gt`, the non-empty value is cast to a boolean.
For these reasons, users are required to be explicit when choosing this surprising behavior.
"""
method_name = "ewise_add"
self._expect_type(other, (Matrix, TransposedMatrix), within=method_name, argname="other")
op = get_typed_op(op, self.dtype, other.dtype)
# Per the spec, op may be a semiring, but this is weird, so don't.
if require_monoid:
if op.opclass != "BinaryOp" or op.monoid is None:
self._expect_op(
op,
"Monoid",
within=method_name,
argname="op",
extra_message="A BinaryOp may be given if require_monoid keyword is False",
)
else:
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
expr = MatrixExpression(
method_name,
f"GrB_Matrix_eWiseAdd_{op.opclass}",
[self, other],
op=op,
at=self._is_transposed,
bt=other._is_transposed,
)
if self.shape != other.shape:
expr.new(name="") # incompatible shape; raise now
return expr
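    # Usage sketch (illustrative only; assumes the public grblas API:
    # Matrix.from_values, the binary/monoid namespaces, and expr.new()):
    #
    #   A = Matrix.from_values([0, 0], [0, 1], [1, 2], nrows=2, ncols=2)
    #   B = Matrix.from_values([0, 1], [0, 0], [10, 20], nrows=2, ncols=2)
    #   A.ewise_add(B, monoid.plus).new()                         # union of structure
    #   A.ewise_add(B, binary.minus, require_monoid=False).new()  # opt in to the
    #                                                             # non-monoid behavior
    #   A.ewise_mult(B, binary.times).new()                       # intersection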
def ewise_mult(self, other, op=binary.times):
"""
GrB_Matrix_eWiseMult
Result will contain the intersection of indices from both Matrices
Default op is binary.times
"""
method_name = "ewise_mult"
self._expect_type(other, (Matrix, TransposedMatrix), within=method_name, argname="other")
op = get_typed_op(op, self.dtype, other.dtype)
# Per the spec, op may be a semiring, but this is weird, so don't.
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
expr = MatrixExpression(
method_name,
f"GrB_Matrix_eWiseMult_{op.opclass}",
[self, other],
op=op,
at=self._is_transposed,
bt=other._is_transposed,
)
if self.shape != other.shape:
expr.new(name="") # incompatible shape; raise now
return expr
def mxv(self, other, op=semiring.plus_times):
"""
GrB_mxv
Matrix-Vector multiplication. Result is a Vector.
Default op is semiring.plus_times
"""
method_name = "mxv"
self._expect_type(other, Vector, within=method_name, argname="other")
op = get_typed_op(op, self.dtype, other.dtype)
self._expect_op(op, "Semiring", within=method_name, argname="op")
expr = VectorExpression(
method_name,
"GrB_mxv",
[self, other],
op=op,
size=self._nrows,
at=self._is_transposed,
)
if self._ncols != other._size:
expr.new(name="") # incompatible shape; raise now
return expr
def mxm(self, other, op=semiring.plus_times):
"""
GrB_mxm
Matrix-Matrix multiplication. Result is a Matrix.
Default op is semiring.plus_times
"""
method_name = "mxm"
self._expect_type(other, (Matrix, TransposedMatrix), within=method_name, argname="other")
op = get_typed_op(op, self.dtype, other.dtype)
self._expect_op(op, "Semiring", within=method_name, argname="op")
expr = MatrixExpression(
method_name,
"GrB_mxm",
[self, other],
op=op,
nrows=self._nrows,
ncols=other._ncols,
at=self._is_transposed,
bt=other._is_transposed,
)
if self._ncols != other._nrows:
expr.new(name="") # incompatible shape; raise now
return expr
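    # Usage sketch (illustrative only; assumes Matrix/Vector.from_values and the
    # semiring namespace from the public grblas API):
    #
    #   A = Matrix.from_values([0, 1], [1, 0], [1.0, 2.0], nrows=2, ncols=2)
    #   v = Vector.from_values([0, 1], [3.0, 4.0], size=2)
    #   A.mxv(v, semiring.plus_times).new()  # conventional matrix-vector product
    #   A.mxm(A, semiring.min_plus).new()    # e.g. shortest-path style product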
def kronecker(self, other, op=binary.times):
"""
GrB_kronecker
Kronecker product or sum (depending on op used)
Default op is binary.times
"""
method_name = "kronecker"
self._expect_type(other, (Matrix, TransposedMatrix), within=method_name, argname="other")
op = get_typed_op(op, self.dtype, other.dtype)
# Per the spec, op may be a semiring, but this is weird, so don't.
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
return MatrixExpression(
method_name,
f"GrB_Matrix_kronecker_{op.opclass}",
[self, other],
op=op,
nrows=self._nrows * other._nrows,
ncols=self._ncols * other._ncols,
at=self._is_transposed,
bt=other._is_transposed,
)
def apply(self, op, *, left=None, right=None):
"""
GrB_Matrix_apply
Apply UnaryOp to each element of the calling Matrix
A BinaryOp can also be applied if a scalar is passed in as `left` or `right`,
effectively converting a BinaryOp into a UnaryOp
"""
method_name = "apply"
extra_message = (
"apply only accepts UnaryOp with no scalars or BinaryOp with `left` or `right` scalar."
)
if left is None and right is None:
op = get_typed_op(op, self.dtype)
self._expect_op(
op,
"UnaryOp",
within=method_name,
argname="op",
extra_message=extra_message,
)
cfunc_name = "GrB_Matrix_apply"
args = [self]
expr_repr = None
elif right is None:
if type(left) is not Scalar:
try:
left = Scalar.from_value(left)
except TypeError:
self._expect_type(
left,
Scalar,
within=method_name,
keyword_name="left",
extra_message="Literal scalars also accepted.",
)
op = get_typed_op(op, self.dtype, left.dtype)
if op.opclass == "Monoid":
op = op.binaryop
else:
self._expect_op(
op,
"BinaryOp",
within=method_name,
argname="op",
extra_message=extra_message,
)
cfunc_name = f"GrB_Matrix_apply_BinaryOp1st_{left.dtype}"
args = [_CScalar(left), self]
expr_repr = "{1.name}.apply({op}, left={0})"
elif left is None:
if type(right) is not Scalar:
try:
right = Scalar.from_value(right)
except TypeError:
self._expect_type(
right,
Scalar,
within=method_name,
keyword_name="right",
extra_message="Literal scalars also accepted.",
)
op = get_typed_op(op, self.dtype, right.dtype)
if op.opclass == "Monoid":
op = op.binaryop
else:
self._expect_op(
op,
"BinaryOp",
within=method_name,
argname="op",
extra_message=extra_message,
)
cfunc_name = f"GrB_Matrix_apply_BinaryOp2nd_{right.dtype}"
args = [self, _CScalar(right)]
expr_repr = "{0.name}.apply({op}, right={1})"
else:
raise TypeError("Cannot provide both `left` and `right` to apply")
return MatrixExpression(
method_name,
cfunc_name,
args,
op=op,
nrows=self._nrows,
ncols=self._ncols,
expr_repr=expr_repr,
at=self._is_transposed,
)
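    # Usage sketch (illustrative only; operator names assume the standard
    # grblas unary/binary namespaces):
    #
    #   A.apply(unary.ainv).new()             # elementwise additive inverse
    #   A.apply(binary.minus, right=1).new()  # A[i, j] - 1 for each stored value
    #   A.apply(binary.minus, left=1).new()   # 1 - A[i, j] for each stored value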
def reduce_rows(self, op=monoid.plus):
"""
GrB_Matrix_reduce
Reduce all values in each row, converting the matrix to a vector
Default op is monoid.lor for boolean and monoid.plus otherwise
"""
method_name = "reduce_rows"
op = get_typed_op(op, self.dtype)
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
# Using a monoid may be more efficient, so change to one if possible.
# Also, SuiteSparse doesn't like user-defined binarops here.
if op.opclass == "BinaryOp" and op.monoid is not None:
op = op.monoid
return VectorExpression(
method_name,
f"GrB_Matrix_reduce_{op.opclass}",
[self],
op=op,
size=self._nrows,
at=self._is_transposed,
)
def reduce_columns(self, op=monoid.plus):
"""
GrB_Matrix_reduce
Reduce all values in each column, converting the matrix to a vector
Default op is monoid.lor for boolean and monoid.plus otherwise
"""
method_name = "reduce_columns"
op = get_typed_op(op, self.dtype)
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
# Using a monoid may be more efficient, so change to one if possible.
# Also, SuiteSparse doesn't like user-defined binarops here.
if op.opclass == "BinaryOp" and op.monoid is not None:
op = op.monoid
return VectorExpression(
method_name,
f"GrB_Matrix_reduce_{op.opclass}",
[self],
op=op,
size=self._ncols,
at=not self._is_transposed,
)
def reduce_scalar(self, op=monoid.plus):
"""
GrB_Matrix_reduce
Reduce all values into a scalar
Default op is monoid.lor for boolean and monoid.plus otherwise
"""
method_name = "reduce_scalar"
op = get_typed_op(op, self.dtype)
if op.opclass == "BinaryOp" and op.monoid is not None:
op = op.monoid
else:
self._expect_op(op, "Monoid", within=method_name, argname="op")
return ScalarExpression(
method_name,
"GrB_Matrix_reduce_{output_dtype}",
[self],
op=op, # to be determined later
)
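    # Usage sketch (illustrative only):
    #
    #   A.reduce_rows(monoid.plus).new()     # Vector of row sums (length nrows)
    #   A.reduce_columns(monoid.plus).new()  # Vector of column sums (length ncols)
    #   A.reduce_scalar(monoid.plus).new()   # Scalar containing the overall sum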
##################################
# Extract and Assign index methods
##################################
def _extract_element(self, resolved_indexes, dtype=None, name="s_extract"):
if dtype is None:
dtype = self.dtype
else:
dtype = lookup_dtype(dtype)
row, _ = resolved_indexes.indices[0]
col, _ = resolved_indexes.indices[1]
if self._is_transposed:
row, col = col, row
result = Scalar.new(dtype, name=name)
if (
call(f"GrB_Matrix_extractElement_{dtype}", [_Pointer(result), self, row, col])
is not NoValue
):
result._is_empty = False
return result
def _prep_for_extract(self, resolved_indexes):
method_name = "__getitem__"
rows, rowsize = resolved_indexes.indices[0]
cols, colsize = resolved_indexes.indices[1]
if rowsize is None:
# Row-only selection; GraphBLAS doesn't have this method, so we hack it using transpose
row_index = rows
return VectorExpression(
method_name,
"GrB_Col_extract",
[self, cols, colsize, row_index],
expr_repr="{0.name}[{3}, [{2} cols]]",
size=colsize,
dtype=self.dtype,
at=not self._is_transposed,
)
elif colsize is None:
# Column-only selection
col_index = cols
return VectorExpression(
method_name,
"GrB_Col_extract",
[self, rows, rowsize, col_index],
expr_repr="{0.name}[[{2} rows], {3}]",
size=rowsize,
dtype=self.dtype,
at=self._is_transposed,
)
else:
return MatrixExpression(
method_name,
"GrB_Matrix_extract",
[self, rows, rowsize, cols, colsize],
expr_repr="{0.name}[[{2} rows], [{4} cols]]",
nrows=rowsize,
ncols=colsize,
dtype=self.dtype,
at=self._is_transposed,
)
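    # The three branches above correspond to user-level indexing along the
    # lines of (illustrative only):
    #   A[1, :]        -> Vector, via GrB_Col_extract on the transpose
    #   A[:, 2]        -> Vector, via GrB_Col_extract
    #   A[:2, [0, 3]]  -> Matrix, via GrB_Matrix_extract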
def _assign_element(self, resolved_indexes, value):
row, _ = resolved_indexes.indices[0]
col, _ = resolved_indexes.indices[1]
if type(value) is not Scalar:
try:
value = Scalar.from_value(value)
except TypeError:
self._expect_type(
value,
Scalar,
within="__setitem__",
argname="value",
extra_message="Literal scalars also accepted.",
)
# should we cast?
call(f"GrB_Matrix_setElement_{value.dtype}", [self, _CScalar(value), row, col])
def _prep_for_assign(self, resolved_indexes, value, mask=None, is_submask=False):
method_name = "__setitem__"
rows, rowsize = resolved_indexes.indices[0]
cols, colsize = resolved_indexes.indices[1]
extra_message = "Literal scalars also accepted."
if type(value) is Vector:
if rowsize is None and colsize is not None:
# Row-only selection
row_index = rows
if mask is not None and type(mask.mask) is Matrix:
if is_submask:
# C[i, J](M) << v
raise TypeError(
"Indices for subassign imply Vector submask, "
"but got Matrix mask instead"
)
else:
# C(M)[i, J] << v
# Upcast v to a Matrix and use Matrix_assign
rows = _CArray([rows.scalar.value])
rowsize = _CScalar(1)
new_value = Matrix.new(
value.dtype, nrows=1, ncols=value.size, name=f"{value.name}_as_matrix"
)
new_value[0, :] = value
delayed = MatrixExpression(
method_name,
"GrB_Matrix_assign",
[new_value, rows, rowsize, cols, colsize],
expr_repr="[[{2} rows], [{4} cols]] = {0.name}",
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
else:
if is_submask:
# C[i, J](m) << v
# SS, SuiteSparse-specific: subassign
cfunc_name = "GrB_Row_subassign"
expr_repr = "[{1}, [{3} cols]](%s) << {0.name}" % mask.name
else:
# C(m)[i, J] << v
# C[i, J] << v
cfunc_name = "GrB_Row_assign"
expr_repr = "[{1}, [{3} cols]] = {0.name}"
delayed = MatrixExpression(
method_name,
cfunc_name,
[value, row_index, cols, colsize],
expr_repr=expr_repr,
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
elif colsize is None and rowsize is not None:
# Column-only selection
col_index = cols
if mask is not None and type(mask.mask) is Matrix:
if is_submask:
# C[I, j](M) << v
raise TypeError(
"Indices for subassign imply Vector submask, "
"but got Matrix mask instead"
                        )
from __future__ import annotations
from contextlib import contextmanager
import dataclasses
from dataclasses import dataclass
import functools
import itertools
import ply.lex
from typing import List, Union, Tuple, Optional, Dict, Iterator, \
Callable, Any, Set, TypeVar, Generic, Iterable, Mapping, cast
from typing_extensions import Protocol
import utils
from utils import OrderedSet
import z3
Token = ply.lex.LexToken
Span = Tuple[Token, Token]
B = TypeVar('B')
class Denotable:
def __init__(self) -> None:
self._hash: Optional[int] = None
def _denote(self) -> Tuple:
raise Exception('Unexpected denotable object %s does not implement _denote method' % repr(self))
def __getstate__(self) -> Any:
return dict(
self.__dict__,
_hash=None,
)
def __hash__(self) -> int:
if self._hash is None:
self._hash = hash((type(self), self._denote()))
return self._hash
def __eq__(self, other: object) -> bool:
if not isinstance(other, Denotable):
return False
return (type(self) is type(other) and
self._denote() == other._denote())
class Sort(Denotable):
def __init__(self) -> None:
super().__init__()
def __repr__(self) -> str:
raise Exception('Unexpected sort %s does not implement __repr__ method' % type(self))
def __str__(self) -> str:
raise Exception('Unexpected sort %s does not implement __str__ method' % repr(self))
def __ne__(self, other: object) -> bool:
return not (self == other)
class HasSortField(Protocol):
sort: InferenceSort
class SortInferencePlaceholder:
def __init__(self, d: Optional[HasSortField] = None) -> None:
self.backpatches: List[HasSortField] = []
self.sort: Optional[Sort] = None
self.parent: Optional[SortInferencePlaceholder] = None
if d is not None:
self.add(d)
def add(self, d: HasSortField) -> None:
self.backpatches.append(d)
def root(self) -> SortInferencePlaceholder:
if self.parent is not None:
return self.parent.root()
else:
return self
def solve(self, sort: Sort) -> None:
assert self.parent is None
assert self.sort is None
self.sort = sort
for d in self.backpatches:
d.sort = sort
def merge(self, other: SortInferencePlaceholder) -> None:
assert self.parent is None
assert other.parent is None
assert self.sort is None
assert other.sort is None
if self == other:
return
other.parent = self
self.backpatches.extend(other.backpatches)
InferenceSort = Union[Sort, SortInferencePlaceholder, None]
# Returns a set describing the *mutable* symbols used by an expression.
# Immutable symbols and bound variables are *not* included.
# In the returned set, each element is a tuple (i, ts, s), where
# - s is the name of the symbol used
# - ts is a non-empty tuple of tokens describing the sequence of definitions by which
# expr refers to the symbol s (useful for locating error messages). the tuple is ordered
# from expr to the direct reference to s. if expr directly refers to s, then the tuple
# has length one.
# - i is the "state index" of the usage. in a single-vocabulary formula, this will
# always be 0, but in a multi-vocabulary formula, it indicates how many new() operators
# the usage is under.
def symbols_used(scope: Scope, expr: Expr, state_index: int = 0) -> Set[Tuple[int, Tuple[Optional[Span], ...], str]]:
def add_caller_span(
s: Set[Tuple[int, Tuple[Optional[Span], ...], str]]
) -> Set[Tuple[int, Tuple[Optional[Span], ...], str]]:
return set((i, (expr.span,) + l, sym) for (i, l, sym) in s)
if isinstance(expr, Bool) or isinstance(expr, Int):
return set()
elif isinstance(expr, UnaryExpr):
if expr.op == 'NEW':
return symbols_used(scope, expr.arg, state_index=state_index + 1)
else:
return symbols_used(scope, expr.arg, state_index)
elif isinstance(expr, BinaryExpr):
return symbols_used(scope, expr.arg1, state_index) | symbols_used(scope, expr.arg2, state_index)
elif isinstance(expr, NaryExpr):
ans: Set[Tuple[int, Tuple[Optional[Span], ...], str]] = set()
for arg in expr.args:
ans |= symbols_used(scope, arg, state_index)
return ans
elif isinstance(expr, AppExpr):
args: Set[Tuple[int, Tuple[Optional[Span], ...], str]] = set()
for arg in expr.args:
args |= symbols_used(scope, arg, state_index)
d = scope.get(expr.callee)
assert d is not None and not isinstance(d, tuple), (d, expr.callee, expr)
if isinstance(d, DefinitionDecl):
with scope.fresh_stack():
with scope.in_scope(d.binder, [None for i in range(len(d.binder.vs))]):
callee_symbols = symbols_used(scope, d.expr, state_index)
return args | add_caller_span(callee_symbols)
elif d.mutable:
return args | {(state_index, (expr.span,), expr.callee)}
else:
return args
elif isinstance(expr, QuantifierExpr):
with scope.in_scope(expr.binder, [None for i in range(len(expr.binder.vs))]):
return symbols_used(scope, expr.body, state_index)
elif isinstance(expr, Id):
d = scope.get(expr.name)
assert d is not None, expr.name
if isinstance(d, RelationDecl) or \
isinstance(d, ConstantDecl) or \
isinstance(d, FunctionDecl):
return {(state_index, (expr.span,), expr.name)} if d.mutable else set()
elif isinstance(d, DefinitionDecl):
with scope.fresh_stack():
return add_caller_span(symbols_used(scope, d.expr, state_index))
else:
return set()
elif isinstance(expr, IfThenElse):
return symbols_used(scope, expr.branch, state_index) | \
symbols_used(scope, expr.then, state_index) | \
symbols_used(scope, expr.els, state_index)
elif isinstance(expr, Let):
s1 = symbols_used(scope, expr.val, state_index)
with scope.in_scope(expr.binder, [None for i in range(len(expr.binder.vs))]):
return s1 | symbols_used(scope, expr.body, state_index)
else:
assert False
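# Worked example of the return format documented above (illustrative only):
# for a vocabulary with a mutable relation r and an immutable relation s,
#   symbols_used(scope, <expr for r(x) & s(x)>)  ->  {(0, (<span of r(x)>,), 'r')}
#   symbols_used(scope, <expr for new(r(x))>)    ->  {(1, (<span of r(x)>,), 'r')}
# Immutable symbols are omitted, and each new() increments the state index.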
def subst_vars_simple(expr: Expr, subst: Mapping[Id, Expr]) -> Expr:
if isinstance(expr, Bool) or isinstance(expr, Int):
return expr
elif isinstance(expr, UnaryExpr):
return UnaryExpr(op=expr.op, arg=subst_vars_simple(expr.arg, subst))
elif isinstance(expr, BinaryExpr):
return BinaryExpr(op=expr.op, arg1=subst_vars_simple(expr.arg1, subst),
arg2=subst_vars_simple(expr.arg2, subst))
elif isinstance(expr, AppExpr):
return AppExpr(callee=expr.callee, args=tuple(subst_vars_simple(a, subst) for a in expr.args))
elif isinstance(expr, Id):
return subst.get(expr, expr)
else:
print(expr)
assert False
# NOTE(capture-avoiding-substitution)
# This function is carefully written to avoid capture by following a strategy taught in CSE 490P.
# See the first 10 slides here: https://drive.google.com/file/d/1jFGF3snnC2_4N7cqpH_c0D_S6NPFknWg/view
# When going under a binding form, we avoid clashes with three kinds of names:
# - names otherwise free in the body of the binding form
# - names in the domain of the substitution gamma
# - names free in expressions in the codomain of the substitution gamma
# This strategy has undergone substantial testing and trial and error in the context of the course.
# Deviation is not recommended.
def subst(scope: Scope, e: Expr, gamma: Mapping[Id, Expr]) -> Expr:
if isinstance(e, (Bool, Int)):
return e
elif isinstance(e, UnaryExpr):
return UnaryExpr(e.op, subst(scope, e.arg, gamma))
elif isinstance(e, BinaryExpr):
return BinaryExpr(e.op, subst(scope, e.arg1, gamma), subst(scope, e.arg2, gamma))
elif isinstance(e, NaryExpr):
return NaryExpr(e.op, tuple(subst(scope, arg, gamma) for arg in e.args))
elif isinstance(e, AppExpr):
return AppExpr(e.callee, tuple(subst(scope, arg, gamma) for arg in e.args))
elif isinstance(e, QuantifierExpr):
        # Rename bound variables to avoid capture (see NOTE(capture-avoiding-substitution))
avoid = free_ids(e)
avoid |= set(v.name for v in gamma)
for v in gamma:
avoid |= free_ids(gamma[v])
renaming: Dict[Id, Expr] = {}
fresh_svs = []
for sv in e.binder.vs:
fresh_name = scope.fresh(sv.name, also_avoid=list(avoid))
renaming[Id(sv.name)] = Id(fresh_name)
assert not isinstance(sv.sort, SortInferencePlaceholder)
fresh_svs.append(SortedVar(fresh_name, sv.sort))
fresh_body = subst(scope, e.body, renaming)
return QuantifierExpr(e.quant, tuple(fresh_svs), subst(scope, fresh_body, gamma))
elif isinstance(e, Id):
if e in gamma:
return gamma[e]
else:
return e
elif isinstance(e, IfThenElse):
return IfThenElse(subst(scope, e.branch, gamma), subst(scope, e.then, gamma), subst(scope, e.els, gamma))
elif isinstance(e, Let):
        # Rename bound variables to avoid capture (see NOTE(capture-avoiding-substitution))
avoid = free_ids(e)
avoid |= set(v.name for v in gamma)
for v in gamma:
avoid |= free_ids(gamma[v])
assert len(e.binder.vs) == 1
sv = e.binder.vs[0]
fresh_name = scope.fresh(sv.name, also_avoid=list(avoid))
assert not isinstance(sv.sort, SortInferencePlaceholder)
fresh_sv = SortedVar(fresh_name, sv.sort)
fresh_body = subst(scope, e.body, {Id(sv.name): Id(fresh_name)})
return Let(fresh_sv, subst(scope, e.val, gamma), subst(scope, fresh_body, gamma))
else:
assert False, (type(e), e)
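# Capture-avoidance sketch for the NOTE above (illustrative only): substituting
# x for y in (forall x. p(x, y)) must not let the substituted x be captured by
# the binder, so the bound x is renamed to a fresh name first:
#   subst(scope, <forall x. p(x, y)>, {Id('y'): Id('x')})
#     ~> forall x_1. p(x_1, x)    # fresh name actually chosen by scope.fresh()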
def as_clauses_body(expr: Expr, negated: bool = False) -> List[List[Expr]]:
'''
Convert a quantifier-free formula to CNF
'''
if isinstance(expr, Bool):
return [[Bool(expr.val != negated)]]
elif isinstance(expr, UnaryExpr):
assert expr.op == 'NOT'
return as_clauses_body(expr.arg, not negated)
elif isinstance(expr, BinaryExpr):
if expr.op in ['EQUAL', 'NOTEQ']:
op = 'NOTEQ' if (expr.op == 'NOTEQ') != negated else 'EQUAL'
return [[BinaryExpr(op, expr.arg1, expr.arg2)]]
elif expr.op == 'IMPLIES':
return as_clauses_body(Or(Not(expr.arg1), expr.arg2), negated=negated)
elif expr.op == 'IFF':
return as_clauses_body(
And(Or(Not(expr.arg1), expr.arg2),
Or(expr.arg1, Not(expr.arg2))),
negated=negated
)
else:
assert False, f'{expr.op}\n{expr}'
elif isinstance(expr, NaryExpr):
assert expr.op != 'DISTINCT', 'CNF normalization does not support "distinct" expressions'
assert expr.op in ('AND', 'OR'), expr
if negated:
other_op = 'AND' if expr.op == 'OR' else 'OR'
return as_clauses_body(NaryExpr(other_op, tuple(Not(arg) for arg in expr.args)), negated=False)
elif expr.op == 'AND':
return list(itertools.chain(*(as_clauses_body(arg, negated=False) for arg in expr.args)))
elif expr.op == 'OR':
return [list(itertools.chain(*tup))
for tup in itertools.product(*(as_clauses_body(arg, negated=False) for arg in expr.args))]
else:
assert False, expr
elif isinstance(expr, AppExpr) or isinstance(expr, Id):
if negated:
return [[Not(expr)]]
else:
return [[expr]]
else:
assert False, f'unsupported expressions in as_clauses_body: {expr}'
def as_clauses_quant(expr: Expr, negated: bool = False) -> Tuple[Tuple[SortedVar, ...], List[List[Expr]]]:
if isinstance(expr, QuantifierExpr):
if negated:
other_quant = 'EXISTS' if expr.quant == 'FORALL' else 'FORALL'
return as_clauses_quant(QuantifierExpr(other_quant, expr.binder.vs, Not(expr.body)), negated=False)
else:
assert expr.quant == 'FORALL'
new_vs, new_body = as_clauses_quant(expr.body, negated=False)
return expr.binder.vs + tuple(new_vs), new_body
elif isinstance(expr, UnaryExpr) and expr.op == 'NOT':
return as_clauses_quant(expr.arg, not negated)
else:
return (), as_clauses_body(expr, negated)
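# Worked example (illustrative only): for
#   forall x. (p(x) -> q(x)) & r(x)
# as_clauses_quant returns the bound vars (x,) and the clause list
#   [[~p(x), q(x)], [r(x)]]
# which as_clauses below wraps back into one universally quantified Or per clause.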
def as_clauses(expr: Expr) -> List[Expr]:
    '''Convert expr to CNF (must be universally quantified, see as_clauses_quant)'''
vs, clauses = as_clauses_quant(expr)
ans = []
for clause in clauses:
if len(clause) == 1:
clause += [Bool(False)]
e = Forall(vs, Or(*clause))
        # TODO: should we typecheck here? Also, can we
self.state.func_ir, axis_var)
if labels != '' and axis is not None:
if axis != 1:
raise ValueError("only dropping columns (axis=1) supported")
columns = labels
else:
columns_var = self._get_arg('drop', rhs.args, kws, 3, 'columns', '')
err_msg = ("columns argument (constant string list) "
"or labels and axis required")
columns = self._get_str_or_list(columns_var, err_msg=err_msg)
inplace_var = self._get_arg('drop', rhs.args, kws, 5, 'inplace', '')
inplace = guard(find_const, self.state.func_ir, inplace_var)
if inplace is not None and inplace:
df_label = self.df_labels[df_var.name]
cfg = compute_cfg_from_blocks(self.state.func_ir.blocks)
# dropping columns inplace possible only when it dominates the df
# creation to keep schema consistent
if label not in cfg.backbone() and label not in cfg.post_dominators()[df_label]:
raise ValueError("dropping dataframe columns inplace inside "
"conditionals and loops not supported yet")
# TODO: rename df name
# TODO: support dropping columns of input dfs (reflection)
for cname in columns:
self.df_vars[df_var.name].pop(cname)
return []
in_df_map = self._get_df_cols(df_var)
nodes = []
out_df_map = {c: _gen_arr_copy(in_df_map[c], nodes)
for c in in_df_map.keys() if c not in columns}
self._create_df(lhs.name, out_df_map, label)
return nodes
def _get_reverse_copies(self, body):
for inst in body:
if isinstance(inst, ir.Assign) and isinstance(inst.value, ir.Var):
self.reverse_copies[inst.value.name] = inst.target.name
return
def _handle_pd_DataFrame(self, assign, lhs, rhs, label):
"""transform pd.DataFrame({'A': A}) call
"""
kws = dict(rhs.kws)
if 'data' in kws:
data = kws['data']
if len(rhs.args) != 0: # pragma: no cover
raise ValueError(
"only data argument suppoted in pd.DataFrame()")
else:
if len(rhs.args) != 1: # pragma: no cover
raise ValueError(
"data argument in pd.DataFrame() expected")
data = rhs.args[0]
arg_def = guard(get_definition, self.state.func_ir, data)
if (not isinstance(arg_def, ir.Expr)
or arg_def.op != 'build_map'): # pragma: no cover
raise ValueError(
"Invalid DataFrame() arguments (constant dict of columns expected)")
nodes, items = self._fix_df_arrays(arg_def.items)
# HACK replace build_map to avoid inference errors
arg_def.op = 'build_list'
arg_def.items = [v[0] for v in arg_def.items]
n_cols = len(items)
data_args = ", ".join('data{}'.format(i) for i in range(n_cols))
col_args = ", ".join('col{}'.format(i) for i in range(n_cols))
func_text = "def _init_df({}, index, {}):\n".format(data_args, col_args)
func_text += " return sdc.hiframes.pd_dataframe_ext.init_dataframe({}, index, {})\n".format(
data_args, col_args)
loc_vars = {}
exec(func_text, {'sdc': sdc}, loc_vars)
_init_df = loc_vars['_init_df']
# TODO: support index var
index = ir.Var(lhs.scope, mk_unique_var('df_index_none'), lhs.loc)
nodes.append(ir.Assign(ir.Const(None, lhs.loc), index, lhs.loc))
data_vars = [a[1] for a in items]
col_vars = [a[0] for a in items]
args = data_vars + [index] + col_vars
return self._replace_func(_init_df, args,
pre_nodes=nodes
)
# df_nodes, col_map = self._process_df_build_map(items)
# nodes += df_nodes
# self._create_df(lhs.name, col_map, label)
# # remove DataFrame call
# return nodes
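    # For a two-column call such as pd.DataFrame({'A': a, 'B': b}), the
    # generated function text above amounts to (illustrative only):
    #
    #   def _init_df(data0, data1, index, col0, col1):
    #       return sdc.hiframes.pd_dataframe_ext.init_dataframe(
    #           data0, data1, index, col0, col1)
    #
    # with the data arrays first, then the index, then the column-name values.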
def _handle_pd_read_csv(self, assign, lhs, rhs, label):
"""transform pd.read_csv(names=[A], dtype={'A': np.int32}) call
"""
# schema: pd.read_csv(filepath_or_buffer, sep=',', delimiter=None,
# header='infer', names=None, index_col=None, usecols=None,
# squeeze=False, prefix=None, mangle_dupe_cols=True, dtype=None,
# engine=None, converters=None, true_values=None, false_values=None,
# skipinitialspace=False, skiprows=None, nrows=None, na_values=None,
# keep_default_na=True, na_filter=True, verbose=False,
# skip_blank_lines=True, parse_dates=False,
# infer_datetime_format=False, keep_date_col=False, date_parser=None,
# dayfirst=False, iterator=False, chunksize=None, compression='infer',
# thousands=None, decimal=b'.', lineterminator=None, quotechar='"',
# quoting=0, escapechar=None, comment=None, encoding=None,
# dialect=None, tupleize_cols=None, error_bad_lines=True,
# warn_bad_lines=True, skipfooter=0, doublequote=True,
# delim_whitespace=False, low_memory=True, memory_map=False,
# float_precision=None)
kws = dict(rhs.kws)
fname = self._get_arg('read_csv', rhs.args, kws, 0, 'filepath_or_buffer')
sep = self._get_str_arg('read_csv', rhs.args, kws, 1, 'sep', ',')
sep = self._get_str_arg('read_csv', rhs.args, kws, 2, 'delimiter', sep)
# TODO: header arg
names_var = self._get_arg('read_csv', rhs.args, kws, 4, 'names', '')
dtype_var = self._get_arg('read_csv', rhs.args, kws, 10, 'dtype', '')
skiprows = self._get_str_arg('read_csv', rhs.args, kws, 16, 'skiprows', 0)
col_names = self._get_str_or_list(names_var, default=0)
        if dtype_var == '':
# infer column names and types from constant filename
fname_const = guard(find_const, self.state.func_ir, fname)
if fname_const is None:
raise ValueError("pd.read_csv() requires explicit type"
"annotation using 'dtype' if filename is not constant")
rows_to_read = 100 # TODO: tune this
df = pd.read_csv(fname_const, nrows=rows_to_read, skiprows=skiprows)
# TODO: string_array, categorical, etc.
dtypes = [types.Array(numba.typeof(d).dtype, 1, 'C')
for d in df.dtypes.values]
cols = df.columns.to_list()
# overwrite column names like Pandas if explicitly provided
if col_names != 0:
cols[-len(col_names):] = col_names
else:
# a row is used for names if not provided
skiprows += 1
col_names = cols
dtype_map = {c: d for c, d in zip(col_names, dtypes)}
else:
dtype_map = guard(get_definition, self.state.func_ir, dtype_var)
if (not isinstance(dtype_map, ir.Expr)
or dtype_map.op != 'build_map'): # pragma: no cover
# try single type for all columns case
dtype_map = self._get_const_dtype(dtype_var)
else:
new_dtype_map = {}
for n_var, t_var in dtype_map.items:
# find constant column name
c = guard(find_const, self.state.func_ir, n_var)
if c is None: # pragma: no cover
raise ValueError("dtype column names should be constant")
new_dtype_map[c] = self._get_const_dtype(t_var)
# HACK replace build_map to avoid inference errors
dtype_map.op = 'build_list'
dtype_map.items = [v[0] for v in dtype_map.items]
dtype_map = new_dtype_map
if col_names == 0:
raise ValueError("pd.read_csv() names should be constant list")
usecols_var = self._get_arg('read_csv', rhs.args, kws, 6, 'usecols', '')
usecols = list(range(len(col_names)))
if usecols_var != '':
err_msg = "pd.read_csv() usecols should be constant list of ints"
usecols = self._get_str_or_list(usecols_var, err_msg=err_msg, typ=int)
# TODO: support other args
date_cols = []
if 'parse_dates' in kws:
err_msg = "pd.read_csv() parse_dates should be constant list"
date_cols = self._get_str_or_list(kws['parse_dates'], err_msg=err_msg, typ=int)
columns, data_arrs, out_types = self._get_csv_col_info(
dtype_map, date_cols, col_names, lhs)
nodes = [csv_ext.CsvReader(
fname, lhs.name, sep, columns, data_arrs, out_types, usecols,
lhs.loc, skiprows)]
n_cols = len(columns)
data_args = ", ".join('data{}'.format(i) for i in range(n_cols))
func_text = "def _init_df({}):\n".format(data_args)
func_text += " return sdc.hiframes.pd_dataframe_ext.init_dataframe({}, None, {})\n".format(
data_args, ", ".join("'{}'".format(c) for c in columns))
loc_vars = {}
exec(func_text, {'sdc': sdc}, loc_vars)
_init_df = loc_vars['_init_df']
f_block = compile_to_numba_ir(
_init_df, {'sdc': sdc}).blocks.popitem()[1]
replace_arg_nodes(f_block, data_arrs)
nodes += f_block.body[:-2]
nodes[-1].target = lhs
return nodes
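    # User-level calls this handler is designed for (illustrative only):
    #
    #   df = pd.read_csv('data.csv', names=['A', 'B'],
    #                    dtype={'A': np.int64, 'B': np.float64})
    #   df = pd.read_csv('data.csv')  # dtypes inferred at compile time by
    #                                 # reading ~100 rows of the constant filename
    #
    # The generated _init_df mirrors the one in _handle_pd_DataFrame, with the
    # arrays produced by the CsvReader node.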
def _get_csv_col_info(self, dtype_map, date_cols, col_names, lhs):
if isinstance(dtype_map, types.Type):
typ = dtype_map
data_arrs = [ir.Var(lhs.scope, mk_unique_var(cname), lhs.loc)
for cname in col_names]
return col_names, data_arrs, [typ] * len(col_names)
columns = []
data_arrs = []
out_types = []
for i, (col_name, typ) in enumerate(dtype_map.items()):
columns.append(col_name)
# get array dtype
if i in date_cols:
typ = types.Array(types.NPDatetime('ns'), 1, 'C')
out_types.append(typ)
# output array variable
data_arrs.append(
ir.Var(lhs.scope, mk_unique_var(col_name), lhs.loc))
return columns, data_arrs, out_types
def _get_const_dtype(self, dtype_var):
dtype_def = guard(get_definition, self.state.func_ir, dtype_var)
if isinstance(dtype_def, ir.Const) and isinstance(dtype_def.value, str):
typ_name = dtype_def.value
if typ_name == 'str':
return string_array_type
typ_name = 'int64' if typ_name == 'int' else typ_name
typ_name = 'float64' if typ_name == 'float' else typ_name
typ = getattr(types, typ_name)
typ = types.Array(typ, 1, 'C')
return typ
# str case
if isinstance(dtype_def, ir.Global) and dtype_def.value == str:
return string_array_type
# categorical case
if isinstance(dtype_def, ir.Expr) and dtype_def.op == 'call':
if (not guard(find_callname, self.state.func_ir, dtype_def)
== ('category', 'pandas.core.dtypes.dtypes')):
raise ValueError("pd.read_csv() invalid dtype "
"(built using a call but not Categorical)")
cats_var = self._get_arg('CategoricalDtype', dtype_def.args,
dict(dtype_def.kws), 0, 'categories')
err_msg = "categories should be constant list"
cats = self._get_str_or_list(cats_var, list_only=True, err_msg=err_msg)
typ = PDCategoricalDtype(cats)
return CategoricalArray(typ)
if not isinstance(dtype_def, ir.Expr) or dtype_def.op != 'getattr':
raise ValueError("pd.read_csv() invalid dtype")
glob_def = guard(get_definition, self.state.func_ir, dtype_def.value)
if not isinstance(glob_def, ir.Global) or glob_def.value != np:
raise ValueError("pd.read_csv() invalid dtype")
# TODO: extend to other types like string and date, check error
typ_name = dtype_def.attr
typ_name = 'int64' if typ_name == 'int' else typ_name
typ_name = 'float64' if typ_name == 'float' else typ_name
typ = getattr(types, typ_name)
typ = types.Array(typ, 1, 'C')
return typ
def _handle_pd_Series(self, assign, lhs, rhs):
"""transform pd.Series(A) call
"""
kws = dict(rhs.kws)
data = self._get_arg('pd.Series', rhs.args, kws, 0, 'data')
# match flatmap pd.Series(list(itertools.chain(*A))) and flatten
data_def = guard(get_definition, self.state.func_ir, data)
if (is_call(data_def) and guard(find_callname, self.state.func_ir, data_def)
== ('list', 'builtins') and len(data_def.args) == 1):
arg_def = guard(get_definition, self.state.func_ir, data_def.args[0])
if (is_call(arg_def) and guard(find_callname, self.state.func_ir,
arg_def) == ('chain', 'itertools')):
in_data = arg_def.vararg
arg_def.vararg = None # avoid typing error
return self._replace_func(
lambda l: sdc.hiframes.api.flatten_to_series(l),
[in_data]
)
# pd.Series() is handled in typed pass now
# return self._replace_func(lambda arr: sdc.hiframes.api.init_series(
# sdc.hiframes.api.fix_df_array(arr)),
# [data])
return [assign]
def _handle_pd_to_numeric(self, assign, lhs, rhs):
"""transform pd.to_numeric(A, errors='coerce') call here since dtype
has to be specified in locals and applied
"""
kws = dict(rhs.kws)
if 'errors' not in kws or guard(find_const, self.state.func_ir, kws['errors']) != 'coerce':
raise ValueError("pd.to_numeric() only supports errors='coerce'")
if lhs.name not in self.reverse_copies or (self.reverse_copies[lhs.name]) not in self.state.locals:
raise ValueError("pd.to_numeric() requires annotation of output type")
typ = self.state.locals.pop(self.reverse_copies[lhs.name])
dtype = numba.numpy_support.as_dtype(typ.dtype)
arg = rhs.args[0]
| |
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 <NAME> (<EMAIL>), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import logging
import time
from blinkpy.web_tests.models import test_expectations
from blinkpy.web_tests.models import test_failures
from blinkpy.web_tests.models.typ_types import ResultType
_log = logging.getLogger(__name__)
class TestRunException(Exception):
def __init__(self, code, msg):
self.code = code
self.msg = msg
class TestRunResults(object):
def __init__(self, expectations, num_tests, result_sink):
self.total = num_tests
self.remaining = self.total
self.expectations = expectations
self.result_sink = result_sink
# Various counters:
self.expected = 0
self.expected_failures = 0
self.expected_skips = 0
self.total_failures = 0
self.unexpected = 0
self.unexpected_crashes = 0
self.unexpected_failures = 0
self.unexpected_timeouts = 0
# The wall clock time spent running the tests (web_test_runner.run()).
self.run_time = 0
# Map of test name to the *last* result for the test.
self.results_by_name = {}
# Map of test name to the *last* unexpected result for the test.
self.unexpected_results_by_name = {}
# All results from a run except SKIP, including all iterations.
self.all_results = []
# Map of test name to the *last* failures for the test.
self.failures_by_name = {}
self.tests_by_expectation = {}
for expected_result in \
test_expectations.EXPECTATION_DESCRIPTIONS.keys():
self.tests_by_expectation[expected_result] = set()
self.slow_tests = set()
self.interrupted = False
self.keyboard_interrupted = False
def add(self, test_result, expected, test_is_slow):
result_type_for_stats = test_result.type
self.tests_by_expectation[result_type_for_stats].add(
test_result.test_name)
if self.result_sink:
self.result_sink.sink(expected, test_result)
self.results_by_name[test_result.test_name] = test_result
if test_result.type != ResultType.Skip:
self.all_results.append(test_result)
self.remaining -= 1
if len(test_result.failures):
self.total_failures += 1
self.failures_by_name[test_result.test_name] = test_result.failures
if expected:
self.expected += 1
if test_result.type == ResultType.Skip:
self.expected_skips += 1
elif test_result.type != ResultType.Pass:
self.expected_failures += 1
else:
self.unexpected_results_by_name[test_result.test_name] = \
test_result
self.unexpected += 1
if len(test_result.failures):
self.unexpected_failures += 1
if test_result.type == ResultType.Crash:
self.unexpected_crashes += 1
elif test_result.type == ResultType.Timeout:
self.unexpected_timeouts += 1
if test_is_slow:
self.slow_tests.add(test_result.test_name)
class RunDetails(object):
def __init__(self,
exit_code,
summarized_full_results=None,
summarized_failing_results=None,
initial_results=None,
all_retry_results=None):
self.exit_code = exit_code
self.summarized_full_results = summarized_full_results
self.summarized_failing_results = summarized_failing_results
self.initial_results = initial_results
self.all_retry_results = all_retry_results or []
def _interpret_test_failures(failures):
test_dict = {}
failure_types = [type(failure) for failure in failures]
# FIXME: get rid of all this is_* values once there is a 1:1 map between
# TestFailure type and test_expectations.EXPECTATION.
if test_failures.FailureMissingAudio in failure_types:
test_dict['is_missing_audio'] = True
if test_failures.FailureMissingResult in failure_types:
test_dict['is_missing_text'] = True
if (test_failures.FailureMissingImage in failure_types
or test_failures.FailureMissingImageHash in failure_types
or test_failures.FailureReftestNoImageGenerated in failure_types
or test_failures.FailureReftestNoReferenceImageGenerated in
failure_types):
test_dict['is_missing_image'] = True
if test_failures.FailureTestHarnessAssertion in failure_types:
test_dict['is_testharness_test'] = True
return test_dict
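# Editor's illustration (not part of blinkpy): the mapping above is purely
# type-based, e.g. a failure list containing a FailureMissingResult produces
# {'is_missing_text': True}, and one containing both FailureMissingImage and
# FailureMissingAudio produces {'is_missing_image': True,
# 'is_missing_audio': True}; keys are only emitted when True.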
def summarize_results(port_obj,
expectations,
initial_results,
all_retry_results,
only_include_failing=False):
"""Returns a dictionary containing a summary of the test runs, with the following fields:
'version': a version indicator
'fixable': The number of fixable tests (NOW - PASS)
'skipped': The number of skipped tests (NOW & SKIPPED)
'num_regressions': The number of non-flaky failures
'num_flaky': The number of flaky failures
'num_passes': The number of expected and unexpected passes
'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
"""
results = {}
results['version'] = 3
all_retry_results = all_retry_results or []
tbe = initial_results.tests_by_expectation
results['skipped'] = len(tbe[ResultType.Skip])
# TODO(dpranke): Some or all of these counters can be removed.
num_passes = 0
num_flaky = 0
num_regressions = 0
# Calculate the number of failures by types (only in initial results).
num_failures_by_type = {}
for expected_result in initial_results.tests_by_expectation:
tests = initial_results.tests_by_expectation[expected_result]
num_failures_by_type[expected_result] = len(tests)
results['num_failures_by_type'] = num_failures_by_type
# Combine all iterations and retries together into a dictionary with the
# following structure:
# { test_name: [ (result, is_unexpected), ... ], ... }
# where result is a single TestResult, is_unexpected is a boolean
# representing whether the result is unexpected in that run.
merged_results_by_name = collections.defaultdict(list)
for test_run_results in [initial_results] + all_retry_results:
# all_results does not include SKIP, so we need results_by_name.
for test_name, result in test_run_results.results_by_name.items():
if result.type == ResultType.Skip:
is_unexpected = test_name in test_run_results.unexpected_results_by_name
merged_results_by_name[test_name].append((result,
is_unexpected))
# results_by_name only includes the last result, so we need all_results.
for result in test_run_results.all_results:
test_name = result.test_name
is_unexpected = test_name in test_run_results.unexpected_results_by_name
merged_results_by_name[test_name].append((result, is_unexpected))
# Finally, compute the tests dict.
tests = {}
for test_name, merged_results in merged_results_by_name.items():
initial_result = merged_results[0][0]
if only_include_failing and initial_result.type == ResultType.Skip:
continue
exp = expectations.get_expectations(test_name)
expected_results, bugs = exp.results, exp.reason
expected = ' '.join(expected_results)
actual = []
actual_types = []
crash_sites = []
all_pass = True
has_expected = False
has_unexpected = False
has_unexpected_pass = False
has_stderr = False
for result, is_unexpected in merged_results:
actual.append(result.type)
actual_types.append(result.type)
crash_sites.append(result.crash_site)
if result.type != ResultType.Pass:
all_pass = False
if result.has_stderr:
has_stderr = True
if is_unexpected:
has_unexpected = True
if result.type == ResultType.Pass:
has_unexpected_pass = True
else:
has_expected = True
# TODO(crbug.com/855255): This code calls a test flaky if it has both
# expected and unexpected runs (NOT pass and failure); this is generally
# wrong (really it should just be if there are multiple kinds of results),
# but this works in the normal case because a test will only be retried
# if a result is unexpected, and if you get an expected result on the
# retry, then you did get multiple results. This fails if you get
# one kind of unexpected failure initially and another kind of
# unexpected failure on the retry (e.g., TIMEOUT CRASH), or if you
# explicitly run a test multiple times and get multiple expected results.
is_flaky = has_expected and has_unexpected
test_dict = {}
test_dict['expected'] = expected
test_dict['actual'] = ' '.join(actual)
# If a flag was added then add flag specific test expectations to the per test field
flag_exp = expectations.get_flag_expectations(test_name)
if flag_exp:
base_exp = expectations.get_base_expectations(test_name)
test_dict['flag_expectations'] = list(flag_exp.results)
test_dict['base_expectations'] = list(base_exp.results)
# Fields below are optional. To avoid bloating the output results json
# too much, only add them when they are True or non-empty.
if is_flaky:
num_flaky += 1
test_dict['is_flaky'] = True
elif all_pass or has_unexpected_pass:
# We count two situations as a "pass":
# 1. All test runs pass (which is obviously non-flaky, but does not
# imply whether the runs are expected, e.g. they can be all
# unexpected passes).
# 2. The test isn't flaky and has at least one unexpected pass
# (which implies all runs are unexpected). One tricky example
# that doesn't satisfy #1 is that if a test is expected to
# crash but in fact fails and then passes, it will be counted
# as "pass".
num_passes += 1
if not has_stderr and only_include_failing:
continue
elif has_unexpected:
# Either no retries or all retries failed unexpectedly.
num_regressions += 1
rounded_run_time = round(initial_result.test_run_time, 1)
if rounded_run_time:
test_dict['time'] = rounded_run_time
if exp.is_slow_test:
test_dict['is_slow_test'] = True
if has_stderr:
test_dict['has_stderr'] = True
if bugs:
test_dict['bugs'] = bugs.split()
if initial_result.reftest_type:
test_dict.update(reftest_type=list(initial_result.reftest_type))
crash_sites = [site for site in crash_sites if site]
if len(crash_sites) > 0:
test_dict['crash_site'] = crash_sites[0]
if test_failures.has_failure_type(test_failures.FailureTextMismatch,
initial_result.failures):
for failure in initial_result.failures:
if isinstance(failure, test_failures.FailureTextMismatch):
test_dict['text_mismatch'] = \
| |
#define the rows and cols
rows = [
[7,3,1,1,7],
[1,1,2,2,1,1],
[1,3,1,3,1,1,3,1],
[1,3,1,1,6,1,3,1],
[1,3,1,5,2,1,3,1],
[1,1,2,1,1],
[7,1,1,1,1,1,7],
[3,3],
[1,2,3,1,1,3,1,1,2],
[1,1,3,2,1,1],
[4,1,4,2,1,2],
[1,1,1,1,1,4,1,3],
[2,1,1,1,2,5],
[3,2,2,6,3,1],
[1,9,1,1,2,1],
[2,1,2,2,3,1],
[3,1,1,1,1,5,1],
[1,2,2,5],
[7,1,2,1,1,1,3],
[1,1,2,1,2,2,1],
[1,3,1,4,5,1],
[1,3,1,3,10,2],
[1,3,1,1,6,6],
[1,1,2,1,1,2],
[7,2,1,2,5]]
cols = [
[7,2,1,1,7],
[1,1,2,2,1,1],
[1,3,1,3,1,3,1,3,1],
[1,3,1,1,5,1,3,1],
[1,3,1,1,4,1,3,1],
[1,1,1,2,1,1],
[7,1,1,1,1,1,7],
[1,1,3],
[2,1,2,1,8,2,1],
[2,2,1,2,1,1,1,2],
[1,7,3,2,1],
[1,2,3,1,1,1,1,1],
[4,1,1,2,6],
[3,3,1,1,1,3,1],
[1,2,5,2,2],
[2,2,1,1,1,1,1,2,1],
[1,3,3,2,1,8,1],
[6,2,1],
[7,1,4,1,1,3],
[1,1,1,1,4],
[1,3,1,3,7,1],
[1,3,1,1,1,2,1,1,4],
[1,3,1,4,3,3],
[1,1,2,2,2,6,1],
[7,1,3,2,1,1]]
partialSolvedRows = {
3 : [0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0],
8 : [0,0,0,0,0,0,1,1,0,0,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0],
16 : [0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0],
21 : [0,0,0,1,1,0,0,0,0,1,1,0,0,0,0,1,0,0,0,0,1,1,0,0,0]
}
partialSolvedCols = {
3 : [0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0],
4 : [0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0],
6 : [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0],
7 : [0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
9 : [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0],
10 : [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0],
11 : [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0],
12 : [0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
13 : [0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
14 : [0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
15 : [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0],
16 : [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0],
18 : [0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
20 : [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0],
21 : [0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0]
}
import time
import copy
def groupCombination(numTiles, rowSize):
acc = []
for i in xrange(0,rowSize):
current = ([0]*i) + [1]*numTiles + [0]*(rowSize - (i+numTiles))
if len(current) > rowSize:
return acc
acc.append(current)
return acc
def groupCombination1(numTiles, rowSize):
acc = []
if len(numTiles) == 0:
return []
newNumTiles = []
if len(numTiles) == 1:
return groupCombination(numTiles[0], rowSize)
currentNumTiles = numTiles[0]
if len(numTiles) >= 2:
newNumTiles = numTiles[1:]
nextNumTiles = 0
if len(numTiles) >= 1:
nextNumTiles = numTiles[1]
for i in xrange(0,rowSize):
current = ([0]*i) + [1]*currentNumTiles
leftSides = groupCombination1(newNumTiles, rowSize - (i+currentNumTiles+1))
for ls in leftSides:
whole = current + [0] + ls
if len(whole) > rowSize:
break
acc.append(whole)
return acc
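# Editor's example (not part of the original script): a small, verifiable case
# of the block-placement enumeration above. For the clue [1, 2] in a row of
# width 5 there are exactly three legal placements.
def _demo_groupCombination1():
    expected = [[1, 0, 1, 1, 0],
                [1, 0, 0, 1, 1],
                [0, 1, 0, 1, 1]]
    return groupCombination1([1, 2], 5) == expected  # True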
def getRaw():
startTime = time.time()
rawRows = []
rawCols = []
for row in rows:
rawRows.append(groupCombination1(row,25))
for col in cols:
rawCols.append(groupCombination1(col,25))
endTime = time.time()
print "total time: " + str(endTime - startTime)
return rawRows, rawCols
def writeSolutionToFile(m):
writeStr = ''
for row in m:
for col in row:
writeStr += str(col)
writeStr += '\n'
writeStr += '\n\n'
f = open('solutions.txt', 'a')
f.write(writeStr)
f.close()
def writeToFile(rawRows, rawCols):
toRowFile = ''
toColFile = ''
for i in range(0,len(rawRows)):
toRowFile += "row " + str(i) + " " + str(rows[i]) + "\n"
toColFile += "col " + str(i) + " " + str(cols[i]) + "\n"
for rcombo in rawRows[i]:
toRowFile += "\t" + str(rcombo) + "\n"
for ccombo in rawCols[i]:
toColFile += "\t" + str(ccombo) + "\n"
f = open("rowOut.txt",'w')
f.write(toRowFile)
f.close()
f = open("colOut.txt",'w')
f.write(toColFile)
f.close()
def getCount(possibleDict):
count = 0
for index in possibleDict:
for possible in possibleDict[index]:
count += 1
return count
def getDicts(rawRows, rawCols):
rowDict = {}
colDict = {}
for i in range(0,len(rawRows)):
rowDict[i] = rawRows[i]
colDict[i] = rawCols[i]
return rowDict, colDict
def initSolutionMatrix(n):
m = []
for i in xrange(0,n):
m.append([])
for j in xrange(0,n):
m[i].append(0)
return m
def addRowToMatrix(m,rowNum,row):
m[rowNum] = row
#print "adding row: " + str(row)
#printSolutionMatrix(m)
return m
def addColToMatrix(m,colNum,col):
n = len(m)
for i in xrange(0,n):
m[i][colNum] = col[i]
#print "adding col: " + str(col)
#printSolutionMatrix(m)
return m
def addSingleSolutionsToMatrix(m, solutionsDict, solutionsLenDict, sortedSolutionsLenCount, isRow):
if(sortedSolutionsLenCount[0] == 1):
for index in solutionsLenDict[1]:
if isRow:
m = addRowToMatrix(m, index, solutionsDict[index][0])
else:
m = addColToMatrix(m, index, solutionsDict[index][0])
del solutionsDict[index]
del solutionsLenDict[1]
sortedSolutionsLenCount.pop(0)
return m, solutionsDict, solutionsLenDict, sortedSolutionsLenCount
def sortPossibleSolutions(rowColDict):
lenDict = {}
for index in rowColDict:
rowColLen = len(rowColDict[index])
temp = lenDict.get(rowColLen,[])
temp.append(index)
lenDict[rowColLen] = temp
rowColLens = []
for numPossibleSolutions in lenDict:
rowColLens.append(numPossibleSolutions)
return lenDict, sorted(rowColLens)
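# Editor's example (not part of the original script): sortPossibleSolutions
# groups row/col indices by how many candidate fills they still have, and
# returns the candidate counts in ascending order.
def _demo_sortPossibleSolutions():
    lenDict, sortedLens = sortPossibleSolutions({0: [[1]], 1: [[0], [1]]})
    return lenDict == {1: [0], 2: [1]} and sortedLens == [1, 2]  # True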
def printSolutionMatrix(m):
for row in m:
print row
print ''
def isSolved(rowsSolutionsDict, colsSolutionsDict):
#print rowsSolutionsDict, colsSolutionsDict
return len(rowsSolutionsDict) == 0 and len(colsSolutionsDict) == 0
def rowCanWork(mRow, row):
for i in xrange(0,len(row)):
if mRow[i] == 1 and row[i] != 1:
return False
return True
def colCanWork(n, m,colIndex, col):
for i in xrange(0,n):
if m[i][colIndex] == 1 and col[i] != 1:
return False
return True
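# Editor's example (not part of the original script): a candidate row is only
# rejected when a cell already fixed to 1 in the partial solution is 0 in the
# candidate.
def _demo_rowCanWork():
    return rowCanWork([0, 1, 0], [1, 1, 0]) and not rowCanWork([0, 1, 0], [1, 0, 1])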
def filterByPartials(partialDict, possibleDict):
counter = 0
for index in partialDict:
possibleList = possibleDict.get(index, [])
if len(possibleList) == 0:
continue
currentPartial = partialDict[index]
for i in xrange(0,len(currentPartial)):
for j in xrange(len(possibleList)-1, -1,-1):
if currentPartial[i] == 1 and possibleList[j][i] != 1:
#print "item filtered! " + str()
counter += 1
possibleList.pop(j)
possibleDict[index] = possibleList
print "\tfiltered " + str(counter) + " items"
return possibleDict
def filterByMatrix(m, rowsDict, colsDict):
deleteLater = []
for index in rowsDict:
for i in xrange(len(rowsDict[index])-1, -1, -1):
if not rowCanWork(m[index], rowsDict[index][i]):
rowsDict[index].pop(i)
if len(rowsDict[index]) == 1:
m = addRowToMatrix(m,index,rowsDict[index][0])
deleteLater.append(index)
for i in deleteLater:
        # assumption: fully determined rows are meant to be removed from
        # rowsDict here (the original deleted from colsDict)
        del rowsDict[i]
deleteLater = []
for index in colsDict:
for i in xrange(len(colsDict[index])-1, -1, -1):
if not colCanWork(len(m), m,index, colsDict[index][i]):
colsDict[index].pop(i)
if len(colsDict[index]) == 1:
m = addColToMatrix(m,index,colsDict[index][0])
deleteLater.append(index)
for i in deleteLater:
del colsDict[i]
return m, rowsDict, colsDict
def solve3(mO, rowsDictO, colsDictO, rowsLenDictO, sortedRowLenKeysO, colsLenDictO, sortedColLenKeysO):
m = copy.deepcopy(mO)
rowsDict = copy.deepcopy(rowsDictO)
colsDict = copy.deepcopy(colsDictO)
rowsLenDict = copy.deepcopy(rowsLenDictO)
sortedRowLenKeys = copy.deepcopy(sortedRowLenKeysO)
colsLenDict = copy.deepcopy(colsLenDictO)
sortedColLenKeys = copy.deepcopy(sortedColLenKeysO)
if isSolved(rowsDict, colsDict):
print "found solution!"
print m
writeSolutionToFile(m)
return
print "Rows left: " + str(getCount(rowsDict))
print "Cols left: " + str(getCount(colsDict)) + '\n'
rowIndexList = []
colIndexList = []
if len(sortedRowLenKeys) > 0:
rowIndexList = rowsLenDict[sortedRowLenKeys[0]]
if len(sortedColLenKeys) > 0:
colIndexList = colsLenDict[sortedColLenKeys[0]]
if len(rowIndexList) > 0 and len(colIndexList) > 0:
for rowIndex in rowIndexList:
for colIndex in colIndexList:
for row in rowsDict[rowIndex]:
newRowsDict = copy.deepcopy(rowsDict)
newRowsDict[rowIndex] = [row]
newRowLenDict, newRowLenList = sortPossibleSolutions(newRowsDict)
newM = copy.deepcopy(m)
newM, newRowsDict, newRowLenDict, newRowLenList = addSingleSolutionsToMatrix(newM, newRowsDict, newRowLenDict, newRowLenList, True)
for col in colsDict[colIndex]:
newColsDict = copy.deepcopy(colsDict)
newColsDict[colIndex] = [col]
newColLenDict, newColLenList = sortPossibleSolutions(newColsDict)
newNewM = copy.deepcopy(newM)
newNewM, newColsDict, newColLenDict, newColLenList = addSingleSolutionsToMatrix(newNewM, newColsDict, newColLenDict, newColLenList, False)
newNewM,newRowsDict,newColsDict = filterByMatrix(newNewM, newRowsDict, newColsDict)
solve3(newNewM, newRowsDict, newColsDict, newRowLenDict, newRowLenList, newColLenDict, newColLenList)
elif len(rowIndexList) > 0 and len(colIndexList) <= 0:
for rowIndex in rowIndexList:
for row in rowsDict[rowIndex]:
newRowsDict = copy.deepcopy(rowsDict)
newRowsDict[rowIndex] = [row]
newRowLenDict, newRowLenList = sortPossibleSolutions(newRowsDict)
newM = copy.deepcopy(m)
newM, newRowsDict, newRowLenDict, newRowLenList = addSingleSolutionsToMatrix(newM, newRowsDict, newRowLenDict, newRowLenList, True)
newM,newRowsDict,colsDict = filterByMatrix(newM, newRowsDict, colsDict)
solve3(newM, newRowsDict, colsDict, newRowLenDict, newRowLenList, colsLenDict, sortedColLenKeys)
elif len(rowIndexList) <= 0 and len(colIndexList) > 0:
for colIndex in colIndexList:
for col in colsDict[colIndex]:
newColsDict = copy.deepcopy(colsDict)
newColsDict[colIndex] = [col]
newColLenDict, newColLenList = sortPossibleSolutions(newColsDict)
                newM = copy.deepcopy(m)  # assumption: copy the current matrix m (newM is not yet defined in this branch)
newM, newColsDict, newColLenDict, newColLenList = addSingleSolutionsToMatrix(newM, newColsDict, newColLenDict, newColLenList, False)
newM,newRowsDict,colsDict = filterByMatrix(newM, rowsDict, newColsDict)
solve3(newM, rowsDict, newColsDict, rowsLenDict, sortedRowLenKeys, newColLenDict, newColLenList)
def solve2(m, rowsDict, colsDict, rowsLenDict, sortedRowLenKeys, colsLenDict, sortedColLenKeys):
if isSolved(rowsDict, colsDict):
print "found solution!"
print m
return
rowsLenDict, sortedRowLenKeys = sortPossibleSolutions(rowsDict)
colsLenDict, sortedColLenKeys = sortPossibleSolutions(colsDict)
m, rowsDict, rowsLenDict, sortedRowLenKeys = addSingleSolutionsToMatrix(m, rowsDict, rowsLenDict, sortedRowLenKeys, True)
m, colsDict, colsLenDict, sortedColLenKeys = addSingleSolutionsToMatrix(m, colsDict, colsLenDict, sortedColLenKeys, False)
#print "filtering Rows by hints! " + str(getCount(rowsDict))
rowsDict = filterByPartials(partialSolvedRows, rowsDict)
#print "Rows left: " + str(getCount(rowsDict)) + '\n'
#print "filtering Cols by hints! " + str(getCount(colsDict))
colsDict = filterByPartials(partialSolvedCols, colsDict)
#print "Cols left: " + str(getCount(colsDict)) + '\n'
#print "filtering by solution matrix!", "rows:", str(getCount(rowsDict)), "cols:",str(getCount(colsDict))
m,rowsDict,colsDict = filterByMatrix(m,rowsDict,colsDict)
#print "Rows left: " + str(getCount(rowsDict))
#print "Cols left: " + str(getCount(colsDict)) + '\n'
print "Rows left: " + str(getCount(rowsDict))
print "Cols left: " + str(getCount(colsDict)) + '\n'
if len(sortedRowLenKeys) > 0:
lenDict = rowsLenDict[sortedRowLenKeys.pop(0)]
for index in lenDict:
for row in rowsDict[index]:
rowsDict2 = copy.deepcopy(rowsDict)
rowsDict2[index] = [row]
#print rowsDict2[index]
solve2(copy.deepcopy(m), rowsDict2, copy.deepcopy(colsDict), copy.deepcopy(rowsLenDict), copy.deepcopy(sortedRowLenKeys), copy.deepcopy(colsLenDict), copy.deepcopy(sortedColLenKeys))
if len(sortedColLenKeys) > 0:
lenDict = colsLenDict[sortedColLenKeys.pop(0)]
for index in lenDict:
for col in colsDict[index]:
colsDict2 = copy.deepcopy(colsDict)
colsDict2[index] = [col]
solve2(copy.deepcopy(m), copy.deepcopy(rowsDict), colsDict2, copy.deepcopy(rowsLenDict), copy.deepcopy(sortedRowLenKeys), copy.deepcopy(colsLenDict), copy.deepcopy(sortedColLenKeys))
def solve(m, rowsDict, colsDict, rowsLenDict, sortedRowLenKeys, colsLenDict, sortedColLenKeys):
print "filtering Rows by hints! " + str(getCount(rowsDict))
rowsDict = filterByPartials(partialSolvedRows, rowsDict)
print "Rows left: " + str(getCount(rowsDict)) + '\n'
print "filtering Cols by hints! " + str(getCount(colsDict))
colsDict = filterByPartials(partialSolvedCols, colsDict)
print "Cols left: " + str(getCount(colsDict)) + '\n'
print "filtering by solution matrix!", "rows:", str(getCount(rowsDict)), "cols:",str(getCount(colsDict))
m,rowsDict,colsDict = filterByMatrix(m,rowsDict,colsDict)
print "Rows left: " + str(getCount(rowsDict))
print "Cols left: " + str(getCount(colsDict)) + '\n'
rowsLenDict, sortedRowLenKeys = sortPossibleSolutions(rowsDict)
colsLenDict, sortedColLenKeys = sortPossibleSolutions(colsDict)
m, rowsDict, rowsLenDict, sortedRowLenKeys = addSingleSolutionsToMatrix(m, rowsDict, rowsLenDict, sortedRowLenKeys, True)
m, colsDict, colsLenDict, sortedColLenKeys = addSingleSolutionsToMatrix(m, colsDict, colsLenDict, sortedColLenKeys, False)
print "Rows left: " + str(getCount(rowsDict))
print "Cols left: " + str(getCount(colsDict)) + '\n'
print sortedRowLenKeys, sortedColLenKeys
print rowsLenDict[sortedRowLenKeys[0]]
print "starting to solve!"
prevRowsCount = getCount(rowsDict)
prevColsCount = getCount(colsDict)
retryCounter = 0
while getCount(colsDict) != 0 and getCount(rowsDict) != 0:
if(prevRowsCount == getCount(rowsDict) or prevColsCount | |
# radarly/publication.py
"""
Publications are all documents which match the query you have defined in
your projects.
"""
from os import getcwd
from os.path import abspath
from reprlib import repr as trunc_repr
import requests
from .api import RadarlyApi
from .constants import PLATFORM, TONE
from .exceptions import PublicationUpdateFailed
from .metadata import Metadata
from .model import GeneratorModel, SourceModel
from .utils.misc import parse_image_url
from .utils.jsonparser import snake_dict
from .utils.checker import check_geocode, check_language, check_list
class Publication(SourceModel):
"""Object base on ``SourceModel`` storing information about the
publication. The structure of the model can be drawn with the
``draw_structure`` method.
Args:
uid (str): unique identifier of the publication
origin (dict): dictionary which contains information about the
platform where the publication comes from.
permalink (str): link to the publication
lang (str): lang of the publication
date (datetime.datetime): creation date of the publication
impression (int): number of impressions on the publication
reach (int): estimated number of people reached by the publication
tone (str): tone of the publication
category (str): category of the publications
user (dict): information about the author of the publication
"""
def __init__(self, data, project_id):
super().__init__()
self.pid = project_id
data = snake_dict(data, blacklist=[['radar', 'tag']])
super().add_data(data)
def __repr__(self):
try:
publication_uid = trunc_repr(self['uid'])
except KeyError:
publication_uid = None
return '<Publication.uid={}>'.format(publication_uid)
@classmethod
def fetch(cls, project_id, parameter, api=None):
"""
Get publications stored inside a project.
Args:
project_id (int): identifier of a project
parameter (SearchPublicationParameter): parameters object
made with the SearchPublicationParameter instance, which will
be used as payload data in POST request. See
``SearchPublicationParameter`` to know how to build this
object.
api (RadarlyApi, optional): API object used to perform request. If
None, it will use the default API.
Returns:
list[Publication]:
"""
api = api or RadarlyApi.get_default_api()
url = api.router.publication['search'].format(project_id=project_id)
data = api.post(url, data=parameter)
return [
Publication(item, project_id) for item in data['hits']
]
@classmethod
def fetch_all(cls, project_id, parameter, api=None):
"""Get all publications matching given parameters. It yields
publications.
Args:
project_id (int): identifier of your project
parameter (SearchPublicationParameter): parameters object
made with the SearchPublicationParameter instance, which will
be used as payload data in POST request. See
``SearchPublicationParameter`` to know how to build this
object.
api (RadarlyApi, optional): API object used to perform request. If
None, it will use the default API.
Returns:
PublicationsGenerator: list of publications. On each iterations, a
Publication is yielded until there is no more publication.
"""
api = api or RadarlyApi.get_default_api()
return PublicationsGenerator(parameter,
project_id=project_id, api=api)
def get_metadata(self, params=None, api=None):
"""This method allows users to get document’s metadata.
Args:
params (dict, optional): parameter sent in the GET request. Default
to None.
Returns:
Metadata: object storing metadata information
"""
api = api or RadarlyApi.get_default_api()
url = api.router.publication['metadata'].format(project_id=self.pid)
params = {} if params is None else params
params.update(dict(
platform=self['origin']['platform'],
uid=self['uid'],
))
res_data = api.get(url, params=params)
return Metadata(res_data, self['uid'])
def get_raw(self, params=None, api=None):
"""Get the raw content of the publication.
Args:
params (dict, optional): parameter sent in the GET request. Default
to None.
Returns:
dict: dictionary storing the raw content of the publication
"""
api = api or RadarlyApi.get_default_api()
doc_platform = self['origin']['platform']
available_platform = [
PLATFORM.FORUM,
PLATFORM.BLOG,
]
assert doc_platform in available_platform, \
"{} is not compatible with raw content".format(doc_platform)
url = api.router.publication['raw'].format(project_id=self.pid)
params = {} if params is None else params
params.update(dict(
platform=doc_platform,
uid=self['uid'],
))
res_data = api.get(url, params=params)
return res_data
def set_tags(self, *args, **kwargs):
"""Update some information about a publication in Radarly. This
method is based on the ``set_publication_tags`` function defined in
the same module.
.. warning:: Unlike the ``set_publication_tags``, this function returns
None in case of success (the ``set_publication_tags`` returns a
``Publication`` object).
Args:
tone (str): tone of the publication. Can be `positive`,
`negative`, `mixed` or `neutral`.
language (str): alpha-2, alpha-3, or name of the language
country (str): alpha-2, alpha-3 or name of the country
            keyword (list[str]): list of keywords for the publication
custom_tags (dict[str -> list[str]]): value of the custom tags
to set. The template for this argument is::
{<label of the custom_tag>: [<label of the subtag>]}.
Example: Given two tags (the first one named ``Product``
with ``Shoes``, ``T-Shirt`` and ``Clothes`` as subtags and
the second one named ``Price`` with ``High``, ``Medium``
and ``Low`` as subtags), a valid value for the ``custom_tags``
could be::
{'Product': ['Clothes', 'T-Shirt'], 'Price': ['High']}
Raises:
            PublicationUpdateFailed: error raised if the publication update failed
Returns:
None
"""
publication = set_publication_tags(
*args,
project_id=getattr(self, 'pid'),
uid=getattr(self, 'uid'),
platform=getattr(self, 'origin')['platform'],
**kwargs
)
self.add_data(publication.__dict__)
return None
def download(self, output_dir=None, chunk_size=1024):
"""Download the publication if it is an image or video.
        .. warning:: This function will not raise an error even if the
            download fails. To know whether all the downloads succeeded, compare
            the media object of a publication with the return value of the
            function.
Args:
output_dir (str, optional): folder where the downloaded images must
be saved. The folder must already exists. Default to the
current working directory.
chunk_size (int, optional): chunk size used during the file
download with ``requests``. Default to 1024.
Returns:
            dict[str]: filepaths of the downloaded media files. This dictionary
                has roughly the same structure as the ``media`` attribute of the
                publication.
"""
def download_content(content_link, output_dir):
"""Download the content of a media and save it in a existing
directory.
Args:
content_link (str):
output_dir (str):
Returns:
dict: local version of the media object
"""
if content_link is None: return None
res = requests.get(content_link, stream=True)
try:
res.raise_for_status()
except requests.exceptions.HTTPError:
return None
img_name, img_format = parse_image_url(res.url)
filepath = '{}/{}.{}'.format(output_dir, img_name, img_format)
with open(filepath, mode='wb') as image_file:
for chunk in res.iter_content(chunk_size=chunk_size):
image_file.write(chunk)
return abspath(filepath)
output_dir = output_dir or getcwd()
media_links = dict(
image=[],
video=[]
)
if self['media'] and self['media']['image']:
downloaded_images = [
download_content(item, output_dir) for item in self['media']['image']
]
media_links['image'].extend(list(filter(None, downloaded_images)))
if self['media'] and self['media']['video']:
downloaded_videos = [
{
'url': download_content(item['url'], output_dir),
'thumbnail': download_content(item['thumbnail'], output_dir)
} for item in self['media']['video']
]
media_links['video'].extend(
filter(lambda x: x['url'] and x['thumbnail'], downloaded_videos)
)
return media_links
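# Editor's usage sketch (not part of the radarly source): the helper below is
# hypothetical and simply chains the documented calls above -- fetch every
# publication matching a SearchPublicationParameter and download its media.
def _example_fetch_and_download(project_id, parameter, output_dir=None, api=None):
    """Hypothetical helper: download the media of all matching publications."""
    downloaded = []
    for publication in Publication.fetch_all(project_id, parameter, api=api):
        downloaded.append(publication.download(output_dir=output_dir))
    return downloaded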
class PublicationsGenerator(GeneratorModel):
"""Generator which yields all publications matching some payload.
Args:
search_param (SearchPublicationParameter):
project_id (int): identifier of the project
api (RadarlyApi): api to use to perform requests
Yields:
Publication:
"""
def _fetch_items(self):
"""Get next range of publications"""
url = self._api.router.publication['search'].format(
project_id=self.project_id
)
res_data = self._api.post(url, data=self.search_param)
self.total = res_data['total']
self._items = (
Publication(item, self.project_id)
for item in res_data['hits']
)
div = self.total // self.search_param['limit']
reste = self.total % self.search_param['limit']
self.total_page = div
if reste != 0: self.total_page += 1
self.search_param = self.search_param.next_page()
def __repr__(self):
return '<PublicationsGenerator.total={}.total_page={}>'.format(
self.total, self.total_page
)
def set_publication_tags(project_id, uid, platform,
tone=None, language=None, country=None,
keyword=None, custom_tags=None, api=None):
"""Update some fields in a publication in Radarly. Only specific field
can be updated. This function is outside the ``Publication`` class in order
to give you the opportunity to update a publication without having the full
object. The ``set_tags`` method of ``Publication`` uses this function so
there is no big differences between theses two functions.
Args:
tone (str): tone of the publication. Can be `positive`,
`negative`, `mixed` or `neutral`.
language (str): alpha-2, alpha-3, or name of the language
country (str): alpha-2, alpha-3 or name of the country
        keyword (list[str]): list of keywords for the publication
custom_tags (dict[str -> list[str]]): value of the custom tags
to set. The tags must already exist. The template for this
argument is::
{<label of the custom_tag>: [<label of the subtag>]}.
Example: Given two tags (the first one named ``Product``
with ``Shoes``, ``T-Shirt`` and ``Clothes`` as subtags and
the second one named ``Price`` with ``High``, ``Medium``
and ``Low`` as subtags), a valid value for the ``custom_tags``
could be::
{'Product': ['Clothes', 'T-Shirt'], 'Price': ['High']}
Raises:
        PublicationUpdateFailed: error raised if the publication update failed
Returns:
Publication: publication which was updated
"""
def check_update(pub, tone, language, country,
keyword, custom_tags):
"""Check the publication's update."""
not_updated_fields = []
if tone and pub['tone'] != tone:
not_updated_fields.append('tone')
if keyword and pub['keyword'] != keyword:
not_updated_fields.append('keyword')
if language and pub['lang'] != language:
not_updated_fields.append('lang')
if country and pub['geo']['inferred']['country'] != country:
not_updated_fields.append('geo.inferred.country')
if custom_tags:
_ = [
not_updated_fields.append('radar.tag.custom.{}'.format(key))
for key in custom_tags
if pub['radar']['tag']['custom'][key] != custom_tags[key]
]
if not_updated_fields:
raise PublicationUpdateFailed(fields=not_updated_fields)
return None
payload = {}
if tone:
_ = TONE.check(tone)
payload['tone'] = tone
if language:
language = check_language(language)[0].lower()
payload['lang'] = language
| |
# encoding: utf-8
# module renderdoc
# from P:\1-Scripts\_Python\Py-Autocomplete\renderdoc.pyd
# by generator 1.146
# no doc
# imports
import enum as __enum
from .CaptureAccess import CaptureAccess
class CaptureFile(CaptureAccess):
"""
A handle to a capture file. Used for simple cheap processing and meta-data fetching
without opening the capture for analysis.
"""
def Convert(self, filename, filetype, file, progress): # real signature unknown; restored from __doc__
"""
Convert(filename, filetype, file, progress)
Converts the currently loaded file to a given format and saves it to disk.
This allows converting a native RDC to another representation, or vice-versa converting another
representation back to native RDC.
:param str filename: The filename to save to.
:param str filetype: The format to convert to.
:param SDFile file: An optional :class:`SDFile` with the structured data to source from. This is
useful in case the format specifies that it doesn't need buffers, and you already have a
:class:`ReplayController` open with the structured data. This saves the need to load the file
again. If ``None`` then structured data will be fetched if not already present and used.
:param ProgressCallback progress: A callback that will be repeatedly called with an updated progress
value for the conversion. Can be ``None`` if no progress is desired.
:return: The status of the conversion operation, whether it succeeded or failed (and how it failed).
:rtype: ReplayStatus
"""
pass
def CopyFileTo(self, filename): # real signature unknown; restored from __doc__
"""
CopyFileTo(filename)
When a capture file is opened, an exclusive lock is held on the file on disk. This
makes it impossible to copy the file to another location at the user's request. Calling this
function will copy the file on disk to a new location but otherwise won't affect the capture handle.
The new file will be locked, the old file will be unlocked - to allow deleting if necessary.
It is invalid to call this function if :meth:`OpenFile` has not previously been called to open the
file.
:param str filename: The filename to copy to.
:return: ``True`` if the operation succeeded.
:rtype: ``bool``
"""
pass
def ErrorString(self): # real signature unknown; restored from __doc__
"""
ErrorString()
Returns the human-readable error string for the last error received.
The error string is not reset by calling this function so it's safe to call multiple times. However
any other function call may reset the error string to empty.
:return: The error string, if one exists, or an empty string.
:rtype: ``str``
"""
pass
def GetCaptureFileFormats(self): # real signature unknown; restored from __doc__
"""
GetCaptureFileFormats()
Returns the list of capture file formats.
:return: The list of capture file formats available.
:rtype: ``list`` of :class:`CaptureFileFormat`
"""
pass
def GetStructuredData(self): # real signature unknown; restored from __doc__
"""
GetStructuredData()
Returns the structured data for this capture.
The lifetime of this data is scoped to the lifetime of the capture handle, so it cannot be used
after the handle is destroyed.
:return: The structured data representing the file.
:rtype: SDFile
"""
pass
def GetThumbnail(self, type, maxsize): # real signature unknown; restored from __doc__
"""
GetThumbnail(type, maxsize)
Retrieves the embedded thumbnail from the capture.
.. note:: The only supported values for :paramref:`GetThumbnail.type` are :attr:`FileType.JPG`,
:attr:`FileType.PNG`, :attr:`FileType.TGA`, and :attr:`FileType.BMP`.
:param FileType type: The image format to convert the thumbnail to.
:param int maxsize: The largest width or height allowed. If the thumbnail is larger, it's resized.
:return: The raw contents of the thumbnail, converted to the desired type at the desired max
resolution.
:rtype: Thumbnail
"""
pass
def LocalReplaySupport(self): # real signature unknown; restored from __doc__
"""
LocalReplaySupport()
Queries for how well a particular capture is supported on the local machine.
If the file was opened with a format other than native ``rdc`` this will always return no
replay support.
:return: How much support for replay exists locally.
:rtype: ReplaySupport
"""
pass
def OpenBuffer(self, buffer, filetype, progress): # real signature unknown; restored from __doc__
"""
OpenBuffer(buffer, filetype, progress)
Initialises the file handle from a raw memory buffer.
This may be useful if you don't want to parse the whole file or already have the file in memory.
For the :paramref:`OpenBuffer.filetype` parameter, see :meth:`OpenFile`.
:param bytes buffer: The buffer containing the data to process.
:param str filetype: The format of the given file.
:param ProgressCallback progress: A callback that will be repeatedly called with an updated progress
value if an import step occurs. Can be ``None`` if no progress is desired.
:return: The status of the open operation, whether it succeeded or failed (and how it failed).
:rtype: ReplayStatus
"""
pass
def OpenCapture(self, opts, progress): # real signature unknown; restored from __doc__
"""
OpenCapture(opts, progress)
Opens a capture for replay locally and returns a handle to the capture. Only supported
for handles opened with a native ``rdc`` capture, otherwise this will fail.
This function will block until the capture is fully loaded and ready.
Once the replay is created, this :class:`CaptureFile` can be shut down, there is no dependency on it
by the :class:`ReplayController`.
:param ReplayOptions opts: The options controlling how the capture should be replayed.
:param ProgressCallback progress: A callback that will be repeatedly called with an updated progress
value for the opening. Can be ``None`` if no progress is desired.
:return: A tuple containing the status of opening the capture, whether success or failure, and the
resulting :class:`ReplayController` handle if successful.
:rtype: ``tuple`` of :class:`ReplayStatus` and :class:`ReplayController`.
"""
pass
def OpenFile(self, filename, filetype, progress): # real signature unknown; restored from __doc__
"""
OpenFile(filename, filetype, progress)
Initialises the capture handle from a file.
This method supports converting from non-native representations via structured data, by specifying
the input format in the :paramref:`OpenFile.filetype` parameter. The list of supported formats can be retrieved
by calling :meth:`GetCaptureFileFormats`.
``rdc`` is guaranteed to always be a supported filetype, and will be assumed if the filetype is
empty or unrecognised.
:param str filename: The filename of the file to open.
:param str filetype: The format of the given file.
:param ProgressCallback progress: A callback that will be repeatedly called with an updated progress
value if an import step occurs. Can be ``None`` if no progress is desired.
:return: The status of the open operation, whether it succeeded or failed (and how it failed).
:rtype: ReplayStatus
"""
pass
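    # Editor's usage sketch (not part of the generated stub), assuming the
    # usual module-level entry point renderdoc.OpenCaptureFile():
    #
    #     cap = renderdoc.OpenCaptureFile()
    #     status = cap.OpenFile('example.rdc', '', None)
    #     if status == renderdoc.ReplayStatus.Succeeded:
    #         status, controller = cap.OpenCapture(renderdoc.ReplayOptions(), None)
    #
    # 'example.rdc' is a placeholder filename.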
def RecordedMachineIdent(self): # real signature unknown; restored from __doc__
"""
RecordedMachineIdent()
Retrieves the identifying string describing what type of machine created this capture.
:return: A string identifying the machine ident used to make the capture.
:rtype: ``str``
"""
pass
def SetMetadata(self, driverName, machineIdent, thumbType, thumbWidth, thumbHeight, thumbData): # real signature unknown; restored from __doc__
"""
SetMetadata(driverName, machineIdent, thumbType, thumbWidth, thumbHeight, thumbData)
        Sets the metadata for this capture handle.
This function may only be called if the handle is 'empty' - i.e. no file has been opened with
:meth:`OpenFile` or :meth:`OpenBuffer`.
.. note:: The only supported values for :paramref:`SetMetadata.thumbType` are :attr:`FileType.JPG`,
:attr:`FileType.PNG`, :attr:`FileType.TGA`, and :attr:`FileType.BMP`.
:param str driverName: The name of the driver. Must be a recognised driver name (even if replay
            support for that driver is not compiled in locally).
:param int machineIdent: The encoded machine identity value. Optional value and can be left to 0, as
the bits to set are internally defined, so only generally useful if copying a machine ident from
an existing capture.
:param FileType thumbType: The file type of the thumbnail. Ignored if
:paramref:`SetMetadata.thumbData` is empty.
:param int thumbWidth: The width of the thumbnail. Ignored if :paramref:`SetMetadata.thumbData` is
empty.
:param int thumbHeight: The height of the thumbnail. Ignored if :paramref:`SetMetadata.thumbData` is
empty.
:param bytes thumbData: The raw data of the thumbnail. If empty, no thumbnail is set.
"""
pass
def | |
numpy
if version >= (1, 18, 3):
module.store_in_file_system = True
def load_numpy_core_multiarray(finder: ModuleFinder, module: Module) -> None:
"""
The numpy.core.multiarray module is an extension module and the numpy
module imports * from this module; define the list of global names
available to this module in order to avoid spurious errors about missing
modules.
"""
module.AddGlobalName("arange")
def load_numpy_core_numerictypes(finder: ModuleFinder, module: Module) -> None:
"""
The numpy.core.numerictypes module adds a number of items to itself
dynamically; define these to avoid spurious errors about missing
modules.
"""
module.AddGlobalName("bool_")
module.AddGlobalName("cdouble")
module.AddGlobalName("complexfloating")
module.AddGlobalName("csingle")
module.AddGlobalName("double")
module.AddGlobalName("float64")
module.AddGlobalName("float_")
module.AddGlobalName("inexact")
module.AddGlobalName("intc")
module.AddGlobalName("int32")
module.AddGlobalName("number")
module.AddGlobalName("single")
def load_numpy_core_umath(finder: ModuleFinder, module: Module) -> None:
"""
The numpy.core.umath module is an extension module and the numpy module
imports * from this module; define the list of global names available
to this module in order to avoid spurious errors about missing
modules.
"""
module.AddGlobalName("add")
module.AddGlobalName("absolute")
module.AddGlobalName("arccos")
module.AddGlobalName("arccosh")
module.AddGlobalName("arcsin")
module.AddGlobalName("arcsinh")
module.AddGlobalName("arctan")
module.AddGlobalName("arctanh")
module.AddGlobalName("bitwise_and")
module.AddGlobalName("bitwise_or")
module.AddGlobalName("bitwise_xor")
module.AddGlobalName("ceil")
module.AddGlobalName("conj")
module.AddGlobalName("conjugate")
module.AddGlobalName("cosh")
module.AddGlobalName("divide")
module.AddGlobalName("fabs")
module.AddGlobalName("floor")
module.AddGlobalName("floor_divide")
module.AddGlobalName("fmod")
module.AddGlobalName("greater")
module.AddGlobalName("hypot")
module.AddGlobalName("invert")
module.AddGlobalName("isfinite")
module.AddGlobalName("isinf")
module.AddGlobalName("isnan")
module.AddGlobalName("less")
module.AddGlobalName("left_shift")
module.AddGlobalName("log")
module.AddGlobalName("logical_and")
module.AddGlobalName("logical_not")
module.AddGlobalName("logical_or")
module.AddGlobalName("logical_xor")
module.AddGlobalName("maximum")
module.AddGlobalName("minimum")
module.AddGlobalName("multiply")
module.AddGlobalName("negative")
module.AddGlobalName("not_equal")
module.AddGlobalName("power")
module.AddGlobalName("remainder")
module.AddGlobalName("right_shift")
module.AddGlobalName("sign")
module.AddGlobalName("sinh")
module.AddGlobalName("sqrt")
module.AddGlobalName("tan")
module.AddGlobalName("tanh")
module.AddGlobalName("true_divide")
def load_numpy_distutils_command_scons(
finder: ModuleFinder, module: Module
) -> None:
"""
The numpy.distutils.command.scons module optionally imports the numscons
module; ignore the error if the module cannot be found.
"""
module.IgnoreName("numscons")
def load_numpy_distutils_misc_util(
finder: ModuleFinder, module: Module
) -> None:
"""
The numpy.distutils.misc_util module optionally imports the numscons
module; ignore the error if the module cannot be found.
"""
module.IgnoreName("numscons")
def load_numpy_distutils_system_info(
finder: ModuleFinder, module: Module
) -> None:
"""
The numpy.distutils.system_info module optionally imports the Numeric
module; ignore the error if the module cannot be found.
"""
module.IgnoreName("Numeric")
def load_numpy_f2py___version__(finder: ModuleFinder, module: Module) -> None:
"""
The numpy.f2py.__version__ module optionally imports the __svn_version__
module; ignore the error if the module cannot be found.
"""
module.IgnoreName("__svn_version__")
def load_numpy_linalg(finder: ModuleFinder, module: Module) -> None:
"""
The numpy.linalg module implicitly loads the lapack_lite module; make
sure this happens.
"""
finder.IncludeModule("numpy.linalg.lapack_lite")
def load_numpy_random_mtrand(finder: ModuleFinder, module: Module) -> None:
"""
The numpy.random.mtrand module is an extension module and the numpy
module imports * from this module; define the list of global names
available to this module in order to avoid spurious errors about missing
modules.
"""
module.AddGlobalName("rand")
module.AddGlobalName("randn")
def load_Numeric(finder: ModuleFinder, module: Module) -> None:
"""
The Numeric module optionally loads the dotblas module; ignore the error
if this modules does not exist.
"""
module.IgnoreName("dotblas")
def load_pikepdf(finder: ModuleFinder, module: Module) -> None:
"""The pikepdf must be loaded as a package."""
finder.IncludePackage("pikepdf")
def load_PIL(finder: ModuleFinder, module: Module) -> None:
"""The Pillow must be loaded as a package."""
finder.IncludePackage("PIL")
def load_pkg_resources(finder: ModuleFinder, module: Module) -> None:
"""
    The pkg_resources module must be loaded as a package;
    the number of dynamically loaded modules in its subpackages keeps growing.
"""
finder.IncludePackage("pkg_resources")
def load_postgresql_lib(finder: ModuleFinder, module: Module) -> None:
"""
The postgresql.lib module requires the libsys.sql file to be included
so make sure that file is included.
"""
filename = os.path.join(module.path[0], "libsys.sql")
finder.IncludeFiles(filename, os.path.basename(filename))
def load_pty(finder: ModuleFinder, module: Module) -> None:
"""The sgi module is not needed for this module to function."""
module.IgnoreName("sgi")
def load_pycparser(finder: ModuleFinder, module: Module) -> None:
"""
    These files are missing, which causes
    permission-denied issues on Windows when they are regenerated.
"""
finder.IncludeModule("pycparser.lextab")
finder.IncludeModule("pycparser.yacctab")
def load_pygments(finder: ModuleFinder, module: Module) -> None:
"""The pygments package dynamically load styles."""
finder.IncludePackage("pygments.styles")
finder.IncludePackage("pygments.lexers")
finder.IncludePackage("pygments.formatters")
def load_pytest(finder: ModuleFinder, module: Module) -> None:
pytest = __import__("pytest")
for mod in pytest.freeze_includes():
finder.IncludeModule(mod)
def load_pythoncom(finder: ModuleFinder, module: Module) -> None:
"""
The pythoncom module is actually contained in a DLL but since those
cannot be loaded directly in Python 2.5 and higher a special module is
used to perform that task; simply use that technique directly to
determine the name of the DLL and ensure it is included as a file in
the target directory.
"""
pythoncom = __import__("pythoncom")
finder.IncludeFiles(
pythoncom.__file__,
os.path.join("lib", os.path.basename(pythoncom.__file__)),
copy_dependent_files=False,
)
def load_pytz(finder: ModuleFinder, module: Module) -> None:
"""
The pytz module requires timezone data to be found in a known directory
or in the zip file where the package is written.
"""
target_path = os.path.join("lib", "pytz", "zoneinfo")
data_path = os.path.join(module.path[0], "zoneinfo")
if not os.path.isdir(data_path):
# Fedora (and possibly other systems) use a separate location to
# store timezone data so look for that here as well
pytz = __import__("pytz")
data_path = (
getattr(pytz, "_tzinfo_dir", None)
or os.getenv("PYTZ_TZDATADIR")
or "/usr/share/zoneinfo"
)
if data_path.endswith(os.sep):
data_path = data_path[:-1]
if os.path.isdir(data_path):
finder.AddConstant("PYTZ_TZDATADIR", target_path)
if os.path.isdir(data_path):
if module.in_file_system:
finder.IncludeFiles(
data_path, target_path, copy_dependent_files=False
)
else:
finder.ZipIncludeFiles(data_path, "pytz/zoneinfo")
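# Editor's sketch (not part of cx_Freeze): hooks are plain functions named
# load_<module>; the hook below is hypothetical ('examplepkg' is made up) and
# only reuses helpers already used elsewhere in this file.
def load_examplepkg(finder: ModuleFinder, module: Module) -> None:
    """Hypothetical hook: include the package and its bundled data directory."""
    finder.IncludePackage("examplepkg")
    data_path = os.path.join(module.path[0], "data")
    if os.path.isdir(data_path):
        finder.IncludeFiles(data_path, os.path.join("lib", "examplepkg", "data"))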
def load_pywintypes(finder: ModuleFinder, module: Module) -> None:
"""
The pywintypes module is actually contained in a DLL but since those
cannot be loaded directly in Python 2.5 and higher a special module is
used to perform that task; simply use that technique directly to
determine the name of the DLL and ensure it is included as a file in the
target directory.
"""
pywintypes = __import__("pywintypes")
finder.IncludeFiles(
pywintypes.__file__,
os.path.join("lib", os.path.basename(pywintypes.__file__)),
copy_dependent_files=False,
)
# cache the QtCore module
_qtcore = None
def _qt_implementation(module: Module) -> Tuple[str, Any]:
"""Helper function to get name (PyQt5) and the QtCore module."""
global _qtcore
name = module.name.split(".")[0]
if _qtcore is None:
try:
_qtcore = __import__(name, fromlist=["QtCore"]).QtCore
except RuntimeError:
print(
"WARNING: Tried to load multiple incompatible Qt wrappers. "
"Some incorrect files may be copied."
)
return name, _qtcore
def copy_qt_plugins(plugins, finder, qtcore):
"""Helper function to find and copy Qt plugins."""
# Qt Plugins can either be in a plugins directory next to the Qt libraries,
# or in other locations listed by QCoreApplication.libraryPaths()
dir0 = os.path.join(os.path.dirname(qtcore.__file__), "plugins")
for libpath in qtcore.QCoreApplication.libraryPaths() + [dir0]:
sourcepath = os.path.join(str(libpath), plugins)
if os.path.exists(sourcepath):
finder.IncludeFiles(sourcepath, plugins)
def load_PyQt5_phonon(finder: ModuleFinder, module: Module) -> None:
"""
In Windows, phonon5.dll requires an additional dll phonon_ds94.dll to
be present in the build directory inside a folder phonon_backend.
"""
if module.in_file_system:
return
_, qtcore = _qt_implementation(module)
if WIN32:
copy_qt_plugins("phonon_backend", finder, qtcore)
def sip_module_name(qtcore) -> str:
"""
Returns the name of the sip module to import.
    (As of 5.11, the distributed wheels no longer provide the sip module
    outside of the PyQt5 namespace.)
"""
version_string = qtcore.PYQT_VERSION_STR
try:
pyqt_version_ints = tuple(int(c) for c in version_string.split("."))
if pyqt_version_ints >= (5, 11):
return "PyQt5.sip"
except Exception:
pass
return "sip"
def load_PyQt5_QtCore(finder: ModuleFinder, module: Module) -> None:
"""
The PyQt5.QtCore module implicitly imports the sip module and,
depending on configuration, the PyQt5._qt module.
"""
if module.in_file_system:
return
name, qtcore = _qt_implementation(module)
finder.IncludeModule(sip_module_name(qtcore))
try:
finder.IncludeModule(f"{name}._qt")
except ImportError:
pass
def load_PyQt5_Qt(finder: ModuleFinder, module: Module) -> None:
"""
The PyQt5.Qt module is an extension module which imports a number of
other modules and injects their namespace into its own. It seems a
foolish way of doing things but perhaps there is some hidden advantage
to this technique over pure Python; ignore the absence of some of
the modules since not every installation includes all of them.
"""
if module.in_file_system:
return
name, _ = _qt_implementation(module)
finder.IncludeModule(f"{name}.QtCore")
finder.IncludeModule(f"{name}.QtGui")
for mod in (
"_qt",
"QtSvg",
"Qsci",
"QtAssistant",
"QtNetwork",
"QtOpenGL",
"QtScript",
"QtSql",
"QtSvg",
"QtTest",
"QtXml",
):
try:
finder.IncludeModule(f"{name}.{mod}")
except ImportError:
pass
def load_PyQt5_uic(finder: ModuleFinder, module: Module) -> None:
"""
The uic module makes use of "plugins" that need to be read directly and
cannot be frozen; the PyQt5.QtWebKit and PyQt5.QtNetwork modules are
    also implicitly loaded.
"""
if module.in_file_system:
return
name, _ = _qt_implementation(module)
source_dir = os.path.join(module.path[0], "widget-plugins")
finder.IncludeFiles(source_dir, f"{name}.uic.widget-plugins")
finder.IncludeModule(f"{name}.QtNetwork")
try:
finder.IncludeModule(f"{name}.QtWebKit")
except ImportError:
pass
def _QtGui(finder, module, version_str):
name, qtcore = _qt_implementation(module)
finder.IncludeModule(f"{name}.QtCore")
copy_qt_plugins("imageformats", finder, qtcore)
if version_str >= "5":
# On Qt5, we need the platform plugins. For simplicity, we just copy
# any that are installed.
copy_qt_plugins("platforms", finder, qtcore)
def load_PyQt5_QtGui(finder: ModuleFinder, module: Module) -> None:
"""
There is a chance that GUI will use some image formats
add the image format plugins.
"""
if module.in_file_system:
return
_, qtcore = _qt_implementation(module)
_QtGui(finder, module, qtcore.QT_VERSION_STR)
def load_PyQt5_QtWidgets(finder: ModuleFinder, module: Module) -> None:
if module.in_file_system:
return
finder.IncludeModule("PyQt5.QtGui")
def load_PyQt5_QtWebKit(finder: ModuleFinder, module: Module) -> None:
if module.in_file_system:
return
name, _ = _qt_implementation(module)
finder.IncludeModule(f"{name}.QtNetwork")
finder.IncludeModule(f"{name}.QtGui")
def load_PyQt5_QtMultimedia(finder: ModuleFinder, module: Module) -> None:
if module.in_file_system:
return
name, qtcore = _qt_implementation(module)
finder.IncludeModule(f"{name}.QtCore")
finder.IncludeModule(f"{name}.QtMultimediaWidgets")
| |
pairs. Each set of 2 pairs are interpreted as
the control point at the end of the curve and the endpoint
of the curve. The control point at the beginning of the
curve is assumed to be the reflection of the control point
at the end of the last curve relative to the starting point
of the curve. If the previous curve is not a cubic Bezier,
the control point is coincident with the starting point.
Returns
-------
out : `Curve`
This curve.
"""
self.last_q = None
if self.last_c is None:
self.last_c = self.points[-1]
i = 0
while i < len(xy):
ctrl = numpy.empty((4, 2))
ctrl[0] = self.points[-1]
ctrl[1] = 2 * ctrl[0] - self.last_c
for j in range(2, 4):
if isinstance(xy[i], complex):
ctrl[j, 0] = xy[i].real
ctrl[j, 1] = xy[i].imag
i += 1
else:
ctrl[j, 0] = xy[i]
ctrl[j, 1] = xy[i + 1]
i += 2
f = _func_bezier(ctrl)
uu = [0, 0.2, 0.5, 0.8, 1]
fu = [f(u) for u in uu]
iu = 1
while iu < len(fu):
test_u = 0.5 * (uu[iu - 1] + uu[iu])
test_pt = f(test_u)
test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
uu.insert(iu, test_u)
fu.insert(iu, test_pt)
else:
iu += 1
self.points.extend(xy for xy in fu[1:])
self.last_c = ctrl[2]
return self
def s(self, *xy):
"""
Add smooth cubic Bezier curves to the curve.
Parameters
----------
xy : numbers
Coordinate pairs. Each set of 2 pairs are interpreted as
the control point at the end of the curve and the endpoint
of the curve. The control point at the beginning of the
curve is assumed to be the reflection of the control point
at the end of the last curve relative to the starting point
of the curve. If the previous curve is not a cubic Bezier,
the control point is coincident with the starting point.
All coordinates are relative to the current end point.
Returns
-------
out : `Curve`
This curve.
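Examples
--------
A minimal sketch (coordinates are illustrative); each call adds one
smooth cubic segment relative to the current end point (x0, y0):
    curve.s(1, 1, 2, 0)        # end control point (x0+1, y0+1), endpoint (x0+2, y0)
    curve.s(1 + 1j, 2 + 0j)    # complex numbers pack each (x, y) pair into one value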
"""
self.last_q = None
if self.last_c is None:
self.last_c = self.points[-1]
x0, y0 = self.points[-1]
i = 0
while i < len(xy):
ctrl = numpy.empty((4, 2))
ctrl[0, 0] = x0
ctrl[0, 1] = y0
ctrl[1] = 2 * ctrl[0] - self.last_c
for j in range(2, 4):
if isinstance(xy[i], complex):
ctrl[j, 0] = x0 + xy[i].real
ctrl[j, 1] = y0 + xy[i].imag
i += 1
else:
ctrl[j, 0] = x0 + xy[i]
ctrl[j, 1] = y0 + xy[i + 1]
i += 2
f = _func_bezier(ctrl)
uu = [0, 0.2, 0.5, 0.8, 1]
fu = [f(u) for u in uu]
iu = 1
while iu < len(fu):
test_u = 0.5 * (uu[iu - 1] + uu[iu])
test_pt = f(test_u)
test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
uu.insert(iu, test_u)
fu.insert(iu, test_pt)
else:
iu += 1
self.points.extend(xy for xy in fu[1:])
self.last_c = ctrl[2]
return self
def Q(self, *xy):
"""
Add quadratic Bezier curves to the curve.
Parameters
----------
xy : numbers
Coordinate pairs. Each set of 2 pairs are interpreted as
the control point and the endpoint of the curve.
Returns
-------
out : `Curve`
This curve.
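Examples
--------
A minimal sketch (absolute coordinates, illustrative):
    curve.Q(1, 2, 3, 0)        # control point (1, 2), endpoint (3, 0)
    curve.Q(1 + 2j, 3 + 0j)    # same segment written with complex coordinates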
"""
self.last_c = None
i = 0
while i < len(xy):
ctrl = numpy.empty((3, 2))
ctrl[0] = self.points[-1]
for j in range(1, 3):
if isinstance(xy[i], complex):
ctrl[j, 0] = xy[i].real
ctrl[j, 1] = xy[i].imag
i += 1
else:
ctrl[j, 0] = xy[i]
ctrl[j, 1] = xy[i + 1]
i += 2
f = _func_bezier(ctrl)
uu = [0, 0.2, 0.5, 0.8, 1]
fu = [f(u) for u in uu]
iu = 1
while iu < len(fu):
test_u = 0.5 * (uu[iu - 1] + uu[iu])
test_pt = f(test_u)
test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
uu.insert(iu, test_u)
fu.insert(iu, test_pt)
else:
iu += 1
self.points.extend(xy for xy in fu[1:])
self.last_q = ctrl[1]
return self
def q(self, *xy):
"""
Add quadratic Bezier curves to the curve.
Parameters
----------
xy : numbers
Coordinate pairs. Each set of 2 pairs are interpreted as
the control point and the endpoint of the curve.
All coordinates are relative to the current end point.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = None
x0, y0 = self.points[-1]
i = 0
while i < len(xy):
ctrl = numpy.empty((3, 2))
ctrl[0, 0] = x0
ctrl[0, 1] = y0
for j in range(1, 3):
if isinstance(xy[i], complex):
ctrl[j, 0] = x0 + xy[i].real
ctrl[j, 1] = y0 + xy[i].imag
i += 1
else:
ctrl[j, 0] = x0 + xy[i]
ctrl[j, 1] = y0 + xy[i + 1]
i += 2
f = _func_bezier(ctrl)
uu = [0, 0.2, 0.5, 0.8, 1]
fu = [f(u) for u in uu]
iu = 1
while iu < len(fu):
test_u = 0.5 * (uu[iu - 1] + uu[iu])
test_pt = f(test_u)
test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
uu.insert(iu, test_u)
fu.insert(iu, test_pt)
else:
iu += 1
self.points.extend(xy for xy in fu[1:])
self.last_q = ctrl[1]
return self
def T(self, *xy):
"""
Add smooth quadratic Bezier curves to the curve.
Parameters
----------
xy : numbers
Coordinates of the endpoints of the curves. The control
point is assumed to be the reflection of the control point
of the last curve relative to the starting point of the
curve. If the previous curve is not a quadratic Bezier,
the control point is coincident with the starting point.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = None
if self.last_q is None:
self.last_q = self.points[-1]
i = 0
while i < len(xy):
ctrl = numpy.empty((3, 2))
ctrl[0] = self.points[-1]
ctrl[1] = 2 * ctrl[0] - self.last_q
if isinstance(xy[i], complex):
ctrl[2, 0] = xy[i].real
ctrl[2, 1] = xy[i].imag
i += 1
else:
ctrl[2, 0] = xy[i]
ctrl[2, 1] = xy[i + 1]
i += 2
f = _func_bezier(ctrl)
uu = [0, 0.2, 0.5, 0.8, 1]
fu = [f(u) for u in uu]
iu = 1
while iu < len(fu):
test_u = 0.5 * (uu[iu - 1] + uu[iu])
test_pt = f(test_u)
test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
uu.insert(iu, test_u)
fu.insert(iu, test_pt)
else:
iu += 1
self.points.extend(xy for xy in fu[1:])
self.last_q = ctrl[1]
return self
def t(self, *xy):
"""
Add smooth quadratic Bezier curves to the curve.
Parameters
----------
xy : numbers
Coordinates of the endpoints of the curves. The control
point is assumed to be the reflection of the control point
of the last curve relative to the starting point of the
curve. If the previous curve is not a quadratic Bezier,
the control point is coincident with the starting point.
All coordinates are relative to the current end point.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = None
if self.last_q is None:
self.last_q = self.points[-1]
x0, y0 = self.points[-1]
i = 0
while i < len(xy):
ctrl = numpy.empty((3, 2))
ctrl[0, 0] = x0
ctrl[0, 1] = y0
ctrl[1] = 2 * ctrl[0] - self.last_q
if isinstance(xy[i], complex):
ctrl[2, 0] = x0 + xy[i].real
ctrl[2, 1] = y0 + xy[i].imag
i += 1
else:
ctrl[2, 0] = x0 + xy[i]
ctrl[2, 1] = y0 + xy[i + 1]
i += 2
f = _func_bezier(ctrl)
uu = [0, 0.2, 0.5, 0.8, 1]
fu = [f(u) for u in uu]
iu = 1
while iu < len(fu):
test_u = 0.5 * (uu[iu - 1] + uu[iu])
test_pt = f(test_u)
test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
uu.insert(iu, test_u)
| |
# =========================================================================== #
# ______________________ |Library/package imports| ______________________ #
# =========================================================================== #
from tokens import Token
# =========================================================================== #
# _______________________ |Constant definitions| _______________________ #
# =========================================================================== #
from constants import YELLOW, END
prec = 1e-7
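# power() and babylonian_sqrt(), used by the classes below, are assumed to be
# provided by the project's own math helpers (their import is not shown here).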
# =========================================================================== #
# ________________________ |Class definitions| ________________________ #
# =========================================================================== #
class MyMonomial(Token):
def __init__(self, coefficient, exponent):
value = str(coefficient) + 'X^' + str(exponent)
Token.__init__(self, value)
self.coefficient = coefficient
self.exponent = exponent
def __repr__(self):
s_coef, s_var, s_exp = '', '', ''
if abs(self.exponent) >= 1:
s_var = 'X'
if self.exponent > 1 or self.exponent <= -1:
s_exp = '^' + str(self.exponent)
if abs(self.coefficient) != 1 or self.exponent == 0 :
s_coef = str(self.coefficient)
elif self.coefficient == -1:
s_coef = '-'
if self.coefficient == 0:
return '0'
return '' + s_coef + s_var + s_exp
def __add__(self, other):
if isinstance(other, MyMonomial):
if self.exponent == other.exponent:
res = MyMonomial(self.coefficient + other.coefficient, self.exponent)
return (res)
else:
res = Polynomial(_conv_tokens_2_lst_(self, other))
return res
elif isinstance(other, Polynomial):
return other + self
else:
raise TypeError()
def __radd__(self, other):
if isinstance(other, MyMonomial):
if self.exponent == other.exponent:
res = MyMonomial(other.coefficient + self.coefficient, self.exponent)
else:
res = Polynomial(_conv_tokens_2_lst_(other, self))
return res
else:
raise TypeError()
def __sub__(self, other):
if isinstance(other, MyMonomial):
if self.exponent == other.exponent:
res = MyMonomial(self.coefficient - other.coefficient, self.exponent)
else:
other.coefficient = - other.coefficient
res = Polynomial(_conv_tokens_2_lst_(self, other))
return res
elif isinstance(other, Polynomial):
other.coefs = [-c for c in other.coefs]
return other + self
else:
raise TypeError()
def __rsub__(self, other):
if isinstance(other, MyMonomial):
if self.exponent == other.exponent:
res = MyMonomial(-other.coefficient + self.coefficient, self.exponent)
else:
self.coefficient = - self.coefficient
res = Polynomial(_conv_tokens_2_lst_(other, self))
return res
else:
raise TypeError()
def __mul__(self, other):
if isinstance(other, MyMonomial):
res = MyMonomial(other.coefficient * self.coefficient, self.exponent + other.exponent)
return res
elif isinstance(other, Polynomial):
return other * self
else:
raise TypeError()
def __rmul__(self, other):
    if isinstance(other, MyMonomial):
        res = MyMonomial(other.coefficient * self.coefficient, other.exponent + self.exponent)
        return res
    else:
        raise TypeError()
def __truediv__(self, other):
if isinstance(other, MyMonomial):
if other.coefficient != 0:
res = MyMonomial(self.coefficient / other.coefficient, self.exponent - other.exponent)
return res
else:
raise ZeroDivisionError()
else:
raise TypeError()
def __rtruediv__(self, other):
if isinstance(other, MyMonomial):
if self.coefficient != 0:
res = MyMonomial(other.coefficient / self.coefficient, other.exponent - self.exponent)
return res
else:
raise ZeroDivisionError()
else:
raise TypeError()
def __pow__(self, other):
if isinstance(other, MyMonomial):
if other.exponent == 0 and isinstance(other.coefficient, int):
res = MyMonomial(power(self.coefficient,other.coefficient), self.exponent * other.coefficient)
return res
else:
raise ValueError()
else:
raise TypeError()
#def __rpow__(self, other):
# pass
class Polynomial():
def __init__(self, lst_coefs):
"""Creates the polynomial instance based on the list of coefficients.
Remark:
-------
Coefficients in the list are ordered from the coefficient of the
MyMonomial of degree 0 (i.e. the scalar constant) up to the coefficient
of the MyMonomial of highest degree.
Example:
--------
polynomial expr: a.X^3 + b.X^2 + c.X + d => [d, c, b, a]
Note:
-----
There is no gap in coefficients, meaning that a polynomial expr
without a specific MyMonomial of degree i still has its coefficient
in lst_coefs, but it is set to zero.
a.X^3 + b => [b, 0, 0, a]
"""
self.coefs = lst_coefs
while len(self.coefs) > 1 and self.coefs[-1] == 0:
self.coefs = self.coefs[:-1]
self.degree = len(self.coefs) - 1
def _zero_degree_resolution_(self):
"""Calculates the roots of polynomial of degree 0.
Return:
-------
* r [floats]: roots of the polynomial.
"""
if self.degree == 0:
a = self.coefs[0]
if a == 0:
r = 0
if a != 0:
r = None
return [r]
else:
    s_raise = "Polynomial is not of 0th degree."
    raise Exception(s_raise)
def _first_degree_resolution_(self):
"""Calculates the roots of polynomial of degree 1.
Return:
-------
* r [floats]: roots of the polynomial.
"""
if self.degree == 1:
a = self.coefs[1]
b = self.coefs[0]
r = -b / a
return [r]
else:
s_raise = "Polynomial is not of 1st degree."
raise Exception(s_raise)
def discriminant(self) -> float:
""" Calculates the discriminant of the polynomial.
Parameters:
-----------
* self [Polynomial class object]: polynomial of degree 2 or 3.
Return:
-------
delta [float]: value of the discriminant.
"""
if self.degree == 2:
a = self.coefs[2]
b = self.coefs[1]
c = self.coefs[0]
delta = b * b - 4 * a * c
return delta
elif self.degree == 3:
a = self.coefs[3]
b = self.coefs[2]
c = self.coefs[1]
d = self.coefs[0]
delta = 18 * a * b * c * d - 4 * power(b, 3) * d + power(b * c, 2) \
- 4 * a * power(c, 3) - 27 * power(a * d, 2)
return delta
else:
s_raise = "discriminant implementation for 2nd degree polynomial."
raise Exception(s_raise)
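# Worked example (illustrative): for X^2 - 3X + 2, i.e. coefs == [2, -3, 1],
# discriminant() returns (-3)**2 - 4 * 1 * 2 = 1.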
def _second_degree_resolution_(self):
"""Calculates the roots of polynomial of degree 2.
Return:
-------
* r1, r2 / r [floats/complexes]: roots of the polynomial.
"""
if self.degree == 2:
delta = self.discriminant()
a = self.coefs[2]
b = self.coefs[1]
if delta > 0:
r1 = 0.5 * (-b - babylonian_sqrt(delta)) / a
r2 = 0.5 * (-b + babylonian_sqrt(delta)) / a
return [r1, r2]
elif delta == 0:
return [(- 0.5 * b / a)]
else:
real = - 0.5 * b / a
imaginary = 0.5 * babylonian_sqrt(-delta) / a
r1 = complex(real, -imaginary)
r2 = complex(real, imaginary)
return [r1, r2]
else:
s_raise = "Polynomial is not of 2nd degree."
raise Exception(s_raise)
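# For the same illustrative polynomial X^2 - 3X + 2 (coefs == [2, -3, 1]),
# delta == 1 > 0 and the returned roots are [1.0, 2.0].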
def _third_degree_resolution_(self):
""" Calculates the roots of polynomial of degree 3.
The function is here for the completeness of the class and avoids
the redefinition of some methods in the class PolynomialBonus.
"""
# reference: https://fr.wikiversity.org/wiki/Équation_du_troisième_degré/Méthode_de_Cardan
if self.degree == 3:
msg = "3rd degree resolution is a project's bonus. " \
+ "Use instance of class PolynomialBonus to have access to " \
+ "to the resolution of 3rd degree polynomial."
print(YELLOW + msg + END)
return []
else:
print("Polynomial is not of 3rd degree.")
def polynomial_roots(self):
"""Calculates the roots of the polynomial.
Return:
-------
* roots [list(float/complex) or None]: roots of the polynomial.
"""
roots = None
if self.degree == 0:
roots = self._zero_degree_resolution_()
if self.degree == 1:
roots = self._first_degree_resolution_()
if self.degree == 2:
roots = self._second_degree_resolution_()
if self.degree == 3:
roots = self._third_degree_resolution_()
self.roots = roots
return roots
def lambda_polynom(self):
""" Constructs the lambda function f corresponding to the polynomial.
Return:
-------
f [lambda function]: polynomial function.
"""
lst_p = list(range(len(self.coefs)))
f = lambda x: sum([a * power(x, p) for a, p in zip(self.coefs, lst_p)])
self.lambda_p = f
return f
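# Illustration (assuming power(x, p) == x ** p): for coefs == [2, -3, 1],
# lambda_polynom() returns f with f(0) == 2, f(1) == 0 and f(3) == 2.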
def lambda_first_derivative(self):
""" Constructs the lambda function df corresponding to the polynomial
first derivative.
Return:
-------
df [lambda function]: polynomial derivative function.
"""
lst_p = list(range(len(self.coefs)))
lst_p = lst_p[1:]
d_coefs = self.coefs[1:]
print(f"valeurs de coeffs: {self.coeffs} --- valeurs de d_coeffs = {d_coefs}")
print(f"valeurs de lst_p = {lst_p}")
df = lambda x: sum([a * p * power(x, p - 1) for a, p in zip(d_coefs, lst_p)])
self.lambda_dp = df
return df
def lambda_second_derivative(self):
""" Constructs the lambda function d2f corresponding to the polynomial
second derivative.
Return:
-------
d2f [lambda function]: polynomial second derivative function.
"""
lst_p = list(range(len(self.coefs)))
lst_p = lst_p[2:]
d2_coefs = self.coefs[2:]
d2f = lambda x: sum([a * p * (p - 1) * power(x, p - 2) for a, p in zip(d2_coefs, lst_p)])
self.lambda_d2p = d2f
return d2f
@staticmethod
def _print_roots(roots):
""" Displays the number in the parameter roots as string. roots
parameter is expected to be the list of the root of a Polynomial object.
Parameters:
-----------
* roots [list(float/complex)]: list of all the roots of a polynomial
expression.
"""
for r in roots:
if isinstance(r, (int, float)):
print(r)
if isinstance(r, complex):
if r.imag > 0:
print(r.real, '+', f"{r.imag}.i")
else:
print(r.real, '-', f"{-r.imag}.i")
def _summarize_degree_other(self):
""" Displays type function.
Function prints:
* reduced form of the Polynomial instance.
* natural form of the Polynomial instance.
* degree of the Polynomial instance.
* message that computor does not manage the roots search for
polynomial degrees greater than 3.
"""
print("Reduced form:".ljust(20), self.__repr__() + "= 0")
print("Natural form:".ljust(20), self.natural_form() + "= 0")
print("Polynomial degree:".ljust(20), self.degree)
msg = "Resolution | |
<reponame>whywhs/Detection_and_Recognition_in_Remote_Sensing_Image
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by <NAME>
# Modified by <NAME>
# --------------------------------------------------------
import cPickle
import mxnet as mx
from utils.symbol import Symbol
from operator_py.proposal import *
from operator_py.proposal_quadrangle import *
from operator_py.proposal_target import *
from operator_py.proposal_target_quadrangle import *
from operator_py.box_annotator_ohem import *
from operator_py.focal_loss import *
from symbol_dpn import *
class resnet_v1_101_rcnn_quadrangle(Symbol):
def __init__(self):
"""
Use __init__ to define the parameters the network needs
"""
self.eps = 1e-5
self.use_global_stats = True
self.workspace = 512
self.units = (3, 4, 23, 3) # use for 101
self.filter_list = [256, 512, 1024, 2048]
self.k_R = 200
self.G = 50
self.k_sec = { 2: 4, \
3: 8, \
4: 20, \
5: 3 }
self.inc_sec= { 2: 20, \
3: 64, \
4: 64, \
5: 128 }
def get_resnet_v1_conv4(self, data):
# conv1 = (data+1)/2 stride2
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2),
no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False,
eps=self.eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
# pool1 = (conv1+1)/2 stride4
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3),
stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0),
kernel=(1, 1),
stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0),
kernel=(1, 1),
stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c])
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c])
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c])
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
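# The res3*, res4* (and later res5*) stages below repeat the same bottleneck
# pattern as res2a..res2c above: 1x1 conv -> BN -> ReLU, 3x3 conv -> BN -> ReLU,
# 1x1 conv -> BN, added to an identity shortcut (or to a strided 1x1 projection
# for the first unit of each stage).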
# res3a_branch1 = (res2c_relu+1)/2 stride8
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c])
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a,
act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b,
act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c])
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a,
act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b,
act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c])
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a,
act_type='relu')
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b,
act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c])
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
# res4a_branch1 : stride16
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c])
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a,
act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b,
act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c])
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a,
act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b,
act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c])
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a,
act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b,
act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c])
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a,
act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b,
act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c])
res4b4_relu | |
the bit that puts in strings like py27np111 in the filename. It would be
# nice to get rid of this, since the hash supersedes that functionally, but not clear
# whether anyone's tools depend on this file naming right now.
for s, names, places in (('py', 'python', 2), ('np', 'numpy', 2), ('pl', 'perl', 2),
('lua', 'lua', 2), ('r', ('r', 'r-base'), 3)):
for ms in metadata.ms_depends('run'):
for name in ensure_list(names):
if ms.name == name and name in build_pkg_names:
# only append numpy when it is actually pinned
if name == 'numpy' and (not hasattr(ms, 'version') or not ms.version):
continue
log.warn("Deprecation notice: computing build string (like pyXY). This "
"functionality has been replaced with the hash (h????), which"
" can be readily inpsected with `conda inspect hash-inputs "
"<pkg-name>`. pyXY, npXYY and the like will go away in "
"conda-build 4.0. Please adapt any code that depends on filenames"
" with pyXY, npXYY, etc.")
if metadata.noarch == name or (metadata.get_value('build/noarch_python') and
name == 'python'):
res.append(s)
else:
variant_version = metadata.config.variant.get(name, "")
res.append(''.join([s] + variant_version.split('.')[:places]))
features = ensure_list(metadata.get_value('build/features', []))
if res:
res.append('_')
if features:
res.extend(('_'.join(features), '_'))
res.append('{0}'.format(metadata.build_number() if metadata.build_number() else 0))
build_str = "".join(res)
return build_str
# This really belongs in conda, and it is in conda.cli.common,
# but we don't presently have an API there.
def _get_env_path(env_name_or_path):
if not os.path.isdir(env_name_or_path):
for envs_dir in list(envs_dirs) + [os.getcwd()]:
path = os.path.join(envs_dir, env_name_or_path)
if os.path.isdir(path):
env_name_or_path = path
break
bootstrap_metadir = os.path.join(env_name_or_path, 'conda-meta')
if not os.path.isdir(bootstrap_metadir):
print("Bootstrap environment '%s' not found" % env_name_or_path)
sys.exit(1)
return env_name_or_path
def _get_dependencies_from_environment(env_name_or_path):
path = _get_env_path(env_name_or_path)
# construct build requirements that replicate the given bootstrap environment
# and concatenate them to the build requirements from the recipe
bootstrap_metadata = get_installed_packages(path)
bootstrap_requirements = []
for package, data in bootstrap_metadata.items():
bootstrap_requirements.append("%s %s %s" % (package, data['version'], data['build']))
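# The result mirrors a recipe's requirements section, e.g. (illustrative):
# {'requirements': {'build': ['python 3.6.5 h0371630_0', 'numpy 1.15.0 py36h1d66e8a_0']}}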
return {'requirements': {'build': bootstrap_requirements}}
class MetaData(object):
def __init__(self, path, config=None, variant=None):
self.undefined_jinja_vars = []
# decouple this config from whatever was fed in. People must change config by
# accessing and changing this attribute.
self.config = copy.copy(get_or_merge_config(config, variant=variant))
if isfile(path):
self.meta_path = path
self.path = os.path.dirname(path)
else:
self.meta_path = find_recipe(path)
self.path = os.path.dirname(self.meta_path)
self.requirements_path = join(self.path, 'requirements.txt')
# Start with bare-minimum contents so we can call environ.get_dict() with impunity
# We'll immediately replace these contents in parse_again()
self.meta = dict()
# This is the 'first pass' parse of meta.yaml, so not all variables are defined yet
# (e.g. GIT_FULL_HASH, etc. are undefined)
# Therefore, undefined jinja variables are permitted here
# In the second pass, we'll be more strict. See build.build()
# Primarily for debugging. Ensure that metadata is not altered after "finalizing"
self.parse_again(permit_undefined_jinja=True)
if 'host' in self.get_section('requirements'):
self.config.has_separate_host_prefix = True
self.config.disable_pip = self.disable_pip
@property
def final(self):
return self.get_value('extra/final')
@final.setter
def final(self, boolean):
extra = self.meta.get('extra', {})
extra['final'] = boolean
self.meta['extra'] = extra
@property
def disable_pip(self):
return self.config.disable_pip or ('build' in self.meta and
'disable_pip' in self.meta['build'])
@disable_pip.setter
def disable_pip(self, value):
self.config.disable_pip = value
build = self.meta.get('build', {})
build['disable_pip'] = value
self.meta['build'] = build
def append_metadata_sections(self, sections_file_or_dict, merge, raise_on_clobber=False):
"""Append to or replace subsections to meta.yaml
This is used to alter input recipes, so that a given requirement or
setting is applied without manually altering the input recipe. It is
intended for vendors who want to extend existing recipes without
necessarily removing information. pass merge=False to replace sections.
"""
if hasattr(sections_file_or_dict, 'keys'):
build_config = sections_file_or_dict
else:
with open(sections_file_or_dict) as configfile:
build_config = parse(configfile.read(), config=self.config)
_merge_or_update_values(self.meta, build_config, self.path, merge=merge,
raise_on_clobber=raise_on_clobber)
def parse_again(self, permit_undefined_jinja=False):
"""Redo parsing for key-value pairs that are not initialized in the
first pass.
config: a conda-build Config object. If None, the config object passed at creation
time is used.
permit_undefined_jinja: If True, *any* use of undefined jinja variables will
evaluate to an empty string, without emitting an error.
"""
assert not self.final, "modifying metadata after finalization"
log = utils.get_logger(__name__)
log.addFilter(filt)
if isfile(self.requirements_path) and not self.get_value('requirements/run'):
self.meta.setdefault('requirements', {})
run_requirements = specs_from_url(self.requirements_path)
self.meta['requirements']['run'] = run_requirements
os.environ["CONDA_BUILD_STATE"] = "RENDER"
append_sections_file = None
clobber_sections_file = None
try:
# we sometimes create metadata from dictionaries, in which case we'll have no path
if self.meta_path:
self.meta = parse(self._get_contents(permit_undefined_jinja),
config=self.config,
path=self.meta_path)
if (isfile(self.requirements_path) and
not self.meta.get('requirements', {}).get('run', [])):
self.meta.setdefault('requirements', {})
run_requirements = specs_from_url(self.requirements_path)
self.meta['requirements']['run'] = run_requirements
append_sections_file = os.path.join(self.path, 'recipe_append.yaml')
clobber_sections_file = os.path.join(self.path, 'recipe_clobber.yaml')
append_sections_file = self.config.append_sections_file or append_sections_file
if append_sections_file and not os.path.isfile(append_sections_file):
log.debug('input append sections file did not exist: %s', append_sections_file)
append_sections_file = None
clobber_sections_file = self.config.clobber_sections_file or clobber_sections_file
if clobber_sections_file and not os.path.isfile(clobber_sections_file):
log.debug('input clobber sections file did not exist: %s', clobber_sections_file)
clobber_sections_file = None
if append_sections_file:
self.append_metadata_sections(append_sections_file, merge=True)
if clobber_sections_file:
self.append_metadata_sections(clobber_sections_file, merge=False)
if self.config.bootstrap:
dependencies = _get_dependencies_from_environment(self.config.bootstrap)
self.append_metadata_sections(dependencies, merge=True)
except:
raise
finally:
del os.environ["CONDA_BUILD_STATE"]
self.validate_features()
self.ensure_no_pip_requirements()
def ensure_no_pip_requirements(self):
keys = 'requirements/build', 'requirements/run', 'test/requires'
for key in keys:
if any(hasattr(item, 'keys') for item in self.get_value(key)):
raise ValueError("Dictionaries are not supported as values in requirements sections"
". Note that pip requirements as used in conda-env "
"environment.yml files are not supported by conda-build.")
self.append_requirements()
def append_requirements(self):
"""For dynamic determination of build or run reqs, based on configuration"""
reqs = self.meta.get('requirements', {})
run_reqs = reqs.get('run', [])
if bool(self.get_value('build/osx_is_app', False)) and self.config.platform == 'osx':
run_reqs.append('python.app')
self.meta['requirements'] = reqs
def parse_until_resolved(self):
"""variant contains key-value mapping for additional functions and values
for jinja2 variables"""
# undefined_jinja_vars is refreshed by self.parse again
undefined_jinja_vars = ()
# always parse again at least once.
self.parse_again(permit_undefined_jinja=True)
while set(undefined_jinja_vars) != set(self.undefined_jinja_vars):
undefined_jinja_vars = self.undefined_jinja_vars
self.parse_again(permit_undefined_jinja=True)
if undefined_jinja_vars:
sys.exit("Undefined Jinja2 variables remain ({}). Please enable "
"source downloading and try again.".format(self.undefined_jinja_vars))
# always parse again at the end, too.
self.parse_again(permit_undefined_jinja=False)
@classmethod
def fromstring(cls, metadata, config=None, variant=None):
m = super(MetaData, cls).__new__(cls)
if not config:
config = Config()
m.meta = parse(metadata, config=config, path='', variant=variant)
m.config = config
m.parse_again(permit_undefined_jinja=True)
return m
@classmethod
def fromdict(cls, metadata, config=None, variant=None):
"""
Create a MetaData object from metadata dict directly.
"""
m = super(MetaData, cls).__new__(cls)
m.path = ''
m.meta_path = ''
m.requirements_path = ''
m.meta = sanitize(metadata)
if not config:
config = Config(variant=variant)
m.config = config
m.undefined_jinja_vars = []
m.final = False
return m
def get_section(self, section):
return self.meta.get(section, {})
def get_value(self, field, default=None, autotype=True):
"""
Get a value from a meta.yaml.
:param field: Field to return
:param default: Default object to return if field doesn't exist
:param autotype: If True, return the default type of field if one exists.
False will return the default object.
:return:
"""
section, key = field.split('/')
# get correct default
if autotype and default is None and field in default_structs:
default = default_structs[field]()
value = self.get_section(section).get(key, default)
# handle yaml 1.1 boolean values
if isinstance(value, text_type):
if value.lower() in trues:
value = True
elif value.lower() in falses:
value = False
return value
def check_fields(self):
for section, submeta in iteritems(self.meta):
# anything goes in the extra section
if section == 'extra':
continue
if section not in FIELDS:
raise ValueError("unknown section: %s" % section)
for key in submeta:
if key not in FIELDS[section]:
raise ValueError("in section %r: unknown key %r" %
(section, key))
return True
def name(self):
res = self.get_value('package/name')
if not res:
sys.exit('Error: package/name missing in: %r' % self.meta_path)
res = text_type(res)
if res != res.lower():
sys.exit('Error: package/name must be lowercase, got: %r' % res)
check_bad_chrs(res, 'package/name')
return res
def version(self):
res = str(self.get_value('package/version'))
if res is None:
sys.exit("Error: package/version missing in: %r" % self.meta_path)
check_bad_chrs(res, 'package/version')
if self.final and res.startswith('.'):
raise ValueError("Fully-rendered version can't start with period - got %s", res)
return res
def build_number(self):
number = self.get_value('build/number')
# build number can come back as None if no setting (or jinja intermediate)
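# e.g. "build: {number: 3}" -> 3; a missing or still-templated value -> ""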
try:
build_int = int(number)
except (ValueError, TypeError):
build_int = ""
return build_int
def ms_depends(self, typ='run'):
res = []
names = ('python', 'numpy', 'perl', 'lua')
name_ver_list = [(name, self.config.variant[name])
for name in names
if self.config.variant.get(name)]
if self.config.variant.get('r_base'):
# r is kept for legacy installations, r-base deprecates it.
name_ver_list.extend([('r', self.config.variant['r_base']),
('r-base', self.config.variant['r_base']),
])
for spec in self.get_value('requirements/' + typ, []):
try:
ms = MatchSpec(spec)
except | |
#!/usr/bin/env python
""" Tests for the deploy module, which is used to configure and execute Overwatch scripts.
.. codeauthor:: <NAME> <<EMAIL>>, Yale University
"""
from future.utils import iteritems
import pytest
import copy
import os
try:
# For whatever reason, import StringIO from io doesn't behave nicely in python 2.
from StringIO import StringIO
except ImportError:
from io import StringIO
import signal
import stat
import inspect
import subprocess
import collections
import pkg_resources
import logging
logger = logging.getLogger(__name__)
import ruamel.yaml as yaml
from overwatch.base import deploy
def testExpandEnvironmentVars(loggingMixin):
""" Test the YAML constructor to expand environment vars. """
testYaml = """
normalVar: 3
normalWithDollarSign: "$ Hello World"
environmentVar: !expandVars $HOME
expandedWithoutVar: !expandVars "Hello world"
"""
# Setup the YAML to be read from a stream
s = StringIO()
s.write(testYaml)
s.seek(0)
config = deploy.configModule.yaml.load(s, Loader = yaml.SafeLoader)
assert config["normalVar"] == 3
# Should have no impact because it explicitly needs to be tagged (a `$` on its own is not enough)
assert config["normalWithDollarSign"] == "$ Hello World"
assert config["environmentVar"] == os.environ["HOME"]
# Should have no impact because there are no environment vars
assert config["expandedWithoutVar"] == "Hello world"
def testRetrieveExecutable(loggingMixin):
""" Tests for retrieving executables. """
e = deploy.retrieveExecutable("zodb", config = {})
assert isinstance(e, deploy._available_executables["zodb"])
with pytest.raises(KeyError) as exceptionInfo:
e = deploy.retrieveExecutable("helloWorld", config = {})
assert exceptionInfo.value.args[0] == "Executable helloWorld is invalid."
#: Simple named tuple to contain the execution expectations.
executableExpected = collections.namedtuple("executableExpected", ["name", "description", "args", "config"])
@pytest.fixture
def setupBasicExecutable(loggingMixin, mocker):
""" Setup an executable object.
Returns:
tuple: (executable, expected) where executable is an executable object and expected are the expected
parameters.
"""
# Mock folder creation. We want to make it a noop so we don't make a bunch of random empty folders.
mMakedirs = mocker.MagicMock()
mocker.patch("overwatch.base.deploy.os.makedirs", mMakedirs)
expected = {
"name": "{label}Executable",
"description": "Basic executable for {label}ing",
"args": ["execTest", "arg1", "arg2", "test{hello}"],
"config": {"hello": "world", "label": "test"},
}
executable = deploy.executable(**expected)
for k in ["name", "description"]:
expected[k] = expected[k].format(**expected["config"])
expected["args"] = [arg.format(**expected["config"]) for arg in expected["args"]]
expected = executableExpected(**expected)
return executable, expected
@pytest.mark.parametrize("processIdentifier", [
"",
"unique process identifier",
], ids = ["Default process identifier", "Unique process identifier"])
def testSetupExecutable(setupBasicExecutable, processIdentifier):
""" Test setting up a basic executable. """
executable, expected = setupBasicExecutable
executable.processIdentifier = processIdentifier
executable.setup()
assert executable.name == expected.name
assert executable.description == expected.description
assert executable.args == expected.args
assert executable.config == expected.config
assert executable.processIdentifier == (processIdentifier if processIdentifier else " ".join(expected.args))
def testExecutableFromConfig(loggingMixin):
""" Test for configuring an executable via a config.
This duplicates some code from ``setupBasicExecutable``, but it's necessary because we need to create the
executable in the test function to properly test the initialization.
"""
expected = {
"name": "{label}Executable",
"description": "Basic executable for {label}ing",
"args": ["execTest", "arg1", "arg2", "test{hello}"],
"config": {"runInBackground": True, "enabled": True, "label": "test", "hello": "world"},
}
executable = deploy.executable(**expected)
# Run setup so names are properly formatted
executable.setup()
# Determine the expected values
for k in ["name", "description"]:
expected[k] = expected[k].format(**expected["config"])
expected["args"] = [arg.format(**expected["config"]) for arg in expected["args"]]
expected = executableExpected(**expected)
assert executable.runInBackground == expected.config["runInBackground"]
assert executable.executeTask == expected.config["enabled"]
assert executable.logFilename == os.path.join("exec", "logs", "{name}.log".format(name = expected.name))
@pytest.mark.parametrize("pid", [
[],
[1234],
], ids = ["No PIDs", "One PID"])
def testGetProcessPID(setupBasicExecutable, pid, mocker):
""" Test of getting the process PID identified by the executable properties. """
executable, expected = setupBasicExecutable
executable.setup()
# Pre-process the PID input. We don't do it above so it's easier to read here.
inputPID = "\n".join((str(p) for p in pid)) + "\n"
# Mock opening the process
m = mocker.MagicMock(return_value = inputPID)
mocker.patch("overwatch.base.deploy.subprocess.check_output", m)
outputPID = executable.getProcessPID()
assert outputPID == pid
@pytest.mark.parametrize("returnCode", [
1,
3,
], ids = ["No process found", "Unknown error"])
def testGetProcessPIDSubprocessFailure(setupBasicExecutable, mocker, returnCode):
""" Test for subprocess failure when getting the process PID. """
executable, expected = setupBasicExecutable
executable.setup()
# Test getting a process ID. We mock it up.
m = mocker.MagicMock()
m.side_effect = subprocess.CalledProcessError(returncode = returnCode, cmd = executable.args)
mocker.patch("overwatch.base.deploy.subprocess.check_output", m)
if returnCode == 1:
outputPID = executable.getProcessPID()
assert outputPID == []
else:
with pytest.raises(subprocess.CalledProcessError) as exceptionInfo:
outputPID = executable.getProcessPID()
assert exceptionInfo.value.returncode == returnCode
def testGetProcessPIDFailure(setupBasicExecutable, mocker):
""" Test failure modes of getting the process PID. """
pid = [1234, 5678]
executable, expected = setupBasicExecutable
executable.setup()
# Pre-process the PID input. We don't do it above so it's easier to read here.
inputPID = "\n".join((str(p) for p in pid)) + "\n"
# Test getting a process ID. We mock it up.
m = mocker.MagicMock(return_value = inputPID)
mocker.patch("overwatch.base.deploy.subprocess.check_output", m)
with pytest.raises(ValueError) as exceptionInfo:
executable.getProcessPID()
# We don't need to check the exact message.
assert "Multiple PIDs" in exceptionInfo.value.args[0]
@pytest.fixture
def setupKillProcess(setupBasicExecutable, mocker):
""" Setup for tests of killing a process.
Returns:
tuple: (executable, expected, mGetProcess, mKill) where executable is an executable object and expected are
the expected parameters, mGetProcess is the mock for ``executable.getProcessPID()``, and mKill is the mock
for ``executable.killExistingProcess()``.
"""
executable, expected = setupBasicExecutable
# First we return the PID to kill, then we return nothing (as if the kill worked)
mGetProcessPID = mocker.MagicMock()
mocker.patch("overwatch.base.deploy.executable.getProcessPID", mGetProcessPID)
# Also need to mock the kill command itself.
mKill = mocker.MagicMock()
mocker.patch("overwatch.base.deploy.os.kill", mKill)
# Setup
executable.setup()
return executable, expected, mGetProcessPID, mKill
# Intentionally select non-existent PID (above 65535) just in case the mocking doesn't work properly.
@pytest.mark.parametrize("pidsToKill", [
[],
[1234567],
[1234567, 1234568],
], ids = ["No PIDs", "One PID", "Multiple PIDs"])
def testKillingProcess(setupKillProcess, pidsToKill):
""" Test killing the process identified by the executable. """
executable, expected, mGetProcess, mKill = setupKillProcess
mGetProcess.side_effect = [pidsToKill, []]
# Perform the actual method that we want to test
nKilled = executable.killExistingProcess()
# Check the calls
if len(pidsToKill) == 0:
mKill.assert_not_called()
else:
for pid in pidsToKill:
if len(pidsToKill) == 1:
mKill.assert_called_once_with(pid, signal.SIGINT)
else:
mKill.assert_any_call(pid, signal.SIGINT)
# Check that the number of killed processes matches the number of PIDs
assert nKilled == len(pidsToKill)
def testFailedKillingProcess(setupKillProcess):
""" Test for the various error modes when killing a process. """
executable, expected, mGetProcess, mKill = setupKillProcess
# Setup the PIDs to always return, such that it appears as if the kill didn't work.
pidsToKill = [1234567]
mGetProcess.side_effect = [pidsToKill, pidsToKill]
with pytest.raises(RuntimeError) as exceptionInfo:
# Call the actual method that we want to test
executable.killExistingProcess()
# We don't need to check the exact message.
assert "found PIDs {PIDs} after killing the processes.".format(PIDs = pidsToKill) in exceptionInfo.value.args[0]
@pytest.fixture
def setupStartProcessWithLog(setupBasicExecutable, mocker):
""" Setup required for testing startProcessWithLog.
It mocks:
- Writing a ConfigParser configuration
- ``subprocess.Popen``
- Opening files
Returns:
tuple: (mFile, mPopen, mConfigParserWrite) where ``mFile`` is the mock for opening a file, ``mPopen`` is the mock
for ``subprocess.Popen(...)``, and ``mConfigParserWrite`` is the mock for writing a ``configparser`` config.
"""
# For standard processes
# Mock the subprocess command
mPopen = mocker.MagicMock(return_value = "Fake value")
mocker.patch("overwatch.base.deploy.subprocess.Popen", mPopen)
# For supervisor processes
# Mock write with the config parser
mConfigParserWrite = mocker.MagicMock()
mocker.patch("overwatch.base.deploy.ConfigParser.write", mConfigParserWrite)
# Shared by both
# Mock opening the log or config file
mFile = mocker.mock_open()
mocker.patch("overwatch.base.deploy.open", mFile)
return mFile, mPopen, mConfigParserWrite
def testStandardStartProcessWithLogs(setupStartProcessWithLog, setupBasicExecutable):
""" Tests for starting a process with logs in the standard manner ("Popen"). """
# Setup mocks
mFile, mPopen, mConfigParserWrite = setupStartProcessWithLog
# Setup executable
executable, expected = setupBasicExecutable
executable.setup()
# Execute
process = executable.startProcessWithLog()
# Check that it was called successfully
mFile.assert_called_once_with(os.path.join("exec", "logs", "{}.log".format(expected.name)), "a")
mPopen.assert_called_once_with(expected.args, stderr = subprocess.STDOUT, stdout = mFile())
# No need to actually mock up a subprocess.Popen class object.
assert process == "Fake value"
def testSupervisorStartProcessWithLogs(setupStartProcessWithLog, setupBasicExecutable):
""" Tests for starting a process with logs in supervisor. """
# Setup mocks
mFile, mPopen, mConfigParserWrite = setupStartProcessWithLog
# Setup executable
executable, expected = setupBasicExecutable
executable.supervisor = True
executable.setup()
# Execute
process = executable.startProcessWithLog()
mFile.assert_called_once_with("supervisord.conf", "a")
# We don't check the output itself because that would basically be testing ConfigParser, which isn't our goal.
mConfigParserWrite.assert_called_once_with(mFile())
assert process is None
@pytest.mark.parametrize("supervisor, runInBackground", [
(False, False),
(False, True),
(True, False),
], ids = ["Standard process", "Standard process run in background", "Supervisor"])
@pytest.mark.parametrize("executeTask, shortExecutionTime", [
(False, False),
(True, False),
(True, True)
], ids = ["No execute task", "Execute task", "Execute with short executable time"])
@pytest.mark.parametrize("forceRestart", [
False,
True,
], ids = ["No force restart", "Force restart"])
@pytest.mark.parametrize("returnProcessPID", [
False,
True,
], ids = ["Do not return process PID", "Return process PID"])
def testRunExecutable(setupBasicExecutable, setupStartProcessWithLog, supervisor, runInBackground, executeTask, shortExecutionTime, forceRestart, returnProcessPID, mocker):
""" Test | |
<filename>core/map_.py
# -*- coding: utf-8 -*-
# @Author: Administrator
# @Date: 2019-04-24 23:48:49
# @Last Modified by: Administrator
# @Last Modified time: 2019-05-29 18:41:17
"""
Map classes.
"""
__all__ = [
"Tank2Map",
]
from .const import DEBUG_MODE, COMPACT_MAP, SIDE_COUNT, TANKS_PER_SIDE, GAME_STATUS_NOT_OVER,\
GAME_STATUS_DRAW, GAME_STATUS_BLUE_WIN, GAME_STATUS_RED_WIN
from .global_ import np, functools, contextmanager
from .utils import CachedProperty, SingletonMeta, debug_print
from .action import Action
from .field import Field, EmptyField, BaseField, BrickField, SteelField, WaterField, TankField
#{ BEGIN }#
class Map(object):
def __init__(self, width, height):
self._width = width
self._height = height
self._content = [
[[] for x in range(width)] for y in range(height)
]
@property
def width(self):
return self._width
@property
def height(self):
return self._height
@property
def size(self):
return (self._width, self._height)
def in_map(self, x, y):
"""
Determine whether the coordinate (x, y) lies within the map
"""
return 0 <= x < self._width and 0 <= y < self._height
def __getitem__(self, xy):
"""
Get the contents of the cell at xy: (x, y)
"""
x, y = xy
if not self.in_map(x, y):
raise Exception("(%s, %s) is not in map" % (x, y) )
return self._content[y][x]
def get_fields(self, x, y):
return self[x, y]
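# Illustrative usage sketch (added for clarity; not part of the original
# module). It exercises the generic Map container above: cells start as empty
# lists, in_map() performs the bounds check and __getitem__ returns the
# mutable list stored at (x, y). The string stored below is arbitrary.
def _demo_map_usage():
    m = Map(9, 9)
    assert m.size == (9, 9)
    assert m.in_map(0, 0) and not m.in_map(9, 0)
    m[3, 4].append("some-field")  # any object can be stored in a cell
    assert m.get_fields(3, 4) == ["some-field"]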
class Tank2Map(Map, metaclass=SingletonMeta):
class _Counter(object):
"""
An internal helper class used to count rollbacks
"""
def __init__(self):
self._counter = 0
def increase(self):
self._counter += 1
def __iter__(self):
return iter(range(self._counter))
def __repr__(self):
return self._counter.__repr__()
def __int__(self):
return self._counter
def __init__(self, width, height):
super().__init__(width, height)
self._tanks = [ [ None for _ in range(TANKS_PER_SIDE) ] for __ in range(SIDE_COUNT) ]
self._bases = [ None for _ in range(SIDE_COUNT) ]
self._turn = 0
self._destroyedRecords = [] # Stack([Record]) records destroyed fields, used for rollback
# struct Record: (
# turn: int,
# xy: (int, int),
# field: Field,
# )
self._previousActions = [] # Stack([ [[int, int], [int, int]] ]) history of every tank's actions, used for rollback
self._performedActionsRecord = {} # turn -> [[int, int], [int, int]] actions executed by perform, used for undo_revert
self._init_bases()
self._init_tanks()
# -----------------------
#self._revertStack = [] # [debug] stores the actions that need to be reverted
#self._revertIdx = 0 # [debug] index of the current revert
def reset(self): # reset the entire map
self.__clean_cache()
width, height = self.size
self.__init__(width, height)
def __clean_cache(self): # clear cached properties
#CachedProperty.clean(self, "matrix")
#CachedProperty.clean(self, "matrix_T")
pass # caching is no longer used
@property
def turn(self): # current turn number
return self._turn
@property
def tanks(self):
return self._tanks
@property
def bases(self):
return self._bases
#@CachedProperty # caching brings no noticeable benefit
@property
def matrix(self):
"""
Cached value of to_type_matrix.
WARNING:
- Because list is mutable, do not modify the returned value, otherwise the cached attribute would change as well
- If you need to modify it, first call np.copy(matrix) to obtain a copy, then modify that copy
"""
return self.to_type_matrix()
#@CachedProperty # caching brings no noticeable benefit
@property
def matrix_T(self):
return self.matrix.T
def _init_bases(self):
"""
Initialize the bases and the steel walls in front of the bases
"""
assert self._width % 2 == 1, "Map width must be odd"
xc = self._width // 2 # x-center
y1 = 0
y2 = self._height - 1
basePoints = [
(xc, y1), # side 1: blue
(xc, y2), # side 2: red
]
for side, (x, y) in enumerate(basePoints):
base = BaseField(x, y, side)
self._bases[side] = base
self.insert_field(base)
def _init_tanks(self):
"""
Initialize the tanks
"""
x1, x2 = (2, 6)
y1, y2 = (0, self._height-1)
tankPoints = [
[ (x1, y1), (x2, y1) ], # side 1, blue: left is tank 0, right is tank 1
[ (x2, y2), (x1, y2) ], # side 2, red: left is tank 1, right is tank 0
]
for side, points in enumerate(tankPoints):
tanks = self._tanks[side]
for idx, (x, y) in enumerate(points):
tank = TankField(x, y, side, idx)
self.insert_field(tank)
tanks[idx] = tank
def insert_field(self, field):
self[field.xy].append(field)
field.destroyed = False
def remove_field(self, field, record=True):
self[field.xy].remove(field)
field.destroyed = True
if record: # record the removed object
r = ( self._turn, field.xy, field )
self._destroyedRecords.append(r)
def to_type_matrix(self):
"""
Convert the map into a matrix of field.type values.
Return:
- matrix np.array( [[int]] ) a 2D matrix of type values
WARNING:
- The matrix is indexed as (y, x); in practice you usually need the transpose, matrix.T
"""
width, height = self.size
matrix = np.full((height, width), Field.DUMMY, dtype=np.int8)
for y in range(height):
for x in range(width):
fields = self.get_fields(x, y)
if len(fields) == 0:
matrix[y, x] = Field.EMPTY
elif len(fields) > 2:
matrix[y, x] = Field.MULTI_TANK # overlapping tanks are treated as a single tank
else:
field = fields[0]
if isinstance(field, (BaseField, TankField) ):
matrix[y, x] = field.type + 1 + field.side # follows the encoding of the constants defined in Field
else:
matrix[y, x] = field.type
return matrix
def has_multi_tanks(self, x, y):
"""
Determine whether multiple tanks are stacked at the given coordinate
"""
return len( self.get_fields(x, y) ) > 1
def is_valid_move_action(self, tank, action):
"""
Determine whether the action is a valid move action
"""
#assert Action.is_move(action), "action %s is not a move-action" % action
if not Action.is_move(action): # map simulation can produce unpredictable results, so we cannot assert here
return False # patch it instead; just be careful during development and remember action % 4
_FIELDS_CAN_MOVE_TO = ( Field.DUMMY, Field.EMPTY ) # cannot move onto a tank!
x, y = tank.xy
dx, dy = Action.DIRECTION_OF_ACTION_XY[action]
x += dx
y += dy
if not self.in_map(x, y):
return False
fields = self.get_fields(x, y)
if len(fields) == 0:
return True
elif len(fields) == 1:
_type = fields[0].type
if _type in _FIELDS_CAN_MOVE_TO:
return True
return False
def is_valid_shoot_action(self, tank, action):
"""
Determine whether the action is a valid shoot action
"""
# assert Action.is_shoot(action), "action %s is not a shoot-action" % action
if not Action.is_shoot(action):
return False
return not Action.is_shoot(tank.previousAction) # valid as long as the tank does not shoot in two consecutive turns
def is_valid_action(self, tank, action):
"""
Determine whether the action is valid
"""
if not Action.is_valid(action):
return False
elif Action.is_stay(action):
return True
elif Action.is_move(action):
return self.is_valid_move_action(tank, action)
elif Action.is_shoot(action):
return self.is_valid_shoot_action(tank, action)
else: # unknown action
raise Exception("unexpected action %s" % action)
def perform(self, blue_actions, red_actions):
"""
Execute one turn of actions.
Input:
- blue_actions [int, int] actions to be performed by blue tanks 0 and 1
- red_actions [int, int] actions to be performed by red tanks 0 and 1
"""
self._turn += 1
self.__clean_cache()
#debug_print("Start Turn: %s" % self._turn)
#self.debug_print_out("")
_dx = Action.DIRECTION_OF_ACTION_X
_dy = Action.DIRECTION_OF_ACTION_Y
_actions = [ blue_actions, red_actions ]
self._performedActionsRecord[self._turn] = _actions
_fieldsToBeDestroyed = set() # use a set to avoid duplicates
# record the old previous actions
_oldPreviousActions = [ [ tank.previousAction for tank in tanks ] for tanks in self._tanks ]
self._previousActions.append(_oldPreviousActions) # record them
# check that the actions are valid and update the tank caches
for tanks in self._tanks:
for tank in tanks:
action = _actions[tank.side][tank.id]
if not self.is_valid_action(tank, action):
raise Exception("%s will perform an invalid action %s"
% (tank, action) )
tank.previousAction = action # cache this action, regardless of whether the tank has already been destroyed
# handle stay and move actions
for tanks in self._tanks:
for tank in tanks:
action = _actions[tank.side][tank.id]
if not tank.destroyed and Action.is_move(action):
self.remove_field(tank)
tank.x += _dx[action]
tank.y += _dy[action]
self.insert_field(tank)
# handle shoot actions
for tanks in self._tanks:
for tank in tanks:
action = _actions[tank.side][tank.id]
if not tank.destroyed and Action.is_shoot(action):
x, y = tank.xy
action -= 4 # align it with the dx, dy indices
while True:
x += _dx[action]
y += _dy[action]
if not self.in_map(x, y):
break
currentFields = self.get_fields(x, y)
if len(currentFields) == 0:
continue
elif len(currentFields) > 1: # these must all be tanks
pass
else: # len(currentFields) == 1
field = currentFields[0]
if isinstance(field, (WaterField, EmptyField)):
continue # skip water and empty cells
elif ( isinstance(field, TankField)
and not self.has_multi_tanks(x, y)
and not self.has_multi_tanks(*field.xy)
): # mutual-fire check: at this point each side's cell contains exactly one tank
oppTank = field
oppAction = _actions[oppTank.side][oppTank.id]
if ( Action.is_shoot(oppAction)
and Action.is_opposite(action, oppAction)
):
break # shots from opposite directions cancel each other out
else:
pass # the tank is destroyed
elif isinstance(field, SteelField):
break # steel walls cannot be destroyed
elif isinstance(field, (BrickField, BaseField) ):
pass # bases and brick walls can be destroyed
else:
raise Exception("unexpected field type")
_fieldsToBeDestroyed.update(currentFields)
break # the first fields encountered are destroyed; stop here
for field in _fieldsToBeDestroyed:
self.remove_field(field)
#debug_print("End Turn: %s" % self._turn)
#self.debug_print_out()
def single_simulate(self, tank, action):
"""
Simulate one turn:
one specified tank performs a particular action while all other tanks stay still.
After the simulation ends, the map is rolled back automatically.
Input:
- tank TankField/BattleTank a tank object that can provide its coordinates
- action int the action for the next turn
"""
actions = [
[Action.STAY for _ in range(TANKS_PER_SIDE) ] for __ in range(SIDE_COUNT)
]
actions[tank.side][tank.id] = action
self.perform(*actions)
def multi_simulate(self, *actions):
"""
Simulate one turn:
the specified tanks perform the given actions while all other tanks stay still.
After the simulation ends, the map is rolled back automatically.
Input:
- *args in the format ( (Tank, action), (Tank, action), ... ); each Tank object must have side/id attributes
"""
performedActions = [
[Action.STAY for _ in range(TANKS_PER_SIDE) ] for __ in range(SIDE_COUNT)
]
for tank, action in actions:
performedActions[tank.side][tank.id] = action
self.perform(*performedActions)
def revert(self):
"""
Roll back one turn of actions.
Return:
- success bool
"""
if self._turn <= 0: # may be 1, in which case we roll back to the end point of Turn 0
return False # this means returning to the initial state of the map
currentTurn = self._turn
records = self._destroyedRecords
_actions = self._previousActions.pop()
for side, tanks in enumerate(self._tanks): # roll back the recorded actions
for id_, tank in enumerate(tanks):
tank.previousAction = _actions[side][id_]
while len(records) > 0:
if records[-1][0] == currentTurn:
turn, (x, y), field = records.pop()
if isinstance(field, TankField):
tank = field
if not tank.destroyed: # the tank had moved
self.remove_field(tank, record=False)
tank.x = x
tank.y = y
self.insert_field(tank)
else:
self.insert_field(field)
else:
break
self._turn -= 1
self.__clean_cache()
#debug_print("Revert to Turn: %s" % self._turn) # 至 turn 的结束状态
#self.debug_print_out()
return True
def undo_revert(self):
"""
After actively reverting from the current turn to an earlier one, undo that revert operation
"""
nextTurn = self._turn + 1
assert nextTurn in self._performedActionsRecord, "no previously revert operation found"
actions = self._performedActionsRecord[nextTurn]
self.perform(*actions)
@contextmanager
def simulate_one_action(self, tank, action):
| |
to database folder
if copy_files:
_copy_db_file(params, dbname, inpath, abs_outpath, log=True)
# ------------------------------------------------------------------
# update database with key
if dbname.lower() == 'telluric':
# get object name
if hasattr(outfile, 'get_key'):
objname = outfile.get_key('KW_OBJNAME', dtype=str)
else:
objname = 'None'
# update telluric database
update_telludb(params, dbname, dbkey, outfile, night, objname, log)
elif dbname.lower() == 'calibration':
# update calibration database
update_calibdb(params, dbname, dbkey, outfile, night, log)
def copy_calibrations(params, header, **kwargs):
# set function name
func_name = display_func(params, 'copy_calibrations', __NAME__)
# get parameters from params/kwargs
mode = pcheck(params, 'CALIB_DB_MATCH', 'mode', kwargs, func_name)
# set the dbname
dbname = 'calibration'
# get the output filename directory
outpath = os.path.join(params['OUTPATH'], params['NIGHTNAME'])
# ----------------------------------------------------------------------
# get calibration database
cdb = get_full_database(params, dbname)
# get shortname
dbshort = cdb.dbshort
# ----------------------------------------------------------------------
# get each unique key get the entry
entry_tables = []
# loop around unique keys and add to list
gkwargs = dict(database=cdb, header=header, n_ent=1, required=False,
mode=mode)
for key in cdb.unique_keys:
# get closest key
entry_table = get_key_from_db(params, key, **gkwargs)
# append to list of tables if we have rows
if len(entry_table) > 0:
entry_tables.append(entry_table)
# ----------------------------------------------------------------------
# stack the tables vertically
ctable = vstack(entry_tables)
# get the filenames
filecol = cdb.file_col
infilenames = ctable[filecol]
# ----------------------------------------------------------------------
# loop around file names and copy
for infilename in infilenames:
# get absolute paths
inpath = _get_outpath(params, dbname)
inabspath = os.path.join(inpath, infilename)
outabspath = os.path.join(outpath, infilename)
# debug message
dargs = [dbname, inabspath, outabspath]
WLOG(params, 'debug', TextEntry('90-002-00001', args=dargs))
# if file exists do not copy it
if os.path.exists(outabspath):
# log copying skipped
wargs = [dbshort, infilename]
WLOG(params, '', TextEntry('40-006-00002', args=wargs))
# else copy it
else:
# log copying
wargs = [dbshort, infilename, outpath]
WLOG(params, '', TextEntry('40-006-00003', args=wargs))
# copy the database file
_copy_db_file(params, dbname, inabspath, outabspath, log=False)
def get_header_time(params, database, header):
# set function name
func_name = display_func(params, 'get_header_time', __NAME__)
# get time from header
return _get_time(params, database.dbname, header=header)
# =============================================================================
# Define database get functions
# =============================================================================
def get_key_from_db(params, key, database, header, n_ent=1, required=True,
mode=None, **kwargs):
"""
:param params:
:param key:
:param database:
:param header:
:param n_ent:
:param required:
:param mode: should be None, 'TELLU_DB_MATCH', 'CALIB_DB_MATCH', 'DB_MATCH'
or 'ALL'
:param kwargs:
:return:
"""
# set function name
func_name = display_func(params, 'get_key_from_db', __NAME__)
# ----------------------------------------------------------------------
# deal with no mode set (assume from calibDB)
if mode is None:
if database.dbname == 'telluric':
mode = pcheck(params, 'TELLU_DB_MATCH', 'mode', kwargs, func_name)
elif database.dbname == 'calibration':
mode = pcheck(params, 'CALIB_DB_MATCH', 'mode', kwargs, func_name)
else:
mode = pcheck(params, 'DB_MATCH', 'mode', kwargs, func_name)
# debug print mode using
dargs = [mode, func_name]
WLOG(params, 'debug', TextEntry('90-002-00002', args=dargs))
# ----------------------------------------------------------------------
# get time from header
if mode != 'ALL':
header_time = _get_time(params, database.dbname, header=header)
else:
header_time = None
# get the correct entry from database
gkwargs = dict(mode=mode, usetime=header_time, n_entries=n_ent,
required=required)
# return the database entries (in astropy.table format)
return database.get_entry(key, **gkwargs)
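# Hedged usage sketch (added; not from the original source). Typical call
# shape, assuming `params` is the usual parameter dictionary, `cdb` was
# obtained via get_full_database(params, 'calibration') and `header` comes
# from the input file; the key name 'FLAT' is a placeholder.
#
#     entry = get_key_from_db(params, 'FLAT', cdb, header, n_ent=1,
#                             required=False, mode=None)
#     if len(entry) > 0:
#         filename = entry[cdb.file_col][0]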
def get_full_database(params, dbname):
# set function name
func_name = display_func(params, 'get_full_database', __NAME__)
# get database short name
dbshort = _get_dbshort(params, dbname)
# check for calibDB in params
if dbshort in params:
return params[dbshort]
# get all lines from calibration database
else:
database = Database(params, dbname)
database.read_database()
return database
def get_db_abspath(params, filename=None, where='guess'):
# set function name
func_name = display_func(params, 'get_db_abspath', __NAME__)
# ------------------------------------------------------------------
# get the calibration path and telluric path
cal_path = os.path.join(params['DRS_CALIB_DB'], filename)
tel_path = os.path.join(params['DRS_TELLU_DB'], filename)
# ------------------------------------------------------------------
# deal with where file is located
if where == 'calibration':
abspath = cal_path
elif where == 'telluric':
abspath = tel_path
elif where == 'guess':
# check full path
if os.path.exists(filename):
abspath = str(filename)
# check cal path
elif os.path.exists(cal_path):
abspath = cal_path
# check tellu path
elif os.path.exists(tel_path):
abspath = tel_path
else:
# raise error that defined filename does not exist
eargs = ['\n\t\t'.join([filename, cal_path, tel_path]), func_name]
WLOG(params, 'error', TextEntry('00-001-00036', args=eargs))
abspath = None
else:
# raise error that 'where' was not valid
eargs = [' or '.join(['calibration', 'telluric', 'guess']), func_name]
WLOG(params, 'error', TextEntry('00-001-00037', args=eargs))
abspath = None
# return the absolute path
return abspath
def get_db_file(params, abspath, ext=0, fmt='fits', kind='image',
get_image=True, get_header=False):
# set function name
func_name = display_func(params, 'get_db_file', __NAME__)
# don't lock if we aren't getting image or header
if not get_image and not get_header:
return None, None
# ------------------------------------------------------------------
# define a synchronized lock for indexing (so multiple instances do not
# run at the same time)
lockfile = os.path.basename(abspath)
# start a lock
lock = drs_lock.Lock(params, lockfile)
# ------------------------------------------------------------------
# make locked read function
@drs_lock.synchronized(lock, params['PID'])
def locked_db():
# ------------------------------------------------------------------
# deal with npy files
if abspath.endswith('.npy'):
image = drs_path.numpy_load(abspath)
return image, None
# ------------------------------------------------------------------
# get db fits file
if (not get_image) or (not abspath.endswith('.fits')):
image = None
elif kind == 'image':
image = drs_fits.readfits(params, abspath, ext=ext)
elif kind == 'table':
image = drs_table.read_table(params, abspath, fmt=fmt)
else:
# raise error if kind is incorrect
eargs = [' or '.join(['image', 'table']), func_name]
WLOG(params, 'error', TextEntry('00-001-00038', args=eargs))
image = None
# ------------------------------------------------------------------
# get header if required (and a fits file)
if get_header and abspath.endswith('.fits'):
header = drs_fits.read_header(params, abspath, ext=ext)
else:
header = None
# return the image and header
return image, header
# ------------------------------------------------------------------
# try to run locked read function
try:
return locked_db()
except KeyboardInterrupt as e:
lock.reset()
raise e
except Exception as e:
# reset lock
lock.reset()
raise e
# =============================================================================
# Define calibration database functions
# =============================================================================
# TODO: Redo to use Database class
def update_calibdb(params, dbname, dbkey, outfile, night=None, log=True):
# set function name
func_name = display_func(params, 'update_calibdb', __NAME__)
# deal with no night name
if night is None:
night = drs_log.find_param(params, 'NIGHTNAME', func=func_name)
if night == '' or night is None:
night = 'None'
# get whether recipe is master
is_master = params['IS_MASTER']
# ----------------------------------------------------------------------
# get the hdict
hdict, header = _get_hdict(params, dbname, outfile)
# ----------------------------------------------------------------------
# get time from header
header_time = _get_time(params, dbname, header=header, hdict=hdict)
# ----------------------------------------------------------------------
# get properties for database
key = str(dbkey).strip()
nightname = str(night).strip()
filename = str(outfile.basename).strip()
human_time = str(header_time.iso).replace(' ', '_').strip()
unix_time = str(header_time.unix).strip()
# get master key
if is_master:
master = '1'
else:
master = '0'
# ----------------------------------------------------------------------
# push into list
largs = [key, master, nightname, filename, human_time, unix_time]
# construct the line
line = '\n{0} {1} {2} {3} {4} {5}'.format(*largs)
# ----------------------------------------------------------------------
# write to file
_write_line_to_database(params, key, dbname, outfile, line, log)
# =============================================================================
# Define telluric database functions
# =============================================================================
# TODO: Redo to use Database class
def update_telludb(params, dbname, dbkey, outfile, night=None, objname=None,
log=True):
# set function name
func_name = display_func(params, 'update_telludb', __NAME__)
# deal with no night name
if night is None:
night = drs_log.find_param(params, 'NIGHTNAME', func=func_name)
if night == '' or night is None:
night = 'None'
# deal with no object name
if objname is None:
objname = 'Unknown'
# ----------------------------------------------------------------------
# get the hdict
hdict, header = _get_hdict(params, dbname, outfile)
# ----------------------------------------------------------------------
# get time from header
header_time = _get_time(params, dbname, header=header, hdict=hdict)
# ----------------------------------------------------------------------
# get properties for database
key = str(dbkey).strip()
nightname = str(night).strip()
filename = str(outfile.basename).strip()
human_time = str(header_time.iso).replace(' ', '_').strip()
unix_time = str(header_time.unix).strip()
# ----------------------------------------------------------------------
# push into list
largs = [key, nightname, filename, human_time, unix_time, objname]
# construct the line
line = '\n{0} {1} {2} {3} {4} {5}'.format(*largs)
# ----------------------------------------------------------------------
# write to file
_write_line_to_database(params, key, dbname, outfile, line, log)
# =============================================================================
# Define worker functions
# =============================================================================
def get_dbkey(params, outfile):
# set function name
func_name = display_func(params, 'get_dbkey', __NAME__)
# get database key (if it exists)
if hasattr(outfile, 'dbkey'):
dbkey = outfile.get_dbkey()
else:
eargs = [outfile.name, func_name]
WLOG(params, 'error', TextEntry('00-008-00012', args=eargs))
dbkey = ''
# return dbkey
return dbkey
def _get_dbname(params, outfile):
# set function name
func_name = display_func(params, '_get_dbname', __NAME__)
# get database names
dbnames = ['telluric', 'calibration']
# deal with each database name
if hasattr(outfile, 'dbname'):
dbname = outfile.dbname.capitalize()
else:
eargs = [outfile.name, ', '.join(dbnames), func_name]
WLOG(params, 'error', TextEntry('00-002-00012', args=eargs))
dbname = None
return dbname
def _get_dbshort(params, dbname):
# set function name
func_name = display_func(params, '_get_dbshort', __NAME__)
# get database names
dbnames = ['telluric', 'calibration']
| |
boxes are zero
rotated_boxes = rotated_boxes.T
rotated_boxes = convert_coordinates_axis_aligned(rotated_boxes[:4].T, 0, 'centroids2minmax')
b1 = convert_coordinates_axis_aligned(b1[:4], 0, 'centroids2minmax')
rotated_boxes = rotated_boxes.T
# get the greater xmin and ymin values.
min_xy = np.maximum(rotated_boxes[[xmin, ymin]].T, b1[[xmin, ymin]])
# get the smaller xmax and ymax values.
max_xy = np.minimum(rotated_boxes[[xmax, ymax]].T, b1[[xmax, ymax]])
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy)
side_lengths = side_lengths.T
inter_areas[i, :] = (side_lengths[0] * side_lengths[1]).T
return inter_areas
class Vector:
'''
Class representing a 2D point / vector
'''
def __init__(self, x, y):
self.x = x
self.y = y
def __add__(self, v):
return Vector(self.x + v.x, self.y + v.y)
def __sub__(self, v):
return Vector(self.x - v.x, self.y - v.y)
def cross(self, v):
return self.x * v.y - self.y * v.x
class Line:
'''
Class representing an edge of a bounding box
'''
# ax + by + c = 0
def __init__(self, v1, v2):
self.a = v2.y - v1.y
self.b = v1.x - v2.x
self.c = v2.cross(v1)
def __call__(self, p):
'''
Computes ax + by + c for a new point p
Determines on which side of the line the point is.
Any point p with line(p) <= 0 is on the "inside" (or on the boundary),
any point p with line(p) > 0 is on the "outside".
'''
return self.a * p.x + self.b * p.y + self.c
def intersection(self, other):
'''
Get intersection point between this line and another line
'''
w = self.a * other.b - self.b * other.a
return Vector(
(self.b * other.c - self.c * other.b) / w,
(self.c * other.a - self.a * other.c) / w
)
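# Note added for clarity: with lines a1*x + b1*y + c1 = 0 and a2*x + b2*y + c2 = 0,
# Cramer's rule gives x = (b1*c2 - c1*b2) / w, y = (c1*a2 - a1*c2) / w with
# w = a1*b2 - b1*a2, which is exactly what Line.intersection computes above.
# A tiny self-check (illustrative only, not part of the original module):
def _demo_line_intersection():
    x_axis = Line(Vector(0, 0), Vector(1, 0))      # the line y = 0
    vertical = Line(Vector(2, -1), Vector(2, 1))   # the line x = 2
    p = x_axis.intersection(vertical)
    assert abs(p.x - 2) < 1e-9 and abs(p.y) < 1e-9  # they meet at (2, 0)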
def rectangle_vertices(cx, cy, w, h, r):
'''
Compute the corner vertices of a rotated bounding box and return them as objects of the class Vector
'''
angle = r
dx = w / 2
dy = h / 2
dxcos = dx * np.cos(angle)
dxsin = dx * np.sin(angle)
dycos = dy * np.cos(angle)
dysin = dy * np.sin(angle)
return (
Vector(cx, cy) + Vector(-dxcos - -dysin, -dxsin + -dycos),
Vector(cx, cy) + Vector(dxcos - -dysin, dxsin + -dycos),
Vector(cx, cy) + Vector(dxcos - dysin, dxsin + dycos),
Vector(cx, cy) + Vector(-dxcos - dysin, -dxsin + dycos)
)
def intersection_area_(r1, r2):
'''
Computes the real intersection area of two rotated bounding boxes
Used during decoding in intersection_area_decoding.
Arguments:
r1 (array): a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the format (cx, cy, w, h, angle)
r2 (array): a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the format (cx, cy, w, h, angle)
Returns:
a float representing the intersection area of r1 and r2
'''
# First convert r1 and r2 into a sequence of vertices
rect1 = rectangle_vertices(*r1)
rect2 = rectangle_vertices(*r2)
# Use the vertices of the first rectangle as
# starting vertices of the intersection polygon.
intersection = rect1
# Loop over the edges of the second rectangle
for p, q in zip(rect2, rect2[1:] + rect2[:1]):
if len(intersection) <= 2:
break # No intersection
line = Line(p, q)
# Any point p with line(p) <= 0 is on the "inside" (or on the boundary),
# Any point p with line(p) > 0 is on the "outside".
# Loop over the edges of the intersection polygon,
# and determine which part is inside and which is outside.
new_intersection = []
line_values = [line(t) for t in intersection]
for s, t, s_value, t_value in zip(
intersection, intersection[1:] + intersection[:1],
line_values, line_values[1:] + line_values[:1]):
if s_value <= 0:
new_intersection.append(s)
if s_value * t_value < 0:
# Points are on opposite sides.
# Add the intersection of the lines to new_intersection.
intersection_point = line.intersection(Line(s, t))
new_intersection.append(intersection_point)
intersection = new_intersection
# Calculate area
if len(intersection) <= 2:
return 0
# return intersection area
return 0.5 * sum(p.x * q.y - p.y * q.x for p, q in
zip(intersection, intersection[1:] + intersection[:1]))
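# Illustrative sketch (added; not part of the original module). Two unit
# squares in (cx, cy, w, h, angle) format offset by half a width overlap over
# an area of 0.5; rotating one of them by 90 degrees leaves the overlap of a
# square unchanged. The numbers are arbitrary test values.
def _demo_intersection_area():
    import numpy as np
    box_a = np.array([0.0, 0.0, 1.0, 1.0, 0.0])
    box_b = np.array([0.5, 0.0, 1.0, 1.0, 0.0])
    assert abs(intersection_area_(box_a, box_b) - 0.5) < 1e-6
    box_c = np.array([0.5, 0.0, 1.0, 1.0, np.pi / 2])
    assert abs(intersection_area_(box_a, box_c) - 0.5) < 1e-6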
def intersection_area_decoding(boxes1, boxes2):
'''
Computes the intersection areas of two sets of 2D rectangular boxes.
The function is used for decoding raw predictions with non-maximum suppression (NMS)
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
returns an `(m,n)` matrix with the intersection areas for all possible
combinations of the boxes in `boxes1` and `boxes2`.
Arguments:
boxes1 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the
format centroids or a 2D Numpy array of shape `(m, 5)` containing the coordinates for `m` boxes.
boxes2 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the
format centroids or a 2D Numpy array of shape `(n, 5)` containing the coordinates for `n` boxes.
Returns:
A 2D Numpy array of dtype float containing values with the intersection areas of the boxes in `boxes1` and `boxes2`.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 5):
raise ValueError(
"All boxes must consist of 5 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format(
boxes1.shape[1], boxes2.shape[1]))
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
inter_areas = np.zeros((m, n))
for i, b1 in enumerate(boxes1):
for j, b2 in enumerate(boxes2):
inter_areas[i, j] = intersection_area_(b1, b2)
return inter_areas
def sum_area_(boxes1, boxes2):
'''
Computes the sum of areas of two sets of axis-aligned 2D rectangular boxes.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
returns an `(m,n)` matrix with the sum of the areas for all possible
combinations of the boxes in `boxes1` and `boxes2`.
Arguments:
boxes1 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the
format centroids or a 2D Numpy array of shape `(m, 5)` containing the coordinates for `m` boxes.
boxes2 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the
format centroids or a 2D Numpy array of shape `(n, 5)` containing the coordinates for `n` boxes.
Returns:
A 2D Numpy array of dtype float containing values with the sum of the areas of the boxes in `boxes1` and `boxes2`.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 5):
raise ValueError(
"All boxes must consist of 5 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format(
boxes1.shape[1], boxes2.shape[1]))
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
areas1 = boxes1[:, 2] * boxes1[:, 3] # w*h
areas2 = boxes2[:, 2] * boxes2[:, 3] # w*h
s1 = np.tile(np.expand_dims(areas1, axis=1), reps=(1, n))
s2 = np.tile(np.expand_dims(areas2, axis=0), reps=(m, 1))
return s1 + s2
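# Note added for clarity: a decoding-time IoU for rotated boxes can be
# assembled from the two helpers above, since union = area1 + area2 - intersection.
# This composition is a minimal sketch and is not taken from the original source.
def _demo_rotated_iou(boxes1, boxes2):
    inter = intersection_area_decoding(boxes1, boxes2)
    union = sum_area_(boxes1, boxes2) - inter
    return inter / union  # element-wise (m, n) matrix of IoU values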
def ARiou180(boxes1, boxes2):
'''
Computes the modified version of intersection-over-union similarity, ARIou180, of two sets of rotated 2D rectangular boxes.
Used only for training.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
returns an `(m,n)` matrix with the ARIoU180s for all possible combinations of the boxes in `boxes1` and `boxes2`.
Arguments:
boxes1 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box | |
# coding: utf-8
""" Simulate low level RFI
We are interested in the effects of RFI signals that cannot be detected in the visibility data. Therefore,
in our simulations we add attenuation selected to give SNR about 1 in the unaveraged time-frequency data.
This is about 180dB for a DTV station in Perth.
The scenario is:
* There is a TV station at a remote location (e.g. Perth), emitting a broadband signal (7MHz) of known power (50kW).
* The emission from the TV station arrives at LOW stations with phase delay and attenuation. Neither of these are
well known but they are probably static.
* The RFI enters LOW stations in a sidelobe of the station beam. Calculations by <NAME> indicate that this
provides attenuation of about 55 - 60dB for a source close to the horizon.
* The RFI enters each LOW station with fixed delay and zero fringe rate (assuming no e.g. ionospheric ducting or
reflection from a plane)
* In tracking a source on the sky, the signal from one station is delayed and fringe-rotated to stop the fringes for
one direction on the sky.
* The fringe rotation stops the fringe from a source at the phase tracking centre but phase rotates the RFI, which
now becomes time-variable.
* The correlation data are time- and frequency-averaged over a timescale appropriate for the station field of view.
This averaging decorrelates the RFI signal.
* We want to study the effects of this RFI on statistics of the visibilities, and on images made on source and
at the pole.
The simulate_low_rfi_visibility.py script averages the data producing baseline-dependent decorrelation.
The effect of averaging is not more than about -20dB but it does vary with baseline giving the radial
power spectrum we see. The 55-60 dB is part of the 180dB. To give a signal-to-noise ratio of 1 or less, the
terrain propagation loss must be about 100dB.
The simulation is implemented in some functions in ARL, and the script simulate_low_rfi_visibility is available
in the SKA Github repository sim-lowlevel-rfi. Distributed processing is implemented via Dask. The outputs are
FITS files and plots of the images: on signal channels and on pure noise channels, and for the source of
interest and the Southern Celestial Pole. The unaveraged MeasurementSets are also output, one per time chunk.
"""
import os
import pprint
import time
import matplotlib.pyplot as plt
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord, EarthLocation
import astropy.constants as const
from data_models.polarisation import PolarisationFrame
from processing_components.simulation.rfi import calculate_averaged_correlation, simulate_rfi_block
from processing_library.image.operations import create_image
from processing_library.util.array_functions import average_chunks
from processing_components.visibility.base import export_blockvisibility_to_ms
from workflows.arlexecute.imaging.imaging_arlexecute import invert_list_arlexecute_workflow, \
sum_invert_results_arlexecute
from wrappers.arlexecute.execution_support.arlexecute import arlexecute
from wrappers.arlexecute.execution_support.dask_init import get_dask_Client
from wrappers.arlexecute.image.operations import show_image, export_image_to_fits
from wrappers.arlexecute.simulation.configurations import create_named_configuration
from wrappers.arlexecute.visibility.base import create_blockvisibility
from wrappers.arlexecute.visibility.coalesce import convert_blockvisibility_to_visibility
def add_noise(bvis):
# The specified sensitivity (effective area / T_sys) is roughly 610 m^2/K in the range 160 - 200MHz
# sigma_vis = 2 k_B T_sys / (A_eff * sqrt(B t)) = 2 k_B * 512 / (610 * sqrt(B t)); the 1e26 factor converts to Jy
sens = 610
bt = bvis.channel_bandwidth[0] * bvis.integration_time[0]
sigma = 2 * 1e26 * const.k_B.value / ((sens/512) * (numpy.sqrt(bt)))
sshape = bvis.vis.shape
bvis.data['vis'] += numpy.random.normal(0.0, sigma, sshape) + 1j * numpy.random.normal(0.0, sigma, sshape)
return bvis
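# Worked example (added for illustration, using the default setup below:
# ~13.7 kHz channels, 0.25 s integrations, and the assumed 610 m^2/K
# sensitivity shared over 512 stations):
#     bt    = 13.7e3 * 0.25                                   # ~3.4e3
#     sigma = 2 * 1e26 * const.k_B.value / ((610 / 512) * numpy.sqrt(bt))
# which is roughly 40 Jy per unaveraged visibility sample.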
def simulate_rfi_image(config, times, frequency, channel_bandwidth, phasecentre, polarisation_frame,
time_average, channel_average, attenuation, noise,
emitter_location, emitter_power, use_pole, waterfall, write_ms):
averaged_frequency = numpy.array(average_chunks(frequency, numpy.ones_like(frequency), channel_average))[0]
averaged_channel_bandwidth, wts = numpy.array(
average_chunks(channel_bandwidth, numpy.ones_like(frequency), channel_average))
averaged_channel_bandwidth *= wts
averaged_times = numpy.array(average_chunks(times, numpy.ones_like(times), time_average))[0]
s2r = numpy.pi / 43200.0
bvis = create_blockvisibility(config, s2r * times, frequency,
channel_bandwidth=channel_bandwidth,
phasecentre=phasecentre,
polarisation_frame=polarisation_frame,
zerow=False)
bvis = simulate_rfi_block(bvis, emitter_location=emitter_location,
emitter_power=emitter_power, attenuation=attenuation, use_pole=use_pole)
if noise:
bvis = add_noise(bvis)
if waterfall:
plot_waterfall(bvis)
if write_ms:
msname = "simulate_rfi_%.1f.ms" % (times[0])
export_blockvisibility_to_ms(msname, [bvis], "RFI")
averaged_bvis = create_blockvisibility(config, s2r * averaged_times, averaged_frequency,
channel_bandwidth=averaged_channel_bandwidth,
phasecentre=phasecentre,
polarisation_frame=polarisation_frame,
zerow=False)
npol = 1
nants = averaged_bvis.nants  # number of stations (previously relied on a module-level global)
for itime, _ in enumerate(averaged_times):
atime = itime * time_average
for ant2 in range(nants):
for ant1 in range(ant2, nants):
for ichan, _ in enumerate(averaged_frequency):
achan = ichan * channel_average
for pol in range(npol):
averaged_bvis.data['vis'][itime, ant2, ant1, ichan, pol] = \
calculate_averaged_correlation(
bvis.data['vis'][atime:(atime+time_average), ant2, ant1, achan:(achan+channel_average), pol],
time_average, channel_average)[0,0]
averaged_bvis.data['vis'][itime, ant1, ant2, ichan, pol] = \
numpy.conjugate(averaged_bvis.data['vis'][itime, ant2, ant1, ichan, pol])
achan += 1
atime += 1
del bvis
if noise:
averaged_bvis = add_noise(averaged_bvis)
return averaged_bvis
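# Illustrative sketch (added; not from the original script): time/frequency
# averaging suppresses a fringe-rotated RFI phasor roughly by a sinc-like
# factor, which is the baseline-dependent decorrelation described in the
# module docstring. The numbers below are arbitrary.
def _demo_averaging_decorrelation(fringe_rate_hz=1.0, n_samples=16, dt=0.25):
    t = numpy.arange(n_samples) * dt
    phasor = numpy.exp(2j * numpy.pi * fringe_rate_hz * t)
    return numpy.abs(phasor.mean())  # < 1: averaging reduces the RFI amplitude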
def plot_waterfall(bvis):
print(bvis.uvw.shape)
uvdist = numpy.hypot(bvis.uvw[0,:,:,0], bvis.uvw[0,:,:,1])
print(uvdist.shape)
uvdistmax = 0.0
max_ant1=0
max_ant2=0
for ant2 in range(bvis.nants):
for ant1 in range(ant2+1):
if uvdist[ant2, ant1] > uvdistmax:
uvdistmax = uvdist[ant2, ant1]
max_ant1 = ant1
max_ant2 = ant2
basename = os.path.basename(os.getcwd())
fig=plt.figure()
fig.suptitle('%s: Baseline [%d, %d], ha %.2f' % (basename, max_ant1, max_ant2, bvis.time[0]))
plt.subplot(121)
plt.gca().set_title("Amplitude")
plt.gca().imshow(numpy.abs(bvis.vis[: , max_ant1, max_ant2, :, 0]), origin='bottom')
plt.gca().set_xlabel('Channel')
plt.gca().set_ylabel('Time')
plt.subplot(122)
plt.gca().imshow(numpy.angle(bvis.vis[: , max_ant1, max_ant2, :, 0]), origin='bottom')
plt.gca().set_title("Phase")
plt.gca().set_xlabel('Channel')
plt.gca().set_ylabel('Time')
plt.savefig('waterfall_%d_%d_ha_%.2f.png' % (max_ant1, max_ant2, bvis.time[0]))
plt.show(block=False)
if __name__ == '__main__':
start_epoch = time.asctime()
print("\nSKA LOW RFI simulation using ARL\nStarted at %s\n" % start_epoch)
pp = pprint.PrettyPrinter()
import argparse
parser = argparse.ArgumentParser(description='Simulate DTV RFI')
parser.add_argument('--use_dask', type=str, default='True', help='Use Dask to distribute processing?')
parser.add_argument('--context', type=str, default='DTV', help='DTV')
parser.add_argument('--rmax', type=float, default=3e3, help='Maximum distance of station from centre (m)')
parser.add_argument('--seed', type=int, default=18051955, help='Random number seed')
parser.add_argument('--station_skip', type=int, default=33, help='Decimate stations by this factor')
parser.add_argument('--show', type=str, default='False', help='Show images?')
parser.add_argument('--attenuation', type=float, default=1.0, help='Attenuation factor')
parser.add_argument('--noise', type=str, default='False', help='Add noise?')
parser.add_argument('--ngroup_visibility', type=int, default=8, help='Process in visibility groups this large')
parser.add_argument('--do_psf', type=str, default="False", help='Make the PSF?')
parser.add_argument('--use_agg', type=str, default="False", help='Use Agg matplotlib backend?')
parser.add_argument('--write_fits', type=str, default="True", help='Write fits files?')
parser.add_argument('--declination', type=float, default=-45.0, help='Declination (degrees)')
parser.add_argument('--npixel', type=int, default=1025, help='Number of pixel per axis in image')
parser.add_argument('--nchannels_per_chunk', type=int, default=1024, help='Number of channels in a chunk')
parser.add_argument('--channel_average', type=int, default=16, help="Number of channels in a chunk to average")
parser.add_argument('--frequency_range', type=float, nargs=2, default=[170.5e6, 184.5e6],
help="Frequency range (Hz)")
parser.add_argument('--nintegrations_per_chunk', type=int, default=64,
help='Number of integrations in a time chunk')
parser.add_argument('--time_average', type=int, default=16, help="Number of integrations in a chunk to average")
parser.add_argument('--integration_time', type=float, default=0.25, help="Integration time (s)")
parser.add_argument('--time_range', type=float, nargs=2, default=[-6.0, 6.0], help="Hourangle range (hours)")
parser.add_argument('--emitter_longitude', type=float, default=115.8605, help="Emitter longitude")
parser.add_argument('--emitter_latitude', type=float, default=-31.9505, help="Emitter latitude")
parser.add_argument('--emitter_power', type=float, default=5e4, help="Emitter power (W)]")
parser.add_argument('--use_pole', type=str, default="False", help='Set RFI source at pole?')
parser.add_argument('--waterfall', type=str, default="False", help='Plot waterfalls?')
parser.add_argument('--write_ms', type=str, default="False", help='Write measurement sets?')
args = parser.parse_args()
print("Starting LOW low level RFI simulation")
pp.pprint(vars(args))
write_ms = args.write_ms == "True"
numpy.random.seed(args.seed)
if args.use_dask == "True":
client = get_dask_Client(threads_per_worker=1,
processes=True,
memory_limit=32 * 1024 * 1024 * 1024,
n_workers=8)
arlexecute.set_client(client=client)
print(arlexecute.client)
else:
print("Running in serial mode")
arlexecute.set_client(use_dask=False)
emitter_location = EarthLocation(lon=args.emitter_longitude, lat=args.emitter_latitude, height=0.0)
emitter_power = args.emitter_power
print("Emitter is %.1f kW at location %s" % (1e-3 * emitter_power, emitter_location.geodetic))
if args.waterfall == "True":
waterfall = True
else:
waterfall = False
if args.noise == "True":
noise = True
print("Adding noise to simulated data")
else:
noise = False
if args.use_pole == "True":
print("Placing emitter at the southern celestial pole")
use_pole= True
else:
use_pole = False
rmax = args.rmax
low = create_named_configuration('LOWR3', rmax=rmax)
nants = len(low.names)
print("There are %d stations" % nants)
station_skip = args.station_skip
low.data = low.data[::station_skip]
nants = len(low.names)
print("There are %d stations after decimation" % nants)
npixel = args.npixel
declination = args.declination
phasecentre = SkyCoord(ra=+0.0 * u.deg, dec=declination * u.deg, frame='icrs', equinox='J2000')
pole = SkyCoord(ra=+0.0 * u.deg, dec=-90.0 * u.deg, frame='icrs', equinox='J2000')
# Number of integrations in a time chunk
nintegrations_per_chunk = args.nintegrations_per_chunk
# Integration time within a chunk
integration_time = args.integration_time
# Number of integrations to average
time_average = args.time_average
# Integration time after averaging
average_integration_time = time_average * integration_time
print("Each chunk has %d integrations of duration %.2f (s)" %
(args.nintegrations_per_chunk, integration_time))
frequency = numpy.linspace(args.frequency_range[0], args.frequency_range[1], args.nchannels_per_chunk)
channel_bandwidth = (frequency[-1] - frequency[0]) / (args.nchannels_per_chunk - 1)
channel_average = args.channel_average
print("Each chunk has %d frequency channels of width %.3f (MHz)" %
(args.nchannels_per_chunk, channel_bandwidth * 1e-6))
channel_bandwidth = numpy.ones_like(frequency) * channel_bandwidth
start_times = numpy.arange(args.time_range[0] * 3600.0, args.time_range[1] * 3600.0,
nintegrations_per_chunk * integration_time)
print("Start times", start_times)
results = list()
pole_results = list()
chunk_start_times = [start_times[i:i + args.ngroup_visibility]
for i in range(0, len(start_times), args.ngroup_visibility)]
print("Chunk start times", [c[0] for c in chunk_start_times])
dopsf = args.do_psf == "True"
# | |
<filename>scripts/iscsictl.py
#! /usr/bin/env python
# Copyright (c) 2015 SUSE LINUX GmbH, Nuernberg, Germany.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import argparse
import os
import re
import sys
import sh
#
# Example of use
# ==============
#
# If we have a network with three nodes (N1, N2, N3), we can use
# iscsictl script to deploy different iscsi configuations.
#
# Use case 1: Single device
# -------------------------
#
# Create the target service in N1
# ./iscsictl.py --service target --host N1
#
# Discover and connect both initiators
# ./iscsictl.py --service initiator --target_host N1 --host N2
# ./iscsictl.py --service initiator --target_host N1 --host N3
#
#
# Use case 2: Add a new target
# ----------------------------
#
# Create second target in N1
# ./iscsictl.py --service target --host N1 --device /dev/loop1 --id id02
#
# Discover and connect both initiators
# ./iscsictl.py --service initiator --target_host N1 --host N2 --id id02
# ./iscsictl.py --service initiator --target_host N1 --host N3 --id id02
#
#
# Use case 3: Share a block device
# --------------------------------
#
# Create a target for an existing block device:
# ./iscsictl.py --service target --host N1 --device /dev/sdc --id id03
#
# Discover and connect both initiators
# ./iscsictl.py --service initiator --target_host N1 --host N2 --id id03
# ./iscsictl.py --service initiator --target_host N1 --host N3 --id id03
#
# open stdout in unbuffered mode
sys.stdout = os.fdopen(sys.stdout.fileno(), "wb", 0)
class Key(object):
"""Class used to create and reuse temporal SSH keys."""
def __init__(self, name=None):
"""Create a new key without passphrase if there is any."""
if not name:
name = '.iscsi_fake_id_dsa'
self.name = name
if not os.path.exists(self.name):
sh.ssh_keygen('-t', 'dsa', '-f', self.name, '-N', '')
os.chmod(self.key(), 0o600)
os.chmod(self.pub_key(), 0o600)
def key(self):
"""Return the private key filename."""
return self.name
def pub_key(self):
"""Return the public key filename."""
return self.name + '.pub'
def clean_key(self):
"""Remove private and public temporal keys."""
if os.path.exists(self.key()):
os.remove(self.key())
os.remove(self.pub_key())
class SSH(object):
"""Simplify SSH connections to a remote machine."""
def __init__(self, host, user, password, new_key=True, key=None):
if new_key and not key:
key = Key(name='.%s_iscsi_fake_id_dsa' % host)
self.host = host
self.user = user
self.password = password
self.key = key
self._copy_id = False
self._connect = None
def ssh_copy_id(self):
"""Copy a fake key (key without passphrase) into a node."""
# If the ID is already there, do nothing
if not self.key or self._copy_id:
return
def _interact(char, stdin):
sys.stdout.write(char.encode())
_interact.aggregated += char
if _interact.aggregated.endswith("Password: "):
stdin.put('%s\n' % self.password)
elif char == '\n':
_interact.aggregated = ''
_interact.aggregated = ''
sh.ssh_copy_id('-i', self.key.pub_key(),
'-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile=/dev/null',
'%s@%s' % (self.user, self.host),
_out=_interact, _out_bufsize=0, _tty_in=True)
def clean_key(self):
"""Remove key from the remote server."""
if not self._connect or not self.key:
return
key = "'%s'" % open(self.key.pub_key()).read().strip()
self._connect.grep('-v', key, '~/.ssh/authorized_keys',
'> ~/.ssh/authorized_keys.TMP')
self._connect.cp('-a', '~/.ssh/authorized_keys',
'~/.ssh/authorized_keys.BAK')
self._connect.mv('~/.ssh/authorized_keys.TMP',
'~/.ssh/authorized_keys')
self._connect = None
# Remove locally generated keys
self.key.clean_key()
def connect(self):
"""Create an SSH connection to the remote host."""
if not self._copy_id:
self.ssh_copy_id()
params = ['-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile=/dev/null',
'%s@%s' % (self.user, self.host)]
if self.key:
params = ['-i', self.key.key()] + params
self._connect = sh.ssh.bake(*params)
return self._connect
def __getattr__(self, name):
"""Delegate missing attributes to local connection."""
if not self._connect:
self.connect()
if self._connect:
return getattr(self._connect, name)
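# Hedged usage sketch (added for illustration; the host name and credentials
# are placeholders). The SSH wrapper copies a temporary passphrase-less key to
# the node once, then delegates attribute access to a baked `sh.ssh`
# connection, so remote commands read like local ones:
#
#     node = SSH('node1.example.com', 'root', 'secret')
#     print(node.hostname())                    # runs `hostname` remotely
#     node.systemctl('status', 'sshd.service')
#     node.clean_key()                          # remove the temporary key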
class ISCSI(object):
"""Class for basic iSCSI management."""
START = 'start'
STOP = 'stop'
RESTART = 'restart'
def __init__(self, ssh):
self.ssh = ssh
def service(self, service, action):
if action == ISCSI.START:
self.ssh.systemctl('enable', '%s.service' % service)
self.ssh.systemctl('start', '%s.service' % service)
elif action == ISCSI.STOP:
self.ssh.systemctl('stop', '%s.service' % service)
self.ssh.systemctl('disable', '%s.service' % service)
elif action == ISCSI.RESTART:
self.ssh.systemctl('restart', '%s.service' % service)
else:
raise Exception('Service action not recognized.')
def zypper(self, package):
self.ssh.zypper('--non-interactive', 'install',
'--no-recommends', package)
def append_cfg(self, fname, lines):
"""Append only new lines in a configuration file."""
cfg = str(self.ssh.cat(fname))
# Only append the line if it is not there
for line in lines:
if not re.search('^%s$' % re.escape(line), cfg, re.MULTILINE):
self.ssh.echo('-e', "'%s'" % line, '>> %s' % fname)
def remove_cfg(self, fname, lines):
"""Remove lines in a configuration file."""
cfg = str(self.ssh.cat(fname))
# Remove all matching lines, appending and EOL
for line in lines:
cfg = cfg.replace(line + '\n', '')
# Make a backup of the configuration file and replace the
# content. Check that the new content is as expected and, if
# so, remove the backup.
fbackup = fname + '.BACKUP'
self.ssh.cp('-a', fname, fbackup)
self.ssh.echo('-e', '-n', '"%s"' % cfg, '> %s' % fname)
new_cfg = str(self.ssh.cat(fname))
if cfg != new_cfg:
fedit = fname + '.EDIT'
self.ssh.cp('-a', fname, fedit)
self.ssh.mv(fbackup, fname)
raise Exception('Configuration file reverted. '
'Check %s for more details' % fedit)
else:
self.ssh.rm(fbackup)
def deploy(self):
raise NotImplementedError('Deploy method not implemented')
class Target(ISCSI):
"""Define and manage an iSCSI target node."""
def __init__(self, ssh, device, path, iqn_id, size=1, reuse=False):
super(Target, self).__init__(ssh)
self.device = device
self.path = path
self.iqn_id = iqn_id
# `size` is expressed in mega (M)
self.size = size
self.reuse = reuse
def find_loop(self, loop):
"""Find an attached loop devide."""
pattern = re.compile(r'^(/dev/loop\d+):.*\((.*)\)')
for line in self.ssh.losetup('-a'):
ldev, lfile = pattern.match(line).groups()
if loop == ldev:
return (ldev, lfile)
def destroy_loop(self, loop):
"""Destroy loopback devices."""
is_in = self.find_loop(loop)
if is_in:
_, path = is_in
out = str(self.ssh.losetup('-d', loop))
if "can't delete" in out:
raise Exception(out)
self.ssh.rm(path)
def create_loop(self, loop, path, size):
"""Create a new loopback device."""
is_in = self.find_loop(loop)
if is_in and self.reuse:
return
elif is_in:
raise Exception('loop device already installed: %s / %s' %
is_in)
self.ssh.dd('if=/dev/zero', 'of=%s' % path, 'bs=1M',
'count=%d' % size)
self.ssh.fdisk(sh.echo('-e', r'o\nn\np\n1\n\n\nw'), path)
self.ssh.losetup(loop, path)
is_in = self.find_loop(loop)
if not is_in:
raise Exception('fail to create loop device: %s / %s' %
is_in)
def deploy(self):
"""Deploy, configure and launch iSCSI target."""
print("Installing lio-utils ...")
self.zypper('lio-utils')
self.service('target', ISCSI.START)
if self.device.startswith('/dev/loop'):
if self.path:
print("Creating loopback ...")
self.create_loop(self.device, self.path, self.size)
else:
raise Exception('Please, provide a path for a loop device')
# Detecting IP
print("Looking for host IP ...")
ip = str(self.ssh.ip('a', 's', 'eth0'))
ip = re.findall(r'inet (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})/\d+', ip)
if ip:
ip = ip[0]
else:
raise Exception('IP address not found')
iqn = 'iqn.2015-01.qa.cloud.suse.de:%s' % self.iqn_id
print("Registering target for %s ..." % iqn)
self.ssh.tcm_node('--block', 'iblock_0/%s' % self.iqn_id,
self.device)
self.ssh.lio_node('--addlun', iqn, '1', '0', 'iscsi_port',
'iblock_0/%s' % self.iqn_id)
self.ssh.lio_node('--addnp', iqn, '1', '%s:3260' % ip)
self.ssh.lio_node('--disableauth', iqn, '1')
self.ssh.lio_node('--enabletpg', iqn, '1')
# Add in /etc/target/tcm_start.sh
if self.device.startswith('/dev/loop'):
print("Adding loopback to target service startup ...")
lines = (
'[[ $(losetup -j %s) == "%s"* ]] || losetup %s %s' % (
self.path, self.device, self.device, self.path),
)
self.append_cfg('/etc/target/tcm_start.sh', lines)
# Check if the device is exported
print("Checking that the target is exported ...")
result = str(self.ssh.lio_node('--listtargetnames'))
if iqn not in result:
raise Exception('Unable to deploy the iSCSI target')
class Initiator(ISCSI):
"""Define and manage an iSCSI initiator node."""
# For now, we are going to use the discovery option for iSCSI to
# populate the database. This simplify the deployment of a basic
# iSCSI scenario, with a single target point and multiple
# initiators.
def __init__(self, ssh, target_ssh, iqn_id):
"""Initialize the Initiator instance with an ip and a mount point."""
super(Initiator, self).__init__(ssh)
self.target_ssh = target_ssh
self.iqn_id = iqn_id
self.name = None
def deploy(self):
"""Deploy, configure and persist an iSCSI initiator."""
print("Installing open-iscsi ...")
self.zypper('open-iscsi')
# Default configuration only takes care of authentication
print("Configuring open-iscsi for automatic startup ...")
lines = (
'node.startup = automatic',
)
self.append_cfg('/etc/iscsid.conf', lines)
# Persist and start the service
print("Reloading the configuration ...")
self.service('iscsid', ISCSI.START)
self.service('iscsid', ISCSI.RESTART)
iqn = 'iqn.2015-01.qa.cloud.suse.de:%s' % self.iqn_id
# Get the initiator name for the ACL
print("Detecting initiator name ...")
initiator = str(self.ssh.cat('/etc/iscsi/initiatorname.iscsi'))
initiator = re.findall(r'InitiatorName=(iqn.*)', initiator)
if initiator:
initiator = initiator[0]
else:
raise Exception('Initiator name not found')
# Add the initiator name in the target ACL
| |
h_alkyl=kwargs.get('r_h_alkyl', 1.22),
cl=kwargs.get('r_cl', 1.77),
na=kwargs.get('r_na', 0.95),
# fe=kwargs.get('r_fe', 0.74),
fe=kwargs.get('r_fe', 0.6),
zn=kwargs.get('r_zn', 0.71))
def set_atom_types(self):
self.atom_types_dfq = set(self.dfq.atom_type_label)
self.atom_types_dft = set(self.dft.atom_type_label)
def split_dfs_to_atom_types(self):
for atom_type in self.atom_types_dfq:
self.dfq_atom_type[atom_type] = self.dfq[self.dfq.atom_type_label.isin([atom_type])]
for atom_type in self.atom_types_dft:
self.dft_atom_type[atom_type] = self.dft[self.dft.atom_type_label.isin([atom_type])]
@staticmethod
def make_tree(dfq_):
return BallTree(dfq_[['c_x', 'c_y', 'c_z']].values)
def make_trees(self):
for atom_type, dfq_ in self.dfq_atom_type.items():
self._balltrees[atom_type] = self.make_tree(dfq_)
@staticmethod
def prune_empty(dists, inds):
t_inds = []
dists_ = []
q_inds = []
for t_ind, (dist, q_ind) in enumerate(zip(dists, inds)):
if dist.size > 0:
t_inds.append([t_ind])
dists_.append(dist)
q_inds.append(q_ind)
return t_inds, q_inds, dists_
@staticmethod
def partition_contacts_hb_hard_cutoff(dists, q_inds, t_inds,
cc_low, cc_low_hb, cc_high, wc_high,
hb_hard_cutoff):
q_inds_clashes = []
t_inds_clashes = []
q_inds_cc = []
t_inds_cc = []
q_inds_wc = []
t_inds_wc = []
q_inds_poss_hbonds_cl = []
t_inds_poss_hbonds_cl = []
q_inds_poss_hbonds_cc = []
t_inds_poss_hbonds_cc = []
for d, i_q, i_t in zip(dists, q_inds, t_inds):
clashing = d < hb_hard_cutoff
poss_hbonds_cl_test = (d >= hb_hard_cutoff) & (d < cc_low)
poss_hbonds_cc_test = (d >= cc_low) & (d < cc_low_hb)
clashes = i_q[clashing]
cc_test = (d >= cc_low_hb) & (d < cc_high)
wc_test = (d >= cc_high) & (d < wc_high)
ccs = i_q[cc_test]
wcs = i_q[wc_test]
poss_hbonds_cl = i_q[poss_hbonds_cl_test]
poss_hbonds_cc = i_q[poss_hbonds_cc_test]
if clashes.size > 0:
q_inds_clashes.append(clashes)
t_inds_clashes.append(i_t)
if ccs.size > 0:
q_inds_cc.append(ccs)
t_inds_cc.append(i_t)
if wcs.size > 0:
q_inds_wc.append(wcs)
t_inds_wc.append(i_t)
if poss_hbonds_cl.size > 0:
q_inds_poss_hbonds_cl.append(poss_hbonds_cl)
t_inds_poss_hbonds_cl.append(i_t)
if poss_hbonds_cc.size > 0:
q_inds_poss_hbonds_cc.append(poss_hbonds_cc)
t_inds_poss_hbonds_cc.append(i_t)
return q_inds_clashes, t_inds_clashes, q_inds_cc, t_inds_cc, \
q_inds_wc, t_inds_wc, q_inds_poss_hbonds_cl, t_inds_poss_hbonds_cl, \
q_inds_poss_hbonds_cc, t_inds_poss_hbonds_cc
@staticmethod
def partition_contacts_no_hb(dists, q_inds, t_inds, cc_low, cc_high, wc_high):
q_inds_clashes = []
t_inds_clashes = []
q_inds_cc = []
t_inds_cc = []
q_inds_wc = []
t_inds_wc = []
for d, i_q, i_t in zip(dists, q_inds, t_inds):
clashing = d < cc_low
clashes = i_q[clashing]
cc_test = (d >= cc_low) & (d < cc_high)
wc_test = (d >= cc_high) & (d < wc_high)
ccs = i_q[cc_test]
wcs = i_q[wc_test]
if clashes.size > 0:
q_inds_clashes.append(clashes)
t_inds_clashes.append(i_t)
if ccs.size > 0:
q_inds_cc.append(ccs)
t_inds_cc.append(i_t)
if wcs.size > 0:
q_inds_wc.append(wcs)
t_inds_wc.append(i_t)
return q_inds_clashes, t_inds_clashes, q_inds_cc, t_inds_cc, \
q_inds_wc, t_inds_wc, [], []
def _angle_test(self, dfq, dft, q_inds_poss_hbonds):
t_is_don = ~np.isnan(dft.c_D_x.values)
t_is_acc = ~np.isnan(dft.c_A1_x.values)
if ~t_is_don and ~t_is_acc: # dft is only 1 row, so .values produces a scalar
return list(q_inds_poss_hbonds), []
q_is_don = ~np.isnan(dfq.c_D_x.values)
q_is_acc = ~np.isnan(dfq.c_A1_x.values)
if (~q_is_don).all() and (~q_is_acc).all():
return list(q_inds_poss_hbonds), []
clashing = set(q_inds_poss_hbonds)
hbonds = set()
if t_is_acc and (q_is_don).any():
# q is donor, t is acceptor
donor_inds = q_inds_poss_hbonds[q_is_don]
donor = dfq[q_is_don]
d_arr = donor[coords[3:18]].values
s = d_arr.shape
if len(s) == 1:
m = 1
else:
m = s[0]
a_arr = np.tile(dft[coords[18:]].values, (m, 1))
X = np.hstack((d_arr, a_arr))
is_hb = is_hbond(X)
is_hb = is_hb.astype(bool)
hbonds |= set(donor_inds[is_hb])
if t_is_don and (q_is_acc).any():
# q is acceptor, t is donor
acc_inds = q_inds_poss_hbonds[q_is_acc]
acc = dfq[q_is_acc]
a_arr = acc[coords[18:]].values
s = a_arr.shape
if len(s) == 1:
m = 1
else:
m = s[0]
d_arr = np.tile(dft[coords[3:18]].values, (m, 1))
X = np.hstack((d_arr, a_arr))
is_hb = is_hbond(X)
is_hb = is_hb.astype(bool)
hbonds |= set(acc_inds[is_hb])
clashing -= hbonds
return list(clashing), list(hbonds)
def _angle_test_S_acceptor(self, dfq, dft, q_inds_poss_hbonds):
t_is_don = ~np.isnan(dft.c_D_x.values)
t_is_acc = ~np.isnan(dft.c_A1_x.values)
if ~t_is_don and ~t_is_acc: # dft is only 1 row, so .values produces a scalar
return list(q_inds_poss_hbonds), []
q_is_don = ~np.isnan(dfq.c_D_x.values)
q_is_acc = ~np.isnan(dfq.c_A1_x.values)
if (~q_is_don).all() and (~q_is_acc).all():
return list(q_inds_poss_hbonds), []
clashing = set(q_inds_poss_hbonds)
hbonds = set()
if t_is_acc and (q_is_don).any():
# q is donor, t is acceptor
donor_inds = q_inds_poss_hbonds[q_is_don]
donor = dfq[q_is_don]
d_arr = donor[coords[3:18]].values
s = d_arr.shape
if len(s) == 1:
m = 1
else:
m = s[0]
a_arr = np.tile(dft[coords[18:]].values, (m, 1))
X = np.hstack((d_arr, a_arr))
is_hb = is_hbond_S_acceptor(X)
is_hb = is_hb.astype(bool)
hbonds |= set(donor_inds[is_hb])
if t_is_don and (q_is_acc).any():
# q is acceptor, t is donor
acc_inds = q_inds_poss_hbonds[q_is_acc]
acc = dfq[q_is_acc]
a_arr = acc[coords[18:]].values
s = a_arr.shape
if len(s) == 1:
m = 1
else:
m = s[0]
d_arr = np.tile(dft[coords[3:18]].values, (m, 1))
X = np.hstack((d_arr, a_arr))
is_hb = is_hbond_S_acceptor(X)
is_hb = is_hb.astype(bool)
hbonds |= set(acc_inds[is_hb])
clashing -= hbonds
return list(clashing), list(hbonds)
def angle_test(self, q_inds_poss_hbonds, t_inds_poss_hbonds, dfq_, dft_):
q_inds_clash = []
t_inds_clash = []
q_inds_hbond = []
t_inds_hbond = []
for q_inds_poss_hbond, t_ind_poss_hbond in zip(q_inds_poss_hbonds, t_inds_poss_hbonds):
df_poss_hb_t = dft_.iloc[t_ind_poss_hbond]
df_poss_hb_q = dfq_.iloc[q_inds_poss_hbond]
q_inds_clash_, q_inds_hbond_ = self._angle_test(df_poss_hb_q, df_poss_hb_t,
q_inds_poss_hbond)
if q_inds_clash_:
q_inds_clash.append(q_inds_clash_)
t_inds_clash.append(t_ind_poss_hbond)
if q_inds_hbond_:
q_inds_hbond.append(q_inds_hbond_)
t_inds_hbond.append(t_ind_poss_hbond)
return q_inds_clash, t_inds_clash, q_inds_hbond, t_inds_hbond
def angle_test_S_acceptor(self, q_inds_poss_hbonds, t_inds_poss_hbonds, dfq_, dft_):
q_inds_clash = []
t_inds_clash = []
q_inds_hbond = []
t_inds_hbond = []
for q_inds_poss_hbond, t_ind_poss_hbond in zip(q_inds_poss_hbonds, t_inds_poss_hbonds):
df_poss_hb_t = dft_.iloc[t_ind_poss_hbond]
df_poss_hb_q = dfq_.iloc[q_inds_poss_hbond]
q_inds_clash_, q_inds_hbond_ = self._angle_test_S_acceptor(df_poss_hb_q, df_poss_hb_t,
q_inds_poss_hbond)
if q_inds_clash_:
q_inds_clash.append(q_inds_clash_)
t_inds_clash.append(t_ind_poss_hbond)
if q_inds_hbond_:
q_inds_hbond.append(q_inds_hbond_)
t_inds_hbond.append(t_ind_poss_hbond)
return q_inds_clash, t_inds_clash, q_inds_hbond, t_inds_hbond
def _find_contact_indices(self, atom_type_q, atom_type_t):
tree = self._balltrees[atom_type_q]
dft_ = self.dft_atom_type[atom_type_t]
vdw_sum = self.vdw_radii[atom_type_q] + self.vdw_radii[atom_type_t]
cc_low = vdw_sum # - self.tol
cc_high = vdw_sum + self.gap_close_contact
cutoff = wc_high = vdw_sum + self.gap_wide_contact
i, d = tree.query_radius(dft_[['c_x', 'c_y', 'c_z']].values,
r=cutoff, return_distance=True)
t_inds, q_inds, dists = self.prune_empty(d, i)
if t_inds:
D_q = atom_type_q in hbond_donor_types
A_t = atom_type_t in hbond_acceptor_types
A_q = atom_type_q in hbond_acceptor_types
D_t = atom_type_t in hbond_donor_types
if not ((D_q and A_t) or (D_t and A_q)):
return self.partition_contacts_no_hb(dists, q_inds, t_inds, cc_low, cc_high, wc_high)
# This returns q_inds_clashes, t_inds_clashes, q_inds_cc, t_inds_cc,
# q_inds_wc, t_inds_wc, q_inds_hb, t_inds_hb (hb are empty lists)
if (atom_type_q in {'n', 'p', 's'}) and (atom_type_t in {'n', 'p', 's'}):
hb_hard_cutoff = cc_low - self.overlap_hb_heavy_nn
elif (atom_type_q in {'o', 'f'}) and (atom_type_t in {'f', 'n', 'p', 's'}):
hb_hard_cutoff = cc_low - self.overlap_hb_heavy_no
elif (atom_type_t in {'o', 'f'}) and (atom_type_q in {'f', 'n', 'p', 's'}):
hb_hard_cutoff = cc_low - self.overlap_hb_heavy_no
elif (atom_type_q in {'o', 'f'}) and (atom_type_t in {'o', 'f'}):
hb_hard_cutoff = cc_low - self.overlap_hb_heavy_oo
if (atom_type_q in {'n', 'o', 'p', 's', 'f'}) and (atom_type_t in {'n', 'o', 'p', 's', 'f'}):
cc_low_hb = max(3.3, cc_low)
cc_high = max(3.6, cc_low + 0.3)
wc_high = max(4.0, cc_high + 0.4)
else:
hb_hard_cutoff = cc_low - self.overlap_hb - self.tol
cc_low_hb = max(2.5, cc_low)
cc_high = max(2.8, cc_low + 0.3)
wc_high = max(3.2, cc_high + 0.4)
q_inds_clashes, t_inds_clashes, q_inds_cc, t_inds_cc, \
q_inds_wc, t_inds_wc, q_inds_poss_hbonds_cl, t_inds_poss_hbonds_cl, \
q_inds_poss_hbonds_cc, t_inds_poss_hbonds_cc = \
self.partition_contacts_hb_hard_cutoff(dists, q_inds, t_inds, cc_low,
cc_low_hb, cc_high, wc_high,
hb_hard_cutoff)
q_inds_hbond = []
t_inds_hbond = []
if q_inds_poss_hbonds_cl:
dfq_ = self.dfq_atom_type[atom_type_q]
if atom_type_q in {'s'} or atom_type_t in {'s'}:
q_inds_clash, t_inds_clash, q_inds_hbond_cl, t_inds_hbond_cl \
= self.angle_test_S_acceptor(q_inds_poss_hbonds_cl, t_inds_poss_hbonds_cl, dfq_, dft_)
else:
q_inds_clash, t_inds_clash, q_inds_hbond_cl, t_inds_hbond_cl \
= self.angle_test(q_inds_poss_hbonds_cl, t_inds_poss_hbonds_cl, dfq_, dft_)
q_inds_clashes.extend(q_inds_clash)
t_inds_clashes.extend(t_inds_clash)
q_inds_hbond.extend(q_inds_hbond_cl)
t_inds_hbond.extend(t_inds_hbond_cl)
if q_inds_poss_hbonds_cc:
dfq_ = self.dfq_atom_type[atom_type_q]
if atom_type_q in {'s'} or atom_type_t in {'s'}:
q_inds_cc_, t_inds_cc_, q_inds_hbond_cc, t_inds_hbond_cc \
= self.angle_test_S_acceptor(q_inds_poss_hbonds_cc, t_inds_poss_hbonds_cc, dfq_, dft_)
else:
q_inds_cc_, t_inds_cc_, q_inds_hbond_cc, t_inds_hbond_cc \
= self.angle_test(q_inds_poss_hbonds_cc, t_inds_poss_hbonds_cc, dfq_, dft_)
q_inds_cc.extend(q_inds_cc_)
t_inds_cc.extend(t_inds_cc_)
q_inds_hbond.extend(q_inds_hbond_cc)
t_inds_hbond.extend(t_inds_hbond_cc)
return q_inds_clashes, t_inds_clashes, q_inds_cc, t_inds_cc, \
q_inds_wc, t_inds_wc, q_inds_hbond, t_inds_hbond
else:
return q_inds_clashes, t_inds_clashes, q_inds_cc, t_inds_cc, \
q_inds_wc, t_inds_wc, q_inds_hbond, t_inds_hbond
else:
return [[]] * 8
def extend_global_indices(self, atom_type_q, atom_type_t, q_inds, t_inds, contact_type):
q_global_inds = list()
t_global_inds = list()
dfq = self.dfq_atom_type[atom_type_q]
dft = self.dft_atom_type[atom_type_t]
for q_inds_, t_ind in zip(q_inds, t_inds):
q_global_inds.extend(dfq['num_tag'].iloc[q_inds_].values)
t_global_inds.extend([dft['num_tag'].iat[t_ind[0]]] * len(q_inds_))
self.q_global_indices.extend(q_global_inds)
self.t_global_indices.extend(t_global_inds)
self.contact_types.extend([contact_type] * len(q_global_inds))
def _find(self):
for atom_type_q in self.atom_types_dfq:
for atom_type_t in self.atom_types_dft:
q_inds_clashes, t_inds_clashes, q_inds_cc, t_inds_cc, \
q_inds_wc, t_inds_wc, q_inds_hbond, t_inds_hbond \
= self._find_contact_indices(atom_type_q, atom_type_t)
if q_inds_clashes:
self.extend_global_indices(atom_type_q, atom_type_t,
q_inds_clashes, t_inds_clashes, 'cl')
if q_inds_cc:
self.extend_global_indices(atom_type_q, atom_type_t,
q_inds_cc, t_inds_cc, 'cc')
if q_inds_wc:
self.extend_global_indices(atom_type_q, atom_type_t,
q_inds_wc, t_inds_wc, 'wc')
if q_inds_hbond:
self.extend_global_indices(atom_type_q, atom_type_t,
q_inds_hbond, t_inds_hbond, 'hb')
dfq_ = self.dfq.iloc[self.q_global_indices]
dft_ = self.dft.iloc[self.t_global_indices]
df = dfq_.reset_index(drop=True).join(dft_.reset_index(drop=True),
how='outer', lsuffix='_q', rsuffix='_t')
df.loc[:, 'contact_type'] = self.contact_types
self.df_contacts = df
def find(self):
if self.atom_types_dfq is None:
self.set_atom_types()
if not self.dfq_atom_type:
self.split_dfs_to_atom_types()
if not self._balltrees:
self.make_trees()
self._find()
class ClashVDM:
'''Do Cbeta, do non-Cbeta, combine results'''
def __init__(self, dfq, dft):
self.dfq = dfq
self.dft = dft
self.exclude = None
self.q_grouping = None
self.dfq_cb_clash_free = None
self.dfq_non_cb_clash_free = None
self.dfq_clash_free = None
self.dft_for_non_cb = None
self.dft_for_cb = None
self.dfq_non_cb = None
self.dfq_cb = None
def set_grouping(self, grouping):
self.q_grouping = grouping
def set_exclude(self, exclude):
self.exclude = exclude
def setup(self):
df = self.dft
resnum = self.exclude[0]
chain = self.exclude[1]
        #status, response, delta = self.timed_command(command,4,10)
status, response, delta = self.timed_command(command,4,60)
if status != 0:
E=4
message = "QUERY_CLEAN %i: %s => %i,%s" % \
(E, command, status, response)
Trace.log(e_errors.ERROR, message)
return ("ERROR", E, response, "", message)
else:
#Get the information from the robot.
for line in response:
if line.find("ACSSA") >= 0 or \
line.find("Cleaning Cartridge Status") >= 0 or \
line.find("Identifier") >= 0 \
or len(line) == 0:
#This is some other information.
continue
# The returned line of interest looks like:
# For ACSLS version 7:
# CLN179 1, 5,12, 8, 1 50 0 home LTO-CLNU
#
# For ACSLS version 8:
# CLN565 0, 1, 6, 0, 0 100 0 home STK2W
# get rid of extra whitespaces
tline = ' '.join(line.translate(None, string.whitespace[:5]).split())
# now it looks like:
# CLN565 0, 1, 6, 0, 0 100 0 home STK2W
# get rid of space before number in address
tline2 = tline.replace(", ", ",")
# now it looks like:
# CLN565 0,1,6,0,0 100 0 home STK2W
s_line = tline2.split(' ')
Trace.trace(21, "line %s"%(s_line,))
volume = s_line[0]
location = s_line[1]
max_usage = int(s_line[2])
current_usage = int(s_line[3])
status = s_line[4]
media_type = s_line[5]
remaining_usage = max_usage - current_usage #AML2 compatibility
clean_list.append({"volume" : volume,
"location" : location,
"max_usage" : max_usage,
"current_usage" : current_usage,
"remaining_usage" : remaining_usage,
"status" : status,
"type" : media_type,
})
#Put the list of cleaning tapes into the reply ticket.
reply['clean_list'] = clean_list
#Send the information. (success or failure)
try:
r = callback.write_tcp_obj(sock, reply)
sock.close()
if r:
Trace.log(e_errors.ERROR,
"Error calling write_tcp_obj. Callback addr. %s"
% (ticket['callback_addr'],))
except:
Trace.handle_error()
Trace.log(e_errors.ERROR,
"Callback address %s" % (ticket['callback_addr'],))
E=6
return (e_errors.NET_ERROR, E, str(sys.exc_info()[1]), "", "")
return (e_errors.OK, 0, None, "", "")
#########################################################################
# These functions are internal functions specific to STK media changer.
#########################################################################
#Query a volume.
def query(self, volume, media_type=""):
__pychecker__ = "unusednames=media_type"
# build the command, and what to look for in the response
command = "query vol %s" % (volume,)
answer_lookfor = "%s " % (volume,)
# execute the command and read the response
# efb (dec 22, 2005) - up timeout from 10 to 60 as the queries are hanging
#status,response, delta = self.timed_command(command,4,10)
status,response, delta = self.timed_command(command,4,60)
if status != 0:
E=1
msg = "QUERY %i: %s => %i,%s" % (E,command,status,response)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, response, '', msg)
# got response, parse it and put it into the standard form
answer = string.strip(response[3])
if string.find(answer, answer_lookfor, 0) != 0:
E=2
msg = "QUERY %i: %s => %i,%s" % (E,command,status,response)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, response, '', msg)
elif string.find(answer,' home ') != -1:
msg = "%s => %i,%s" % (command,status,answer)
Trace.log(e_errors.INFO, msg)
return (e_errors.OK,0,answer, 'O', msg) # occupied
elif string.find(answer,' in drive ') != -1:
msg = "%s => %i,%s" % (command,status,answer)
Trace.log(e_errors.INFO, msg)
return (e_errors.OK,0,answer, 'M', msg) # mounted
elif string.find(answer,' in transit ') != -1:
msg = "%s => %i,%s" % (command,status,answer)
Trace.log(e_errors.INFO, msg)
return (e_errors.OK,0,answer, 'T', msg) # transit
else:
E=3
msg = "QUERY %i: %s => %i,%s" % (E,command,status,response)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, answer, '', msg)
def query_drive(self,drive):
# build the command, and what to look for in the response
command = "query drive %s" % (drive,)
answer_lookfor = "%s " % (drive,)
# execute the command and read the response
# FIXME - what if this hangs?
# efb (dec 22, 2005) - up timeout from 10 to 60 as the queries are hanging
#status,response, delta = self.timed_command(command,4,10)
status,response, delta = self.timed_command(command,4,60)
if status != 0:
E=4
msg = "QUERY_DRIVE %i: %s => %i,%s" % (E,command,status,response)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, response, '', msg)
# got response, parse it and put it into the standard form
answer = string.strip(response[3])
        answer = string.replace(answer,', ',',') # easier to parse drive id
if string.find(answer, answer_lookfor,0) != 0:
E=5
msg = "QUERY_DRIVE %i: %s => %i,%s" % (E,command,status,answer)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, answer, '', msg)
elif string.find(answer,' online ') == -1:
E=6
msg = "QUERY_DRIVE %i: %s => %i,%s" % (E,command,status,answer)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, answer, '', msg)
elif string.find(answer,' available ') != -1:
msg = "%s => %i,%s" % (command,status,answer)
Trace.log(e_errors.INFO, msg)
return (e_errors.OK,0,answer, '', msg) # empty
elif string.find(answer,' in use ') != -1:
loc = string.find(answer,' in use ')
volume = string.split(answer[loc+8:])[0]
msg = "%s => %i,%s" % (command,status,answer)
Trace.log(e_errors.INFO, msg)
return (e_errors.OK,0,answer, volume, msg) # mounted and in use
else:
E=7
msg = "QUERY_DRIVE %i: %s => %i,%s" % (E,command,status,answer)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, answer, '', msg)
def display_drive(self,drive):
# build the command, and what to look for in the response
command = "display drive %s -f wwn" % (drive,)
answer_lookfor = "%s " % (drive,)
# execute the command and read the response
status,response, delta = self.timed_command(command,4,60)
if status != 0:
E=4
msg = "DISPLAY_DRIVE %i: %s => %i,%s" % (E,command,status,response)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, response, '', msg)
# got response, parse it and put it into the standard form
answer = string.strip(response[3])
if 'No records found' in answer:
E=6
msg = "DISPLAY_DRIVE %i: %s => %i,%s" % (E,command,status,response)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, response, '', msg)
        answer = string.replace(answer,', ',',') # easier to parse drive id
# the format of the answer is like:
# 2 2 1 12 50.01.04.f0.00.a2.b5.06
# convert it to what we expect
answer = ' '.join(answer.translate(None, string.whitespace[:5]).split()).replace(' ', ',',3)
if answer.find(answer_lookfor,0) != 0:
E=5
msg = "DISPLAY_DRIVE %i: %s => %i,%s" % (E,command,status,answer)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, answer, '', msg)
else:
msg=''
Trace.log(e_errors.INFO, msg)
return (e_errors.OK,0,answer, '', msg) # mounted and in use
def mount(self, volume, drive, media_type="", view_first=1, ticket = {}):
#############################################################
# ok, this is a test only - see if we can mount readonly for
# 9840 and 9940 tapes
if media_type in ('9840', '9940', '9940B'):
vol_ticket = ticket.get('vol_ticket', {})
si = vol_ticket.get('system_inhibit', ('none', 'none'))
ui = vol_ticket.get('user_inhibit', ('none', 'none'))
if enstore_functions2.is_readonly_state(si[1]) or \
enstore_functions2.is_readonly_state(ui[1]):
readonly = 1
else:
readonly = 0
else:
readonly = 0
#############################################################
# build the command, and what to look for in the response
command = "mount %s %s" % (volume,drive)
if readonly:
command = command + " readonly"
answer_lookfor = "Mount: %s mounted on " % (volume,)
# check if tape is in the storage location or somewhere else
if view_first:
status,stat,response,attrib,com_sent = self.query(volume, media_type)
if stat!=0:
E=e_errors.MC_FAILCHKVOL
msg = "MOUNT %i: %s => %i,%s" % (E,command,stat,response)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, response, "", msg)
if attrib != "O": # look for tape in tower (occupied="O")
E=e_errors.MC_VOLNOTHOME
msg = "MOUNT %i: Tape is not in home position. %s => %s,%s" % (E,command,status,response)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, response, "", msg)
# check if any tape is mounted in this drive
status,stat,response,volser,com_sent = self.query_drive(drive)
if stat!=0:
E=e_errors.MC_FAILCHKDRV
msg = "MOUNT %i: %s => %i,%s" % (E,command,stat,response)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, response, "", msg)
if volser != "": # look for any tape mounted in this drive
E=e_errors.MC_DRVNOTEMPTY
msg = "MOUNT %i: Drive %s is not empty =>. %s => %s,%s" % (E,drive,command,status,response)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, response, "", msg)
# execute the command and read the response
status,response, delta = self.timed_command(command,2,60*10)
if status != 0:
E=12
msg = "MOUNT %i: %s => %i,%s" % (E,command,status,response)
Trace.log(e_errors.ERROR, msg)
return ("ERROR", E, response, "", msg)
# got response, parse it and put it into the standard form
answer = string.strip(response[1])
if string.find(answer, answer_lookfor,0) != 0:
# during cap operations acsls returns an error message containing the information that the volume was actually mounted
# if this is a case, process it
compared = 0
try:
Trace.log(e_errors.INFO, "Ckecking ASCLS message %s %s"%(response, answer_lookfor)) # remove after debugging AM
for l in response:
if answer_lookfor in l:
# ok the volume is actually mounted
# but in what drive?
                        requested_drive=drive.split(',')
                        l=l.replace(',',' ')
                        ar=l.split()
                        Trace.log(e_errors.INFO, "Requested Drive %s. Comparing to %s"%(requested_drive, ar)) # remove after debugging AM
                        same_drive = 0
                        for i in range(len(requested_drive)):
                            if int(requested_drive[-(i+1)]) != int(ar[-(i+1)]):
break
else:
same_drive = 1
if same_drive:
compared = 1
Trace.log(e_errors.INFO, "The error was | |
<filename>postprocessedtracer/frame.py
from dataclasses import dataclass, field
from typing import Tuple, Callable
import numba as nb
import numpy as np
def _convert_type(x):
if isinstance(x, np.integer):
return int(x)
if isinstance(x, np.floating):
return float(x)
if isinstance(x, bytes):
return x.decode('ascii', 'replace')
if isinstance(x, np.ndarray):
if issubclass(np.obj2sctype(x.dtype), bytes):
return _convert_type(x.tolist())
return x.astype(np.obj2sctype(x.dtype))
if isinstance(x, list):
return list(map(_convert_type, x))
return x
@dataclass(init=False, order=True)
class Frame:
filename: str = field(compare=False)
time: float = field(repr=False)
header: dict = field(compare=False, repr=False)
data: dict = field(compare=False, repr=False)
num_ghost: int = field(compare=False, repr=False)
num_dimension: int = field(compare=False, repr=False)
boundaries: Tuple[Tuple[str, str], Tuple[str, str], Tuple[str, str]] = field(compare=False, repr=False)
mesh_position_to_fractional_position_root: Callable = field(compare=False, repr=False)
mesh_position_to_fractional_position_meshblock: Callable = field(compare=False, repr=False)
mesh_position_to_meshblock_id: Callable = field(compare=False, repr=False)
mesh_position_to_global_indices: Callable = field(compare=False, repr=False)
mesh_position_to_local_indices: Callable = field(compare=False, repr=False)
global_indices_to_mesh_position: Callable = field(compare=False, repr=False)
velocity_to_derivatives: Callable = field(compare=False, repr=False)
interpolate_cell_centered: Callable = field(compare=False, repr=False)
apply_boundaries: Callable = field(compare=False, repr=False)
get_finite_volume: Callable = field(compare=False, repr=False)
def __init__(self, filename, boundaries=None):
self.filename = filename
if boundaries is None:
self.boundaries = (('none',) * 2,) * 3
else:
try:
(ix1, ox1), (ix2, ox2), (ix3, ox3) = boundaries
self.boundaries = ((str(ix1), str(ox1)), (str(ix2), str(ox2)), (str(ix3), str(ox3)))
except ValueError:
raise ValueError('boundaries has to be in the form: ((ix1, ox1), (ix2, ox2), (ix3, ox3))')
self._load_header()
self.time = self.header['Time']
self.data = {}
self._prepare_functions()
def load(self, quantities=None):
import h5py
if quantities is None:
quantities = list(self.header['VariableNames'])
num_variables = self.header['NumVariables']
dataset_prefix = np.cumsum(num_variables)
dataset_offsets = dataset_prefix - dataset_prefix[0]
with h5py.File(self.filename, 'r') as f:
dataset_names = self.header['DatasetNames']
variable_names = self.header['VariableNames']
for q in quantities:
if q in self.data:
continue
if q not in variable_names:
raise RuntimeError(f'Quantity "{q}" not found, '
f'available quantities include {variable_names}')
variable_index = variable_names.index(q)
dataset_index = np.searchsorted(dataset_prefix, variable_index)
variable_index -= dataset_offsets[dataset_index]
self.data[q] = _convert_type(f[dataset_names[dataset_index]][variable_index])
def unload(self):
self.data = {}
def _load_header(self):
import h5py
with h5py.File(self.filename, 'r') as f:
self.header = {}
for key in f.attrs:
self.header[key] = _convert_type(f.attrs[key])
dataset_names = self.header['DatasetNames']
for key in f.keys():
if key in dataset_names:
continue
self.header[key] = _convert_type(f[key][:])
def _prepare_functions(self):
nx_root = self.header['RootGridSize']
nx_meshblock = self.header['MeshBlockSize']
maxlevel = self.header['MaxLevel']
llocs = self.header['LogicalLocations']
levels = self.header['Levels']
x1f = self.header['x1f']
x2f = self.header['x2f']
x3f = self.header['x3f']
x1v = self.header['x1v']
x2v = self.header['x2v']
x3v = self.header['x3v']
nx1rt, nx2rt, nx3rt = nx_root
x1minrt, x1maxrt, x1ratrt = self.header['RootGridX1']
x2minrt, x2maxrt, x2ratrt = self.header['RootGridX2']
x3minrt, x3maxrt, x3ratrt = self.header['RootGridX3']
x1ratnxrt, x2ratnxrt, x3ratnxrt = x1ratrt ** nx1rt, x2ratrt ** nx2rt, x3ratrt ** nx3rt
nx1mb, nx2mb, nx3mb = nx_meshblock
x1minmb, x1maxmb, x1ratmb = x1f[:, 0], x1f[:, -1], x1ratrt ** (1 / (1 << levels))
x2minmb, x2maxmb, x2ratmb = x2f[:, 0], x2f[:, -1], x2ratrt ** (1 / (1 << levels))
x3minmb, x3maxmb, x3ratmb = x3f[:, 0], x3f[:, -1], x3ratrt ** (1 / (1 << levels))
x1ratnxmb, x2ratnxmb, x3ratnxmb = x1ratmb ** nx1mb, x2ratmb ** nx2mb, x3ratmb ** nx3mb
coordinates = self.header['Coordinates']
ndim = int(np.sum(nx_root > 1))
self.num_dimension = ndim
(ix1, ox1), (ix2, ox2), (ix3, ox3) = self.boundaries
        # detect the number of ghost zones; assume the first meshblock has the logical location (0, 0, 0)
ngh = np.searchsorted(x1v[0], x1minrt)
self.num_ghost = ngh
# calculate the numbers of finest meshblock needed
nmb = [nx_root[d] // (nx_meshblock[d] - 2 * ngh) << maxlevel if nx_root[d] > 1 else 1 for d in range(3)]
# assign meshblock ids to table
mbtable = np.empty(nmb, dtype=int)
for mb in range(self.header['NumMeshBlocks']):
rngs = tuple(slice(llocs[mb, d] << levels[mb], llocs[mb, d] + 1 << levels[mb]) if nx_root[d] > 1 else 0
for d in range(3))
mbtable[tuple(rngs)] = mb
# start preparing functions...
# given mesh position, return fractional position in root grid
@nb.njit(fastmath=True)
def mesh_position_to_fractional_position_root(x1_, x2_, x3_):
if x1ratrt == 1.0:
frac1_ = (x1_ - x1minrt) / (x1maxrt - x1minrt)
else:
frac1_ = np.log2(1 - (x1_ - x1minrt) / (x1maxrt - x1minrt) * (1 - x1ratnxrt)) / np.log2(x1ratnxrt)
if x2ratrt == 1.0:
frac2_ = (x2_ - x2minrt) / (x2maxrt - x2minrt)
else:
frac2_ = np.log2(1 - (x2_ - x2minrt) / (x2maxrt - x2minrt) * (1 - x2ratnxrt)) / np.log2(x2ratnxrt)
if x3ratrt == 1.0:
frac3_ = (x3_ - x3minrt) / (x3maxrt - x3minrt)
else:
frac3_ = np.log2(1 - (x3_ - x3minrt) / (x3maxrt - x3minrt) * (1 - x3ratnxrt)) / np.log2(x3ratnxrt)
return frac1_, frac2_, frac3_
self.mesh_position_to_fractional_position_root = mesh_position_to_fractional_position_root
# given mesh position, return fractional position in meshblock
@nb.njit(fastmath=True)
def mesh_position_to_fractional_position_meshblock(mb_, x1_, x2_, x3_):
x1min_, x1max_, x1rat_ = x1minmb[mb_], x1maxmb[mb_], x1ratmb[mb_]
x2min_, x2max_, x2rat_ = x2minmb[mb_], x2maxmb[mb_], x2ratmb[mb_]
x3min_, x3max_, x3rat_ = x3minmb[mb_], x3maxmb[mb_], x3ratmb[mb_]
x1ratnx_, x2ratnx_, x3ratnx_ = x1ratnxmb[mb_], x2ratnxmb[mb_], x3ratnxmb[mb_]
if x1rat_ == 1.0:
frac1_ = (x1_ - x1min_) / (x1max_ - x1min_)
else:
frac1_ = np.log2(1 - (x1_ - x1min_) / (x1max_ - x1min_) * (1 - x1ratnx_)) / np.log2(x1ratnx_)
if x2rat_ == 1.0:
frac2_ = (x2_ - x2min_) / (x2max_ - x2min_)
else:
                frac2_ = np.log2(1 - (x2_ - x2min_) / (x2max_ - x2min_) * (1 - x2ratnx_)) / np.log2(x2ratnx_)
if x3rat_ == 1.0:
frac3_ = (x3_ - x3min_) / (x3max_ - x3min_)
else:
frac3_ = np.log2(1 - (x3_ - x3min_) / (x3max_ - x3min_) * (1 - x3ratnx_)) / np.log2(x3ratnx_)
return frac1_, frac2_, frac3_
self.mesh_position_to_fractional_position_meshblock = mesh_position_to_fractional_position_meshblock
# given mesh position, return meshblock id of the meshblock that contains the position
@nb.njit(fastmath=True)
def mesh_position_to_meshblock_id(x1_, x2_, x3_):
frac1_, frac2_, frac3_ = mesh_position_to_fractional_position_root(x1_, x2_, x3_)
mb1_ = min(max(0, int(frac1_ * mbtable.shape[0])), mbtable.shape[0] - 1)
mb2_ = min(max(0, int(frac2_ * mbtable.shape[1])), mbtable.shape[1] - 1)
mb3_ = min(max(0, int(frac3_ * mbtable.shape[2])), mbtable.shape[2] - 1)
return mbtable[mb1_, mb2_, mb3_]
self.mesh_position_to_meshblock_id = mesh_position_to_meshblock_id
# given mesh position, return indices in root grid
@nb.njit(fastmath=True)
def mesh_position_to_global_indices(x1_, x2_, x3_):
frac1_, frac2_, frac3_ = mesh_position_to_fractional_position_root(x1_, x2_, x3_)
gidx1_ = frac1_ * nx1rt
gidx2_ = frac2_ * nx2rt
gidx3_ = frac3_ * nx3rt
return gidx1_, gidx2_, gidx3_
self.mesh_position_to_global_indices = mesh_position_to_global_indices
# given mesh position, return indices in root grid
@nb.njit(fastmath=True)
def mesh_position_to_local_indices(mb_, x1_, x2_, x3_):
frac1_, frac2_, frac3_ = mesh_position_to_fractional_position_meshblock(mb_, x1_, x2_, x3_)
lidx1_ = frac1_ * nx1mb
lidx2_ = frac2_ * nx2mb
lidx3_ = frac3_ * nx3mb
return lidx1_, lidx2_, lidx3_
self.mesh_position_to_local_indices = mesh_position_to_local_indices
# given indices in root grid, return mesh position
@nb.njit(fastmath=True)
def global_indices_to_mesh_position(gidx1_, gidx2_, gidx3_):
if x1ratrt == 1.0:
x1_ = x1minrt + (x1maxrt - x1minrt) * gidx1_ / nx1rt
else:
x1_ = x1minrt + (x1maxrt - x1minrt) * (1 - x1ratrt ** gidx1_) / (1 - x1ratnxrt)
if x2ratrt == 1.0:
x2_ = x2minrt + (x2maxrt - x2minrt) * gidx2_ / nx2rt
else:
x2_ = x2minrt + (x2maxrt - x2minrt) * (1 - x2ratrt ** gidx2_) / (1 - x2ratnxrt)
if x3ratrt == 1.0:
x3_ = x3minrt + (x3maxrt - x3minrt) * gidx3_ / nx3rt
else:
x3_ = x3minrt + (x3maxrt - x3minrt) * (1 - x3ratrt ** gidx3_) / (1 - x3ratnxrt)
return x1_, x2_, x3_
self.global_indices_to_mesh_position = global_indices_to_mesh_position
# given mesh positions and velocities, return derivatives in mesh position
@nb.njit(fastmath=True)
def velocity_to_derivatives(x1_, x2_, _x3_, v1_, v2_, v3_):
if coordinates == 'cartesian':
dx1_ = v1_
dx2_ = v2_
dx3_ = v3_
elif coordinates == 'cylindrical':
dx1_ = v1_
dx2_ = v2_ / x1_
dx3_ = v3_
elif coordinates == 'spherical_polar':
dx1_ = v1_
dx2_ = v2_ / x1_
dx3_ = v3_ / (x1_ * np.sin(x2_))
else:
raise RuntimeError('Unrecognized coordinates: ' + coordinates)
return dx1_, dx2_, dx3_
self.velocity_to_derivatives = velocity_to_derivatives
# given mesh position, return the interpolated cell-centered quantities
@nb.njit(fastmath=True)
def interpolate_cell_centered(quantities_, mb_, x1_, x2_, x3_):
if ndim == 1:
w_ = np.ones((1, 1, 2), dtype=np.float64)
elif ndim == 2:
w_ = np.ones((1, 2, 2), dtype=np.float64)
elif ndim == 3:
w_ = np.ones((2, 2, 2), dtype=np.float64)
else:
raise RuntimeError('Unrecognized number of dimension: ' + str(ndim))
lidx1_, lidx2_, lidx3_ = mesh_position_to_local_indices(mb_, x1_, x2_, x3_)
l1s_ = int(lidx1_)
if x1_ < x1v[mb_, l1s_]:
w1_ = 0.5 + 0.5 * (x1_ - x1f[mb_, l1s_]) / (x1v[mb_, l1s_] - x1f[mb_, l1s_])
l1s_, l1e_ = l1s_ - 1, l1s_ + 1
else:
w1_ = 0.5 * (x1_ - x1v[mb_, l1s_]) / (x1f[mb_, l1s_ + 1] - x1v[mb_, l1s_])
l1s_, l1e_ = l1s_, | |
high
## ($\lfloor \texttt{OH}_k(\mathbf{M}_{(j - 1) / 2}) / 2^{64}\rfloor$)
## 64-bit halves from `OH`'s outputs.
## The polynomial hash is parameterised on a single multiplier $f \in
## \mathbb{F}$ and evaluates to
##
## $$
## CW_f(y) = \left(\sum_{j=0}^{d - 1} y_j \cdot f^{d - j}\right) \bmod 2^{61} - 1,
## $$
##
## a polynomial of degree $d = 2n$, twice the number of `OH` blocks.
##
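## As a concrete (and deliberately slow) illustration of the formula
## above -- not the production evaluation strategy, which works in a
## larger ring for speed -- the polynomial can be evaluated with
## Horner's rule; the helper name below is ours, not part of UMASH:
def _poly_cw_sketch(f, ys, p=(1 << 61) - 1):
    """Evaluate sum_j ys[j] * f**(d - j) mod p, where d = len(ys)."""
    acc = 0
    for y in ys:
        acc = ((acc + y) * f) % p
    return acc
##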
## The last step is a finalizer that reversibly mixes the mod $2^{64}
## - 8$ polynomial hash value to improve its distribution and pass
## SMHasher.
##
## # Where do collisions come from?
##
## For strings of 8 or fewer bytes, UMASH uses a different
## parameterised mixing routine for each input size $s \in [0, 8].$
## Each of these routines is invertible, and the random parameter
## ensures any input value can be mapped to any output value with
## probability $2^{-64}$ (i.e., they're universal hash functions). This
## means the short-string mixers never collide values of the same
## length, and otherwise collide with probability
## $\varepsilon_{\textrm{short}} \approx 2^{-64}.$
##
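## As an illustration of that structure -- this is *not* UMASH's actual
## short-input routine, merely a generic example of an invertible,
## parameterised mixer -- one can xor the input with a random 64-bit
## parameter and then apply a fixed bijection, e.g. a xorshift-multiply
## round; the function and constant below are illustrative assumptions:
_MASK64 = (1 << 64) - 1

def _short_mix_sketch(x, k):
    """Invertible 64-bit mixer parameterised by a random key k."""
    v = (x ^ k) & _MASK64
    v ^= v >> 33                              # xorshift: invertible for shifts >= 32
    v = (v * 0xFF51AFD7ED558CCD) & _MASK64    # odd multiplier: invertible mod 2**64
    v ^= v >> 33
    return v
##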
## For strings of 9 or more bytes, we will show that the second-level
## Carter-Wegman polynomial quickly becomes the dominant source of
## collisions: each block of 256 bytes (except the last, which may be
## short) is compressed to 16 bytes, which increases the polynomial's
## degree by *two*, one for each 64-bit half of the compressed output.
##
## The polynomial is in $\mathbb{F} = \mathbb{Z}/(2^{61} - 1)\mathbb{Z},$
## so the collision probability for two polynomials of degree at most
## $d$ is $\approx d \cdot 2^{-61}$, i.e., $\lceil s / 256\rceil 2^{-60}$
## for strings of $s$ or fewer bytes.
##
## We now have to show that the probability of collision when
## compressing two blocks, a constant, is much smaller than
## the polynomial's, for reasonably sized $s$ (asymptotically,
## that clearly holds).
##
## Let's do so by casting the first-level block compressor as the XOR
## composition of independently sampled mixers, one for each 16-byte
## chunk. For any block, the last mixer is a member of the [`ENH` family](https://eprint.iacr.org/2004/319.pdf#page=4),
## and takes into account both the last 16-byte chunk, and the original
## (pre-extension) size of the block in bytes.
##
## This ENH mixer is $2^{-64}-$almost-universal: the probability that
## two different tuples $(\texttt{chunk}, \texttt{size})$ mix to
## the same value is at most $2^{-64}$.
##
## All other chunks are mixed with [functions from the `PH` family](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.105.9929&rep=rep1&type=pdf#page=3),
## which is $2^{-64}$-almost-XOR-universal: not only will two
## different chunks collide with probability at most $2^{-64}$,
## but, in fact, the bitwise XOR of their mixed values will not take
## any specific value with probability greater than $2^{-64}$.
##
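## To make the `PH` structure concrete, here is a pure-Python sketch of
## one such mixer over a 16-byte chunk, taking the chunk and key as
## pairs of 64-bit integers.  Real implementations use carry-less
## multiply instructions (CLMUL/PMULL); the helper names are ours:
def _clmul64_sketch(a, b):
    """Carry-less (GF(2)) multiplication of two 64-bit values -> 128 bits."""
    acc = 0
    for i in range(64):
        if (b >> i) & 1:
            acc ^= a << i
    return acc

def _ph_sketch(m_lo, m_hi, k_lo, k_hi):
    """PH-style mixer: xor the chunk with the key, then carry-less multiply."""
    return _clmul64_sketch(m_lo ^ k_lo, m_hi ^ k_hi)
##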
## We can now show that xor-ing together the output of these mixers
## is $2^{-64}-$almost-universal, by induction on the number of
## chunks. Clearly, that's the case for blocks of one chunk: the ENH
## mixer is $2^{-64}-$almost-universal. Assume two blocks $x$ and $y$
## of $n > 1$ chunks differ. If their first chunk is identical, they
## only collide if xoring the mixers for the remaining chunks (and
## length tag) collides, and the induction hypothesis says that
## happens with probability at most $2^{-64}.$ If their first chunk
## differs, they'll be mixed with a `PH` function $g$, and they will
## only collide if $g(x_1) \oplus g(y_1)$ matches the difference
## between the hash for the remainder of the chunks. We know `PH` is
## $2^{-64}-$almost-XOR-universal, so that happens with probability at
## most $2^{-64}.$
##
## If the blocks are identical except for the length tag, the inputs
## to the ENH mixer still differ, so the proof of
## $2^{-64}-$almost-universality holds.
##
## Finally, if one block contains more chunks than the other, the two
## blocks only collide if the longer block's final `ENH` hash matches
## exactly the difference between the two compressed outputs so far.
## This happens for only one `NH` value $x$, and any one value
## occurs with probability at most $2^{-63}$ ($x=0$, the worst case).
## Collisions thus occur between blocks with different chunk counts
## with probability at most $2^{-63}$.
##
## In all cases, the probability of collisions between different blocks
## is at most $2^{-63}.$ However, we feed the 128-bit compressed output
## to the polynomial hash as two values in $\mathbb{F} = \mathbb{Z}/(2^{61} - 1)\mathbb{Z},$
## which loses less than 7 bits of entropy. In short, the first-level
## block compressor introduces collisions with probability less than
## $2^{-56}.$
##
## The second-level polynomial hash starts with collision probability
## $\approx 2^{-60},$ but, already at 16 blocks (4096 bytes), we find
## $d = 32,$ and thus a collision probability $\varepsilon_{poly} <
## 2^{-56};$ let's conservatively bump that to $\varepsilon < 2^{-55}$
## to take the block compressor's collisions into account. Longer
## strings will follow the same progression, so we can claim a
## collision probability $\varepsilon < \lceil s/4096\rceil \cdot 2^{-55}.$
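##
## As a quick numeric check of that bound (the numbers here are
## illustrative, not part of the original analysis): a 1 MiB string
## spans ceil(2**20 / 4096) = 256 four-kilobyte units, so the bound
## evaluates to 256 * 2**-55 = 2**-47, roughly 7.1e-15.
def _collision_bound_sketch(s_bytes):
    """Upper bound sketch: ceil(s / 4096) * 2**-55."""
    return -(-s_bytes // 4096) * 2.0 ** -55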
##
## # What if that's not enough?
##
## We could use a Toeplitz extension to reuse most of the random
## parameters. However, we don't actually need double the
## entropy: in practice, anything stronger than $\approx 2^{-70}$ is
## dominated by hardware failure, incinerated datacenters, or war.
##
## The plan is to apply nearly-reversible *public* shufflers $xs_i$ to
## each `PH` output, and to generate an additional chunk for each
## block by xoring together the chunks and the corresponding `PH`
## parameters. In other words, we will derive an additional
## "checksum" chunk for each block with $\bigoplus m_i \oplus k_i.$
## This checksum ensures that any two blocks that differ now differ in
## at least two chunks, and is mixed with
## $\mathtt{PH}_{\mathrm{checksum}}$ (i.e., parameterised with an
## independently sampled 128-bit value) before xoring it in the
## secondary compressor's output.
##
## The shuffler $xs$ is the identity for the `ENH` value, and for the
## checksum `PH` mixed value. For the last `PH` value in a block of $n$ chunks,
## $xs_{n - 1}(x) = x \texttt{<<} 1,$ where the bit shift is computed
## independently over $x$'s two 64-bit halves. For other `PH`
## values, $xs_{n - i}(x) = (x \texttt{<<} 1) \oplus (x \texttt{<<} i).$
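##
## Reading that description literally, a sketch of the shuffler could
## look as follows; the 128-bit value is handled as two 64-bit halves,
## and the indexing convention (i = 1 for the last `PH` value) is our
## assumption:
_MASK64 = (1 << 64) - 1  # re-stated so the sketch stands alone

def _shift_halves_sketch(x, s):
    """Shift a 128-bit value left by s bits, independently in each 64-bit half."""
    lo, hi = x & _MASK64, x >> 64
    return (((hi << s) & _MASK64) << 64) | ((lo << s) & _MASK64)

def _xs_sketch(i, x):
    """Public shuffler xs applied to the i-th `PH` output from the end."""
    if i == 1:                     # last PH value: x << 1 on each half
        return _shift_halves_sketch(x, 1)
    return _shift_halves_sketch(x, 1) ^ _shift_halves_sketch(x, i)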
##
## What's the quality of this secondary compressor?
##
## The $xs$ shufflers are *nearly* reversible: they only lose two bits to
## the $\texttt{<<} 1$ (a plain xor-shift is fully reversible). The
## composition of $xs$ and `PH` is thus
## $2^{-62}-$almost-XOR-universal. The checksum's plain `PH` is
## $2^{-64}-$almost-XOR-universal, and the `ENH` mixer is still
## $2^{-64}-$almost-universal (and still collides blocks of different
## length with probability at most $2^{-63}$).
##
## After accounting for the bits lost when converting each 64-bit half
## to $\mathbb{F}$ for the polynomial hash, the new block compressor
## collides different blocks with probability at most $2^{-55}.$ Combined
## with an independently sampled second-level polynomial string hash,
## we find an end-to-end collision probability
## $\varepsilon_{\mathrm{secondary}} < \lceil s/8192\rceil \cdot 2^{-54}.$
## For large enough strings, the new secondary hash is just as good as
## the original UMASH.
##
## However, the real question is what's the probability of collision
## for the secondary UMASH hash when the original collides? The two
## hashes clearly aren't independent, since the secondary hash reuses
## *all* the `PH` and `ENH` outputs, and merely shuffles some of them
## before xoring with each other and with the new checksum chunk's own
## `PH` value.
##
## In the common case, the collision in the primary UMASH is
## introduced by the polynomial hash (i.e., the block compressor
## doesn't collide). In that case, the secondary UMASH's polynomial
## hash is fully independent, so the probability of a collision
## in the secondary hash is still $\varepsilon_{\mathrm{secondary}} < \lceil s/8192\rceil \cdot 2^{-54}.$
##
## Now, what's the probability that both the secondary block compressor
## collides when the primary compressor already collides?
##
## When the derived checksums differ, `PH`'s
## $2^{-64}-$almost-XOR-universality means that the secondary compressor
## collides with probability at most $2^{-64},$ regardless of what
## happened to the primary compressor.
##
## For blocks of different chunk counts, the checksums match with
## probability at most $2^{-128}$: the checksums $\bigoplus m_i
## \oplus k_i$ include different random 128-bit parameters $k_i$ (the longer
## block includes more random parameters). This leaves
## a collision probability less than $2^{-63}$ for the secondary
## compressor, even when the primary compressor collides.
##
## Otherwise, we have two blocks with identical chunk count $n$, and
## | |
def lisp_get_any_translated_port ( ) :
for I11i111 in lisp_db_list :
for IIiO0Ooo in I11i111 . rloc_set :
if ( IIiO0Ooo . translated_rloc . is_null ( ) ) : continue
return ( IIiO0Ooo . translated_port )
return ( None )
def lisp_get_any_translated_rloc ( ) :
for I11i111 in lisp_db_list :
for IIiO0Ooo in I11i111 . rloc_set :
if ( IIiO0Ooo . translated_rloc . is_null ( ) ) : continue
return ( IIiO0Ooo . translated_rloc )
return ( None )
def lisp_get_all_translated_rlocs ( ) :
OooOo = [ ]
for I11i111 in lisp_db_list :
for IIiO0Ooo in I11i111 . rloc_set :
if ( IIiO0Ooo . is_rloc_translated ( ) == False ) : continue
o0o0O00 = IIiO0Ooo . translated_rloc . print_address_no_iid ( )
OooOo . append ( o0o0O00 )
return ( OooOo )
def lisp_update_default_routes ( map_resolver , iid , rtr_list ) :
OOOoOO = ( os . getenv ( "LISP_RTR_BEHIND_NAT" ) != None )
oOoO0 = { }
for oOOoo0O00 in rtr_list :
if ( oOOoo0O00 == None ) : continue
o0o0O00 = rtr_list [ oOOoo0O00 ]
if ( OOOoOO and o0o0O00 . is_private_address ( ) ) : continue
oOoO0 [ oOOoo0O00 ] = o0o0O00
rtr_list = oOoO0
iIi1i = [ ]
for oOo00Oo0o00oo in [ LISP_AFI_IPV4 , LISP_AFI_IPV6 , LISP_AFI_MAC ] :
if ( oOo00Oo0o00oo == LISP_AFI_MAC and lisp_l2_overlay == False ) : break
OOoOOoo = lisp_address ( oOo00Oo0o00oo , "" , 0 , iid )
OOoOOoo . make_default_route ( OOoOOoo )
Iii1 = lisp_map_cache . lookup_cache ( OOoOOoo , True )
if ( Iii1 ) :
if ( Iii1 . checkpoint_entry ) :
lprint ( "Updating checkpoint entry for {}" . format ( green ( Iii1 . print_eid_tuple ( ) , False ) ) )
elif ( Iii1 . do_rloc_sets_match ( rtr_list . values ( ) ) ) :
continue
Iii1 . delete_cache ( )
iIi1i . append ( [ OOoOOoo , "" ] )
ii1I1 = lisp_address ( oOo00Oo0o00oo , "" , 0 , iid )
ii1I1 . make_default_multicast_route ( ii1I1 )
o0OiiI1iiI11 = lisp_map_cache . lookup_cache ( ii1I1 , True )
if ( o0OiiI1iiI11 ) : o0OiiI1iiI11 = o0OiiI1iiI11 . source_cache . lookup_cache ( OOoOOoo , True )
if ( o0OiiI1iiI11 ) : o0OiiI1iiI11 . delete_cache ( )
iIi1i . append ( [ OOoOOoo , ii1I1 ] )
if ( len ( iIi1i ) == 0 ) : return
iii1Ii1i1i1I = [ ]
for iI11I1I in rtr_list :
oo00OOo000o0 = rtr_list [ iI11I1I ]
IIiO0Ooo = lisp_rloc ( )
IIiO0Ooo . rloc . copy_address ( oo00OOo000o0 )
IIiO0Ooo . priority = 254
IIiO0Ooo . mpriority = 255
IIiO0Ooo . rloc_name = "RTR"
iii1Ii1i1i1I . append ( IIiO0Ooo )
for OOoOOoo in iIi1i :
Iii1 = lisp_mapping ( OOoOOoo [ 0 ] , OOoOOoo [ 1 ] , iii1Ii1i1i1I )
Iii1 . mapping_source = map_resolver
Iii1 . map_cache_ttl = LISP_MR_TTL * 60
Iii1 . add_cache ( )
lprint ( "Add {} to map-cache with RTR RLOC-set: {}" . format ( green ( Iii1 . print_eid_tuple ( ) , False ) , rtr_list . keys ( ) ) )
iii1Ii1i1i1I = copy . deepcopy ( iii1Ii1i1i1I )
return
def lisp_process_info_reply ( source , packet , store ) :
i1iii11iiiI1I = lisp_info ( )
<reponame>flare561/berry
# -*- coding: utf-8 -*-
import HTMLParser
import random
import requests
import datetime
import socket
import oembed
import urllib2
import urllib
import threading
import functools
import lxml.html
import lxml.etree as etree
from lxml import html
import wikipedia as wiki
import re
import arrow
import string
import romkan
from googletrans import Translator
from urlparse import urlparse
from evaluate_function import solve_equation
def register(tag, value):
def wrapped(fn):
@functools.wraps(fn)
def wrapped_f(*args, **kwargs):
return fn(*args, **kwargs)
setattr(wrapped_f, tag, value)
return wrapped_f
return wrapped
def is_str_allowed(str, bannedwords):
for pattern in bannedwords:
escapedv = re.escape(pattern)
escapedv = escapedv.replace('\\*', '.*')
matches = re.search(escapedv, str)
if matches:
return False
return True
def is_all_str_allowed(strs, bannedwords):
for str in strs:
if not is_str_allowed(str, bannedwords):
return False
return True
class commands:
def __init__(self, send_message, send_action, banned_words, config):
self.send_message = send_message
self.config = config
self.send_action = send_action
self.banned_words = banned_words
def regex_yt(self, event):
ytmatch = re.compile(
"https?:\/\/(?:[0-9A-Z-]+\.)?(?:youtu\.be\/|youtube\.com\S*[^\w\-\s"
"])([\w\-]{11})(?=[^\w\-]|$)(?![?=&+%\w]*(?:['\"][^<>]*>|<\/a>))[?="
"&+%\w-]*",
flags=re.I)
matches = ytmatch.findall(event.message)
for x in matches:
try:
t = requests.get(
'https://www.googleapis.com/youtube/v3/videos',
params=dict(
part='statistics,contentDetails,snippet',
fields='items/snippet/title,'
'items/snippet/channelTitle,'
'items/contentDetails/duration,'
'items/statistics/viewCount,'
'items/statistics/likeCount,'
'items/statistics/dislikeCount,'
'items/snippet/publishedAt',
maxResults='1',
key=self.config['googleKey'],
id=x))
t = t.json()['items'][0]
title = t['snippet']['title']
uploader = t['snippet']['channelTitle']
viewcount = t['statistics']['viewCount']
timediff = arrow.get(t['snippet']['publishedAt']).humanize()
if 'likeCount' in t['statistics'] and 'dislikeCount' in t['statistics']:
likes = float(t['statistics']['likeCount'])
dislikes = float(t['statistics']['dislikeCount'])
if (dislikes > 0):
rating = str(int((likes /
(likes + dislikes)) * 100)) + '%'
elif dislikes == 0 and likes == 0:
rating = 'unrated'
else:
rating = "100%"
else:
rating = 'unrated'
durationregex = re.compile(
'PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?', re.I)
matches = durationregex.findall(
t['contentDetails']['duration'])
if matches:
matches = matches[0]
hours = int(matches[0]) if matches[0] != '' else 0
minutes = int(matches[1]) if matches[1] != '' else 0
seconds = int(matches[2]) if matches[2] != '' else 0
duration = str(
datetime.timedelta(
hours=hours, minutes=minutes, seconds=seconds))
else:
duration = 'Ongoing'
viewcount = format(int(viewcount), ',')
self.send_message(event.respond,
u'{} | {} | {} | {} | {} | {}'.format(
title, uploader, viewcount, timediff,
rating, duration).encode(
'utf-8', 'replace'))
except:
raise
def command_yt(self, event):
'''Usage: ~yt <terms> Used to search youtube with the given terms'''
try:
j = requests.get(
'https://www.googleapis.com/youtube/v3/search',
params=dict(
part='snippet',
fields='items/id',
safeSearch='none',
maxResults='1',
key=self.config['googleKey'],
type='video',
q=event.params)).json()
vidid = j['items'][0]['id']['videoId']
t = requests.get(
'https://www.googleapis.com/youtube/v3/videos',
params=dict(
part='statistics,contentDetails,snippet',
fields='items/snippet/title,'
'items/snippet/channelTitle,'
'items/contentDetails/duration,'
'items/statistics/viewCount,'
'items/statistics/likeCount,'
'items/statistics/dislikeCount,'
'items/snippet/publishedAt',
maxResults='1',
key=self.config['googleKey'],
id=vidid)).json()['items'][0]
title = t['snippet']['title']
uploader = t['snippet']['channelTitle']
viewcount = t['statistics']['viewCount']
timediff = arrow.get(t['snippet']['publishedAt']).humanize()
if 'likeCount' in t['statistics'] and 'dislikeCount' in t['statistics']:
likes = float(t['statistics']['likeCount'])
dislikes = float(t['statistics']['dislikeCount'])
if (dislikes > 0):
rating = str(int((likes / (likes + dislikes)) * 100)) + '%'
else:
rating = "100%"
else:
rating = 'unrated'
durationregex = re.compile('PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?',
re.I)
matches = durationregex.findall(t['contentDetails']['duration'])[0]
hours = int(matches[0]) if matches[0] != '' else 0
minutes = int(matches[1]) if matches[1] != '' else 0
seconds = int(matches[2]) if matches[2] != '' else 0
duration = str(
datetime.timedelta(
hours=hours, minutes=minutes, seconds=seconds))
viewcount = format(int(viewcount), ',')
self.send_message(
event.respond,
u'https://youtu.be/{} > {} | {} | {} | {} | {} | {}'.format(
vidid, title, uploader, viewcount, timediff, rating,
duration).encode('utf-8', 'replace'))
except:
self.send_message(event.respond, "No results")
raise
def command_g(self, event):
'''Usage: ~g <terms> Used to search google with the given terms'''
try:
t = requests.get(
'https://www.googleapis.com/customsearch/v1',
params=dict(
q=event.params,
cx=self.config['googleengine'],
key=self.config['googleKey'],
safe='off')).json()
index = 0
while (len(t['items']) > index + 1 and not is_all_str_allowed([
t['items'][index]['title'], t['items'][index]['link']
], self.banned_words)):
index += 1
t = t['items'][index]
self.send_message(event.respond, u'{}: {}'.format(
t['title'], t['link']).encode('utf-8', 'replace'))
except:
self.send_message(event.respond, "No results")
raise
def command_tvtropes(self, event):
'''Usage: ~tvtropes <terms> Searches TvTropes for a given trope.'''
event.params = 'site:tvtropes.org ' + event.params
self.command_g(event)
@register('nsfw', True)
def command_rande621(self, event):
'''Usage: ~rande621 <tags> Used to search e621.net for a random picture with the given tags'''
try:
j = requests.get(
"https://e621.net/posts.json",
params=dict(limit="100", tags=event.params),
headers={'User-Agent': 'Raribot IRC Bot github.com/flare561/berry'}).json()['posts']
if (len(j) > 0):
try:
selection = random.choice(j)
if selection['tags']['artist'] != []:
artist = " & ".join(selection['tags']['artist'])
else:
artist = 'N/A'
if selection['rating'] == 'e':
rating = 'Explicit'
elif selection['rating'] == 's':
rating = 'Safe'
else:
rating = 'Questionable'
self.send_message(
event.respond,
u'http://e621.net/posts/{0[id]} | Artist(s): {1} | Score: {0[score][total]} | Rating: {2} | Post Date: {3}'.
format(selection, artist, rating,
arrow.get(selection['created_at']).format(
'YYYY-MM-DD')).encode('utf-8', 'replace'))
except:
self.send_message(
event.respond,
"An error occurred while fetching your post.")
raise
else:
self.send_message(event.respond, "No Results")
except:
self.send_message(event.respond,
"An error occurred while fetching your post.")
raise
def command_randdev(self, event):
'''Usage: ~randdev Returns a random image from Deviant Art'''
try:
if event.params != '':
search_for = event.params
source = requests.get('https://www.deviantart.com/newest/?q={}'.format(search_for)).text
else:
for _ in range(10):
search_for = ''.join(random.choice(string.ascii_lowercase) for _ in range(3))
source = requests.get('https://www.deviantart.com/newest/?q={}'.format(search_for)).text
if 'Sorry, we found no relevant results.' not in source:
break
parsed = html.fromstring(source)
results = parsed.xpath('//*[@id="page-1-results"]//*[@data-super-alt and @href]')
final = random.choice(results)
self.send_message(event.respond, '{} | {}'.format(final.attrib['data-super-alt'], final.attrib['href']))
except:
self.send_message(event.respond, 'Something is a little fucky wucky!')
@register('nsfw', True)
def command_randgel(self, event):
'''Usage: ~randgel <tags> Used to search gelbooru.com for a random picture with the given tags'''
try:
ratings = {'e': 'Explicit',
's': 'Safe',
'q': 'Questionable'}
params = dict(
page='dapi',
s='post',
q='index',
json='1',
limit='100',
api_key="anonymous",
user_id="9455",
tags=event.params)
resp = requests.get('https://gelbooru.com/index.php',
params=params,
cookies={"fringeBenefits":"yep"}).json()
if len(resp) > 0:
select = random.choice(resp)
response = []
response.append('https://gelbooru.com/index.php?page=post&s=view&id=%s' % select[u'id'])
params['s'] = 'tag'
params['names'] = select[u'tags']
del params['tags']
resp = requests.get('https://gelbooru.com/index.php',
params=params,
cookies={"fringeBenefits":"yep"}).json()
for tag in resp:
if tag[u'type'].lower() == 'artist':
response.append('Artist: %s' % tag[u'tag'])
break
response.append('Rating: %s' % ratings[select[u'rating']])
response.append('Score: %s' % select[u'score'])
if 'loli' in select[u'tags'] or 'shota' in select[u'tags']:
response.append('Direct: %s' % select[u'file_url'])
self.send_message(
event.respond, " | ".join(response).encode('utf-8', 'replace'))
else:
self.send_message(event.respond, 'No Results')
except:
self.send_message(event.respond,
'An error occurred while fetching your post.')
raise
@register('nsfw', True)
def command_clop(self, event):
'''Usage: ~clop <optional extra tags> Searches e621 for a random image with the tags rating:e and my_little_pony'''
event.params += ' rating:e my_little_pony'
self.command_rande621(event)
def command_git(self, event):
'''Usage: Links to the repository for lazy fucks. ~git <arg> will link to the line for that command, if applicable.'''
if not event.params:
self.send_message(event.respond, 'https://github.com/flare561/berry')
else:
try:
code = requests.get("https://raw.githubusercontent.com/flare561/berry/master/commands.py").text
for i, line in enumerate(code.split("\n")):
if "def command_{}".format(event.params) in line:
self.send_message(event.respond, 'https://github.com/flare561/berry/blob/master/commands.py#L{}'.format(i+1))
break
else:
self.send_message(event.respond, 'Command not found! Try checking your spelling?')
except:
self.send_message(event.respond, 'Command not found! Maybe github is down?')
def command_ja(self, event):
'''Usage: ~ja <k/h/r> <arg> displays katakana/hiragana/romaji for a given argument, converting between romaji and kana'''
try:
dest, phrase = event.params.split(' ', 1)
dest = dest.lower()
if dest == 'k':
resp = romkan.to_katakana(phrase)
elif dest == 'h':
resp = romkan.to_hiragana(phrase)
elif dest == 'r':
resp = romkan.to_roma(phrase.decode('utf-8'))
else:
raise
self.send_message(event.respond, resp)
except:
self.send_message(event.respond, 'Invalid input, please check syntax.')
raise
def command_translate(self, event):
'''Usage: Just use the right fucking command.'''
self.command_tr(event)
def command_tr(self, event):
'''Usage: ~tr <languageTo> <phrase> The bot will auto-detect the language of the targeted text.'''
try:
translator = Translator()
phrase = event.params.split()
translated = translator.translate(' '.join(phrase[1:]), dest=phrase[0])
text = 'Translated from {}: {}'.format(translated.src,translated.text.encode('utf-8', 'replace'))
if len(text) > 397:
text = text[0:396] + '...'
self.send_message(event.respond, text)
except:
self.send_message(
event.respond,
'Translation unsuccessful! Maybe the service is down?')
def command_trs(self, event):
'''Usage: It's like ~tr, but more specific. Use it by doing ~trs <languageFrom> <languageTo> <phrase>'''
try:
translator = Translator()
phrase = event.params.split()
translated = translator.translate(' '.join(phrase[2:]), dest=phrase[1], src=phrase[0])
text = 'Translated from {} to {}: {}'.format(translated.src,translated.dest,translated.text.encode('utf-8', 'replace'))
if len(text) > 397:
text = text[0:396] + '...'
self.send_message(event.respond, text)
except:
self.send_message(
event.respond,
'Translation unsuccessful! Maybe the service is down?')
raise
def command_wolf(self, event):
'''Usage: ~wolf <query> Searches wolfram alpha for your query'''
try:
s = requests.get(
"http://api.wolframalpha.com/v2/query",
params=dict(
input=event.params, appid=self.config['wolframKey'])).text
x = etree.fromstring(s.encode('UTF-8', 'replace'))
d = x.xpath('//pod[@primary="true"]/subpod/plaintext')
results = [
o.text.replace('\n', ' ').encode('utf-8', 'replace') for o in d
]
search_url = "http://www.wolframalpha.com/input/?i={}".format(
urllib.quote(event.params, ''))
if len(results) < 1:
responseStr = "No results available, try the query page:"
else:
responseStr = '; '.join(results)
if (len(responseStr) + len(search_url)) > 390:
responseStr = responseStr[:(390 - len(search_url))] + "..."
responseStr += " " + search_url
self.send_message(event.respond, responseStr)
except:
self.send_message(event.respond, "Error with the service")
raise
def command_weather(self, event):
'''Usage: ~weather <location> Gets the weather for a location from wolfram alpha'''
event.params = 'weather ' + event.params
self.command_wolf(event)
def command_define(self, event):
'''Usage: ~define <word> Gets the definition of a word from wolfram alpha'''
event.params = 'define ' + event.params
self.command_wolf(event)
def command_imdb(self, event):
'''Usage: ~imdb <movie title> Provides basic information of a given movie, | |
<filename>ndrive/client.py
# -*- coding: utf-8 -*-
"""
====================================
ndrive
====================================
What is ndrive
==============
ndrive is a Naver Ndrive wrapper for python
Getting started
===============
git clone https://github.com/carpedm20/pyndrive.git
Copyright
=========
Copyright 2014 <NAME>
"""
import os, sys
from os.path import expanduser
import urllib, urllib2
import requests
import simplejson as json
import magic
import datetime
import re
from .auth import getCookie
from .urls import ndrive_urls as nurls
from .utils import byte_readable
###############################################################################
class Ndrive(object):
"""Initialize ``NdriveClient`` instance.
Using given user information, login to ndrive server and create a session
:param bool debug: (optional) print all metadata of http requests
:param str NID_AUT: (optional) Naver account authentication info
:param str NID_SES: (optional) Naver account session info
Usage::
>>> from ndrive import Ndrive
>>> nd = Ndrive()
"""
debug = False
session = requests.session()
def __init__(self, debug = False, NID_AUT = None, NID_SES= None):
self.debug = debug
self.session.headers["User-Agent"] = \
"Mozilla/5.0 (Windows NT 6.2; WOW64) Chrome/32.0.1700.76 Safari/537.36"
self.session.cookies.set('NID_AUT', NID_AUT)
self.session.cookies.set('NID_SES', NID_SES)
def login(self, user_id, password, svctype = "Android NDrive App ver", auth = 0):
"""Log in Naver and get cookie
>>> s = nd.login("YOUR_ID", "YOUR_PASSWORD")
:param str user_id: Naver account's login id
:param str password: Naver account's login password
:param str svctype: Service type
:param auth: ???
:return: ``True`` on successful login, otherwise ``False``
"""
self.user_id = user_id
self.password = password
if self.user_id == None or self.password == None:
print "[*] Error __init__: user_id and password is needed"
return False
try:
cookie = getCookie(user_id, password)
except:
print "[*] Error getCookie: failed"
return False
self.session.cookies.set('NID_AUT', cookie["NID_AUT"])
self.session.cookies.set('NID_SES', cookie["NID_SES"])
s, metadata = self.getRegisterUserInfo(svctype, auth)
if s is True:
return True
else:
print "[*] Error getRegisterUserInfo: " + metadata['message']
return False
def checkAccount(self):
if self.useridx is None:
return False, "Error checkStatus: useridx is not defined"
elif self.user_id is None:
return False, "Error checkStatus: userId is not defined"
else:
return True, None
def GET(self, func, data):
"""Send GET request to execute Ndrive API
:param func: The function name you want to execute in Ndrive API.
:param data: Parameter data for the HTTP request.
:returns: metadata when success or False when failed
"""
if func not in ['getRegisterUserInfo']:
s, message = self.checkAccount()
if s is False:
return False, message
url = nurls[func]
r = self.session.get(url, params = data)
r.encoding = 'utf-8'
if self.debug:
print r.text
try:
try:
metadata = json.loads(r.text)
except:
metadata = json.loads(r.text[r.text.find('{'):-1])
message = metadata['message']
if message == 'success':
return True, metadata['resultvalue']
else:
return False, message
except:
for e in sys.exc_info():
print e
sys.exit(1)
return False, "Error %s: Failed to send GET request" %func
def POST(self, func, data):
"""Send POST request to execute Ndrive API
:param func: The function name you want to execute in Ndrive API.
:param data: Parameter data for the HTTP request.
:returns: ``metadata`` when success or ``False`` when failed
"""
s, message = self.checkAccount()
if s is False:
return False, message
url = nurls[func]
r = self.session.post(url, data = data)
r.encoding = 'utf-8'
if self.debug:
print r.text.encode("utf-8")
try:
metadata = json.loads(r.text)
message = metadata['message']
if message == 'success':
try:
return True, metadata['resultvalue']
except:
return True, metadata['resultcode']
else:
return False, "Error %s: %s" %(func, message)
except:
#for e in sys.exc_info():
# print e
#sys.exit(1)
return False, "Error %s: Failed to send POST request" %func
def getRegisterUserInfo(self, svctype = "Android NDrive App ver", auth = 0):
"""Retrieve information about useridx
:param svctype: Information about the platform you are using right now.
:param auth: Authentication type
:return: ``True`` on success or ``False`` on failure
"""
data = {'userid': self.user_id,
'svctype': svctype,
'auth': auth
}
s, metadata = self.GET('getRegisterUserInfo', data)
if s is True:
self.useridx = metadata['useridx']
return True, metadata
else:
return False, metadata
def checkStatus(self):
"""Check status
Check whether it is possible to access Ndrive or not.
:return: ``True`` on success or ``False`` on failure.
"""
self.checkAccount()
data = {'userid': self.user_id,
'useridx': self.useridx
}
r = self.session.post(nurls['checkStatus'], data = data)
r.encoding = 'utf-8'
p = re.compile(r'\<message\>(?P<message>.+)\</message\>')
message = p.search(r.text).group('message')
if message == 'success':
return True
else:
return False
def uploadFile(self, file_obj, full_path, overwrite = False):
"""Upload a file as Ndrive really do.
>>> nd.uploadFile('~/flower.png','/Picture/flower.png',True)
This function imitates the process when Ndrive uploads a local file to its server. The process follows 7 steps:
1. POST /CheckStatus.ndrive
2. POST /GetDiskSpace.ndrive
3. POST /CheckUpload.ndrive
4. PUT /FILE_PATH
5. POST /GetList.ndrive
6. POST /GetWasteInfo.ndrive
7. POST /GetDiskSpace.ndrive
nd.uploadFile('./flower.png','/Picture/flower.png')
:param file_obj: The file to upload; either a path string or a real file object.
:param full_path: The full path to upload the file to, *including the file name*. If the destination directory does not yet exist, it will be created.
:param overwrite: Whether to overwrite an existing file at the given path. (Default ``False``.)
"""
s = self.checkStatus()
s = self.getDiskSpace()
s = self.checkUpload(file_obj, full_path, overwrite)
if s is True:
self.put(file_obj, full_path, overwrite)
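# A hedged usage sketch (doctest-style, matching the examples above); the
# credentials and paths are placeholders, not values from this project.
# uploadFile simply chains checkStatus -> getDiskSpace -> checkUpload -> put,
# mirroring the 7-step flow described in its docstring.
#
# >>> nd = Ndrive()
# >>> nd.login("YOUR_ID", "YOUR_PASSWORD")
# >>> nd.uploadFile("./flower.png", "/Picture/flower.png", overwrite=True)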
def getDiskSpace(self):
"""Get disk space information.
>>> disk_info = nd.getDiskSpace()
:return: ``metadata`` if success or ``error message``
:metadata:
- expandablespace
- filemaxsize
- largefileminsize
- largefileunusedspace
- largefileusedspace
- paymentspace
- totallargespace
- totalspace
- unusedspace
- usedspace
"""
data = {'userid': self.user_id,
'useridx': self.useridx,
}
s, metadata = self.POST('getDiskSpace',data)
if s is True:
usedspace = byte_readable(metadata['usedspace'])
totalspace = byte_readable(metadata['totalspace'])
print "Capacity: %s / %s" % (usedspace, totalspace)
return metadata
else:
print metadata
def checkUpload(self, file_obj, full_path = '/', overwrite = False):
"""Check whether it is possible to upload a file.
>>> s = nd.checkUpload('~/flower.png','/Picture/flower.png')
:param file_obj: A file-like object to check whether possible to upload. You can pass a string as a file_obj or a real file object.
:param str full_path: The full path to upload the file to, *including the file name*. If the destination directory does not yet exist, it will be created.
:param overwrite: Whether to overwrite an existing file at the given path. (Default ``False``.)
:return: ``True`` if possible to upload or ``False`` if impossible to upload.
"""
try:
file_obj = file_obj.name
except:
file_obj = file_obj # do nothing
file_size = os.stat(file_obj).st_size
now = datetime.datetime.now().isoformat()
data = {'uploadsize': file_size,
'overwrite': 'T' if overwrite else 'F',
'getlastmodified': now,
'dstresource': full_path,
'userid': self.user_id,
'useridx': self.useridx,
}
s, metadata = self.POST('checkUpload', data)
if not s:
print metadata
return s
def downloadFile(self, from_path, to_path = ''):
"""Download a file.
>>> nd.downloadFile('/Picture/flower.png', '~/flower.png')
:param from_path: The full remote path of the file to download, *including the file name*.
:param to_path: The full local path to save the file to. Defaults to the current working directory with the remote file name.
:returns: File object
"""
if to_path == '':
file_name = os.path.basename(from_path)
to_path = os.path.join(os.getcwd(), file_name)
url = nurls['download'] + from_path
data = {'attachment':2,
'userid': self.user_id,
'useridx': self.useridx,
'NDriveSvcType': "NHN/ND-WEB Ver",
}
if '~' in to_path:
to_path = expanduser(to_path)
with open(to_path, 'wb') as handle:
request = self.session.get(url, params = data, stream=True)
for block in request.iter_content(1024):
if not block:
break
handle.write(block)
return handle
def put(self, file_obj, full_path, overwrite = False):
"""Upload a file.
>>> nd.put('./flower.png','/Picture/flower.png')
>>> nd.put(open('./flower.png','r'),'/Picture/flower.png')
:param file_obj: The file to upload; either a path string or a real file object.
:param full_path: The full path to upload the file to, *including the file name*. If the destination directory does not yet exist, it will be created.
:return: ``True`` when the upload succeeds or ``False``
"""
try:
file_obj = open(file_obj, 'rb')
except:
file_obj = file_obj # do nothing
content = file_obj.read()
file_name = os.path.basename(full_path)
now = datetime.datetime.now().isoformat()
url = nurls['put'] + full_path
if overwrite:
overwrite = 'T'
else:
overwrite = 'F'
headers = {'userid': self.user_id,
'useridx': self.useridx,
'MODIFYDATE': now,
'Content-Type': magic.from_file(file_obj.name, mime=True),
'charset': 'UTF-8',
'Origin': 'http://ndrive2.naver.com',
'OVERWRITE': overwrite,
'X-Requested-With': 'XMLHttpRequest',
'NDriveSvcType': 'NHN/DRAGDROP Ver',
}
r = self.session.put(url = url, data = content, headers = headers)
r.encoding = 'utf-8'
message = json.loads(r.text)['message']
if message != 'success':
print "Error put: " + message
return False
else:
print "Success put: " + file_obj.name
return True
def delete(self, full_path):
"""Delete a file | |
<reponame>cgruber/make-open-easy<filename>moe/scrubber/scrubber.py
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
"""Scrubber scrubs.
Usage:
scrubber [DIRECTORY]
Args:
directory: a directory to scan
"""
__author__ = '<EMAIL> (<NAME>)'
import locale
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
from google.apputils import app
from google.apputils import file_util
import gflags as flags
from google.apputils import resources
from google.apputils import stopwatch
from moe import config_utils
from moe.scrubber import base
from moe.scrubber import c_include_scrubber
from moe.scrubber import comment_scrubber
from moe.scrubber import gwt_xml_scrubber
from moe.scrubber import java_scrubber
from moe.scrubber import line_scrubber
from moe.scrubber import python_scrubber
from moe.scrubber import renamer
from moe.scrubber import replacer
from moe.scrubber import sensitive_string_scrubber
from moe.scrubber import usernames
from moe.scrubber import whitelist
FLAGS = flags.FLAGS
flags.DEFINE_bool('modify', False, 'Modify files to scrub information')
flags.DEFINE_bool('stopwatch', True, 'Detail where time went (for debugging)')
flags.DEFINE_string('output_tar', '',
'Path of where to write a tar of scrubbed codebase')
flags.DEFINE_string('config_file', '',
'Path to config file')
flags.DEFINE_string('config_data', '',
'Text of the scrubber config')
flags.DEFINE_string('explicit_inputfile_list', '',
'List of files (with same base directory) to scrub')
flags.DEFINE_string('temp_dir', '',
'Path of a temporary directory to use')
DIFFS_DIR = 'diffs'
ORIGINAL_DIR = 'originals'
OUTPUT_DIR = 'output'
MODIFIED_DIR = 'modified'
MAIN_USAGE_STRING = (
'List exactly one directory to scrub and, if you want, set '
'--explicit_inputfile_list to provide a list of input files.')
class ScrubberConfig(object):
"""The config for a run of the scrubber.
ScrubberConfig holds all immutable config, so the only members in
ScrubberContext should be mutated or derived data. This allows
other scrubbing-binaries to replace all the configuration.
"""
def __init__(self, codebase, input_files, extension_to_scrubber_map,
default_scrubbers, modify, output_tar, temp_dir):
# Other object state.
self.codebase = os.path.abspath(codebase)
self.input_files = input_files
self.modify = modify
self.output_tar = output_tar
self.temp_dir = temp_dir
self._comment_scrubbers = None
self._sensitive_string_scrubbers = None
# General options.
# If no ignore_files_re is given, we want to ignore no files,
# which means matching no strings. Similarly for
# do_not_scrub_files. '$a' is a regex that matches no strings.
self.ignore_files_re = re.compile('$a')
self.do_not_scrub_files_re = re.compile('$a')
self.extension_map = []
self.sensitive_words = []
self.sensitive_res = []
self.whitelist = whitelist.Whitelist([])
self.scrub_sensitive_comments = True
self.rearranging_config = {}
self.string_replacements = []
self.regex_replacements = []
# Username options.
self.scrubbable_usernames = None
self.publishable_usernames = None
self.usernames_file = None
self.scrub_unknown_users = False
self.scrub_authors = True
self.scrub_proto_comments = False
self.scrub_non_documentation_comments = False
self.scrub_all_comments = False
# C/C++-specific options.
self.c_includes_config_file = None
# Java-specific options.
self.scrub_java_testsize_annotations = False
self.maximum_blank_lines = 0
self.empty_java_file_action = base.ACTION_IGNORE
self.java_renames = []
# Javascript-specific options.
self.js_directory_renames = []
# Python-specific options.
self.python_module_renames = []
self.python_module_removes = []
self.python_shebang_replace = None
# GWT-specific options.
self.scrub_gwt_inherits = []
# TODO(dborowitz): Make this a config option.
self.known_filenames = set([
'.gitignore',
'AUTHORS',
'CONTRIBUTORS',
'COPYING',
'LICENSE',
'Makefile',
'README'])
self.ResetScrubbers(extension_to_scrubber_map, default_scrubbers)
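# A hedged construction sketch (not from the original source): the argument
# values below are invented placeholders. As the class docstring notes, the
# immutable configuration lives here, a ScrubberContext is built on top of it,
# and custom scrubber maps can be swapped in later via ResetScrubbers().
#
#   config = ScrubberConfig(codebase='/path/to/codebase',
#                           input_files=None,
#                           extension_to_scrubber_map=None,
#                           default_scrubbers=None,
#                           modify=False,
#                           output_tar='',
#                           temp_dir='')
#   context = ScrubberContext(config)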
def ResetScrubbers(self, extension_to_scrubber_map, default_scrubbers):
"""Reset scrubbers in this config given the arguments."""
self._sensitive_string_scrubbers = None
self._comment_scrubbers = None
self.username_filter = usernames.UsernameFilter(
usernames_file=self.usernames_file,
publishable_usernames=self.publishable_usernames,
scrubbable_usernames=self.scrubbable_usernames,
scrub_unknown_users=self.scrub_unknown_users)
if extension_to_scrubber_map is not None:
self.extension_to_scrubber_map = extension_to_scrubber_map
else:
self._comment_scrubbers = None
go_and_c_scrubbers = self._PolyglotFileScrubbers()
if self.c_includes_config_file:
go_and_c_scrubbers.append(
c_include_scrubber.IncludeScrubber(self.c_includes_config_file))
# See also _ResetBatchScrubbers() for other scrubbers by extension.
self.extension_to_scrubber_map = {
'.go': go_and_c_scrubbers,
'.h': go_and_c_scrubbers,
'.c': go_and_c_scrubbers,
'.cc': go_and_c_scrubbers,
'.hgignore': self._MakeShellScrubbers(),
'.gitignore': self._MakeShellScrubbers(),
'.html': self._MakeHtmlScrubbers(),
'.java': self._MakeJavaScrubbers(),
'.jj': self._MakeJavaScrubbers(),
'.js': self._MakeJsScrubbers(),
# .jslib is an output from a js_library build rule
'.jslib': self._MakeJsScrubbers(),
'.l': go_and_c_scrubbers,
'.php': self._MakePhpScrubbers(),
'.php4': self._MakePhpScrubbers(),
'.php5': self._MakePhpScrubbers(),
'.proto': self._MakeProtoScrubbers(),
'.protodevel': self._MakeProtoScrubbers(),
'.py': self._MakePythonScrubbers(),
'.css': self._PolyglotFileScrubbers(),
'.yaml': self._MakeShellScrubbers(),
'.sh': self._MakeShellScrubbers(),
'.json': self._PolyglotFileScrubbers(),
'.swig': go_and_c_scrubbers,
# Jars often have short sensitive strings in them based only on the
# byte sequences these are. We might still like to scan jars, but a
# way to reduce the false-positive rate is needed.
'.jar': [],
'.gif': [],
'.png': [],
'.jpg': [],
'.xml': self._MakeGwtXmlScrubbers(),
}
self._ResetBatchScrubbers()
if default_scrubbers is not None:
self.default_scrubbers = default_scrubbers
else:
self.default_scrubbers = self._PolyglotFileScrubbers()
# NB(yparghi): The "pre- and post-" batch scrubbing flow isn't natural, but
# suits our current use cases. Ideally, batch and non-batch scrubbing would
# be arranged by an optimized execution graph, where, for example, all the
# by-file scrubbers prerequisite to batch scrubber A are executed, then A is
# executed, then the next by-file or batch scrubber...
def _ResetBatchScrubbers(self):
"""Set batch scrubbers to run before and after by-file scrubbing.
See ScrubberContext.Scan below. First, the pre-batch scrubbers are run for
applicable files, then by-file scrubbers, then post-batch scrubbers.
("Pre-batch" and "post-batch" are misnomers, but used for simplicity.
Respectively, they're really "pre-(by-file)" and "post-(by-file)".)
"""
c_like_comment_pre_batch_scrubbers = [
comment_scrubber.CommentScrubber(
comment_scrubber.CLikeCommentExtractor(),
self._CommentScrubbers())
]
java_pre_batch_scrubbers = []
java_pre_batch_scrubbers.extend(c_like_comment_pre_batch_scrubbers)
proto_pre_batch_scrubbers = (self.scrub_proto_comments and
c_like_comment_pre_batch_scrubbers or [])
self.extension_to_pre_batch_scrubbers_map = {
'.c': c_like_comment_pre_batch_scrubbers,
'.cc': c_like_comment_pre_batch_scrubbers,
'.go': c_like_comment_pre_batch_scrubbers,
'.h': c_like_comment_pre_batch_scrubbers,
'.java': java_pre_batch_scrubbers,
'.jj': java_pre_batch_scrubbers,
'.js': c_like_comment_pre_batch_scrubbers,
'.jslib': c_like_comment_pre_batch_scrubbers,
'.l': c_like_comment_pre_batch_scrubbers,
'.php': c_like_comment_pre_batch_scrubbers,
'.php4': c_like_comment_pre_batch_scrubbers,
'.php5': c_like_comment_pre_batch_scrubbers,
'.proto': proto_pre_batch_scrubbers,
'.protodevel': proto_pre_batch_scrubbers,
'.swig': c_like_comment_pre_batch_scrubbers,
}
java_post_batch_scrubbers = []
if self.empty_java_file_action != base.ACTION_IGNORE:
java_post_batch_scrubbers.append(
java_scrubber.EmptyJavaFileScrubber(self.empty_java_file_action))
self.extension_to_post_batch_scrubbers_map = {
'.java': java_post_batch_scrubbers,
'.jj': java_post_batch_scrubbers,
}
def _SensitiveStringScrubbers(self):
if not self._sensitive_string_scrubbers:
self._sensitive_string_scrubbers = [
sensitive_string_scrubber.SensitiveWordScrubber(self.sensitive_words),
sensitive_string_scrubber.SensitiveReScrubber(self.sensitive_res),
]
return self._sensitive_string_scrubbers
def _PolyglotFileScrubbers(self):
result = []
if self.string_replacements:
r = replacer.ReplacerScrubber(
(r['original'], r['replacement']) for r in self.string_replacements)
result.append(r)
if self.regex_replacements:
r = replacer.RegexReplacerScrubber(
(r['original'], r['replacement']) for r in self.regex_replacements)
result.append(r)
result += self._SensitiveStringScrubbers()
return result
def _CommentScrubbers(self):
if not self._comment_scrubbers:
self._comment_scrubbers = []
if self.scrub_all_comments:
self._comment_scrubbers.append(comment_scrubber.AllCommentScrubber())
elif self.scrub_non_documentation_comments:
self._comment_scrubbers.append(
comment_scrubber.AllNonDocumentationCommentScrubber())
self._comment_scrubbers.append(
comment_scrubber.TodoScrubber(self.username_filter))
if self.scrub_authors:
self._comment_scrubbers.append(
comment_scrubber.AuthorDeclarationScrubber(self.username_filter))
if self.scrub_sensitive_comments:
for s in self._SensitiveStringScrubbers():
scrubber = comment_scrubber.SensitiveStringCommentScrubber(
self.whitelist, s)
self._comment_scrubbers.append(scrubber)
return self._comment_scrubbers
def _PolyglotLineOrientedScrubbers(self):
scrubbers = []
return scrubbers
def _MakeGwtXmlScrubbers(self):
gwt_scrubbers = []
if self.scrub_gwt_inherits:
to_scrub = set(self.scrub_gwt_inherits)
gwt_scrubbers.append(gwt_xml_scrubber.GwtXmlScrubber(to_scrub))
return gwt_scrubbers
def _MakeHtmlScrubbers(self):
html_scrubbers = []
html_scrubbers.append(
comment_scrubber.CommentScrubber(
comment_scrubber.HtmlCommentExtractor(),
self._CommentScrubbers()))
line_scrubbers = self._PolyglotLineOrientedScrubbers()
for js_directory_rename in self.js_directory_renames:
line_scrubbers.append(js_directory_rename)
html_scrubbers.append(line_scrubber.LineScrubber(line_scrubbers))
html_scrubbers.extend(self._PolyglotFileScrubbers())
return html_scrubbers
def _MakeJavaScrubbers(self):
java_scrubbers = []
line_scrubbers = self._PolyglotLineOrientedScrubbers()
java_scrubbers.append(line_scrubber.LineScrubber(line_scrubbers))
java_scrubbers.extend(self.java_renames)
if self.scrub_java_testsize_annotations:
java_scrubbers.append(java_scrubber.TestSizeAnnotationScrubber())
java_scrubbers.append(java_scrubber.UnusedImportStrippingScrubber())
if self.maximum_blank_lines:
java_scrubbers.append(
java_scrubber.CoalesceBlankLinesScrubber(self.maximum_blank_lines))
java_scrubbers.extend(self._PolyglotFileScrubbers())
return java_scrubbers
def _MakeJsScrubbers(self):
js_scrubbers = []
line_scrubbers = self._PolyglotLineOrientedScrubbers()
for js_directory_rename in self.js_directory_renames:
line_scrubbers.append(js_directory_rename)
js_scrubbers.append(line_scrubber.LineScrubber(line_scrubbers))
js_scrubbers.extend(self._PolyglotFileScrubbers())
return js_scrubbers
def _MakePhpScrubbers(self):
php_scrubbers = []
php_scrubbers.append(line_scrubber.LineScrubber(
self._PolyglotLineOrientedScrubbers()))
php_scrubbers.extend(self._PolyglotFileScrubbers())
return php_scrubbers
def _MakePythonScrubbers(self):
py_scrubbers = []
py_scrubbers.append(
comment_scrubber.CommentScrubber(
comment_scrubber.PythonCommentExtractor(),
self._CommentScrubbers()))
line_scrubbers = []
line_scrubbers.extend(self.python_module_renames)
line_scrubbers.extend(self.python_module_removes)
if self.scrub_authors:
line_scrubbers.append(
line_scrubber.PythonAuthorDeclarationScrubber(self.username_filter))
if self.python_shebang_replace:
py_scrubbers.append(self.python_shebang_replace)
line_scrubbers += self._PolyglotLineOrientedScrubbers()
py_scrubbers.append(line_scrubber.LineScrubber(line_scrubbers))
py_scrubbers.extend(self._PolyglotFileScrubbers())
return py_scrubbers
def _MakeProtoScrubbers(self):
proto_scrubbers = []
proto_scrubbers.append(
line_scrubber.LineScrubber(self._PolyglotLineOrientedScrubbers()))
proto_scrubbers.extend(self._PolyglotFileScrubbers())
return proto_scrubbers
def _MakeShellScrubbers(self):
shell_scrubbers = []
shell_scrubbers.append(
comment_scrubber.CommentScrubber(
comment_scrubber.ShellLikeCommentExtractor(),
comment_scrubbers=self._CommentScrubbers()))
shell_scrubbers.extend(self._PolyglotFileScrubbers())
return shell_scrubbers
class ScrubberContext(object):
"""The ScrubberContext collects the context for a scrub.
Right now, this only includes errors. In the next iteration, it will
also be possible to add a revision. At the end of the run, based on a flag,
the revisions will either be applied in-place or just have their diffs
saved somewhere.
"""
def __init__(self, scrubber_config):
locale.setlocale(locale.LC_ALL, 'en_US.utf-8')
os.environ['LANG'] = 'en_US.UTF-8'
self.config = scrubber_config
self._errors = []
self.CreateTempDir()
self.files = self.FindFiles(scrubber_config)
self._unscrubbed_file_extensions = set()
self._unscrubbed_files = set()
def CreateTempDir(self):
self._temp_dir = self.config.temp_dir or tempfile.mkdtemp(prefix='scrubber')
base.MakeDirs(self._temp_dir)
def GetScratchDir(self):
"""Return a temp directory suitable for writing temporary output."""
return os.path.join(self._temp_dir, 'scratch')
def AddError(self, error):
"""Add base.ScrubberError or str error to the list of errors."""
# First, check if it's in our whitelist
if self.config.whitelist.Allows(error):
return
self._errors.append(error)
def Report(self):
"""Report on this run of scrubber to stdout."""
print 'Scanned %d files' % len(self.files)
print 'Found %d files to modify' % len(self.ModifiedFiles())
username_to_count_map = {}
unknown_username_instances = 0
for error in self._errors:
if isinstance(error, comment_scrubber.TodoError):
username_to_count_map[error.username] = username_to_count_map.get(
error.username, 0) + 1
unknown_username_instances += 1
else:
if isinstance(error, str):
report_string = error
else:
report_string = (
'ERROR[entry:<filter:"%s" trigger:"%s" filename:"%s">]: %s' % (
error.filter, error.trigger, error.file_obj.relative_filename,
error.ReportText()))
print report_string
if unknown_username_instances:
print 'Found unknown usernames %d times' % unknown_username_instances
for username, count in username_to_count_map.iteritems():
print u' %s %d' % (username, count)
print 'Wrote results into %s' % self._temp_dir
if self._unscrubbed_file_extensions:
print 'Did not know how to scan the following extensions:'
for extension in self._unscrubbed_file_extensions:
print ' ', extension
if self._unscrubbed_files:
print 'Did not know how to scan the following files:'
for filename in self._unscrubbed_files:
print ' ', filename
def Status(self):
"""Return a status code suitable for process exit status."""
if self._errors:
return 1
return 0
def ModifiedFiles(self):
return [f for f in self.files if f.is_modified]
def WriteOutput(self):
"""Write out the output of this ScrubberContext.
Side Effects:
Always:
output, original, and modified files are written to temporary directory
If self.config.modify
Files are scrubbed in place.
If self.config.output_tar:
Modified output | |
error occurred. Please check the asset configuration and|or action parameters."
message = "Status Code: {0}. Data from server:\n{1}\n".format(status_code,
error_text)
message = message.replace('{', '{{').replace('}', '}}')
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_json_response(self, r, action_result):
# Try a json parse
try:
resp_json = r.json()
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(self._python_version, e, self)
error_txt = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
return RetVal(action_result.set_status(phantom.APP_ERROR, "Unable to parse JSON response. {0}".format(error_txt)), None)
# Please specify the status codes here
if 200 <= r.status_code < 399:
return RetVal(phantom.APP_SUCCESS, resp_json)
try:
error_code = ""
error_text = ""
error_message = ""
error = resp_json.get('error', '')
error_desc = resp_json.get('error_description', '')
if isinstance(error, dict):
error_code = error.get('code')
error_message = error.get('message')
if error_message:
try:
soup = BeautifulSoup(resp_json.get('error', {}).get('message'), "html.parser")
# Remove the script, style, footer and navigation part from the HTML message
for element in soup(["script", "style", "footer", "nav"]):
element.extract()
error_text = soup.text
split_lines = error_text.split('\n')
split_lines = [x.strip() for x in split_lines if x.strip()]
error_text = '\n'.join(split_lines)
if len(error_text) > 500:
error_text = 'Error while connecting to a server (Please check input parameters or asset configuration parameters)'
except:
error_text = "Cannot parse error details"
try:
error_text = _handle_py_ver_compat_for_input_str(self._python_version, error_text, self)
except TypeError:
error_text = "Error occurred while handling python 2to3 compatibility for the error message"
except:
error_text = "Unknown error occurred while parsing the error message"
if error_code:
error_text = "{}. {}".format(error_code, error_text)
if error_desc:
try:
error_desc = _handle_py_ver_compat_for_input_str(self._python_version, error_desc, self)
except TypeError:
error_desc = "Error occurred while handling python 2to3 compatibility for the error_description"
except:
error_desc = "Unknown error occurred while parsing the error_description"
error_text = "{}. {}".format(error_desc, error_text)
if not error_text:
error_text = r.text.replace('{', '{{').replace('}', '}}')
except:
error_text = r.text.replace('{', '{{').replace('}', '}}')
try:
error_text = _handle_py_ver_compat_for_input_str(self._python_version, error_text, self)
except TypeError:
error_text = "Error occurred while handling python 2to3 compatibility for the error string"
except:
error_text = "Unknown error occurred. Please check the asset configuration and|or action parameters."
# You should process the error returned in the json
message = "Error from server. Status Code: {0} Data from server: {1}".format(
r.status_code, error_text)
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_response(self, r, action_result):
# store the r_text in debug data, it will get dumped in the logs if the action fails
if hasattr(action_result, 'add_debug_data'):
action_result.add_debug_data({'r_status_code': r.status_code})
action_result.add_debug_data({'r_text': r.text})
action_result.add_debug_data({'r_headers': r.headers})
# Process each 'Content-Type' of response separately
# Process a json response
content_type = r.headers.get('Content-Type', '')
if 'json' in content_type or 'javascript' in content_type:
return self._process_json_response(r, action_result)
# Process an HTML response. Do this no matter what the API returns.
# There is a high chance of a PROXY in between phantom and the rest of
# world, in case of errors, PROXY's return HTML, this function parses
# the error and adds it to the action_result.
if 'html' in r.headers.get('Content-Type', ''):
return self._process_html_response(r, action_result)
if r.status_code == 404:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Email not found"), None)
if 200 <= r.status_code <= 204:
return RetVal(phantom.APP_SUCCESS, None)
# No content-type we know how to parse; handle an empty response
if not r.text:
return self._process_empty_reponse(r, action_result)
# everything else is actually an error at this point
message = "Can't process response from server. Status Code: {0} Data from server: {1}".format(
r.status_code, r.text.replace('{', '{{').replace('}', '}}'))
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _make_rest_call(self, action_result, url, verify=True, headers={}, params=None, data=None, method="get"):
resp_json = None
try:
request_func = getattr(requests, method)
except AttributeError:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Invalid method: {0}".format(method)), resp_json)
try:
r = request_func(
url,
data=data,
headers=headers,
verify=verify,
params=params)
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(self._python_version, e, self)
return RetVal(action_result.set_status(phantom.APP_ERROR, "Error connecting to server. Error Code: {0}. Error Message: {1}".format(
error_code, error_msg)), resp_json)
return self._process_response(r, action_result)
def _get_asset_name(self, action_result):
asset_id = self.get_asset_id()
rest_endpoint = PHANTOM_ASSET_INFO_URL.format(url=self.get_phantom_base_url(), asset_id=asset_id)
ret_val, resp_json = self._make_rest_call(action_result, rest_endpoint, False)
if phantom.is_fail(ret_val):
return (ret_val, None)
asset_name = resp_json.get('name')
if not asset_name:
return (action_result.set_status(phantom.APP_ERROR, "Asset Name for ID: {0} not found".format(asset_id), None))
return (phantom.APP_SUCCESS, asset_name)
def _get_phantom_base_url(self, action_result):
ret_val, resp_json = self._make_rest_call(action_result, PHANTOM_SYS_INFO_URL.format(url=self.get_phantom_base_url()), False)
if phantom.is_fail(ret_val):
return (ret_val, None)
phantom_base_url = resp_json.get('base_url', '').rstrip("/")
if not phantom_base_url:
return (action_result.set_status(phantom.APP_ERROR,
"Phantom Base URL not found in System Settings. Please specify this value in System Settings"), None)
return (phantom.APP_SUCCESS, phantom_base_url)
def _get_url_to_app_rest(self, action_result=None):
if not action_result:
action_result = ActionResult()
# get the phantom ip to redirect to
ret_val, phantom_base_url = self._get_phantom_base_url(action_result)
if phantom.is_fail(ret_val):
return (action_result.get_status(), None)
# get the asset name
ret_val, asset_name = self._get_asset_name(action_result)
if phantom.is_fail(ret_val):
return (action_result.get_status(), None)
self.save_progress('Using Phantom base URL as: {0}'.format(phantom_base_url))
app_json = self.get_app_json()
app_name = app_json['name']
app_dir_name = _get_dir_name_from_app_name(app_name)
url_to_app_rest = "{0}/rest/handler/{1}_{2}/{3}".format(phantom_base_url, app_dir_name, app_json['appid'], asset_name)
return (phantom.APP_SUCCESS, url_to_app_rest)
def _make_rest_call_helper(self, action_result, endpoint, verify=True, headers=None, params=None, data=None, method="get", nextLink=None):
if nextLink:
url = nextLink
else:
url = "{0}{1}".format(MSGRAPH_API_URL, endpoint)
if headers is None:
headers = {}
headers.update({
'Authorization': 'Bearer {0}'.format(self._access_token),
'Accept': 'application/json',
'Content-Type': 'application/json'})
ret_val, resp_json = self._make_rest_call(action_result, url, verify, headers, params, data, method)
# If token is expired, generate a new token
msg = action_result.get_message()
if msg and ('token is invalid' in msg or 'Access token has expired' in msg or
'ExpiredAuthenticationToken' in msg or 'AuthenticationFailed' in msg):
ret_val = self._get_token(action_result)
headers.update({ 'Authorization': 'Bearer {0}'.format(self._access_token)})
ret_val, resp_json = self._make_rest_call(action_result, url, verify, headers, params, data, method)
if phantom.is_fail(ret_val):
return action_result.get_status(), None
return phantom.APP_SUCCESS, resp_json
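# A brief, hedged illustration of the refresh-and-retry pattern above
# (comments only; the endpoint and user_id variable are placeholders): the
# first call is attempted with the cached token, and if the resulting message
# indicates an expired or invalid token, a fresh token is fetched and the same
# request is replayed once before giving up.
#
#   ret_val, resp_json = self._make_rest_call_helper(
#       action_result, '/users/{0}/messages'.format(user_id))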
def _handle_attachment(self, attachment, container_id, artifact_json=None):
try:
if hasattr(Vault, "create_attachment"):
vault_ret = Vault.create_attachment(base64.b64decode(attachment.pop('contentBytes')), container_id, file_name=attachment['name'])
else:
if hasattr(Vault, 'get_vault_tmp_dir'):
temp_dir = Vault.get_vault_tmp_dir()
else:
temp_dir = '/opt/phantom/vault/tmp'
temp_dir = temp_dir + '/{}'.format(uuid.uuid4())
os.makedirs(temp_dir)
file_path = os.path.join(temp_dir, attachment['name'])
with open(file_path, 'wb') as f:
f.write(base64.b64decode(attachment.pop('contentBytes')))
vault_ret = Vault.add_attachment(file_path, container_id, file_name=attachment['name'])
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(self._python_version, e, self)
error_txt = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
self.debug_print("Error saving file to vault: {0}".format(error_txt))
return phantom.APP_ERROR
if not vault_ret.get('succeeded'):
self.debug_print("Error saving file to vault: ", vault_ret.get('message', "Could not save file to vault"))
return phantom.APP_ERROR
if artifact_json is None:
attachment['vaultId'] = vault_ret[phantom.APP_JSON_HASH]
return phantom.APP_SUCCESS
artifact_json['name'] = 'Vault Artifact'
artifact_json['label'] = 'attachment'
artifact_json['container_id'] = container_id
artifact_json['source_data_identifier'] = attachment['id']
artifact_cef = {}
artifact_cef['size'] = attachment['size']
artifact_cef['lastModified'] = attachment['lastModifiedDateTime']
artifact_cef['filename'] = attachment['name']
artifact_cef['mimeType'] = attachment['contentType']
artifact_cef['vault_id'] = vault_ret[phantom.APP_JSON_HASH]
artifact_json['cef'] = artifact_cef
return phantom.APP_SUCCESS
def _create_email_artifacts(self, container_id, email):
artifacts = []
email_artifact = {}
artifacts.append(email_artifact)
email_artifact['label'] = 'email'
email_artifact['name'] = 'Email Artifact'
email_artifact['container_id'] = container_id
email_artifact['cef_types'] = {'id': ['email id']}
email_artifact['source_data_identifier'] = email['id']
cef = {}
email_artifact['cef'] = cef
try:
email_items = email.iteritems()
except:
email_items = email.items()
for k, v in email_items:
if v is not None:
# self.save_progress("Key: {}\r\nValue: {}".format(k, v))
if k == 'from':
from_obj = v.get('emailAddress', {})
cef[k] = from_obj
cef['fromEmail'] = from_obj.get('address', '')
elif k == 'toRecipients':
cef[k] = v
# add first email to To
recipients = v
if len(recipients):
cef['toEmail'] = recipients[0].get('emailAddress', {}).get('address', '')
else:
cef[k] = v
body = email['body']['content']
ips = []
self._process_email._get_ips(body, ips)
for ip in ips:
ip_artifact = {}
artifacts.append(ip_artifact)
ip_artifact['name'] = 'IP Artifact'
ip_artifact['label'] = 'artifact'
ip_artifact['cef'] = ip
ip_artifact['container_id'] = container_id
ip_artifact['source_data_identifier'] = email['id']
urls = []
domains = []
self._process_email._extract_urls_domains(body, urls, domains)
for url in urls:
url_artifact = {}
artifacts.append(url_artifact)
url_artifact['name'] = 'URL Artifact'
url_artifact['label'] = 'artifact'
url_artifact['cef'] = url
url_artifact['container_id'] = container_id
url_artifact['source_data_identifier'] = email['id']
for domain in domains:
domain_artifact = {}
artifacts.append(domain_artifact)
domain_artifact['name'] = 'Domain Artifact'
domain_artifact['label'] = 'artifact'
domain_artifact['cef'] = domain
domain_artifact['container_id'] = container_id
domain_artifact['source_data_identifier'] = email['id']
return artifacts
def _handle_test_connectivity(self, param):
""" Function that handles the test connectivity action, it is much simpler than other action handlers."""
action_result = self.add_action_result(ActionResult(param))
if not self._admin_access or not self._admin_consent:
self.save_progress("Getting App REST endpoint URL")
# Get the URL to the app's REST Endpoint; this is the URL that the TC dialog
# box will ask the user to connect to
ret_val, app_rest_url = self._get_url_to_app_rest(action_result)
app_state = {}
if phantom.is_fail(ret_val):
self.save_progress("Unable to get the URL to the app's REST Endpoint. Error: {0}".format(
action_result.get_message()))
return self.set_status(phantom.APP_ERROR)
# create the url that the oauth server should re-direct to after the auth is completed
# (success and failure), this is added to the state so that the request handler will access
| |
Descriptors for\n'
' another way in which attributes of a class retrieved via its\n'
' instances may differ from the objects actually stored in the\n'
' class\'s "__dict__". If no class attribute is found, and the\n'
' object\'s class has a "__getattr__()" method, that is called to\n'
' satisfy the lookup.\n'
'\n'
" Attribute assignments and deletions update the instance's\n"
" dictionary, never a class's dictionary. If the class has a\n"
' "__setattr__()" or "__delattr__()" method, this is called '
'instead\n'
' of updating the instance dictionary directly.\n'
'\n'
' Class instances can pretend to be numbers, sequences, or '
'mappings\n'
' if they have methods with certain special names. See section\n'
' Special method names.\n'
'\n'
' Special attributes: "__dict__" is the attribute dictionary;\n'
' "__class__" is the instance\'s class.\n'
'\n'
'Files\n'
' A file object represents an open file. File objects are created '
'by\n'
' the "open()" built-in function, and also by "os.popen()",\n'
' "os.fdopen()", and the "makefile()" method of socket objects '
'(and\n'
' perhaps by other functions or methods provided by extension\n'
' modules). The objects "sys.stdin", "sys.stdout" and '
'"sys.stderr"\n'
' are initialized to file objects corresponding to the '
"interpreter's\n"
' standard input, output and error streams. See File Objects for\n'
' complete documentation of file objects.\n'
'\n'
'Internal types\n'
' A few types used internally by the interpreter are exposed to '
'the\n'
' user. Their definitions may change with future versions of the\n'
' interpreter, but they are mentioned here for completeness.\n'
'\n'
' Code objects\n'
' Code objects represent *byte-compiled* executable Python '
'code,\n'
' or *bytecode*. The difference between a code object and a\n'
' function object is that the function object contains an '
'explicit\n'
" reference to the function's globals (the module in which it "
'was\n'
' defined), while a code object contains no context; also the\n'
' default argument values are stored in the function object, '
'not\n'
' in the code object (because they represent values calculated '
'at\n'
' run-time). Unlike function objects, code objects are '
'immutable\n'
' and contain no references (directly or indirectly) to '
'mutable\n'
' objects.\n'
'\n'
' Special read-only attributes: "co_name" gives the function '
'name;\n'
' "co_argcount" is the number of positional arguments '
'(including\n'
' arguments with default values); "co_nlocals" is the number '
'of\n'
' local variables used by the function (including arguments);\n'
' "co_varnames" is a tuple containing the names of the local\n'
' variables (starting with the argument names); "co_cellvars" '
'is a\n'
' tuple containing the names of local variables that are\n'
' referenced by nested functions; "co_freevars" is a tuple\n'
' containing the names of free variables; "co_code" is a '
'string\n'
' representing the sequence of bytecode instructions; '
'"co_consts"\n'
' is a tuple containing the literals used by the bytecode;\n'
' "co_names" is a tuple containing the names used by the '
'bytecode;\n'
' "co_filename" is the filename from which the code was '
'compiled;\n'
' "co_firstlineno" is the first line number of the function;\n'
' "co_lnotab" is a string encoding the mapping from bytecode\n'
' offsets to line numbers (for details see the source code of '
'the\n'
' interpreter); "co_stacksize" is the required stack size\n'
' (including local variables); "co_flags" is an integer '
'encoding a\n'
' number of flags for the interpreter.\n'
'\n'
' The following flag bits are defined for "co_flags": bit '
'"0x04"\n'
' is set if the function uses the "*arguments" syntax to accept '
'an\n'
' arbitrary number of positional arguments; bit "0x08" is set '
'if\n'
' the function uses the "**keywords" syntax to accept '
'arbitrary\n'
' keyword arguments; bit "0x20" is set if the function is a\n'
' generator.\n'
'\n'
' Future feature declarations ("from __future__ import '
'division")\n'
' also use bits in "co_flags" to indicate whether a code '
'object\n'
' was compiled with a particular feature enabled: bit "0x2000" '
'is\n'
' set if the function was compiled with future division '
'enabled;\n'
' bits "0x10" and "0x1000" were used in earlier versions of\n'
' Python.\n'
'\n'
' Other bits in "co_flags" are reserved for internal use.\n'
'\n'
' If a code object represents a function, the first item in\n'
' "co_consts" is the documentation string of the function, or\n'
' "None" if undefined.\n'
'\n'
' Frame objects\n'
' Frame objects represent execution frames. They may occur in\n'
' traceback objects (see below).\n'
'\n'
' Special read-only attributes: "f_back" is to the previous '
'stack\n'
' frame (towards the caller), or "None" if this is the bottom\n'
' stack frame; "f_code" is the code object being executed in '
'this\n'
' frame; "f_locals" is the dictionary used to look up local\n'
' variables; "f_globals" is used for global variables;\n'
' "f_builtins" is used for built-in (intrinsic) names;\n'
' "f_restricted" is a flag indicating whether the function is\n'
' executing in restricted execution mode; "f_lasti" gives the\n'
' precise instruction (this is an index into the bytecode '
'string\n'
' of the code object).\n'
'\n'
' Special writable attributes: "f_trace", if not "None", is a\n'
' function called at the start of each source code line (this '
'is\n'
' used by the debugger); "f_exc_type", "f_exc_value",\n'
' "f_exc_traceback" represent the last exception raised in the\n'
' parent frame provided another exception was ever raised in '
'the\n'
' current frame (in all other cases they are "None"); '
'"f_lineno"\n'
' is the current line number of the frame --- writing to this '
'from\n'
' within a trace function jumps to the given line (only for '
'the\n'
' bottom-most frame). A debugger can implement a Jump command\n'
' (aka Set Next Statement) by writing to f_lineno.\n'
'\n'
' Traceback objects\n'
' Traceback objects represent a stack trace of an exception. '
'A\n'
' traceback object is created when an exception occurs. When '
'the\n'
' search for an exception handler unwinds the execution stack, '
'at\n'
' each unwound level a traceback object is inserted in front '
'of\n'
' the current traceback. When an exception handler is '
'entered,\n'
' the stack trace is made available to the program. (See '
'section\n'
' The try statement.) It is accessible as "sys.exc_traceback", '
'and\n'
' also as the third item of the tuple returned by\n'
' "sys.exc_info()". The latter is the preferred interface, '
'since\n'
' it works correctly when the program is using multiple '
'threads.\n'
' When the program contains no suitable handler, the stack '
'trace\n'
' is written (nicely formatted) to the standard error stream; '
'if\n'
' the interpreter is interactive, it is also made available to '
'the\n'
' user as "sys.last_traceback".\n'
'\n'
' Special read-only attributes: "tb_next" is the next level in '
'the\n'
' stack trace (towards the frame where the exception occurred), '
'or\n'
' "None" if there is no next level; "tb_frame" points to the\n'
' execution frame of the current level; "tb_lineno" gives the '
'line\n'
' number where the exception occurred; "tb_lasti" indicates '
'the\n'
' precise instruction. The line number and last instruction '
'in\n'
' the traceback may differ from the line number of its frame\n'
' object if the exception occurred in a "try" statement with '
'no\n'
' matching except clause or with a finally clause.\n'
'\n'
' Slice objects\n'
' Slice objects are used to represent slices when *extended '
'slice\n'
' syntax* is used. This is a slice using two colons, or '
'multiple\n'
' slices or ellipses separated by commas, e.g., "a[i:j:step]",\n'
' "a[i:j, k:l]", or "a[..., i:j]". They are also created by '
'the\n'
' built-in "slice()" function.\n'
'\n'
' Special read-only attributes: "start" is | |
- 2)):
for c in range(my_head["x"], width - 1):
if (snake_head_test(data, c, my_head["y"] + 1)):
test = (c, my_head["y"] + 1)
if (test not in hazards):
if ("left" not in preferred_moves_modified):
preferred_moves_modified.append("left")
if ("right" in possible_moves):
if (my_head["y"] == 1):
for c in range(0, my_head["x"] ):
if (snake_head_test(data, c, 0)):
test = (c, 0)
if (test not in hazards):
if ("right" not in preferred_moves_modified):
preferred_moves_modified.append("right")
if (my_head["y"] == (height - 2)):
for c in range(0, my_head["x"]):
if (snake_head_test(data, c, my_head["y"] + 1)):
test = (c, my_head["y"] + 1)
if (test not in hazards):
if ("right" not in preferred_moves_modified):
preferred_moves_modified.append("right")
if ("down" in possible_moves):
if (my_head["x"] == 1):
for c in range(my_head["y"] + 1, height - 1):
if (snake_head_test(data, 0, c)):
test = (0, c)
if (test not in hazards):
if ("down" not in preferred_moves_modified):
preferred_moves_modified.append("down")
if (my_head["x"] == (width - 2)):
for c in range(my_head["y"], height - 1):
if (snake_head_test(data, my_head["x"] + 1, c)):
test = (my_head["x"] + 1, c)
if (test not in hazards):
if ("down" not in preferred_moves_modified):
preferred_moves_modified.append("down")
if ("up" in possible_moves):
if (my_head["x"] == 1):
for c in range(0, my_head["y"]):
if (snake_head_test(data, 0, c)):
test = (0, c)
if (test not in hazards):
if ("up" not in preferred_moves_modified):
preferred_moves_modified.append("up")
if (my_head["x"] == (width - 2)):
for c in range(0, my_head["y"]):
if (snake_head_test(data, my_head["x"], c)):
test = (my_head["x"], c)
if (test not in hazards):
if ("up" not in preferred_moves_modified):
preferred_moves_modified.append("up")
if (len(preferred_moves_modified) > 0):
print("DEBUG: Attempting straight line kill of snake: {}".format(preferred_moves_modified))
return preferred_moves_modified
for pm in preferred_moves:
if pm == "down":
if (((my_head["y"] - 1) != 0) or (my_head["x"] != 0) or (my_head["x"] != (width - 1)) or (hungry == True)):
if ("down" not in preferred_moves_modified):
preferred_moves_modified.append("down")
if pm == "up":
if (((my_head["y"] + 1) != (height - 1)) or (my_head["x"] != 0) or (my_head["x"] != (width - 1)) or (hungry == True)):
if ("up" not in preferred_moves_modified):
preferred_moves_modified.append("up")
if pm == "left":
if (((my_head["x"] - 1) != 0) or (my_head["y"] != 0) or (my_head["y"] != (height - 1)) or (hungry == True)):
if ("left" not in preferred_moves_modified):
preferred_moves_modified.append("left")
if pm == "right":
if (((my_head["x"] - 1) != (width - 1)) or (my_head["y"] != 0) or (my_head["y"] != (height - 1)) or (hungry == True)):
if ("right" not in preferred_moves_modified):
preferred_moves_modified.append("right")
return preferred_moves_modified
def which_directions_are_away_from_snake_heads(my_head, snake_heads, data):
retval = []
for sh in snake_heads:
if (is_snake_longer_than_me(data, sh)):
x = my_head["x"] - sh[0]
if (x > 0):
if ("right" not in snake_heads):
retval.append("right")
if (x < 0):
if ("left" not in snake_heads):
retval.append("left")
y = my_head["y"] - sh[1]
if (y < 0):
if ("down" not in snake_heads):
retval.append("down")
if (y > 0):
if ("up" not in snake_heads):
retval.append("up")
return retval
def get_risk_score(move, risk_moves):
risk_score = 999999
for lrm in risk_moves:
if (lrm[0] == move):
risk_score = lrm[1]
break
return risk_score
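# Illustrative only (not part of the original bot): a tiny, hypothetical check
# of get_risk_score's lookup behaviour with a made-up risk_moves list.
def _get_risk_score_example():
    sample_risk_moves = [("up", 0.0), ("left", 2.5)]
    # Known moves return their recorded risk score.
    assert get_risk_score("left", sample_risk_moves) == 2.5
    # Unknown moves fall back to the large sentinel value.
    assert get_risk_score("down", sample_risk_moves) == 999999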
def get_directions_of_my_tail(my_head, my_tail, possible_moves):
directions = []
x = my_head["x"] - my_tail["x"]
y = my_head["y"] - my_tail["y"]
if (x > 0):
if ("left" in possible_moves):
directions.append("left")
if (x < 0):
if ("right" in possible_moves):
directions.append("right")
if (y > 0):
if ("up" in possible_moves):
directions.append("up")
if (y < 0):
if ("down" in possible_moves):
directions.append("down")
print("DEBUG: directions of my tail: {}".format(directions))
return directions
def validate_direction(move, matrix, risk_moves, ff_moves, ff_moves_no_tails, data, tail_moves, hungry):
good_direction = None
my_head = data["you"]["body"][0]
my_size = len(data["you"]["body"])
tails = get_snake_array(-1, data)
risk_score = get_risk_score(move, risk_moves)
if (risk_score > 0.0):
good_direction = check_ff_size(move, ff_moves, my_size)
if (good_direction == None):
good_direction = check_ff_size(move, ff_moves_no_tails, my_size)
if (good_direction == None):
print("DEBUG: validate_direction: floodfill size with and without tails NOT sufficient: {}".format(move))
cp = check_for_clear_path(matrix, move, my_head["x"], my_head["y"], tails)
if (cp == True):
good_direction = move
print("DEBUG: validate_direction: found a clear path to a tail: {}".format(move))
else:
print("DEBUG: validate_direction: no clear path to a tail: {}".format(move))
if (move in tail_moves and hungry == False):
good_direction = move
print("DEBUG: validate_direction: no clear path, but a tail move: {}".format(move))
else:
good_direction = check_ff_size(move, ff_moves, my_size)
if (good_direction != None):
print("DEBUG: validate_direction: risk score is zero: {}".format(move))
else:
print("DEBUG: validate_direction: risk score is zero, but not enough room: {}".format(move))
cp = check_for_clear_path(matrix, move, my_head["x"], my_head["y"], tails)
if (cp == True):
good_direction = move
print("DEBUG: validate_direction: risk score zero, found a clear path to a tail: {}".format(move))
if (good_direction != None):
bad_move = check_for_bad_move(move, my_head["x"], my_head["y"], get_snake_array(0, data), data)
if (bad_move == True):
print("DEBUG: validate_direction: Determined BAD move: {}".format(move))
good_direction = None
return good_direction
# make_decision: logic to pick the desired move of the snake
# preferred_moves: array of the preffered directions to move to get to target
# last_ditch_possible_moves: array of possible moves before they have been filtered to use as last resort
# risk_moves: array of riskiest moves sorted least to most
# ff_moves: array of flood fill moves sorted best to worst
# my_size: length of my snake
# returns: final direction to move
def make_decision(preferred_moves, possible_moves, last_ditch_possible_moves, risk_moves, ff_moves, ff_moves_no_tails, my_size, data, m, snake_heads, snake_tails, hungry):
# final decision
direction = None
my_head = data["you"]["body"][0]
my_size = len(data["you"]["body"])
my_tail = data["you"]["body"][my_size-1]
preferred_moves_modified = modify_preferred_moves(preferred_moves, possible_moves, data, hungry)
print("DEBUG: Modified Preferred Moves: {}".format(preferred_moves_modified))
for pmm in preferred_moves_modified:
if pmm not in preferred_moves:
print("DEBUG: Adjusting preferred move risk: {}".format(pmm))
risk_moves.append((pmm, -1.0))
tail_moves = is_move_a_tail(my_head, data["board"]["snakes"], my_size)
if (my_size > 3):
for tm in tail_moves:
if tm not in possible_moves:
possible_moves.append(tm)
if tm not in extract_1(risk_moves):
risk_moves.append((tm, 0.0))
if tm not in extract_1(ff_moves):
ff_moves.append((tm, 999999))
print("DEBUG: Tail Moves!: {}".format(tail_moves))
# preferred direction
ordered_preferred = get_common_elements(extract_1(risk_moves), preferred_moves_modified)
away_from_heads = which_directions_are_away_from_snake_heads(my_head, get_snake_array(0, data), data)
directions_of_my_tail = get_directions_of_my_tail(my_head, my_tail, possible_moves)
print("DEBUG: Directions away snake heads = {}".format(away_from_heads))
ordered_preferred_refined = get_common_elements(ordered_preferred, away_from_heads)
# ordered_preferred_refined = get_common_elements(ordered_preferred_refined, directions_of_my_tail)
for op in ordered_preferred_refined:
temp_direction = validate_direction(op, m, risk_moves, ff_moves, ff_moves_no_tails, data, tail_moves, hungry)
if (temp_direction != None):
risk_score = get_risk_score(temp_direction, risk_moves)
if (risk_score <= 3.0):
direction = temp_direction
print("DEBUG: Preferred direction GOOD = {}".format(temp_direction))
break
if (direction == None):
for rm in risk_moves:
temp_direction = validate_direction(rm[0], m, risk_moves, ff_moves, ff_moves_no_tails, data, tail_moves, hungry)
if (temp_direction != None):
direction = temp_direction
print("DEBUG: Least risk direction GOOD = {}".format(temp_direction))
break
else:
print("DEBUG: Least risk direction NOT GOOD = {}".format(temp_direction))
if (direction == None):
for pm in possible_moves:
if pm in tail_moves:
if (hungry == False):
print("DEBUG: Simply taking tail move = {}".format(pm))
direction = pm
break
if (direction == None):
for ff in ff_moves_no_tails:
print("DEBUG: Simply taking largest ff = {}".format(ff[0]))
direction = ff[0]
break
if (direction == None):
for dt in directions_of_my_tail:
print("DEBUG: Direction of my tail = {}".format(dt[0]))
direction = dt[0]
break
if (direction == None):
for domt in last_ditch_possible_moves:
direction = domt
print("DEBUG: Last ditch direction = {}".format(direction))
break
# in trouble now - pick a random direction
if (direction == None):
direction = random.choice(["left", "right", "up", "down"])
print("DEBUG: No options left - choose RANDOM direction: {}".format(direction))
print("DEBUG: Direction={}".format(direction))
return direction
@bottle.post('/move')
def move():
data = bottle.request.json
print(json.dumps(data))
# Make a list of all the bad coordinates and try to avoid them
height = data["board"]["height"]
width = data["board"]["width"]
# build list of bad coordinates
bad_coords = populate_bad_coords(width, height)
# build list of all snake coordinates on the board
snakes = data["board"]["snakes"]
snake_coords = populate_snake_coords(data, False)
snake_coords_no_tails = populate_snake_coords(data, True)
num_snakes = len(snakes)
# obtain information about my snake
my_size = len(data["you"]["body"])
my_health = data["you"]["health"]
my_head = data["you"]["body"][0]
my_tail = data["you"]["body"][my_size-1]
# snake bodies
snake_heads = get_snake_array(0, data)
snake_tails = get_snake_array(-1, data)
# get details on the shortest snake on the board
shortest_snake = get_shortest_snake(data)
shortest_length = len(shortest_snake)
# check if we have a longer snake on the board
longer_snake = is_there_a_longer_snake(data)
# get number of active snakes
number_of_active_snakes = len(data["board"]["snakes"])
hazards = get_hazard_array(data)
# specify health threshold to go get food
health_threshold = 30
amount_of_food = len(data["board"]["food"])
if (amount_of_food > 10):
if (num_snakes > 2):
health_threshold = 5
hungry =
repository.targets.add_verification_key(new_targets_public_key)
repository.timestamp.remove_verification_key(old_timestamp_public_key)
repository.timestamp.add_verification_key(new_timestamp_public_key)
repository.snapshot.remove_verification_key(old_snapshot_public_key)
repository.snapshot.add_verification_key(new_snapshot_public_key)
# Unload the old signing keys so that the new metadata only contains
# signatures produced by the new signing keys. Since this is based on
# keyid, the public key can be used.
repository.targets.unload_signing_key(old_targets_public_key)
repository.snapshot.unload_signing_key(old_snapshot_public_key)
repository.timestamp.unload_signing_key(old_timestamp_public_key)
# Load the new signing keys to write metadata. The root key is unchanged,
# and in the demo it is already loaded.
repository.targets.load_signing_key(new_targets_private_key)
repository.snapshot.load_signing_key(new_snapshot_private_key)
repository.timestamp.load_signing_key(new_timestamp_private_key)
# The root role is not automatically marked as dirty when the verification
# keys are updated via repository.<non-root-role>.add_verification_key().
# TODO: Verify this behavior with the latest version of the TUF codebase.
repository.mark_dirty(['root'])
# Push the changes to "live".
write_to_live()
def sign_with_compromised_keys_attack(vin=None):
"""
<Purpose>
Re-generate Timestamp, Snapshot, and Targets metadata for all vehicles and
sign each of these roles with its previously revoked key. The revoked keys
are loaded from the demo key files using their default names ('director',
'directorsnapshot', 'directortimestamp'). The director service instance is
also updated to use these old keys while the attack is active.
<Arguments>
vin (optional)
If not provided, all known vehicles will be attacked. You may also provide
a single VIN (string) indicating one vehicle to attack.
<Side Effects>
None.
<Exceptions>
None.
<Returns>
None.
"""
global director_service_instance
print(LOG_PREFIX + 'ATTACK: arbitrary metadata, old key, all vehicles')
# Start by backing up the repository before the attack occurs so that we
# can restore it afterwards in undo_sign_with_compromised_keys_attack.
backup_repositories(vin)
# Load the now-revoked keys.
old_targets_private_key = demo.import_private_key('director')
old_timestamp_private_key = demo.import_private_key('directortimestamp')
old_snapshot_private_key = demo.import_private_key('directorsnapshot')
current_targets_private_key = director_service_instance.key_dirtarg_pri
current_timestamp_private_key = director_service_instance.key_dirtime_pri
current_snapshot_private_key = director_service_instance.key_dirsnap_pri
# Ensure the director service uses the old (now-revoked) keys.
director_service_instance.key_dirtarg_pri = old_targets_private_key
director_service_instance.key_dirtime_pri = old_timestamp_private_key
director_service_instance.key_dirsnap_pri = old_snapshot_private_key
repo_dir = None
if vin is None:
vehicles_to_attack = director_service_instance.vehicle_repositories.keys()
else:
vehicles_to_attack = [vin]
for vin in vehicles_to_attack:
repository = director_service_instance.vehicle_repositories[vin]
repo_dir = repository._repository_directory
repository.targets.unload_signing_key(current_targets_private_key)
repository.snapshot.unload_signing_key(current_snapshot_private_key)
repository.timestamp.unload_signing_key(current_timestamp_private_key)
# Load the old signing keys to generate the malicious metadata. The root
# key is unchanged, and in the demo it is already loaded.
repository.targets.load_signing_key(old_targets_private_key)
repository.snapshot.load_signing_key(old_snapshot_private_key)
repository.timestamp.load_signing_key(old_timestamp_private_key)
repository.targets.version = repository.targets.version + 1
repository.snapshot.version = repository.snapshot.version + 1
repository.timestamp.version = repository.timestamp.version + 1
# Metadata must be written with write_partial, otherwise write() will raise
# an UnsignedMetadata exception due to the invalid signing keys (i.e., we are
# using the old signing keys, which have since been revoked).
repository.write(write_partial=True)
# Copy the staged metadata to a temp directory we'll move into place
# atomically in a moment.
shutil.copytree(os.path.join(repo_dir, 'metadata.staged'),
os.path.join(repo_dir, 'metadata.livetemp'))
# Empty the existing (old) live metadata directory (relatively fast).
if os.path.exists(os.path.join(repo_dir, 'metadata')):
shutil.rmtree(os.path.join(repo_dir, 'metadata'))
# Atomically move the new metadata into place.
os.rename(os.path.join(repo_dir, 'metadata.livetemp'),
os.path.join(repo_dir, 'metadata'))
print(LOG_PREFIX + 'COMPLETED ATTACK')
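# Example (hypothetical interactive use; assumes the demo Director has already
# been initialized and that '111111' is a VIN known to this Director):
#
#   >>> sign_with_compromised_keys_attack(vin='111111')
#   >>> undo_sign_with_compromised_keys_attack(vin='111111')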
def undo_sign_with_compromised_keys_attack(vin=None):
"""
<Purpose>
Undo the actions executed by sign_with_compromised_keys_attack(). Namely,
move the valid metadata into the live and metadata.staged directories, and
reload the valid keys for each repository.
<Arguments>
vin (optional)
If not provided, all known vehicles will be reverted to normal state from
attacked state. You may also provide a single VIN (string) indicating
one vehicle to undo the attack for.
<Side Effects>
None.
<Exceptions>
None.
<Returns>
None.
"""
# Re-load the valid keys, so that the repository objects can be updated to
# reference them and replace the compromised keys set.
valid_targets_private_key = demo.import_private_key('new_director')
valid_timestamp_private_key = demo.import_private_key('new_directortimestamp')
valid_snapshot_private_key = demo.import_private_key('new_directorsnapshot')
current_targets_private_key = director_service_instance.key_dirtarg_pri
current_timestamp_private_key = director_service_instance.key_dirtime_pri
current_snapshot_private_key = director_service_instance.key_dirsnap_pri
# Set the new private keys in the director service. These keys are shared
# between all vehicle repositories.
director_service_instance.key_dirtarg_pri = valid_targets_private_key
director_service_instance.key_dirtime_pri = valid_timestamp_private_key
director_service_instance.key_dirsnap_pri = valid_snapshot_private_key
# Revert to the last backup for all metadata in the Director repositories.
restore_repositories(vin)
if vin is None:
vehicles_to_attack = director_service_instance.vehicle_repositories.keys()
else:
vehicles_to_attack = [vin]
for vin in vehicles_to_attack:
repository = director_service_instance.vehicle_repositories[vin]
repo_dir = repository._repository_directory
# Load the new signing keys to write metadata.
repository.targets.load_signing_key(valid_targets_private_key)
repository.snapshot.load_signing_key(valid_snapshot_private_key)
repository.timestamp.load_signing_key(valid_timestamp_private_key)
print(LOG_PREFIX + 'COMPLETED UNDO ATTACK')
def add_target_to_director(target_fname, filepath_in_repo, vin, ecu_serial):
"""
For use in attacks and more specific demonstrations.
Given the filename of the file to add, the path relative to the repository
root to which to copy it, the VIN of the vehicle whose repository it should
be added to, and the serial of the ECU the file is assigned to, adds that
file as a target file (calculating its cryptographic hash and length) to the
appropriate repository for the given VIN.
<Arguments>
target_fname
The full filename of the file to be added as a target to the Director's
targets role metadata. This file doesn't have to be in any particular
place; it will be copied into the repository directory structure.
filepath_in_repo
The path relative to the root of the repository's targets directory
where this file will be kept and accessed by clients. (e.g. 'file1.txt'
or 'brakes/firmware.tar.gz')
vin
The VIN of the vehicle to whose Director repository the target is added.
Complies with uptane.formats.VIN_SCHEMA
ecu_serial
The ECU to assign this target to in the targets metadata.
Complies with uptane.formats.ECU_SERIAL_SCHEMA
"""
uptane.formats.VIN_SCHEMA.check_match(vin)
uptane.formats.ECU_SERIAL_SCHEMA.check_match(ecu_serial)
tuf.formats.RELPATH_SCHEMA.check_match(target_fname)
tuf.formats.RELPATH_SCHEMA.check_match(filepath_in_repo)
if vin not in director_service_instance.vehicle_repositories:
raise uptane.UnknownVehicle('The VIN provided, ' + repr(vin) + ' is not '
'that of a vehicle known to this Director.')
repo = director_service_instance.vehicle_repositories[vin]
repo_dir = repo._repository_directory
print(LOG_PREFIX + 'Copying target file into place.')
destination_filepath = os.path.join(repo_dir, 'targets', filepath_in_repo)
# TODO: This should probably place the file into a common targets directory
# that is then softlinked to all repositories.
shutil.copy(target_fname, destination_filepath)
print(LOG_PREFIX + 'Adding target ' + repr(target_fname) + ' for ECU ' +
repr(ecu_serial))
# This calls the appropriate vehicle repository.
director_service_instance.add_target_for_ecu(
vin, ecu_serial, destination_filepath)
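# Example (hypothetical values; the VIN and ECU serial must already be known to
# this Director and the source file must exist):
#
#   >>> add_target_to_director(
#   ...     '/tmp/firmware.img', 'firmware.img', '111111', 'TCUdemocar')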
def host():
"""
Hosts the Director repository (http serving metadata files) as a separate
process. Should be stopped with kill_server().
Note that you must also run listen() to start the Director services (run on
xmlrpc).
If this module already started a server process to host the repo, nothing will
be done.
"""
global repo_server_process
if repo_server_process is not None:
print(LOG_PREFIX + 'Sorry: there is already a server process running.')
return
# Prepare to host the director repo contents.
os.chdir(demo.DIRECTOR_REPO_DIR)
command = []
if sys.version_info.major < 3: # Python 2 compatibility
command = ['python', '-m', 'SimpleHTTPServer', str(demo.DIRECTOR_REPO_PORT)]
else:
command = ['python3', '-m', 'http.server', str(demo.DIRECTOR_REPO_PORT)]
# Begin hosting the director's repository.
repo_server_process = subprocess.Popen(command, stderr=subprocess.PIPE)
os.chdir(uptane.WORKING_DIR)
print(LOG_PREFIX + 'Director repo server process started, with pid ' +
str(repo_server_process.pid) + ', serving on port ' +
str(demo.DIRECTOR_REPO_PORT) + '. Director repo URL is: ' +
demo.DIRECTOR_REPO_HOST + ':' + str(demo.DIRECTOR_REPO_PORT) + '/')
# Kill the server process automatically when this process exits.
atexit.register(kill_server)
# Wait / allow any exceptions to kill the server.
# try:
# time.sleep(1000000) # Stop hosting after a while.
# except:
# print('Exception caught')
# pass
# finally:
# if repo_server_process.returncode is None:
# print('Terminating Director repo server process ' + str(repo_server_process.pid))
# repo_server_process.kill()
# Restrict director requests to a particular path.
# Must specify RPC2 here for the XML-RPC interface to work.
class RequestHandler(xmlrpc_server.SimpleXMLRPCRequestHandler):
rpc_paths = ('/RPC2',)
def register_vehicle_manifest_wrapper(
vin, primary_ecu_serial, signed_vehicle_manifest):
"""
This function is a wrapper for director.Director::register_vehicle_manifest().
The purpose of this wrapper is to make sure that the data that goes to
director.register_vehicle_manifest is what is expected.
In the demo, there are two scenarios:
- If we're using ASN.1/DER, then the vehicle manifest is a binary object
and signed_vehicle_manifest had to be wrapped in an XMLRPC Binary()
object. The reference implementation has no notion of XMLRPC (and should
not), so the vehicle manifest has to be extracted from the XMLRPC Binary()
object that is signed_vehicle_manifest in this case.
- If we're using any other data format / encoding (e.g. JSON), then the
vehicle manifest was transferred as an object that the reference
implementation can already understand, and we just pass the argument
along to the director module.
"""
if tuf.conf.METADATA_FORMAT == 'der':
director_service_instance.register_vehicle_manifest(
vin, primary_ecu_serial, signed_vehicle_manifest.data)
else:
director_service_instance.register_vehicle_manifest(
vin, primary_ecu_serial, signed_vehicle_manifest)
def listen():
"""
Listens on DIRECTOR_SERVER_PORT for xml-rpc calls to functions:
- submit_vehicle_manifest
- register_ecu_serial
Note that you must also run host() in order to serve the metadata files
,
u'絞' : [u'x', u'j'] ,
u'䇡' : [u'z'] ,
u'烫' : [u't'] ,
u'鉬' : [u'm'] ,
u'啮' : [u'n'] ,
u'㿵' : [u'r'] ,
u'棻' : [u'f'] ,
u'詼' : [u'h'] ,
u'䵾' : [u'f'] ,
u'粈' : [u'r'] ,
u'茍' : [u'j'] ,
u'䘏' : [u'x'] ,
u'醖' : [u'y'] ,
u'甙' : [u'd'] ,
u'咘' : [u'b'] ,
u'覦' : [u'y'] ,
u'洩' : [u'x'] ,
u'䲨' : [u'h'] ,
u'箲' : [u'x'] ,
u'舷' : [u'x'] ,
u'䔹' : [u's'] ,
u'郀' : [u'k'] ,
u'参' : [u'c', u's', u'd'] ,
u'裐' : [u'j'] ,
u'汓' : [u'q'] ,
u'䯒' : [u'h'] ,
u'竜' : [u'l'] ,
u'腡' : [u'l', u'g'] ,
u'獭' : [u't'] ,
u'勬' : [u'j'] ,
u'蟺' : [u'c', u's', u'd', u't'] ,
u'歽' : [u'z'] ,
u'䫼' : [u'x'] ,
u'傁' : [u's'] ,
u'鮇' : [u'w'] ,
u'搎' : [u's'] ,
u'䢑' : [u'd'] ,
u'尞' : [u'l'] ,
u'斣' : [u'c'] ,
u'蜤' : [u's'] ,
u'礰' : [u'l'] ,
u'嶳' : [u'd'] ,
u'㨲' : [u'c'] ,
u'蒹' : [u'j'] ,
u'兀' : [u'w'] ,
u'盅' : [u'c', u'z'] ,
u'㯇' : [u'b'] ,
u'顆' : [u'k'] ,
u'䥐' : [u'm'] ,
u'滕' : [u't'] ,
u'駛' : [u's'] ,
u'扢' : [u'x', u'g'] ,
u'䛥' : [u'x'] ,
u'揷' : [u'c'] ,
u'蕸' : [u'x'] ,
u'送' : [u's'] ,
u'瞄' : [u'm'] ,
u'㢆' : [u'c'] ,
u'䄋' : [u'y'] ,
u'蠑' : [u'r'] ,
u'澔' : [u'h'] ,
u'隚' : [u't'] ,
u'稝' : [u'p'] ,
u'㼟' : [u'b'] ,
u'䞤' : [u'q'] ,
u'躪' : [u'l'] ,
u'刭' : [u'j'] ,
u'鴳' : [u'y'] ,
u'悶' : [u'm'] ,
u'䨽' : [u'f'] ,
u'壆' : [u'b'] ,
u'菌' : [u'j'] ,
u'杏' : [u'x'] ,
u'痘' : [u'd'] ,
u'㛚' : [u't'] ,
u'彟' : [u'h'] ,
u'虥' : [u'z'] ,
u'淨' : [u'j'] ,
u'铮' : [u'z'] ,
u'硱' : [u'k'] ,
u'㵳' : [u'l'] ,
u'賾' : [u'z'] ,
u'隃' : [u'y'] ,
u'焂' : [u's'] ,
u'喅' : [u'y'] ,
u'躓' : [u'z'] ,
u'椒' : [u'j'] ,
u'䶕' : [u'p'] ,
u'㞙' : [u'n'] ,
u'粟' : [u's'] ,
u'舠' : [u'd'] ,
u'䄢' : [u'q'] ,
u'醭' : [u'p', u'b'] ,
u'瀬' : [u'l'] ,
u'咯' : [u'k', u'l', u'g'] ,
u'覽' : [u'l'] ,
u'格' : [u'g'] ,
u'䲿' : [u'c'] ,
u'㛃' : [u'j'] ,
u'翉' : [u'p'] ,
u'鵊' : [u'j'] ,
u'䁌' : [u'y'] ,
u'郗' : [u'x', u'c'] ,
u'獖' : [u'b', u'f'] ,
u'埙' : [u'x'] ,
u'裧' : [u'c'] ,
u'武' : [u'w'] ,
u'俩' : [u'l'] ,
u'绳' : [u'y', u's', u'm'] ,
u'鱴' : [u'm'] ,
u'䍶' : [u'd'] ,
u'犀' : [u'x'] ,
u'锅' : [u'g'] ,
u'堇' : [u'j'] ,
u'檐' : [u'y', u'd'] ,
u'贕' : [u'd'] ,
u'㨛' : [u's'] ,
u'龞' : [u'b'] ,
u'挡' : [u'd', u't'] ,
u'䊠' : [u'x'] ,
u'鐯' : [u'z'] ,
u'嬱' : [u'c'] ,
u'㲴' : [u'z'] ,
u'斺' : [u'x'] ,
u'谿' : [u'q', u'x'] ,
u'㕅' : [u'j'] ,
u'黈' : [u't'] ,
u'手' : [u's'] ,
u'巊' : [u'y'] ,
u'静' : [u'j'] ,
u'婛' : [u'j'] ,
u'㿞' : [u'm'] ,
u'擤' : [u'x'] ,
u'轩' : [u'x'] ,
u'㑯' : [u'x'] ,
u'絵' : [u'h'] ,
u'峴' : [u'x'] ,
u'稆' : [u'l'] ,
u'庉' : [u'd'] ,
u'㼈' : [u'l'] ,
u'薏' : [u'y'] ,
u'刖' : [u'y'] ,
u'箛' : [u'g'] ,
u'餜' : [u'g'] ,
u'䨦' : [u'p'] ,
u'厫' : [u'a'] ,
u'骱' : [u'x', u'j', u'g'] ,
u'朸' : [u'l'] ,
u'䮻' : [u'c', u'd'] ,
u'彈' : [u'd', u't'] ,
u'惍' : [u'j'] ,
u'虎' : [u'h'] ,
u'瑚' : [u'h'] ,
u'壝' : [u'w'] ,
u'㥜' : [u'w'] ,
u'蟣' : [u'q', u'j'] ,
u'汪' : [u'h', u'w'] ,
u'痯' : [u'g'] ,
u'㫱' : [u'n'] ,
u'魰' : [u'w'] ,
u'淿' : [u'm'] ,
u'圃' : [u'p'] ,
u'鸉' : [u'y'] ,
u'憌' : [u'c'] ,
u'伓' : [u'b'] ,
u'妜' : [u'y'] ,
u'肢' : [u's', u'z'] ,
u'搥' : [u'c'] ,
u'錫' : [u'x', u't'] ,
u'皮' : [u'p'] ,
u'㮰' : [u'p', u'b'] ,
u'尵' : [u't'] ,
u'謻' : [u'y', u'c'] ,
u'溾' : [u'w'] ,
u'闄' : [u'y'] ,
u'祇' : [u'q', u'c', u'z'] ,
u'㹉' : [u'y'] ,
u'䛎' : [u'y', u'h', u'x'] ,
u'跔' : [u'j'] ,
u'兗' : [u'y'] ,
u'顝' : [u'k'] ,
u'揠' : [u'y'] ,
u'䥧' : [u'h', u'x'] ,
u'寰' : [u'h'] ,
u'苶' : [u'n'] ,
u'晹' : [u'y'] ,
u'蒋' : [u'j'] ,
u'笊' : [u'z'] ,
u'䞍' : [u'q'] ,
u'皗' : [u'c'] ,
u'逘' : [u'y'] ,
u'匚' : [u'f'] ,
u'溧' : [u'l'] ,
u'蠨' : [u'x'] ,
u'䬪' : [u'b', u'f'] ,
u'鮵' : [u'd'] ,
u'稴' : [u'l'] ,
u'䚷' : [u'y', u'x'] ,
u'痁' : [u's', u'd'] ,
u'靂' : [u'l'] ,
u'剄' : [u'j'] ,
u'淑' : [u's', u'c'] ,
u'轒' : [u'f'] ,
u'䩔' : [u'd'] ,
u'髟' : [u'p', u's', u'b'] ,
u'神' : [u's'] ,
u'䗡' : [u'y'] ,
u'瓫' : [u'p'] ,
u'陬' : [u'z'] ,
u'兮' : [u'x'] ,
u'泻' : [u'x'] ,
u'蹼' : [u'p'] ,
u'䥾' : [u'x'] ,
u'㞂' : [u't'] ,
u'碈' : [u'm'] ,
u'蜍' : [u'y', u'c', u's'] ,
u'䈏' : [u'b'] ,
u'焙' : [u'b'] ,
u'傘' : [u's'] ,
u'趦' : [u'z'] ,
u'羲' : [u'x'] ,
u'蘷' : [u'k'] ,
u'䄹' : [u't', u'n'] ,
u'铀' : [u'y'] ,
u'灃' : [u'f'] ,
u'埂' : [u'g'] ,
u'賐' : [u'x'] ,
u'桓' : [u'h'] ,
u'俒' : [u'h'] ,
u'㗖' : [u'd'] ,
u'络' : [u'l'] ,
u'蕡' : [u'f'] ,
u'䁣' : [u'c'] ,
u'睭' : [u'z'] ,
u'囬' : [u'h'] ,
u'菺' : [u'j'] ,
u'潽' : [u'p'] ,
u'仼' : [u'w'] ,
u'㔀' : [u'q', u'l'] ,
u'龇' : [u'z'] ,
u'怎' : [u'z'] ,
u'䲑' : [u'y'] ,
u'堞' : [u'd'] ,
u'憣' : [u'f'] ,
u'茤' : [u'j', u'd'] ,
u'細' : [u'x'] ,
u'妳' : [u'n'] ,
u'㸲' : [u'z'] ,
u'肹' : [u'x', u'b'] ,
u'啀' : [u'a'] ,
u'狅' : [u'q'] ,
u'㿇' : [u'x'] ,
u'鱆' : [u'z'] ,
u'䵐' : [u'c'] ,
u'櫕' : [u'c'] ,
u'鷛' : [u'y', u'r'] ,
u'晢' : [u'x', u'z'] ,
u'䋥' : [u'l'] ,
u'干' : [u'h', u'g'] ,
u'柷' : [u'z'] ,
u'腸' : [u'c'] ,
u'鐁' : [u's'] ,
u'玄' : [u'x'] ,
u'㲆' : [u'l'] ,
u'谑' : [u'x', u'n'] ,
u'殔' : [u'y'] ,
u'銚' : [u'y', u'q', u'd', u't'] ,
u'縝' : [u'c', u'z'] ,
u'䎤' : [u'c', u'j'] ,
u'說' : [u'y', u's', u't'] ,
u'嘭' : [u'p'] ,
u'餳' : [u'x', u't'] ,
u'丽' : [u'l'] ,
u'㑁' : [u'z'] ,
u'峆' : [u'h'] ,
u'蟌' : [u'c'] ,
u'捏' : [u'n'] ,
u'燘' : [u'm'] ,
u'孟' : [u'm'] ,
u'艥' : [u'j'] ,
u'槨' : [u'g'] ,
u'籱' : [u'z'] ,
u'㥳' : [u'y'] ,
u'䇸' : [u'c', u'z', u's'] ,
u'裾' : [u'j'] ,
u'銃' : [u'c'] ,
u'甂' : [u'p', u'b'] ,
u'内' : [u'n'] ,
u'誓' : [u's'] ,
u'洒' : [u'x', u's', u'c'] ,
u'碟' : [u's', u'd'] ,
u'蘠' : [u'q'] ,
u'䔢' : [u'h'] ,
u'閭' : [u'l'] ,
u'琬' : [u'w'] ,
u'傯' : [u'z'] ,
u'趽' : [u'f'] ,
u'氼' : [u'n'] ,
u'築' : [u'z'] ,
u'饊' : [u's'] ,
u'䑌' : [u'l'] ,
u'铗' : [u'j'] ,
u'睖' : [u'l'] ,
u'叙' : [u'x'] ,
u'賧' : [u't'] ,
u'潦' : [u'l'] ,
u'竳' : [u'd'] ,
u'顴' : [u'q'] ,
u'䝶' : [u'l'] ,
u'皀' : [u'b', u'j'] ,
u'鄅' : [u'y'] ,
u'將' : [u'q', u'j'] ,
u'㦊' : [u'h'] ,
u'源' : [u'y'] ,
u'褕' : [u'y'] ,
u'㸛' : [u's'] ,
u'鮞' : [u'e'] ,
u'朡' : [u'z'] ,
u'䚠' : [u'h'] ,
u'逯' : [u'l', u'd'] ,
u'弱' : [u'r'] ,
u'憺' : [u'd'] ,
u'蠿' : [u'z'] ,
u'髈' : [u'p', u'b'] ,
u'晋' : [u'j'] ,
u'姊' : [u'z'] ,
u'鍙' : [u'h'] ,
u'幛' : [u'z'] ,
u'㯞' : [u'y', u'j'] ,
u'惤' : [u'j'] ,
u'譩' : [u'y'] ,
u'鷲' : [u'j'] ,
u'祵' : [u'h', u'k'] ,
u'壴' : [u'z'] ,
u'縆' : [u'g'] ,
u'媉' : [u'w'] ,
u'膏' : [u'g'] ,
u'嘖' : [u'z'] ,
u'羛' : [u'y', u'x'] ,
u'㲝' : [u'r'] ,
u'鴜' : [u'z'] ,
u'並' : [u'b'] ,
u'垫' : [u'd'] ,
u'麱' : [u'f'] ,
u'挸' : [u'j'] ,
u'侻' : [u't'] ,
u'孈' : [u's'] ,
u'操' : [u'c'] ,
u'艎' : [u'h'] ,
u'灚' : [u'j'] ,
u'峝' : [u't', u'd'] ,
u'菣' : [u'q'] ,
u'桪' : [u'x'] ,
u'燯' : [u'l'] ,
u'齰' : [u'c', u'z'] ,
u'䁺' : [u's'] ,
u'槿' : [u'q', u'j'] ,
u'匃' : [u'g'] ,
u'騉' : [u'k'] ,
u'斌' : [u'b'] ,
u'䬓' : [u'a'] ,
u'㔗' : [u'f'] ,
u'嶜' : [u'j'] ,
u'蒢' : [u'c'] ,
u'急' : [u'j'] ,
u'霫' : [u'x'] ,
u'犮' : [u'b'] ,
u'㾰' : [u'h'] ,
u'堵' : [u'd'] ,
u'輻' : [u'f'] ,
u'檾' : [u'q', u'j'] ,
u'釄' : [u'm'] ,
u'絇' : [u'q', u'j'] ,
u'㩉' : [u'x'] ,
u'䋎' : [u'z'] ,
u'觔' : [u'j'] ,
u'啗' : [u'd'] ,
u'鱝' : [u'f'] ,
u'柠' : [u'n'] ,
u'䵧' : [u'z'] ,
u'㝫' : [u'l'] ,
u'忰' : [u'c'] ,
u'蛶' : [u'l'] ,
u'批' : [u'p'] ,
u'㸄' : [u'j'] ,
u'袋' : [u'd'] ,
u'眊' : [u'm'] ,
u'䮍' : [u'x', u'n'] ,
u'窗' : [u'c'] ,
u'弚' : [u't'] ,
u'抧' : [u'z'] ,
u'萨' : [u's'] ,
u'䜪' : [u'q'] ,
u'㴮' : [u'x'] ,
u'鞵' : [u'x'] ,
u'瘴' : [u'z'] ,
u'䪷' : [u'h'] ,
u'私' : [u's'] ,
u'魂' : [u'h'] ,
u'幄' : [u'w'] ,
u'懑' : [u'm'] | |
"""
Plotting: Graphical representations of data extracted from datasets.
Plotting relies on `matplotlib <https://matplotlib.org/>`_, and mainly its
object-oriented interface should be used for the actual plotting. Each
plotter contains references to the respective figure and axes created usually
by a call similar to::
fig, ax = matplotlib.pyplot.subplots()
For convenience, short hands for the :attr:`figure` and :attr:`axes`
properties of a plotter are available, named :attr:`fig` and :attr:`ax`,
respectively. For details on handling (own) figure and axes objects, see below.
Generally, two types of plotters can be distinguished:
* Plotters for handling single datasets
Shall be derived from :class:`aspecd.plotting.SinglePlotter`.
* Plotters for handling multiple datasets
Shall be derived from :class:`aspecd.plotting.MultiPlotter`.
In the first case, the plot is usually handled using the :meth:`plot` method
of the respective :obj:`aspecd.dataset.Dataset` object. Additionally,
those plotters always only operate on the data of a single dataset, and the
plot can easily be attached as a representation to the respective dataset.
Plotters handling single datasets should always inherit from the
:class:`aspecd.plotting.SinglePlotter` class.
In the second case, the plot is handled using the :meth:`plot` method of the
:obj:`aspecd.plotting.Plotter` object, and the datasets are stored as a list
within the plotter. As these plots span several datasets, there is no easy
connection between a single dataset and such a plot in the sense of
representations stored in datasets. Plotters handling multiple datasets should
always inherit from the :class:`aspecd.plotting.MultiPlotter` class.
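As a rough sketch (assuming existing dataset objects ``dataset``, ``dataset1``
and ``dataset2``; names chosen only for illustration), the two cases look
similar to::

    # single dataset: the dataset drives the plotting
    plotter = aspecd.plotting.SinglePlotter1D()
    dataset.plot(plotter)
    # multiple datasets: the plotter collects the datasets
    plotter = aspecd.plotting.MultiPlotter1D()
    plotter.datasets.append(dataset1)
    plotter.datasets.append(dataset2)
    plotter.plot()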
In a certain sense, there is a third type of plotters:
* Plotters consisting of more than one axes
Shall be derived from :class:`aspecd.plotting.CompositePlotter`.
In practice, however, these composite plotters will mostly behave like
plotters handling either single or multiple datasets. Generally,
these composite plotters will use other types of plotters to perform the
actual plot tasks. This modular approach allows for great flexibility.
Regardless of the type of plotter, **saving plots** is always done using
objects of the :class:`aspecd.plotting.Saver` class. The actual task of
saving a plot is as easy as calling the :meth:`save` method of a plotter
with a saver object as its argument.
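A minimal sketch of the saving step (file name chosen arbitrarily; assumes a
plotter that has already been used for plotting)::

    saver = aspecd.plotting.Saver()
    saver.filename = 'plot.pdf'
    plotter.save(saver)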
A note on array dimensions and axes
===================================
Something often quite confusing is the apparent inconsistency between the
order of array dimensions and the order of axes. While we are used to assign
axes in the order *x*, *y*, *z*, and assuming *x* to be horizontal,
*y* vertical (and *z* sticking out of the paper plane), arrays are usually
indexed row-first, column-second. That means, however, that if you simply
plot a 2D array in axes, your *first* dimension is along the *y* axis,
the *second* dimension along the *x* axis.
Therefore, as the axes of your datasets will always correspond to the array
dimensions of your data, in case of 2D plots you will need to *either* use
the information contained in the second axis object for your *x* axis label,
and the information from the first axis object for your *y* axis label,
*or* to transpose the data array.
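A quick illustration using plain matplotlib (array and shape chosen
arbitrarily, just to show which dimension ends up on which axis)::

    import numpy as np
    import matplotlib.pyplot as plt
    data = np.random.random([5, 10])  # 5 rows, 10 columns
    fig, ax = plt.subplots()
    ax.imshow(data)
    # The first array dimension (5) runs along the y axis,
    # the second array dimension (10) along the x axis.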
Another aspect to have in mind is the position of the origin. Usually,
in a Cartesian coordinate system, convention is to have the origin (0,
0) in the *lower left* of the axes (for the positive quadrant). However,
for images, convention is to have the corresponding (0, 0) pixel located in
the *upper left* edge of your image. Therefore, those plotting methods
dealing with images will usually *revert* the direction of your *y* axis.
Most probably, you will eventually have to check with real data and ensure
that the plotters plot data and axes in a consistent fashion.
Types of concrete plotters
==========================
The ASpecD framework comes with a series of concrete plotters included ready
to be used. As stated above, plotters can generally be divided into two
types: plotters operating on single datasets and plotters combining the data
of multiple datasets into a single figure.
Additionally, plotters can be categorised with regard to creating figures
consisting of a single or multiple axes. The latter are plotters inheriting
from the :class:`aspecd.plotting.CompositePlotter` class. These can be
thought of as templates for the other plotters to operate on, *i.e.* they
provide the axes for other plotters to display their results.
Concrete plotters for single datasets
-------------------------------------
* :class:`aspecd.plotting.SinglePlotter1D`
Basic line plots for single datasets, allowing to plot a series of
line-type plots, including (semi)log plots
* :class:`aspecd.plotting.SinglePlotter2D`
Basic 2D plots for single datasets, allowing to plot a series of 2D plots,
including contour plots and image-type display
* :class:`aspecd.plotting.SinglePlotter2DStacked`
Stacked plots of 2D data, converting a 2D display into a series of 1D line
plots stacked on top of each other.
* :class:`aspecd.plotting.SingleCompositePlotter`
Composite plotter for single datasets, allowing to plot different views of
one and the same datasets by using existing plotters for single datasets.
Concrete plotters for multiple datasets
---------------------------------------
* :class:`aspecd.plotting.MultiPlotter1D`
Basic line plots for multiple datasets, allowing to plot a series of
line-type plots, including (semi)log plots
* :class:`aspecd.plotting.MultiPlotter1DStacked`
Stacked line plots for multiple datasets, allowing to plot a series of
line-type plots, including (semi)log plots
Plotting to existing axes
=========================
Figure and axes properties of a plotter object will only be populated upon
calling the method :meth:`aspecd.plotting.Plotter.plot`, therefore by using
the :meth:`plot` method of the respective plotter class.
Furthermore, figure and axes properties will only be populated if both do not
exist already. Therefore, if you would like to use a plotter to plot to an
existing axis, set its figure and axes properties before calling the
:meth:`aspecd.plotting.Plotter.plot` method.
.. important::
If you do so, make sure to set *both*, figure and axes properties,
as failing to set a valid figure property will cause matplotlib to throw
exceptions.
A simple example may look like this::
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
plotter = aspecd.plotting.SinglePlotter1D()
plotter.figure = fig
plotter.axes = ax
plotter.plot()
In this case, the plotter will plot to the axis specified before calling its
:meth:`plot` method. Thus, it should be straightforward to write plotter
classes that create complex plots consisting of several subplots by reusing
available plotter classes. This is what the
:class:`aspecd.plotting.CompositePlotter` class is for, and how it basically
works.
Module API documentation
========================
"""
import copy
import logging
import os
import matplotlib as mpl
# pylint: disable=unused-import
import matplotlib.collections
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import aspecd.dataset
import aspecd.exceptions
import aspecd.history
import aspecd.utils
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class Plotter:
"""Base class for plots.
Each class actually plotting data should inherit from this class.
Furthermore, all parameters, implicit and explicit, necessary to
perform the plot, should eventually be stored in the property
:attr:`parameters` (currently a dictionary).
A further thing that needs to be changed upon inheriting from this class
is the string stored in :attr:`description`, which is basically a one-liner.
The actual implementation of the plotting is done in the private method
:meth:`_create_plot` that in turn gets called by :meth:`plot`.
.. note::
Usually, you will never implement an instance of this class for
actual plotting tasks, but rather one of the child classes.
Attributes
----------
name : :class:`str`
Name of the plotter.
Defaults always to the full class name, don't change!
parameters : :class:`dict`
All parameters necessary for the plot, implicit and explicit
The following keys exist:
show_legend : :class:`bool`
Whether to show a legend in the plot
Default: False
show_zero_lines : :class:`bool`
Whether to show zero lines in the plot
Regardless of whether you set this to true, zero lines will only be
added to the final plot if the zero value is within the current
axes limits.
Zero line properties can be set via the
:attr:`aspecd.plotting.Plotter.properties` attribute.
Default: True
properties : :class:`aspecd.plotting.PlotProperties`
Properties of the plot, defining its appearance
description : :class:`str`
Short description, to be set in class definition
figure : :class:`matplotlib.figure.Figure`
Reference to figure object
axes : :class:`matplotlib.axes.Axes`
Reference to axes object used for actual plotting
filename : :class:`str`
Name of file to save the plot to
Actual saving is done using an :obj:`aspecd.plotting.Saver` object.
caption : :class:`aspecd.plotting.Caption`
User-supplied information for the figure.
legend : :class:`matplotlib.legend.Legend`
Legend object
style : :class:`str`
plotting style to use
You can use all plotting styles understood by matplotlib. See
:mod:`matplotlib.style` for details.
.. note::
If you set the style via :attr:`aspecd.plotting.Plotter.style`,
all following figures will use this style, until you set another style.
There seems to be no way in matplotlib to find out the currently active
style, and hence to reset to it. One way to fix this problem would be
to revert to the default style by
# coding: utf-8
# # Optimization Methods
#
# Until now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result.
#
# Gradient descent goes "downhill" on a cost function $J$. Think of it as trying to do this:
# <img src="images/cost.jpg" style="width:650px;height:300px;">
# <caption><center> <u> **Figure 1** </u>: **Minimizing the cost is like finding the lowest point in a hilly landscape**<br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption>
#
# **Notations**: As usual, $\frac{\partial J}{\partial a } = $ `da` for any variable `a`.
#
# To get started, run the following code to import the libraries you will need.
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from testCases import *
get_ipython().magic('matplotlib inline')
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# ## 1 - Gradient Descent
#
# A simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent.
#
# **Warm-up exercise**: Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$:
# $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{1}$$
# $$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{2}$$
#
# where L is the number of layers and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
# In[2]:
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
"""
Update parameters using one step of gradient descent
Arguments:
parameters -- python dictionary containing your parameters to be updated:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients to update each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
learning_rate -- the learning rate, scalar.
Returns:
parameters -- python dictionary containing your updated parameters
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Update rule for each parameter
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - (learning_rate * grads["dW" + str(l+1)])
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - (learning_rate * grads["db" + str(l+1)])
### END CODE HERE ###
return parameters
# In[3]:
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td > **W1** </td>
# <td > [[ 1.63535156 -0.62320365 -0.53718766]
# [-1.07799357 0.85639907 -2.29470142]] </td>
# </tr>
#
# <tr>
# <td > **b1** </td>
# <td > [[ 1.74604067]
# [-0.75184921]] </td>
# </tr>
#
# <tr>
# <td > **W2** </td>
# <td > [[ 0.32171798 -0.25467393 1.46902454]
# [-2.05617317 -0.31554548 -0.3756023 ]
# [ 1.1404819 -1.09976462 -0.1612551 ]] </td>
# </tr>
#
# <tr>
# <td > **b2** </td>
# <td > [[-0.88020257]
# [ 0.02561572]
# [ 0.57539477]] </td>
# </tr>
# </table>
#
# A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent.
#
# - **(Batch) Gradient Descent**:
#
# ``` python
# X = data_input
# Y = labels
# parameters = initialize_parameters(layers_dims)
# for i in range(0, num_iterations):
# # Forward propagation
# a, caches = forward_propagation(X, parameters)
# # Compute cost.
# cost = compute_cost(a, Y)
# # Backward propagation.
# grads = backward_propagation(a, caches, parameters)
# # Update parameters.
# parameters = update_parameters(parameters, grads)
#
# ```
#
# - **Stochastic Gradient Descent**:
#
# ```python
# X = data_input
# Y = labels
# parameters = initialize_parameters(layers_dims)
# for i in range(0, num_iterations):
# for j in range(0, m):
# # Forward propagation
# a, caches = forward_propagation(X[:,j], parameters)
# # Compute cost
# cost = compute_cost(a, Y[:,j])
# # Backward propagation
# grads = backward_propagation(a, caches, parameters)
# # Update parameters.
# parameters = update_parameters(parameters, grads)
# ```
#
# In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will "oscillate" toward the minimum rather than converge smoothly. Here is an illustration of this:
#
# <img src="images/kiank_sgd.png" style="width:750px;height:250px;">
# <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **SGD vs GD**<br> "+" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>
#
# **Note** also that implementing SGD requires 3 for-loops in total:
# 1. Over the number of iterations
# 2. Over the $m$ training examples
# 3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)
#
# In practice, you'll often get faster results if you use neither the whole training set nor only a single training example to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples.
#
# <img src="images/kiank_minibatch.png" style="width:750px;height:250px;">
# <caption><center> <u> <font color='purple'> **Figure 2** </u>: <font color='purple'> **SGD vs Mini-Batch GD**<br> "+" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>
#
# <font color='blue'>
# **What you should remember**:
# - The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.
# - You have to tune a learning rate hyperparameter $\alpha$.
# - With a well-tuned mini-batch size, it usually outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large).
# ## 2 - Mini-Batch Gradient descent
#
# Let's learn how to build mini-batches from the training set (X, Y).
#
# There are two steps:
# - **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y, such that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches (a short sketch of this step is shown after the figures below).
#
# <img src="images/kiank_shuffle.png" style="width:550px;height:300px;">
#
# - **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this:
#
# <img src="images/kiank_partition.png" style="width:550px;height:300px;">
#
# **Exercise**: Implement `random_mini_batches`. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:
# ```python
# first_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]
# second_mini_batch_X = shuffled_X[:, mini_batch_size
#!/usr/bin/env python3
import functools
import re
import subprocess
import sys
import packaging.version
import pkg_resources
import requests
import urllib3
urllib3.disable_warnings()
verbose = len([arg for arg in sys.argv[1:] if arg == '-v'])
Packages = {
'advancecomp': {
'git': 'https://github.com/amadvance/advancecomp.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'armadillo': {
'filelist': 'https://sourceforge.net/projects/arma/files/',
're': r'armadillo-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)/download$',
'session': False,
},
'blosc': {
'git': 'https://github.com/Blosc/c-blosc.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'boost': {
'git': 'https://github.com/boostorg/boost.git',
're': r'boost-([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'cairo': {
'git': 'https://gitlab.freedesktop.org/cairo/cairo.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'charls': {
'gitsha': 'https://github.com/team-charls/charls.git',
'branch': '1.x-master'
},
'charls-release': {
'git': 'https://github.com/team-charls/charls.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'cmake': {
'git': 'https://github.com/Kitware/CMake.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'curl': {
'git': 'https://github.com/curl/curl.git',
're': r'curl-([0-9]+_[0-9]+(|_[0-9]+))$'
},
'fitsio': {
'filelist': 'https://heasarc.gsfc.nasa.gov/FTP/software/fitsio/c/',
're': r'^cfitsio([0-9]+).tar.(gz|xz)$',
'session': False,
'insecure': True,
},
'fontconfig': {
'git': 'https://gitlab.freedesktop.org/fontconfig/fontconfig.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'fossil': {
'json': 'https://www.fossil-scm.org/index.html/juvlist',
'keys': lambda data: [entry['name'] for entry in data],
're': r'fossil-linux-x64-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)$'
},
'freetype': {
'filelist': 'https://download.savannah.gnu.org/releases/freetype',
're': r'freetype-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)$'
},
'freexl': {
'fossil': 'https://www.gaia-gis.it/fossil/freexl/timeline?n=10&r=trunk&ss=x',
# 'filelist': 'https://www.gaia-gis.it/fossil/freexl/',
# 're': r'freexl-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)$'
},
'fyba': {
'gitsha': 'https://github.com/kartverket/fyba.git',
},
'fyba-release': {
'git': 'https://github.com/kartverket/fyba.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'gdal': {
'gitsha': 'https://github.com/OSGeo/gdal.git',
},
'gdal-pypi': {
'pypi': 'GDAL',
},
'gdal-release': {
'git': 'https://github.com/OSGeo/gdal.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'gdk-pixbuf': {
'json': 'https://download.gnome.org/sources/gdk-pixbuf/cache.json',
'keys': lambda data: list(data[1]['gdk-pixbuf']),
're': r'^([0-9]+\.[0-9]+(|\.[0-9]+)(|\.[0-9]+))$'
},
'geos': {
'git': 'https://github.com/libgeos/geos.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'gettext': {
'filelist': 'https://ftp.gnu.org/pub/gnu/gettext/',
're': r'gettext-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)$'
},
'giflib': {
'filelist': 'https://sourceforge.net/projects/giflib/files/',
're': r'giflib-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.gz\/download'
},
'glib': {
'json': 'https://download.gnome.org/sources/glib/cache.json',
'keys': lambda data: list(data[1]['glib']),
're': r'^([0-9]+\.[0-9]+(|\.[0-9]+)(|\.[0-9]+))$'
},
'glymur': {
'git': 'https://github.com/quintusdias/glymur.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+)(|-[0-9]+))$'
},
'glymur-pypi': {
'pypi': 'glymur'
},
'gobject-introspection': {
'json': 'https://download.gnome.org/sources/gobject-introspection/cache.json',
'keys': lambda data: list(data[1]['gobject-introspection']),
're': r'^([0-9]+\.[0-9]+(|\.[0-9]+)(|\.[0-9]+))$'
},
'harfbuzz': {
'git': 'https://github.com/harfbuzz/harfbuzz.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'hdf4': {
'git': 'https://github.com/HDFGroup/hdf4.git',
're': r'hdf-([0-9]+_[0-9]+(|_[0-9]+))$',
},
'hdf5': {
'git': 'https://github.com/HDFGroup/hdf5.git',
're': r'hdf5-([0-9]+_[0-9]+(|_[0-9]+))$',
},
'icu4c': {
'git': 'https://github.com/unicode-org/icu.git',
're': r'release-([0-9]+-[0-9]+(|-[0-9]+))$',
},
'imagemagick': {
'git': 'https://github.com/ImageMagick/ImageMagick.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+)(|-[0-9]+))$'
},
'jasper': {
'git': 'https://github.com/mdadams/jasper.git',
're': r'version-([0-9]+\.[0-9]+(|\.[0-9]+)(|-[0-9]+))$'
},
'javabridge': {
'git': 'https://github.com/CellProfiler/python-javabridge.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$',
},
'javabridge-pypi': {
'pypi': 'python-javabridge',
},
'jbigkit': {
'filelist': 'https://www.cl.cam.ac.uk/~mgk25/jbigkit/download/',
're': r'jbigkit-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)$'
},
'jpeg-xl': {
'git': 'https://gitlab.com/wg1/jpeg-xl.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'krb5': {
'filelist': 'https://kerberos.org/dist/',
're': r'krb5-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)$'
},
'lapack': {
'git': 'https://github.com/Reference-LAPACK/lapack.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'lerc': {
'git': 'https://github.com/Esri/lerc.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'libbrotli': {
'git': 'https://github.com/google/brotli.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'libcroco': {
'json': 'https://download.gnome.org/sources/libcroco/cache.json',
'keys': lambda data: list(data[1]['libcroco']),
're': r'^([0-9]+\.[0-9]+(|\.[0-9]+)(|\.[0-9]+))$'
},
'libdap': {
'git': 'https://github.com/OPENDAP/libdap4.git',
're': r'version-([0-9]+\.[0-9]+(|\.[0-9]+))$',
},
'libde265': {
'git': 'https://github.com/strukturag/libde265.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'libdeflate': {
'git': 'https://github.com/ebiggers/libdeflate.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'libexpat': {
'git': 'https://github.com/libexpat/libexpat.git',
're': r'R_([0-9]+_[0-9]+(|_[0-9]+))$'
},
'libffi': {
'git': 'https://github.com/libffi/libffi.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'libgeotiff': {
'git': 'https://github.com/OSGeo/libgeotiff.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'libgsf': {
'json': 'https://download.gnome.org/sources/libgsf/cache.json',
'keys': lambda data: list(data[1]['libgsf']),
're': r'^([0-9]+\.[0-9]+(|\.[0-9]+)(|\.[0-9]+))$'
},
'libgta': {
'git': 'https://github.com/marlam/gta-mirror.git',
're': r'libgta-([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'libheif': {
'git': 'https://github.com/strukturag/libheif.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'libhwy': {
'git': 'https://github.com/google/highway.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'libiconv': {
'filelist': 'https://ftp.gnu.org/pub/gnu/libiconv/',
're': r'libiconv-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)$'
},
'libimagequant': {
'git': 'https://github.com/ImageOptim/libimagequant.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+)(|\.[0-9]+))$'
},
'libjpeg-turbo': {
'git': 'https://github.com/libjpeg-turbo/libjpeg-turbo.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'libmemcached': {
'filelist': 'https://launchpad.net/libmemcached/+download',
're': r'libmemcached-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)$'
},
'libpng': {
'filelist': 'https://sourceforge.net/projects/libpng/files/libpng16/',
're': r'libpng16\/([0-9]+\.[0-9]+(|\.[0-9]+))\/$'
},
'librasterlite2': {
'fossil': 'https://www.gaia-gis.it/fossil/librasterlite2/timeline?n=10&r=trunk&ss=x',
# 'filelist': 'https://www.gaia-gis.it/fossil/librasterlite2/',
# 're': r'librasterlite2-([0-9]+\.[0-9]+(|\.[0-9]+)(|-beta[0-9]+)).tar.(gz|xz)$'
},
'librsvg': {
'json': 'https://download.gnome.org/sources/librsvg/cache.json',
'keys': lambda data: list(data[1]['librsvg']),
're': r'^([0-9]+\.[0-9]+(|\.[0-9]+)(|\.[0-9]+))$'
},
'libspatialite': {
'fossil': 'https://www.gaia-gis.it/fossil/libspatialite/timeline?n=10&r=trunk&ss=x',
# 'filelist': 'https://www.gaia-gis.it/fossil/libspatialite/',
# 're': r'libspatialite-([0-9]+\.[0-9]+(|\.[0-9]+)(|[a-z])).tar.(gz|xz)$'
},
'libssh2': {
'git': 'https://github.com/libssh2/libssh2.git',
're': r'libssh2-([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'libtiff': {
'filelist': 'https://download.osgeo.org/libtiff/',
're': r'tiff-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)$'
},
'libvips': {
'git': 'https://github.com/libvips/libvips.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'libwebp': {
'git': 'https://github.com/webmproject/libwebp.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'libxcrypt': {
'git': 'https://github.com/besser82/libxcrypt.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'libxml2': {
'filelist': 'http://xmlsoft.org/sources/',
're': r'libxml2-([0-9]+\.[0-9]+(|\.[0-9]+)(|[a-z])).tar.(gz|xz)$'
},
'libzip': {
'git': 'https://github.com/nih-at/libzip.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$',
},
'lz4': {
'git': 'https://github.com/lz4/lz4.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'm4': {
'filelist': 'https://ftp.gnu.org/gnu/m4/',
're': r'm4-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)$'
},
'manylinux2014': {
# See also https://github.com/pypa/manylinux
'json': 'https://quay.io/api/v1/repository/pypa/manylinux2014_x86_64?includeTags=true',
'keys': lambda data: [data['tags']['latest']['manifest_digest']],
're': r':([0-9a-fA-F]+)$'
},
'mapnik': {
'gitsha': 'https://github.com/mapnik/mapnik.git',
},
'mapnik-release': {
'git': 'https://github.com/mapnik/mapnik.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'minizip': {
'git': 'https://github.com/nmoinvaz/minizip.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
# MrSID's listing of versions is behind an agreement page, which prevents
# easily checking the version.
# 'mrsid': {
# 'filelist': 'https://www.extensis.com/support/developers',
# 're': r'MrSID_DSDK-([0-9]+\.[0-9]+(|\.[0-9]+(|\.[0-9]+)))-rhel6.x86-64.gcc531.tar.gz$'
# },
'mysql': {
'filelist': 'https://dev.mysql.com/downloads/mysql/?tpl=version&os=src',
're': r'mysql-boost-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)(&|$)'
},
'netcdf': {
'git': 'https://github.com/Unidata/netcdf-c.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'nifti': {
'filelist': 'https://sourceforge.net/projects/niftilib/files/nifticlib/',
're': r'nifticlib_([0-9]+_[0-9]+(|_[0-9]+))\/$'
},
'ogdi': {
'git': 'https://github.com/libogdi/ogdi.git',
're': r'ogdi_([0-9]+_[0-9]+(|_[0-9]+))$'
},
'openblas': {
'git': 'https://github.com/xianyi/OpenBLAS.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'openexr': {
'git': 'https://github.com/AcademySoftwareFoundation/openexr.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'openjpeg': {
'git': 'https://github.com/uclouvain/openjpeg.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'openldap': {
'git': 'https://git.openldap.org/openldap/openldap.git',
're': r'OPENLDAP_REL_ENG_([0-9]+_[0-9]+(|_[0-9]+))$'
},
'openmpi': {
'filelist': 'https://www.open-mpi.org/software/ompi/v4.1/',
're': r'openmpi-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)$'
},
'openslide': {
'gitsha': 'https://github.com/openslide/openslide.git',
},
'openslide-release': {
'git': 'https://github.com/openslide/openslide.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'openslide-python': {
'gitsha': 'https://github.com/openslide/openslide-python.git',
},
'openslide-python-pypi': {
'pypi': 'openslide-python',
},
'openssl-1.x': {
'git': 'https://github.com/openssl/openssl.git',
're': r'OpenSSL_(1_[0-9]+_[0-9]+[a-z])$',
},
'orc': {
'git': 'https://github.com/GStreamer/orc.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'pango': {
'json': 'https://download.gnome.org/sources/pango/cache.json',
'keys': lambda data: list(data[1]['pango']),
're': r'^([0-9]+\.[0-9]+(|\.[0-9]+)(|\.[0-9]+))$'
},
'patchelf': {
'git': 'https://github.com/NixOS/patchelf.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$',
},
'pcre': {
'filelist': 'https://ftp.pcre.org/pub/pcre/',
're': r'pcre-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)$'
},
'pixman': {
'git': 'https://gitlab.freedesktop.org/pixman/pixman.git',
're': r'pixman-([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'pkgconfig': {
'git': 'https://gitlab.freedesktop.org/pkg-config/pkg-config.git',
're': r'pkg-config-([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'pnetcdf': {
'git': 'https://github.com/Parallel-NetCDF/PnetCDF.git',
're': r'checkpoint\.([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'poppler': {
'git': 'https://gitlab.freedesktop.org/poppler/poppler.git',
're': r'poppler-([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'postgresql': {
'filelist': 'https://ftp.postgresql.org/pub/source/',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))\/$'
},
'proj.4': {
'gitsha': 'https://github.com/OSGeo/proj.4.git',
},
'proj.4-release': {
'git': 'https://github.com/OSGeo/proj.4.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'proj-datumgrid': {
'filelist': 'http://download.osgeo.org/proj/',
're': r'proj-datumgrid-([0-9]+\.[0-9]+(|\.[0-9]+)).(tgz|zip)$'
},
'psutil': {
'git': 'https://github.com/giampaolo/psutil.git',
're': r'release-([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'pylibmc': {
'git': 'https://github.com/lericson/pylibmc.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'pylibtiff': {
'gitsha': 'https://github.com/pearu/pylibtiff.git',
},
'pylibtiff-pypi': {
'pypi': 'libtiff',
},
# 'pyproj4': {
# 'gitsha': 'https://github.com/pyproj4/pyproj.git',
# },
'pyproj4-release': {
'git': 'https://github.com/pyproj4/pyproj.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))rel$'
},
'pyproj4-pypi': {
'pypi': 'pyproj',
},
'python-mapnik': {
'gitsha': 'https://github.com/mapnik/python-mapnik.git',
},
'python-mapnik-pypi': {
'pypi': 'mapnik',
},
'pyvips': {
'gitsha': 'https://github.com/libvips/pyvips.git',
},
'pyvips-pypi': {
'pypi': 'pyvips',
},
'sqlite': {
'text': 'https://www.sqlite.org/download.html',
'keys': lambda data: [re.search(r'sqlite-autoconf-([0-9]+).tar.(gz|xz)', data).group(1)]
},
'superlu': {
'git': 'https://github.com/xiaoyeli/superlu.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'ultrajson': {
'gitsha': 'https://github.com/esnme/ultrajson.git',
},
'ultrajson-release': {
'git': 'https://github.com/esnme/ultrajson.git',
're': r'([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'ultrajson-pypi': {
'pypi': 'ujson',
},
'util-linux': {
'git': 'https://github.com/karelzak/util-linux.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
'xerces-c': {
'filelist': 'http://xerces.apache.org/xerces-c/download.cgi',
're': r'xerces-c-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)$'
},
# 'xz': {
# 'filelist': 'https://sourceforge.net/projects/lzmautils/files/',
# 're': r'xz-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.gz\/download'
# },
'zlib': {
'filelist': 'https://zlib.net/',
're': r'zlib-([0-9]+\.[0-9]+(|\.[0-9]+)).tar.(gz|xz)$'
},
'zstd': {
'git': 'https://github.com/facebook/zstd.git',
're': r'v([0-9]+\.[0-9]+(|\.[0-9]+))$'
},
}
def compareVersions(a, b):
if packaging.version.parse(a) < packaging.version.parse(b):
return -1
if packaging.version.parse(a) > packaging.version.parse(b):
return 1
return 0
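# Example (hypothetical version strings): compareVersions is used below with
# functools.cmp_to_key, e.g.
#   sorted(['1.10.0', '1.2.3', '1.9.1'], key=functools.cmp_to_key(compareVersions))
#   returns ['1.2.3', '1.9.1', '1.10.0'].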
session = requests.Session()
retries = urllib3.util.retry.Retry(
total=10, backoff_factor=0.1, status_forcelist=[104, 500, 502, 503, 504])
session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries))
session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retries))
for pkg in sorted(Packages): # noqa
try:
pkginfo = Packages[pkg]
entries = None
versions = None
if 'filelist' in pkginfo:
data = (session if pkginfo.get('session') is not False else requests).get(
pkginfo['filelist'],
**({'verify': False} if pkginfo.get('insecure') else {})).text
if verbose >= 2:
print(pkg, 'filelist data', data)
data = data.replace('<A ', '<a ').replace('HREF="', 'href="')
entries = [
entry.split('href="', 1)[-1].split('"')[0] for entry in data.split('<a ')[1:]]
if verbose >= 1:
print(pkg, 'filelist entries', entries)
elif 'git' in pkginfo:
cmd = ['git', 'ls-remote', '--refs', '--tags', pkginfo['git']]
entries = [entry for entry in
subprocess.check_output(cmd).decode('utf8').split('\n')
if '/' in entry]
if verbose >= 1:
print(pkg, 'git entries', entries)
elif 'gitsha' in pkginfo:
cmd = ['git', 'ls-remote', pkginfo['gitsha'], pkginfo.get('branch', 'HEAD')]
versions = [subprocess.check_output(cmd).decode('utf8').split()[0]]
if verbose >= 1:
print(pkg, 'gitsha versions', versions)
elif 'json' in pkginfo:
data = session.get(pkginfo['json']).json()
if verbose >= 2:
print(pkg, 'json data', data)
entries = pkginfo['keys'](data)
if verbose >= 1:
print(pkg, 'json entries', entries)
elif 'pypi' in pkginfo:
url = 'https://pypi.python.org/pypi/%s/json' % pkginfo['pypi']
releases = session.get(url).json()['releases']
if verbose >= 2:
print(pkg, 'pypi releases', releases)
versions = sorted(releases, key=pkg_resources.parse_version)
if verbose >= 1:
print(pkg, 'pypi versions', versions)
elif 'text' in pkginfo:
data = session.get(pkginfo['text']).content.decode('utf8')
if verbose >= 2:
print(pkg, 'text data', data)
entries = pkginfo['keys'](data)
if verbose >= 1:
print(pkg, 'text entries', entries)
elif 'fossil' in pkginfo:
data = session.get(pkginfo['fossil']).text
if verbose >= 2:
print(pkg, 'fossil data', data)
entries = [entry.split(']<')[0]
for entry in data.split('<span class="timelineHistDsp">[')[1:]]
if verbose >= 1:
print(pkg, 'fossil entries', entries)
if 're' in pkginfo:
entries = [entry for entry in entries if re.search(pkginfo['re'], entry)]
if verbose >= 2:
print(pkg, 're entries', entries)
versions = [re.search(pkginfo['re'], entry).group(1) for entry in entries]
if verbose >= 2:
print(pkg, 're versions', versions)
versions.sort(key=functools.cmp_to_key(compareVersions))
if 'subre' in pkginfo:
pversions = versions
for pos in range(-1, -len(pversions) - 1, -1):
data = session.get(pkginfo['filelist'] + pkginfo['sub'](pversions[pos])).text
if verbose >= 2:
print(pkg, 'subre data', data)
data = data.replace('<A ', '<a ').replace('HREF="', 'href="')
entries = [entry.split('href="', 1)[-1].split('"')[0]
for entry in data.split('<a ')[1:]]
if verbose >= 2:
print(pkg, 'subre entries', entries)
entries = | |
<filename>heat/optim/dp_optimizer.py
import torch
import torch.distributed
from torch.nn.parallel import DistributedDataParallel as tDDP
from ..core.communication import MPICommunication
from ..core.communication import MPI
from ..core.communication import MPI_WORLD
from .utils import DetectMetricPlateau
from typing import Union, List, Tuple, Dict
import time
import math
import queue
import threading
import gc
# gc.enable()
__all__ = ["DataParallelOptimizer", "SkipBatches"]
def print0(*args, **kwargs):
if MPI_WORLD.rank == 0:
print(*args, **kwargs)
def queue_thread(q: queue.Queue):
while True:
items = q.get()
if isinstance(items, tuple):
func = items[0]
args = items[1:]
func(*args)
else:
items()
q.task_done()
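# Hedged usage sketch (assumption, not in the original module): queue_thread is a
# worker loop intended to run on a daemon thread, executing callables or
# (callable, *args) tuples pushed onto the queue.
def _example_queue_worker():  # illustrative only; nothing in this module calls it
    q = queue.Queue()
    threading.Thread(target=queue_thread, args=(q,), daemon=True).start()
    q.put((print0, "non-blocking work finished"))  # worker runs print0("non-blocking work finished")
    q.join()  # blocks until the worker has called task_done() for every item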
def __sum_f16_cb(buffer_a, buffer_b, _):
tens_a = torch.HalfTensor().set_(torch.HalfStorage.from_buffer(buffer_a, "native"))
tens_b = torch.HalfTensor().set_(torch.HalfStorage.from_buffer(buffer_b, "native"))
tens_b += tens_a
nelem = torch.prod(torch.tensor(tens_b.shape)).item()
new_buff = MPI.memory.fromaddress(tens_b.data_ptr(), nbytes=tens_b.element_size() * nelem)
buffer_b[:] = new_buff
def __sum_bfloat_cb(buffer_a, buffer_b, _):
tens_a = torch.BFloat16Tensor().set_(torch.BFloat16Storage.from_buffer(buffer_a, "native"))
tens_b = torch.BFloat16Tensor().set_(torch.BFloat16Storage.from_buffer(buffer_b, "native"))
tens_b += tens_a
nelem = int(tens_b.numel())
new_buff = MPI.memory.fromaddress(tens_b.data_ptr(), nbytes=nelem * tens_b.element_size())
buffer_b[:] = new_buff
# create new OP
mpi_sum_f16 = MPI.Op.Create(__sum_f16_cb, commute=True)
mpi_sum_bfloat = MPI.Op.Create(__sum_bfloat_cb, commute=True)
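# Hedged usage sketch (assumption, not in the original file): the custom ops above
# exist because MPI has no built-in SUM for torch's half/bfloat16 dtypes. A
# reduction over a bfloat16 tensor is issued elsewhere in this module roughly like
#     comm.Allreduce(MPI.IN_PLACE, params_as_bfloat16, mpi_sum_bfloat)
# where `comm` is an MPICommunication instance; the callback then reinterprets the
# raw byte buffers as BFloat16 tensors and sums them in place.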
class DataParallelOptimizer:
"""
Uses a torch.optim.Optimizer for data parallelism. It should be used in combination with the DataParallel (DP) class.
To optimize a DP module, the DP optimizer has to be passed to the DP module during its initialization.
See :func:`..nn.DataParallel` for a basic example of usage.
Attributes
----------
torch_optimizer : torch.optim.Optimizer
the wrapped Torch optimizer
blocking : bool
whether to use blocking communications; typically overwritten by heat.nn.DataParallel
"""
def __init__(self, torch_optimizer: torch.optim.Optimizer, blocking: bool = False):
self.torch_optimizer = torch_optimizer
if not isinstance(blocking, bool):
raise TypeError(f"blocking parameter must be a boolean, currently {type(blocking)}")
# flag indicating if communication during parameter updates is blocking.
self.blocking_parameter_updates = blocking
# flag indicating if optimizer should take a step during next iteration (only relevant for non-blocking)
self.update_next = False
# reference of optimizer's params
self.params_ref = torch_optimizer.param_groups[0]["params"]
def step(self) -> None:
"""
Force the torch optimizer to update the model parameters. In blocking mode, the optimizer updates the parameters
immediately; in non-blocking mode, the update is deferred to the next forward pass.
"""
if self.blocking_parameter_updates:
self.torch_optimizer.step()
else:
self.update_next = True
def zero_grad(self) -> None:
"""
Reset gradients of optimizer's params.
"""
# reset view onto params in order to reset all gradients
self.torch_optimizer.param_groups[0]["params"] = self.params_ref[:]
self.torch_optimizer.zero_grad()
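# Hedged usage sketch (assumption, not part of this module): the wrapper is meant to
# be handed to heat's DataParallel module together with the torch optimizer it wraps.
# The network below is a placeholder; the exact DataParallel signature is not shown here.
def _example_wrap_torch_optimizer():  # illustrative only; never called by heat itself
    t_model = torch.nn.Linear(8, 2)
    t_optim = torch.optim.SGD(t_model.parameters(), lr=0.1)
    dp_optim = DataParallelOptimizer(t_optim, blocking=False)
    # a single local training step on the torch side
    dp_optim.zero_grad()
    t_model(torch.ones(1, 8)).sum().backward()
    dp_optim.step()  # non-blocking: only flags update_next, the DP module applies it later
    return dp_optim.update_next  # True in non-blocking mode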
class SkipBatches:
"""
Optimizer wrapper that skips parameter synchronizations on some batches to reduce communication overhead
"""
def __init__(
self,
local_optimizer: torch.optim.Optimizer,
total_epochs: int,
comm: MPICommunication = MPI_WORLD,
warmup_epochs: int = 4,
finalize_epochs: int = 5,
scheduler: torch.optim.lr_scheduler = None,
stability_level: float = 0.05,  # originally (imagenet: 0.075)
max_global_skips: int = 8,
loc_gpus: int = None,
):
self.comm = comm
self.lcl_optimizer = local_optimizer
self.params_ref = local_optimizer.param_groups[0]["params"]
# reference of optimizer's params
self.scheduler = scheduler
rank = comm.rank
loc_gpus = torch.cuda.device_count() if loc_gpus is None else loc_gpus
self.loc_gpus = loc_gpus
local_rank = rank % loc_gpus
self.local_skip = 1
if loc_gpus > 1:
base_loc_ranks = list(range(0, comm.size, loc_gpus))
reduced_comms, reduced_ranks = [], []
for i in range(loc_gpus):
lp_ranks = [j + i for j in base_loc_ranks]
newGroup = MPI_WORLD.group.Incl(lp_ranks)
newComm = MPI_WORLD.Create_group(newGroup)
reduced_comms.append(MPICommunication(newComm, group=True))
reduced_ranks.append(tuple(lp_ranks))
self.reduced_comms, self.reduced_ranks = reduced_comms, reduced_ranks
self.base_loc_ranks = base_loc_ranks
if loc_gpus != torch.cuda.device_count():
self.device = "cuda:0"
else:
self.device = "cuda:" + str(local_rank)
torch.cuda.set_device(device=self.device)
self.current_batch, self.last_batch = 0, None
self._prev_params = []
self.epoch = 0
self._send_mod, self._send_mod_m1 = 0, None
self.global_skip = 0
self.local_skip = 0
self.batches_to_wait = 0
self.epochs_to_wait = 3
self.max_gs = max_global_skips
self.warmup_epochs = warmup_epochs
self.finalize_epochs = finalize_epochs
self.total_epochs = total_epochs
# used in the sending of the params
self._param_send_buffer_shape = None
self.param_dict, self.shapes = None, None
self._param_send_shp = None
self.split = None
self.stability = DetectMetricPlateau(
mode="min",
patience=2, # running : 3
threshold=stability_level,  # working well at -0.0125 and -0.025
threshold_mode="rel",
eps=1e-8,
)
self._gs8_waits = 3
self._gs8_waited = 0
self.split_val = 10_000_000 # 5?
# TODO: add these to the class params
self.split_inds = None
self.amp = False
def set_model(self, model):
self.module = model
def _stop_local_sync(self):
# stop local synchronizations for tDDP
if not isinstance(self.module, tDDP) or not self.module.require_backward_grad_sync:
# this has no effect if the module is not locally distributed in torch
return
self.module.require_backward_grad_sync = False
def _start_local_sync(self):
# *start* local synchronizations for tDDP
if not isinstance(self.module, tDDP) or self.module.require_backward_grad_sync:
# this has no effect if the module is not locally distributed in torch
return
self.module.require_backward_grad_sync = True
@torch.no_grad()
def epoch_loss_logic(self, loss, loss_globally_averaged=False):
if not loss_globally_averaged:
loss_send = torch.zeros(self.comm.size)
# loss.data -> this will get the raw number from the loss value and nothing else
loss_send[self.comm.rank] = loss.data if isinstance(loss, torch.Tensor) else loss
self.comm.Allreduce(MPI.IN_PLACE, loss_send, MPI.SUM)
avg_loss = torch.mean(loss_send)
else:
avg_loss = torch.tensor(loss)
if self.epoch < self.warmup_epochs:
self.global_skip = 0
self.local_skip = 0
self.batches_to_wait = 0
print0("\t\t", self.global_skip, self.local_skip, self.batches_to_wait)
return
elif self.warmup_epochs == self.epoch:
self.global_skip = 4
self.local_skip = 1
self.batches_to_wait = 1
# self.stability.reset()
print0("\t\t", self.global_skip, self.local_skip, self.batches_to_wait)
if self.epoch >= self.total_epochs - self.finalize_epochs:
self.global_skip = 0
self.local_skip = 0
self.batches_to_wait = 0
print0("\t\t", self.global_skip, self.local_skip, self.batches_to_wait)
return
if self.global_skip == self.max_gs and self.max_gs > 4:
self._gs8_waited += 1
print0(
"current best:",
self.stability.best * (1.0 - self.stability.threshold),
"avg loss",
avg_loss,
"bad epochs",
self.stability.num_bad_epochs,
)
stable = self.stability.test_if_improving(avg_loss)
if (stable and self.global_skip > 1) or (self._gs8_waited == self._gs8_waits):
# drop gs by factor of 2
self.global_skip //= 2
self.local_skip //= 2
self.batches_to_wait -= 1 # old was //= 2
# self.epochs_to_wait += 1
# self._prev_losses_mean = []
print0("dropping skips")
if self.global_skip > 0:
if self.batches_to_wait == 0:
self.batches_to_wait = 1
if self.local_skip == 0:
self.local_skip = 1
# if self._gs8_waited == self._gs8_waits:
# self.stability.reset()
self._gs8_waited = 0
elif self.global_skip == 1 and stable and (self._gs8_waited != self._gs8_waits):
self.global_skip = self.max_gs
self.local_skip = self.max_gs // 4
self.batches_to_wait = self.max_gs // 4 # + 1 # 2
self._gs8_waited = 0
# self._prev_losses_mean = []
# self.epochs_to_wait = 3
print0(
"\t\t",
self.global_skip,
self.local_skip,
self.batches_to_wait,
"\t",
avg_loss,
self.stability.num_bad_epochs,
)
def add_scaler(self, scaler):
self.scaler = scaler
self.amp = True
def step(self):
# TODO: raise an error if last_batch is not set
# collect the parameters from the current batch -> save + (non?)blocking send
# test for a receive from the last batch;
# if yes: receive and update the parameters with the received data
# copy and send the parameter dictionary
if self.amp:
self.scaler.step(self.lcl_optimizer)
# todo: add something to tell if the grads have infs or nans
# Updates the scale for next iteration.
self.scaler.update()
elif self.scheduler is None:
self.lcl_optimizer.step()
else:
self.scheduler.step()
# gc.collect()
# print("step start")
batch = self.current_batch
next_batch = batch + 1
gs = self.global_skip
ls = self.local_skip
gmod = batch % gs if gs > 0 else 0
lmod = batch % ls if ls > 0 else 0
batches_to_wait = self.batches_to_wait
btw = (
batches_to_wait
if batches_to_wait + batch <= self.last_batch
else self.last_batch - batch
)
# do a full sync on global skips and on the last batch
if batch == self.last_batch or gmod == 0:
return self._full_global_sync(btw)
if next_batch % gs == 0:
self._start_local_sync()
self.current_batch += 1
return
if gmod < btw:
# do nothing on these batches
self.current_batch += 1
if next_batch == self.last_batch:
self._start_local_sync()
return
elif gmod == btw:
# local updates should be on before this is called!
self._update_parameters()
self._local_torch_param_update(self._send_mod_m1)
if ls > 1:
self._stop_local_sync()
if ls == 1 and next_batch != self.last_batch:
self.current_batch += 1
self._start_local_sync()
return
if lmod == 0:
self._stop_local_sync()
elif next_batch % ls == 0:
self._start_local_sync()
if next_batch == self.last_batch:
self._start_local_sync()
self.current_batch += 1
@torch.no_grad()
def _full_global_sync(self, batches_to_wait):
# print("top of full global sync")
current_comm = self.reduced_comms[self._send_mod]
current_ranks = self.reduced_ranks[self._send_mod]
if self.comm.rank in current_ranks:
self._global_send_update(current_comm, batches_to_wait)
if self.batches_to_wait != 0:
# update parameters from the last sending (if there)
self._update_parameters() # -> splits off irrelevant ranks
# needs to happen on all ranks:
self._local_torch_param_update(self._send_mod_m1)
if self.current_batch == self.last_batch or self.batches_to_wait == 0:
# todo: abstract last batch?
# receive the sent data to sync params across all ranks
if self.comm.rank in current_ranks:
self._update_last_batch(current_ranks)
else:
if len(self._prev_params) > 0:
raise ValueError(
f"DEBUG: OFF RANKS! len(prev_params) > 0! {len(self._prev_params)}"
f" batch number {self.current_batch}"
)
# self.comm.Barrier()
self._local_torch_param_update(self._send_mod)
self._send_mod_m1 = None
if self.current_batch == self.last_batch:
self._send_mod = 0
self.epoch += 1
self.current_batch = 0
else:
self.current_batch += 1
self._send_mod = self._send_mod + 1 if self._send_mod <= self.loc_gpus - 2 else 0
else:
self.current_batch += 1
self._send_mod_m1 = | |
#!/usr/bin/python
# Copyright 2008,2009 <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
#
#
# Analyze Apache logfiles in order to count downloads
#
#
# This script parses a MirrorBrain-enhanced access_log and does the following:
# - select the lines that the log analysis is supposed to run on
# (StatsLogMask directive, which defaults to a regexp suitable for a MirrorBrain logfile)
# The expression also selects data from the log line, for example the
# country where a client request originated from.
# - a little ring buffer filters requests recurring within a sliding time
# window (keyed by ip+url+referer+user-agent);
# length of the sliding window: StatsDupWindow
# - arbitrary log lines can be ignored by regexp (StatsIgnoreMask)
# - IP addresses can be ignored by string prefix match (StatsIgnoreIP)
# - apply prefiltering to the request (regular expressions with substitution)
# with one or more StatsPrefilter directives
# - parse the remaining request url into the values to be logged
# (StatsCount directive)
# - apply optional post-filtering to the parsed data (StatsPostfilter)
#
#
# The script should serve as a model implementation for the Apache module which
# does the same in realtime.
#
#
# Usage:
# ./dlcount.py /var/log/apache2/download.services.openoffice.org/2009/11/download.services.openoffice.org-20091123-access_log.bz2 | sort -u
#
# Uncompressed, gzip or bzip2 compressed files are transparently opened.
#
#
# This script uses Python generators, which means that it doesn't allocate
# memory proportional to the log size. Rather, it works like a Unix pipe.
# (The implementation of the generator pipeline is based on David Beazley's
# PyCon UK 08 great talk about generator tricks for systems programmers.)
#
__version__ = '0.91'
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Peter poeml <<EMAIL>>'
__license__ = 'GPLv2'
__url__ = 'http://mirrorbrain.org/'
try:
import sys
import os
import os.path
import re
import hashlib
import time
from datetime import datetime
from optparse import OptionParser
set
except NameError:
from sets import Set as set # Python 2.3 fallback
try:
sorted
except NameError:
def sorted(in_value): # Python 2.3 fallback
"A naive implementation of sorted"
out_value = list(in_value)
out_value.sort()
return out_value
def gen_open(filenames):
"""Open a sequence of filenames"""
import gzip
import bz2
for name in filenames:
if name.endswith(".gz"):
yield gzip.open(name)
elif name.endswith(".bz2"):
yield bz2.BZ2File(name)
else:
yield open(name)
def gen_cat(sources):
"""Concatenate items from one or more
source into a single sequence of items"""
for s in sources:
for item in s:
yield item
def gen_grep(pat, lines):
import re
patc = re.compile(pat)
for line in lines:
if patc.search(line):
yield line
def gen_fragments(lines, pat):
"""Generate a sequence of line fragments, according to
a given regular expression"""
for line in lines:
m = pat.match(line)
if m:
yield m.groups()
# else:
# print 'no match for:'
# print line
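# Hedged usage sketch (not part of the original script): the generators above are
# meant to be chained into a pipeline, mirroring the header description. The
# filename and grep pattern below are placeholders.
def _example_pipeline(conf):  # illustrative only; the real wiring lives in the omitted main part
    logfiles = gen_open(['access_log-20091123.bz2'])        # transparently decompress
    loglines = gen_cat(logfiles)                            # flatten into one stream of lines
    hits = gen_grep(r'"GET ', loglines)                     # keep only GET requests
    # conf['statslogmask'][0][0] is the compiled StatsLogMask regexp built by readconf()
    return gen_fragments(hits, conf['statslogmask'][0][0])  # yields tuples of captured groups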
class RingBuffer:
"""Here is a simple circular buffer, or ring buffer, implementation in
Python. It is a first-in, first-out (FIFO) buffer with a fixed size.
Here is an example where the buffer size is 4. Ten integers, 0-9, are
inserted, one at a time, at the end of the buffer. Each iteration, the first
element is removed from the front of the buffer.
buf = RingBuffer(4)
for i in xrange(10):
buf.append(i)
print buf.get()
Here are the results:
[None, None, None, 0]
[None, None, 0, 1]
[None, 0, 1, 2]
[0, 1, 2, 3]
[1, 2, 3, 4]
[2, 3, 4, 5]
[3, 4, 5, 6]
[4, 5, 6, 7]
[5, 6, 7, 8]
[6, 7, 8, 9]
from http://www.saltycrane.com/blog/2007/11/python-circular-buffer/
"""
def __init__(self, size):
self.data = [None for i in xrange(size)]
def append(self, x):
self.data.pop(0)
self.data.append(x)
def get(self):
return self.data
def readconf(filename):
"""we'd need Apache's config parser here..."""
known_directives = ['StatsLogMask',
'StatsIgnoreMask',
'StatsIgnoreIP',
'StatsDupWindow',
'StatsPreFilter',
'StatsCount',
'StatsPostFilter']
known_directives_lower = [i.lower() for i in known_directives]
# regular expressions to parse arguments
parse_1_in_quotes = re.compile(r'"(.*)"')
parse_2_in_quotes = re.compile(r'"(.*)"\s+"(.*)"')
# create a dictionary to hold the config
# each item is a list (because the directives could occur more than once)
# each list item will correspond to one directive occurrence
conf = {}
for i in known_directives_lower:
conf[i] = list()
conf['statsdupwindow'] = 200
for line in open(filename):
# remove trailing and leading whitespace and newlines
line = line.strip()
# ignore comment lines
if line.startswith('#'):
continue
# and empty lines
if not line:
continue
# split line into 1st word plus rest
# will fail if it's not a valid config line
try:
word, val = line.split(None, 1)
except:
sys.exit('error: can\'t parse the line %r' % line)
if word.lower() not in known_directives_lower:
sys.exit('unknown config directive: %r' % word)
directive = word.lower()
val = val
# this is just a single integer
if directive in ['statsdupwindow']:
conf[directive] = int(val)
# directives with one argument: a regexp
elif directive in ['statslogmask', 'statsignoremask']:
m = parse_1_in_quotes.match(val)
regex = m.group(1).replace('\\"', '"')
regex_compiled = re.compile(regex)
conf[directive].append((regex_compiled, regex))
# these come with two args: a regexp and a substitution rule
elif directive in ['statsprefilter', 'statscount', 'statspostfilter']:
m = parse_2_in_quotes.match(val)
# print 'substitute %s by %s' % (m.group(1), m.group(2))
regex = m.group(1).replace('\\"', '"')
subst = m.group(2).replace('\\"', '"')
regex_compiled = re.compile(regex)
conf[directive].append((regex_compiled, subst, regex))
elif directive in ['statsignoreip']:
conf[directive].append(val)
else:
sys.exit('unparsed directive (implementation needed): %r' % directive)
# set defaults for directives that didn't occur in the config
if not len(conf['statslogmask']):
regex = '^(\S+).+\[(.*?)\] "GET (\S*) HTTP.*" (200|302) [^"]+ "([^"]*)" "([^"]*)".* \w\w:(\w\w) ASN:'
regex_compiled = re.compile(regex)
conf['statslogmask'] = [(regex_compiled, regex)]
#import pprint
# pprint.pprint(conf)
# sys.exit(0)
return conf
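# Hedged illustration (assumption, not shipped with this script): a minimal config
# file accepted by readconf() could look like the lines below, with regexps in
# double quotes exactly as the parser above expects. Directive names are matched
# case-insensitively, and StatsLogMask falls back to the built-in MirrorBrain
# default when omitted.
#
#   StatsDupWindow 200
#   StatsIgnoreIP 10.
#   StatsIgnoreMask "favicon\.ico$"
#   StatsPreFilter "\?.*$" ""
#   StatsCount "^/stable/([^/]+)/.*" "stable \1"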
# class Countable():
# """This holds a result from a parsed log line
# which consists of a date and 5 attributes"""
# #def __init__(self, date, a0, a1, a2, a3, a4):
# def __init__(self, (date, a0, a1, a2, a3, a4, a5)):
# self.date = date
# self.a0 = a0
# self.a1 = a1
# self.a2 = a2
# self.a3 = a3
# self.a4 = a4
# self.a5 = a5
class Req():
"""This helps us in housekeeping while parsing a log line"""
def __init__(self):
# url_raw contains the original url, if needed
self.url_raw = None
self.tstamp = None
self.tstamp_raw = None
self.date = None
self.status = None
self.referer = None
self.ua = None
self.country = None
# this is the processed URL, after running through all the regexps
self.url = None
self.countable = False
def __str__(self):
return '%-80s' % self.url
def as_tuple(self):
return self.tuple
# def as_obj(self):
# return Countable(self.tuple)
def gen_processreqs(reqs, conf, options):
"""Process tuples of request data and yield the parsed requests as a generator"""
known = RingBuffer(conf['statsdupwindow'])
for req in reqs:
rq = Req()
if len(req) == 7:
(ip, tstamp_raw, url, status, referer, ua, country) = req
elif len(req) == 6:
(ip, tstamp_raw, url, status, referer, ua) = req
country = ''
skip = False
for r, mreg in conf['statsignoremask']:
if r.match(url):
# print 'ignoring req %s because it matches %s' %(url, mreg)
skip = True
break
if skip:
continue
for i in conf['statsignoreip']:
if ip.startswith(i):
# print 'ignoring ip %s because it matches %s' %(ip, i)
skip = True
break
if skip:
continue
# over a window of StatsDupWindow last requests, the same request must
# not have occurred already. If it did, ignore it. If it didn't, put
# it into the ring buffer.
if conf['statsdupwindow'] > 0:
m = hashlib.md5()
m.update(ip)
m.update(url)
m.update(referer)
m.update(ua)
md = m.digest()
if md in known.data:
continue
known.append(md)
rq.url_raw = url
rq.status = status
rq.referer = referer
rq.ua = ua
rq.country = country.lower()
tstamp_raw = tstamp_raw.split()[0] # split off timezone offset - we ignore it
rq.tstamp = time.strptime(tstamp_raw, '%d/%b/%Y:%H:%M:%S')
rq.tstamp_raw = tstamp_raw
# apply the prefiltering rules
for r, s, mreg in conf['statsprefilter']:
url = r.sub(s, url)
matched = False
for r, s, mreg in conf['statscount']:
if r.match(url):
if matched:
# FIXME: eventually, we want to allow multiple matches. But | |
<reponame>crwsr124/GANsNRoses<gh_stars>0
import argparse
import math
import random
import os
from util import *
import numpy as np
import torch
torch.backends.cudnn.benchmark = True
from torch import nn, autograd
from torch import optim
from torch.nn import functional as F
from torch.utils import data
import torch.distributed as dist
from torchvision import transforms, utils
from tqdm import tqdm
from torch.optim import lr_scheduler
import copy
import kornia.augmentation as K
import kornia
import lpips
from model_cr import *
from dataset import ImageFolder
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
reduce_sum,
get_world_size,
)
mse_criterion = nn.MSELoss()
smooth_l1 = nn.SmoothL1Loss()
@torch.no_grad()
def getGaussianKernel(ksize, sigma=0):
if sigma <= 0:
# compute the default sigma from the kernel size, consistent with OpenCV
sigma = 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8
center = ksize // 2
xs = (np.arange(ksize, dtype=np.float32) - center) # horizontal distance of each element from the kernel center
kernel1d = np.exp(-(xs ** 2) / (2 * sigma ** 2)) # compute the 1D Gaussian kernel
# exploit separability: build the 2D kernel quickly as an outer product of the 1D kernel
kernel = kernel1d[..., None] @ kernel1d[None, ...]
kernel = torch.from_numpy(kernel)
kernel = kernel / kernel.sum() # normalize
return kernel
def bilateralFilter(batch_img, ksize, sigmaColor=None, sigmaSpace=None):
device = batch_img.device
if sigmaSpace is None:
sigmaSpace = 0.15 * ksize + 0.35 # 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8
if sigmaColor is None:
sigmaColor = sigmaSpace
pad = (ksize - 1) // 2
batch_img_pad = F.pad(batch_img, pad=[pad, pad, pad, pad], mode='reflect')
# batch_img has shape BxCxHxW, so unfold along dimensions 2 and 3
# patches.shape: B x C x H x W x ksize x ksize
patches = batch_img_pad.unfold(2, ksize, 1).unfold(3, ksize, 1)
patch_dim = patches.dim() # 6
# compute the intensity difference between each pixel and its neighborhood patch
diff_color = patches - batch_img.unsqueeze(-1).unsqueeze(-1)
# compute the range (color) weights from the intensity differences
weights_color = torch.exp(-(diff_color ** 2) / (2 * sigmaColor ** 2))
# normalize the color weight matrix
weights_color = weights_color / weights_color.sum(dim=(-1, -2), keepdim=True)
# get the Gaussian (spatial) kernel and expand it to the same shape as weights_color
weights_space = getGaussianKernel(ksize, sigmaSpace).to(device)
weights_space_dim = (patch_dim - 2) * (1,) + (ksize, ksize)
weights_space = weights_space.view(*weights_space_dim).expand_as(weights_color)
# multiply the spatial and color weights to obtain the combined weight matrix
weights = weights_space * weights_color
# normalization factor of the combined weight matrix
weights_sum = weights.sum(dim=(-1, -2))
# weighted average
weighted_pix = (weights * patches).sum(dim=(-1, -2)) / weights_sum
return weighted_pix
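# Hedged usage sketch (assumption, not called during training): bilateralFilter
# expects a BxCxHxW tensor and returns an edge-preserving smoothed tensor of the
# same shape; ksize must be odd so the reflect padding of (ksize - 1) // 2 keeps
# H and W unchanged.
def _example_bilateral_filter():  # illustrative only
    imgs = torch.rand(4, 3, 64, 64)  # placeholder batch of images
    smoothed = bilateralFilter(imgs, 15, sigmaColor=0.15, sigmaSpace=5)
    assert smoothed.shape == imgs.shape
    return smoothed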
def test(args, genA2B, genB2A, testA_loader, testB_loader, name, step, A_bg, B_bg):
testA_loader = iter(testA_loader)
testB_loader = iter(testB_loader)
with torch.no_grad():
test_sample_num = 16
genA2B.eval(), genB2A.eval()
A2B = []
B2A = []
for i in range(test_sample_num):
real_A = testA_loader.next()
real_B = testB_loader.next()
real_A, real_B = real_A.cuda(), real_B.cuda()
A2B_content, A2B_style = genA2B.encode(real_A)
B2A_content, B2A_style = genB2A.encode(real_B)
if i % 2 == 0:
A2B_mod1 = torch.randn([1, args.latent_dim]).cuda()
B2A_mod1 = torch.randn([1, args.latent_dim]).cuda()
A2B_mod2 = torch.randn([1, args.latent_dim]).cuda()
B2A_mod2 = torch.randn([1, args.latent_dim]).cuda()
a_c, a_s = G_A2B.encode(real_A)
fake_A2A, alphaA2A = G_B2A.decode(a_c, a_s)
b_c, b_s = G_B2A.encode(real_B)
fake_B2B, alphaB2B = G_A2B.decode(b_c, b_s)
# fake_B2B, _, _ = genA2B(real_B)
# fake_A2A, _, _ = genB2A(real_A)
colsA = [real_A, fake_A2A]
alphaA2A = alphaA2A.repeat(1, 3, 1, 1)
colsA.append(alphaA2A)
colsB = [real_B, fake_B2B]
alphaB2B = alphaB2B.repeat(1, 3, 1, 1)
colsB.append(alphaB2B)
fake_A2B_1, alpha = genA2B.decode(A2B_content, A2B_mod1)
fake_B2A_1, alpha = genB2A.decode(B2A_content, B2A_mod1)
fake_A2B_2, alpha = genA2B.decode(A2B_content, A2B_mod2)
fake_B2A_2, alpha = genB2A.decode(B2A_content, B2A_mod2)
fake_A2B_3, alpha1 = genA2B.decode(A2B_content, B2A_style)
fake_B2A_3, alpha2 = genB2A.decode(B2A_content, A2B_style)
fake_A2B_3 = fake_A2B_3*alpha1 + (1-alpha1)*B_bg[0:1, :, :, :]
fake_B2A_3 = fake_B2A_3*alpha2 + (1-alpha2)*A_bg[0:1, :, :, :]
fake_A2B_2[:, 0:1, :, :] = alpha1
fake_A2B_2[:, 1:2, :, :] = alpha1
fake_A2B_2[:, 2:3, :, :] = alpha1
fake_B2A_2[:, 0:1, :, :] = alpha2
fake_B2A_2[:, 1:2, :, :] = alpha2
fake_B2A_2[:, 2:3, :, :] = alpha2
colsA += [fake_A2B_3, fake_A2B_1, fake_A2B_2]
colsB += [fake_B2A_3, fake_B2A_1, fake_B2A_2]
fake_A2B2A, _, _, alpha = genB2A(fake_A2B_3, A2B_style)
fake_B2A2B, _, _, alpha = genA2B(fake_B2A_3, B2A_style)
colsA.append(fake_A2B2A)
colsB.append(fake_B2A2B)
fake_A2B2A, _, _, alpha = genB2A(fake_A2B_1, A2B_style)
fake_B2A2B, _, _, alpha = genA2B(fake_B2A_1, B2A_style)
colsA.append(fake_A2B2A)
colsB.append(fake_B2A2B)
# fake_A2B2A, _, _, alpha = genB2A(fake_A2B_2, A2B_style)
# fake_B2A2B, _, _, alpha = genA2B(fake_B2A_2, B2A_style)
# colsA.append(fake_A2B2A)
# colsB.append(fake_B2A2B)
fake_A2B2A, _, _, alpha = genB2A(fake_A2B_1, B2A_mod1)
fake_B2A2B, _, _, alpha = genA2B(fake_B2A_1, A2B_mod1)
colsA.append(fake_A2B2A)
colsB.append(fake_B2A2B)
colsA = torch.cat(colsA, 2).detach().cpu()
colsB = torch.cat(colsB, 2).detach().cpu()
A2B.append(colsA)
B2A.append(colsB)
A2B = torch.cat(A2B, 0)
B2A = torch.cat(B2A, 0)
utils.save_image(A2B, f'{im_path}/{name}_A2B_{str(step).zfill(6)}.jpg', normalize=True, range=(-1, 1), nrow=16)
utils.save_image(B2A, f'{im_path}/{name}_B2A_{str(step).zfill(6)}.jpg', normalize=True, range=(-1, 1), nrow=16)
genA2B.train(), genB2A.train()
def train(args, trainA_loader, trainB_loader, testA_loader, testB_loader, G_A2B, G_B2A, D_A, D_B, G_optim, D_optim, device, trainA_bg_loader, trainB_bg_loader):
G_A2B.train(), G_B2A.train(), D_A.train(), D_B.train()
trainA_loader = sample_data(trainA_loader)
trainB_loader = sample_data(trainB_loader)
trainA_bg_loader = sample_data(trainA_bg_loader)
trainB_bg_loader = sample_data(trainB_bg_loader)
G_scheduler = lr_scheduler.StepLR(G_optim, step_size=100000, gamma=0.5)
D_scheduler = lr_scheduler.StepLR(D_optim, step_size=100000, gamma=0.5)
pbar = range(args.iter)
if get_rank() == 0:
pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.1)
loss_dict = {}
mean_path_length_A2B = 0
mean_path_length_B2A = 0
path_loss = torch.tensor(0.0, device=device)
path_lengths = torch.tensor(0.0, device=device)
mean_path_length = 0
if args.distributed:
G_A2B_module = G_A2B.module
G_B2A_module = G_B2A.module
D_A_module = D_A.module
D_B_module = D_B.module
D_L_module = D_L.module
else:
G_A2B_module = G_A2B
G_B2A_module = G_B2A
D_A_module = D_A
D_B_module = D_B
D_L_module = D_L
for idx in pbar:
i = idx + args.start_iter
if i > args.iter:
print('Done!')
break
#G_A2B.train(), G_A2B.encoder.eval(), G_B2A.eval(), D_A.train(), D_B.train()
#for p_i in G_A2B.encoder.parameters():
# p_i.requires_grad=False
#for p_i in G_B2A.parameters():
# p_i.requires_grad=False
ori_A = next(trainA_loader)
ori_B = next(trainB_loader)
A_bg = next(trainA_bg_loader)
B_bg = next(trainB_bg_loader)
if isinstance(ori_A, list):
ori_A = ori_A[0]
if isinstance(ori_B, list):
ori_B = ori_B[0]
if isinstance(A_bg, list):
A_bg = A_bg[0]
if isinstance(B_bg, list):
B_bg = B_bg[0]
ori_A = ori_A.to(device)
ori_B = ori_B.to(device)
A_bg = augA2(A_bg.to(device))
B_bg = augB2(B_bg.to(device))
#aug_A = augA2(ori_A)
# aug_A_smooth = bilateralFilter(aug_A, 15, 0.15, 5).detach()
#aug_B = augB2(ori_B)
aug_A = augA(ori_A)
aug_B = augB(ori_B)
# aug_A = DiffAugment(ori_A, policy='color,translation,cutout')
# aug_B = DiffAugment(ori_B, policy='color,translation,cutout')
# A = augA(ori_A[[np.random.randint(args.batch)]].expand_as(ori_A))
# B = augB(ori_B[[np.random.randint(args.batch)]].expand_as(ori_B))
# A = augA(ori_A)
# B = augB(ori_B)
# A = ori_A
# B = ori_B
batch_id = np.random.randint(args.batch)
single_A_batch = ori_A[[batch_id]].expand_as(ori_A)
single_B_batch = ori_B[[batch_id]].expand_as(ori_B)
# single_A_batch = ori_A[[batch_id]].expand(ori_A.shape[0]+1, ori_A.shape[1], ori_A.shape[2], ori_A.shape[3])
# single_B_batch = ori_B[[batch_id]].expand(ori_B.shape[0]+1, ori_B.shape[1], ori_B.shape[2], ori_B.shape[3])
#A = augA3(single_A_batch)
A = augA(single_A_batch)
A[1] = torch.flip(A[0],[2])
# B = augB3(single_B_batch)
B = augB(single_B_batch)
B[1] = torch.flip(B[0],[2])
# A = augA2(ori_A)
# B = augB2(ori_B)
if i % args.d_reg_every == 0:
aug_A.requires_grad = True
aug_B.requires_grad = True
# if i % args.d_reg_every == 0:
# A.requires_grad = True
# B.requires_grad = True
A2B_content, A2B_style = G_A2B.encode(A)
B2A_content, B2A_style = G_B2A.encode(B)
A_aug_style = G_A2B.style_encode(augA(single_A_batch))
B_aug_style = G_B2A.style_encode(augB(single_B_batch))
# get new style
aug_A2B_style = G_B2A.style_encode(aug_B)
aug_B2A_style = G_A2B.style_encode(aug_A)
rand_A2B_style = torch.randn([args.batch, args.latent_dim]).to(device).requires_grad_()
rand_B2A_style = torch.randn([args.batch, args.latent_dim]).to(device).requires_grad_()
#print(rand_A2B_style.shape)
# styles
idx = torch.randperm(2*args.batch)
#print(idx)
#print(rand_A2B_style)
#print(aug_A2B_style)
input_A2B_style = torch.cat([rand_A2B_style, aug_A2B_style], 0)[idx][:args.batch]
#print(A2B_style.shape)
#print(input_A2B_style)
idx = torch.randperm(2*args.batch)
input_B2A_style = torch.cat([rand_B2A_style, aug_B2A_style], 0)[idx][:args.batch]
fake_A2B, fake_A2B_alpha = G_A2B.decode(A2B_content, input_A2B_style)
fake_B2A, fake_B2A_alpha = G_B2A.decode(B2A_content, input_B2A_style)
b_c, b_s = G_B2A.encode(B_bg)
B_bg, _ = G_A2B.decode(b_c, input_A2B_style)
a_c, a_s = G_A2B.encode(A_bg)
A_bg, _ = G_B2A.decode(a_c, input_B2A_style)
B_bg = B_bg.detach()
A_bg = A_bg.detach()
if i % 2 == 0:
A_bg[1] = torch.flip(A_bg[0],[2])
B_bg[1] = torch.flip(B_bg[0],[2])
fake_A2B_detach = fake_A2B.detach()
fake_B2A_detach = fake_B2A.detach()
fake_A2B = fake_A2B_detach*fake_A2B_alpha + (1.0-fake_A2B_alpha)*(B_bg)
fake_B2A = fake_B2A_detach*fake_B2A_alpha + (1.0-fake_B2A_alpha)*(A_bg)
# train disc
# aug_A_smooth = bilateralFilter(aug_A, 15, 0.15, 5)
real_A_logit = D_A(aug_A)
real_B_logit = D_B(aug_B)
# A_smooth = bilateralFilter(A, 15, 0.15, 5)
# real_A_logit = D_A(A_smooth)
# real_B_logit = D_B(B)
real_L_logit1 = D_L(rand_A2B_style)
real_L_logit2 = D_L(rand_B2A_style)
fake_B_logit = D_B(fake_A2B.detach())
fake_A_logit = D_A(fake_B2A.detach())
# fake_B_logit = D_B(DiffAugment(fake_A2B.detach(), policy='color,translation,cutout'))
# fake_A_logit = D_A(DiffAugment(fake_B2A.detach(), policy='color,translation,cutout'))
fake_L_logit1 = D_L(aug_A2B_style.detach())
fake_L_logit2 = D_L(aug_B2A_style.detach())
# global loss
D_loss = d_logistic_loss(real_A_logit, fake_A_logit) +\
d_logistic_loss(real_B_logit, fake_B_logit) +\
d_logistic_loss(real_L_logit1, fake_L_logit1) +\
d_logistic_loss(real_L_logit2, fake_L_logit2)
loss_dict['D_adv'] = D_loss
if i % args.d_reg_every == 0:
# r1_A_loss = d_r1_loss(real_A_logit, A)
# r1_B_loss = d_r1_loss(real_B_logit, B)
r1_A_loss = d_r1_loss(real_A_logit, aug_A)
r1_B_loss = d_r1_loss(real_B_logit, aug_B)
r1_L_loss = d_r1_loss(real_L_logit1, rand_A2B_style) + d_r1_loss(real_L_logit2, rand_B2A_style)
r1_loss = r1_A_loss + r1_B_loss + r1_L_loss
D_r1_loss = (args.r1 / 2 * r1_loss * args.d_reg_every)
D_loss += D_r1_loss
D_optim.zero_grad()
D_loss.backward()
D_optim.step()
#Generator
# adv loss
fake_B_logit = D_B(fake_A2B)
fake_A_logit = D_A(fake_B2A)
# fake_B_logit = D_B(DiffAugment(fake_A2B, policy='color,translation,cutout'))
# fake_A_logit = D_A(DiffAugment(fake_B2A, policy='color,translation,cutout'))
fake_L_logit1 = D_L(aug_A2B_style)
fake_L_logit2 = D_L(aug_B2A_style)
lambda_adv = (1, 1, 1)
G_adv_loss = 1 * (g_nonsaturating_loss(fake_A_logit, lambda_adv) +\
g_nonsaturating_loss(fake_B_logit, lambda_adv) +\
2*g_nonsaturating_loss(fake_L_logit1, (1,)) +\
2*g_nonsaturating_loss(fake_L_logit2, (1,)))
# style consis loss
G_con_loss = 50 * (A2B_style.var(0, unbiased=False).sum() + B2A_style.var(0, unbiased=False).sum())
# G_con_loss = 50 * (cosine_distance(A2B_style).sum() + cosine_distance(B2A_style).sum())
# cycle recon
A2B2A_content, A2B2A_style = G_B2A.encode(fake_A2B)
#print(A2B2A_content.shape)
B2A2B_content, B2A2B_style = G_A2B.encode(fake_B2A)
# fake_A2B2A = G_B2A.decode(A2B2A_content, shuffle_batch(A2B_style))
# fake_B2A2B = G_A2B.decode(B2A2B_content, shuffle_batch(B2A_style))
fake_A2B2A, fake_A2B2A_alpha = G_B2A.decode(A2B2A_content, shuffle_batch(A_aug_style))
fake_B2A2B, | |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import getpass
import argparse
import readline
import os
import cmd
from prettytable import PrettyTable
from cm_api.api_client import ApiResource, ApiException
from urllib2 import URLError
# Config
CONFIG = {'cluster': None, 'output_type': 'table', 'seperator': None}
# Initial Prompt
INIT_PROMPT = "cloudera> "
# Banner shown at interactive shell login
BANNER = "Welcome to the Cloudera Manager Console\nSelect a cluster using 'show clusters' and 'use'"
# If true, then the user is running a non-interactive shell (i.e. scripting)
EXECUTE = False
# Readline fix for hyphens
readline.set_completer_delims(readline.get_completer_delims().replace('-', ''))
# Global API object
api = None
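# Hedged sketch (assumption: the actual wiring lives in the omitted main/argparse
# section). The global `api` is expected to be an authenticated cm_api handle,
# created along these lines before ClouderaShell.cmdloop() is started.
def _example_connect(host):  # illustrative only; host and credentials are placeholders
    return ApiResource(host, username="admin", password="admin")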
class ClouderaShell(cmd.Cmd):
"""
Interactive shell for communicating with your
Cloudera cluster, making use of the cm_api client
"""
# Set initial cloudera prompt
prompt = INIT_PROMPT
# Set login banner
intro = BANNER
# Help headers
doc_header = "Cloudera Manager Commands"
undoc_header = "Other Commands"
# Initial cache is blank
# when autocomplete for one of these components
# is triggered, it will automatically cache them
CACHED_ROLES = {}
CACHED_SERVICES = None
CACHED_CLUSTERS = None
def preloop(self):
"Checks if the cluster was pre-defined"
if CONFIG['cluster']:
self.set_cluster(CONFIG['cluster'])
else:
self.cluster_object = None
def generate_output(self, headers, rows, align=None):
if CONFIG['output_type'] == "table":
table = PrettyTable(headers)
if align:
for h in align:
table.align[h] = 'l'
for r in rows:
table.add_row(r)
print(table)
if CONFIG['output_type'] == "csv":
print(','.join(headers))
for r in rows:
print(','.join(r))
if CONFIG['output_type'] == "custom":
SEP = CONFIG['seperator']
print(SEP.join(headers))
for r in rows:
print(SEP.join(r))
def emptyline(self):
"""Called each time a user hits enter on an empty line; by
default cmd would repeat the last command, so this
override makes it do nothing instead."""
pass
def set_cluster(self, cluster):
try:
cluster = api.get_cluster(cluster)
except ApiException:
print("Cluster Not Found!")
return None
self.cluster_object = cluster
if not EXECUTE:
print("Connected to %s" % (cluster.name))
self.prompt = cluster.name + "> "
return True
@property
def cluster(self):
if EXECUTE:
if not self.set_cluster(CONFIG['cluster']):
sys.exit(1)
return self.cluster_object.name
if self.cluster_object:
return self.cluster_object.name
else:
return None
def has_cluster(self):
if not self.cluster:
print("Error: No cluster currently selected")
return None
else:
return True
def get_log(self, role, log_type=None):
if not role:
return None
if not self.has_cluster():
return None
if '-' not in role:
print("Please enter a valid role name")
return None
try:
service = api.get_cluster(self.cluster).get_service(role.split('-')[0])
role = service.get_role(role)
try:
if EXECUTE:
output = sys.stdout
else:
output = os.popen("less", "w")
if log_type == "full":
output.write(role.get_full_log())
if log_type == "stdout":
output.write(role.get_stdout())
if log_type == "stderr":
output.write(role.get_stderr())
if not EXECUTE:
output.close()
except IOError:
pass
except ApiException:
print("Error: Role or Service Not Found")
def do_status(self, service):
"""
List all services on the cluster
Usage:
> status
"""
if service:
self.do_show("services", single=service)
else:
self.do_show("services")
def do_log(self, role):
"""
Download log file for role
Usage:
> log <role> Download log
"""
self.get_log(role, log_type="full")
def do_stdout(self, role):
"""
Download stdout file for role
Usage:
> stdout <role> Download stdout
"""
self.get_log(role, log_type="stdout")
def do_stderr(self, role):
"""
Download stderr file for role
Usage:
> stderr <role> Download stderr
"""
self.get_log(role, log_type="stderr")
def do_show(self, option, single=None):
"""
General System Information
Usage:
> show clusters list of clusters this CM manages
> show hosts list of all hosts CM manages
> show services list of all services on this cluster
including their health.
"""
headers = []
rows = []
align = None
# show clusters
if option == "clusters":
"Display list of clusters on system"
headers = ["CLUSTER NAME"]
clusters = api.get_all_clusters()
for cluster in clusters:
rows.append([cluster.name])
# show hosts
if option == "hosts":
"Display a list of hosts available on the system"
headers = ["HOSTNAME", "IP ADDRESS", "RACK"]
align = ["HOSTNAME", "IP ADDRESS", "RACK"]
for host in api.get_all_hosts():
rows.append([host.hostname, host.ipAddress, host.rackId])
# show services
if option == "services":
"Show list of services on the cluster"
headers = ["NAME", "SERVICE", "STATUS", "HEALTH", "CONFIG"]
align = ["NAME", "SERVICE"]
# Check if the user has selected a cluster
if not self.has_cluster():
print("Error: Please select a cluster first")
return None
if not single:
for s in api.get_cluster(self.cluster).get_all_services():
if s.configStale:
config = "STALE"
else:
config = "UP TO DATE"
rows.append([s.name, s.type, s.serviceState, s.healthSummary, config])
else:
s = api.get_cluster(self.cluster).get_service(single)
if s.configStale:
config = "STALE"
else:
config = "UP TO DATE"
rows.append([s.name, s.type, s.serviceState, s.healthSummary, config])
self.generate_output(headers, rows, align=align)
def complete_log(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def complete_stdout(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def complete_stderr(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def complete_show(self, text, line, start_index, end_index):
show_commands = ["clusters", "hosts", "services"]
if text:
return [c for c in show_commands if c.startswith(text)]
else:
return show_commands
def service_action(self, service, action):
"Perform given action on service for the selected cluster"
try:
service = api.get_cluster(self.cluster).get_service(service)
except ApiException:
print("Service not found")
return None
if action == "start":
service.start()
if action == "restart":
service.restart()
if action == "stop":
service.stop()
return True
def services_autocomplete(self, text, line, start_index, end_index, append=[]):
if not self.cluster:
return None
else:
if not self.CACHED_SERVICES:
services = [s.name for s in api.get_cluster(self.cluster).get_all_services()]
self.CACHED_SERVICES = services
if text:
return [s for s in self.CACHED_SERVICES + append if s.startswith(text)]
else:
return self.CACHED_SERVICES + append
def do_start_service(self, service):
"""
Start a service
Usage:
> start_service <service>
"""
if not self.has_cluster():
return None
if self.service_action(service=service, action="start"):
print("%s is being started" % (service))
else:
print("Error starting service")
return None
def complete_start_service(self, text, line, start_index, end_index):
return self.services_autocomplete(text, line, start_index, end_index)
def do_restart_service(self, service):
"""
Restart a service
Usage:
> restart_service <service>
"""
if not self.has_cluster():
return None
if self.service_action(service=service, action="restart"):
print("%s is being restarted" % (service))
else:
print("Error restarting service")
return None
def complete_restart_service(self, text, line, start_index, end_index):
return self.services_autocomplete(text, line, start_index, end_index)
def do_stop_service(self, service):
"""
Stop a service
Usage:
> stop_service <service>
"""
if not self.has_cluster():
return None
if self.service_action(service=service, action="stop"):
print("%s is being stopped" % (service))
else:
print("Error stopping service")
return None
def complete_stop_service(self, text, line, start_index, end_index):
return self.services_autocomplete(text, line, start_index, end_index)
def do_use(self, cluster):
"""
Connect to Cluster
Usage:
> use <cluster>
"""
if not self.set_cluster(cluster):
print("Error setting cluster")
def cluster_autocomplete(self, text, line, start_index, end_index):
"autocomplete for the use command, obtain list of clusters first"
if not self.CACHED_CLUSTERS:
clusters = [cluster.name for cluster in api.get_all_clusters()]
self.CACHED_CLUSTERS = clusters
if text:
return [cluster for cluster in self.CACHED_CLUSTERS if cluster.startswith(text)]
else:
return self.CACHED_CLUSTERS
def complete_use(self, text, line, start_index, end_index):
return self.cluster_autocomplete(text, line, start_index, end_index)
def do_roles(self, service):
"""
Role information
Usage:
> roles <servicename> Display role information for service
> roles all Display all role information for cluster
"""
if not self.has_cluster():
return None
if not service:
return None
if service == "all":
if not self.CACHED_SERVICES:
self.services_autocomplete('', service, 0, 0)
for s in self.CACHED_SERVICES:
print("= " + s.upper() + " =")
self.do_roles(s)
return None
try:
service = api.get_cluster(self.cluster).get_service(service)
headers = ["ROLE TYPE", "HOST", "ROLE NAME", "STATE", "HEALTH", "CONFIG"]
align = ["ROLE TYPE", "ROLE NAME", "HOST"]
rows = []
for roletype in service.get_role_types():
for role in service.get_roles_by_type(roletype):
if role.configStale:
config = "STALE"
else:
config = "UP TO DATE"
rows.append([role.type, role.hostRef.hostId, role.name, role.roleState, role.healthSummary, config])
self.generate_output(headers, rows, align=align)
except ApiException:
print("Service not found")
def complete_roles(self, text, line, start_index, end_index):
return self.services_autocomplete(text, line, start_index, end_index, append=["all"])
def roles_autocomplete(self, text, line, start_index, end_index):
"Return full list of roles"
if '-' not in line:
# Append a dash to each service, makes for faster autocompletion of
# roles
return [s + '-' for s in self.services_autocomplete(text, line, start_index, end_index)]
else:
key, role = line.split()[1].split('-', | |
Works for both packed and unpacked inputs.
atten_idx = tf.reshape(tf.transpose(atten_idx), [-1])
input_embs += self.task_emb.EmbLookup(theta.task_emb, targets.task_ids)
if p.model_dim != self._token_emb_dim:
input_embs = self.emb_proj.FProp(theta.emb_proj, input_embs)
input_embs = tf.transpose(input_embs, [1, 0, 2])
input_embs = self.input_dropout.FProp(theta.input_dropout, input_embs)
if not p.packed_input:
src_enc_len = tf.reduce_sum(1 - source_paddings, axis=0)
num_hyps_per_beam = tf.div(
py_utils.GetShape(target_paddings)[1],
py_utils.GetShape(source_paddings)[1])
src_enc_len = self._ExpandToNumHyps(src_enc_len, num_hyps_per_beam)
layer_in = input_embs
per_layer_attn_probs = []
for i, (layer, layer_theta) in enumerate(zip(self.trans, theta.trans)):
# [time, batch, model_dim]
layer_out, probs = layer.FProp(
layer_theta,
layer_in,
target_paddings,
source_encs[i],
source_paddings,
source_segment_id=target_segment_id,
aux_segment_id=src_segment_id,
atten_idx=atten_idx)
layer_in = layer_out
pl_probs = tf.transpose(probs, [1, 0, 2])
if p.packed_input:
# For packed inputs we are currently not removing the EOS token.
per_layer_attn_probs.append(pl_probs)
else:
# Remove attention weight on last (EOS) token and re-normalize
# so that last dimension sums to 1. See b/129097156.
# Original probs shape: [trg time, batch, src time]
norma_atten_probs_3d = self._RemoveEOSProbs(p, pl_probs, src_enc_len)
per_layer_attn_probs.append(norma_atten_probs_3d)
# per_layer_attn_probs shape: [batch, trg time, src time]
self._AddAttenProbsSummary(source_paddings, targets, per_layer_attn_probs)
# Aggregate per-layer attention probs.
aggregated_atten_probs = (
tf.math.add_n(per_layer_attn_probs) / len(per_layer_attn_probs))
attention_map = py_utils.NestedMap(probs=aggregated_atten_probs)
return py_utils.NestedMap(
softmax_input=layer_out, attention=attention_map)
def AddExtraDecodingInfo(self, encoder_outputs, targets):
"""Adds extra decoding information to encoded_outputs.
Args:
encoder_outputs: a NestedMap computed by encoder.
targets: a NestedMap containing target input fields.
Returns:
encoder_ouputs with extra information used for decoding.
"""
p = self.params
if p.task_emb:
encoder_outputs['target_task_ids'] = targets.task_ids[:, 0]
if p.init_step_ids:
encoder_outputs['init_step_ids'] = targets.ids[:, 0]
return encoder_outputs
def ExtendStep(self, theta, encoder_outputs, new_ids, t, prefix_states):
"""Extend prefix as represented by `prefix_states` by one more step.
This function is expected to be called during fast decoding of Transformer
models.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
encoder_outputs: a NestedMap computed by encoder, containing:
- encoded: source encoding, of shape [time, batch, depth]. Can be [time,
bs, depth, num_trans_layers] if is_transparent is set.
- padding: source encoding's padding, of shape [time, batch].
new_ids: new input ids, of shape [batch].
t: a scalar, the current time step, 0-based.
prefix_states: a `.NestedMap` representing the prefix that has already
been decoded.
Returns:
A tuple (last_decoder_out, prefix_states, atten_probs), where
last_decoder_out is the output of the last decoder layer of
shape [batch, model_dim], `prefix_states` is the updated prefix states,
and atten_probs contains attention in shape [batch, src_len] for the
given target position.
"""
p = self.params
source_paddings = encoder_outputs.padding
time, batch = py_utils.GetShape(source_paddings, 2)
if p.is_transparent:
source_encs = py_utils.HasShape(
encoder_outputs.encoded,
[time, batch, p.source_dim, p.num_trans_layers])
source_encs = tf.unstack(source_encs, axis=3)
else:
source_encs = py_utils.HasShape(encoder_outputs.encoded,
[time, batch, p.source_dim])
source_encs = [source_encs] * p.num_trans_layers
with tf.name_scope(p.name):
# Embedding layer
# [batch, time, model_dim]
if not self._share_sm_emb:
token_embs = self.token_emb.EmbLookup(theta.token_emb, new_ids)
else:
token_embs = self.softmax.EmbLookup(theta.softmax, new_ids)
# [time, model_dim]
posit_embs = tf.slice(
self.position_emb.FProp(theta.position_emb, p.target_seq_len), [t, 0],
[1, p.model_dim])
input_embs = token_embs + posit_embs
# Infer num_hyps_per_beam: new_ids has orig_batch_size * num_hyps_per_beam
# source_paddings has orig_batch_size.
num_hyps_per_beam = tf.div(
py_utils.GetShape(new_ids)[0],
py_utils.GetShape(source_paddings)[1])
atten_idx = None
if p.task_emb:
task_ids = self._ExpandToNumHyps(encoder_outputs.target_task_ids,
num_hyps_per_beam)
if p.use_lang_dependent_atten:
atten_idx = task_ids
input_embs += self.task_emb.EmbLookup(theta.task_emb, task_ids)
if p.model_dim != self._token_emb_dim:
input_embs = self.emb_proj.FProp(theta.emb_proj, input_embs)
input_embs = self.input_dropout.FProp(theta.input_dropout, input_embs)
# Make a copy of the input.
out_prefix_states = prefix_states.Pack(prefix_states.Flatten())
layer_in = input_embs
# Infer true source encoder length from the padding.
src_enc_len = tf.reduce_sum(1 - source_paddings, axis=0)
# Need to expand src_enc_len to reflect multiple hypotheses.
src_enc_len = self._ExpandToNumHyps(src_enc_len, num_hyps_per_beam)
atten_probs = []
for i, (layer, layer_theta) in enumerate(zip(self.trans, theta.trans)):
# [time, batch, model_dim]
layer_prefix_states = prefix_states['layer_%i' % i]
layer_out, probs, updated_prefix_states = layer.ExtendStep(
layer_theta,
layer_in,
layer_prefix_states,
source_encs[i],
source_paddings,
t if p.beam_search.name == 'tpu_beam_search' else None,
atten_idx=atten_idx)
out_prefix_states['layer_%i' % i] = updated_prefix_states
layer_in = layer_out
# Enforce shape: [batch, src_len]
probs = tf.squeeze(probs)
# Remove attention weight on last (EOS) token and re-normalize
# so that last dimension sums to 1. See b/129097156.
probs_3d = tf.expand_dims(probs, axis=1)
probs_3d = self._RemoveEOSProbs(p, probs_3d, src_enc_len)
probs = tf.squeeze(probs_3d, axis=1)
atten_probs.append(probs)
# Aggregate per-layer attention probs.
aggregated_atten_probs = tf.math.add_n(atten_probs) / len(atten_probs)
return layer_out, out_prefix_states, aggregated_atten_probs
def ComputePredictions(self, theta, encoder_outputs, targets):
"""Decodes `targets` given encoded source.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
encoder_outputs: a NestedMap computed by encoder. Expected to contain:
encoded - source encoding, of shape [time, batch, depth]. Can be [time,
batch, depth, num_layers] if is_transparent is set.
padding - source encoding's padding, of shape [time, batch].
segment_id - source segment id, of shape [time, batch].
targets: A dict of string to tensors representing the targets one try to
predict. Each tensor in targets is of shape [batch, time].
Returns:
A `.NestedMap` containing output of last decoder layer and attention probs
- softmax_input: Tensor of shape [time, batch, params.softmax.input_dim].
- attention: `.NestedMap` of attention distributions of shape
[batch, time, source_len].
"""
return self._FProp(theta, encoder_outputs, targets)
def SampleSequenceDecode(self, encoder_outputs):
"""Decode via sampling from softmax at each step.
Args:
encoder_outputs: the outputs of the encoder.
Returns:
BeamSearchDecodeOutput, same as what BeamSearchDecode returns.
"""
p = self.params
non_tpu = p.beam_search.name != 'tpu_beam_search'
def InitCallback(theta, encoder_outputs, num_hyps_per_beam=1):
"""Wrapper for _InitBeamSearchStateCallback for sequence sampler.
The main change is to ensure state tensors have fixed shapes.
Args:
theta: A `.NestedMap` object containing weights' values of this layer
and its children layers.
encoder_outputs: a NestedMap computed by encoder.
num_hyps_per_beam: An int, number of hyps to keep for each source sentence.
Returns:
A NestedMap of
- initial_results: a `.NestedMap` of initial results.
- states: a `.NestedMap` of initial model states.
"""
init_results, states = self._InitBeamSearchStateCallback(
theta, encoder_outputs, num_hyps_per_beam)
if non_tpu:
prefix_states = states['prefix_states']
for layer in range(p.num_trans_layers):
key = prefix_states['layer_%d' % layer]['key']
value = prefix_states['layer_%d' % layer]['value']
bs = key.shape[1]
atten_dim = key.shape[2]
zeros = tf.zeros([p.target_seq_len, bs, atten_dim],
dtype=py_utils.FPropDtype(p))
prefix_states['layer_%d' % layer]['key'] = tf.concat([key, zeros], 0)
prefix_states['layer_%d' % layer]['value'] = tf.concat([value, zeros],
0)
return init_results, states
def PreBeamSearchCallback(theta,
encoder_outputs,
step_ids,
states,
num_hyps_per_beam=1):
"""Wrapper for _PreBeamSearchStepCallback for sequence sampler.
The main change is to ensure state tensors have fixed shapes.
Args:
theta: A `.NestedMap` object containing weights' values of this layer
and its children layers.
encoder_outputs: a NestedMap computed by encoder.
step_ids: A tensor of shape [tgt_batch, 1].
states: A `.NestedMap` of tensors representing states that the clients
would like to keep track of for each of the active hyps.
num_hyps_per_beam: Beam size.
Returns:
A NestedMap of
- results: A `.NestedMap` of beam search results.
- out_states: A `.NestedMap`. The updated states.
"""
if non_tpu:
# Strip off paddings.
prefix_states = states['prefix_states']
target_time = states.time_step
for layer in range(p.num_trans_layers):
key = prefix_states['layer_%d' % layer]['key']
val = prefix_states['layer_%d' % layer]['value']
prefix_states['layer_%d' % layer]['key'] = tf.slice(
key, [0, 0, 0], [target_time, -1, -1])
prefix_states['layer_%d' % layer]['value'] = tf.slice(
val, [0, 0, 0], [target_time, -1, -1])
bs_results, new_states = self._PreBeamSearchStepCallback(
theta, encoder_outputs, step_ids, states, num_hyps_per_beam)
if non_tpu:
# Add back paddings (to maintain paddings shape).
bs = tf.shape(new_states.prefix_states['layer_0']['key'])[1]
dim = tf.shape(new_states.prefix_states['layer_0']['key'])[2]
pad = tf.zeros([p.target_seq_len - new_states.time_step, bs, dim],
dtype=py_utils.FPropDtype(p))
for layer in range(p.num_trans_layers):
key = new_states.prefix_states['layer_%d' % layer]['key']
val = new_states.prefix_states['layer_%d' % layer]['value']
new_states.prefix_states['layer_%d' % layer]['key'] = tf.concat(
[key, pad], axis=0)
new_states.prefix_states['layer_%d' % layer]['value'] = tf.concat(
[val, pad], axis=0)
return bs_results, new_states
random_seed = tf.random_uniform(
shape=[], maxval=(2**31 - 1), dtype=tf.int32, seed=p.random_seed)
sample = self.target_sequence_sampler.Sample(
self.theta, encoder_outputs, random_seed, InitCallback,
PreBeamSearchCallback, self._PostBeamSearchStepCallback)
bs = tf.shape(sample.ids)[0]
# Only need to make sure topk_hyps has the right shape
# [bs, num_hyps_per_beam], where num_hyps_per_beam=1 for sampling.
# TODO(yuancao): Support sampling multiple sequences and remove
# num_hyps_per_beam constraint.
assert self.params.beam_search.num_hyps_per_beam == 1
sample.topk_hyps = tf.zeros([bs, 1], dtype=tf.string)
sample.topk_ids = sample.ids
weights = 1 - sample.paddings
sample.topk_lens = tf.to_int32(tf.reduce_sum(weights, axis=1))
sample.topk_scores = tf.reduce_sum(
tf.log(tf.reduce_max(tf.nn.softmax(sample.logits), axis=2)) * weights,
axis=1)
return sample
def _InitBeamSearchStateCallback(self, theta, encoder_outputs,
num_hyps_per_beam):
"""Returns initial beams search states.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
encoder_outputs: a NestedMap computed by encoder.
num_hyps_per_beam: An int, number of hyps to keep for each source sentence.
Returns:
A tuple (initial_results, states).
initial_results: a `.NestedMap` of initial results.
atten_probs:
The initial attention probs, of shape [tgt_batch, src_len].
states: a `.NestedMap` of initial model | |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import uuid
from datetime import datetime
from typing import ClassVar, Dict, Iterator, List, Optional, Sequence, Set, Tuple, Type, Union, cast
import boto3
from botocore.exceptions import ClientError
from intelliflow.core.platform.definitions.aws.cw.client_wrapper import put_composite_alarm
from intelliflow.core.platform.definitions.aws.cw.dashboard import (
CW_DASHBOARD_WIDTH_MAX,
create_alarm_status_widget,
create_metric_widget,
create_text_widget,
)
from intelliflow.core.platform.definitions.aws.sns.client_wrapper import find_subscription
from intelliflow.core.platform.definitions.common import ActivationParams
from intelliflow.core.signal_processing import Signal
from intelliflow.core.signal_processing.definitions.metric_alarm_defs import (
AlarmDimension,
AlarmRule,
AlarmRuleOperator,
AlarmState,
CompositeAlarmParams,
MetricDimension,
MetricExpression,
MetricStatisticData,
MetricStatType,
MetricValueCountPairData,
)
from intelliflow.core.signal_processing.dimension_constructs import AnyVariant, DimensionVariant
from intelliflow.core.signal_processing.routing_runtime_constructs import Route
from intelliflow.core.signal_processing.signal import SignalType
from intelliflow.core.signal_processing.signal_source import (
CWAlarmSignalSourceAccessSpec,
CWCompositeAlarmSignalSourceAccessSpec,
CWMetricSignalSourceAccessSpec,
InternalAlarmSignalSourceAccessSpec,
InternalCompositeAlarmSignalSourceAccessSpec,
InternalDatasetSignalSourceAccessSpec,
InternalMetricSignalSourceAccessSpec,
MetricSignalSourceAccessSpec,
SignalSourceAccessSpec,
SignalSourceType,
)
from ...constructs import (
BaseConstruct,
ConstructInternalMetricDesc,
ConstructParamsDict,
ConstructPermission,
ConstructPermissionGroup,
ConstructSecurityConf,
Diagnostics,
EncryptionKeyAllocationLevel,
UpstreamGrants,
UpstreamRevokes,
)
from ...definitions.aws.aws_lambda.client_wrapper import add_permission, remove_permission
from ...definitions.aws.common import CommonParams as AWSCommonParams
from ...definitions.aws.common import exponential_retry, generate_statement_id
from ...definitions.aws.kms.client_wrapper import (
KMS_MIN_DELETION_WAITING_PERIOD_IN_DAYS,
create_alias,
create_cmk,
create_default_policy,
delete_alias,
enable_key_rotation,
get_cmk,
put_cmk_policy,
schedule_cmk_deletion,
update_alias,
)
from ...definitions.aws.s3.bucket_wrapper import (
bucket_exists,
create_bucket,
delete_bucket,
get_bucket,
put_notification,
put_policy,
update_policy,
)
from ...definitions.aws.s3.object_wrapper import (
build_object_key,
delete_objects,
empty_bucket,
get_object,
list_objects,
object_exists,
put_object,
)
from ..aws_common import AWSConstructMixin
module_logger = logging.getLogger(__name__)
class AWSCloudWatchDiagnostics(AWSConstructMixin, Diagnostics):
INTERNAL_ALARM_NAME_PREFIX: ClassVar[str] = "if-{0}-"
TOPIC_NAME_FORMAT: ClassVar[str] = "if-{0}-{1}"
"""Diagnostics hub impl based on CloudWatch.
Trade-offs:
Pros:
Cons:
"""
def __init__(self, params: ConstructParamsDict) -> None:
super().__init__(params)
self._cw = self._session.client(service_name="cloudwatch", region_name=self._region)
self._sns = self._session.client(service_name="sns", region_name=self._region)
self._topic_name = None
self._topic_arn = None
self._topic_root_policy = None
def get_event_channel_type(self) -> str:
return "SNS"
def get_event_channel_resource_path(self) -> str:
return self._topic_arn
def _get_internal_alarm_name_prefix(self) -> str:
unique_context_id = self._params[ActivationParams.UNIQUE_ID_FOR_CONTEXT]
return self.INTERNAL_ALARM_NAME_PREFIX.format(unique_context_id)
# overrides
def get_unique_internal_alarm_name(self, alarm_name: str) -> str:
"""To guarantee that a given alarm_name for an internal alarm will have no conflict within the same account,
we use context_uuid as a prefix to make it unique.
"""
return self._get_internal_alarm_name_prefix() + alarm_name
def map_incoming_event(self, source_type: SignalSourceType, resource_path: str) -> Optional[SignalSourceAccessSpec]:
"""Map an external diagnostics event to its internal representation (if it is actually governed by this driver).
This is necessary for the abstraction of diagnostics impl from routing where internal signals are always
kept in generic internal format.
Returns a new access spec with the internal version of the 'source_type' and the internal representation of
        the resource_path (if different). If the external event does not map to an internal signal, then this method
        returns None.
"""
if source_type == SignalSourceType.CW_METRIC:
context_id = MetricSignalSourceAccessSpec.extract_context_id(resource_path)
if context_id == self._params[ActivationParams.UNIQUE_ID_FOR_CONTEXT]:
return SignalSourceAccessSpec(SignalSourceType.INTERNAL_METRIC, resource_path, None)
elif source_type == SignalSourceType.CW_ALARM:
cw_alarm_spec = CWAlarmSignalSourceAccessSpec.from_resource_path(resource_path)
if cw_alarm_spec.account_id == self.account_id and cw_alarm_spec.region_id == self.region:
if cw_alarm_spec.name.startswith(self._get_internal_alarm_name_prefix()):
mapped_resource_path = cw_alarm_spec.name.replace(self._get_internal_alarm_name_prefix(), "") + resource_path.replace(
cw_alarm_spec.arn, ""
)
return SignalSourceAccessSpec(SignalSourceType.INTERNAL_ALARM, mapped_resource_path, None)
elif source_type == SignalSourceType.CW_COMPOSITE_ALARM:
cw_alarm_spec = CWCompositeAlarmSignalSourceAccessSpec.from_resource_path(resource_path)
if cw_alarm_spec.account_id == self.account_id and cw_alarm_spec.region_id == self.region:
if cw_alarm_spec.name.startswith(self._get_internal_alarm_name_prefix()):
mapped_resource_path = (
InternalCompositeAlarmSignalSourceAccessSpec.PATH_PREFIX
+ InternalAlarmSignalSourceAccessSpec.path_delimiter()
+ cw_alarm_spec.name.replace(self._get_internal_alarm_name_prefix(), "")
+ resource_path.replace(cw_alarm_spec.arn, "")
)
return SignalSourceAccessSpec(SignalSourceType.INTERNAL_COMPOSITE_ALARM, mapped_resource_path, None)
return None
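# The alarm branches above all reduce to the same string manipulation: strip the
# per-context "if-<uuid>-" prefix from the CloudWatch alarm name and keep the rest of
# the resource path. Hedged illustration with made-up values (the real parsing is done
# by the *SignalSourceAccessSpec helpers, not by ad-hoc string code like this):
prefix = "if-1234abcd-"                      # what _get_internal_alarm_name_prefix() could return
cw_alarm_name = "if-1234abcd-my_alarm"       # alarm name as created in CloudWatch
resource_path = cw_alarm_name + "/ALARM"     # hypothetical state suffix on the path

if cw_alarm_name.startswith(prefix):
    internal_id = cw_alarm_name.replace(prefix, "")                    # "my_alarm"
    mapped_path = internal_id + resource_path.replace(cw_alarm_name, "")
    print(mapped_path)                                                 # "my_alarm/ALARM"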
def map_external_access_spec(self, access_spec: Optional[SignalSourceAccessSpec] = None) -> Optional[SignalSourceAccessSpec]:
"""Map an external diagnostic signal to its internal version (if it is actually internal and governed by
this driver)."""
if access_spec.source == SignalSourceType.CW_METRIC:
if access_spec.context_id == self._params[ActivationParams.UNIQUE_ID_FOR_CONTEXT]:
return InternalMetricSignalSourceAccessSpec(access_spec.sub_dimensions, **access_spec.attrs)
elif access_spec.source == SignalSourceType.CW_ALARM:
if access_spec.account_id == self.account_id and access_spec.region_id == self.region:
if access_spec.name.startswith(self._get_internal_alarm_name_prefix()):
mapped_id = access_spec.name.replace(self._get_internal_alarm_name_prefix(), "")
return InternalAlarmSignalSourceAccessSpec(mapped_id, access_spec.alarm_params, **access_spec.attrs)
elif access_spec.source == SignalSourceType.CW_COMPOSITE_ALARM:
if access_spec.account_id == self.account_id and access_spec.region_id == self.region:
if access_spec.name.startswith(self._get_internal_alarm_name_prefix()):
mapped_id = access_spec.name.replace(self._get_internal_alarm_name_prefix(), "")
return InternalCompositeAlarmSignalSourceAccessSpec(mapped_id, access_spec.alarm_params, **access_spec.attrs)
return None
def map_internal_access_spec(self, data_access_spec: SignalSourceAccessSpec) -> SignalSourceAccessSpec:
"""Map an internal diagnostic signal to its external/raw version"""
if data_access_spec.source == SignalSourceType.INTERNAL_ALARM:
return CWAlarmSignalSourceAccessSpec(
self.get_unique_internal_alarm_name(data_access_spec.alarm_id),
self.account_id,
self.region,
data_access_spec.alarm_params,
**data_access_spec.attrs,
)
elif data_access_spec.source == SignalSourceType.INTERNAL_COMPOSITE_ALARM:
return CWAlarmSignalSourceAccessSpec(
self.get_unique_internal_alarm_name(data_access_spec.alarm_id),
self.account_id,
self.region,
data_access_spec.alarm_params,
**data_access_spec.attrs,
)
elif data_access_spec.source == SignalSourceType.INTERNAL_METRIC:
return CWMetricSignalSourceAccessSpec(data_access_spec.context_id, data_access_spec.sub_dimensions, **data_access_spec.attrs)
else:
raise ValueError(
f"Input diagnostic data access spec is not of type Internal! Problematic access spec: " f" {data_access_spec!r}"
)
def map_internal_signal(self, signal: Signal) -> Signal:
mapped_resource_spec = self.map_internal_access_spec(signal.resource_access_spec)
signal_type = None
if signal.resource_access_spec.source in [SignalSourceType.INTERNAL_ALARM, SignalSourceType.INTERNAL_COMPOSITE_ALARM]:
signal_type = SignalType.CW_ALARM_STATE_CHANGE
elif signal.resource_access_spec.source == SignalSourceType.INTERNAL_METRIC:
signal_type = SignalType.CW_METRIC_DATA_CREATION
return Signal(
signal_type,
mapped_resource_spec,
signal.domain_spec,
signal.alias,
signal.is_reference,
signal.range_check_required,
signal.nearest_the_tip_in_range,
signal.is_termination,
signal.is_inverted,
)
def _do_emit(
self,
metric_signal: Signal,
value: Union[float, MetricStatisticData, List[Union[float, MetricValueCountPairData]]],
timestamp: Optional[datetime] = None,
) -> None:
"""Refer
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudwatch.html#CloudWatch.Client.put_metric_data
        - Dynamic handling of 'StorageResolution' based on the period value of the metric to be emitted: if the
          period is less than 60 seconds (or is left unspecified), high-resolution storage (1) is used; otherwise
          the standard resolution (60) is used.
        - Use 'alias' as the MetricName if left as '*'.
"""
period = metric_signal.domain_spec.dimension_filter_spec.find_dimension_by_name(MetricDimension.PERIOD).value
storage_resolution = 60
if period == AnyVariant.ANY_DIMENSION_VALUE_SPECIAL_CHAR or period < 60:
storage_resolution = 1
metric_access_spec = cast("MetricSignalSourceAccessSpec", metric_signal.resource_access_spec)
# Metric emission in IF chooses the first materialized path.
metric_stat: MetricStatType = metric_access_spec.create_stats_from_filter(metric_signal.domain_spec.dimension_filter_spec)[0]
metric_data = {
"MetricName": metric_stat["Metric"]["MetricName"],
"Dimensions": metric_stat["Metric"]["Dimensions"],
"Timestamp": timestamp if timestamp else datetime.utcnow(),
# RheocerOS does not use Unit !
"Unit": "None",
"StorageResolution": storage_resolution,
}
if isinstance(value, float) or isinstance(value, int):
metric_data.update({"Value": float(value)})
elif isinstance(value, MetricStatisticData):
metric_data.update(
{
"StatisticValues": {
"SampleCount": float(value.SampleCount),
"Sum": float(value.Sum),
"Minimum": float(value.Minimum),
"Maximum": float(value.Maximum),
}
}
)
elif isinstance(value, list):
if not value or any([not isinstance(pair, (MetricValueCountPairData, float)) for pair in value]):
raise ValueError(
f"Cannot emit metric {metric_signal.alias!r}! Value list should not be empty and "
f"it should contain only entities of type float or "
f"{MetricValueCountPairData.__class__.__name__!r}"
)
metric_data.update(
{
"Values": [float(val) if isinstance(val, float) or isinstance(val, int) else val.Value for val in value],
"Counts": [1.0 if isinstance(val, float) or isinstance(val, int) else val.Count for val in value],
}
)
else:
raise ValueError(f"Value type {type(value)!r} is not supported for metric emission! MetricData={metric_data!r}")
exponential_retry(
self._cw.put_metric_data, {"InternalServiceFault"}, Namespace=metric_stat["Metric"]["Namespace"], MetricData=[metric_data]
)
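# For reference, _do_emit ultimately issues a plain CloudWatch put_metric_data call.
# Minimal direct-boto3 sketch of an equivalent single high-resolution datapoint; the
# namespace, dimensions and value below are placeholders, not what RheocerOS emits.
import boto3
from datetime import datetime

cw = boto3.client("cloudwatch", region_name="us-east-1")
cw.put_metric_data(
    Namespace="MyApp/Diagnostics",
    MetricData=[
        {
            "MetricName": "my_metric",
            "Dimensions": [{"Name": "context", "Value": "dev"}],
            "Timestamp": datetime.utcnow(),
            "Unit": "None",
            "StorageResolution": 1,  # 1 => high-resolution (sub-minute), 60 => standard
            "Value": 42.0,
        }
    ],
)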
def is_internal(self, source_type: SignalSourceType, resource_path: str) -> bool:
if source_type == SignalSourceType.CW_METRIC:
context_id = MetricSignalSourceAccessSpec.extract_context_id(resource_path)
if context_id == self._params[ActivationParams.UNIQUE_ID_FOR_CONTEXT]:
return True
        elif source_type == SignalSourceType.CW_ALARM:
cw_alarm_spec = CWAlarmSignalSourceAccessSpec.from_resource_path(resource_path)
if cw_alarm_spec.account_id == self.account_id and cw_alarm_spec.region_id == self.region:
if cw_alarm_spec.name.startswith(self._get_internal_alarm_name_prefix()):
return True
        elif source_type == SignalSourceType.CW_COMPOSITE_ALARM:
cw_alarm_spec = CWCompositeAlarmSignalSourceAccessSpec.from_resource_path(resource_path)
if cw_alarm_spec.account_id == self.account_id and cw_alarm_spec.region_id == self.region:
if cw_alarm_spec.name.startswith(self._get_internal_alarm_name_prefix()):
return True
return False
def dev_init(self, platform: "DevelopmentPlatform") -> None:
super().dev_init(platform)
self._topic_name = self.TOPIC_NAME_FORMAT.format(self._dev_platform.context_id, self.__class__.__name__)
self._topic_arn = f"arn:aws:sns:{self._region}:{self._account_id}:{self._topic_name}"
def _deserialized_init(self, params: ConstructParamsDict) -> None:
super()._deserialized_init(params)
self._cw = self._session.client(service_name="cloudwatch", region_name=self._region)
self._sns = self._session.client(service_name="sns", region_name=self._region)
def _serializable_copy_init(self, org_instance: "BaseConstruct") -> None:
AWSConstructMixin._serializable_copy_init(self, org_instance)
self._cw = None
self._sns = None
def runtime_init(self, platform: "RuntimePlatform", context_owner: "BaseConstruct") -> None:
"""Whole platform got bootstrapped at runtime. For other runtime services, this
construct should be initialized (ex: context_owner: Lambda, Glue, etc)"""
AWSConstructMixin.runtime_init(self, platform, context_owner)
self._cw = boto3.client(service_name="cloudwatch", region_name=self._region)
self._sns = boto3.client(service_name="sns", region_name=self._region)
def provide_runtime_trusted_entities(self) -> List[str]:
# Is there any scenario when any of the AWS services (s3) from this impl should assume our exec role?
# No
return []
def provide_runtime_default_policies(self) -> List[str]:
return []
def provide_runtime_permissions(self) -> List[ConstructPermission]:
# allow exec-role (post-activation, cumulative list of all trusted entities [AWS services]) to do the following;
permissions = [
ConstructPermission(
[self._topic_arn],
# our exec-role might need to reply back to SNS once a token is received at runtime
# following a 'subscribe' call.
["sns:ConfirmSubscription", "sns:Receive"],
),
# no need to be picky about resources here since the only action that requires 'resource type' is
# DescribeAlarmHistory and we are ok to allow '*' with it.
# ref
# https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazoncloudwatch.html
ConstructPermission(["*"], ["cloudwatch:DescribeAlarmHistory", "cloudwatch:PutMetricData", "cloudwatch:GetMetricData"]),
]
return permissions
@classmethod
def provide_devtime_permissions(cls, params: ConstructParamsDict) -> List[ConstructPermission]:
# dev-role should be able to do the following.
return [
ConstructPermission(["*"], ["sns:*"]),
ConstructPermission(
["*"],
[
"cloudwatch:DeleteAlarms",
"cloudwatch:DescribeAlarmHistory",
"cloudwatch:DescribeAlarms",
"cloudwatch:PutCompositeAlarm",
"cloudwatch:PutMetricAlarm",
"cloudwatch:PutMetricData", # particular for testing (integ-tests, etc)
"cloudwatch:GetMetricData",
"cloudwatch:TagResource",
"cloudwatch:PutDashboard",
],
),
]
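# Each ConstructPermission(resources, actions) pair above presumably renders into one IAM
# policy statement during activation. Hedged sketch of the standard IAM JSON shape the
# dev-time CloudWatch entry would map to (the actual policy assembly happens elsewhere
# in the framework):
import json

statement = {
    "Effect": "Allow",
    "Action": [
        "cloudwatch:DeleteAlarms",
        "cloudwatch:DescribeAlarms",
        "cloudwatch:PutMetricAlarm",
        "cloudwatch:PutMetricData",
        "cloudwatch:GetMetricData",
    ],
    "Resource": "*",
}
print(json.dumps({"Version": "2012-10-17", "Statement": [statement]}, indent=2))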
def _provide_route_metrics(self, route: Route) -> List[ConstructInternalMetricDesc]:
# TODO
return []
def _provide_internal_metrics(self) -> List[ConstructInternalMetricDesc]:
"""Provide internal metrics (of type INTERNAL_METRIC) that should be managed by RheocerOS and emitted by this
driver via Diagnostics::emit.
These metrics are logical metrics generated by the driver (with no assumption on other drivers and other details
about the underlying platform). So as a driver impl, you want Diagnostics driver to manage those metrics and
bind them to alarms, etc. Example: Routing metrics.
"""
return []
def _provide_internal_alarms(self) -> List[Signal]:
"""Provide internal alarms (of type INTERNAL_ALARM OR INTERNAL_COMPOSITE_ALARM) managed/emitted
by this driver impl"""
return []
def _provide_system_metrics(self) -> List[Signal]:
"""Provide metrics auto-generated by the underlying system | |
which should be considered arbitrary. To retrieve a transcoded
fixed-size jpeg version of the thumbnail, use :meth:`ShotgunDataRetriever.download_thumbnail`
instead.
This is a helper method meant to make it easy to port over synchronous legacy
code - for a better solution, we recommend using the thumbnail retrieval
that runs in a background thread.
:param str entity_type: Shotgun entity type with which the thumb is associated.
:param int entity_id: Shotgun entity id with which the thumb is associated.
:param bundle: App, Framework or Engine object requesting the download.
:returns: A path to the thumbnail on disk.
"""
thumb_source_url = urlparse.urlunparse((
bundle.shotgun.config.scheme, bundle.shotgun.config.server,
"/thumbnail/full/%s/%s" % (urllib.quote(str(entity_type)),
urllib.quote(str(entity_id))), None, None, None
))
path_to_cached_thumb, thumb_exists = ShotgunDataRetriever._get_thumbnail_path(
thumb_source_url, bundle
)
if not thumb_exists:
# create folders on disk
bundle.ensure_folder_exists(os.path.dirname(path_to_cached_thumb))
# download using standard core method. This will ensure that
# proxy and connection settings as set in the SG API are used.
# Allow the core method to determine the file type extension
# for the url about to be downloaded. Capture the full path to the
# thumbnail file as returned by sgtk.util.download_url().
try:
full_path = sgtk.util.download_url(
bundle.shotgun, thumb_source_url, path_to_cached_thumb, True
)
path_to_cached_thumb = full_path
except TypeError, e:
# This may be raised if an older version of core is in use
# that doesn't have the final `use_url_extension` arg implemented
# in sgtk.util.download_url() (set to True above). Since the source
# thumbnail url spec does not contain the file type extension, there
# is no way to determine the proper file name to download to.
# Raise a TankError indicating that a newer version of core must be
# used in conjunction with this method.
raise TankError(
"Caught error: \n%s\n"
"Unable to download source thumbnail URL '%s' because the "
"file type extension cannot be determined. Must update to a "
"newer version of core to use ShotgunDataRetriever."
"download_thumbnail_source()." % (e, thumb_source_url)
)
# modify the permissions of the file so it's writeable by others
old_umask = os.umask(0)
try:
os.chmod(path_to_cached_thumb, 0666)
finally:
os.umask(old_umask)
return path_to_cached_thumb
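# Hedged usage sketch for the method above, assuming it is exposed as a static helper
# ShotgunDataRetriever.download_thumbnail_source(entity_type, entity_id, bundle).
# "Version" / 1234 are placeholder values; `app` is the current Toolkit bundle.
import sgtk

app = sgtk.platform.current_bundle()
thumb_path = ShotgunDataRetriever.download_thumbnail_source("Version", 1234, app)
app.log_debug("Source thumbnail cached at %s" % thumb_path)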
def start(self):
"""
Start the retriever thread.
:raises: TankError if there is no :class:`~task_manager.BackgroundTaskManager` associated with this instance
"""
if not self._task_manager:
raise TankError("Unable to start the ShotgunDataRetriever as it has no BackgroundTaskManager!")
self._task_manager.start_processing()
def stop(self):
"""
        Gracefully stop the retriever.
Once stop() has been called, the object needs to be discarded.
This is a blocking call. It will synchronously wait
until any potential currently processing item has completed.
Note that once stopped the data retriever can't be restarted as the handle to the
:class:`~task_manager.BackgroundTaskManager` instance is released.
"""
if not self._task_manager:
return
if self._owns_task_manager:
# we own the task manager so we'll need to completely shut it down before
# returning
self._task_manager.shut_down()
self._task_manager = None
else:
# we don't own the task manager so just stop any tasks we might be running
# and disconnect from it:
self._task_manager.stop_task_group(self._bg_tasks_group)
# make sure we don't get exceptions trying to disconnect if the
# signals were never connected or somehow disconnected externally.
try:
self._task_manager.task_completed.disconnect(self._on_task_completed)
except (TypeError, RuntimeError), e: # was never connected
self._bundle.log_warning(
"Could not disconnect '_on_task_completed' slot from the "
"task manager's 'task_completed' signal: %s" % (e,)
)
try:
self._task_manager.task_failed.disconnect(self._on_task_failed)
except (TypeError, RuntimeError), e: # was never connected
self._bundle.log_debug(
"Could not disconnect '_on_task_failed' slot from the "
"task manager's 'task_failed' signal: %s" % (e,)
)
self._task_manager = None
def clear(self):
"""
Clears the queue.
Any currently processing item will complete without interruption, and signals will be
sent out for these items.
"""
if not self._task_manager:
return
# stop any tasks running in the task group:
self._task_manager.stop_task_group(self._bg_tasks_group)
def stop_work(self, task_id):
"""
Stop the specified task
:param task_id: The task to stop
"""
if not self._task_manager:
return
# stop the task:
self._task_manager.stop_task(task_id)
def get_schema(self, project_id=None):
"""
Execute the schema_read and schema_entity_read methods asynchronously
:param project_id: If specified, the schema listing returned will
be constrained by the schema settings for
the given project.
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(self._task_get_schema,
priority = ShotgunDataRetriever._SG_DOWNLOAD_SCHEMA_PRIORITY,
task_kwargs = {"project_id":project_id})
def execute_find(self, *args, **kwargs):
"""
Executes a Shotgun find query asynchronously.
This method takes the same parameters as the Shotgun find() call.
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
:param ``*args``: args to be passed to the Shotgun find() call
:param ``**kwargs``: Named parameters to be passed to the Shotgun find() call
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(self._task_execute_find,
priority = ShotgunDataRetriever._SG_CALL_PRIORITY,
task_args = args,
task_kwargs = kwargs)
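# As the docstring notes, the returned id is echoed back in the work_completed /
# work_failure signals, so callers typically connect those first and match ids.
# Hedged usage sketch; the constructor argument and the exact signal payloads
# (uid, request_type, data / uid, msg) are assumptions based on the docstrings above.
retriever = ShotgunDataRetriever(parent_widget)   # parent_widget: some QObject/QWidget

def _on_completed(uid, request_type, data):
    if uid == find_uid:
        print("find returned %d Shots" % len(data.get("sg", [])))

def _on_failed(uid, msg):
    if uid == find_uid:
        print("find failed: %s" % msg)

retriever.work_completed.connect(_on_completed)
retriever.work_failure.connect(_on_failed)
retriever.start()
find_uid = retriever.execute_find("Shot", [["sg_status_list", "is", "ip"]], ["code"])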
def execute_find_one(self, *args, **kwargs):
"""
Executes a Shotgun find_one query asynchronously.
This method takes the same parameters as the Shotgun find_one() call.
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
:param ``*args``: args to be passed to the Shotgun find_one() call
:param ``**kwargs``: Named parameters to be passed to the Shotgun find_one() call
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(self._task_execute_find_one,
priority = ShotgunDataRetriever._SG_CALL_PRIORITY,
task_args = args,
task_kwargs = kwargs)
def execute_update(self, *args, **kwargs):
"""
Execute a Shotgun update call asynchronously
This method takes the same parameters as the Shotgun update() call.
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
:param ``*args``: args to be passed to the Shotgun update() call
:param ``**kwargs``: Named parameters to be passed to the Shotgun update() call
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(self._task_execute_update,
priority = ShotgunDataRetriever._SG_CALL_PRIORITY,
task_args = args,
task_kwargs = kwargs)
def execute_create(self, *args, **kwargs):
"""
Execute a Shotgun create call asynchronously
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
This method takes the same parameters as the Shotgun create() call.
:param ``*args``: args to be passed to the Shotgun create() call
:param ``**kwargs``: Named parameters to be passed to the Shotgun create() call
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(self._task_execute_create,
priority = ShotgunDataRetriever._SG_CALL_PRIORITY,
task_args = args,
task_kwargs = kwargs)
def execute_delete(self, *args, **kwargs):
"""
Execute a Shotgun delete call asynchronously
This method takes the same parameters as the Shotgun delete() call.
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
:param ``*args``: args to be passed to the Shotgun delete() call
:param ``**kwargs``: Named parameters to be passed to the Shotgun delete() call
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(self._task_execute_delete,
priority = ShotgunDataRetriever._SG_CALL_PRIORITY,
task_args = args,
task_kwargs = kwargs)
def execute_method(self, method, *args, **kwargs):
"""
        Executes an arbitrary method asynchronously. This is pretty much a
        wrapper for executing a task through the :class:`~task_manager.BackgroundTaskManager`.
        The specified method will be called in the following form::
method(sg, data)
Where sg is a shotgun API instance. Data is typically
a dictionary with specific data that the method needs.
The query will be queued up and once processed, either a
work_completed | |
639
MFn_kNurbsCircular2PtArc = 638
MFn_kCurveCurveIntersect = 636
MFn_kManipContainer = 148
MFn_kCurveFromMeshEdge = 635
MFn_kScript = 634
MFn_kDistanceManip = 633
MFn_kNumericData = 587
MFn_kSoftModManip = 632
MFn_kOffsetCosManip = 171
MFn_kDeformWaveManip = 631
MFn_kDeformSineManip = 630
MFn_kReverse = 464
MFn_kRenderUtilityList = 463
MFn_kRecord = 462
MFn_kProjection = 461
MFn_kPostProcessList = 460
MFn_kPolySeparate = 459
MFn_kPlusMinusAverage = 457
MFn_kPolyCreateToolManip = 140
MFn_kNComponent = 987
MFn_kPluginLocatorNode = 456
MFn_kPluginDependNode = 455
MFn_kPlace3dTexture = 454
MFn_kPlace2dTexture = 453
MFn_kPartition = 452
MFn_kParticleTransparencyMapper = 451
MFn_kParticleColorMapper = 449
MFn_kParticleCloud = 448
MFn_kParticleAgeMapper = 447
MFn_kOpticalFX = 446
MFn_kOldGeometryConstraint = 445
MFn_kMultiplyDivide = 444
MFn_kMultilisterLight = 443
MFn_kMotionPath = 441
MFn_kPolyUnite = 440
MFn_kPolyCreateFacet = 439
MFn_kPolyTorus = 438
MFn_kPolySphere = 437
MFn_kPolyMesh = 436
MFn_kPolyCylinder = 435
MFn_kPolyCone = 433
MFn_kPolyPrimitive = 432
MFn_kPolyCreator = 431
MFn_kPolyTriangulate = 430
MFn_kPolySubdFacet = 429
MFn_kSubdProjectionManip = 879
MFn_kLatticeGeom = 280
MFn_kPolySubdEdge = 428
MFn_kPolySplit = 427
MFn_kPolySoftEdge = 425
MFn_kPluginBlendShape = 1114
MFn_kPolySmooth = 424
MFn_kPolyQuad = 423
MFn_kPolyProj = 422
MFn_kPolyPlanProj = 421
MFn_kPolyNormal = 420
MFn_kPolyMoveVertex = 418
MFn_kSubdivGeom = 807
MFn_kPolyMoveUV = 417
MFn_kPolyMoveFacetUV = 416
MFn_kPolyMoveFacet = 415
MFn_kPolyMoveEdge = 414
MFn_kPolyMergeFacet = 413
MFn_kDrag = 258
MFn_kPolyMergeEdge = 412
MFn_kPolyMapDel = 410
MFn_kPolyMapCut = 409
MFn_kPolyExtrudeFacet = 408
MFn_kPolyDelVertex = 407
MFn_kImageRender = 653
MFn_kPolyDelFacet = 406
MFn_kPolyDelEdge = 405
MFn_kContainer = 1006
MFn_kPolyCylProj = 404
MFn_kPolyCollapseF = 403
MFn_kPhong = 372
MFn_kPolyCollapseEdge = 402
MFn_kPolyCloseBorder = 401
MFn_kPolyChipOff = 400
MFn_kPolyAppend = 399
MFn_kPolyTweak = 398
MFn_kPolyBevel = 397
MFn_kMidModifierWithMatrix = 396
MFn_kDeformSquash = 622
MFn_kMatrixWtAdd = 394
MFn_kMatrixPass = 393
MFn_kMatrixMult = 392
MFn_kMatrixHold = 391
MFn_kMatrixAdd = 390
MFn_kMaterialInfo = 389
MFn_kLightSourceMaterial = 388
MFn_kBlinnMaterial = 386
MFn_kLambertMaterial = 385
MFn_kDiffuseMaterial = 384
MFn_kMaterial = 383
MFn_kSubdAddTopology = 887
MFn_kModifyEdgeBaseManip = 833
MFn_kCreateBPManip = 832
MFn_kCurveFromSubdivEdge = 831
MFn_kSymmetryMapCurve = 830
MFn_kSymmetryMapVector = 829
MFn_kSymmetryLocator = 828
MFn_kTransformBoxManip = 827
MFn_kScalePointManip = 826
MFn_kModifyEdgeManip = 825
MFn_kModifyEdgeCrvManip = 824
MFn_kSubdBoolean = 822
MFn_kDropOffFunction = 821
MFn_kCrossSectionEditManip = 820
MFn_kCreateSectionManip = 819
MFn_kCrossSectionManager = 818
MFn_kEditCurveManip = 817
MFn_kEditCurve = 816
MFn_kObjectBinFilter = 938
MFn_kXsectionSubdivEdit = 814
MFn_kSectionManip = 813
MFn_kMeshMapComponent = 812
MFn_kSubdivReverseFaces = 811
MFn_kPolySplitEdge = 810
MFn_kInt64ArrayData = 809
MFn_kUInt64ArrayData = 808
MFn_kSubdivData = 806
MFn_kWriteToVectorBuffer = 1038
MFn_kGuide = 356
MFn_kPolySplitVert = 805
MFn_kClip = 804
MFn_kCreateUVSet = 803
MFn_kCopyUVSet = 802
MFn_kParticleSamplerInfo = 801
MFn_kSubdivCollapse = 800
MFn_kCharacterMap = 798
MFn_kSubdBlindData = 797
MFn_kSubdHierBlind = 796
MFn_kDeleteUVSet = 795
MFn_kVolumeAxis = 794
MFn_kSubdivCompId = 793
MFn_kNurbsCircular3PtArc = 637
MFn_kMeshComponent = 546
MFn_kDecayRegionComponent = 545
MFn_kDecayRegionCapComponent = 544
MFn_kSurfaceRangeComponent = 543
MFn_kLatticeComponent = 542
MFn_kEdgeComponent = 541
MFn_kSurfaceKnotComponent = 540
MFn_kSurfaceEPComponent = 539
MFn_kPivotComponent = 537
MFn_kPluginMotionPathNode = 442
MFn_kIsoparmComponent = 536
MFn_kCurveParamComponent = 535
MFn_kCurveKnotComponent = 534
MFn_kCurveEPComponent = 533
MFn_kCurveCVComponent = 532
MFn_kComponent = 531
MFn_kVectorProduct = 529
kMFnNurbsEpsilon = 0.001
MFn_kUnknown = 528
MFn_kUseBackground = 527
MFn_kUnitToTimeConversion = 526
MFn_kUnitConversion = 525
MFn_kArrayMapper = 524
MFn_kHardwareRenderGlobals = 523
MFn_kRenderQuality = 521
MFn_kRenderGlobalsList = 520
MFn_kRenderGlobals = 519
MFn_kRenderSetup = 518
MFn_kTimeToUnitConversion = 517
MFn_kTime = 516
MFn_kWood = 515
MFn_kStucco = 513
MAngle_kLast = 5
MFn_kSolidFractal = 512
MFn_kSnow = 511
MFn_kRock = 510
MFn_kMarble = 509
MFn_kLeather = 508
MDagMessage_kTranslateZ = 2048
MFn_kGranite = 507
MFn_kNoise = 874
MFn_kSubdSubdivideFace = 873
MFn_kSubdAutoProj = 872
MFn_kVolumeNoise = 871
MFn_kOcean = 870
MFn_kSubdMapSewMove = 869
MFn_kSubdMapCut = 867
MFn_kSubdTweakUV = 866
MFn_kVolumeFog = 865
MFn_kSubdSplitFace = 864
MFn_kRenderBox = 863
MFn_kBox = 862
MFn_kBoxData = 861
MFn_kSubdMergeVert = 860
MFn_kSubdCloseBorder = 859
MFn_kDiskCache = 858
MFn_kGlobalCacheControls = 857
MFn_kJiggleDeformer = 856
MFn_kSubdivMapComponent = 855
MFn_kSnapshotShape = 854
MFn_kSubdDelFace = 853
MFn_kSubdMoveEdge = 851
MFn_kSubdMoveVertex = 850
MFn_kSubdModifier = 849
MFn_kPolyMapSewMove = 848
MFn_kPolyLayoutUV = 847
MFn_kPolyAutoProj = 846
MFn_kPolyAverageVertex = 845
MFn_kUnused6 = 843
MFn_kUnused5 = 842
MFn_kUnused4 = 841
MFn_kUnused3 = 840
MFn_kUnused2 = 839
MFn_kUnused1 = 838
MFn_kCurveFromSubdivFace = 837
MFn_kSubdivSurfaceVarGroup = 835
MFn_kSubdExtrudeFace = 834
MFn_kFluidEmitter = 915
MFn_kStudioClearCoat = 914
MFn_kMidModifier = 395
MFn_kStringShadingSwitch = 913
MFn_kSmear = 912
MFn_kFluidData = 911
MFn_kFluidGeom = 910
MFn_kFluid = 909
MFn_kPluginTransformNode = 908
MFn_kAlignManip = 907
MFn_kPolyStraightenUVBorder = 906
MFn_kPolyMergeUV = 905
MFn_kFluidTexture2D = 904
MFn_kPolyPokeManip = 902
MFn_kPolyMirrorManipContainer = 901
MFn_kPolyCutManip = 900
MFn_kPolyCutManipContainer = 899
MFn_kPolyWedgeFace = 898
MFn_kPolyPoke = 897
MFn_kPolyCut = 896
MFn_kLayeredTexture = 799
MFn_kBevelPlus = 894
MFn_kOceanShader = 893
MFn_kVolumeLight = 892
MFn_kRampShader = 891
MFn_kComponentListData = 579
MFn_kImplicitSphere = 890
MFn_kImplicitCone = 889
MFn_kSubdCleanTopology = 888
MFn_kPluginHwShaderNode = 886
MFn_kPluginHardwareShader = 885
MFn_kHwShaderNode = 884
MFn_kPolyFlipUV = 883
MFn_kFilter = 329
MFn_kPolyNormalizeUV = 882
MFn_kHardwareReflectionMap = 881
MFn_kSubdMappingManip = 880
MFn_kSubdTweak = 878
MFn_kSubdPlanProj = 877
MFn_kData4Double = 876
MFn_kAttribute4Double = 875
MFn_kCurveNormalizerLinear = 997
MFn_kCurveNormalizerAngle = 996
MFn_kCacheTrack = 994
MFn_kCacheBlend = 993
MFn_kCacheBase = 992
MFn_kNBase = 991
MFn_kNucleus = 990
MFn_kCacheableNode = 989
MFn_kPolyBridgeEdge = 988
MFn_kDynamicConstraint = 986
MFn_kTransferAttributes = 985
MFn_kClosestPointOnMesh = 984
cvar = None
MFn_kHistorySwitch = 983
MFn_kCacheFile = 982
MFn_kPolyHelix = 981
MFn_kPolyComponentData = 980
MFn_kHikFloorContactMarker = 978
MFn_kPolyPipe = 977
MFn_kPolyPlatonicSolid = 976
MFn_kHikEffector = 956
MFn_kPolyPrimitiveMisc = 975
MFn_kPolyPinUV = 955
MFn_kPolyArrow = 974
MFn_kPolyCreaseEdge = 954
MFn_kKeyframeRegionManip = 995
MFn_kLineModifier = 973
MFn_kNurbsCurve = 267
MFn_kPolyMirror = 953
MFn_kAISEnvFacade = 972
MFn_kKeyframeDeltaBreakdown = 952
MFn_kKeyframeDeltaWeighted = 951
MFn_kMaterialFacade = 970
MFn_kKeyframeDeltaTangent = 950
MFn_kFacade = 969
MFn_kKeyframeDeltaInfType = 949
MFn_kPolyDuplicateEdge = 968
MFn_kKeyframeDeltaBlockAddRemove = 948
MFn_kToonLineAttributes = 967
MFn_kKeyframeDeltaAddRemove = 947
MFn_kPfxToon = 966
MFn_kPolySplitRing = 965
MFn_kKeyframeDeltaMove = 945
MFn_kPolyPyramid = 964
MFn_kKeyframeDelta = 944
MFn_kPsdFileTexture = 943
MFn_kSpotCylinderManip = 187
MFn_kPolyAutoProjManip = 962
MFn_kHairTubeShader = 942
MFn_kProxyManager = 961
MFn_kPfxHair = 941
MFn_kHikHandle = 960
MFn_kPfxGeometry = 940
MFn_kHikSolver = 959
MFn_kPolySmoothProxy = 939
MFn_kHikFKJoint = 958
MFn_kHikIKEffector = 957
MFn_kMentalRayTexture = 937
MFn_kTimeFunction = 936
MFn_kHairConstraint = 935
MFn_kWriteToColorBuffer = 1037
MFn_kRemapHsv = 934
MFn_kWriteToFrameBuffer = 1036
MFn_kRemapColor = 933
MFn_kPolySelectEditFeedbackManip = 1035
MFn_kRemapValue = 932
MFn_kPolyToolFeedbackManip = 1034
MFn_kHairSystem = 931
MFn_kUint64SingleIndexedComponent = 1033
MFn_kTwoPointArcManip = 645
MFn_kMergeVertsToolManip = 1032
MFn_kCurveFromMeshCoM = 929
MFn_kMembrane = 1031
MFn_kRotateBoxManip = 214
MFn_kTrimWithBoundaries = 928
MFn_kConstraint = 927
MFn_kNId = 1029
MFn_kMute = 926
MFn_kNIdData = 1028
MFn_kXformManip = 925
MFn_kPluginManipulatorNode = 1027
MFn_kViewManip = 924
MFn_kBlendNodeAdditiveRotation = 1026
MFn_kTextManip = 923
MFn_kBlendNodeAdditiveScale = 1025
MFn_kBlendNodeInt32 = 1024
MFn_kPolyExtrudeVertex = 921
MFn_kBlendNodeInt16 = 1023
MFn_kQuadShadingSwitch = 920
MFn_kPluginObjectSet = 919
MFn_kBlendNodeFloatAngle = 1021
MFn_kSnapshotPath = 918
MFn_kBlendNodeFloat = 1020
MFn_kInsertKnotSrf = 76
MFn_kGeoConnector = 917
MFn_kBlendNodeEnum = 1019
MFn_kHeightField = 916
MFn_kBlendNodeDoubleLinear = 1018
MFn_kBlendNodeDoubleAngle = 1017
MFn_kBlendNodeDouble = 1016
MFn_kBlendNodeBoolean = 1015
MFn_kAnimLayer = 1013
MFn_kPolyEdgeToCurve = 1012
MFn_kAsset = 1011
MFn_kPluginConstraintNode = 1010
MFn_kCombinationShape = 337
MFn_kNObject = 1009
MFn_kNObjectData = 1008
MFn_kFloatVectorArrayData = 1007
MFn_kPluginCameraSet = 1005
MFn_kCameraSet = 1004
MFn_kPluginParticleAttributeMapperNode = 1003
MFn_kNRigid = 1002
MFn_kNParticle = 1001
MFn_kNCloth = 1000
MFn_kPluginImagePlaneNode = 999
MFileIO_kVersion2014 = 179
MFn_kPoseInterpolatorManager = 1120
MFn_kOceanDeformer = 1119
MFn_kShapeEditorManager = 1118
MDagMessage_kRotateZ = 256
MFn_kPolyClean = 1117
MFn_kTrackInfoManager = 1116
MFn_kDummy = 254
MFn_kPolyPassThru = 1115
MFn_kCollision = 253
MFn_kPolyMoveVertexUV = 419
MFn_kSoftMod = 252
MFn_kPluginGeometryFilter = 1113
MFn_kCluster = 251
MFn_kPluginSkinCluster = 1112
MFn_kCamera = 250
MFn_kNodeGraphEditorBookmarkInfo = 1111
MFn_kBaseLattice = 249
MFn_kNodeGraphEditorBookmarks = 1110
MFn_kShape = 248
MFn_kNodeGraphEditorInfo = 1109
MFn_kDoubleShadingSwitch = 614
MFn_kContourProjectionManip = 1108
MFn_kUnknownTransform = 246
MFn_kPolyContourProj = 1107
MFn_kTangentConstraint = 245
MFn_kScaleConstraint = 244
MFn_kPolyModifierManipContainer = 1105
MFn_kPoleVectorConstraint = 243
MFn_kFloatArrayData = 1030
MFn_kPolyCaddyManip = 1104
MFn_kParentConstraint = 242
MFn_kCaddyManipBase = 1103
MFn_kSymmetryConstraint = 241
MFn_kTimeEditorAnimSource = 1102
MFn_kTimeEditorInterpolator = 1101
MFn_kTimeEditorTracks = 1100
MFn_kTimeEditor = 1099
MFn_kGreasePlane = 1079
MFn_kMandelbrot3D = 1078
MFn_kTimeEditorClipEvaluator = 1097
MFn_kMandelbrot = 1077
MFn_kTimeEditorClipBase = 1096
MFn_kClipToGhostData = 1076
MFn_kPolyBevel3 = 1095
MFn_kClipGhostShape = 1075
MFn_kColorMgtGlobals = 1094
MFn_kAssembly = 1074
MFn_kGeomBind = 1093
MFn_kCrater = 506
MFn_kPolyCBoolOp = 1092
MFn_kPluginThreadedDevice = 1072
MFn_kPolyBevel2 = 1091
MFn_kPluginClientDevice = 1071
MFn_kClientDevice = 1070
MFn_kShrinkWrapFilter = 1089
MFn_kThreadedDevice = 1069
MFn_kNLE = 1088
MFn_kPolyExtrudeManipContainer = 1068
MFn_kToolContext = 1087
MFn_kPolyExtrudeManip = 1067
MFn_kSnapUVManip2D = 1086
MFn_kRenderingList = 1066
MFn_kFosterParent = 1085
MFn_kWrapFilter = 739
MFn_kPolyEditEdgeFlow = 1084
MFn_kHardwareRenderingGlobals = 1064
MFn_kCreaseSet = 1083
MFn_kPolyUVRectangle = 1063
MFn_kDagContainer = 1062
MFn_kGreasePencilSequence = 1081
MFn_kPolyCube = 434
MFn_kContainerBase = 1061
MFn_kGreasePlaneRenderShape = 1080
MFn_kAdskMaterial = 1060
MFn_kColorProfile = 1059
MFn_kNearestPointOnCurve = 1058
MFn_kVolumeBindManip = 1056
MFn_kSkinBinding = 1055
MFn_kStereoCameraMaster = 1041
MFn_kPolyConnectComponents = 1054
MFn_kPointOnPolyConstraint = 1053
MFn_kPolyHoleFace = 1052
MFn_kPolySpinEdge = 1051
MFn_kBezierCurveToNurbs = 1050
MFn_kBezierCurveData = 1048
MFn_kBlendNodeFloatLinear = 1022
MFn_kBezierCurve = 1047
MFn_kCreateBezierManip = 1046
MFn_kBlendNodeTime = 1045
MFn_kShot = 1044
MFn_kSequencer = 1043
MFn_kSequenceManager = 1042
MFn_kWriteToLabelBuffer = 1040
MFn_kWriteToDepthBuffer = 1039
MFn_kBlindData = 751
MFn_kReference = 750
MFn_kObjectTypeFilter = 674
MFn_kRevolvedPrimitive = 95
MFn_kAttribute3Int = 749
MFn_kAttribute3Short = 748
MFn_kAttribute3Double = 746
MFn_kLast = 1127
MFn_kAttribute2Int = 745
MFn_kAttribute2Short = 744
MFn_kAttribute2Float = 743
MFn_kAttribute2Double = 742
MFn_kBinaryData = 741
MFn_kMeshVtxFaceComponent = 740
MFn_kFFblendSrf = 68
MFn_kDynSweptGeometryData = 738
MFn_kCharacterMappingData = 737
MFn_kPolyColorDel = 736
MNodeMessage_kOtherPlugSet = 16384
MFn_kPolyColorMod = 735
MNodeMessage_kAttributeArrayRemoved = 8192
MFn_kBlendColorSet = 734
MNodeMessage_kAttributeArrayAdded = 4096
MFn_kCopyColorSet = 733
MNodeMessage_kIncomingDirection = 2048
MFn_kDeleteColorSet = 732
MFn_kCreateColorSet = 731
MFn_kPolyColorPerVertex = 730
MFn_kDisplayLayerManager = 729
MFn_kDisplayLayer = 728
MFn_kPluginSpringNode = 727
MFn_kPluginEmitterNode = 726
MFn_kPluginFieldNode = 725
MFn_kDynArrayAttrsData = 724
MFn_kDynAttenuationManip = 723
MFn_kPolyRemesh = 1106
MFn_kDynSpreadManip = 722
MFn_kDynTurbulenceManip = 721
MFn_kDynNewtonManip = 720
MFn_kDynAirManip = 719
MFn_kDynBaseFieldManip = 718
MFn_kDynFieldsManip = 717
MFn_kDynEmitterManip = 716
MFn_kSubdivToPoly = 714
MFn_kSquareSrfManip = 713
MFn_kSquareSrf = 712
MFn_kExtendSurfaceDistanceManip = 711
MVector_kTol = 1e-10
MTransformationMatrix_kTol = 1e-10
kQuaternionEpsilon = 1e-10
MPoint_kTol = 1e-10
MMatrix_kTol = 1e-10
kMFnSubdPointTolerance = 1e-10
kMFnSubdTolerance = 0.001
MFn_kPhongMaterial = 387
kMFnMeshPointTolerance = 1e-10
kMFnMeshTolerance = 0.001
MFloatVector_kTol = 1e-05
MFloatPoint_kTol = 1e-05
MFloatMatrix_kTol = 1e-05
kEulerRotationEpsilon = 1e-10
MFn_kStoryBoard = 479
MFn_kLodThresholds = 766
MFn_kSphericalProjectionManip = 222
kUnknownParameter = 'unknown'
MFn_kCompoundAttribute = 571
MFn_kPairBlend = 922
MFn_kPlane = 288
MFn_kManipulator = 230
MFn_kCommEdgeOperManip = 606
MFn_kSoftModFilter = 348
MFn_kSubdivToNurbs = 815
MNodeMessage_kAttributeUnkeyable = 1024
MNodeMessage_kAttributeKeyable = 512
MDagMessage_kRotatePivotTrans = 14680064
MDagMessage_kScalePivotTrans = 1835008
MDagMessage_kRotatePivot = 229376
MDagMessage_kScalePivot = 28672
MDagMessage_kTranslation = 3584
MDagMessage_kRotation = 448
MDagMessage_kAll = 268435455
MDagMessage_kRotateOrder = 134217728
MDagMessage_kRotateOrientZ = 67108864
MDagMessage_kRotateOrientY = 33554432
MDagMessage_kRotateOrientX = 16777216
MDagMessage_kRotateTransZ = 8388608
MDagMessage_kRotateTransY = 4194304
MDagMessage_kRotateTransX = 2097152
MDagMessage_kScaleTransZ = 1048576
MDagMessage_kScaleTransX = 262144
MDagMessage_kRotatePivotZ = 131072
MDagMessage_kRotatePivotY = 65536
MDagMessage_kRotatePivotX = 32768
MDagMessage_kScalePivotZ = 16384
MDagMessage_kScalePivotY = 8192
MDagMessage_kScalePivotX = 4096
MDagMessage_kTranslateY = 1024
MFn_kPolySphProj = 426
MDagMessage_kTranslateX = 512
MAYA_API_VERSION = 20180000
MFn_kImageFilter = 661
MFn_kData3Long = 594
MFn_kData2Long = 590
MFn_kAttribute3Long = 749
MFn_kAttribute2Long = 745
MFn_kAttribute3Float = 747
MFn_kReorderUVSet = 1126
MFn_kArubaTesselate = 1125
MFn_kPolyCircularize = 1124
MFn_kCustomEvaluatorClusterNode = 1123
MFn_kReForm = 1122
MFn_kKeyingGroup = 682
MFn_kControllerTag = 1121
MFn_kSurfaceInfo = 103
MFn_kEnvBall = 487
MFn_kPolyModifierManip = 195
MFn_kMoveUVShellManip2D = 705
MFn_kMeshEdgeComponent = 547
MFn_kStyleCurve = 895
MFn_kFluidTexture3D = 903
MFn_kAttachSurface = 44
MFn_kMoveVertexManip = 758
MFn_kDoubleAngleAttribute = 563
MFn_kForceUpdateManip = 690
MFn_kHikGroundPlane = 979
MFn_kSurfaceCVComponent = 538
MFn_kDeformFlareManip = 629
MFn_kTimeWarp = 1073
MFn_kMatrixArrayData = 598
MFn_kTimeEditorClip = 1098
MDagMessage_kRotateOrient = 117440512
MFn_kBlendNodeBase = 1014
MFn_kEditsManager = 1090
MFn_kAxesActionManip = 124
kDefaultNodeType = 'dependNode'
MFn_kNurbsCurveToBezier = 1049
MFnDagNode_kNextPos = 255
MFn_kSCsolver = 364
MFileIO_kVersion2011 = 156
MFn_kSubdivEdgeComponent = 698
MFn_kEditMetadata = 1082
MFnData_kSubdSurface = 21
MFn_kMesh = 296
MFn_kSubdModifyEdge = 823
MFn_kResolution = 522
MFn_kPassContributionMap = 782
MFn_kComponentManip = 669
MFn_kTexture3d = 503
MFn_kSetGroupComponent = 555
MFn_kDropoffManip = 163
MFn_kTxSl = 514
MNodeMessage_kLast = 32768
MFn_kVolumeShader = 530
MFn_kCurveFromSurfaceCoS = 60
MFn_kDiscManip = 132
MFn_kPointMatrixMult = 458
MFn_kPolyProjectCurve = 1065
MFn_kUniform = 263
MFn_kVortex = 264
MFn_kGeometric = 265
MFn_kSPbirailSrf = 52
MFn_kRigidConstraint = 313
MFn_kCreateEPManip = 158
MFn_kEnvFacade = 971
MFn_kCreateCVManip = 157
MFn_kCoiManip = 155
MFn_kCameraManip = 154
MFn_kButtonManip = 153
MFileIO_kVersion2010 = 152
MFn_kBevelManip = 151
MFn_kBarnDoorManip = 150
MFn_kAverageCurveManip = 149
MFileIO_kVersion2009 = 147
MFn_kIsoparmManip = | |
= self.GetPatchSeries()
patch1, patch2, patch3, patch4 = patches = self.GetPatches(4)
self.SetPatchDeps(patch1)
self.SetPatchDeps(patch2, [patch1.id])
self.SetPatchDeps(patch3)
self.SetPatchDeps(patch4)
self.SetPatchApply(patch1).AndRaise(
cros_patch.ApplyPatchException(patch1))
self.SetPatchApply(patch3)
self.SetPatchApply(patch4).AndRaise(
cros_patch.ApplyPatchException(patch1, inflight=True))
self.mox.ReplayAll()
self.assertResults(series, patches,
[patch3], [patch2, patch1], [patch4])
self.mox.VerifyAll()
def testApplyMissingChangeId(self):
"""Test that applies changes correctly with a dep with missing changeid."""
series = self.GetPatchSeries()
patch1, patch2 = patches = self.GetPatches(2)
git_repo = os.path.join(self.build_root, patch1.project)
patch1.Fetch(git_repo)
patch1.GerritDependencies(
git_repo,
'refs/remotes/cros/master').AndRaise(
cros_patch.BrokenChangeID(patch1, 'Could not find changeid'))
self.SetPatchDeps(patch2)
self.SetPatchApply(patch2)
self.mox.ReplayAll()
self.assertResults(series, patches, [patch2], [patch1], [])
self.mox.VerifyAll()
def testComplexApply(self):
"""More complex deps test.
    This tests a total of 2 change chains where the first change we see
    only has a partial chain with the 3rd change having the whole chain, i.e.
    1->2, 3->1->2, and 4 CQ-depends on 5. Since we get these in the order
    1,2,3,4,5 the order we should apply is 2,1,3,4,5.
This test also checks the patch order to verify that Apply re-orders
correctly based on the chain.
"""
series = self.GetPatchSeries()
patch1, patch2, patch3, patch4, patch5 = patches = self.GetPatches(5)
self.SetPatchDeps(patch1, [patch2.id])
self.SetPatchDeps(patch2)
self.SetPatchDeps(patch3, [patch1.id, patch2.id])
self.SetPatchDeps(patch4, cq=[patch5.id])
self.SetPatchDeps(patch5)
for patch in (patch2, patch1, patch3, patch4, patch5):
self.SetPatchApply(patch)
self.mox.ReplayAll()
self.assertResults(
series, patches, [patch2, patch1, patch3, patch4, patch5])
self.mox.VerifyAll()
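# The expected ordering in the docstring is just a topological sort of the declared
# dependencies: each patch is applied after everything it depends on. Standalone
# illustration, independent of the mox plumbing above:
deps = {"1": ["2"], "2": [], "3": ["1", "2"], "4": ["5"], "5": []}

def apply_order(seen_order, deps):
  order, done = [], set()
  def visit(p):
    if p in done:
      return
    for d in deps[p]:
      visit(d)
    done.add(p)
    order.append(p)
  for p in seen_order:
    visit(p)
  return order

print(apply_order(["1", "2", "3", "4", "5"], deps))  # ['2', '1', '3', '4', '5']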
def testApplyStandalonePatches(self):
"""Simple apply of two changes with no dependent CL's."""
series = self.GetPatchSeries()
patches = self.GetPatches(3)
for patch in patches:
self.SetPatchDeps(patch)
for patch in patches:
self.SetPatchApply(patch)
self.mox.ReplayAll()
self.assertResults(series, patches, patches)
self.mox.VerifyAll()
# pylint: disable=W0212,R0904
class TestCoreLogic(base):
"""Tests the core resolution and applying logic of
validation_pool.ValidationPool."""
def setUp(self):
self.mox.StubOutWithMock(gerrit.GerritHelper,
'FindContentMergingProjects')
def MakePool(self, overlays=constants.PUBLIC_OVERLAYS, build_number=1,
builder_name='foon', is_master=True, dryrun=True, **kwds):
handlers = kwds.pop('handlers', False)
kwds.setdefault('helper_pool', validation_pool.HelperPool.SimpleCreate())
kwds.setdefault('changes', [])
pool = validation_pool.ValidationPool(
overlays, self.build_root, build_number, builder_name, is_master,
dryrun, **kwds)
self.mox.StubOutWithMock(pool, '_SendNotification')
if handlers:
self.mox.StubOutWithMock(pool, '_HandleApplySuccess')
self.mox.StubOutWithMock(pool, '_HandleApplyFailure')
self.mox.StubOutWithMock(pool, '_HandleCouldNotApply')
self.mox.StubOutWithMock(pool, '_patch_series')
return pool
def MakeFailure(self, patch, inflight=True):
return cros_patch.ApplyPatchException(patch, inflight=inflight)
def GetPool(self, changes, applied=(), tot=(),
inflight=(), dryrun=True, **kwds):
pool = self.MakePool(changes=changes, **kwds)
applied = list(applied)
tot = [self.MakeFailure(x, inflight=False) for x in tot]
inflight = [self.MakeFailure(x, inflight=True) for x in inflight]
# pylint: disable=E1123
pool._patch_series.Apply(
changes, dryrun=dryrun, manifest=mox.IgnoreArg()
).AndReturn((applied, tot, inflight))
for patch in applied:
pool._HandleApplySuccess(patch).AndReturn(None)
if tot:
pool._HandleApplyFailure(tot).AndReturn(None)
# We stash this on the pool object so we can reuse it during validation.
# We could stash this in the test instances, but that would break
# for any tests that do multiple pool instances.
pool._test_data = (changes, applied, tot, inflight)
return pool
def runApply(self, pool, result):
self.assertEqual(result, pool.ApplyPoolIntoRepo())
self.assertEqual(pool.changes, pool._test_data[1])
failed_inflight = pool.changes_that_failed_to_apply_earlier
expected_inflight = set(pool._test_data[3])
# Intersect the results, since it's possible there were results failed
# results that weren't related to the ApplyPoolIntoRepo call.
self.assertEqual(set(failed_inflight).intersection(expected_inflight),
expected_inflight)
self.assertEqual(pool.changes, pool._test_data[1])
def testPatchSeriesInteraction(self):
"""Verify the interaction between PatchSeries and ValidationPool.
Effectively, this validates data going into PatchSeries, and coming back
out; verifies the hand off to _Handle* functions, but no deeper.
"""
patches = self.GetPatches(3)
apply_pool = self.GetPool(patches, applied=patches, handlers=True)
all_inflight = self.GetPool(patches, inflight=patches, handlers=True)
all_tot = self.GetPool(patches, tot=patches, handlers=True)
mixed = self.GetPool(patches, tot=patches[0:1], inflight=patches[1:2],
applied=patches[2:3], handlers=True)
self.mox.ReplayAll()
self.runApply(apply_pool, True)
self.runApply(all_inflight, False)
self.runApply(all_tot, False)
self.runApply(mixed, True)
self.mox.VerifyAll()
def testHandleApplySuccess(self):
"""Validate steps taken for successfull application."""
patch = self.GetPatches(1)
pool = self.MakePool()
pool._SendNotification(patch, mox.StrContains('has picked up your change'))
self.mox.ReplayAll()
pool._HandleApplySuccess(patch)
self.mox.VerifyAll()
def testHandleApplyFailure(self):
failures = [cros_patch.ApplyPatchException(x) for x in self.GetPatches(4)]
notified_patches = failures[:2]
unnotified_patches = failures[2:]
master_pool = self.MakePool(dryrun=False)
slave_pool = self.MakePool(is_master=False)
self.mox.StubOutWithMock(gerrit.GerritHelper, 'RemoveCommitReady')
for failure in notified_patches:
master_pool._SendNotification(
failure.patch,
mox.StrContains('failed to apply your change'),
failure=mox.IgnoreArg())
      # This pylint suppression shouldn't be necessary, but pylint incorrectly
      # thinks that the first arg isn't passed in; we suppress it to work around
      # the pylint bug.
# pylint: disable=E1120
gerrit.GerritHelper.RemoveCommitReady(failure.patch, dryrun=False)
self.mox.ReplayAll()
master_pool._HandleApplyFailure(notified_patches)
slave_pool._HandleApplyFailure(unnotified_patches)
self.mox.VerifyAll()
def testSubmitPoolFailures(self):
pool = self.MakePool(dryrun=False)
patch1, patch2, patch3 = patches = self.GetPatches(3)
failed = self.GetPatches(3)
pool.changes = patches[:]
# While we don't do anything w/ these patches, that's
# intentional; we're verifying that it isn't submitted
# if there is a failure.
pool.changes_that_failed_to_apply_earlier = failed[:]
self.mox.StubOutWithMock(pool, '_SubmitChange')
self.mox.StubOutWithMock(pool, '_HandleCouldNotSubmit')
self.mox.StubOutWithMock(gerrit.GerritHelper, 'IsChangeCommitted')
pool._SubmitChange(patch1).AndReturn(None)
gerrit.GerritHelper.IsChangeCommitted(
str(patch1.gerrit_number), False).AndReturn(True)
pool._SubmitChange(patch2).AndReturn(None)
gerrit.GerritHelper.IsChangeCommitted(
str(patch2.gerrit_number), False).InAnyOrder().AndReturn(False)
pool._HandleCouldNotSubmit(patch2).InAnyOrder()
pool._SubmitChange(patch3).AndRaise(
cros_build_lib.RunCommandError('blah', None))
pool._HandleCouldNotSubmit(patch3).InAnyOrder().AndReturn(None)
cros_build_lib.TreeOpen(
validation_pool.ValidationPool.STATUS_URL,
validation_pool.ValidationPool.SLEEP_TIMEOUT).AndReturn(True)
self.mox.ReplayAll()
self.assertRaises(validation_pool.FailedToSubmitAllChangesException,
pool.SubmitPool)
self.mox.VerifyAll()
def testSubmitPool(self):
pool = self.MakePool(dryrun=False)
passed = self.GetPatches(3)
failed = self.GetPatches(3)
pool.changes = passed
pool.changes_that_failed_to_apply_earlier = failed[:]
self.mox.StubOutWithMock(pool, '_SubmitChange')
self.mox.StubOutWithMock(pool, '_HandleCouldNotSubmit')
self.mox.StubOutWithMock(pool, '_HandleApplyFailure')
self.mox.StubOutWithMock(gerrit.GerritHelper, 'IsChangeCommitted')
for patch in passed:
pool._SubmitChange(patch).AndReturn(None)
gerrit.GerritHelper.IsChangeCommitted(
str(patch.gerrit_number), False).AndReturn(True)
pool._HandleApplyFailure(failed)
cros_build_lib.TreeOpen(
validation_pool.ValidationPool.STATUS_URL,
validation_pool.ValidationPool.SLEEP_TIMEOUT).AndReturn(True)
self.mox.ReplayAll()
pool.SubmitPool()
self.mox.VerifyAll()
def testSubmitNonManifestChanges(self):
"""Simple test to make sure we can submit non-manifest changes."""
pool = self.MakePool(dryrun=False)
patch1, patch2 = passed = self.GetPatches(2)
pool.non_manifest_changes = passed[:]
self.mox.StubOutWithMock(pool, '_SubmitChange')
self.mox.StubOutWithMock(pool, '_HandleCouldNotSubmit')
self.mox.StubOutWithMock(gerrit.GerritHelper, 'IsChangeCommitted')
pool._SubmitChange(patch1).AndReturn(None)
gerrit.GerritHelper.IsChangeCommitted(
str(patch1.gerrit_number), False).AndReturn(True)
pool._SubmitChange(patch2).AndReturn(None)
gerrit.GerritHelper.IsChangeCommitted(
str(patch2.gerrit_number), False).AndReturn(True)
cros_build_lib.TreeOpen(
validation_pool.ValidationPool.STATUS_URL,
validation_pool.ValidationPool.SLEEP_TIMEOUT).AndReturn(True)
self.mox.ReplayAll()
pool.SubmitNonManifestChanges()
self.mox.VerifyAll()
def testGerritSubmit(self):
"""Tests submission review string looks correct."""
pool = self.MakePool(dryrun=False)
patch = self.GetPatches(1)
# Force int conversion of gerrit_number to ensure the test is sane.
cmd = ('ssh -p 29418 gerrit.chromium.org gerrit review '
'--submit %i,%i' % (int(patch.gerrit_number), patch.patch_number))
validation_pool._RunCommand(cmd.split(), False).AndReturn(None)
self.mox.ReplayAll()
pool._SubmitChange(patch)
self.mox.VerifyAll()
def testUnhandledExceptions(self):
"""Test that CQ doesn't loop due to unhandled Exceptions."""
pool = self.MakePool(dryrun=False)
patches = self.GetPatches(2)
pool.changes = patches[:]
class MyException(Exception):
pass
self.mox.StubOutWithMock(pool._patch_series, 'Apply')
# pylint: disable=E1123
pool._patch_series.Apply(
patches, dryrun=False, manifest=mox.IgnoreArg()).AndRaise(
MyException)
    def _ValidateException(changes):
for patch in changes:
self.assertTrue(isinstance(patch, validation_pool.InternalCQError),
msg="Expected %s to be type InternalCQError, got %r" %
(patch, type(patch)))
self.assertEqual(set(patches),
set(x.patch for x in changes))
self.mox.ReplayAll()
self.assertRaises(MyException, pool.ApplyPoolIntoRepo)
self.mox.VerifyAll()
def testFilterDependencyErrors(self):
"""Verify that dependency errors are correctly filtered out."""
failures = [cros_patch.ApplyPatchException(x) for x in self.GetPatches(2)]
failures += [cros_patch.DependencyError(x, y) for x, y in
zip(self.GetPatches(2), failures)]
failures[0].patch.approval_timestamp = time.time()
failures[-1].patch.approval_timestamp = time.time()
self.mox.ReplayAll()
result = validation_pool.ValidationPool._FilterDependencyErrors(failures)
self.assertEquals(set(failures[:-1]), set(result))
self.mox.VerifyAll()
def testFilterNonCrosProjects(self):
"""Runs through a filter of own manifest and fake changes.
    This test should filter out the tacos/chromite project as it's not real.
"""
base_func = itertools.cycle(['chromiumos', 'chromeos']).next
patches = self.GetPatches(8)
for patch in patches:
patch.project = '%s/%i' % (base_func(), _GetNumber())
patch.tracking_branch = str(_GetNumber())
non_cros_patches = self.GetPatches(2)
for patch in non_cros_patches:
patch.project = str(_GetNumber())
filtered_patches = patches[:4]
allowed_patches = []
projects = {}
for idx, patch in enumerate(patches[4:]):
fails = bool(idx % 2)
# Vary the revision so we can validate that it checks the branch.
revision = ('monkeys' if fails
else 'refs/heads/%s' % patch.tracking_branch)
if fails:
filtered_patches.append(patch)
else:
allowed_patches.append(patch)
projects.setdefault(patch.project, {})['revision'] = revision
manifest = MockManifest(self.build_root, projects=projects)
self.mox.ReplayAll()
results = validation_pool.ValidationPool._FilterNonCrosProjects(
patches + non_cros_patches, manifest)
def compare(list1, list2):
mangle = lambda c:(c.id, c.project, c.tracking_branch)
self.assertEqual(list1, list2,
msg="Comparison failed:\n list1: %r\n list2: %r"
% (map(mangle, list1), map(mangle, list2)))
compare(results[0], allowed_patches)
compare(results[1], filtered_patches)
class TestPickling(cros_test_lib.TempDirTestCase):
"""Tests to validate pickling of ValidationPool, covering CQ's needs"""
def testSelfCompatibility(self):
"""Verify compatibility of current git HEAD against itself."""
self._CheckTestData(self._GetTestData())
def testToTCompatibility(self):
"""Validate that ToT can use our pickles, and that we can use ToT's data."""
repo = os.path.join(self.tempdir, 'chromite')
reference = os.path.abspath(__file__)
reference = os.path.normpath(os.path.join(reference, '../../'))
repository.CloneGitRepo(repo,
'%s/chromiumos/chromite' % constants.GIT_HTTP_URL,
reference=reference)
code = """
import sys
from chromite.buildbot import validation_pool_unittest
if not hasattr(validation_pool_unittest, 'TestPickling'):
sys.exit(0)
sys.stdout.write(validation_pool_unittest.TestPickling.%s)
"""
# Verify ToT can take our pickle.
cros_build_lib.RunCommandCaptureOutput(
['python', '-c', code % '_CheckTestData(sys.stdin.read())'],
cwd=self.tempdir, print_cmd=False,
input=self._GetTestData())
# Verify we can handle ToT's pickle.
ret = cros_build_lib.RunCommandCaptureOutput(
['python', '-c', code % '_GetTestData()'],
cwd=self.tempdir, print_cmd=False)
self._CheckTestData(ret.output)
@staticmethod
def _GetCrosInternalPatch(patch_info):
return cros_patch.GerritPatch(
patch_info,
constants.INTERNAL_REMOTE,
constants.GERRIT_INT_SSH_URL)
@staticmethod
def _GetCrosPatch(patch_info):
return cros_patch.GerritPatch(
patch_info,
constants.EXTERNAL_REMOTE,
constants.GERRIT_SSH_URL)
@classmethod
def _GetTestData(cls):
ids = [cros_patch.MakeChangeId() for _ in xrange(3)]
changes = [cls._GetCrosInternalPatch(GetTestJson(ids[0]))]
non_os = [cls._GetCrosPatch(GetTestJson(ids[1]))]
conflicting = [cls._GetCrosInternalPatch(GetTestJson(ids[2]))]
    conflicting = [cros_patch.PatchException(x) for x in conflicting]
pool = validation_pool.ValidationPool(
constants.PUBLIC_OVERLAYS,
'/fake/pathway', 1,
'testing', True, True,
changes=changes, non_os_changes=non_os,
conflicting_changes=conflicting)
return pickle.dumps([pool, changes, non_os, conflicting])
@staticmethod
def _CheckTestData(data):
results = pickle.loads(data)
pool, changes, non_os, conflicting = results
def _f(source, value, getter=lambda x:x):
assert len(source) == len(value)
for s_item, v_item in zip(source, value):
assert getter(s_item).id == getter(v_item).id
assert getter(s_item).remote == getter(v_item).remote
_f(pool.changes, changes)
_f(pool.non_manifest_changes, non_os)
_f(pool.changes_that_failed_to_apply_earlier, conflicting,
getter=lambda s:getattr(s, 'patch', s))
return ''
class TestFindSuspects(base):
"""Tests validation_pool.ValidationPool._FindSuspects"""
def setUp(self):
overlay = 'chromiumos/overlays/chromiumos-overlay'
self.overlay_patch = self.GetPatches(project=overlay)
self.power_manager = 'chromiumos/platform/power_manager'
self.power_manager_pkg = 'chromeos-base/power_manager'
self.power_manager_patch = self.GetPatches(project=self.power_manager)
self.kernel = 'chromiumos/third_party/kernel'
self.kernel_pkg = 'sys-kernel/chromeos-kernel'
self.kernel_patch = self.GetPatches(project=self.kernel)
self.secret = 'chromeos/secret'
self.secret_patch = self.GetPatches(project=self.secret,
remote=constants.INTERNAL_REMOTE)
@staticmethod
def _GetBuildFailure(pkg):
"""Create a PackageBuildFailure for the specified |pkg|.
Args:
pkg: Package that failed to build.
"""
ex = cros_build_lib.RunCommandError('foo', cros_build_lib.CommandResult())
return results_lib.PackageBuildFailure(ex, | |
+= area.sum()*0
continue
bbox_gt = gt_bboxes[i]
cls_score = flatten_cls_scores1[i, pos_inds, labels[pos_inds] - 1].sigmoid().detach()
cls_score = cls_score[area>1.0]
pos_inds = pos_inds[area > 1.0]
ious = bbox_overlaps(bbox_gt[idx_gt]/2, bbox_dt, is_aligned=True)
with torch.no_grad():
weighting = cls_score * ious
weighting = weighting/(torch.sum(weighting)+0.0001)*len(weighting)
gt_mask = F.interpolate(gt_masks[i].unsqueeze(0), scale_factor=0.5, mode='bilinear', align_corners=False).squeeze(0)
shape = np.minimum(feat_masks[i].shape, gt_mask.shape)
gt_mask_new = gt_mask.new_zeros(gt_mask.shape[0], mask_h, mask_w)
gt_mask_new[:gt_mask.shape[0], :shape[1], :shape[2]] = gt_mask[:gt_mask.shape[0], :shape[1], :shape[2]]
gt_mask_new = gt_mask_new.gt(0.5).float()
gt_mask_new = torch.index_select(gt_mask_new,0,idx_gt).permute(1, 2, 0).contiguous()
#######spp###########################
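# The 128-dim coefficient vector predicted for each positive sample is split into
# four 32-dim groups; each group is combined with the shared mask feature map
# (img_mask1) through a sigmoid, giving four candidate mask maps per instance
# that are cropped to the corresponding boxes (bbox_dt) below.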
img_mask1 = img_mask.permute(1,2,0)
pos_masks00 = torch.sigmoid(img_mask1 @ cof_pred[:, 0:32].t())
pos_masks01 = torch.sigmoid(img_mask1 @ cof_pred[:, 32:64].t())
pos_masks10 = torch.sigmoid(img_mask1 @ cof_pred[:, 64:96].t())
pos_masks11 = torch.sigmoid(img_mask1 @ cof_pred[:, 96:128].t())
pred_masks = torch.stack([pos_masks00, pos_masks01, pos_masks10, pos_masks11], dim=0)
pred_masks = self.crop_cuda(pred_masks, bbox_dt)
gt_mask_crop = self.crop_gt_cuda(gt_mask_new, bbox_dt)
# pred_masks, gt_mask_crop = crop_split(pos_masks00, pos_masks01, pos_masks10, pos_masks11, bbox_dt,
# gt_mask_new)
pre_loss = F.binary_cross_entropy(pred_masks, gt_mask_crop, reduction='none')
pos_get_csize = center_size(bbox_dt)
gt_box_width = pos_get_csize[:, 2]
gt_box_height = pos_get_csize[:, 3]
pre_loss = pre_loss.sum(dim=(0, 1)) / gt_box_width / gt_box_height / pos_get_csize.shape[0]
loss_mask += torch.sum(pre_loss*weighting.detach())
if self.rescoring_flag:
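# Mask rescoring branch: a small scoring head (convs_scoring + mask_scoring)
# predicts a per-instance mask IoU, supervised by the actual IoU between the
# thresholded predicted masks and the ground-truth masks, which is computed
# below under torch.no_grad().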
pos_labels = labels[pos_inds] - 1
input_iou = pred_masks.detach().unsqueeze(0).permute(3, 0, 1, 2)
pred_iou = self.convs_scoring(input_iou)
pred_iou = self.relu(self.mask_scoring(pred_iou))
pred_iou = F.max_pool2d(pred_iou, kernel_size=pred_iou.size()[2:]).squeeze(-1).squeeze(-1)
pred_iou = pred_iou[range(pred_iou.size(0)), pos_labels]
with torch.no_grad():
mask_pred = (pred_masks > 0.4).float()
mask_pred_areas = mask_pred.sum((0, 1))
overlap_areas = (mask_pred * gt_mask_new).sum((0, 1))
gt_full_areas = gt_mask_new.sum((0, 1))
iou_targets = overlap_areas / (mask_pred_areas + gt_full_areas - overlap_areas + 0.1)
iou_weights = ((iou_targets > 0.1) & (iou_targets <= 1.0) & (gt_full_areas >= 10 * 10)).float()
loss_iou += self.loss_iou(pred_iou.view(-1, 1), iou_targets.view(-1, 1), iou_weights.view(-1, 1))
num_iou += torch.sum(iou_weights.detach())
loss_mask = loss_mask/num_imgs
if self.rescoring_flag:
loss_iou = loss_iou * 10 / num_iou.detach()
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_centerness=loss_centerness,
loss_mask=loss_mask,
loss_iou=loss_iou)
else:
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_centerness=loss_centerness,
loss_mask=loss_mask)
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def get_bboxes(self,
cls_scores,
bbox_preds,
centernesses,
cof_preds,
feat_masks,
img_metas,
cfg,
rescale=None):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points, mlvl_strides = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
centerness_pred_list = [
centernesses[i][img_id].detach() for i in range(num_levels)
]
cof_pred_list = [
cof_preds[i][img_id].detach() for i in range(num_levels)
]
feat_mask_list = feat_masks[img_id]
img_shape = img_metas[img_id]['img_shape']
ori_shape = img_metas[img_id]['ori_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list,
centerness_pred_list, cof_pred_list, feat_mask_list,
mlvl_points, img_shape, ori_shape,
scale_factor, cfg, rescale)
result_list.append(det_bboxes)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
centernesses,
cof_preds,
feat_mask,
mlvl_points,
img_shape,
ori_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
mlvl_centerness = []
mlvl_cofs = []
for cls_score, bbox_pred, cof_pred, centerness, points in zip(
cls_scores, bbox_preds, cof_preds, centernesses, mlvl_points):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
cof_pred = cof_pred.permute(1,2,0).reshape(-1,32*4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = (scores * centerness[:, None]).max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
cof_pred = cof_pred[topk_inds, :]
scores = scores[topk_inds, :]
centerness = centerness[topk_inds]
bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
mlvl_cofs.append(cof_pred)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_centerness.append(centerness)
mlvl_bboxes = torch.cat(mlvl_bboxes)
mlvl_cofs = torch.cat(mlvl_cofs)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
mlvl_centerness = torch.cat(mlvl_centerness)
if self.ssd_flag is False:
det_bboxes, det_labels, idxs_keep = multiclass_nms_idx(
mlvl_bboxes,
mlvl_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=mlvl_centerness)
else:
mlvl_scores = mlvl_scores*mlvl_centerness.view(-1,1)
det_bboxes, det_labels, det_cofs = self.fast_nms(mlvl_bboxes, mlvl_scores[:, 1:].transpose(1, 0).contiguous(),
mlvl_cofs, iou_threshold=cfg.nms.iou_thr, score_thr=cfg.score_thr)
cls_segms = [[] for _ in range(self.num_classes - 1)]
mask_scores = [[] for _ in range(self.num_classes - 1)]
if det_bboxes.shape[0]>0:
scale = 2
if self.ssd_flag is False:
det_cofs = mlvl_cofs[idxs_keep]
#####spp########################
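# Same coefficient-splitting scheme as in the loss: the 128-dim coefficients of
# the kept detections are split into four 32-dim groups, combined with the mask
# feature map, and the resulting maps are cropped to the detected boxes
# (rescaled to the mask feature resolution).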
img_mask1 = feat_mask.permute(1,2,0)
pos_masks00 = torch.sigmoid(img_mask1 @ det_cofs[:, 0:32].t())
pos_masks01 = torch.sigmoid(img_mask1 @ det_cofs[:, 32:64].t())
pos_masks10 = torch.sigmoid(img_mask1 @ det_cofs[:, 64:96].t())
pos_masks11 = torch.sigmoid(img_mask1 @ det_cofs[:, 96:128].t())
pos_masks = torch.stack([pos_masks00,pos_masks01,pos_masks10,pos_masks11],dim=0)
pos_masks = self.crop_cuda(pos_masks, det_bboxes[:,:4] * det_bboxes.new_tensor(scale_factor) / scale)
# pos_masks = crop_split(pos_masks00, pos_masks01, pos_masks10, pos_masks11,
# det_bboxes * det_bboxes.new_tensor(scale_factor) / scale)
pos_masks = pos_masks.permute(2, 0, 1)
# masks = F.interpolate(pos_masks.unsqueeze(0), scale_factor=scale/scale_factor, mode='bilinear', align_corners=False).squeeze(0)
if self.ssd_flag:
masks = F.interpolate(pos_masks.unsqueeze(0), scale_factor=scale / scale_factor[3:1:-1], mode='bilinear', align_corners=False).squeeze(0)
else:
masks = F.interpolate(pos_masks.unsqueeze(0), scale_factor=scale / scale_factor, mode='bilinear', align_corners=False).squeeze(0)
masks.gt_(0.4)
if self.rescoring_flag:
pred_iou = pos_masks.unsqueeze(1)
pred_iou = self.convs_scoring(pred_iou)
pred_iou = self.relu(self.mask_scoring(pred_iou))
pred_iou = F.max_pool2d(pred_iou, kernel_size=pred_iou.size()[2:]).squeeze(-1).squeeze(-1)
pred_iou = pred_iou[range(pred_iou.size(0)), det_labels].squeeze()
mask_scores = pred_iou*det_bboxes[:, -1]
mask_scores = mask_scores.cpu().numpy()
mask_scores = [mask_scores[det_labels.cpu().numpy() == i] for i in range(self.num_classes - 1)]
for i in range(det_bboxes.shape[0]):
label = det_labels[i]
mask = masks[i].cpu().numpy()
im_mask = np.zeros((ori_shape[0], ori_shape[1]), dtype=np.uint8)
shape = np.minimum(mask.shape, ori_shape[0:2])
im_mask[:shape[0],:shape[1]] = mask[:shape[0],:shape[1]]
rle = mask_util.encode(
np.array(im_mask[:, :, np.newaxis], order='F'))[0]
cls_segms[label].append(rle)
if self.rescoring_flag:
return det_bboxes, det_labels, (cls_segms, mask_scores)
else:
return det_bboxes, det_labels, cls_segms
def get_points(self, featmap_sizes, dtype, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
tuple: points of each image.
"""
mlvl_points = []
mlvl_strides = []
for i in range(len(featmap_sizes)):
points, strides = self.get_points_single(featmap_sizes[i], self.strides[i],
dtype, device)
mlvl_points.append(points)
mlvl_strides.append(strides)
return mlvl_points, mlvl_strides
def get_points_single(self, featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride, dtype=dtype, device=device)
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
y, x = torch.meshgrid(y_range, x_range)
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
strides = points[:,0]*0+stride
return points, strides
def center_target(self, gt_bboxes_raw, gt_masks_raw, featmap_size):
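"""Build stride-8 auxiliary targets from ground-truth boxes and masks.
For each image, every ground-truth box is mapped onto a stride-8 grid; grid
cells inside the (downscaled) box receive the rescaled ground-truth mask
values as soft labels and the normalised box corners (x1/w, y1/h, x2/w, y2/h)
as a 4-channel regression target. Returns the flattened (labels,
center_targets) tensors concatenated over images.
"""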
stride = 8
h, w = featmap_size
x_range = torch.arange(0, w, 1, dtype=gt_bboxes_raw[0].dtype, device=gt_bboxes_raw[0].device)
y_range = torch.arange(0, h, 1, dtype=gt_bboxes_raw[0].dtype, device=gt_bboxes_raw[0].device)
y, x = torch.meshgrid(y_range, x_range)
center_targets = []
labels = []
for n in range(len(gt_bboxes_raw)):
center_target = gt_bboxes_raw[n].new(featmap_size[0], featmap_size[1],4) + 0
label = gt_bboxes_raw[n].new_zeros(featmap_size)
gt_bboxes = gt_bboxes_raw[n]/stride
gt_masks = gt_masks_raw[n]
mask_size = gt_masks.shape
pos_left = torch.floor(gt_bboxes[:, 0]).long().clamp(0, gt_masks.shape[2]//stride - 1)
pos_right = torch.ceil(gt_bboxes[:, 2]).long().clamp(0, gt_masks.shape[2]//stride - 1)
pos_top = torch.floor(gt_bboxes[:, 1]).long().clamp(0, gt_masks.shape[1]//stride - 1)
pos_down = torch.ceil(gt_bboxes[:, 3]).long().clamp(0, gt_masks.shape[1]//stride - 1)
for px1, py1, px2, py2, gt_mask, (x1, y1, x2, y2) in \
zip(pos_left, pos_top, pos_right, pos_down, gt_masks, gt_bboxes):
gt_mask = mmcv.imrescale(gt_mask, scale=1. / stride)
gt_mask = torch.Tensor(gt_mask)
label[py1:py2 + 1, px1:px2 + 1] = gt_mask[py1:py2 + 1, px1:px2 + 1]
center_target[py1:py2 + 1, px1:px2 + 1, 0] = x1 / w
center_target[py1:py2 + 1, px1:px2 + 1, 1] = y1 / h
center_target[py1:py2 + 1, px1:px2 + 1, 2] = x2 / w
center_target[py1:py2 + 1, px1:px2 + 1, 3] = y2 / h
center_targets.append(center_target.reshape(-1, 4))
labels.append(label.reshape(-1, 1))
labels = torch.cat(labels)
center_targets = torch.cat(center_targets)
return labels, center_targets
def fcos_target(self, points, gt_bboxes_list, gt_labels_list):
assert len(points) == len(self.regress_ranges)
num_levels = len(points)
# expand regress ranges to align with points
expanded_regress_ranges = [
points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
points[i]) for i in range(num_levels)
]
# concat all levels points and regress ranges
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(points, dim=0)
# the number of points per img, per lvl
num_points = [center.size(0) for center in points]
# get labels and bbox_targets of each image
labels_list, bbox_targets_list, gt_inds = multi_apply(
self.fcos_target_single,
gt_bboxes_list,
gt_labels_list,
points=concat_points,
regress_ranges=concat_regress_ranges,
num_points_per_lvl=num_points)
# split to per img, per level
labels_list = [labels.split(num_points, 0) for labels in labels_list]
bbox_targets_list = [
bbox_targets.split(num_points, 0)
for bbox_targets in bbox_targets_list
]
# concat per level image
concat_lvl_labels = []
concat_lvl_bbox_targets = []
for i in range(num_levels):
concat_lvl_labels.append(
torch.cat([labels[i] for labels in labels_list]))
concat_lvl_bbox_targets.append(
torch.cat(
[bbox_targets[i] for bbox_targets in bbox_targets_list]))
return concat_lvl_labels, concat_lvl_bbox_targets, labels_list, bbox_targets_list, gt_inds
def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges,
num_points_per_lvl):
num_points = points.size(0)
num_gts = gt_labels.size(0)
if num_gts == 0:
return gt_labels.new_zeros(num_points), \
gt_bboxes.new_zeros((num_points, 4))
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = | |
# Repository: mir-group/CiderPress
from pyscf import scf, dft, gto, ao2mo, df, lib, cc
from pyscf.dft.numint import eval_ao, eval_rho
from pyscf.dft.gen_grid import Grids
from pyscf.pbc.tools.pyscf_ase import atoms_from_ase
import numpy as np
import logging
CALC_TYPES = {
'RHF' : scf.hf.RHF,
'UHF' : scf.uhf.UHF,
'RKS' : dft.rks.RKS,
'UKS' : dft.uks.UKS,
'CCSD' : cc.ccsd.CCSD,
'UCCSD' : cc.uccsd.UCCSD
}
SCF_TYPES = {
'RHF' : scf.hf.RHF,
'ROHF' : scf.rohf.ROHF,
'UHF' : scf.uhf.UHF,
'RKS' : dft.RKS,
'UKS' : dft.UKS
}
########################################################
# BASIC HELPER ROUTINES FOR RUNNING PYSCF CALCULATIONS #
########################################################
def mol_from_ase(atoms, basis, spin=0, charge=0):
"""
Get a pyscf gto.Mole object from an ase Atoms object (atoms).
Assign it the atomic basis set (basis).
Return the Mole object.
"""
mol = gto.Mole()
mol.atom = atoms_from_ase(atoms)
mol.basis = basis
mol.spin = spin
mol.charge = charge
mol.build()
return mol
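# Example usage (a minimal sketch, not part of the original module): building a
# Mole from an ASE Atoms object. The water geometry and 'sto-3g' basis are
# illustrative assumptions and require the `ase` package to be installed.
def _example_mol_from_ase():
    from ase.build import molecule
    atoms = molecule('H2O')
    return mol_from_ase(atoms, 'sto-3g', spin=0, charge=0)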
def setup_rks_calc(mol, xc, grid_level=3, vv10=False, **kwargs):
"""
Set up a PySCF RKS calculation with sensible defaults.
"""
rks = dft.RKS(mol)
rks.xc = xc
rks.grids.level = grid_level
rks.grids.build()
logging.warning('xc: {}, grid level: {}'.format(xc, grid_level))
if vv10:
logging.warning('Using VV10 in RKS setup')
rks.nlc = 'VV10'
if np.array([gto.charge(mol.atom_symbol(i)) <= 18 for i in range(mol.natm)]).all():
rks.nlcgrids.prune = dft.gen_grid.sg1_prune
else:
rks.nlcgrids.prune = None
rks.nlcgrids.level = 1
return rks
def setup_uks_calc(mol, xc, grid_level=3, vv10=False, **kwargs):
"""
Set up a PySCF UKS calculation with sensible defaults.
"""
uks = dft.UKS(mol)
uks.xc = xc
uks.grids.level = grid_level
uks.grids.build()
logging.warning('xc: {}, grid level: {}'.format(xc, grid_level))
if vv10:
logging.warning('Using VV10 in UKS setup')
uks.nlc = 'VV10'
if np.array([gto.charge(mol.atom_symbol(i)) <= 18 for i in range(mol.natm)]).all():
uks.nlcgrids.prune = dft.gen_grid.sg1_prune
else:
uks.nlcgrids.prune = None
uks.nlcgrids.level = 1
return uks
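# Example usage (a minimal sketch, not part of the original module): setting up
# a spin-polarised UKS calculation with the helper above. The oxygen-atom
# system, basis, functional, and grid level are illustrative assumptions.
def _example_setup_uks_calc():
    mol = gto.M(atom='O 0 0 0', basis='def2-svp', spin=2)
    uks = setup_uks_calc(mol, 'PBE', grid_level=2)
    return uks.kernel()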
def run_scf(mol, calc_type, functional=None, remove_ld=False, dm0=None):
"""
Run an SCF calculation on a gto.Mole object (mol)
of the given calc_type in SCF_TYPES and return the calc object.
Note: for RKS or UKS, the default functional is used unless *functional* is given.
"""
if not calc_type in SCF_TYPES:
raise ValueError('Calculation type must be in {}'.format(list(SCF_TYPES.keys())))
calc = SCF_TYPES[calc_type](mol)
if remove_ld:
logging.info("Removing linear dependence from overlap matrix")
calc = scf.addons.remove_linear_dep_(calc)
if 'KS' in calc_type and functional is not None:
calc.xc = functional
if 'MN' in functional:
logging.info('MN grid level 4')
calc.grids.level = 4
if functional == 'wB97M_V':
logging.info('Using Specialized wB97M-V params')
calc.nlc = 'VV10'
calc.grids.prune = None
calc.grids.level = 4
if np.array([gto.charge(mol.atom_symbol(i)) <= 18 for i in range(mol.natm)]).all():
calc.nlcgrids.prune = dft.gen_grid.sg1_prune
else:
calc.nlcgrids.prune = None
calc.nlcgrids.level = 1
calc.kernel(dm0 = dm0)
return calc
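# Example usage (a minimal sketch, not part of the original module): running a
# restricted Kohn-Sham calculation with the helper above. The H2 geometry,
# basis, and PBE functional are illustrative assumptions.
def _example_run_scf():
    mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g')
    calc = run_scf(mol, 'RKS', functional='PBE')
    return calc.e_tot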
def run_cc(hf):
"""
Run and return a CCSD (restricted) or UCCSD (unrestricted) calculation
using the molecular orbital coefficients of the converged HF object hf.
"""
if type(hf) == SCF_TYPES['RHF']:
calc_cls = cc.CCSD
elif type(hf) == SCF_TYPES['UHF']:
calc_cls = cc.UCCSD
else:
raise NotImplementedError('HF type {} not supported'.format(type(hf)) +\
'\nSupported Types: {}, {}'.format(SCF_TYPES['RHF'], SCF_TYPES['UHF']))
calc = calc_cls(hf)
calc.kernel()
return calc
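# Example usage (a minimal sketch, not part of the original module): a CCSD
# calculation on top of a converged RHF reference. The H2 system is an
# illustrative assumption.
def _example_run_cc():
    mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g')
    hf = run_scf(mol, 'RHF')
    mycc = run_cc(hf)
    return mycc.e_corr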
#############################################
# HELPER FUNCTIONS FOR THE analyzers MODULE #
#############################################
def get_grid(mol, level=3):
"""
Get the real-space grid of a molecule for numerical integration.
"""
grid = Grids(mol)
grid.level = level
grid.kernel()
return grid
def get_ha_total(rdm1, eeint):
return np.sum(np.sum(eeint * rdm1, axis=(2,3)) * rdm1)
def get_hf_coul_ex_total(mol, hf):
rdm1 = hf.make_rdm1()
jmat, kmat = hf.get_jk(mol, rdm1)
return np.sum(jmat * rdm1) / 2, -np.sum(kmat * rdm1) / 4
def get_hf_coul_ex_total2(rdm1, jmat, kmat):
if len(rdm1.shape) == 2:
return np.sum(jmat * rdm1) / 2, -np.sum(kmat * rdm1) / 4
else:
return np.sum(jmat * np.sum(rdm1, axis=0)) / 2, -np.sum(kmat * rdm1) / 2
def get_hf_coul_ex_total_unrestricted(mol, hf):
rdm1 = hf.make_rdm1()
jmat, kmat = hf.get_jk(mol, rdm1)
return np.sum(jmat * np.sum(rdm1, axis=0)) / 2, -np.sum(kmat * rdm1) / 2
def transform_basis_1e(mat, coeff):
"""
Transforms the 1-electron matrix mat into the basis
described by coeff (with the basis vectors being the columns).
To transform AO operator to MO operator, pass mo_coeff.
To transform MO operator to AO operator, pass inv(mo_coeff).
To transform AO density matrix to MO density matrix, pass inv(transpose(mo_coeff)).
To transform MO density matrix to AO density matrix, pass transpose(mo_coeff).
"""
if len(coeff.shape) == 2:
return np.matmul(coeff.transpose(), np.matmul(mat, coeff))
else:
if len(coeff) != 2 or len(mat) != 2:
raise ValueError('Need two sets of orbitals, two mats for unrestricted case.')
part0 = np.matmul(coeff[0].transpose(), np.matmul(mat[0], coeff[0]))
part1 = np.matmul(coeff[1].transpose(), np.matmul(mat[1], coeff[1]))
return np.array([part0, part1])
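# Example usage (a minimal sketch, not part of the original module): rotating
# the AO-basis core Hamiltonian into the MO basis of a converged RHF
# calculation. The water geometry and basis are illustrative assumptions.
def _example_transform_basis_1e():
    mol = gto.M(atom='O 0 0 0; H 0 0.76 0.59; H 0 -0.76 0.59', basis='sto-3g')
    mf = scf.RHF(mol).run()
    hcore_mo = transform_basis_1e(mf.get_hcore(), mf.mo_coeff)
    return hcore_mo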
def make_rdm2_from_rdm1(rdm1):
"""
For an RHF calculation, return the 2-RDM from
a given 1-RDM. Given D2(ijkl)=<psi| i+ k+ l j |psi>,
and D(ij)=<psi| i+ j |psi>, then
D2(ijkl) = D(ij) * D(kl) - 0.5 * D(lj) * D(ki)
"""
rdm1copy = rdm1.copy()
part1 = np.einsum('ij,kl->ijkl', rdm1, rdm1copy)
part2 = np.einsum('lj,ki->ijkl', rdm1, rdm1copy)
return part1 - 0.5 * part2
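# Example usage (a minimal sketch, not part of the original module): building
# the approximate RHF 2-RDM from the SCF 1-RDM. The H2 system is an
# illustrative assumption; the contraction with the AO two-electron integrals
# is only meant to show a typical downstream use of the 2-RDM.
def _example_make_rdm2_from_rdm1():
    mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g')
    mf = scf.RHF(mol).run()
    rdm2 = make_rdm2_from_rdm1(mf.make_rdm1())
    eri = mol.intor('int2e')  # (nao, nao, nao, nao) array in chemists' notation
    return 0.5 * np.einsum('ijkl,ijkl->', eri, rdm2)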
def make_rdm2_from_rdm1_unrestricted(rdm1):
"""
For a UHF calculation, return the 2-RDM from
a given 1-RDM. Given D2(ijkl)=<psi| i+ k+ l j |psi>,
and D(ij)=<psi| i+ j |psi>, then:
For like spin, D2(ijkl) = D(ij) * D(kl) - D(lj) * D(ki).
For opposite spin, D2(ijkl) = D(ij) * D(kl)
Return D(uu,ijkl), D(ud,ijkl), D(dd,ijkl)
"""
spinparts = []
rdm1copy = rdm1.copy()
for s in [0,1]:
part1 = np.einsum('ij,kl->ijkl', rdm1[s], rdm1copy[s])
part2 = np.einsum('lj,ki->ijkl', rdm1[s], rdm1copy[s])
spinparts.append(part1 - part2)
mixspinpart = np.einsum('ij,kl->ijkl', rdm1[0], rdm1copy[1])
return np.array([spinparts[0], mixspinpart, spinparts[1]])
def get_ao_vals(mol, points):
return eval_ao(mol, points)
def get_mgga_data(mol, grid, rdm1):
"""
Get atomic orbital and density data.
See eval_ao and eval_rho docs for details.
Briefly, returns 0-3 derivatives of the atomic orbitals
in ao_data;
and the density, first derivatives of density,
Laplacian of density, and kinetic energy density
in rho_data.
"""
ao_data = eval_ao(mol, grid.coords, deriv=3)
if len(rdm1.shape) == 2:
rho_data = eval_rho(mol, ao_data, rdm1, xctype='mGGA')
else:
part0 = eval_rho(mol, ao_data, rdm1[0], xctype='mGGA')
part1 = eval_rho(mol, ao_data, rdm1[1], xctype='mGGA')
rho_data = np.array([part0, part1])
return ao_data, rho_data
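# Example usage (a minimal sketch, not part of the original module): evaluating
# meta-GGA level density data on a coarse grid. The H2 system and grid level
# are illustrative assumptions.
def _example_get_mgga_data():
    mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g')
    mf = scf.RHF(mol).run()
    grid = get_grid(mol, level=1)
    ao_data, rho_data = get_mgga_data(mol, grid, mf.make_rdm1())
    return rho_data[0]  # electron density on the grid points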
def get_tau_and_grad_helper(mol, grid, rdm1, ao_data):
"""
Passes the derivatives of the atomic orbitals
to eval_rho to get the kinetic energy density and its
derivatives. Not sure if this works.
"""
# 0 1 2 3 4 5 6 7 8 9
# 0 x y z xx xy xz yy yz zz
aox = ao_data[[1, 4, 5, 6]]
aoy = ao_data[[2, 5, 7, 8]]
aoz = ao_data[[3, 6, 8, 9]]
tau = eval_rho(mol, aox, rdm1, xctype='GGA')
tau += eval_rho(mol, aoy, rdm1, xctype='GGA')
tau += eval_rho(mol, aoz, rdm1, xctype='GGA')
return 0.5 * tau
def get_tau_and_grad(mol, grid, rdm1, ao_data):
if len(rdm1.shape) == 2:
return get_tau_and_grad_helper(mol, grid, rdm1, ao_data)
else:
return np.array([get_tau_and_grad_helper(mol, grid, rdm1[0], ao_data),\
get_tau_and_grad_helper(mol, grid, rdm1[1], ao_data)])
def get_rho_second_deriv_helper(mol, grid, dm, ao):
from pyscf.dft.numint import _contract_rho, _dot_ao_dm
from pyscf.dft.gen_grid import make_mask, BLKSIZE
nao = mol.nao_nr()
N = grid.weights.shape[0]
non0tab = np.ones(((N+BLKSIZE-1)//BLKSIZE, mol.nbas),
dtype=np.uint8)
shls_slice = (0, mol.nbas)
ao_loc = mol.ao_loc_nr()
c0 = _dot_ao_dm(mol, ao[0], dm, non0tab, shls_slice, ao_loc)
c1 = np.zeros((3, N, nao))
# 0 1 2 3 4 5 6 7 8 9
# 0 x y z xx xy xz yy yz zz
# - - - - 0 1 2 3 4 5
# - - - - 11 12 13 22 23 33
ddrho = np.zeros((6, N))
alphas = [0, 0, 0, 1, 1, 2]
betas = [0, 1, 2, 1, 2, 2]
for i in range(3):
c1[i] = _dot_ao_dm(mol, ao[i+1], dm.T, non0tab, shls_slice, ao_loc)
for i in range(6):
term1 = _contract_rho(c0, ao[i + 4])
term2 = _contract_rho(c1[alphas[i]], ao[betas[i]+1])
total = term1 + term2
ddrho[i] = total + total.conj()
return ddrho
def get_rho_second_deriv(mol, grid, rdm1, ao_data):
if len(rdm1.shape) == 2:
return get_rho_second_deriv_helper(mol, grid, rdm1, ao_data)
else:
return np.array([get_rho_second_deriv_helper(mol, grid, rdm1[0], ao_data),\
get_rho_second_deriv_helper(mol, grid, rdm1[1], ao_data)])
def get_vele_mat(mol, points):
"""
Return shape (N, nao, nao)
"""
auxmol = gto.fakemol_for_charges(points)
vele_mat = df.incore.aux_e2(mol, auxmol)
return np.ascontiguousarray(np.transpose(vele_mat, axes=(2,0,1)))
def get_mo_vals(ao_vals, mo_coeff):
"""
Args:
ao_vals shape (N,nao)
mo_coeff shape (nao,nao)
Returns
shape (N,nao)
"""
return np.matmul(ao_vals, mo_coeff)
def get_mo_vele_mat(vele_mat, mo_coeff):
"""
Convert the return value of get_vele_mat to the MO basis.
"""
if len(mo_coeff.shape) == 2:
return np.matmul(mo_coeff.transpose(),
np.matmul(vele_mat, mo_coeff))
else:
tmp = np.einsum('puv,svj->spuj', vele_mat, mo_coeff)
return np.einsum('sui,spuj->spij', mo_coeff, tmp)
def get_vele_mat_chunks(mol, points, num_chunks, orb_vals, mo_coeff=None):
"""
Generate chunks of vele_mat on the fly to reduce memory load.
"""
num_pts = points.shape[0]
for i in range(num_chunks):
start = (i * num_pts) // num_chunks
end = ((i+1) * num_pts) // num_chunks
auxmol = gto.fakemol_for_charges(points[start:end])
orb_vals_chunk = orb_vals[start:end]
vele_mat_chunk = df.incore.aux_e2(mol, auxmol)
vele_mat_chunk = np.ascontiguousarray(np.transpose(
vele_mat_chunk, axes=(2,0,1)))
if mo_coeff is not None:
vele_mat_chunk = get_mo_vele_mat(vele_mat_chunk, mo_coeff)
yield vele_mat_chunk, orb_vals_chunk
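# Example usage (a minimal sketch, not part of the original module): consuming
# the chunk generator above so that only one chunk of the electron repulsion
# matrix is held at a time. The argument names mirror the helper's signature;
# num_chunks=4 is an illustrative choice.
def _example_vele_mat_chunks(mol, points, orb_vals, num_chunks=4):
    pieces = []
    for vele_chunk, orb_chunk in get_vele_mat_chunks(mol, points, num_chunks, orb_vals):
        # per-chunk processing would go here; we simply collect the chunks
        pieces.append(vele_chunk)
    return np.concatenate(pieces, axis=0)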
def get_vele_mat_generator(mol, points, | |
# File: baguette/app.py
import inspect
import ssl
import typing
from . import rendering
from .config import Config
from .headers import make_headers
from .httpexceptions import BadRequest
from .middleware import Middleware
from .middlewares import DefaultHeadersMiddleware, ErrorMiddleware
from .request import Request
from .responses import FileResponse, Response, make_response
from .router import Route, Router
from .types import Handler, HeadersType, Receive, Result, Scope, Send
from .view import View
class Baguette:
"""Implements an ASGI application.
This class is the main class for any application written with the baguette
framework.
Keyword Arguments
-----------------
config : :class:`Config`
Config to use for the application.
This replaces the other keyword arguments except ``middlewares``.
Default: see :class:`Config` defaults.
debug : :class:`bool`
Whether to run the application in debug mode.
Default: ``False``.
default_headers : :class:`list` of ``(str, str)`` tuples, \
:class:`dict` or :class:`Headers`
Default headers to include in every response.
Default: No headers.
static_url_path : :class:`str`
URL path for the static file handler.
Default: ``"static"``.
static_directory : :class:`str`
Path to the folder containing static files.
Default: ``"static"``.
templates_directory : :class:`str`
Path to the folder containing the HTML templates.
Default: ``"templates"``.
error_response_type : :class:`str`
Type of response to use in case of error.
One of: ``"plain"``, ``"json"``, ``"html"``.
Default: ``"plain"``.
error_include_description : :class:`bool`
Whether to include the error description in the response
in case of error.
If debug is ``True``, this will also be ``True``.
Default: ``True``.
middlewares : :class:`list` of middleware classes
The middlewares to add to the application.
Default: ``[]``.
Attributes
----------
config : :class:`Config`
The configuration of the app.
router : :class:`~baguette.router.Router`
The URL router of the app.
renderer : :class:`~baguette.rendering.Renderer`
Class that renders the templates.
debug : :class:`bool`
Whether the application is running in debug mode.
default_headers : :class:`Headers`
Default headers included in every response.
static_url_path : :class:`str`
URL path for the static file handler.
static_directory : :class:`str`
Path to the folder containing static files.
templates_directory : :class:`str`
Path to the folder containing the HTML templates.
error_response_type : :class:`str`
Type of response to use in case of error.
One of: ``"plain"``, ``"json"``, ``"html"``
error_include_description : :class:`bool`
Whether the error description is included in the response
in case of error.
If debug is ``True``, this will also be ``True``.
middlewares : :class:`list` of middlewares
The middlewares of the application.
"""
def __init__(
self,
*,
config: Config = None,
debug: bool = False,
default_headers: HeadersType = None,
static_url_path: str = "static",
static_directory: str = "static",
templates_directory: str = "static",
error_response_type: str = "plain",
error_include_description: bool = True,
middlewares: typing.List[typing.Type[Middleware]] = [],
):
self.router = Router()
self.config = config or Config(
debug=debug,
default_headers=default_headers,
static_url_path=static_url_path,
static_directory=static_directory,
templates_directory=templates_directory,
error_response_type=error_response_type,
error_include_description=error_include_description,
)
self.renderer = rendering.init(self.config.templates_directory)
self.add_route(
path=f"{self.config.static_url_path}/<filename:path>",
handler=self.handle_static_file,
name="static",
)
self.build_middlewares(
[ErrorMiddleware, *middlewares, DefaultHeadersMiddleware]
)
def __getattr__(self, name):
return getattr(self.config, name)
# --------------------------------------------------------------------------
# ASGI stuff
async def __call__(self, scope: Scope, receive: Receive, send: Send):
"""Entry point of the ASGI application."""
asgi_handlers = {
"http": self._handle_http,
"lifespan": self._handle_lifespan,
}
asgi_handler = asgi_handlers.get(scope["type"])
if asgi_handler is None:
raise NotImplementedError(
"{0!r} scope is not supported".format(scope["type"])
)
await asgi_handler(scope, receive, send)
async def _handle_http(self, scope: Scope, receive: Receive, send: Send):
"""Handles rquests where ``scope["type"] == "http"``."""
request = Request(self, scope, receive)
response = await self.handle_request(request)
await response._send(send)
async def _handle_lifespan(
self, scope: Scope, receive: Receive, send: Send
):
"""Handles rquests where ``scope["type"] == "lifespan"``."""
while True:
message = await receive()
if message["type"] == "lifespan.startup":
try:
await self.startup()
except Exception as e:
await send(
{
"type": "lifespan.startup.failed",
"message": str(e),
}
)
else:
await send({"type": "lifespan.startup.complete"})
elif message["type"] == "lifespan.shutdown":
try:
await self.shutdown()
except Exception as e:
await send(
{
"type": "lifespan.shutdown.failed",
"message": str(e),
}
)
else:
await send({"type": "lifespan.shutdown.complete"})
return
# --------------------------------------------------------------------------
# Lifespan
async def startup(self):
"""Runs on application startup.
This will be executed when you start the application.
For example, you can connect to a database.
.. versionadded:: 0.1.0
"""
async def shutdown(self):
"""Runs on application shutdown.
This will be executed when you stop the application.
For example, you can disconnect from a database.
.. versionadded:: 0.1.0
"""
# --------------------------------------------------------------------------
# HTTP
async def handle_request(self, request: Request) -> Response:
"""Handles a request and returns a response.
Arguments
---------
request: :class:`Request`
The request to handle.
Returns
-------
:class:`Response`
A response.
"""
return await self.middlewares[0](request)
async def dispatch(self, request: Request) -> Response:
"""Dispatches a request to the correct handler and return its result.
Arguments
---------
request: :class:`Request`
The request to handle.
Returns
-------
:class:`Response`
The handler response.
.. versionchanged:: 0.3.0
The return value isn't the handler return value anymore, but instead
a :class:`Response`.
"""
route: Route = self.router.get(request.path, request.method)
handler: Handler = route.handler
try:
kwargs = route.convert(request.path)
except ValueError:
raise BadRequest(description="Failed to convert URL parameters")
kwargs["request"] = request
if not route.handler_is_class:
kwargs = {
k: v for k, v in kwargs.items() if k in route.handler_kwargs
}
result: Result = await handler(**kwargs)
return make_response(result)
# --------------------------------------------------------------------------
# Routes
def add_route(
self,
path: str,
handler: Handler,
methods: typing.List[str] = None,
name: str = None,
defaults: dict = None,
) -> Route:
"""Adds a route to the application router.
Arguments
---------
handler: Async callable
An asynchronous callable (function or class)
that can handle a request.
path: :class:`str`
The path that the handler will handle.
Can be dynamic, see :ref:`dynamic_routing`.
methods: :class:`list` of :class:`str`
Allowed methods for this path.
Default: ``["GET", "HEAD"]``.
name: :class:`str`
Name of the route.
Default: handler function name.
defaults: Optional :class:`dict`
Default arguments to give to your handler.
Default: ``{}``.
Returns
-------
:class:`~baguette.router.Route`
The created route.
"""
return self.router.add_route(
path=path,
name=name,
handler=handler,
methods=methods or ["GET", "HEAD"],
defaults=defaults or {},
)
def route(
self,
path: str,
methods: typing.List[str] = None,
name: str = None,
defaults: dict = None,
):
"""Decorator to add a handler function to the router with the given
path.
Arguments
---------
path: :class:`str`
The path that the handler will handle.
Can be dynamic, see :ref:`dynamic_routing`.
methods: Optional :class:`list` of :class:`str`
Allowed methods for this path.
Default: ``["GET", "HEAD"]``.
name: Optional :class:`str`
Name of the route.
Default: handler function name.
defaults: Optional :class:`dict`
Default arguments to give to your handler.
Default: ``{}``.
.. versionchanged:: 0.0.3
Renamed from ``Baguette.endpoint`` to :meth:`Baguette.route`
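Examples
--------
A minimal handler registration (the handler receives ``request`` plus any
converted URL parameters, as passed by :meth:`Baguette.dispatch`):

.. code-block:: python

    app = Baguette()

    @app.route("/hello")
    async def hello(request):
        return "Hello, world!"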
"""
def decorator(func_or_class):
if inspect.isclass(func_or_class) and issubclass(
func_or_class, View
):
handler: Handler = func_or_class(self)
allowed_methods = handler.methods
else:
allowed_methods = methods
handler: Handler = func_or_class
self.add_route(
path=path,
name=name or func_or_class.__name__,
handler=handler,
methods=allowed_methods,
defaults=defaults or {},
)
return func_or_class
return decorator
# --------------------------------------------------------------------------
# Middleware
def build_middlewares(
self, middlewares: typing.List[typing.Type[Middleware]] = []
):
"""Builds the middleware stack from a list of middleware classes.
.. note::
The first middleware in the list will be the first called on a
request.
.. seealso::
There are middlewares included by default.
See :ref:`default_middlewares`.
Arguments
---------
middlewares: :class:`list` of :class:`Middleware`
The middlewares to add.
.. versionadded:: 0.3.0
"""
# first in the list, first called
self._raw_middlewares = middlewares
self.middlewares: typing.List[Middleware] = []
last = self.dispatch
for middleware in reversed(middlewares):
last = middleware(last, self.config)
self.middlewares.insert(0, last)
def add_middleware(
self, middleware: typing.Type[Middleware], index: int = 1
):
"""Adds a middleware to the middleware stack.
Arguments
---------
middleware: :class:`Middleware`
The middleware to add.
index: Optional :class:`int`
The index to add the middleware to.
Default: ``1`` (second middleware, called after
:class:`~baguette.middlewares.ErrorMiddleware`)
.. versionadded:: 0.3.0
"""
middlewares = self._raw_middlewares.copy()
middlewares.insert(index, middleware)
self.build_middlewares(middlewares)
def remove_middleware(self, middleware: typing.Type[Middleware]):
"""Removes a middleware from the middleware stack.
Arguments
---------
middleware: :class:`Middleware`
The middleware to remove.
.. versionadded:: 0.3.0
"""
middlewares = self._raw_middlewares.copy()
middlewares.remove(middleware)
self.build_middlewares(middlewares)
def middleware(
self,
index: int = 1,
):
"""Decorator to add a middleware to the app.
Arguments
---------
index: Optional :class:`int`
The index to add the middlware to.
Default: ``1`` (second middleware, called after
:class:`~baguette.middlewares.ErrorMiddleware`)
.. versionadded:: 0.3.0
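Examples
--------
A minimal function-based middleware (the wrapped function receives the next
middleware and the request, mirroring the ``middleware`` class built below):

.. code-block:: python

    @app.middleware()
    async def timing_middleware(next_middleware, request):
        response = await next_middleware(request)
        return response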
"""
def decorator(func_or_class):
if inspect.isclass(func_or_class):
middleware: typing.Type[Middleware] = func_or_class
else:
class middleware(Middleware):
async def __call__(self, request: Request) -> Response:
return await func_or_class(
self.next_middleware, request
)
self.add_middleware(middleware, index)
return middleware
return decorator
# --------------------------------------------------------------------------
# Other methods
async def handle_static_file(self, filename: str) -> FileResponse:
return FileResponse(self.config.static_directory, filename)
def run(
self,
*,
host: str = "127.0.0.1",
port: int = 8000,
uds: | |
software overlay
method simulates the effect of hardware overlay. BLEND, specifying a hint of blend
method. The blend method combines the color of the underlying pixel with the desired
color producing an approximation of the transient graphics. The default value is
(HARDWARE_OVERLAY, XOR, SOFTWARE_OVERLAY, BLEND). The values of this sequence are applied
by Abaqus when you start a session in first to last order. The first successful value
becomes the default highlight method. Not all graphics adapters support the
HARDWARE_OVERLAY value and you must use the *highlightMethodHint* argument to provide an
alternative. You can use a single value to set the first element of the list, or you can
use a tuple with one to four unique values. Abaqus sets any remaining elements of the
tuple to unique values based on the default order.
dragMode
A SymbolicConstant specifying which rendering is used during dynamic rotations of the
view. Possible values are: FAST, specifying a rendering mode where the image is rendered
in wireframe. AS_IS, specifying a rendering mode where the image is rendered as is. The
default value is AS_IS. When set to *dragMode*=FAST, a wireframe outline is drawn during
view changes by rotation, pan, or zoom. When *dragMode*=AS_IS, everything displayed in
the window will be drawn during view changes; however, the display may lag behind the
mouse movement when the model is complex especially if you are using an older or slower
system. For newer systems with graphics hardware acceleration the AS_IS setting can be
accommodated without significant loss of performance.
antiAlias
A Boolean specifying whether lines will be smoothed to reduce the jagged effect of
rasterization. The default value is ON.
autoFitAfterRotate
A Boolean specifying whether the model is automatically resized to fit the viewport
after each view rotation. The default value is OFF.
polygonOffsetConstant
A Float specifying the offset added when drawing the faces of a polygon. The
*polygonOffsetConstant* argument affects the behavior of only the OpenGL driver.
Possible values are 0.0 ≤ *polygonOffsetConstant* ≤ 100.0. The default value is
platform dependent and is typically between 0.0 and 2.0.
polygonOffsetSlope
A Float specifying the factor that multiplies the slope of each line before the line is
added to the vertexes of a polygon face. The *polygonOffsetSlope* argument affects the
behavior of only the OpenGL driver. Possible values are 0.0 ≤ *polygonOffsetSlope* ≤
100.0. The default value is platform dependent and is typically between 0.0 and 2.0.
printPolygonOffsetConstant
A Float specifying the offset added when drawing the faces of a polygon.
*printPolygonOffsetConstant* is similar to *polygonOffsetConstant*; however,
*printPolygonOffsetConstant* is used when printing and *polygonOffsetConstant* is used
for display. Some systems, especially Windows, use different OpenGL drivers for printing
and display, and you may have to use different offset values for each driver.
printPolygonOffsetSlope
A Float specifying the factor that multiplies the slope of each line before the line is
added to the vertexes of a polygon face. *printPolygonOffsetSlope* is similar to
*polygonOffsetSlope*; however, *printPolygonOffsetSlope* is used when printing and
*polygonOffsetSlope* is used for display. Some systems, especially Windows, use
different OpenGL drivers for printing and display, and you may have to use different
offset values for each driver.
vertexArrays
A Boolean specifying how the three-dimensional vertices of the model are processed. When
*vertexArrays*=OFF, each vertex of the model is processed separately. When
*vertexArrays*=ON, the vertices are processed in large blocks resulting in faster
display. Not all graphics adapters support this capability correctly. An indicator that
the graphics adapters is not processing three-dimensional vertices correctly is the
absence of graphics during rubber banding operations. For example, when dynamically
dragging the radius of a circle in the Sketcher, the circle should be visible. The
default value is ON.
vertexArraysInDisplayLists
A Boolean specifying whether the *vertexArrays* setting should temporarily be set to OFF
when building a display list. The default value is ON. Some graphics adapters do not
properly support using vertex arrays inside a display list. Setting
*vertexArraysInDisplayLists* to OFF has a smaller impact on graphics performance than
setting *vertexArrays* or *displayLists* to OFF.
viewManipDisplayListThreshold
An Int specifying how large a display list may be created in order to accelerate view
manipulation operations. Increasing this value when viewing large models will increase
the delay before a view manipulation operation begins in order to obtain improved
graphics performance during the view manipulation. If set high with a large model, the
delay can be many seconds. In excessive cases, graphics memory can be exceeded and the
result may be an empty display list (no visible model) for the view manipulation. This
setting is treated as 0 if *displayLists*=OFF. Possible values are 0 ≤
*viewManipDisplayListThreshold* ≤ 20000. The default value is 40.
directRendering
A Boolean specifying how Abaqus renders X11 graphics operations. When
*directRendering*=OFF, the graphics are rendered through the X Server. When
*directRendering*=ON, the graphics operations are sent directly to the graphics adapter
producing faster displays. For maximum performance, the initial value is ON. This
argument is used only when you first start Abaqus/CAE; you cannot configure
*directRendering* during a session.
hardwareAcceleration
A Boolean specifying whether a hardware accelerated OpenGL graphics driver will be used
on Windows platforms. The default value is ON. When *hardwareAcceleration*=OFF, the
graphics driver uses a software implementation of OpenGL that is included with the
operating system. This results in slower drawing on most systems; however, you may have
to use the software implementation of OpenGL if the hardware graphics driver is
incompatible with Abaqus/CAE. *hardwareAcceleration* is used only when you first start
Abaqus/CAE on a Windows platform; you cannot configure *hardwareAcceleration* during a
session. *hardwareAcceleration* is not used when you start Abaqus/CAE on an X-Windows
platform and display to a Windows platform running Exceed or any other X-Windows server.
hardwareOverlay
A Boolean specifying whether hardware overlay planes will be used if available. The
default value is the same value as the *hardwareOverlayAvailable* member. When
*hardwareOverlayAvailable*=OFF, it will not be possible to set *hardwareOverlay* to ON
and the HARDWARE_OVERLAY highlight method will not be available. If viewports display a
solid color and will not display the model, it will be necessary to inhibit hardware
overlay completely by setting the ABAQUS_EMULATE_OVERLAYS environment variable (to any
value) before starting Abaqus/CAE. *hardwareOverlay* is used only when you first start
Abaqus/CAE; you cannot configure *hardwareOverlay* during a session.
textureMapping
A Boolean specifying whether textures will be used to display contour plots. The default
value is ON. Turning off texture mapping is necessary only if viewports will not
correctly display a contour plot of your model.
printTextureMapping
A Boolean specifying whether textures will be used to display contour plots. The default
value is ON. Turning off texture mapping for printing is necessary only if printed output
does not correctly display a contour plot of your model. *printTextureMapping* is
similar to *textureMapping*; however, *printTextureMapping* is used when printing and
*textureMapping* is used for display. Some systems, especially Windows, use different
OpenGL drivers for printing and display, and you may have to use different settings for
each driver.
backgroundStyle
A SymbolicConstant specifying the background style to be used for all viewport windows.
Possible values are SOLID and GRADIENT. The default value is SOLID. If
*backgroundStyle*=SOLID, the viewport background will appear as a solid color as
specified by *backgroundColor*. If *backgroundStyle*=GRADIENT, the viewport | |
*,
as_dict: bool,
) -> Union[List[DF], Dict[Any, DF]]:
...
def partition_by(
self: DF,
groups: Union[str, List[str]],
maintain_order: bool = True,
*,
as_dict: bool = False,
) -> Union[List[DF], Dict[Any, DF]]:
"""
Split into multiple DataFrames partitioned by groups.
Parameters
----------
groups
Groups to partition by
maintain_order
Keep predictable output order. This is slower as it requires an extra sort operation.
as_dict
Return as dictionary
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": ["A", "A", "B", "B", "C"],
... "N": [1, 2, 2, 4, 2],
... "bar": ["k", "l", "m", "m", "l"],
... }
... )
>>> df.partition_by(groups="foo", maintain_order=True)
[shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ N ┆ bar │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ A ┆ 1 ┆ k │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ A ┆ 2 ┆ l │
└─────┴─────┴─────┘,
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ N ┆ bar │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ B ┆ 2 ┆ m │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ B ┆ 4 ┆ m │
└─────┴─────┴─────┘,
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ N ┆ bar │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ C ┆ 2 ┆ l │
└─────┴─────┴─────┘]
"""
if isinstance(groups, str):
groups = [groups]
if as_dict:
out: Dict[Any, DF] = dict()
if len(groups) == 1:
for _df in self._df.partition_by(groups, maintain_order):
df = self._from_pydf(_df)
out[df[groups][0, 0]] = df
else:
for _df in self._df.partition_by(groups, maintain_order):
df = self._from_pydf(_df)
out[df[groups].row(0)] = df
return out
else:
return [
self._from_pydf(_df)
for _df in self._df.partition_by(groups, maintain_order)
]
def shift(self: DF, periods: int) -> DF:
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with `Nones`.
Parameters
----------
periods
Number of places to shift (may be negative).
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.shift(periods=1)
shape: (3, 3)
┌──────┬──────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞══════╪══════╪══════╡
│ null ┆ null ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
└──────┴──────┴──────┘
>>> df.shift(periods=-1)
shape: (3, 3)
┌──────┬──────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞══════╪══════╪══════╡
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ null ┆ null │
└──────┴──────┴──────┘
"""
return self._from_pydf(self._df.shift(periods))
def shift_and_fill(
self: DF, periods: int, fill_value: Union[int, str, float]
) -> DF:
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with the result of the `fill_value` expression.
Parameters
----------
periods
Number of places to shift (may be negative).
fill_value
fill None values with this value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.shift_and_fill(periods=1, fill_value=0)
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 0 ┆ 0 ┆ 0 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
└─────┴─────┴─────┘
"""
return (
self.lazy()
.shift_and_fill(periods, fill_value)
.collect(no_optimization=True, string_cache=False)
)
def is_duplicated(self) -> "pli.Series":
"""
Get a mask of all duplicated rows in this DataFrame.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3, 1],
... "b": ["x", "y", "z", "x"],
... }
... )
>>> df.is_duplicated()
shape: (4,)
Series: '' [bool]
[
true
false
false
true
]
"""
return pli.wrap_s(self._df.is_duplicated())
def is_unique(self) -> "pli.Series":
"""
Get a mask of all unique rows in this DataFrame.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3, 1],
... "b": ["x", "y", "z", "x"],
... }
... )
>>> df.is_unique()
shape: (4,)
Series: '' [bool]
[
false
true
true
false
]
"""
return pli.wrap_s(self._df.is_unique())
def lazy(self: DF) -> "pli.LazyFrame[DF]":
"""
Start a lazy query from this point. This returns a `LazyFrame` object.
Operations on a `LazyFrame` are not executed until this is requested by either calling:
* `.fetch()` (run on a small number of rows)
* `.collect()` (run on all data)
* `.describe_plan()` (print unoptimized query plan)
* `.describe_optimized_plan()` (print optimized query plan)
* `.show_graph()` (show the (un)optimized query plan as a graphviz graph)
Lazy operations are advised because they allow for query optimization and more parallelization.
"""
return self._lazyframe_class._from_pyldf(self._df.lazy())
def select(
self: DF,
exprs: Union[
str,
"pli.Expr",
Sequence[Union[str, "pli.Expr", bool, int, float, "pli.Series"]],
"pli.Series",
],
) -> DF:
"""
Select columns from this DataFrame.
Parameters
----------
exprs
Column or columns to select.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.select("foo")
shape: (3, 1)
┌─────┐
│ foo │
│ --- │
│ i64 │
╞═════╡
│ 1 │
├╌╌╌╌╌┤
│ 2 │
├╌╌╌╌╌┤
│ 3 │
└─────┘
"""
return (
self.lazy()
.select(exprs) # type: ignore
.collect(no_optimization=True, string_cache=False)
)
def with_columns(self: DF, exprs: Union["pli.Expr", List["pli.Expr"]]) -> DF:
"""
Add or overwrite multiple columns in a DataFrame.
Parameters
----------
exprs
List of Expressions that evaluate to columns.
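Examples
--------
Add two derived columns (illustrative expressions):
>>> df = pl.DataFrame({"a": [1, 2], "b": [3, 4]})
>>> df2 = df.with_columns(
...     [
...         (pl.col("a") * 2).alias("a_doubled"),
...         (pl.col("a") + pl.col("b")).alias("a_plus_b"),
...     ]
... )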
"""
if not isinstance(exprs, list):
exprs = [exprs]
return (
self.lazy()
.with_columns(exprs)
.collect(no_optimization=True, string_cache=False)
)
def n_chunks(self) -> int:
"""
Get number of chunks used by the ChunkedArrays of this DataFrame.
"""
return self._df.n_chunks()
@overload
def max(self: DF, axis: Literal[0] = ...) -> DF:
...
@overload
def max(self, axis: Literal[1]) -> "pli.Series":
...
@overload
def max(self: DF, axis: int = 0) -> Union[DF, "pli.Series"]:
...
def max(self: DF, axis: int = 0) -> Union[DF, "pli.Series"]:
"""
Aggregate the columns of this DataFrame to their maximum value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.max()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪══════╡
│ 3 ┆ 8 ┆ null │
└─────┴─────┴──────┘
"""
if axis == 0:
return self._from_pydf(self._df.max())
if axis == 1:
return pli.wrap_s(self._df.hmax())
raise ValueError("Axis should be 0 or 1.") # pragma: no cover
@overload
def min(self: DF, axis: Literal[0] = ...) -> DF:
...
@overload
def min(self, axis: Literal[1]) -> "pli.Series":
...
@overload
def min(self: DF, axis: int = 0) -> Union[DF, "pli.Series"]:
...
def min(self: DF, axis: int = 0) -> Union[DF, "pli.Series"]:
"""
Aggregate the columns of this DataFrame to their minimum value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.min()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪══════╡
│ 1 ┆ 6 ┆ null │
└─────┴─────┴──────┘
"""
if axis == 0:
return self._from_pydf(self._df.min())
if axis == 1:
return pli.wrap_s(self._df.hmin())
raise ValueError("Axis should be 0 or 1.") # pragma: no cover
@overload
def sum(self: DF, *, axis: Literal[0] = ..., null_strategy: str = "ignore") -> DF:
...
@overload
def sum(self, *, axis: Literal[1], null_strategy: str = "ignore") -> "pli.Series":
...
@overload
def sum(
self: DF, *, axis: int = 0, null_strategy: str = "ignore"
) -> Union[DF, "pli.Series"]:
...
def sum(
self: DF, *, axis: int | |
element matching each trial start
if take == 'last':
iall, iu = np.unique(np.flip(ind), return_index=True)
t_event_nans[iall] = t_event[- (iu - ind.size + 1)]
elif take == 'first':
iall, iu = np.unique(ind, return_index=True)
t_event_nans[iall] = t_event[iu]
else: # if the index is arbitrary, needs to be numeric (could be negative if from the end)
iall = np.unique(ind)
minsize = take + 1 if take >= 0 else - take
# for each trial, take the takenth element if there are enough values in trial
for iu in iall:
match = t_event[iu == ind]
if len(match) >= minsize:
t_event_nans[iu] = match[take]
return t_event_nans
def _get_sync_fronts(sync, channel_nb, tmin=None, tmax=None):
selection = sync['channels'] == channel_nb
selection = np.logical_and(selection, sync['times'] <= tmax) if tmax else selection
selection = np.logical_and(selection, sync['times'] >= tmin) if tmin else selection
return Bunch({'times': sync['times'][selection],
'polarities': sync['polarities'][selection]})
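# Example usage (a minimal sketch, not part of the original module): extracting
# the Bpod fronts from a sync dictionary. Channel 7 is the 'bpod' entry of the
# default channel map quoted in the extract_behaviour_sync docstring below.
def _example_get_sync_fronts(sync):
    bpod = _get_sync_fronts(sync, 7)
    return bpod['times'], bpod['polarities']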
def _clean_frame2ttl(frame2ttl, display=False):
"""
Frame2ttl calibration can be unstable and the fronts may flicker at an unrealistic
pace. This removes consecutive frame2ttl pulses that happen too fast, i.e. below the
F2TTL_THRESH threshold.
"""
dt = np.diff(frame2ttl['times'])
iko = np.where(np.logical_and(dt < F2TTL_THRESH, frame2ttl['polarities'][:-1] == -1))[0]
iko = np.unique(np.r_[iko, iko + 1])
frame2ttl_ = {'times': np.delete(frame2ttl['times'], iko),
'polarities': np.delete(frame2ttl['polarities'], iko)}
if iko.size > (0.1 * frame2ttl['times'].size):
_logger.warning(f'{iko.size} ({iko.size / frame2ttl["times"].size * 100} %) '
f'frame to TTL polarity switches below {F2TTL_THRESH} secs')
if display:
from ibllib.plots import squares
plt.figure()
squares(frame2ttl['times'] * 1000, frame2ttl['polarities'], yrange=[0.1, 0.9])
squares(frame2ttl_['times'] * 1000, frame2ttl_['polarities'], yrange=[1.1, 1.9])
import seaborn as sns
sns.displot(dt[dt < 0.05], binwidth=0.0005)
return frame2ttl_
def extract_wheel_sync(sync, chmap=None):
"""
Extract wheel positions and times from sync fronts dictionary for all 16 chans
Output position is in radians, mathematical convention
:param sync: dictionary 'times', 'polarities' of fronts detected on sync trace
:param chmap: dictionary containing channel indices. Default to constant.
chmap = {'rotary_encoder_0': 13, 'rotary_encoder_1': 14}
:return: timestamps (np.array)
:return: positions (np.array)
"""
wheel = {}
channela = _get_sync_fronts(sync, chmap['rotary_encoder_0'])
channelb = _get_sync_fronts(sync, chmap['rotary_encoder_1'])
wheel['re_ts'], wheel['re_pos'] = _rotary_encoder_positions_from_fronts(
channela['times'], channela['polarities'], channelb['times'], channelb['polarities'],
ticks=WHEEL_TICKS, radius=1, coding='x4')
return wheel['re_ts'], wheel['re_pos']
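# Example usage (a minimal sketch, not part of the original module): extracting
# wheel timestamps and positions with the default rotary-encoder channel map
# quoted in the docstring above.
def _example_extract_wheel_sync(sync):
    chmap = {'rotary_encoder_0': 13, 'rotary_encoder_1': 14}
    re_ts, re_pos = extract_wheel_sync(sync, chmap=chmap)
    return re_ts, re_pos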
def extract_behaviour_sync(sync, chmap=None, display=False, bpod_trials=None, tmax=np.inf):
"""
Extract behaviour trial events from the sync fronts dictionary
:param sync: dictionary 'times', 'polarities' of fronts detected on sync trace for all 16 chans
:param chmap: dictionary containing channel index. Default to constant.
chmap = {'bpod': 7, 'frame2ttl': 12, 'audio': 15}
:param display: bool or matplotlib axes: show the full session sync pulses display
defaults to False
:return: trials dictionary
"""
bpod = _get_sync_fronts(sync, chmap['bpod'], tmax=tmax)
if bpod.times.size == 0:
raise err.SyncBpodFpgaException('No Bpod event found in FPGA. No behaviour extraction. '
'Check channel maps.')
frame2ttl = _get_sync_fronts(sync, chmap['frame2ttl'], tmax=tmax)
frame2ttl = _clean_frame2ttl(frame2ttl)
audio = _get_sync_fronts(sync, chmap['audio'], tmax=tmax)
# extract events from the fronts for each trace
t_trial_start, t_valve_open, t_iti_in = _assign_events_bpod(bpod['times'], bpod['polarities'])
# One issue is that sometimes Bpod pulses may not have been detected; in this case,
# perform the Bpod/FPGA sync and add the trial starts that were not detected.
if bpod_trials:
bpod_start = bpod_trials['intervals_bpod'][:, 0]
fcn, drift, ibpod, ifpga = dsp.utils.sync_timestamps(
bpod_start, t_trial_start, return_indices=True)
# if it's drifting too much
if drift > 200 and bpod_start.size != t_trial_start.size:
raise err.SyncBpodFpgaException("sync cluster f*ck")
missing_bpod = fcn(bpod_start[np.setxor1d(ibpod, np.arange(len(bpod_start)))])
t_trial_start = np.sort(np.r_[t_trial_start, missing_bpod])
else:
_logger.warning("Deprecation Warning: calling FPGA trials extraction without a bpod trials"
" dictionary will result in an error.")
t_ready_tone_in, t_error_tone_in = _assign_events_audio(
audio['times'], audio['polarities'])
trials = Bunch({
'goCue_times': _assign_events_to_trial(t_trial_start, t_ready_tone_in, take='first'),
'errorCue_times': _assign_events_to_trial(t_trial_start, t_error_tone_in),
'valveOpen_times': _assign_events_to_trial(t_trial_start, t_valve_open),
'stimFreeze_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take=-2),
'stimOn_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take='first'),
'stimOff_times': _assign_events_to_trial(t_trial_start, frame2ttl['times']),
'itiIn_times': _assign_events_to_trial(t_trial_start, t_iti_in)
})
# feedback times are valve open on good trials and error tone in on error trials
trials['feedback_times'] = np.copy(trials['valveOpen_times'])
ind_err = np.isnan(trials['valveOpen_times'])
trials['feedback_times'][ind_err] = trials['errorCue_times'][ind_err]
trials['intervals'] = np.c_[t_trial_start, trials['itiIn_times']]
if display:
width = 0.5
ymax = 5
if isinstance(display, bool):
plt.figure("Ephys FPGA Sync")
ax = plt.gca()
else:
ax = display
r0 = _get_sync_fronts(sync, chmap['rotary_encoder_0'])
plots.squares(bpod['times'], bpod['polarities'] * 0.4 + 1, ax=ax, color='k')
plots.squares(frame2ttl['times'], frame2ttl['polarities'] * 0.4 + 2, ax=ax, color='k')
plots.squares(audio['times'], audio['polarities'] * 0.4 + 3, ax=ax, color='k')
plots.squares(r0['times'], r0['polarities'] * 0.4 + 4, ax=ax, color='k')
plots.vertical_lines(t_ready_tone_in, ymin=0, ymax=ymax,
ax=ax, label='goCue_times', color='b', linewidth=width)
plots.vertical_lines(t_trial_start, ymin=0, ymax=ymax,
ax=ax, label='start_trial', color='m', linewidth=width)
plots.vertical_lines(t_error_tone_in, ymin=0, ymax=ymax,
ax=ax, label='error tone', color='r', linewidth=width)
plots.vertical_lines(t_valve_open, ymin=0, ymax=ymax,
ax=ax, label='valveOpen_times', color='g', linewidth=width)
plots.vertical_lines(trials['stimFreeze_times'], ymin=0, ymax=ymax,
ax=ax, label='stimFreeze_times', color='y', linewidth=width)
plots.vertical_lines(trials['stimOff_times'], ymin=0, ymax=ymax,
ax=ax, label='stim off', color='c', linewidth=width)
plots.vertical_lines(trials['stimOn_times'], ymin=0, ymax=ymax,
ax=ax, label='stimOn_times', color='tab:orange', linewidth=width)
c = _get_sync_fronts(sync, chmap['left_camera'])
plots.squares(c['times'], c['polarities'] * 0.4 + 5, ax=ax, color='k')
c = _get_sync_fronts(sync, chmap['right_camera'])
plots.squares(c['times'], c['polarities'] * 0.4 + 6, ax=ax, color='k')
c = _get_sync_fronts(sync, chmap['body_camera'])
plots.squares(c['times'], c['polarities'] * 0.4 + 7, ax=ax, color='k')
ax.legend()
ax.set_yticklabels(['', 'bpod', 'f2ttl', 'audio', 're_0', ''])
ax.set_yticks([0, 1, 2, 3, 4, 5])
ax.set_ylim([0, 5])
return trials
def extract_sync(session_path, overwrite=False, ephys_files=None):
"""
    Reads the ephys binary file(s) and extracts the sync traces within the binary file folder
Assumes ephys data is within a `raw_ephys_data` folder
:param session_path: '/path/to/subject/yyyy-mm-dd/001'
:param overwrite: Bool on re-extraction, forces overwrite instead of loading existing files
:return: list of sync dictionaries
"""
session_path = Path(session_path)
if not ephys_files:
ephys_files = spikeglx.glob_ephys_files(session_path)
syncs = []
outputs = []
for efi in ephys_files:
bin_file = efi.get('ap', efi.get('nidq', None))
if not bin_file:
continue
alfname = dict(object='sync', namespace='spikeglx')
if efi.label:
alfname['extra'] = efi.label
file_exists = alf.io.exists(bin_file.parent, **alfname)
if not overwrite and file_exists:
_logger.warning(f'Skipping raw sync: SGLX sync found for probe {efi.label} !')
sync = alf.io.load_object(bin_file.parent, **alfname)
out_files, _ = alf.io._ls(bin_file.parent, **alfname)
else:
sr = spikeglx.Reader(bin_file)
sync, out_files = _sync_to_alf(sr, bin_file.parent, save=True, parts=efi.label)
outputs.extend(out_files)
syncs.extend([sync])
return syncs, outputs
def _get_all_probes_sync(session_path, bin_exists=True):
# round-up of all bin ephys files in the session, infer revision and get sync map
ephys_files = spikeglx.glob_ephys_files(session_path, bin_exists=bin_exists)
version = spikeglx.get_neuropixel_version_from_files(ephys_files)
# attach the sync information to each binary file found
for ef in ephys_files:
ef['sync'] = alf.io.load_object(ef.path, 'sync', namespace='spikeglx', short_keys=True)
ef['sync_map'] = get_ibl_sync_map(ef, version)
return ephys_files
def get_main_probe_sync(session_path, bin_exists=False):
"""
From 3A or 3B multiprobe session, returns the main probe (3A) or nidq sync pulses
with the attached channel map (default chmap if none)
:param session_path:
:return:
"""
ephys_files = _get_all_probes_sync(session_path, bin_exists=bin_exists)
if not ephys_files:
raise FileNotFoundError(f"No ephys files found in {session_path}")
version = spikeglx.get_neuropixel_version_from_files(ephys_files)
if version == '3A':
# the sync master is the probe with the most sync pulses
sync_box_ind = np.argmax([ef.sync.times.size for ef in ephys_files])
elif version == '3B':
# the sync master is the nidq breakout box
sync_box_ind = np.argmax([1 if ef.get('nidq') else 0 for ef in ephys_files])
sync = ephys_files[sync_box_ind].sync
sync_chmap = ephys_files[sync_box_ind].sync_map
return sync, sync_chmap
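# Hedged usage sketch (not from the original source); `session_path` and
# `bpod_trials` are hypothetical inputs:
#
#   sync, chmap = get_main_probe_sync(session_path)
#   trials = extract_behaviour_sync(sync, chmap=chmap, bpod_trials=bpod_trials)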
class ProbaContrasts(extractors_base.BaseBpodTrialsExtractor):
"""
Bpod pre-generated values for probabilityLeft, contrastLR, phase, quiescence
"""
save_names = ('_ibl_trials.contrastLeft.npy', '_ibl_trials.contrastRight.npy', None, None,
'_ibl_trials.probabilityLeft.npy', None)
var_names = ('contrastLeft', 'contrastRight', 'phase',
'position', 'probabilityLeft', 'quiescence')
def _extract(self, **kwargs):
"""Extracts positions, contrasts, quiescent delay, stimulus phase and probability left
from pregenerated session files.
Optional: saves alf contrastLR and probabilityLeft npy files"""
pe = self.get_pregenerated_events(self.bpod_trials, self.settings)
return [pe[k] for k in sorted(pe.keys())]
@staticmethod
def get_pregenerated_events(bpod_trials, settings):
num = settings.get("PRELOADED_SESSION_NUM", None)
if num is None:
num = settings.get("PREGENERATED_SESSION_NUM", None)
if num is None:
fn = settings.get('SESSION_LOADED_FILE_PATH', '')
fn = PureWindowsPath(fn).name
num = ''.join([d for d in fn if d.isdigit()])
if num == '':
                raise ValueError("Cannot determine the pregenerated session number "
                                 "needed to extract probabilityLeft.")
# Load the pregenerated file
ntrials = len(bpod_trials)
sessions_folder = Path(raw_data_loaders.__file__).parent.joinpath(
"extractors", "ephys_sessions")
fname = f"session_{num}_ephys_pcqs.npy"
pcqsp = np.load(sessions_folder.joinpath(fname))
pos = pcqsp[:, 0]
con = pcqsp[:, 1]
pos = pos[: ntrials]
con = con[: ntrials]
contrastRight = con.copy()
contrastLeft = con.copy()
contrastRight[pos < 0] = np.nan
contrastLeft[pos > 0] = np.nan
qui = pcqsp[:, 2]
qui = qui[: ntrials]
phase = pcqsp[:, 3]
phase = phase[: ntrials]
pLeft = pcqsp[:, 4]
pLeft = pLeft[: ntrials]
phase_path = sessions_folder.joinpath(f"session_{num}_stim_phase.npy")
is_patched_version = parse_version(
            settings.get('IBLRIG_VERSION_TAG', '0')) > parse_version('6.4.0')
if phase_path.exists() and is_patched_version:
phase = np.load(phase_path)[:ntrials]
return {'position': pos, 'quiescence': qui, 'phase': phase, 'probabilityLeft': pLeft,
'contrastRight': contrastRight, 'contrastLeft': contrastLeft}
class FpgaTrials(extractors_base.BaseExtractor):
save_names = ('_ibl_trials.feedbackType.npy', '_ibl_trials.choice.npy',
'_ibl_trials.rewardVolume.npy', '_ibl_trials.intervals_bpod.npy',
'_ibl_trials.intervals.npy', '_ibl_trials.response_times.npy',
'_ibl_trials.goCueTrigger_times.npy', None, None, None, None, None,
'_ibl_trials.feedback_times.npy', '_ibl_trials.goCue_times.npy', None, None,
'_ibl_trials.stimOff_times.npy', '_ibl_trials.stimOn_times.npy', None,
'_ibl_trials.firstMovement_times.npy', '_ibl_wheel.timestamps.npy',
'_ibl_wheel.position.npy', '_ibl_wheelMoves.intervals.npy',
'_ibl_wheelMoves.peakAmplitude.npy')
    var_names = 
from CommonServerPython import *
""" IMPORTS """
import requests
import ast
from datetime import datetime
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
# remove proxy environment variables if the proxy option is not enabled in the params
if not demisto.params().get("proxy"):
    # pop() with a default avoids a KeyError when a variable is not set
    os.environ.pop("HTTP_PROXY", None)
    os.environ.pop("HTTPS_PROXY", None)
    os.environ.pop("http_proxy", None)
    os.environ.pop("https_proxy", None)
""" GLOBAL VARS """
CONTEXT = demisto.getIntegrationContext()
USE_SSL = not demisto.params().get("unsecure", False)
DEMISTOBOT = "https://demistobot.demisto.com/azuresc-token"
SUBSCRIPTION_ID = CONTEXT.get("subscription_id")
SUBSCRIPTION_URL = "/subscriptions/{}".format(SUBSCRIPTION_ID)
TOKEN = demisto.params().get("token")
TENANT_ID = demisto.params().get("tenant_id")
BASE_URL = demisto.params().get("server_url")
RESOURCE = "https://management.azure.com/"
AUTH_GRANT_TYPE = "client_credentials"
# API Versions
ALERT_API_VERSION = "2015-06-01-preview"
LOCATION_API_VERSION = "2015-06-01-preview"
ATP_API_VERSION = "2017-08-01-preview"
APS_API_VERSION = "2017-08-01-preview"
IPP_API_VERSION = "2017-08-01-preview"
JIT_API_VERSION = "2015-06-01-preview"
STORAGE_API_VERSION = "2018-07-01"
""" HELPER FUNCTIONS """
def set_subscription_id():
"""
    Store the subscription ID in the integration context and return it
"""
headers = {"Authorization": TOKEN, "Accept": "application/json"}
params = {"tenant": TENANT_ID, "product": "AzureSecurityCenter"}
r = requests.get(DEMISTOBOT, headers=headers, params=params, verify=USE_SSL)
try:
data = r.json()
if r.status_code != requests.codes.ok:
return_error(
"Error in API call to Azure Security Center [{}] - {}".format(
r.status_code, r.text
)
)
sub_id = data.get("subscription_id")
demisto.setIntegrationContext(
{
"token": data.get("token"),
"stored": epoch_seconds(),
"subscription_id": sub_id,
}
)
return sub_id
except ValueError:
        return_error("There was a problem with your request: {}".format(r.content))
def epoch_seconds(d=None):
"""
    Return the number of seconds since epoch for the given date. If no date is given, use the current time.
"""
if not d:
d = datetime.utcnow()
return int((d - datetime.utcfromtimestamp(0)).total_seconds())
def get_token():
"""
Check if we have a valid token and if not get one
"""
token = CONTEXT.get("token")
stored = CONTEXT.get("stored")
if token and stored:
if epoch_seconds() - stored < 60 * 60 - 30:
return token
headers = {"Authorization": TOKEN, "Accept": "application/json"}
r = requests.get(
DEMISTOBOT,
headers=headers,
params={"tenant": TENANT_ID, "product": "AzureSecurityCenter"},
verify=USE_SSL,
)
data = r.json()
if r.status_code != requests.codes.ok:
return_error(
"Error in API call to Azure Security Center [{}] - {}".format(
r.status_code, r.text
)
)
demisto.setIntegrationContext(
{
"token": data.get("token"),
"stored": epoch_seconds(),
"subscription_id": data.get("subscription_id"),
}
)
return data.get("token")
def http_request(method, url_suffix, body=None, params=None, add_subscription=True):
"""
    Generic HTTP request to the Azure Resource Manager API
"""
token = get_token()
headers = {
"Authorization": "Bearer " + token,
"Content-Type": "application/json",
"Accept": "application/json",
}
if add_subscription:
url = BASE_URL + SUBSCRIPTION_URL + url_suffix
else:
url = BASE_URL + url_suffix
r = requests.request(method, url, json=body, params=params, headers=headers)
if r.status_code not in {200, 201, 202, 204}:
return_error(
"Error in API call to Azure Security Center [{}] - {}".format(
r.status_code, r.text
)
)
try:
r = r.json()
return r
except ValueError:
return dict()
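# Example of the URL produced by http_request() (descriptive comment, not from the
# original source; the subscription id is hypothetical and BASE_URL comes from the
# "server_url" parameter, e.g. "https://management.azure.com"):
#
#   http_request("GET", "/providers/Microsoft.Security/alerts?api-version=" + ALERT_API_VERSION)
#   -> GET {BASE_URL}/subscriptions/<subscription-id>/providers/Microsoft.Security/alerts?api-version=2015-06-01-preview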
# Format ports in JIT access policy rule to (portNum, protocol, allowedAddress, maxDuration)
def format_jit_port_rule(ports):
port_array = list()
for port in ports:
        # each item may be unicode; cast to str for consistent formatting
p_num = str(port.get("number"))
p_src_addr = (
str(port.get("allowedSourceAddressPrefix"))
if port.get("allowedSourceAddressPrefix") != "*"
else "any"
)
p_protocol = str(port.get("protocol")) if port.get("protocol") != "*" else "any"
p_max_duration = str(port.get("maxRequestAccessDuration"))
port_array.append(str((p_num, p_protocol, p_src_addr, p_max_duration)))
return ", ".join(port_array)
# Format ports in JIT access request to (portNum, allowedAddress, endTime, status)
def format_jit_port_request(ports):
port_array = list()
for port in ports:
        # each item may be unicode; cast to str for consistent formatting
p_num = str(port.get("number"))
p_src_addr = (
str(port.get("allowedSourceAddressPrefix"))
if port.get("allowedSourceAddressPrefix") != "*"
else "any"
)
p_status = str(port.get("status"))
p_end_time = str(port.get("endTimeUtc"))
port_array.append(str((p_num, p_src_addr, p_end_time, p_status)))
return ", ".join(port_array)
def normalize_context_key(string):
"""Normalize context keys
    Capitalize the first letter of the string and remove all spaces
Args:
string (str):
Returns:
Normalized string
"""
tmp = string[:1].upper() + string[1:]
return tmp.replace(" ", "")
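# e.g. normalize_context_key("compromised Entity") -> "CompromisedEntity"
# (illustrative example, not from the original source)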
""" FUNCTIONS """
""" Alert Start """
def get_alert_command(args):
"""Getting specified alert from API
Args
        args (dict): dictionary containing the command arguments
"""
resource_group_name = args.get("resource_group_name")
asc_location = args.get("asc_location")
alert_id = args.get("alert_id")
alert = get_alert(resource_group_name, asc_location, alert_id)
final_output = list()
# Basic Property Table
properties = alert.get("properties")
if properties:
basic_table_output = [
{
"DisplayName": properties.get("alertDisplayName"),
"CompromisedEntity": properties.get("compromisedEntity"),
"Description": properties.get("description"),
"DetectedTime": properties.get("detectedTimeUtc"),
"ReportedTime": properties.get("reportedTimeUtc"),
"ReportedSeverity": properties.get("reportedSeverity"),
"ConfidenceScore": properties.get("confidenceScore", "None"),
"State": properties.get("state"),
"ActionTaken": properties.get("actionTaken"),
"CanBeInvestigated": properties.get("canBeInvestigated"),
"RemediationSteps": properties.get("remediationSteps"),
"VendorName": properties.get("vendorName"),
"AssociatedResource": properties.get("associatedResource"),
"AlertName": properties.get("alertName"),
"InstanceID": properties.get("instanceId", "None"),
"ID": alert.get("name"),
"ExtendedProperties": properties.get("extendedProperties"),
"Entities": properties.get("entities"),
"SubscriptionID": properties.get("subscriptionId"),
}
]
md = tableToMarkdown(
"Azure Security Center - Get Alert - Basic Property",
basic_table_output,
[
"DisplayName",
"CompromisedEntity",
"Description",
"DetectedTime",
"ReportedTime",
"ReportedSeverity",
"ConfidenceScore",
"State",
"ActionTaken",
"CanBeInvestigated",
"RemediationSteps",
"VendorName",
"AssociatedResource",
"AlertName",
"InstanceID",
"ID",
],
removeNull=True,
)
ec = {
"AzureSecurityCenter.Alert(val.ID && val.ID === obj.ID)": basic_table_output
}
basic_table_entry = {
"Type": entryTypes["note"],
"Contents": alert,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
final_output.append(basic_table_entry)
# Extended Properties Table
if (
alert.get("properties")
and alert.get("properties")
and alert.get("properties").get("extendedProperties")
):
extended_properties = dict()
properties = alert.get("properties")
if isinstance(properties.get("extendedProperties"), dict):
for key, value in alert["properties"]["extendedProperties"].items():
extended_properties[normalize_context_key(key)] = value
extended_table_entry = {
"Type": entryTypes["note"],
"Contents": alert["properties"]["extendedProperties"],
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": tableToMarkdown(
"Azure Security Center - Get Alert - Extended Property",
extended_properties,
removeNull=True,
),
}
final_output.append(extended_table_entry)
# Entities Table
entities = properties.get("entities")
if entities:
        # entities is expected to be a list of entity dicts
        if isinstance(entities, list):
entities_table_output = list()
for entity in entities:
entities_table_output.append(
{
"Content": ast.literal_eval(str(entity)),
"Type": entity["type"],
}
)
md = tableToMarkdown(
"Azure Security Center - Get Alert - Entity",
entities_table_output,
removeNull=True,
)
entities_table_entry = {
"Type": entryTypes["note"],
"Contents": alert.get("properties").get("entities"),
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
}
final_output.append(entities_table_entry)
demisto.results(final_output)
def get_alert(resource_group_name, asc_location, alert_id):
"""Building query
Args:
resource_group_name (str): ResourceGroupName
asc_location (str): Azure Security Center location
alert_id (str): Alert ID
Returns:
response body (dict)
"""
cmd_url = ""
if resource_group_name:
cmd_url += "/resourceGroups/{}".format(resource_group_name)
cmd_url += "/providers/Microsoft.Security/locations/{}/alerts/{}?api-version={}".format(
asc_location, alert_id, ALERT_API_VERSION
)
response = http_request("GET", cmd_url)
return response
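# Example of the path built by get_alert() (descriptive comment, not from the
# original source; the resource group, location and alert id are hypothetical):
#
#   get_alert("my-rg", "centralus", "1234") requests
#   /resourceGroups/my-rg/providers/Microsoft.Security/locations/centralus/alerts/1234?api-version=2015-06-01-preview
#   relative to the subscription URL.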
def list_alerts_command(args):
"""Getting all alerts
Args:
args (dict): usually demisto.args()
"""
resource_group_name = args.get("resource_group_name")
asc_location = args.get("asc_location")
filter_query = args.get("filter")
select_query = args.get("select")
expand_query = args.get("expand")
alerts = list_alerts(
resource_group_name, asc_location, filter_query, select_query, expand_query
).get("value")
outputs = list()
for alert in alerts:
properties = alert.get("properties")
if properties:
outputs.append(
{
"DisplayName": properties.get("alertDisplayName"),
"CompromisedEntity": properties.get("compromisedEntity"),
"DetectedTime": properties.get("detectedTimeUtc"),
"ReportedSeverity": properties.get("reportedSeverity"),
"State": properties.get("state"),
"ActionTaken": properties.get("actionTaken"),
"Description": properties.get("description"),
"ID": alert.get("name"),
}
)
md = tableToMarkdown(
"Azure Security Center - List Alerts",
outputs,
[
"DisplayName",
"CompromisedEntity",
"DetectedTime",
"ReportedSeverity",
"State",
"ActionTaken",
"Description",
"ID",
],
removeNull=True,
)
ec = {"AzureSecurityCenter.Alert(val.ID && val.ID === obj.ID)": outputs}
entry = {
"Type": entryTypes["note"],
"Contents": alerts,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
demisto.results(entry)
def get_alerts(
resource_group_name, asc_location, filter_query, select_query, expand_query
):
"""Building query
Args:
resource_group_name (str): ResourceGroupName
asc_location (str): Azure Security Center location
filter_query (str): what to filter
select_query (str): what to select
expand_query (str): what to expand
Returns:
dict: contains response body
"""
cmd_url = ""
if resource_group_name:
cmd_url += "/resourceGroups/{}/providers/Microsoft.Security".format(
resource_group_name
)
        # ascLocation must be used together with resourceGroupName
if asc_location:
cmd_url += "/locations/{}".format(asc_location)
else:
cmd_url += "/providers/Microsoft.Security"
cmd_url += "/alerts?api-version={}".format(ALERT_API_VERSION)
if filter_query:
cmd_url += "&$filter={}".format(filter_query)
if select_query:
cmd_url += "&$select={}".format(select_query)
if expand_query:
cmd_url += "&$expand={}".format(expand_query)
response = http_request("GET", cmd_url)
return response
def list_alerts(
resource_group_name, asc_location, filter_query, select_query, expand_query
):
"""Listing alerts
Args:
resource_group_name (str): ResourceGroupName
asc_location (str): Azure Security Center location
filter_query (str): what to filter
select_query (str): what to select
expand_query (str): what to expand
Returns:
dict: contains response body
"""
cmd_url = ""
if resource_group_name:
cmd_url += "/resourceGroups/{}/providers/Microsoft.Security".format(
resource_group_name
)
        # ascLocation must be used together with resourceGroupName
if asc_location:
cmd_url += "/locations/{}".format(asc_location)
else:
cmd_url += "/providers/Microsoft.Security"
cmd_url += "/alerts?api-version={}".format(ALERT_API_VERSION)
if filter_query:
cmd_url += "&$filter={}".format(filter_query)
if select_query:
cmd_url += "&$select={}".format(select_query)
if expand_query:
cmd_url += "&$expand={}".format(expand_query)
response = http_request("GET", cmd_url)
return response
def update_alert_command(args):
"""Update given alert
Args:
args (dict): usually demisto.args()
"""
resource_group_name = args.get("resource_group_name")
asc_location = args.get("asc_location")
alert_id = args.get("alert_id")
alert_update_action_type = args.get("alert_update_action_type")
response = update_alert(
resource_group_name, asc_location, alert_id, alert_update_action_type
)
outputs = {"ID": response.get("id"), "ActionTaken": alert_update_action_type}
ec = {"AzureSecurityCenter.Alert(val.ID && val.ID === obj.ID)": outputs}
demisto.results(
{
"Type": entryTypes["note"],
"Contents": "Alert - {} has been set to {}.".format(
alert_id, alert_update_action_type
),
"ContentsFormat": formats["text"],
"EntryContext": ec,
}
)
def update_alert(resource_group_name, asc_location, alert_id, alert_update_action_type):
"""Building query
Args:
        resource_group_name (str): Resource Group Name
asc_location (str): Azure Security Center Location
alert_id (str): Alert ID
        alert_update_action_type (str): The update action to apply to the alert
Returns:
dict: response body
"""
cmd_url = ""
if resource_group_name:
cmd_url += "/resourceGroups/{}".format(resource_group_name)
cmd_url += "/providers/Microsoft.Security/locations/{}/alerts/{}/{}?api-version={}".format(
asc_location, alert_id, alert_update_action_type, ALERT_API_VERSION
)
return http_request("POST", cmd_url)
""" Alert End """
""" Location Start """
def list_locations_command():
"""Getting all locations
"""
locations = list_locations().get("value")
outputs = list()
if locations:
for location in locations:
if location.get("properties") and location.get("properties").get(
"homeRegionName"
):
home_region_name = location.get("properties").get("homeRegionName")
else:
home_region_name = None
outputs.append(
{
"HomeRegionName": home_region_name,
"Name": location.get("name"),
"ID": location.get("id"),
}
)
    md = 
heights : np.array or list
heights along flowline
climate_type : str
either 'monthly' or 'annual', if annual floor of year is used,
if monthly float year is converted into month and year
Returns
-------
(temp, tempformelt, prcp, prcpsol)
"""
y, m = floatyear_to_date(year)
if self.repeat:
y = self.ys + (y - self.ys) % (self.ye - self.ys + 1)
if y < self.ys or y > self.ye:
raise ValueError('year {} out of the valid time bounds: '
'[{}, {}]'.format(y, self.ys, self.ye))
if self.mb_type == 'mb_real_daily' or climate_type == 'annual':
if climate_type == 'annual':
#if type(year) == float:
# raise InvalidParamsError('')
pok = np.where(self.years == year)[0]
if len(pok) < 1:
raise ValueError('Year {} not in record'.format(int(year)))
else:
pok = np.where((self.years == y) & (self.months == m))[0]
if len(pok) < 28:
                    warnings.warn('unexpected number of entries per month '
                                  'for mb_real_daily')
else:
pok = np.where((self.years == y) & (self.months == m))[0][0]
# Read time series
# (already temperature bias and precipitation factor corrected!)
itemp = self.temp[pok]
iprcp = self.prcp[pok]
igrad = self.grad[pok]
# For each height pixel:
# Compute temp and tempformelt (temperature above melting threshold)
heights = np.asarray(heights)
npix = len(heights)
if self.mb_type == 'mb_real_daily' or climate_type == 'annual':
grad_temp = np.atleast_2d(igrad).repeat(npix, 0)
if len(pok) != 12 and self.mb_type != 'mb_real_daily':
                warnings.warn('unexpected number of entries per year')
grad_temp *= (heights.repeat(len(pok)).reshape(grad_temp.shape) -
self.ref_hgt)
temp2d = np.atleast_2d(itemp).repeat(npix, 0) + grad_temp
# temp_for_melt is computed separately depending on mb_type
temp2dformelt = self._get_tempformelt(temp2d, pok)
# Compute solid precipitation from total precipitation
prcp = np.atleast_2d(iprcp).repeat(npix, 0)
fac = 1 - (temp2d - self.t_solid) / (self.t_liq - self.t_solid)
prcpsol = prcp * clip_array(fac, 0, 1)
return temp2d, temp2dformelt, prcp, prcpsol
else:
temp = np.ones(npix) * itemp + igrad * (heights - self.ref_hgt)
# temp_for_melt is computed separately depending on mb_type
tempformelt = self._get_tempformelt(temp, pok)
prcp = np.ones(npix) * iprcp
fac = 1 - (temp - self.t_solid) / (self.t_liq - self.t_solid)
prcpsol = prcp * clip_array(fac, 0, 1)
return temp, tempformelt, prcp, prcpsol
def get_2d_avg_annual_air_hydro_temp(self, heights, year=None,
avg_climate = False, hs = 15,
hydro_m = 10):
raise NotImplementedError('not tested and only useful'
' for estimating refreezing')
# WIP
# (I won't work on this for the next half a year, but leave
        # it inside as a template)
# only computes avg annual air temperature in hydrological years,
# this is necessary for the refreezing estimate of Woodward et al.1997
# only tested for NH here
# IMPORTANT: this does not take into account that months
        # have different numbers of days (should I include the weighting as
# it is inside of PyGEM??? [see: https://github.com/drounce/PyGEM/blob/ac619bd1fd862b93a01068a0887efa2a97478b99/pygem/utils/_funcs.py#L10 )
#pok = np.where(self.years == year)[0]
if avg_climate:
year0 = year - hs
year1 = year + hs
else:
year0 = year
year1 = year
if self.mb_type != 'mb_real_daily':
pok_begin = np.where((self.years == year0-1) & (self.months == hydro_m))[0]
pok_end = np.where((self.years == year1) & (self.months == hydro_m-1))[0]
pok_hydro = np.arange(pok_begin, pok_end + 1, 1)
else:
pok_begin = np.where((self.years == year0-1) & (self.months == hydro_m))[0][0]
pok_end = np.where((self.years == year1) & (self.months == hydro_m-1))[0][-1]
pok_hydro = np.arange(pok_begin, pok_end + 1, 1)
assert self.months[pok_hydro[0]] == hydro_m
assert self.months[pok_hydro[-1]] == hydro_m - 1
if len(pok_hydro) < 1:
raise ValueError('Year {} not in record'.format(int(year)))
# Read time series
# (already temperature bias and precipitation factor corrected!)
itemp = self.temp[pok_hydro]
igrad = self.grad[pok_hydro]
heights = np.asarray(heights)
npix = len(heights)
grad_temp = np.atleast_2d(igrad).repeat(npix, 0)
if self.mb_type != 'mb_real_daily' and hydro_m == 10 and not avg_climate:
assert np.all(self.months[pok_hydro] == np.array([10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9]))
if len(pok_hydro) != 12 and self.mb_type != 'mb_real_daily' and not avg_climate:
            warnings.warn('unexpected number of entries per year')
grad_temp *= (heights.repeat(len(pok_hydro)).reshape(grad_temp.shape) -
self.ref_hgt)
temp2d = np.atleast_2d(itemp).repeat(npix, 0) + grad_temp
return temp2d
def _get_2d_monthly_climate(self, heights, year=None):
# first get the climate data
#warnings.warn('Attention: this has not been tested enough to be sure that '
# 'it works')
if self.mb_type == 'mb_real_daily':
return self._get_climate(heights, 'monthly', year=year)
else:
            raise InvalidParamsError('_get_2d_monthly_climate works only '
                                     'with mb_real_daily as mb_type!')
def get_monthly_climate(self, heights, year=None):
# first get the climate data
#warnings.warn('Attention: this has not been tested enough to be sure that \
# it works')
if self.mb_type == 'mb_real_daily':
t, tfmelt, prcp, prcpsol = self._get_climate(heights, 'monthly',
year=year)
return (t.mean(axis=1), tfmelt.sum(axis=1),
prcp.sum(axis=1), prcpsol.sum(axis=1))
else:
return self._get_climate(heights, 'monthly', year=year)
# if it is mb_real_daily, the data has daily resolution (2d array then)
def get_daily_climate(self, heights, year=None):
raise NotImplementedError('look at _get_2d_daily_climate instead')
def _get_2d_annual_climate(self, heights, year):
return self._get_climate(heights, 'annual', year=year)
def _get_2d_daily_climate(self, heights, year = None):
return self._get_climate(heights, 'annual', year=year)
# If I also want to use this outside of the class because
# (e.g. in climate.py), I have to change this again and remove the self...
# and somehow there is a problem if I put not self in
# _get_tempformelt when it is inside the class
def _get_tempformelt(self, temp, pok):
""" Helper function to compute tempformelt to avoid code duplication
in get_monthly_climate() and _get2d_annual_climate()
If using this again outside of this class, need to remove the "self",
such as for 'mb_climate_on_height' in climate.py, that has no self....
(would need to change temp, t_melt ,temp_std, mb_type, N, loop)
Input: stuff that is different for the different methods
temp: temperature time series
pok: indices of time series
Returns
-------
(tempformelt)
"""
tempformelt_without_std = temp - self.t_melt
# computations change only if 'mb_pseudo_daily' as mb_type!
if self.mb_type == 'mb_monthly' or self.mb_type == 'mb_real_daily':
tempformelt = tempformelt_without_std
elif self.mb_type == 'mb_pseudo_daily':
itemp_std = self.temp_std[pok]
# matrix with N values that are distributed around 0
# showing how much fake 'daily' values vary from the mean
z_scores_mean = stats.norm.ppf(np.arange(1/self.N-1/(2*self.N),
1, 1/self.N))
z_std = np.matmul(np.atleast_2d(z_scores_mean).T,
np.atleast_2d(itemp_std))
# there are two possibilities,
# not using the loop is most of the times faster
if self.loop is False:
# without the loop: but not much faster ..
tempformelt_daily = np.atleast_3d(tempformelt_without_std).T + \
np.atleast_3d(z_std)
clip_min(tempformelt_daily, 0, out=tempformelt_daily)
tempformelt_with_std = tempformelt_daily.mean(axis=0).T
else:
shape_tfm = np.shape(tempformelt_without_std)
tempformelt_with_std = np.full(shape_tfm, np.NaN)
for h in np.arange(0, np.shape(tempformelt_without_std)[0]):
h_tfm_daily_ = np.atleast_2d(tempformelt_without_std[h, :])
h_tempformelt_daily = h_tfm_daily_ + z_std
clip_min(h_tempformelt_daily, 0, out=h_tempformelt_daily)
h_tempformelt_monthly = h_tempformelt_daily.mean(axis=0)
tempformelt_with_std[h, :] = h_tempformelt_monthly
tempformelt = tempformelt_with_std
else:
            raise InvalidParamsError('mb_type can only be "mb_monthly", '
                                     '"mb_pseudo_daily" or "mb_real_daily"')
# replace all values below zero to zero
clip_min(tempformelt, 0, out=tempformelt)
return tempformelt
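    # Worked example for the 'mb_pseudo_daily' branch above (descriptive comment,
    # not from the original source): with N = 2, np.arange(1/2 - 1/4, 1, 1/2)
    # gives [0.25, 0.75], so stats.norm.ppf returns roughly [-0.67, +0.67]; each
    # monthly value is then spread into N fake daily values offset by z * temp_std
    # before clipping at 0 and averaging back to a monthly mean.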
# same as in OGGM default
def get_annual_climate(self, heights, year=None):
"""Annual climate information at given heights.
Note that prcp is corrected with the precipitation factor and that
all other model biases (temp and prcp) are applied.
Returns
-------
(temp, tempformelt, prcp, prcpsol)
"""
t, tfmelt, prcp, prcpsol = self._get_2d_annual_climate(heights, year)
return (t.mean(axis=1), tfmelt.sum(axis=1),
prcp.sum(axis=1), prcpsol.sum(axis=1))
class TIModel(TIModel_Parent):
    """Child class of TIModel_Parent that does not use surface type distinction."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_monthly_mb(self, heights, year=None, add_climate=False,
                       **kwargs):
        """Computes the monthly mass balance in m of ice per second.
        Attention: `year` is given here as a float hydrological year, from which
        the month is derived; e.g. year = 2000 -> y=2000, m=1 and year = 2000.09
        -> y=2000, m=2, which correspond to the real year 1999 and the months
        October or November if the hydrological year starts in October.
"""
# get_monthly_mb and get_annual_mb are only different
# to OGGM default for mb_real_daily
if self.mb_type == 'mb_real_daily':
# get 2D values, dependencies on height and time (days)
out = self._get_2d_monthly_climate(heights, year)
t, temp2dformelt, prcp, prcpsol = out
#(days per month)
#dom = 365.25/12 # len(prcpsol.T)
fact = 12/365.25
            # attention: do not use the actual number of days of the year here;
            # melt_f is a monthly temperature sensitivity (kg /m² /mth /K), so the
            # daily sums are rescaled by 12/365.25 (~1/mean days per month) to keep
            # the unit of melt_f consistent
mb_daily = prcpsol - self.melt_f * temp2dformelt * fact
mb_month = np.sum(mb_daily, axis=1)
= None
v_0, h_0, v_k, h_k = self.contrastive_divergence(
v,
k=k,
beta=betas[r],
h_0=h
)
res.append((v_k, h_k))
if include_negative_shift:
neg_res.append((v_0, h_0))
# 3. Simulated Annealing to perform swaps ("exchange particles")
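        # A note on the swap rule (descriptive comment, not from the original
        # source): adjacent temperatures r and r-1 exchange their particles with
        # Metropolis probability min(1, exp((beta_r - beta_{r-1}) *
        # (E(v_r, h_r) - E(v_{r-1}, h_{r-1})))), which is what the ratio `a`
        # below implements before comparing against the uniform draw `u`.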
for r in range(R - 1, 0, -1):
a = np.exp((betas[r] - betas[r - 1]) *
(self.energy(*res[r]) - self.energy(*res[r - 1])))
u = np.random.random(batch_size)
# acceptance mask
acc_mask = (u < a).reshape(batch_size, 1)
# reject mask
rej_mask = ~acc_mask
v = res[r][0] * acc_mask + res[r - 1][0] * rej_mask
h = res[r][1] * acc_mask + res[r - 1][1] * rej_mask
res[r - 1] = v, h
# TODO: this is useless, right? We're not ever using `res[r]` again
# in this iteration
v = res[r - 1][0] * acc_mask + res[r][0] * rej_mask
h = res[r - 1][1] * acc_mask + res[r][1] * rej_mask
res[r] = v, h
# warn user if very small/large number of samples rejected/accepted
# but don't if the `batch_size` is super small..
if r == 1 and batch_size > 2 and self._warned_acceptance < 10:
num_acc = acc_mask[acc_mask].shape[0]
if num_acc >= 0.9 * batch_size:
_log.warn(f"Large portion of tempered samples accepted ({num_acc} / {batch_size})")
self._warned_acceptance += 1
elif num_acc <= 0.1 * batch_size:
_log.warn(f"Small portion of tempered samples accepted ({num_acc} / {batch_size})")
self._warned_acceptance += 1
# possibly perform same for the negative shift
if include_negative_shift:
for r in range(R - 1, 0, -1):
a = np.exp((betas[r] - betas[r - 1]) *
(self.energy(*neg_res[r]) - self.energy(*neg_res[r - 1])))
u = np.random.random(batch_size)
# acceptance mask
acc_mask = (u < a).reshape(batch_size, 1)
# reject mask
rej_mask = ~acc_mask
v = neg_res[r][0] * acc_mask + neg_res[r - 1][0] * rej_mask
h = neg_res[r][1] * acc_mask + neg_res[r - 1][1] * rej_mask
neg_res[r - 1] = v, h
v = neg_res[r - 1][0] * acc_mask + neg_res[r][0] * rej_mask
h = neg_res[r - 1][1] * acc_mask + neg_res[r][1] * rej_mask
neg_res[r] = v, h
res_v = [r[0] for r in res]
res_h = [r[1] for r in res]
# return final state
if include_negative_shift:
neg_res_v = [r[0] for r in neg_res]
neg_res_h = [r[1] for r in neg_res]
return neg_res_v, neg_res_h, res_v, res_h
else:
return res_v, res_h
def _update(self, grad, lr=0.1):
# in case using `cupy`, can't use `np.shape`
# to obtain "shape" of single element; this is a fix
lr = np.asarray(lr)
gamma = lr
for i in range(len(self.variables)):
if lr.shape:
gamma = lr[i]
self.variables[i] -= gamma * grad[i]
def _apply_weight_decay(self, lmbda=0.01):
for i in range(len(self.variables)):
# default is gradient DEscent, so weight-decay also switches signs
self.variables[i] += lmbda * self.variables[i]
def step(self, v, k=1, lr=0.1, lmbda=0.0, **sampler_kwargs):
"Performs a single gradient DEscent step on the batch `v`."
# compute gradient for each observed visible configuration
grad = self.grad(v, k=k, **sampler_kwargs)
# update parameters
self._update(grad, lr=lr)
# possibly apply weight-decay
if lmbda > 0.0:
self._apply_weight_decay(lmbda=lmbda)
def reconstruct(self, v, num_samples=100):
samples = self.sample_visible(self.sample_hidden(v))
for _ in range(num_samples - 1):
samples += self.sample_visible(self.sample_hidden(v))
probs = samples / num_samples
return probs
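    # Hedged usage sketch (not part of the original code); `rbm` and `v_batch`
    # are hypothetical:
    #
    #   rbm = RBM(...)                      # an already-trained instance
    #   probs = rbm.reconstruct(v_batch)    # averages 100 one-step Gibbs
    #                                       # reconstructions of `v_batch`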
def grad(self, v, burnin=-1, persist=False, **sampler_kwargs):
if self.sampler_method.lower() == 'cd':
v_0, h_0, v_k, h_k = self.contrastive_divergence(
v,
**sampler_kwargs
)
elif self.sampler_method.lower() == 'pcd':
# Persistent Contrastive Divergence
if self._prev is not None:
v_0, h_0 = self._prev
else:
# ``burnin`` specified, we perform this to initialize the chain
if burnin > 0:
_log.info(f"Performing burnin of {burnin} steps to initialize PCD")
_, _, h_0, v_0 = self.contrastive_divergence(v, k=burnin, **sampler_kwargs)
else:
h_0 = self.sample_hidden(v, **sampler_kwargs)
v_0 = v
v_0, h_0, v_k, h_k = self.contrastive_divergence(
v,
h_0=h_0,
**sampler_kwargs
)
# persist
self._prev = (v_k, h_k)
elif self.sampler_method.lower() == 'pt':
h_0 = None
if self._prev is not None:
v_0, h_0 = self._prev
else:
_log.info("Initializing PT chain...")
v_0 = self._init_parallel_tempering(v, **sampler_kwargs)
# FIXME: make compatible with `parallel_tempering` returning
# all the states
if h_0 is None:
v_0, h_0, v_k, h_k = self.parallel_tempering(
v_0,
hs=h_0,
include_negative_shift=True,
**sampler_kwargs
)
elif sampler_kwargs.get("include_negative_shift", False):
v_0, h_0, v_k, h_k = self.parallel_tempering(
v_0,
hs=h_0,
**sampler_kwargs
)
else:
# FIXME: make compatible with `parallel_tempering` returning
# all the states
v_k, h_k = self.parallel_tempering(
v_0,
hs=h_0,
**sampler_kwargs
)
if persist:
self._prev = (v_k, h_k)
# take the first tempered distribution, i.e. the one corresponding
# the target distribution
v_0 = v_0[0]
h_0 = h_0[0]
v_k = v_k[0]
            h_k = h_k[0]
else:
raise ValueError(f"{self.sampler_method} is not supported")
# all expressions below using `v` or `mean_h` will contain
# AT LEAST one factor of `1 / v_sigma` and `1 / h_sigma`, respectively
# so we include those right away
v_0 = v_0 / self.v_sigma
v_k = v_k / self.v_sigma
mean_h_0 = self.mean_hidden(v_0) / self.h_sigma
mean_h_k = self.mean_hidden(v_k) / self.h_sigma
# Recall: `v_sigma` and `h_sigma` has no affect if they are set to 1
# v_0 / (v_sigma^2) - v_k / (v_sigma^2)
delta_v_bias = (v_0 - v_k) / self.v_sigma
# E[h_0 | v_0] / (h_sigma^2) - E[h_k | v_k] / (h_sigma^2)
delta_h_bias = (mean_h_0 - mean_h_k) / self.h_sigma
# Gradient wrt. W
# (v_0 / v_sigma) (1 / h_sigma) E[h_0 | v_0] - (v_k / v_sigma) (1 / h_sigma) E[h_k | v_k]
x = mean_h_0.reshape(mean_h_0.shape[0], 1, mean_h_0.shape[1])
y = v_0.reshape(v_0.shape[0], v_0.shape[1], 1)
z_0 = np.matmul(y, x)
x = mean_h_k.reshape(mean_h_k.shape[0], 1, mean_h_k.shape[1])
y = v_k.reshape(v_k.shape[0], v_k.shape[1], 1)
z_k = np.matmul(y, x)
delta_W = z_0 - z_k
# average over batch take the negative
delta_v_bias = - np.mean(delta_v_bias, axis=0)
delta_h_bias = - np.mean(delta_h_bias, axis=0)
delta_W = - np.mean(delta_W, axis=0)
grads = [delta_v_bias, delta_h_bias, delta_W]
# variances
if self.visible_type == UnitType.GAUSSIAN \
and self.estimate_visible_sigma:
# in `GaussianRBM`, where only VISIBLE units Gaussian,
# we only compute `v_sigma`
# (((v_0 - b)^2 / (v_sigma^2)) - (v / (v_sigma)) \sum_{\mu} E[h_{\mu} | v] / sigma_{\mu}) / v_sigma
delta_v_sigma_data = (((v_0 - (self.v_bias / self.v_sigma)) ** 2)
- v_0 * (np.matmul(mean_h_0, self.W.T)))
delta_v_sigma_model = (((v_k - (self.v_bias / self.v_sigma)) ** 2)
- v_k * (np.matmul(mean_h_k, self.W.T)))
delta_v_sigma = (delta_v_sigma_data - delta_v_sigma_model) / self.v_sigma
# average over batch take the negative
delta_v_sigma = - np.mean(delta_v_sigma, axis=0)
grads.append(delta_v_sigma)
if self.hidden_type == UnitType.GAUSSIAN \
and self.estimate_hidden_sigma:
# TODO: Implement
raise NotImplementedError("gradients for gaussian hidden"
" units not yet implemented")
delta_h_sigma_data = (((h_0 - (self.h_bias / self.h_sigma)) ** 2)
- h_0 * (np.matmul(mean_h_0, self.W.T)))
delta_h_sigma_model = (((h_k - (self.h_bias / self.h_sigma)) ** 2)
- h_k * (np.matmul(mean_h_k, self.W.T)))
delta_h_sigma = delta_h_sigma_data - delta_h_sigma_model
# average over batch take the negative
delta_h_sigma = - np.mean(delta_h_sigma, axis=0)
grads.append(delta_h_sigma)
return grads
def fit(self, train_data,
k=1,
learning_rate=0.01,
num_epochs=5,
batch_size=64,
test_data=None,
show_progress=True,
weight_decay=0.0,
early_stopping=-1,
callbacks={},
**sampler_kwargs):
"""
Parameters
----------
train_data: array-like
Data to fit RBM on.
k: int, default=1
Number of sampling steps to perform. Used by CD-k, PCD-k and PT.
learning_rate: float or array, default=0.01
Learning rate used when updating the parameters.
Can also be array of same length as `self.variables`, in
which case the learning rate at index `i` will be used to
to update ``RBM.variables[i]``.
num_epochs: int, default=5
Number of epochs to train.
batch_size: int, default=64
            Batch size used within each epoch.
test_data: array-like, default=None
Data similar to ``train_data``, but this will only be used as
validation data, not trained on.
If specified, will compute and print the free energy / negative
log-likelihood on this dataset after each epoch.
show_progress: bool, default=True
If true, will display progress bar for each epoch.
weight_decay: float, default=0.0
If greater than 0.0, weight decay will be applied to the
parameter updates. See :func:`RBM.step` for more information.
early_stopping: int, default=-1
If ``test_data`` is given and ``early_stopping > 0``, training
will terminate after epoch if the free energy of the
            ``test_data`` did not improve over the last ``early_stopping``
epochs.
Returns
-------
nlls_train, nlls_test : array-like, array-like
Returns the free energy of both ``train_data`` and ``test_data``
as computed at each epoch.
"""
num_samples = train_data.shape[0]
indices = np.arange(num_samples)
np.random.shuffle(indices)
nlls_train = []
nlls = []
prev_best = None
for epoch in range(1, num_epochs + 1):
if "pre_epoch" in callbacks:
                for
# keras_text_summarization/library/attention.py
from __future__ import division, print_function
import abc
from collections import OrderedDict
from warnings import warn
import numpy as np
from keras import backend as K
from keras.engine import InputSpec
from keras.layers import Dense, concatenate
from keras.layers.recurrent import Recurrent
from keras_text_summarization.library.children_layers_mixin import ChildLayersMixin
from keras_text_summarization.library.distribution import (
MixtureDistributionABC,
DistributionOutputLayer
)
class RecurrentAttention(ChildLayersMixin, Recurrent):
"""Abstract base class for recurrent attention layers.
Do not use in a model -- it's not a valid layer! Use its children classes
`X`, `Y` and `Z` instead.
All recurrent attention layers (`X`, `Y`, `Z`) also follow the
specifications of this class and accept the keyword arguments listed below.
# TODO add general description, example, and references.
Attention implementations extending this class should implement the
following methods:
attention_build
attention_step
as well as the property:
attention_output_dim
If the attention implementation requires state(s) to be passed between
attention computations at each timestep (apart from previous attention
representation `attention_h` which is passed by default) the following
    method and properties should also be modified accordingly:
get_attention_initial_state
attention_states
attention_state_specs
See docs of respective method/property for further details.
# Arguments
recurrent_layer: layers.recurrent.Recurrent. The recurrent layer to
wrap with attention implemented by this class (see attention_step).
The following keyword arguments [return_sequences, return_state,
go_backwards, stateful] should be set to their default value
False, will otherwise be overwritten. The corresponding
keyword argument should be passed to this class instead. Moreover
it is required that recurrent_layer.implementation == 1, i.e.
preprocessed_inputs should be identical to inputs after calling:
preprocessed_inputs = recurrent_layer.preprocess_input(inputs).
return_attention: Boolean (default False). Whether to return attention
representation `attention_h` besides wrapped recurrent layers
output or just the output.
concatenate_input: Boolean (default True). Whether to pass the
concatenation of the attention representation and input at each
timestep to the wrapped recurrent_layer.step() or just the
attention representation `attention_h`.
attend_after: Boolean (default False). Whether to compute attention
representation `attention_h` after recurrent_layer.step operation
(based on states_t and used as input for recurrent_layer.step at
t+1) or before (based on states_{t-1} and used as input for
recurrent_layer.step at t). See methods `attend_after_step` and
`attend_before_step` for more details.
# Keyword Arguments passed to superclass Recurrent
return_sequences: Boolean (default False). Whether to return the last
output in the output sequence, or the full sequence. Same goes for
attention representation `attention_h` if return_attention = True.
return_state: Boolean (default False). Whether to return the last state
in addition to the output. This includes attention states.
Apart from these arguments, this layer also accept all keyword
arguments of its superclass Recurrent.
"""
def __init__(self, recurrent_layer,
return_attention=False,
concatenate_input=True,
attend_after=False,
**kwargs):
super(RecurrentAttention, self).__init__(**kwargs)
self.recurrent_layer = self.add_child(
'recurrent_layer',
recurrent_layer
)
self.return_attention = return_attention
self.concatenate_input = concatenate_input
self.attend_after = attend_after
self.input_spec = [InputSpec(ndim=3), None]
self._attended_spec = InputSpec(ndim=2)
self._attention_step_output_spec = InputSpec(ndim=2)
self._attention_state_spec = [InputSpec(ndim=2)]
self._attention_states = [None]
# will be set in call, then passed to step by get_constants
self._attended = None
@abc.abstractmethod
def attention_build(
self,
attended_shape,
step_input_shape,
recurrent_state_shapes
):
"""Build transformations related to attention mechanism, will be called
in build method.
# Arguments
attended_shape: Tuple. Shape of attended.
step_input_shape: Tuple. Shape of input at _one_ timestep
recurrent_state_shapes: [Tuple]. shape of wrapped recurrent
states
"""
pass
@abc.abstractproperty
def attention_output_dim(self):
"""Must be defined after attention_build is called, _independently_ of
input shape.
Normally we would pass input_shape to compute_output_shape but
this would lead to infinite recursion as the output from the wrapped
recurrent layer is passed as input to the attention mechanism, and the
output of the attention mechanism is passed as input to the wrapped
recurrent layer. This should normally not cause any problems as
attention_output_dim should be completely defined after attention_build
is called.
# Returns
dimension of attention output (int)
"""
pass
@abc.abstractmethod
def attention_step(
self,
attended,
attention_states,
step_input,
recurrent_states,
):
"""This method implements the core logic for computing the attention
representation.
# Arguments
attended: the same tensor at each timestep
attention_states: states from previous attention step, by
default attention from last step but can be extended
step_input: the input at current timesteps
recurrent_states: states for recurrent layer (excluding constants
like dropout tensors) from previous state if attend_after=False
otherwise from current time step.
# Returns
attention_h: the computed attention representation at current
timestep
attention_states: states to be passed to next attention_step, by
default this is just [attention_h]. NOTE if more states are
used, these should be _appeded_ to attention states,
attention_states[0] should always be attention_h.
"""
pass
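    # A minimal, hypothetical sketch of an `attention_step` implementation
    # (not part of the original code; `self.W_a` and `self.U_a` would have to
    # be created in `attention_build`):
    #
    #   def attention_step(self, attended, attention_states, step_input,
    #                      recurrent_states):
    #       attention_h = K.tanh(K.dot(attended, self.W_a) +
    #                            K.dot(recurrent_states[0], self.U_a))
    #       return attention_h, [attention_h]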
def get_attention_initial_state(self, inputs):
"""Creates initial state for attention mechanism. By default the
attention representation `attention_h` computed by attention_step is
passed as attention state between timesteps.
        Attention implementations that require additional states
        must override this method accordingly.
# Arguments
inputs: layer inputs
# Returns
list (length one) of initial state (zeros)
"""
# build an all-zero tensor of shape (samples, output_dim)
initial_state = K.zeros_like(inputs) # (samples, timesteps, input_dim)
initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)
initial_state = K.expand_dims(initial_state) # (samples, 1)
initial_state = K.tile(initial_state, [1, self.attention_output_dim]) # (samples, output_dim)
return [initial_state]
def _validate_wrapped_recurrent(self, recurrent_layer):
"""Only default keyword arguments should be used for wrapped recurrent
layer for keywords listed below.
"""
wrapped_recurrent_expected_attrs = dict(
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False
)
for attr, expected_value in wrapped_recurrent_expected_attrs.items():
if not getattr(recurrent_layer, attr) == expected_value:
warn(
'non default value for {recurrent_class}.{attr}, '
'found {existing}, expected {expected}. This attribute '
'will be overwritten with expected value. The '
'corresponding argument should instead be passed to the '
'{self_class}'.format(
recurrent_class=recurrent_layer.__class__.__name__,
attr=attr,
existing=getattr(recurrent_layer, attr),
expected=expected_value,
self_class=self.__class__.__name__
)
)
setattr(recurrent_layer, attr, expected_value)
@property
def attended_spec(self):
return self._attended_spec
@property
def attention_step_output_spec(self):
return self._attention_step_output_spec
@property
def attention_state_spec(self):
return self._attention_state_spec
@property
def attention_states(self):
return self._attention_states
@property
def n_attention_states(self):
return len(self.attention_states)
@property
def n_recurrent_states(self):
return len(self.recurrent_layer.states)
@property
def states(self):
return self.attention_states + self.recurrent_layer.states
@property
def state_specs(self):
recurrent_state_spec = self.recurrent_layer.state_spec
if not isinstance(recurrent_state_spec, list):
recurrent_state_spec = [recurrent_state_spec]
return self.attention_state_spec + recurrent_state_spec
def compute_output_shape(self, input_shape):
""""""
[input_shape, attention_shape] = input_shape
recurrent_output_shape = self._compute_recurrent_step_output_shape(
input_shape
)
if self.return_sequences:
if self.return_attention:
output_shape = [
(input_shape[0], input_shape[1], self.attention_output_dim),
input_shape[:2] + recurrent_output_shape[1:]
]
else:
output_shape = input_shape[:2] + recurrent_output_shape[1:]
else:
if self.return_attention:
output_shape = [
(input_shape[0], self.attention_output_dim),
recurrent_output_shape
]
else:
output_shape = recurrent_output_shape
if self.return_state:
if not isinstance(output_shape, list):
output_shape = [output_shape]
attention_state_shape = [
(input_shape[0], spec.shape[-1])
for spec in self.attention_state_spec
]
recurrent_state_shape = [
(input_shape[0], self.recurrent_layer.units)
for _ in self.recurrent_layer.states
]
return output_shape + attention_state_shape + recurrent_state_shape
else:
return output_shape
def _compute_recurrent_step_output_shape(
self,
recurrent_input_shape,
):
"""Computes wrapped recurrent "step" output shape (no time/sequence
dimension).
Normally this output shape is simply:
(input_shape[0], wrapped_recurrent.units)
However the approach in this method is more safe for custom recurrent
layers where this might not be the case.
# Returns
The wrapped recurrent (step) output shape (int, int)
"""
wrapped_recurrent_input_shape = (
recurrent_input_shape[0],
recurrent_input_shape[1],
self.attention_output_dim + recurrent_input_shape[-1]
if self.concatenate_input else self.attention_output_dim
)
return self.recurrent_layer.compute_output_shape(
wrapped_recurrent_input_shape
)
# it is verified that this will return the step output shape
# since return sequences must be False in recurrent layer
def build(self, input_shape):
[input_shape, attended_shape] = input_shape
self.input_spec = [ # TODO remove?
InputSpec(shape=input_shape),
InputSpec(shape=attended_shape)
]
step_input_shape = (input_shape[0], input_shape[-1])
# TODO for existing keras recurrent layers state size is always units
# but that is not very general...
recurrent_state_shapes = [
input_shape[:1] + spec.shape[1:]
for spec in self.recurrent_layer.state_spec
] if isinstance(self.recurrent_layer.state_spec, list) else [(
input_shape[:1] + self.recurrent_layer.state_spec.shape[1:]
)]
self.attention_build(
attended_shape,
step_input_shape,
recurrent_state_shapes
)
self._attended_spec = InputSpec(shape=attended_shape)
wrapped_recurrent_input_shape = (
input_shape[0],
input_shape[1],
self.attention_output_dim + input_shape[-1]
if self.concatenate_input else self.attention_output_dim
)
self.recurrent_layer.build(wrapped_recurrent_input_shape)
self.built = True
def __call__(self, inputs, initial_state=None, **kwargs):
"""
# Arguments
inputs: list of [recurrent_input, attended]
TODO separate keyword for attended?
"""
outputs = super(RecurrentAttention, self).__call__(
inputs,
initial_state=initial_state,
**kwargs
)
if self.return_attention:
output = outputs[0][:self.recurrent_layer.units]
attention = output[0][self.recurrent_layer.units:]
outputs = [output, attention] + outputs[1:]
return outputs
def call(
self,
inputs,
mask=None,
training=None,
initial_state=None,
):
inputs, self._attended = inputs
return super(RecurrentAttention, self).call(
inputs,
mask=mask,
training=training,
initial_state=initial_state,
)
def get_constants(self, inputs, training=None):
constants = self.recurrent_layer.get_constants(
inputs,
training=training
)
constants.append(self._attended)
return constants
def step(self, inputs, states):
state_components = self.get_states_components(states)
if self.attend_after:
return self.attend_after_step(inputs, *state_components)
else:
return self.attend_before_step(inputs, *state_components)
def attend_before_step(
self,
inputs,
attended,
attention_states_tm1,
recurrent_states_tm1,
| |
4, 3, 1, 1)
for r in range(4, 5):
self.main_tab_grid_layout_0.setRowStretch(r, 1)
for c in range(3, 4):
self.main_tab_grid_layout_0.setColumnStretch(c, 1)
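        # GRC-generated probe pattern: the nested function below polls
        # self.phase_probe.level() ten times per second on a daemon thread and
        # forwards each reading to set_phase_rad().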
def _phase_rad_probe():
while True:
val = self.phase_probe.level()
try:
self.set_phase_rad(val)
except AttributeError:
pass
time.sleep(1.0 / (10))
_phase_rad_thread = threading.Thread(target=_phase_rad_probe)
_phase_rad_thread.daemon = True
_phase_rad_thread.start()
self._phase_label_tool_bar = Qt.QToolBar(self)
if None:
self._phase_label_formatter = None
else:
self._phase_label_formatter = lambda x: eng_notation.num_to_str(x)
self._phase_label_tool_bar.addWidget(Qt.QLabel("phase_label"+": "))
self._phase_label_label = Qt.QLabel(str(self._phase_label_formatter(self.phase_label)))
self._phase_label_tool_bar.addWidget(self._phase_label_label)
self.main_tab_grid_layout_3.addWidget(self._phase_label_tool_bar, 4, 0, 1, 1)
for r in range(4, 5):
self.main_tab_grid_layout_3.setRowStretch(r, 1)
for c in range(0, 1):
self.main_tab_grid_layout_3.setColumnStretch(c, 1)
self._phase_deg_tool_bar = Qt.QToolBar(self)
self._phase_deg_tool_bar.addWidget(Qt.QLabel("phase_deg"+": "))
self._phase_deg_line_edit = Qt.QLineEdit(str(self.phase_deg))
self._phase_deg_tool_bar.addWidget(self._phase_deg_line_edit)
self._phase_deg_line_edit.returnPressed.connect(
lambda: self.set_phase_deg(eng_notation.str_to_num(str(self._phase_deg_line_edit.text().toAscii()))))
self.main_tab_grid_layout_3.addWidget(self._phase_deg_tool_bar, 4, 4, 1, 1)
for r in range(4, 5):
self.main_tab_grid_layout_3.setRowStretch(r, 1)
for c in range(4, 5):
self.main_tab_grid_layout_3.setColumnStretch(c, 1)
self.low_pass_filter_1 = filter.fir_filter_ccf(1, firdes.low_pass(
1, samp_rate, lpf_cut, 1e3, firdes.WIN_HAMMING, 6.76))
self.low_pass_filter_0 = filter.fir_filter_ccf(1, firdes.low_pass(
1, samp_rate, lpf_cut, 1e3, firdes.WIN_HAMMING, 6.76))
self._idx_label_tool_bar = Qt.QToolBar(self)
if None:
self._idx_label_formatter = None
else:
self._idx_label_formatter = lambda x: eng_notation.num_to_str(x)
self._idx_label_tool_bar.addWidget(Qt.QLabel("idx_label"+": "))
self._idx_label_label = Qt.QLabel(str(self._idx_label_formatter(self.idx_label)))
self._idx_label_tool_bar.addWidget(self._idx_label_label)
self.main_tab_grid_layout_2.addWidget(self._idx_label_tool_bar, 4, 3, 1, 1)
for r in range(4, 5):
self.main_tab_grid_layout_2.setRowStretch(r, 1)
for c in range(3, 4):
self.main_tab_grid_layout_2.setColumnStretch(c, 1)
self.fft_vxx_2 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 4)
self.fft_vxx_1 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 4)
self.fft_vxx_0 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 4)
self.blocks_vector_to_stream_0_0 = blocks.vector_to_stream(gr.sizeof_gr_complex*1, nfft)
self.blocks_throttle_1 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.blocks_sub_xx_1_0 = blocks.sub_ff(1)
self.blocks_sub_xx_1 = blocks.sub_ff(1)
self.blocks_sub_xx_0_0 = blocks.sub_ff(1)
self.blocks_sub_xx_0 = blocks.sub_ff(1)
self.blocks_stream_to_vector_1 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, nfft)
self.blocks_stream_to_vector_0 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, nfft)
self.blocks_skiphead_0 = blocks.skiphead(gr.sizeof_gr_complex*1, 95)
self.blocks_short_to_float_0 = blocks.short_to_float(1, 1)
self.blocks_null_sink_0 = blocks.null_sink(gr.sizeof_short*1)
self.blocks_multiply_const_vxx_2_0 = blocks.multiply_const_vff((samp_rate/(2*math.pi), ))
self.blocks_multiply_const_vxx_2 = blocks.multiply_const_vff((samp_rate/(2*math.pi), ))
self.blocks_multiply_const_vxx_1_1_0 = blocks.multiply_const_vff((180.0/math.pi, ))
self.blocks_multiply_const_vxx_1_1 = blocks.multiply_const_vff((180.0/math.pi, ))
self.blocks_multiply_const_vxx_1_0_2_0 = blocks.multiply_const_vff((180.0/math.pi, ))
self.blocks_multiply_const_vxx_1_0_2 = blocks.multiply_const_vff((180.0/math.pi, ))
self.blocks_multiply_const_vxx_1_0_1 = blocks.multiply_const_vcc((complex(0,1), ))
self.blocks_multiply_const_vxx_1_0_0_2_0 = blocks.multiply_const_vff((180.0/math.pi, ))
self.blocks_multiply_const_vxx_1_0_0_2 = blocks.multiply_const_vff((180.0/math.pi, ))
self.blocks_multiply_const_vxx_1_0_0_1 = blocks.multiply_const_vcc((complex(math.cos(-1*phase_rad2),math.sin(-1*phase_rad2)), ))
self.blocks_multiply_const_vxx_1_0_0_0 = blocks.multiply_const_vcc((complex(0,-1), ))
self.blocks_multiply_const_vxx_1_0_0 = blocks.multiply_const_vff((180.0/math.pi, ))
self.blocks_multiply_const_vxx_1_0 = blocks.multiply_const_vff((180.0/math.pi, ))
self.blocks_multiply_const_vxx_1 = blocks.multiply_const_vff((180.0/math.pi, ))
self.blocks_multiply_conjugate_cc_0 = blocks.multiply_conjugate_cc(nfft)
self.blocks_moving_average_xx_0_1 = blocks.moving_average_ff(int(avg_len), 1.0/avg_len, 4000, 1)
self.blocks_moving_average_xx_0_0_0 = blocks.moving_average_ff(int(avg_len), 1.0/avg_len, 4000, 1)
self.blocks_moving_average_xx_0_0 = blocks.moving_average_ff(int(avg_len), 1.0/avg_len, 4000, 1)
self.blocks_moving_average_xx_0 = blocks.moving_average_ff(int(avg_len), 1.0/avg_len, 4000, 1)
self.blocks_keep_m_in_n_0_0_0 = blocks.keep_m_in_n(gr.sizeof_float, 1, nfft, int(idx_max))
self.blocks_delay_0_0 = blocks.delay(gr.sizeof_gr_complex*1, int(ew_delay))
self.blocks_delay_0 = blocks.delay(gr.sizeof_gr_complex*1, int(ns_delay))
self.blocks_complex_to_mag_0 = blocks.complex_to_mag(nfft)
self.blocks_complex_to_arg_0_0_0_2 = blocks.complex_to_arg(1)
self.blocks_complex_to_arg_0_0_0_1 = blocks.complex_to_arg(1)
self.blocks_complex_to_arg_0_0_0_0_1 = blocks.complex_to_arg(1)
self.blocks_complex_to_arg_0_0_0_0_0 = blocks.complex_to_arg(1)
self.blocks_complex_to_arg_0_0_0_0 = blocks.complex_to_arg(1)
self.blocks_complex_to_arg_0_0_0 = blocks.complex_to_arg(1)
self.blocks_complex_to_arg_0 = blocks.complex_to_arg(1)
self.blocks_argmax_xx_0 = blocks.argmax_fs(nfft)
self.blocks_add_xx_0_0 = blocks.add_vcc(1)
self.blocks_add_xx_0 = blocks.add_vcc(1)
self.blocks_add_const_vxx_0 = blocks.add_const_vff((-nfft/2.0, ))
self.analog_pll_freqdet_cf_0_0 = analog.pll_freqdet_cf(math.pi/pll_lbw, math.pi/pll_freq, -1*math.pi/pll_freq)
self.analog_pll_freqdet_cf_0 = analog.pll_freqdet_cf(math.pi/pll_lbw, math.pi/pll_freq, -1*math.pi/pll_freq)
self.analog_pll_carriertracking_cc_0_0 = analog.pll_carriertracking_cc(math.pi/pll_lbw, math.pi/pll_freq, -math.pi/pll_freq)
self.analog_pll_carriertracking_cc_0 = analog.pll_carriertracking_cc(math.pi/pll_lbw, math.pi/pll_freq, -math.pi/pll_freq)
##################################################
# Connections
##################################################
self.connect((self.analog_pll_carriertracking_cc_0, 0), (self.qtgui_freq_sink_x_0_0_0_0, 1))
self.connect((self.analog_pll_carriertracking_cc_0, 0), (self.qtgui_waterfall_sink_x_0_0_0_0_0, 0))
self.connect((self.analog_pll_carriertracking_cc_0_0, 0), (self.qtgui_freq_sink_x_0_0_0_0, 0))
self.connect((self.analog_pll_carriertracking_cc_0_0, 0), (self.qtgui_waterfall_sink_x_0_1_0_0, 0))
self.connect((self.analog_pll_freqdet_cf_0, 0), (self.blocks_multiply_const_vxx_2, 0))
self.connect((self.analog_pll_freqdet_cf_0_0, 0), (self.blocks_multiply_const_vxx_2_0, 0))
self.connect((self.blocks_add_const_vxx_0, 0), (self.max_idx_probe1, 0))
self.connect((self.blocks_add_const_vxx_0, 0), (self.qtgui_histogram_sink_x_0, 0))
self.connect((self.blocks_add_const_vxx_0, 0), (self.qtgui_number_sink_0, 0))
self.connect((self.blocks_add_xx_0, 0), (self.analog_pll_carriertracking_cc_0_0, 0))
self.connect((self.blocks_add_xx_0, 0), (self.analog_pll_freqdet_cf_0, 0))
self.connect((self.blocks_add_xx_0, 0), (self.blocks_complex_to_arg_0_0_0_2, 0))
self.connect((self.blocks_add_xx_0, 0), (self.qtgui_freq_sink_x_0_0_0, 0))
self.connect((self.blocks_add_xx_0, 0), (self.qtgui_waterfall_sink_x_0_1_0, 0))
self.connect((self.blocks_add_xx_0_0, 0), (self.analog_pll_carriertracking_cc_0, 0))
self.connect((self.blocks_add_xx_0_0, 0), (self.analog_pll_freqdet_cf_0_0, 0))
self.connect((self.blocks_add_xx_0_0, 0), (self.blocks_complex_to_arg_0_0_0_0_1, 0))
self.connect((self.blocks_add_xx_0_0, 0), (self.qtgui_freq_sink_x_0_0_0, 1))
self.connect((self.blocks_add_xx_0_0, 0), (self.qtgui_waterfall_sink_x_0_0_0_0, 0))
self.connect((self.blocks_argmax_xx_0, 1), (self.blocks_null_sink_0, 0))
self.connect((self.blocks_argmax_xx_0, 0), (self.blocks_short_to_float_0, 0))
self.connect((self.blocks_complex_to_arg_0, 0), (self.blocks_keep_m_in_n_0_0_0, 0))
self.connect((self.blocks_complex_to_arg_0_0_0, 0), (self.blocks_multiply_const_vxx_1_1, 0))
self.connect((self.blocks_complex_to_arg_0_0_0, 0), (self.blocks_sub_xx_0, 0))
self.connect((self.blocks_complex_to_arg_0_0_0_0, 0), (self.blocks_multiply_const_vxx_1_0, 0))
self.connect((self.blocks_complex_to_arg_0_0_0_0, 0), (self.blocks_sub_xx_0, 1))
self.connect((self.blocks_complex_to_arg_0_0_0_0_0, 0), (self.blocks_multiply_const_vxx_1_0_0, 0))
self.connect((self.blocks_complex_to_arg_0_0_0_0_0, 0), (self.blocks_sub_xx_0_0, 1))
self.connect((self.blocks_complex_to_arg_0_0_0_0_1, 0), (self.blocks_multiply_const_vxx_1_0_0_2_0, 0))
self.connect((self.blocks_complex_to_arg_0_0_0_1, 0), (self.blocks_multiply_const_vxx_1_1_0, 0))
self.connect((self.blocks_complex_to_arg_0_0_0_1, 0), (self.blocks_sub_xx_0_0, 0))
self.connect((self.blocks_complex_to_arg_0_0_0_2, 0), (self.blocks_multiply_const_vxx_1_0_0_2, 0))
self.connect((self.blocks_complex_to_mag_0, 0), (self.blocks_argmax_xx_0, 0))
self.connect((self.blocks_complex_to_mag_0, 0), (self.qtgui_vector_sink_f_0, 0))
self.connect((self.blocks_delay_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_delay_0_0, 0), (self.low_pass_filter_1, 0))
self.connect((self.blocks_delay_0_0, 0), (self.qtgui_freq_sink_x_0, 1))
self.connect((self.blocks_delay_0_0, 0), (self.qtgui_waterfall_sink_x_0_0, 0))
self.connect((self.blocks_keep_m_in_n_0_0_0, 0), (self.blocks_multiply_const_vxx_1, 0))
self.connect((self.blocks_moving_average_xx_0, 0), (self.blocks_sub_xx_1, 0))
self.connect((self.blocks_moving_average_xx_0, 0), (self.qtgui_histogram_sink_x_1, 0))
self.connect((self.blocks_moving_average_xx_0, 0), (self.qtgui_time_sink_x_2, 0))
self.connect((self.blocks_moving_average_xx_0_0, 0), (self.blocks_sub_xx_1, 1))
self.connect((self.blocks_moving_average_xx_0_0, 0), (self.qtgui_histogram_sink_x_1, 1))
self.connect((self.blocks_moving_average_xx_0_0, 0), (self.qtgui_time_sink_x_2, 1))
self.connect((self.blocks_moving_average_xx_0_0_0, 0), (self.blocks_sub_xx_1_0, 1))
self.connect((self.blocks_moving_average_xx_0_1, 0), (self.blocks_sub_xx_1_0, 0))
self.connect((self.blocks_multiply_conjugate_cc_0, 0), (self.fft_vxx_2, 0))
self.connect((self.blocks_multiply_const_vxx_1, 0), (self.qtgui_histogram_sink_x_0_0, 0))
self.connect((self.blocks_multiply_const_vxx_1, 0), (self.qtgui_number_sink_0, 1))
self.connect((self.blocks_multiply_const_vxx_1_0, 0), (self.qtgui_time_sink_x_0, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_0, 0), (self.qtgui_time_sink_x_0_1, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_0_0, 0), (self.blocks_add_xx_0_0, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_0_1, 0), (self.blocks_complex_to_arg_0_0_0_0_0, 0))
self.connect((self.blocks_multiply_const_vxx_1_0_0_1, 0), (self.blocks_multiply_const_vxx_1_0_0_0, 0))
self.connect((self.blocks_multiply_const_vxx_1_0_0_1, 0), (self.blocks_multiply_const_vxx_1_0_1, 0))
self.connect((self.blocks_multiply_const_vxx_1_0_0_1, 0), (self.qtgui_time_sink_x_1, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_0_2, 0), (self.blocks_moving_average_xx_0_1, 0))
self.connect((self.blocks_multiply_const_vxx_1_0_0_2_0, 0), (self.blocks_moving_average_xx_0_0_0, 0))
self.connect((self.blocks_multiply_const_vxx_1_0_1, 0), (self.blocks_add_xx_0, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_2, 0), (self.qtgui_time_sink_x_0, 2))
self.connect((self.blocks_multiply_const_vxx_1_0_2_0, 0), (self.qtgui_time_sink_x_0_1, 2))
self.connect((self.blocks_multiply_const_vxx_1_1, 0), (self.qtgui_time_sink_x_0, 0))
self.connect((self.blocks_multiply_const_vxx_1_1_0, 0), (self.qtgui_time_sink_x_0_1, 0))
self.connect((self.blocks_multiply_const_vxx_2, 0), (self.blocks_moving_average_xx_0, 0))
self.connect((self.blocks_multiply_const_vxx_2_0, 0), (self.blocks_moving_average_xx_0_0, 0))
self.connect((self.blocks_short_to_float_0, 0), (self.blocks_add_const_vxx_0, 0))
self.connect((self.blocks_skiphead_0, 0), (self.blocks_delay_0, 0))
self.connect((self.blocks_stream_to_vector_0, 0), (self.fft_vxx_0, 0))
self.connect((self.blocks_stream_to_vector_1, 0), (self.fft_vxx_1, 0))
self.connect((self.blocks_sub_xx_0, 0), (self.blocks_multiply_const_vxx_1_0_2, 0))
self.connect((self.blocks_sub_xx_0, 0), (self.phase_probe, 0))
self.connect((self.blocks_sub_xx_0_0, 0), (self.blocks_multiply_const_vxx_1_0_2_0, 0))
self.connect((self.blocks_sub_xx_1, 0), (self.qtgui_time_sink_x_2, 2))
self.connect((self.blocks_sub_xx_1_0, 0), (self.qtgui_histogram_sink_x_1_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.low_pass_filter_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.qtgui_freq_sink_x_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.qtgui_waterfall_sink_x_0, 0))
self.connect((self.blocks_throttle_1, 0), (self.blocks_delay_0_0, 0))
self.connect((self.blocks_vector_to_stream_0_0, 0), (self.blocks_complex_to_arg_0, 0))
self.connect((self.fft_vxx_0, 0), (self.blocks_multiply_conjugate_cc_0, 0))
self.connect((self.fft_vxx_1, 0), (self.blocks_multiply_conjugate_cc_0, 1))
self.connect((self.fft_vxx_2, 0), (self.blocks_complex_to_mag_0, 0))
self.connect((self.fft_vxx_2, 0), (self.blocks_vector_to_stream_0_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.blocks_add_xx_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.blocks_add_xx_0_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.blocks_complex_to_arg_0_0_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.blocks_complex_to_arg_0_0_0_1, 0))
self.connect((self.low_pass_filter_0, 0), (self.blocks_stream_to_vector_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.qtgui_time_sink_x_1, 0))
self.connect((self.low_pass_filter_0, 0), (self.qtgui_time_sink_x_1_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.rational_resampler_xxx_0, 0))
self.connect((self.low_pass_filter_1, 0), (self.blocks_complex_to_arg_0_0_0_0, 0))
self.connect((self.low_pass_filter_1, 0), (self.blocks_multiply_const_vxx_1_0_0_1, 0))
self.connect((self.low_pass_filter_1, 0), (self.blocks_stream_to_vector_1, 0))
self.connect((self.low_pass_filter_1, 0), (self.qtgui_time_sink_x_1_0, 1))
self.connect((self.low_pass_filter_1, 0), (self.rational_resampler_xxx_0_0, 0))
self.connect((self.rational_resampler_xxx_0, 0), (self.qtgui_freq_sink_x_0_0, 0))
self.connect((self.rational_resampler_xxx_0, 0), (self.qtgui_waterfall_sink_x_0_1, 0))
self.connect((self.rational_resampler_xxx_0_0, 0), (self.qtgui_freq_sink_x_0_0, 1))
self.connect((self.rational_resampler_xxx_0_0, 0), (self.qtgui_waterfall_sink_x_0_0_0, 0))
self.connect((self.sigmf_source_0, 0), (self.blocks_skiphead_0, 0))
self.connect((self.sigmf_source_1, 0), (self.blocks_throttle_1, 0))
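# GNU Radio wiring note: each connect((src, out_port), (dst, in_port)) call above
# attaches one output port of a block to one input port of another, and a single
# output may fan out to several inputs (as low_pass_filter_0 does here). A tiny
# self-contained example of the same API, using hypothetical blocks that are not
# part of this flowgraph:
#
#   tb = gr.top_block()
#   src = analog.sig_source_c(32000, analog.GR_COS_WAVEFORM, 1000, 1.0)
#   snk = blocks.null_sink(gr.sizeof_gr_complex)
#   tb.connect((src, 0), (snk, 0))
#   tb.start()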
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "rtlsdr_v3_dual_WWV_1")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_path(self):
return self.path
def set_path(self, path):
self.path = path
self.set_fp_1("{:s}/{:s}".format(self.path, self.fn_1))
self.set_fp_0("{:s}/{:s}".format(self.path, self.fn_0))
def get_signal_type(self):
return self.signal_type
def set_signal_type(self, signal_type):
self.signal_type = signal_type
def get_ts_str(self):
return self.ts_str
def set_ts_str(self, ts_str):
self.ts_str = ts_str
self.set_fn_wav("{:s}_{:s}_{:s}.wav".format(signal_type.upper(), pol.upper(),self.ts_str))
self.set_fn_1("{:s}_{:s}_{:s}".format(signal_type.upper(), antenna_1.upper(),self.ts_str))
self.set_fn_0("{:s}_{:s}_{:s}".format(signal_type.upper(), antenna_0.upper(),self.ts_str))
def get_interp_1(self):
return self.interp_1
def set_interp_1(self, interp_1):
self.interp_1 = interp_1
self.set_samp_rate(2048000/self.decim_1*self.interp_1)
def get_decim_1(self):
return self.decim_1
def set_decim_1(self, decim_1):
self.decim_1 = decim_1
self.set_samp_rate(2048000/self.decim_1*self.interp_1)
def get_antenna_1(self):
return self.antenna_1
def set_antenna_1(self, antenna_1):
self.antenna_1 = antenna_1
def get_antenna_0(self):
return self.antenna_0
def set_antenna_0(self, antenna_0):
self.antenna_0 = antenna_0
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.qtgui_waterfall_sink_x_0_1_0_0.set_frequency_range(0, self.samp_rate)
self.qtgui_waterfall_sink_x_0_1_0.set_frequency_range(0, self.samp_rate)
self.qtgui_waterfall_sink_x_0_1.set_frequency_range(0, self.samp_rate/5)
self.qtgui_waterfall_sink_x_0_0_0_0_0.set_frequency_range(0, self.samp_rate)
self.qtgui_waterfall_sink_x_0_0_0_0.set_frequency_range(0, self.samp_rate)
self.qtgui_waterfall_sink_x_0_0_0.set_frequency_range(0, self.samp_rate/5)
self.qtgui_waterfall_sink_x_0_0.set_frequency_range(self.rx_freq, self.samp_rate)
self.qtgui_waterfall_sink_x_0.set_frequency_range(self.rx_freq, self.samp_rate)
self.qtgui_time_sink_x_2.set_samp_rate(self.samp_rate)
self.qtgui_time_sink_x_1_0.set_samp_rate(self.samp_rate)
self.qtgui_time_sink_x_1.set_samp_rate(self.samp_rate)
self.qtgui_time_sink_x_0_1.set_samp_rate(self.samp_rate)
self.qtgui_time_sink_x_0.set_samp_rate(self.samp_rate)
self.qtgui_freq_sink_x_0_0_0_0.set_frequency_range(0, self.samp_rate)
self.qtgui_freq_sink_x_0_0_0.set_frequency_range(0, self.samp_rate)
self.qtgui_freq_sink_x_0_0.set_frequency_range(0, self.samp_rate/5)
self.qtgui_freq_sink_x_0.set_frequency_range(self.rx_freq*0, self.samp_rate)
self.set_offset(self.samp_rate/4)
self.low_pass_filter_1.set_taps(firdes.low_pass(1, self.samp_rate, self.lpf_cut, 1e3, firdes.WIN_HAMMING, 6.76))
self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate, self.lpf_cut, 1e3, firdes.WIN_HAMMING, 6.76))
self.blocks_throttle_1.set_sample_rate(self.samp_rate)
self.blocks_throttle_0.set_sample_rate(self.samp_rate)
self.blocks_multiply_const_vxx_2_0.set_k((self.samp_rate/(2*math.pi), ))
self.blocks_multiply_const_vxx_2.set_k((self.samp_rate/(2*math.pi), ))
def get_pol(self):
return self.pol
def set_pol(self, pol):
self.pol = pol
def get_phase_rad(self):
return self.phase_rad
def set_phase_rad(self, phase_rad):
self.phase_rad = phase_rad
self.set_phase_label(self._phase_label_formatter(self.phase_rad*180.0/math.pi))
def get_phase_deg(self):
return self.phase_deg
def set_phase_deg(self, phase_deg):
self.phase_deg = phase_deg
self.set_phase_rad2(self.phase_deg*math.pi/180.0)
Qt.QMetaObject.invokeMethod(self._phase_deg_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.phase_deg)))
def get_idx_max(self):
return self.idx_max
def set_idx_max(self, idx_max):
self.idx_max = idx_max
self.set_idx_label(self._idx_label_formatter(self.idx_max))
self.blocks_keep_m_in_n_0_0_0.set_offset(int(self.idx_max))
def get_fn_1(self):
return self.fn_1
def set_fn_1(self, fn_1):
self.fn_1 = fn_1
self.set_fp_1("{:s}/{:s}".format(self.path, self.fn_1))
def get_fn_0(self):
return self.fn_0
def set_fn_0(self, fn_0):
self.fn_0 = fn_0
self.set_fp_0("{:s}/{:s}".format(self.path, self.fn_0))
def get_rx_gain(self):
return self.rx_gain
def set_rx_gain(self, rx_gain):
self.rx_gain = rx_gain
Qt.QMetaObject.invokeMethod(self._rx_gain_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.rx_gain)))
def get_rx_freq(self):
return self.rx_freq
def set_rx_freq(self, rx_freq):
self.rx_freq = rx_freq
Qt.QMetaObject.invokeMethod(self._rx_freq_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.rx_freq)))
self.qtgui_waterfall_sink_x_0_0.set_frequency_range(self.rx_freq, self.samp_rate)
self.qtgui_waterfall_sink_x_0.set_frequency_range(self.rx_freq, self.samp_rate)
self.qtgui_freq_sink_x_0.set_frequency_range(self.rx_freq*0, self.samp_rate)
def get_ppm(self):
return self.ppm
def set_ppm(self, ppm):
self.ppm = ppm
Qt.QMetaObject.invokeMethod(self._ppm_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.ppm)))
def get_pll_lbw(self):
return self.pll_lbw
def set_pll_lbw(self, pll_lbw):
self.pll_lbw = pll_lbw
Qt.QMetaObject.invokeMethod(self._pll_lbw_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.pll_lbw)))
self.analog_pll_freqdet_cf_0_0.set_loop_bandwidth(math.pi/self.pll_lbw)
self.analog_pll_freqdet_cf_0.set_loop_bandwidth(math.pi/self.pll_lbw)
self.analog_pll_carriertracking_cc_0_0.set_loop_bandwidth(math.pi/self.pll_lbw)
self.analog_pll_carriertracking_cc_0.set_loop_bandwidth(math.pi/self.pll_lbw)
def get_pll_freq(self):
return self.pll_freq
def set_pll_freq(self, pll_freq):
self.pll_freq = pll_freq
Qt.QMetaObject.invokeMethod(self._pll_freq_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.pll_freq)))
self.analog_pll_freqdet_cf_0_0.set_max_freq(math.pi/self.pll_freq)
self.analog_pll_freqdet_cf_0_0.set_min_freq(-1*math.pi/self.pll_freq)
self.analog_pll_freqdet_cf_0.set_max_freq(math.pi/self.pll_freq)
self.analog_pll_freqdet_cf_0.set_min_freq(-1*math.pi/self.pll_freq)
self.analog_pll_carriertracking_cc_0_0.set_max_freq(math.pi/self.pll_freq)
self.analog_pll_carriertracking_cc_0_0.set_min_freq(-math.pi/self.pll_freq)
self.analog_pll_carriertracking_cc_0.set_max_freq(math.pi/self.pll_freq)
self.analog_pll_carriertracking_cc_0.set_min_freq(-math.pi/self.pll_freq)
def get_phase_rad2(self):
return self.phase_rad2
def set_phase_rad2(self, phase_rad2):
self.phase_rad2 = phase_rad2
self.blocks_multiply_const_vxx_1_0_0_1.set_k((complex(math.cos(-1*self.phase_rad2),math.sin(-1*self.phase_rad2)), ))
def get_phase_label(self):
return self.phase_label
def set_phase_label(self, phase_label):
self.phase_label = phase_label
Qt.QMetaObject.invokeMethod(self._phase_label_label, "setText", Qt.Q_ARG("QString", self.phase_label))
def get_offset(self):
return self.offset
def set_offset(self, offset):
self.offset = offset
def get_ns_delay(self):
return self.ns_delay
def set_ns_delay(self, ns_delay):
self.ns_delay = ns_delay
Qt.QMetaObject.invokeMethod(self._ns_delay_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.ns_delay)))
self.blocks_delay_0.set_dly(int(self.ns_delay))
def get_nfft(self):
return self.nfft
def set_nfft(self, nfft):
self.nfft = nfft
self.qtgui_histogram_sink_x_0.set_bins(self.nfft)
self.qtgui_histogram_sink_x_0.set_bins(self.nfft)
self.blocks_keep_m_in_n_0_0_0.set_n(self.nfft)
self.blocks_add_const_vxx_0.set_k((-self.nfft/2.0, ))
def get_lpf_cut(self):
return self.lpf_cut
def set_lpf_cut(self, lpf_cut):
self.lpf_cut = lpf_cut
Qt.QMetaObject.invokeMethod(self._lpf_cut_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.lpf_cut)))
self.low_pass_filter_1.set_taps(firdes.low_pass(1, self.samp_rate, self.lpf_cut, 1e3, firdes.WIN_HAMMING, 6.76))
self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate, self.lpf_cut, 1e3, firdes.WIN_HAMMING, 6.76))
def get_idx_label(self):
return self.idx_label
def set_idx_label(self, idx_label):
self.idx_label = idx_label
Qt.QMetaObject.invokeMethod(self._idx_label_label, "setText", Qt.Q_ARG("QString", self.idx_label))
def get_fp_1(self):
return self.fp_1
def set_fp_1(self, fp_1):
self.fp_1 = fp_1
def get_fp_0(self):
return self.fp_0
def set_fp_0(self, fp_0):
self.fp_0 = fp_0
def get_fn_wav(self):
return self.fn_wav
def set_fn_wav(self, fn_wav):
self.fn_wav = fn_wav
def get_ew_delay(self):
return self.ew_delay
def set_ew_delay(self, ew_delay):
self.ew_delay = ew_delay
Qt.QMetaObject.invokeMethod(self._ew_delay_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.ew_delay)))
self.blocks_delay_0_0.set_dly(int(self.ew_delay))
def get_avg_len(self):
return self.avg_len
def set_avg_len(self, avg_len):
self.avg_len = avg_len
Qt.QMetaObject.invokeMethod(self._avg_len_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.avg_len)))
self.blocks_moving_average_xx_0_1.set_length_and_scale(int(self.avg_len), 1.0/self.avg_len)
= RobotState()
def setup_multiple_action_clients(self, action_topics, wait_duration=2.0):
"""
Tries to set up multiple MoveIt MoveGroup action clients for later use.
@param action_topics : list of tuples of Action type and topic names
@param wait_duration: Defines how long to wait for the given client if it is not available right now.
"""
if action_topics is not None:
print "ProxyMoveItClient: setup_multiple_action_clients ..."
for action_type, topics in action_topics:
print "Action type: ",action_type," : ",topics
for topic in topics:
self.setup_action_client(topic, action_type, wait_duration)
else:
print "ProxyMoveItClient: no additional action clients"
def setup_action_client(self, action_topic, action_type, wait_duration):
"""
Tries to set up a MoveIt MoveGroup action client for calling it later.
@param action_topic : string - The topic of the action to call.
@param action_type : string - class definition name for interface
@param wait_duration : Defines how long to wait for the given client if it is not available right now.
"""
if not isinstance(action_topic,str):
raise Exception(" ProxyMoveItClient - Invalid action topic %s " % ( action_topic ))
if action_topic not in ProxyMoveItClient._action_clients:
# We have not initialized this client yet
Logger.loginfo("Initializing proxy MoveIt client for "+action_topic+" ...")
try:
ProxyMoveItClient._action_clients[action_topic] = ProxyActionClient({action_topic: eval(action_type) }, wait_duration)
except Exception as e:
Logger.logwarn("ProxyMoveItClient setup error - %s"% (str(e)))
raise e
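# A minimal usage sketch for the two setup helpers above, assuming `client` is an
# already-constructed ProxyMoveItClient and that a MoveGroupAction server is
# advertised on '/move_group' (both names are assumptions, not taken from this file):
#
#   client.setup_action_client('/move_group', 'MoveGroupAction', wait_duration=2.0)
#   # or several clients in one call:
#   client.setup_multiple_action_clients([('MoveGroupAction', ['/move_group'])])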
def dump_moveit_client(self):
'''
This dumps the ProxyMoveItClient data for debugging and status
'''
data = "\n*******************************\n"
data += "------ ProxyMoveItClient ------\n"
data += " Robot: "+ProxyMoveItClient._robot_description+"\n"
data += " --- Action Clients ---\n"
for topic in ProxyMoveItClient._action_clients:
data += " client: "+topic+"\n"
data += "----------------------------\n"
for group in ProxyMoveItClient._move_group_list:
data += str(self.dump_moveit_goal(group))
data += "----------------------------\n\n"
data += "--------------------------------\n"
data += "################################\n"
return data
def dump_moveit_goal(self, group):
'''
This dumps the current MoveGroupGoal data for debugging and status
@param group : string for a specific move group
'''
data = " group: "+group+"\n"
data += " --- MotionPlanRequest ---\n"
data += " "+str(ProxyMoveItClient._motion_plan_requests[group])
data += "\n ----------------------------\n"
data += " --- PlanningOption ---\n"
data += " "+str(ProxyMoveItClient._planning_options[group])
data += "\n ----------------------------\n"
return data
def get_error_msg(self, error_code):
'''
Returns error message string based on
@param error_code : code returned from MoveIt!
'''
for key, value in MoveItErrorCodes.__dict__.items():
if value == error_code.val and key[0] != "_":
return key + (" (%s)" % str(error_code.val))
return "unknown error (%s)" % str(error_code.val)
def is_available(self, action_topic):
"""
Checks if the client on the given action topic is available.
@param action_topic: string The topic of interest.
"""
if(not action_topic in ProxyMoveItClient._action_clients):
raise Exception("ProxyMoveItClient - topic %s is not initialized yet!" % (action_topic))
return ProxyMoveItClient._action_clients[action_topic].is_available(action_topic)
def get_state(self, action_topic):
"""
Returns current state of the ActionLib client.
@param action_topic: string The topic of interest.
"""
if(not action_topic in ProxyMoveItClient._action_clients):
raise Exception("ProxyMoveItClient - topic %s is not initialized yet!" % (action_topic))
return ProxyMoveItClient._action_clients[action_topic].get_state(action_topic)
def has_result(self, action_topic):
"""
Checks if the client on the given action topic has an active result.
@param action_topic: string The topic of interest.
"""
if(not action_topic in ProxyMoveItClient._action_clients):
raise Exception("ProxyMoveItClient - topic %s is not initialized yet!" % (action_topic))
return ProxyMoveItClient._action_clients[action_topic].has_result(action_topic)
def get_result(self, action_topic):
"""
Gets the latest result from the given action topic.
@param action_topic: string The topic of interest.
"""
if(not action_topic in ProxyMoveItClient._action_clients):
raise Exception("ProxyMoveItClient - topic %s is not initialized yet!" % (action_topic))
return ProxyMoveItClient._action_clients[action_topic].get_result(action_topic)
def cancel(self, action_topic):
"""
Cancel any active goals on the given action topic
@param action_topic: string The topic of interest.
"""
if(not action_topic in ProxyMoveItClient._action_clients):
raise Exception("ProxyMoveItClient - topic %s is not initialized yet!" % (action_topic))
return ProxyMoveItClient._action_clients[action_topic].cancel(action_topic)
def connect_action_server(self, action_topic, action_type, wait_duration=0.0):
"""
Sets up the client on the given action topic and checks that it is available.
@param action_topic: string The topic of interest.
@param action_type: string The interface type
@param wait_duration : float How long to wait for a connection
"""
if not action_topic in ProxyMoveItClient._action_clients:
raise Exception("ProxyMoveItClient - topic %s is not initialized yet!" % (action_topic))
ProxyMoveItClient._action_clients[action_topic].setupClient(action_topic, eval(action_type) , wait_duration)
return ProxyMoveItClient._action_clients[action_topic].is_available(action_topic)
def is_active(self, action_topic):
"""
Determines if an action request is already being processed on the given topic.
@type action_topic: string
@param action_topic: The topic of interest.
"""
if not action_topic in ProxyMoveItClient._action_clients:
raise Exception("ProxyMoveItClient - is_active topic %s is not initialized yet!" % (action_topic))
return ProxyMoveItClient._action_clients[action_topic].is_active(action_topic)
def reset_motion_plan_request(self, move_group=None):
'''
Clears the specified dictionary for move group
@param move_group : string or list specifying a particular move group(s) (default: None - change all relevant )
'''
flag = False
if move_group is None:
for group in self._move_group_list:
try:
ret = self.reset_motion_plan_request(group)
if ret:
flag=ret
except:
pass
# Check to see that something was reset
if flag:
return flag
else:
raise Exception(" Failed to reset dictionary for any relevant topic for move_group=(%s) !" % (str(move_group)))
elif isinstance(move_group, (list, tuple)) and bool(move_group) and all([isinstance(elem, basestring) for elem in move_group]):
# List of strings
for group in move_group:
try:
ret = self.reset_motion_plan_request(group)
if ret:
flag=ret
except:
pass
# Check to see that something was reset
if flag:
return flag
else:
raise Exception(" Failed to reset dictionary for any relevant topic for move_group=(%s) !" % (str(move_group)))
else:
# Base case: reset the specified motion plan request
try:
# Reset the motion plan request for the given move group
ProxyMoveItClient._motion_plan_requests[move_group] = copy.deepcopy(ProxyMoveItClient._default_motion_plan_requests[move_group])
return True
except Exception as e:
Logger.logerr(" Invalid move group %s for motion plan request - not configured yet!" % (str(move_group)))
return False
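# reset_motion_plan_request (and the reset helpers below) accept three call
# patterns; a short sketch, where the group names are assumptions:
#
#   client.reset_motion_plan_request()                  # reset every configured group
#   client.reset_motion_plan_request(['arm', 'torso'])  # reset a list of groups
#   client.reset_motion_plan_request('arm')             # reset a single group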
def reset_planning_options(self, move_group=None):
'''
Reset the planning options to match the defaults
@param move_group : string or list specifying a particular move group(s) (default: None - change all relevant )
'''
flag = False
if move_group is None:
for group in self._move_group_list:
try:
ret = self.reset_planning_options(group)
if ret:
flag=ret
except:
pass
# Check to see that something was reset
if flag:
return flag
else:
raise Exception(" Failed to reset planning options for any relevant topic for move_group=(%s) !" % (str(move_group)))
elif isinstance(move_group, (list, tuple)) and bool(move_group) and all([isinstance(elem, basestring) for elem in move_group]):
# List of strings
for group in move_group:
try:
ret = self.reset_planning_options(group)
if ret:
flag=ret
except:
pass
# Check to see that something was reset
if flag:
return flag
else:
raise Exception(" Failed to reset planning options for any relevant topic for move_group=(%s) !" % (str(move_group)))
else:
# Base case: reset the specified planning options
try:
# Reset the planning options for the given move group
ProxyMoveItClient._planning_options[move_group] = copy.deepcopy(ProxyMoveItClient._default_planning_options[move_group])
return True
except Exception as e:
Logger.logerr(" Invalid move group %s for planning options - not configured yet!" % (str(move_group)))
return False
def reset_joint_constraints(self, move_group=None):
'''
Reset the joint constraints to match the defaults
@param move_group : string or list specifying a particular move group(s) (default: None - change all relevant )
'''
flag = False
if move_group is None:
for group in self._move_group_list:
try:
ret = self.reset_joint_constraints(group)
if ret:
flag=ret
except:
pass
# Check to see that something was reset
if flag:
return flag
else:
raise Exception(" Failed to reset joint constraints for any relevant topic for move_group=(%s) !" % (str(move_group)))
elif isinstance(move_group, (list, tuple)) and bool(move_group) and all([isinstance(elem, basestring) for elem in move_group]):
# List of strings
for group in move_group:
try:
ret = self.reset_joint_constraints(group)
if ret:
flag=ret
except:
pass
# Check to see that something was reset
if flag:
return flag
else:
raise Exception(" Failed to reset joint constraints for any relevant topic for move_group=(%s) !" % (str(move_group)))
else:
# Base case: reset the specified joint constraints
try:
# Reset the joint constraints for the given move group
ProxyMoveItClient._joint_constraints[move_group] = copy.deepcopy(ProxyMoveItClient._default_joint_constraints[move_group])
return True
except Exception as e:
Logger.logerr(" Invalid move group %s for joint constraints - not configured yet!" % (str(move_group)))
return False
def reset_position_constraints(self, move_group):
'''
Reset the current position constraints to the defaults
@param move_group : string specifying a particular move group
'''
#@TODO - handle reset all as with joints
ProxyMoveItClient._position_constraints[move_group] = copy.deepcopy(ProxyMoveItClient._default_position_constraints[move_group])
def reset_orientation_constraints(self, move_group):
'''
Reset the current orientation constraints to the defaults
@param move_group : string specifying a particular move group
'''
#@TODO - handle reset all as with joints
ProxyMoveItClient._orientation_constraints[move_group] = copy.deepcopy(ProxyMoveItClient._default_orientation_constraints[move_group])
if q6 in ('17', 'seventeen', 'Seventeen', 'seven teen', 'Seven teen', 'Seven Teen', 'seven-teen', 'Seven-Teen', 'Seven-teen'):
correct += 1
else:
wrong += 1
Cedric_Diggory["age"] = q6
q7 = input(f"{name.title()}, what is Cedric's dad's name? ")
if q7 in ('Amos Diggory', 'amos diggory', 'Amos diggory', 'amos Diggory'):
correct += 1
else:
wrong += 1
Cedric_Diggory["parents"] = q7
print(F"{name.title()}, that is the end. Note: Since Cedric's mother's name is unknown and Cedric doesn't have a middle name/middle name unknown, there will be no question on that.")
print('Here is what you put in earlier, you will get a confirmation message.')
for key, value in Cedric_Diggory.items():
print(f'\tHis {key} is {value}\n')
Cedric_Diggory_correct = {"species":"human",
"gender":"male",
"house":"Hufflepuff",
"blood status":"half-blood",
"eye color":"gray",
"age":"17",
"parents":"<NAME>, mother name unknown"}
print(f"Here are your results:\nYou got {correct} correct and {wrong} wrong out of 7")
print('Here are the correct answers:')
for key, value in Cedric_Diggory_correct.items():
print(f'\tHis {key} is {value}\n')
q8 = input(f"{name.title()}, would you like to do the last quiz, or do this again? [L]ast quiz, [D]o this again ")
if q8 in ('l', 'L'):
Luna_Lovegood()
# Questions: Name, species, gender, house, blood status, eye color, age, parents.
def Voldemort():
status = True
while status:
correct = 0
wrong = 0
voldemort = {}
q1 = input(f"{name.title()}, what is Voldemort's real and full name? ")
if q1 in ('<NAME>', '<NAME>', '<NAME>', '<NAME>'):
correct += 1
else:
wrong += 1
voldemort["name"] = q1
q2 = input(f"{name.title()}, what is Voldemort's species? ")
if q2 in ('human', 'Human'):
correct += 1
else:
wrong += 1
voldemort["species"] = q2
q3 = input(f"{name.title()}, what is Voldemort's gender?")
if q3 in ('male', 'Male'):
correct += 1
else:
wrong += 1
voldemort["gender"] = q3
q4 = input(f"{name.title()}, what is Voldemort's house? ")
if q4 in ('Slytherin', 'slytherin'):
correct += 1
else:
wrong += 1
voldemort["house"] = q4
q5 = input(f"{name.title()}, what is Voldemort's blood status? ")
if q5 in ('half blood', 'Half Blood', 'Half blood', 'half Blood', 'Half-Blood', 'Half-blood', 'half-blood'):
correct += 1
else:
wrong += 1
voldemort["blood status"] = q5
q6 = input(f"{name.title()}, what is Voldemort's eye color? ")
if q6 in ('red', 'Red'):
correct += 1
else:
wrong += 1
voldemort["eye color"] = q6
q7 = input(f"{name.title()}, what is Voldemort's age (at death)? ")
if q7 in ('71', 'seventy-one', 'Seventy-One', 'Seventy-one', 'seventy one', 'Seventy one', 'Seventy One'):
correct += 1
else:
wrong += 1
voldemort["age"] = q7
q8 = input(f"{name.title()}, what are Voldemort's parent's names? ")
if q8 in ('Merope and <NAME>', 'merope and tom riddle', 'Merope and <NAME>', 'Merope And <NAME>', 'Tom and M<NAME>iddle', 'tom and merope riddle'):
correct += 1
else:
wrong += 1
voldemort["parents"] = q8
print(f'Here is what you put in earlier. You will get a confirmation message. ')
for key, value in voldemort.items():
print(f'\tHis {key} is {value}.\n')
print(f'Here are your results:\nYou got {correct} right and {wrong} wrong out of 8.')
voldemort_correct_answers = {"name":"<NAME>",
"species":"human",
"gender":"male",
"house":"Slytherin",
"blood status":"half-blood",
"eye color":"red",
"age":"71 at death",
"parents":"Merope and <NAME>"
}
print('here are the correct answers:')
for key, value in voldemort_correct_answers.items():
print(f'His {key} is {value}')
q0 = input(f"{name.title()}, what would you like to do? [D]o this again, or [N]ext quiz? ")
if q0 in ('N', 'n'):
Cedric_Diggory()
# Questions: Name, species, gender, house, blood status, eye color, age, parents.
def hermione_granger():
status = True
while status:
correct = 0
wrong = 0
hermione_granger = {}
q1 = input(f'{name.title()}, what is Hermione' + "'s full name? ")
if q1 in ('Hermione Jean Granger', 'Hermione jean granger', 'hermione jean granger'):
correct += 1
else:
wrong += 1
hermione_granger["name"] = q1
q2 = input(f"{name.title()}, what is Hermione's species? ")
if q2 in ('human', 'Human'):
correct += 1
else:
wrong += 1
hermione_granger["species"] = q2
q3 = input(f"{name.title()}, what is Hermione's gender? ")
if q3 in ('female', 'Female', 'girl', 'Girl', 'woman', 'Woman'):
correct += 1
else:
wrong += 1
hermione_granger["gender"] = q3
q4 = input(f"{name}, what is Hermione's house? ")
if q4 in ('gryffindor', 'Gryffindor'):
correct += 1
else:
wrong += 1
hermione_granger["house"] = q4
q5 = input(f"{name.title()}, what is Hermione's blood status? ")
if q5 in ('Muggle Born', 'muggle born', 'Muggle born', 'muggle-born', 'Muggle-Born', 'Muggle-born'):
correct += 1
elif q5 in ('mudblood', 'Mudblood', 'mud blood', 'Mud Blood', 'Mud blood'):
print(f"{name}, that is very offensive, you may not continue!".upper())
e(0)
else:
wrong += 1
hermione_granger["blood status"] = q5
q6 = input(f"{name.title()}, what is Hermione's eye color? ")
if q6 in ('brown', 'Brown'):
correct += 1
else:
wrong += 1
hermione_granger["eye color"] = q6
q7 = input(f"{name.title()}, what is Hermione's age? ")
if q7 in ('41', 'forty one', 'Forty one', 'Forty One', 'forty-one', 'Forty-one', 'Forty-One'):
correct += 1
else:
wrong += 1
hermione_granger["age"] = q7
print(f'{name.title()}, that was the last question. NOTE: Since Hermione' + "'s parents' names are unknown, I will not do a question on them.")
print("Here is what you put in earlier:")
for key, value in hermione_granger.items():
print(f'\tHer {key} is {value}.\n')
print(f'Here are your results:\nYou got {correct} right and {wrong} wrong out of 7.')
hermione_granger_correct_answers = {"name":"<NAME>",
"species":"human",
"gender":"female",
"house":"Gryffindor",
"blood status":"muggle-born",
"eye color":"brown",
"age":"41"
}
print('Here are the correct answers:\n')
for key, value in hermione_granger_correct_answers.items():
print(f'\tHer {key} is {value}.\n')
q10 = input(f"{name.title()}, what would you like to do? [D]o this quiz again, or [N]ext quiz? ")
if q10 in ('N', 'n'):
Voldemort()
# Questions: Name, species, gender, house, blood status, eye color, age, parents.
def ron_Weasley():
status = True
wrong = 0
correct = 0
while status:
# Questions: Name, species,
ron_Weasley = {}
q1 = input(f"What is Ron's full name? ")
if q1 in ('<NAME>', '<NAME>'):
correct += 1
else:
wrong += 1
ron_Weasley["name"] = q1
q2 = input(F"{name.title()}, what is Ron's species? ")
if q2 in ('human', 'Human'):
correct += 1
else:
wrong += 1
ron_Weasley["species"] = q2
q3 = input(f"{name.title()}, what is Ron's gender? ")
if q3 in ('male', 'Male', 'boy', 'Boy', 'man', 'Man'):
correct += 1
else:
wrong += 1
ron_Weasley["gender"] = q3
q4 = input(f"{name.title()}, what is Ron's house? ")
if q4 in ('gryffindor', 'Gryffindor'):
correct += 1
else:
wrong += 1
ron_Weasley["house"] = q4
q5 = input(f"{name.title()}, what is Ron's blood status? ")
if q5 in ('pure blood', 'Pure Blood', 'Pure blood', 'Pure-blood', 'pure-blood', 'pureblood'):
correct += 1
else:
wrong += 1
ron_Weasley["blood status"] = q5
q6 = input(f"{name.title()}, what is Ron's eye color? ")
if q6 in ('blue', 'Blue'):
correct += 1
else:
wrong += 1
ron_Weasley["eye color"] = q6
q7 = input(f"{name.title()}, what is Ron's age? ")
if q7 in ('40', 'forty', 'Forty'):
correct += 1
else:
wrong += 1
ron_Weasley["age"] = q7
q8 = input(f"{name.title()}, what are Ron's parents? ")
if q8 in ('Molly and Arthur Weasley', 'molly and arthur weasley', 'Molly and arthur weasley', 'arthur and molly Weasley', 'Arthur and Molly Weasley'):
correct += 1
else:
wrong += 1
ron_Weasley["parents"] = q7
print("What you put in will appear shortly.")
for key, value in ron_Weasley.items():
print(f'\tHis {key} is {value}\n')
q9 = input(f'{name.title()}, is this info correct? y/n ')
if q9 == 'y':
print("Awesome, here are the results:")
else:
print(f'{name.title()}, try again. ')
status = False
print(f'You got {correct} correct and {wrong} wrong out of 8.')
ron_Weasley_correct_answers = {"name":"<NAME>",
"species":"human",
"gender":"male",
"house":"Gryffindor",
"blood status":"pure-blood"}
print('Here are the correct answers:')
for key, value in ron_Weasley_correct_answers.items():
print(f'His {key} is {value}.\n\t')
q10 = input(f'What would you like to do? [D]o the quiz again, or [N]ext quiz? ')
if q10 in ('N', 'n'):
hermione_granger()
def harry_potter():
status = True
correct = 0
wrong = 0
while status:
Harry_Potter = {}
# Questions: Name, species, gender, house, blood status, eye color, age, parents.
q1 = input(f"What is Harry's full name? ")
if q1 == '<NAME>':
correct += 1
else:
wrong += 1
Harry_Potter["name"] = q1
q2 = input(F"{name.title()}, what is Harry's species? ")
if q2 in ('human', 'Human'):
correct += 1
else:
wrong += 1
Harry_Potter["species"] = q2
q3 = input(f"{name.title()}, what is Harry's gender? ")
if q3 in ('male', 'Male', 'boy', 'Boy', 'man', 'Man'):
correct += 1
else:
wrong += 1
Harry_Potter["gender"] = q3
q4 = input(f"{name.title()}, what is Harry's house? ")
if q4 in ('gryffindor', 'Gryffindor'):
correct += 1
else:
wrong += 1
Harry_Potter["house"] = q4
q5 = input(f"{name.title()}, what is Harry's blood status? ")
if q5 in ('half blood', 'Half Blood', 'Half blood', 'half Blood', 'Half-Blood', 'Half-blood', 'half-blood'):  # matches the "half-blood" entry in Harry_Potter_correct_answers
correct += 1
else:
wrong += 1
Harry_Potter["blood status"] = q5
q6 = input(f"{name.title()}, what is Harry's eye color? ")
if q6 in ('blue', 'Blue'):
correct += 1
else:
wrong += 1
Harry_Potter["eye color"] = q6
q7 = input(f"{name.title()}, what is Harry's age? ")
if q7 in ('40', 'forty', 'Forty'):
correct += 1
else:
wrong += 1
Harry_Potter["age"] = q7
q8 = input(f"{name.title()}, what are Harry's parents? ")
if q8 == 'James and <NAME>':
correct += 1
else:
wrong += 1
Harry_Potter["parents"] = q7
print("What you put in will appear shortly.")
for key, value in Harry_Potter.items():
print(f'\tHis {key} is {value}\n')
q9 = input(f'{name.title()}, is this info correct? y/n ')
if q9 == 'y':
print("Awesome, here are the results:")
else:
print(f'{name.title()}, try again. ')
status = False
print(f'You got {correct} correct and {wrong} wrong out of 8.')
Harry_Potter_correct_answers = {"name":"<NAME>",
"species":"human",
"gender":"male",
"house":"Gryffindor",
"blood status":"half-blood"}
print('Here are the correct answers:')
for key, value in Harry_Potter_correct_answers.items():
print(f'His {key} | |
from netapp.connection import NaConnection
from extension_list_info import ExtensionListInfo # 1 properties
from event_name import EventName # 0 properties
from fpolicy_policy_get_iter_key_td import FpolicyPolicyGetIterKeyTd # 2 properties
from monitored_operation_info import MonitoredOperationInfo # 1 properties
from fpolicy_event_options_config import FpolicyEventOptionsConfig # 6 properties
from secondary_server_info import SecondaryServerInfo # 1 properties
from fpolicy_policy_event_get_iter_key_td import FpolicyPolicyEventGetIterKeyTd # 2 properties
from fpolicy_proto import FpolicyProto # 0 properties
from fpolicy_policy_status_info import FpolicyPolicyStatusInfo # 4 properties
from fpolicy_volumes_list_info import FpolicyVolumesListInfo # 1 properties
from fpolicy_filter import FpolicyFilter # 0 properties
from fpolicy_policy_info import FpolicyPolicyInfo # 7 properties
from engine_name import EngineName # 0 properties
from policy_info import PolicyInfo # 10 properties
from fpolicy_policy_external_engine_get_iter_key_td import FpolicyPolicyExternalEngineGetIterKeyTd # 2 properties
from fpolicy_external_engine_info import FpolicyExternalEngineInfo # 17 properties
from fpolicy_server_status_info import FpolicyServerStatusInfo # 9 properties
from fpolicy_operation import FpolicyOperation # 0 properties
from server_info import ServerInfo # 11 properties
from fpolicy_server_type import FpolicyServerType # 0 properties
from fpolicy_policy_status_get_iter_key_td import FpolicyPolicyStatusGetIterKeyTd # 2 properties
from common_name import CommonName # 0 properties
from fpolicy_ssl_opts import FpolicySslOpts # 0 properties
from fpolicy_scope_config import FpolicyScopeConfig # 11 properties
from fpolicy_server_status_get_iter_key_td import FpolicyServerStatusGetIterKeyTd # 4 properties
from monitored_protocol_info import MonitoredProtocolInfo # 1 properties
from fpolicy_policy_scope_get_iter_key_td import FpolicyPolicyScopeGetIterKeyTd # 2 properties
from fpolicy_server_status import FpolicyServerStatus # 0 properties
from external_engine_type import ExternalEngineType # 0 properties
class FpolicyConnection(NaConnection):
def fpolicy_server_disconnect(self, node, policy_name, server):
"""
Terminate connection to FPolicy server
:param node: Cluster node name.
:param policy_name: Name of the policy.
:param server: FPolicy server.
"""
return self.request( "fpolicy-server-disconnect", {
'node': [ node, 'node', [ basestring, 'None' ], False ],
'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
'server': [ server, 'server', [ basestring, 'ip-address' ], False ],
}, {
} )
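# The dict passed to self.request() above follows the pattern used throughout
# this class: each key maps to
#   [python_value, 'wire-element-name', [python_type, 'content-type'], is_list]
# A usage sketch, assuming `conn` is an FpolicyConnection that has already been
# set up with filer credentials (node, policy and server values are made up):
#
#   conn.fpolicy_server_disconnect(node='node-01',
#                                  policy_name='demo_policy',
#                                  server='10.0.0.5')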
def fpolicy_volume_list_set(self, policy_name, list_type, volumes):
"""
Manipulate a list of volumes in an exclude or include set.
This limits the set of volumes for which client requests
trigger (include) or suppress (exclude) fpolicy processing
for the provided policy.
The list provided will replace the list currently in place,
if any. Note that if a policy has both an exclude list and
an include list, the include list is ignored by the filer.
:param policy_name: Name of the policy.
:param list_type: Defines to which set (exclude or include) a list
will be applied.
Possible values: "exclude", "include".
:param volumes: List of volume specifications.
"""
return self.request( "fpolicy-volume-list-set", {
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
'list_type': [ list_type, 'list-type', [ basestring, 'None' ], False ],
'volumes': [ volumes, 'volumes', [ FpolicyVolumesListInfo, 'None' ], True ],
}, {
} )
def fpolicy_set_required(self, policy_name, required):
"""
Sets policy's "required" option to on/off.
:param policy_name: Name of the policy.
:param required: Indicator if the policy is required. If set to true,
the request will fail if there is no server to evaluate it.
If it's false, the request will succeed.
"""
return self.request( "fpolicy-set-required", {
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
'required': [ required, 'required', [ bool, 'None' ], False ],
}, {
} )
def fpolicy_enable(self):
"""
Sets options fpolicy enable to on.
"""
return self.request( "fpolicy-enable", {
}, {
} )
def fpolicy_server_stop(self, server_ip, policy_name):
"""
Stops specific primary server serving the policy.
Effectively, this will unregister the fpolicy server.
:param server_ip: The ip address, in dotted-decimal format, of the server.
:param policy_name: Name of the policy.
"""
return self.request( "fpolicy-server-stop", {
'server_ip': [ server_ip, 'server-ip', [ basestring, 'ip-address' ], False ],
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
}, {
} )
def fpolicy_server_connect(self, node, policy_name, server):
"""
Make a connection to FPolicy server
:param node: Cluster node name.
:param policy_name: Name of the policy.
:param server: FPolicy server.
"""
return self.request( "fpolicy-server-connect", {
'node': [ node, 'node', [ basestring, 'None' ], False ],
'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
'server': [ server, 'server', [ basestring, 'ip-address' ], False ],
}, {
} )
def fpolicy_get_required_info(self, policy_name):
"""
Shows current options for the policy.
:param policy_name: Name of the policy.
"""
return self.request( "fpolicy-get-required-info", {
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
}, {
'is-required': [ bool, False ],
} )
def fpolicy_disable_policy(self, policy_name):
"""
Disables a specific named policy.
:param policy_name: Name of the policy.
"""
return self.request( "fpolicy-disable-policy", {
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
}, {
} )
def fpolicy_enable_policy(self, policy_name, sequence_number):
"""
Enables a specific named policy. The operation will fail
if the policy doesn't exist.
:param policy_name: Name of the policy.
:param sequence_number: Policy Sequence Number
"""
return self.request( "fpolicy-enable-policy", {
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
'sequence_number': [ sequence_number, 'sequence-number', [ int, 'None' ], False ],
}, {
} )
def fpolicy_policy_modify(self, policy_name, engine_name=None, privileged_user_name=None, events=None, is_mandatory=None, allow_privileged_access=None):
"""
Modify a policy.
:param policy_name: Name of the policy.
:param engine_name: Name of the Engine. Default Engine is 'native'.
:param privileged_user_name: User name for privileged access. No default value is set for this
attribute.
:param events: Events for file access monitoring.
:param is_mandatory: Indicator if the screening with this policy is required, i.e. it
will fail if no servers are able to process the notification
registered as a part of the external engine. If set to true, the
request will fail if there is no server to evaluate it. If it's
false, the request will succeed. Default value is true.
:param allow_privileged_access: Indicator if privileged access should be given to FPolicy servers
registered for the policy. Default Value is no.
"""
return self.request( "fpolicy-policy-modify", {
'engine_name': [ engine_name, 'engine-name', [ basestring, 'engine-name' ], False ],
'privileged_user_name': [ privileged_user_name, 'privileged-user-name', [ basestring, 'None' ], False ],
'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
'events': [ events, 'events', [ basestring, 'event-name' ], True ],
'is_mandatory': [ is_mandatory, 'is-mandatory', [ bool, 'None' ], False ],
'allow_privileged_access': [ allow_privileged_access, 'allow-privileged-access', [ bool, 'None' ], False ],
}, {
} )
def fpolicy_policy_create(self, engine_name, policy_name, events, privileged_user_name=None, return_record=None, is_mandatory=None, allow_privileged_access=None):
"""
Create a policy.
:param engine_name: Name of the Engine. Default Engine is 'native'.
:param policy_name: Name of the policy.
:param events: Events for file access monitoring.
:param privileged_user_name: User name for privileged access. No default value is set for this
attribute.
:param return_record: If set to true, returns the fpolicy-policy on successful
creation.
Default: false
:param is_mandatory: Indicator if the screening with this policy is required, i.e. it
will fail if no servers are able to process the notification
registered as a part of the external engine. If set to true, the
request will fail if there is no server to evaluate it. If it's
false, the request will succeed. Default value is true.
:param allow_privileged_access: Indicator if privileged access should be given to FPolicy servers
registered for the policy. Default Value is no.
"""
return self.request( "fpolicy-policy-create", {
'engine_name': [ engine_name, 'engine-name', [ basestring, 'engine-name' ], False ],
'privileged_user_name': [ privileged_user_name, 'privileged-user-name', [ basestring, 'None' ], False ],
'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
'return_record': [ return_record, 'return-record', [ bool, 'None' ], False ],
'events': [ events, 'events', [ basestring, 'event-name' ], True ],
'is_mandatory': [ is_mandatory, 'is-mandatory', [ bool, 'None' ], False ],
'allow_privileged_access': [ allow_privileged_access, 'allow-privileged-access', [ bool, 'None' ], False ],
}, {
'result': [ FpolicyPolicyInfo, False ],
} )
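# A minimal usage sketch for fpolicy-policy-create, assuming `conn` is an
# already-connected FpolicyConnection and that the event name below has been
# created on the filer beforehand (both assumptions):
#
#   result = conn.fpolicy_policy_create(engine_name='native',
#                                       policy_name='demo_policy',
#                                       events=['demo_cifs_event'],
#                                       is_mandatory=False,
#                                       return_record=True)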
def fpolicy_policy_event_modify(self, event_name, volume_operation=None, protocol=None, file_operations=None, filter_string=None):
"""
Set FPolicy event options. An FPolicy event consists of protocol,
file operation, volume operation and filters.
:param event_name: Name of the Event.
:param volume_operation: Indicator if the volume operation is required for the event. Default
value is false.
:param protocol: Name of protocol for which event is created. By default no
protocol is selected.
Possible values:
<ul>
<li> "cifs" - CIFS protocol,
| |
"""
return {self.name: getattr(obj, self.attname)}
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""Return field's value just before saving."""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""Perform preliminary non-db specific value checks and conversions."""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""
Return field's value prepared for interacting with the database backend.
Used by the default implementations of get_db_prep_save().
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""Return field's value prepared for saving into a database."""
return self.get_db_prep_value(value, connection=connection, prepared=False)
def has_default(self):
"""Return a boolean of whether this field has a default value."""
return self.default is not NOT_PROVIDED
def get_default(self):
"""Return the default value for this field."""
return self._get_default()
@cached_property
def _get_default(self):
if self.has_default():
if callable(self.default):
return self.default
return lambda: self.default
if not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls:
return return_None
return str # return empty string
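# Behaviour of the cached default factory above, as a short sketch: a callable
# default is returned as-is and invoked on each get_default() call, a plain
# value is wrapped in a lambda, and fields without a default fall back to None
# or '' depending on null/empty-string handling. Illustrative, unbound fields
# (uuid assumed imported):
#
#   f = CharField(max_length=10, default='n/a')
#   f.get_default()        # -> 'n/a'
#   g = CharField(max_length=36, default=uuid.uuid4)
#   g.get_default()        # -> a freshly generated UUID on every call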
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None):
"""
Return choices with a default blank choices included, for use
as <select> choices for this field.
"""
blank_defined = False
choices = list(self.choices) if self.choices else []
named_groups = choices and isinstance(choices[0][1], (list, tuple))
if not named_groups:
for choice, __ in choices:
if choice in ('', None):
blank_defined = True
break
first_choice = (blank_choice if include_blank and
not blank_defined else [])
if self.choices:
return first_choice + choices
rel_model = self.remote_field.model
limit_choices_to = limit_choices_to or self.get_limit_choices_to()
if hasattr(self.remote_field, 'get_related_field'):
lst = [(getattr(x, self.remote_field.get_related_field().attname),
smart_text(x))
for x in rel_model._default_manager.complex_filter(
limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_text(x))
for x in rel_model._default_manager.complex_filter(
limit_choices_to)]
return first_choice + lst
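# Sketch of what get_choices returns for a plain choices field (the field below
# is hypothetical):
#
#   status = CharField(max_length=1, choices=[('d', 'Draft'), ('p', 'Published')])
#   status.get_choices()
#   # -> [('', '---------'), ('d', 'Draft'), ('p', 'Published')]
#   status.get_choices(include_blank=False)
#   # -> [('d', 'Draft'), ('p', 'Published')]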
def value_to_string(self, obj):
"""
Return a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return force_text(self.value_from_object(obj))
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
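# flatchoices collapses named choice groups into a flat list of (value, label)
# pairs; a short illustration with a hypothetical field:
#
#   media = CharField(max_length=5, choices=[
#       ('Audio', [('vinyl', 'Vinyl'), ('cd', 'CD')]),
#       ('unknown', 'Unknown'),
#   ])
#   media.flatchoices
#   # -> [('vinyl', 'Vinyl'), ('cd', 'CD'), ('unknown', 'Unknown')]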
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""Return a django.forms.Field instance for this field."""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
def value_from_object(self, obj):
"""Return the value of this field in the given model instance."""
return getattr(obj, self.attname)
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super().__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
errors.extend(self._check_primary_key())
return errors
def _check_primary_key(self):
if not self.primary_key:
return [
checks.Error(
'AutoFields must set primary_key=True.',
obj=self,
id='fields.E100',
),
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['blank']
kwargs['primary_key'] = True
return name, path, args, kwargs
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def rel_db_type(self, connection):
return IntegerField().db_type(connection=connection)
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name, **kwargs):
assert not cls._meta.auto_field, "A model can't have more than one AutoField."
super().contribute_to_class(cls, name, **kwargs)
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BigAutoField(AutoField):
description = _("Big (8 byte) integer")
def get_internal_type(self):
return "BigAutoField"
def rel_db_type(self, connection):
return BigIntegerField().db_type(connection=connection)
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super().__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
errors.extend(self._check_null(**kwargs))
return errors
def _check_null(self, **kwargs):
if getattr(self, 'null', False):
return [
checks.Error(
'BooleanFields do not accept null values.',
hint='Use a NullBooleanField instead.',
obj=self,
id='fields.E110',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
# if value is 1 or 0 then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
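# Illustrative examples of the conversions above (hypothetical calls):
#     BooleanField().to_python(1)        -> True
#     BooleanField().to_python('f')      -> False
#     BooleanField().to_python('maybe')  -> raises ValidationError (code='invalid')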
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return self.to_python(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = not (self.has_default() or 'initial' in kwargs)
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super().formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
errors = super().check(**kwargs)
errors.extend(self._check_max_length_attribute(**kwargs))
return errors
def _check_max_length_attribute(self, **kwargs):
if self.max_length is None:
return [
checks.Error(
"CharFields must define a 'max_length' attribute.",
obj=self,
id='fields.E120',
)
]
elif not isinstance(self.max_length, int) or self.max_length <= 0:
return [
checks.Error(
"'max_length' must be a positive integer.",
obj=self,
id='fields.E121',
)
]
else:
return []
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, str) or value is None:
return value
return force_text(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
# TODO: Handle multiple backends with different feature flags.
if self.null and not connection.features.interprets_empty_strings_as_nulls:
defaults['empty_value'] = None
defaults.update(kwargs)
return super().formfield(**defaults)
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
system_check_removed_details = {
'msg': (
'CommaSeparatedIntegerField is removed except for support in '
'historical migrations.'
),
'hint': (
'Use CharField(validators=[validate_comma_separated_integer_list]) '
'instead.'
),
'id': 'fields.E901',
}
class DateTimeCheckMixin:
def check(self, **kwargs):
errors = super().check(**kwargs)
errors.extend(self._check_mutually_exclusive_options())
errors.extend(self._check_fix_default_value())
return errors
def _check_mutually_exclusive_options(self):
# auto_now, auto_now_add, and default are mutually exclusive
# options. The use of more than one of these options together
# will trigger an Error
mutually_exclusive_options = [self.auto_now_add, self.auto_now, self.has_default()]
enabled_options = [option not in (None, False) for option in mutually_exclusive_options].count(True)
if enabled_options > 1:
return [
checks.Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
obj=self,
id='fields.E160',
)
]
else:
return []
def _check_fix_default_value(self):
return []
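# Illustrative sketch of a declaration that triggers fields.E160 above, because
# auto_now_add and default are both enabled (model and import names assumed):
#     created = models.DateTimeField(auto_now_add=True, default=timezone.now)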
class DateField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid date format. It must be "
"in YYYY-MM-DD format."),
'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super().__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
if not timezone.is_naive(value):
value = timezone.make_naive(value, timezone.utc)
value = value.date()
elif isinstance(value, datetime.date):
# Nothing to do, as dates don't have tz information
import copy
import glob
import os
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from arguments import get_args
from common.vec_env.subproc_vec_env import SubprocVecEnvMt
from envs import make_env
from kfac import KFACOptimizer
from model import CNNPolicy, MLPPolicy
from storage import RolloutStorage
from visualize import visdom_plot
from arguments import debugging, gtn_M
from arguments import exp, title, title_html
is_use_afs = True
args = get_args()
assert args.algo in ['a2c', 'ppo', 'acktr']
if args.algo == 'ppo':
assert args.num_processes * args.num_steps % args.batch_size == 0
'''num_frames: total number of frames to train on (default: 10e6)
num_steps: number of forward steps the agent takes per policy update
'''
num_updates = int(args.num_frames) // args.num_steps // args.num_processes
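# Worked example of the line above (illustrative values: num_frames=10e6 as in the
# docstring; num_steps=5 and num_processes=16 are assumed for the arithmetic only):
#     num_updates = 10_000_000 // 5 // 16 = 125_000 policy updates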
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
mt_env_id_dic_all = {
'mt test pong':[
'PongNoFrameskip-v4',
'BreakoutNoFrameskip-v4',
],
'mt high performance':[
'BeamRiderNoFrameskip-v4',
'BreakoutNoFrameskip-v4',
'PongNoFrameskip-v4',
'QbertNoFrameskip-v4',
'SpaceInvadersNoFrameskip-v4',
'SeaquestNoFrameskip-v4',
],
'mt_as_ewc_test':[
'CrazyClimberNoFrameskip-v4',
'RiverraidNoFrameskip-v4',
'BreakoutNoFrameskip-v4',
'PongNoFrameskip-v4',
'StarGunnerNoFrameskip-v4',
'DemonAttackNoFrameskip-v4',
'AsteroidsNoFrameskip-v4',
'SpaceInvadersNoFrameskip-v4',
],
'mt shooting':[
'BeamRiderNoFrameskip-v4',
'PhoenixNoFrameskip-v4',
'AtlantisNoFrameskip-v4',
'CentipedeNoFrameskip-v4',
'RiverraidNoFrameskip-v4',
'DemonAttackNoFrameskip-v4',
'GravitarNoFrameskip-v4',
'SeaquestNoFrameskip-v4',
'ChopperCommandNoFrameskip-v4',
'AssaultNoFrameskip-v4',
'AsteroidsNoFrameskip-v4',
'SpaceInvadersNoFrameskip-v4',
'YarsRevengeNoFrameskip-v4',
'CarnivalNoFrameskip-v4',
'CrazyClimberNoFrameskip-v4',
'ZaxxonNoFrameskip-v4',
'PooyanNoFrameskip-v4',
'StarGunnerNoFrameskip-v4',
],
'mt all atari':[
'CarnivalNoFrameskip-v4',
'AlienNoFrameskip-v4',
'AmidarNoFrameskip-v4',
'BankHeistNoFrameskip-v4',
'MsPacmanNoFrameskip-v4',
'TutankhamNoFrameskip-v4',
'VentureNoFrameskip-v4',
'WizardOfWorNoFrameskip-v4',
'AssaultNoFrameskip-v4',
'AsteroidsNoFrameskip-v4',
'BeamRiderNoFrameskip-v4',
'CentipedeNoFrameskip-v4',
'ChopperCommandNoFrameskip-v4',
'CrazyClimberNoFrameskip-v4',
'DemonAttackNoFrameskip-v4',
'AtlantisNoFrameskip-v4',
'GravitarNoFrameskip-v4',
'PhoenixNoFrameskip-v4',
'PooyanNoFrameskip-v4',
'RiverraidNoFrameskip-v4',
'SeaquestNoFrameskip-v4',
'SpaceInvadersNoFrameskip-v4',
'StarGunnerNoFrameskip-v4',
'TimePilotNoFrameskip-v4',
'ZaxxonNoFrameskip-v4',
'YarsRevengeNoFrameskip-v4',
'AsterixNoFrameskip-v4',
'ElevatorActionNoFrameskip-v4',
'BerzerkNoFrameskip-v4',
'FreewayNoFrameskip-v4',
'FrostbiteNoFrameskip-v4',
'JourneyEscapeNoFrameskip-v4',
'KangarooNoFrameskip-v4',
'KrullNoFrameskip-v4',
'PitfallNoFrameskip-v4',
'SkiingNoFrameskip-v4',
'UpNDownNoFrameskip-v4',
'QbertNoFrameskip-v4',
'RoadRunnerNoFrameskip-v4',
'DoubleDunkNoFrameskip-v4',
'IceHockeyNoFrameskip-v4',
'MontezumaRevengeNoFrameskip-v4',
'GopherNoFrameskip-v4',
'BreakoutNoFrameskip-v4',
'PongNoFrameskip-v4',
'PrivateEyeNoFrameskip-v4',
'TennisNoFrameskip-v4',
'VideoPinballNoFrameskip-v4',
'FishingDerbyNoFrameskip-v4',
'NameThisGameNoFrameskip-v4',
'BowlingNoFrameskip-v4',
'BattleZoneNoFrameskip-v4',
'BoxingNoFrameskip-v4',
'JamesbondNoFrameskip-v4',
'RobotankNoFrameskip-v4',
'SolarisNoFrameskip-v4',
'EnduroNoFrameskip-v4',
'KungFuMasterNoFrameskip-v4',
],
}
mt_env_id_dic_selected = mt_env_id_dic_all[args.env_name]
for env_id in mt_env_id_dic_selected:
log_dir = args.log_dir+env_id+'/'
try:
os.makedirs(log_dir)
except OSError:
files = glob.glob(os.path.join(log_dir, '*.monitor.json'))
for f in files:
os.remove(f)
afs_offset = [0.0, 0.0, 0.0, 0.0, 0.0]
reward_dict={}
def rec_last_100_epi_reward(reward,done_list):
# num = 0
'''
Arguments:
reward: running episode reward per environment index
done_list: per-environment done flags returned by the envs
'''
for index,done in enumerate(done_list):
env_name = mt_env_id_dic_selected[index // args.num_processes]
# print (env_name)
if done:
try:
reward_dict["{}_entire".format(env_name)].append(reward[index])
try:
reward_dict["{}_average".format(env_name)].append(np.mean(np.asarray(reward_dict["{}_entire".format(env_name)])))
except:
reward_dict["{}_average".format(env_name)] =[]
if len(reward_dict["{}_entire".format(env_name)])>100:
try:
reward_dict["{}_last_100".format(env_name)].append(np.mean(np.asarray(reward_dict["{}_entire".format(env_name)][-100:])))
except:
reward_dict["{}_last_100".format(env_name)]=[]
except Exception as e:
reward_dict["{}_entire".format(env_name)]=[]
reward_dict["{}_average".format(env_name)] =[]
reward[index] = 0
return reward
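# Usage sketch (illustrative): with two selected games and args.num_processes=4,
# done_list has 8 entries and `index // args.num_processes` maps indices 0-3 to
# game 0 and 4-7 to game 1. The function is called once per environment step:
#     episode_reward_rec = rec_last_100_epi_reward(episode_reward_rec, done)
# and fills reward_dict["<env>_entire"/"_average"/"_last_100"], which is later
# pickled next to the model checkpoint.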
def break_line_html(string):
for x in range(0,len(string),40):
string = string[:x] + '<br>' + string[x:]
return string
def main():
print("#######")
print("WARNING: All rewards are clipped so you need to use a monitor (see envs.py) or visdom plot to get true rewards")
print("#######")
os.environ['OMP_NUM_THREADS'] = '1'
if args.vis:
from visdom import Visdom
viz = Visdom()
win = []
win_dic ={}
for i in range(len(mt_env_id_dic_selected)):
win += [None]
win_afs_per_m = None
win_afs_loss = None
win_basic_loss = None
plot_dic = {}
envs = []
''' The original program ran only one game per model, so Song added the loop over i:
every selected environment created here is handed to SubprocVecEnvMt.
'''
for i in range(len(mt_env_id_dic_selected)):
log_dir = args.log_dir+mt_env_id_dic_selected[i]+'/'
for j in range(args.num_processes):
envs += [make_env(mt_env_id_dic_selected[i], args.seed, j, log_dir)]
''' envs now aggregates all of the running environments '''
envs = SubprocVecEnvMt(envs)
num_processes_total = args.num_processes * len(mt_env_id_dic_selected)
'''(1,128,128)'''
obs_shape = envs.observation_space.shape
#num_stack :number of frames to stack
obs_shape = (obs_shape[0] * args.num_stack, *obs_shape[1:])
from arguments import is_restore
if is_restore and args.save_dir:
load_path = os.path.join(args.save_dir, args.algo)
actor_critic =torch.load(os.path.join(load_path, args.env_name + ".pt"))
# print ("restored previous model!")
# print (actor_critic.Variable)
# print (sss)
else:
if len(envs.observation_space.shape) == 3:
actor_critic = CNNPolicy(obs_shape[0], envs.action_space)
else:
actor_critic = MLPPolicy(obs_shape[0], envs.action_space)
if envs.action_space.__class__.__name__ == "Discrete":
action_shape = 1
else:
action_shape = envs.action_space.shape[0]
if args.cuda:
actor_critic.cuda()
if args.algo == 'a2c':
optimizer = optim.RMSprop(actor_critic.parameters(), args.lr, eps=args.eps, alpha=args.alpha)
elif args.algo == 'ppo':
optimizer = optim.Adam(actor_critic.parameters(), args.lr, eps=args.eps)
elif args.algo == 'acktr':
optimizer = KFACOptimizer(actor_critic)
# args.num_steps: number of forward steps in A2C
# rollouts aggregates states, rewards, next states, actions and so on
rollouts = RolloutStorage(args.num_steps, num_processes_total, obs_shape, envs.action_space)
current_state = torch.zeros(num_processes_total, *obs_shape)
''' stack the newest observation into current_state (frame stacking) '''
def update_current_state(state):
shape_dim0 = envs.observation_space.shape[0]
# print (shape_dim0)
# print (sss)
state = torch.from_numpy(state).float()
if args.num_stack > 1:
current_state[:, :-shape_dim0] = current_state[:, shape_dim0:]
current_state[:, -shape_dim0:] = state
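# Frame-stacking sketch (illustrative): with num_stack=4 and one-channel frames,
# current_state holds the channels [f_{t-3}, f_{t-2}, f_{t-1}, f_t]; the two lines
# above shift the older frames down by shape_dim0 channels and write the newest
# observation into the last shape_dim0 channels.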
state = envs.reset()
update_current_state(state)
rollouts.states[0].copy_(current_state)
# These variables are used to compute average rewards for all processes.
episode_rewards = torch.zeros([num_processes_total, 1])
final_rewards = torch.zeros([num_processes_total, 1])
if args.cuda:
current_state = current_state.cuda()
rollouts.cuda()
if args.algo == 'ppo':
old_model = copy.deepcopy(actor_critic)
from arguments import ewc, ewc_lambda, ewc_interval
afs_per_m = []
afs_offset = [0.0]*gtn_M
afs_loss_list = []
basic_loss_list = []
episode_reward_rec = 0.0
one = torch.FloatTensor([1]).cuda()
mone = one * -1
'''main loop: one iteration per policy update'''
for j in range(num_updates):
for step in range(args.num_steps):
if ewc == 1:
try:
states_store = torch.cat([states_store, rollouts.states[step].clone()], 0)
except Exception as e:
states_store = rollouts.states[step].clone()
# Sample actions
'''act() returns the value estimate and the sampled action for the current states'''
value, action = actor_critic.act(Variable(rollouts.states[step], volatile=True))
cpu_actions = action.data.squeeze(1).cpu().numpy()
# Observe reward and next state
state, reward, done = envs.step(cpu_actions)
'''record the last 100 episodes rewards'''
episode_reward_rec += reward
episode_reward_rec = rec_last_100_epi_reward(episode_reward_rec,done)
reward = torch.from_numpy(np.expand_dims(np.stack(reward), 1)).float()
'''reward has length num_processes_total, not batch size'''
# print ((reward).size())
# print (done)
# print (sss)
episode_rewards += reward
################
# rec_last_100_epi_reward(reward,done)
# episode_reward_ppo += reward[0]
# If done then clear the history of observations. final_rewards holds the completed-episode returns accumulated over one whole rollout of num_steps
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
final_rewards *= masks
final_rewards += (1 - masks) * episode_rewards
episode_rewards *= masks
if args.cuda:
masks = masks.cuda()
if current_state.dim() == 4:
current_state *= masks.unsqueeze(2).unsqueeze(2)
else:
current_state *= masks
update_current_state(state)
rollouts.insert(step, current_state, action.data, value.data, reward, masks)
next_value = actor_critic(Variable(rollouts.states[-1], volatile=True))[0].data
if hasattr(actor_critic, 'obs_filter'):
actor_critic.obs_filter.update(rollouts.states[:-1].view(-1, *obs_shape))
rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.tau)
if args.algo in ['a2c', 'acktr']:
# reset gradient
optimizer.zero_grad()
# forward
values, action_log_probs, dist_entropy, conv_list = actor_critic.evaluate_actions(Variable(rollouts.states[:-1].view(-1, *obs_shape)), Variable(rollouts.actions.view(-1, action_shape)))
# pre-process
values = values.view(args.num_steps, num_processes_total, 1)
action_log_probs = action_log_probs.view(args.num_steps, num_processes_total, 1)
# compute afs loss
afs_per_m_temp, afs_loss = actor_critic.get_afs_per_m(
action_log_probs=action_log_probs,
conv_list=conv_list,
)
if len(afs_per_m_temp)>0:
afs_per_m += [afs_per_m_temp]
if (afs_loss is not None) and (afs_loss.data.cpu().numpy()[0]!=0.0):
afs_loss.backward(mone, retain_graph=True)
afs_loss_list += [afs_loss.data.cpu().numpy()[0]]
advantages = Variable(rollouts.returns[:-1]) - values
value_loss = advantages.pow(2).mean()
action_loss = -(Variable(advantages.data) * action_log_probs).mean()
final_loss_basic = value_loss * args.value_loss_coef + action_loss - dist_entropy * args.entropy_coef
ewc_loss = None
if j != 0:
if ewc == 1:
ewc_loss = actor_critic.get_ewc_loss(lam=ewc_lambda)
if ewc_loss is None:
final_loss = final_loss_basic
else:
final_loss = final_loss_basic + ewc_loss
# print (final_loss_basic.data.cpu().numpy()[0])
# final_loss_basic
basic_loss_list += [final_loss_basic.data.cpu().numpy()[0]]
final_loss.backward()
if args.algo == 'a2c':
nn.utils.clip_grad_norm(actor_critic.parameters(), args.max_grad_norm)
optimizer.step()
elif args.algo == 'ppo':
advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
old_model.load_state_dict(actor_critic.state_dict())
if hasattr(actor_critic, 'obs_filter'):
old_model.obs_filter = actor_critic.obs_filter
for _ in range(args.ppo_epoch):
sampler = BatchSampler(SubsetRandomSampler(range(num_processes_total * args.num_steps)), args.batch_size * num_processes_total, drop_last=False)
for indices in sampler:
indices = torch.LongTensor(indices)
if args.cuda:
indices = indices.cuda()
states_batch = rollouts.states[:-1].view(-1, *obs_shape)[indices]
actions_batch = rollouts.actions.view(-1, action_shape)[indices]
return_batch = rollouts.returns[:-1].view(-1, 1)[indices]
# Reshape to do in a single forward pass for all steps
values, action_log_probs, dist_entropy, conv_list = actor_critic.evaluate_actions(Variable(states_batch), Variable(actions_batch))
_, old_action_log_probs, _, old_conv_list= old_model.evaluate_actions(Variable(states_batch, volatile=True), Variable(actions_batch, volatile=True))
ratio = torch.exp(action_log_probs - Variable(old_action_log_probs.data))
adv_targ = Variable(advantages.view(-1, 1)[indices])
surr1 = ratio * adv_targ
surr2 = torch.clamp(ratio, 1.0 - args.clip_param, 1.0 + args.clip_param) * adv_targ
action_loss = -torch.min(surr1, surr2).mean() # PPO's pessimistic surrogate (L^CLIP)
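# In equation form (illustrative restatement of the three lines above): with
# r_t = exp(log_prob_new - log_prob_old) and advantage estimate A_t,
#     L_CLIP = E[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ],  eps = args.clip_param
# and action_loss is the negative of this clipped surrogate.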
value_loss = (Variable(return_batch) - values).pow(2).mean()
optimizer.zero_grad()
final_loss_basic = (value_loss + action_loss - dist_entropy * args.entropy_coef)
basic_loss_list += [final_loss_basic.data.cpu().numpy()[0]]
final_loss_basic.backward()
optimizer.step()
rollouts.states[0].copy_(rollouts.states[-1])
# if j % int(num_updates/2-10) == 0 and args.save_dir != "":
if j % args.save_interval == 0 and args.save_dir != "":
save_path = os.path.join(args.save_dir, args.algo)
try:
os.makedirs(save_path)
except OSError:
pass
# A really ugly way to save a model to CPU
save_model = actor_critic
if args.cuda:
save_model = copy.deepcopy(actor_critic).cpu()
torch.save(save_model, os.path.join(save_path, args.env_name + ".pt"))
import pickle
with open(os.path.join(save_path, args.env_name + "_last_100_reward"), "wb") as f:
pickle.dump(reward_dict, f)
if j % args.log_interval == 0:
print("Updates {}, num frames {}, mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}, entropy {:.5f}, value loss {:.5f}, policy loss {:.5f}".
format(j, (j + 1) * args.num_processes * args.num_steps,
final_rewards.mean(),
final_rewards.median(),
final_rewards.min(),
final_rewards.max(), -dist_entropy.data[0],
value_loss.data[0], action_loss.data[0]))
try:
print("ewc loss {:.5f}".
format(ewc_loss.data.cpu().numpy()[0]))
except Exception as e:
pass
if j > 5 and j % args.vis_interval == 0 and args.vis:
''' load from the folder'''
for ii in range(len(mt_env_id_dic_selected)):
log_dir = args.log_dir+mt_env_id_dic_selected[ii]+'/'
win[ii] = visdom_plot(viz, win[ii], log_dir, mt_env_id_dic_selected[ii], args.algo)
plot_dic = reward_dict
for plot_name in plot_dic.keys():
# if
# what user entered
if doStaircase:
if len(thisInfo[dlgLabelsOrdered.index('staircaseTrials')]) >0:
staircaseTrials = int( thisInfo[ dlgLabelsOrdered.index('staircaseTrials') ] ) #convert string to integer
print('staircaseTrials entered by user=',staircaseTrials)
logging.info('staircaseTrials entered by user=' + str(staircaseTrials))
if len(thisInfo[dlgLabelsOrdered.index('easyTrials')]) >0:
prefaceStaircaseTrialsN = int( thisInfo[ dlgLabelsOrdered.index('easyTrials') ] ) #convert string to integer
print('prefaceStaircaseTrialsN entered by user=',thisInfo[dlgLabelsOrdered.index('easyTrials')])
logging.info('prefaceStaircaseTrialsN entered by user=' + str(prefaceStaircaseTrialsN))
else: #not doing staircase
group = thisInfo[dlgLabelsOrdered.index('group')]
trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
print('trialsPerCondition=',trialsPerCondition)
logging.info('trialsPerCondition =' + str(trialsPerCondition))
defaultNoiseLevel = 0 #int (thisInfo[ dlgLabelsOrdered.index('defaultNoiseLevel') ])
else:
print('User cancelled from dialog box.')
logging.flush()
core.quit()
if not demo:
allowGUI = False
print(group)
test = '0'
speedSeq = ['slow','fast','slow','fast']
if (group=='1'):
conditionSeq = ['short','short','long','long']
whichWordsSeq = ['1','3','5','7']
if (group=='2'):
conditionSeq = ['short','short','long','long']
whichWordsSeq = ['3','5','7','1']
if (group=='3'):
conditionSeq = ['long','long','short','short']
whichWordsSeq = ['5','7','1','3']
if (group=='4'):
conditionSeq = ['long','long','short','short']
whichWordsSeq = ['7','1','3','5']
if (group=='5'):
conditionSeq = ['short','short','long','long']
whichWordsSeq = ['2','4','6','8']
if (group=='6'):
conditionSeq = ['short','short','long','long']
whichWordsSeq = ['4','6','8','2']
if (group=='7'):
conditionSeq = ['long','long','short','short']
whichWordsSeq = ['6','8','2','4']
if (group=='8'):
conditionSeq = ['long','long','short','short']
whichWordsSeq = ['8','2','4','6']
if (group=='9'):
conditionSeq = ['long','long','short','short']
whichWordsSeq = ['1','3','5','7']
if (group=='10'):
conditionSeq = ['long','long','short','short']
whichWordsSeq = ['3','5','7','1']
if (group=='11'):
conditionSeq = ['short','short','long','long']
whichWordsSeq = ['5','7','1','3']
if (group=='12'):
conditionSeq = ['short','short','long','long']
whichWordsSeq = ['7','1','3','5']
if (group=='13'):
conditionSeq = ['long','long','short','short']
whichWordsSeq = ['2','4','6','8']
if (group=='14'):
conditionSeq = ['long','long','short','short']
whichWordsSeq = ['4','6','8','2']
if (group=='15'):
conditionSeq = ['short','short','long','long']
whichWordsSeq = ['6','8','2','4']
if (group=='16'):
conditionSeq = ['short','short','long','long']
whichWordsSeq = ['8','2','4','6']
for counter in range(4):
condition = conditionSeq[counter]
whichWords = whichWordsSeq[counter]
speed = speedSeq[counter]
#condition = 'long'
#whichWords = '1'
#speed = 'slow'
if (condition == 'short'):
rec_duration = 2
else:
rec_duration = 4
if (whichWords == '1'):
wordFile1 = "W1.txt"
wordFile2 = "W2.txt"
targetFile = "TargetList1.txt"
if (whichWords == '2'):
wordFile1 = "W2.txt"
wordFile2 = "W1.txt"
targetFile = "TargetList2.txt"
if (whichWords == '3'):
wordFile1 = "W3.txt"
wordFile2 = "W4.txt"
targetFile = "TargetList3.txt"
if (whichWords == '4'):
wordFile1 = "W4.txt"
wordFile2 = "W3.txt"
targetFile = "TargetList4.txt"
if (whichWords == '5'):
wordFile1 = "W5.txt"
wordFile2 = "W6.txt"
targetFile = "TargetList5.txt"
if (whichWords == '6'):
wordFile1 = "W6.txt"
wordFile2 = "W5.txt"
targetFile = "TargetList6.txt"
if (whichWords == '7'):
wordFile1 = "W7.txt"
wordFile2 = "W8.txt"
targetFile = "TargetList7.txt"
if (whichWords == '8'):
wordFile1 = "W8.txt"
wordFile2 = "W7.txt"
targetFile = "TargetList8.txt"
if (test!='0'):
condition = 'short'
rec_duration = 1.8
wordFile1 = "test" + test + ".txt"
wordFile2 = "test" + test + "b.txt"
targetFile = "TestTargets" + test +".txt"
print(wordFile1)
# reads word in from external source
wordList1 = open(wordFile1)
wordList2 = open(wordFile2)
TargetList = open(targetFile)
wordList1 = [x.rstrip() for x in wordList1.readlines()]
wordList2 = [x.rstrip() for x in wordList2.readlines()]
TargetList = [x.rstrip() for x in TargetList.readlines()]
lettersUnparsed = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".upper()
for i in range(len(wordList1)):
wordList1[i] = wordList1[i].replace(" ", "") #delete spaces
for i in range(len(wordList2)):
wordList2[i] = wordList2[i].replace(" ", "") #delete spaces
for i in range(len(TargetList)):
TargetList[i] = TargetList[i].replace(" ", "") #delete spaces
if (test=='0'):
if (speed == 'fast'):
#wordList1 = wordList1[0:int((len(wordList1)/2))]
#wordList2 = wordList2[0:int((len(wordList2)/2))]
#TargetList = TargetList[0:int((len(TargetList)/2))]
#print(wordList1)
#print(len(wordList1))
wordDur = wordDur1
elif (speed == 'slow'):
#wordList1 = wordList1[int((len(wordList1)/2)):len(wordList1)]
#wordList2 = wordList2[int((len(wordList2)/2)):len(wordList2)]
#TargetList = TargetList[int((len(TargetList)/2)):len(TargetList)]
wordDur = wordDur2
#####shuffles wordlist for each participant.
print(wordList1)
print(wordList2)
print(TargetList)
print(len(wordList1))
print(len(TargetList))
tl = np.column_stack((wordList1, wordList2,TargetList))
np.random.shuffle(tl[10:,:])
print(tl)
wordList1 = tl[:,0]
wordList2 = tl[:,1]
TargetList = tl[:,2]
print(wordList1)
print(wordList2)
print(TargetList)
#set location of stimuli
#letter size 2.5 deg
SOAms = wordDur #Battelli, Agosta, Goodbourn, Holcombe mostly using 133 #KR: was 233
#Minimum SOAms should be 84 because any shorter, I can't always notice the second ring when lag1. 71 in Martini E2 and E1b (actually he used 66.6 but that's because he had a crazy refresh rate of 90 Hz)
letterDurMs = wordDur #85
#Was 17. 23.6 in Martini E2 and E1b (actually he used 22.2 but that's because he had a crazy refresh rate of 90 Hz)
ISIms = SOAms - letterDurMs
letterDurFrames = int( np.floor(letterDurMs / (1000./refreshRate)) )
cueDurFrames = letterDurFrames
ISIframes = int( np.floor(ISIms / (1000./refreshRate)) )
#have set ISIframes and letterDurFrames to integer that corresponds as close as possible to originally intended ms
rateInfo = 'total SOA=' + str(round( (ISIframes + letterDurFrames)*1000./refreshRate, 2)) + ' or ' + str(ISIframes + letterDurFrames) + ' frames, comprising\n'
rateInfo+= 'ISIframes ='+str(ISIframes)+' or '+str(ISIframes*(1000./refreshRate))+' ms and letterDurFrames ='+str(letterDurFrames)+' or '+str(round( letterDurFrames*(1000./refreshRate), 2))+'ms'
logging.info(rateInfo); print(rateInfo)
trialDurFrames = int( numWordsInStream*(ISIframes+letterDurFrames) ) #trial duration in frames
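#Worked example of the frame arithmetic above (illustrative, assuming a 60 Hz display
#and wordDur = 300 ms): letterDurFrames = floor(300/16.67) = 18, ISIms = 0, ISIframes = 0,
#so the achieved SOA is 18*16.67 ~ 300 ms; if numWordsInStream were 24, then
#trialDurFrames = 24*(0+18) = 432 frames, i.e. about 7.2 s per trial.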
onsets = []; offsets = []; inferredOnsets = []
#rec_duration = 3.8 #1.8 for short #2.8 for both #3.8 for long
stimClock = core.Clock()
durs=list()
if counter== 0:
myWin = openMyStimWindow()
#set up output data file, log file, copy of program code, and logging
infix = ''
if doStaircase:
infix = 'staircase_'
fileName = subject + '_' + infix+ timeAndDateStr+ '_' + str(counter)
fileNameWithPath = os.path.join(dataPath, fileName)
if not demo and not exportImages:
dataFile = open(fileNameWithPath+'.txt', 'w')
saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileNameWithPath + '.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
logFname = fileNameWithPath+'.log'
ppLogF = logging.LogFile(logFname,
filemode='w',#if you set this to 'a' it will append instead of overwriting
level=logging.INFO)#errors, data and warnings will be sent to this logfile
if demo or exportImages:
dataFile = sys.stdout; logF = sys.stdout
logging.console.setLevel(logging.ERROR) #only show this level messages and higher
logging.console.setLevel(logging.ERROR) #DEBUG means set console to receive nearly all messges, INFO next level, EXP, DATA, WARNING and ERROR
if fullscr and not demo and not exportImages:
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=False, ## True means report on everything
userProcsDetailed=True, ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
#randomSeed='set:42', ## a way to record, and optionally set, a random seed of type str for making reproducible random sequences
## None -> default
## 'time' will use experimentRuntime.epoch as the value for the seed, different value each time the script is run
##'set:time' --> seed value is set to experimentRuntime.epoch, and initialized: random.seed(info['randomSeed'])
##'set:42' --> set & initialize to str('42'), and will give the same sequence of random.random() for all runs of the script
)
logging.info(runInfo)
logging.flush()
textStimuliStream1 = list()
textStimuliStream2 = list() #used for second, simultaneous RSVP stream
def calcAndPredrawStimuli(wordList1,wordList2,nDoneMain):
global textStimuliStream1, textStimuliStream2
del textStimuliStream1[:]
del textStimuliStream2[:]
if len(wordList1) < numWordsInStream:
print('Error! Your word list must have at least ',numWordsInStream,'strings')
#for i in (0): #draw the words that will be used on this trial, the first numWordsInStream of the shuffled list
word1 = wordList1[ nDoneMain ]# #[ idxsIntoWordList[i] ]
word2 = wordList2[ nDoneMain ]
textStimulusStream1 = visual.TextStim(myWin,text=word1,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='right',alignVert='center',units='deg',font='Arial',autoLog=autoLogging)
textStimulusStream2 = visual.TextStim(myWin,text=word2,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='left',alignVert='center',units='deg',font='Arial',autoLog=autoLogging)
textStimulusStream1.setPos([-wordEccentricity,0]) #left
textStimuliStream1.append(textStimulusStream1)
textStimulusStream2.setPos([wordEccentricity,0]) #right
textStimuliStream2.append(textStimulusStream2)
#create click sound for keyboard
try:
click=sound.Sound('406__tictacshutup__click-1-d.wav')
except: #in case file missing, create inferior click manually
logging.warn('Could not load the desired click sound file, instead using manually created inferior click')
click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015, bits=8)
if showRefreshMisses:
fixSizePix = 32 #2.6 #make fixation bigger so flicker more conspicuous
else: fixSizePix = 32
fixColor = [1,1,1]
redfixColor = [0,1,1]
if exportImages: fixColor= [0,0,0]
fixatnNoiseTexture = np.round( np.random.rand(fixSizePix//4,fixSizePix//4) ,0 ) *2.0-1 #Can counterphase flicker noise texture to create salient flicker if you break fixation
fixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False)
fixationBlank= visual.PatchStim(myWin, tex= -1*fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False) #reverse contrast
fixationPoint= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(letterColor),size=2,units='pix',autoLog=autoLogging)
redfixationPoint= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=('white'),size=2,units='pix',autoLog=autoLogging)
respPromptStim = visual.TextStim(myWin,pos=(0, -.9),colorSpace='rgb',color=(letterColor),alignHoriz='center', alignVert='center',height=.05,units='norm',autoLog=autoLogging)
acceptTextStim = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb',color=(letterColor),alignHoriz='center', alignVert='center',height=.05,units='norm',autoLog=autoLogging)
acceptTextStim.setText('Hit ENTER to accept. Backspace to edit')
respStim = visual.TextStim(myWin,pos=(0,0),colorSpace='rgb',color=(letterColor),alignHoriz='center', alignVert='center',height=1,units='deg',autoLog=autoLogging)
#clickSound, badKeySound = stringResponse.setupSoundsForResponse()
requireAcceptance = False
nextText = visual.TextStim(myWin,pos=(0, .1),colorSpace='rgb',color = (letterColor),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(0,.2),colorSpace='rgb',color=
#!/usr/bin/env python3
import os
import pandas as pd
from scattertable import scattertable
import numpy as np
import glob
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from matplotlib import markers
def subtract_era(datadi, era5di, file_dict,r, model_name):
''' matrix hist - ERA5 value
hist is a matrix, ERA5 is just one value '''
for parameter in file_dict.keys():
df_name = file_dict[parameter][0]
MIP = file_dict[parameter][1][:]
var = file_dict[parameter][2][:]
timeslice=file_dict[parameter][5]
reg = file_dict[parameter][3][:]
sce = file_dict[parameter][4][:]
vartitel=file_dict[parameter][6]
kind=file_dict[parameter][7]
FileName=df_name+MIP[0]+'_'+r+'_'+sce[0]+timeslice+var[0]+'.csv'
print(FileName)
FileEra= df_name+MIP[1]+'_'+r+'_'+sce[1]+timeslice+var[1]+'.csv'
print(FileEra)
InFile=os.path.join(datadi,FileName)
InEra=os.path.join(era5di,FileEra)
print('InEra:',era5di)
dfhist=pd.read_csv(InFile,index_col='model_member')
df_era5 =pd.read_csv(InEra ,index_col='model_member')
dfdiffm=pd.DataFrame()
colm=['ANN','DJF','MAM','JJA','SON']
for c in range(5):
diff=pd.DataFrame()
col=colm[c]
value=df_era5.iloc[0][col]
print('value= ', value)
dfhist.loc[dfhist[col]> 1e+19 ] = np.nan
diff=dfhist[col] - value
if var[1] == 'tp':
# change unit to mm/day
value=value * 86400
diff=(dfhist[col] * 86400) - value
print('value= ', value)
print('precipitation')
if kind == '%':
diff=diff * (100 / value)
dfdiffm=pd.concat([dfdiffm,diff],axis=1,join='outer',sort=False)
dfdiffm.columns = colm
# Alphabetical order
dfdiffm=dfdiffm.sort_index()
# MEAN diff
dfdiffmm=dfdiffm.copy()
dfdiffmm.loc['MEAN']=dfdiffmm.mean(numeric_only=True, axis=0)
#
if MIP[0] == 'CMIP5':
t2=pd.DataFrame()
for i in model_name:
t=dfdiffm.loc[(dfdiffm.index == i)]
t2=pd.concat([t2,t],axis=0)
t2m=t2.copy()
t2m.loc['MEAN-CMIP5-CORDEX']=t2m.mean(numeric_only=True, axis=0)
EC=t2m.loc[(t2m.index == 'MEAN-CMIP5-CORDEX')]
# append to dfdiffmm
dfdiffmm=pd.concat([dfdiffmm,EC],axis=0)
return dfdiffmm
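# Usage sketch (illustrative; 'WCE' stands in for whatever region code r carries):
#     dfdiff = subtract_era(datadi, era5di, file_dict, 'WCE', model_name)
# returns a DataFrame of model-minus-ERA5 differences per season, with a MEAN row
# (and, for CMIP5, a MEAN-CMIP5-CORDEX row) appended. For precipitation (var[1]=='tp')
# both sides are multiplied by 86400 to report mm/day, and with kind=='%' the
# difference is expressed relative to ERA5: diff = (model - era5) * 100 / era5.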
def add_mean_hm(InData,MIP,model_name):
''' add MEAN per month or season over all models for heatmap'''
InDatam=InData.copy()
InDatam.loc['MEAN']=InDatam.mean(numeric_only=True, axis=0)
#
if MIP == 'CMIP5':
t2=pd.DataFrame()
for i in model_name:
t=InDatam.loc[(InDatam.index == i)]
t2=pd.concat([t2,t],axis=0)
t2m=t2.copy()
t2m.loc['MEAN-CMIP5-CORDEX']=t2m.mean(numeric_only=True, axis=0)
#print(t2m)
EC=t2m.loc[(t2m.index == 'MEAN-CMIP5-CORDEX')]
# append
InDatam=pd.concat([InDatam,EC],axis=0)
#print(InDatam)
return InDatam
def add_mean(df):
''' mean over df.Scenario '''
MeanData=pd.DataFrame()
for s in df['Scenario'].unique():
print(s)
sel=df.loc[(df['Scenario'] == s)]
selm=sel.copy()
selm.loc['MEAN']=selm.mean(numeric_only=True, axis=0)
selm.at['MEAN','experiment'] = 'MEAN'
selm.at['MEAN','Scenario'] = s
MeanData=pd.concat([MeanData,selm],axis=0)
dfm=MeanData.loc[(MeanData['experiment'] == 'MEAN')]
print(dfm)
return dfm
# ******************************************************************
# TODO: somehow merge the add_column_* helpers into a single function
# ******************************************************************
def add_column_for_plot(df):
''' add columns marking the CMIP mini-ensembles ...'''
eurocordex = scattertable('cordex-gcm')
cmip6_lbc = scattertable('cmip6-gcm-lbc')
# Create a column of names matching the entries in the CMIP5-CORDEX list:
df['model_member_experiment_id'] = df['model_member'].str.cat(df['experiment_id'],sep="_")
df['experimentn'] = df['model_member_experiment_id'].map(eurocordex)
df['experiment1'] = [x if x == 'CMIP5-CORDEX' else z for x,z in zip(df['experimentn'],df['project_id'])]
# clean up
df.drop(['experimentn'], inplace=True, axis=1)
# A simulation in the CMIP5-CORDEX ensemble is also a member of the CMIP5 ensemble and needs to be added again
sel=df.loc[(df['experiment1'] == 'CMIP5-CORDEX')]
sel=sel.replace({'experiment1':'CMIP5-CORDEX'},{'experiment1':'CMIP5'}, regex=True)
dfn=df.append(sel)
# CMIP6: create a column of names matching the entries in the CMIP6-LBC list:
dfn['experimentn'] = dfn['model_member_experiment_id'].map(cmip6_lbc)
dfn['experiment'] = [x if x == 'CMIP6-LBC' else z for x,z in zip(dfn['experimentn'],dfn['experiment1'])]
# clean up
dfn.drop(['experimentn'], inplace=True, axis=1)
dfn.drop(['experiment1'], inplace=True, axis=1)
# the runs that are in the LBC ensemble are also in the CMIP ensemble, so they have to occur twice
sel=dfn.loc[(dfn['experiment'] == 'CMIP6-LBC')]
sel=sel.replace({'experiment':'CMIP6-LBC'},{'experiment':'CMIP6'}, regex=True)
df=dfn.append(sel)
df['xcategory']=df['experiment'].str.cat(df['experiment_id'],sep="-")
return(df)
def add_column_for_plot_season(df):
''' add columns marking the CMIP mini-ensembles ...'''
eurocordex = scattertable('cordex-gcm')
cmip6_lbc = scattertable('cmip6-gcm-lbc')
# CMIP5: create a column of names matching the entries in the CORDEX_GCM / cmip6-gcm-lbc lists:
df['model_member_experiment_id'] = df['model_member'].str.cat(df['experiment_id'],sep="_")
df['experimentn'] = df['model_member_experiment_id'].map(eurocordex)
df['experiment1'] = [x if x == 'CMIP5-CORDEX' else z for x,z in zip(df['experimentn'],df['project_id'])]
# clean up
df.drop(['experimentn'], inplace=True, axis=1)
# the runs that are in the LBC ensemble are also in the CMIP ensemble, so they have to occur twice
sel=df.loc[(df['experiment1'] == 'CMIP5-CORDEX')]
sel['experiment1']= 'CMIP5'
dfn=df.append(sel)
# CMIP6: create a column of names matching the entries in the CORDEX_GCM / cmip6-gcm-lbc lists:
dfn['experimentn'] = dfn['model_member_experiment_id'].map(cmip6_lbc)
dfn['experiment'] = [x if x == 'CMIP6-LBC' else z for x,z in zip(dfn['experimentn'],dfn['experiment1'])]
# clean up
dfn.drop(['experimentn'], inplace=True, axis=1)
dfn.drop(['experiment1'], inplace=True, axis=1)
# the runs that are in the LBC ensemble are also in the CMIP ensemble, so they have to occur twice
sel=dfn.loc[(dfn['experiment'] == 'CMIP6-LBC')]
sel['experiment']= 'CMIP6'
df=dfn.append(sel)
df['xcategory']=df['season'].str.cat(df['experiment'],sep="-")
return(df)
def add_column_for_plot_cordex(df):
''' the CORDEX name of GCM and RCM needs to be split first'''
# CORDEX
df_tmp= df['model_member'].str.split(pat="_",expand=True)
column_name=['RCM','GCMn','CORDEX','member']
df_tmp.columns = column_name
df=pd.concat([df,df_tmp],axis=1)
df['GCM']=df['GCMn'].str.cat(df['member'],sep="_")
#replace project_id with rcm name
df=df.drop(['project_id'], axis=1)
df=df.drop(['GCMn'], axis=1)
df=df.drop(['CORDEX'], axis=1)
df=df.drop(['member'], axis=1)
df['project_id']=df['RCM']
# sometimes the same model has a different name
df['project_id'].replace('CLMcom-BTU-CCLM4-8-17','CLMcom-CCLM4-8-17',inplace=True)
df['project_id'].replace('IPSL-INERIS-WRF381P','IPSL-WRF381P',inplace=True)
# this looks silly, but some institutes have two names, and so on
df['GCM'].replace('CNRM-CERFACS-CNRM-CM5_r1i1p1','CNRM-CM5_r1i1p1', inplace=True)
df['GCM'].replace('MIROC-MIROC5_r1i1p1','MIROC5_r1i1p1', inplace=True)
df['GCM'].replace('NCC-NorESM1-M_r1i1p1','NorESM1-M_r1i1p1', inplace=True)
df['GCM'].replace('CCCma-CanESM2_r1i1p1','CanESM2_r1i1p1', inplace=True)
df['GCM'].replace('MPI-M-MPI-ESM-LR_r1i1p1','MPI-ESM-LR_r1i1p1', inplace=True)
df['GCM'].replace('MPI-M-MPI-ESM-LR_r2i1p1','MPI-ESM-LR_r2i1p1', inplace=True)
df['GCM'].replace('MPI-M-MPI-ESM-LR_r3i1p1','MPI-ESM-LR_r3i1p1', inplace=True)
df['GCM'].replace('IPSL-IPSL-CM5A-LR_r1i1p1','IPSL-CM5A-LR_r1i1p1', inplace=True)
df['GCM'].replace('IPSL-IPSL-CM5A-MR_r1i1p1','IPSL-CM5A-MR_r1i1p1', inplace=True)
df['GCM'].replace('NOAA-GFDL-GFDL-ESM2G_r1i1p1','GFDL-ESM2G_r1i1p1', inplace=True)
df['GCM'].replace('ICHEC-EC-EARTH_r12i1p1','EC-EARTH_r12i1p1', inplace=True)
df['GCM'].replace('ICHEC-EC-EARTH_r1i1p1','EC-EARTH_r1i1p1', inplace=True)
df['GCM'].replace('ICHEC-EC-EARTH_r3i1p1','EC-EARTH_r3i1p1', inplace=True)
df['GCM'].replace('MOHC-HadGEM2-ES_r1i1p1','HadGEM2-ES_r1i1p1', inplace=True)
return(df)
def add_column_for_plot_cmip_cordex(df):
''' add column for color ...'''
eurocordex = scattertable('cordex-gcm')
#Create a column of names matching the entries in the CORDEX_GCM list:
df['model_member_experiment_id'] = df['model_member'].str.cat(df['experiment_id'],sep="_")
df['experimentn'] = df['model_member_experiment_id'].map(eurocordex)
df['GCM'] = [z if x == 'CMIP5-CORDEX' else y for x,y,z in zip(df['experimentn'],df['project_id'],df['model_member'])]
df=df.drop(['experimentn'], axis=1)
return(df)
def add_column_for_plot_rcp(df,rcp):
''' add column for color ...'''
eurocordex = scattertable('cordex-gcm')
# Create a column of names matching the entries in the CORDEX_GCM list:
# this will eventually determine the shape
df['model_member_experiment_id'] = df['model_member'].str.cat(df['experiment_id'],sep="_")
df['experimentn'] = df['model_member_experiment_id'].map(eurocordex)
df['experiment'] = [x if x == 'CMIP5-CORDEX' else z for x,z in zip(df['experimentn'],df['project_id'])]
# if all scenarios are plotted, the color of CORDEX should not change; see the else branch
print('rcp fuer if =',rcp)
if rcp != 'all':
#df['experimentnn'] = df['model_member_experiment_id'].map(eurocordex)
#df['experimentnnn'] = df['experiment_id'].str.cat(df['experimentnn'],sep="-")
df['experimentnnn'] = df['experiment_id'].str.cat(df['experimentn'],sep="-")
df['Scenario'] = [x if x == 'CMIP5-CORDEX' else z for x,z in zip(df['experimentn'],df['experiment_id'])]
#df['Scenario'] = [z if x == 'CMIP5-CORDEX' else y for x,y,z in zip(df['experimentn'],df['experiment_id'],df['experimentnnn'])]
# clean up
df.drop(['experimentnnn'], axis=1)
else:
df['Scenario'] = df['experiment_id']
#Kevin: 3 special models picked for Kevin; this could also be done more simply
#kevin=scattertable('cmip6-gcm-lbc-kevin')
#kevin2=scattertable('cmip6-gcm-lbc-kevin2')
#df['experimentnk'] = df['model_member_experiment_id'].map(kevin) #kevin
#df['experimentnk2'] = df['model_member_experiment_id'].map(kevin2) #kevin
#df['Scenario'] = [z if x == 'ssp370_LBC' else y for x,y,z in zip(df['experimentnk2'],df['experiment_id'],df['experimentnk'])]
# clean up
df=df.drop(['experimentn'], axis=1)
return(df)
def plot_heatmap_season_1x3(reg,dataframes, plotname, var_dict,plot_titel, model_name, model_mean):
print ('reg: ', reg)
for parameter in var_dict.keys():
Einheit = var_dict[parameter][0]
# y-range:
lim_min = var_dict[parameter][1][0]
lim_max = var_dict[parameter][1][1]
color = var_dict[parameter][2]
format = var_dict[parameter][3] #fmt=".1f"
for s in range(0,len(reg)):
print('shape: ',dataframes[s].shape)
# figsize is important, make a reasonable choice
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(25,7))
#First
df=dataframes[0]
mask = df.isna()
ax1=sns.heatmap(df.T, mask=mask.T, ax=ax1, square=True, annot=True,fmt=format, annot_kws={"size": 8},
cmap=color,cbar_kws=dict(pad=0.003,shrink= 1,label= Einheit,),vmin=lim_min, vmax=lim_max,linewidths=0.5, linecolor='white' ,xticklabels=False, yticklabels=True)
ax1.set_facecolor('lightgrey')
ax1.tick_params(bottom=False) ## other options are left,right and top
ax1.set_ylabel(reg[0])
ax1.set_xlabel(' ')
ax1.set_title(plot_titel, color='k', fontsize=12)
#Second
df=dataframes[1]
mask = df.isna()
ax2=sns.heatmap(df.T, mask=mask.T, ax=ax2, square=True, annot=True,fmt=format, annot_kws={"size": 8},
cmap=color,cbar_kws=dict(pad=0.003,shrink= 1,label= Einheit),vmin=lim_min, vmax=lim_max,linewidths=0.5, linecolor='white' ,xticklabels=False, yticklabels=True)
ax2.set_facecolor('lightgrey')
ax2.tick_params(bottom=False)
ax2.set_ylabel('WCE') #reg[1])
ax2.set_xlabel(' ')
#Third
df=dataframes[2]
mask = df.isna()
ax3=sns.heatmap(df.T, mask=mask.T,ax=ax3, square=True, annot=True,fmt=format, annot_kws={"size": 8},
cmap=color,cbar_kws=dict(pad=0.003,shrink= 1,label= Einheit), vmin=lim_min, vmax=lim_max,linewidths=0.5, linecolor='white' ,xticklabels=True, yticklabels=True)
# no cbar: cbar=False
ax3.set_facecolor('lightgrey')
ax3.set_ylabel(reg[2])
ax3.set_xlabel(' ')
plt.tight_layout()
for label in ax3.get_xticklabels():
if label.get_text() in model_name:
#label.set_size(13)
label.set_weight("bold")
label.set_color("red")
for label in ax3.get_xticklabels():
if label.get_text() in model_mean:
label.set_weight("bold")
print('here is the file:' ,plotname)
#plt.show()
plt.savefig(plotname, bbox_inches="tight")
return
def rename(reg):
print('replace:',reg)
reg=reg.replace('1','BI') # PRUDENCE regions are sometimes given only as numbers
reg=reg.replace('2','IP')
reg=reg.replace('3','FR')
reg=reg.replace('4','ME')
reg=reg.replace('5','SC')
reg=reg.replace('6','AL')
reg=reg.replace('7','MD')
reg=reg.replace('8','EA')
reg=reg.replace('CEU','WCE') # central Europe: CEU has been renamed to WCE
print ('reg=',reg)
return reg
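# Examples (illustrative): rename('1') -> 'BI', rename('CEU') -> 'WCE';
# codes that match no rule, e.g. 'NEU', pass through unchanged.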
def plot_heatmap_sce_1x3(sce, dataframes, plotname, var_dict,plot_titel, model_name, dt):
#Name df
for parameter in var_dict.keys():
Einheit = var_dict[parameter][0]
# y-range:
lim_min = var_dict[parameter][1][0]
lim_max = var_dict[parameter][1][1]
color = var_dict[parameter][2]
format = var_dict[parameter][3] #fmt=".1f"
for s in range(0,len(sce)):
print('shape: ',dataframes[s].shape)
# figsize is important, make a reasonable choice
if dt == 'mon':
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(30,10))
else:
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(25,7))
#First
df=dataframes[0]
mask = df.isna()
im=sns.heatmap(df.T, mask=mask.T, ax=ax1, square=True, annot=True,fmt=format, annot_kws={"size": 6},
cmap=color,cbar_kws=dict(pad=0.003,shrink= 1,label= Einheit,),vmin=lim_min, vmax=lim_max,linewidths=0.5, linecolor='white' ,xticklabels=False, yticklabels=True)
ax1.set_facecolor('lightgrey')
ax1.tick_params(bottom=False) ## other options are left,right and top
ax1.set_ylabel(sce[0])
ax1.set_xlabel(' ')
ax1.set_title(plot_titel, color='k', fontsize=12)
#ax1.text(32,-0.5,Einheit)
#Second
df=dataframes[1]
mask = df.isna()
ax2=sns.heatmap(df.T, mask=mask.T, ax=ax2, square=True, annot=True,fmt=format, annot_kws={"size": 6},
cmap=color,cbar_kws=dict(pad=0.003,shrink= 1,label= Einheit),vmin=lim_min, vmax=lim_max,linewidths=0.5, linecolor='white' ,xticklabels=False, yticklabels=True)
ax2.set_facecolor('lightgrey')
ax2.tick_params(bottom=False)
ax2.set_ylabel(sce[1])
ax2.set_xlabel(' ')
#Third
df=dataframes[2]
mask = df.isna()
ax3=sns.heatmap(df.T, mask=mask.T,ax=ax3, square=True, annot=True,fmt=format, annot_kws={"size": 6},
cmap=color,cbar_kws=dict(pad=0.003,shrink= 1,label= Einheit), vmin=lim_min, vmax=lim_max,linewidths=0.5, linecolor='white' ,xticklabels=True, yticklabels=True)
# no cbar: cbar=False
ax3.set_facecolor('lightgrey')
ax3.set_ylabel(sce[2])
ax3.set_xlabel(' ')
plt.tight_layout()
model_mean=("MEAN","MEAN-CMIP5-CORDEX")
for label in ax3.get_xticklabels():
if label.get_text() in model_name: #== "MPI-ESM-LR_r3i1p1":
#label.set_size(13)
label.set_weight("bold")
# Project_final_ver/admin/heuristics.py
"""
COMP30024 Artificial Intelligence, Semester 1, 2021
Project Part B: Playing the Game
Team Name: Admin
Team Member: <NAME> (955797) & <NAME> (693241)
This module contains the functions of our search strategy, which decide the player's next action based on an evaluation function.
"""
import math
from copy import deepcopy
from admin.pathfinding import *
INITIAL_WEIGHT = 5
def eval_board(my_token_array, opponent_token_array, enemy_throws_left, my_throw_range, op_throw_range, num_of_my_defeated_throws, enemies_defeated, player_faction, my_throws_left):
'''
Evaluate the board based on our evaluation function.
'''
sum_score = 0
if player_faction == "upper":
allowed_r_coord = [i for i in range(4 - my_throw_range, 5)]
allowed_r_coord_opp = [i for i in range(-4 , -3 + op_throw_range)]
else:
allowed_r_coord = [i for i in range(-4 , -3 + my_throw_range)]
allowed_r_coord_opp = [i for i in range(4 - op_throw_range, 5)]
allowed_q_coordinate = [i for i in range(-4 , 5)]
overlap_adjust_coef = 1
for i in range(len(my_token_array)):
for j in range(len(my_token_array)):
if i != j:
if (my_token_array[i][1] == my_token_array[j][1]) and (my_token_array[i][0] == my_token_array[j][0]):
overlap_adjust_coef = overlap_adjust_coef/10
raw_coordinates = []
raw_coordinates_opp = []
for r in allowed_r_coord:
for q in allowed_q_coordinate:
raw_coordinates.append((r,q))
for r in allowed_r_coord_opp:
for q in allowed_q_coordinate:
raw_coordinates_opp.append((r,q))
allowed_coordinates = raw_coordinates.copy()
allowed_coordinates_opp = raw_coordinates_opp.copy()
for coordinate in raw_coordinates:
if not boarder_check(coordinate):
allowed_coordinates.remove(coordinate)
for coordinate in raw_coordinates_opp:
if not boarder_check(coordinate):
allowed_coordinates_opp.remove(coordinate)
exposed_to_throw = 1
for my_token in my_token_array:
exposed_to_enemy_num = 0.5
weight_coef = INITIAL_WEIGHT
if my_token[2]:
closest_goal = gen_closest_goal(my_token[1], my_token[2])
closest_dist_goal = calculate_distance(my_token[1], closest_goal)
if closest_dist_goal == 1.0:
weight_coef *= 5
weight_coef -= 1
else:
closest_dist_goal = 0
weight_coef += 1
if my_token[3]:
closest_threat = gen_closest_goal(my_token[1], my_token[3])
closest_dist_threat = calculate_distance(my_token[1], closest_threat)
for threat in my_token[3]:
if calculate_distance(my_token[1], threat) == 1:
exposed_to_enemy_num += 1
weight_coef -= 1
else:
closest_dist_threat = 1000
if player_faction == "upper":
if ((my_token[1][0] - max(allowed_r_coord_opp)) < closest_dist_threat) and ((my_token[1][0] - max(allowed_r_coord_opp)) > 0):
closest_dist_threat = my_token[1][0] - max(allowed_r_coord_opp)
if closest_dist_threat == 1.0:
weight_coef /= 10
elif (my_token[1][0] - max(allowed_r_coord_opp)) <= 0 :
exposed_to_throw += 1
exposed_to_enemy_num += 0.5
closest_dist_threat = 0.1
else:
if (-(my_token[1][0] - min(allowed_r_coord_opp)) < closest_dist_threat) and (-(my_token[1][0] - min(allowed_r_coord_opp)) > 0):
closest_dist_threat = -(my_token[1][0] - min(allowed_r_coord_opp))
if closest_dist_threat == 1.0:
weight_coef /= 10
elif (-(my_token[1][0] - min(allowed_r_coord_opp))) <= 0 :
exposed_to_throw += 1
exposed_to_enemy_num += 0.5
closest_dist_threat = 0.1
cover = 0
for friendly in my_token_array:
if friendly[0] == protective_type(my_token[0]):
adjancent_tokens = get_token_adjacency(friendly[1],my_token_array)
potential_movements = gen_next_all_potential_moves(friendly[1])
truly_adjacent = adjancent_tokens.copy()
if len(adjancent_tokens) != 0:
for adjacent_pos in adjancent_tokens:
if calculate_distance(friendly[1], adjacent_pos) != 1.0:
truly_adjacent.remove(adjacent_pos)
if len(truly_adjacent) != 0:
potential_swing_moves = gen_all_potential_swing_moves(friendly[1], truly_adjacent)
in_swing = set(potential_swing_moves)
in_move = set(potential_movements)
in_swing_but_not_in_move = in_swing - in_move
potential_movements = potential_movements + list(in_swing_but_not_in_move)
for move in potential_movements:
if move == my_token[1]:
cover = 1
weight_coef += cover
enemy_covered = 0
if my_token[2]:
for enemy in opponent_token_array:
if enemy[1] == closest_goal:
target_info = enemy
break
for enemy in opponent_token_array:
if enemy[0] == protective_type(target_info[0]):
adjancent_tokens = get_token_adjacency(enemy[1],opponent_token_array)
potential_movements = gen_next_all_potential_moves(enemy[1])
truly_adjacent = adjancent_tokens.copy()
if len(adjancent_tokens) != 0:
for adjacent_pos in adjancent_tokens:
if calculate_distance(enemy[1], adjacent_pos) != 1.0:
truly_adjacent.remove(adjacent_pos)
if len(truly_adjacent) != 0:
potential_swing_moves = gen_all_potential_swing_moves(enemy[1], truly_adjacent)
in_swing = set(potential_swing_moves)
in_move = set(potential_movements)
in_swing_but_not_in_move = in_swing - in_move
potential_movements = potential_movements + list(in_swing_but_not_in_move)
for move in potential_movements:
if move == target_info[1]:
enemy_covered = -1
weight_coef += enemy_covered
covering_friendly = 0
adjancent_tokens = get_token_adjacency(my_token[1],my_token_array)
potential_movements = gen_next_all_potential_moves(my_token[1])
truly_adjacent = adjancent_tokens.copy()
if len(adjancent_tokens) != 0:
for adjacent_pos in adjancent_tokens:
if calculate_distance(my_token[1], adjacent_pos) != 1.0:
truly_adjacent.remove(adjacent_pos)
if len(truly_adjacent) != 0:
potential_swing_moves = gen_all_potential_swing_moves(my_token[1], truly_adjacent)
in_swing = set(potential_swing_moves)
in_move = set(potential_movements)
in_swing_but_not_in_move = in_swing - in_move
potential_movements = potential_movements + list(in_swing_but_not_in_move)
for friendly in my_token_array:
if friendly[1] in potential_movements:
if friendly[0] == protective_type(my_token[0]):
covering_friendly = +1
weight_coef += covering_friendly
if (my_token[1] in allowed_coordinates_opp) and (enemy_throws_left > 0) :
weight_coef -= 1
else:
weight_coef += 1
sum_score = sum_score + weight_coef * ((((enemy_throws_left + len(opponent_token_array) + 1)/(enemy_throws_left + 1)) / exposed_to_throw) * closest_dist_threat / (exposed_to_enemy_num * 2) - closest_dist_goal * exposed_to_enemy_num)
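# Readable restatement of the per-token term above (illustrative paraphrase):
#     safety  = ((enemy_throws_left + len(opponent_token_array) + 1) / (enemy_throws_left + 1))
#               / exposed_to_throw * closest_dist_threat / (2 * exposed_to_enemy_num)
#     pursuit = closest_dist_goal * exposed_to_enemy_num
#     sum_score += weight_coef * (safety - pursuit)
# i.e. a token scores higher for staying away from threats and lower the further
# it sits from its nearest target, both scaled by the situational weight_coef.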
penal_coef = 1
my_token_type = []
op_token_type = []
for my_token in my_token_array:
my_token_type.append(my_token[0])
for opponent_token in opponent_token_array:
op_token_type.append(opponent_token[0])
for opponent_token in opponent_token_array:
if counter_type(opponent_token[0]) not in my_token_type:
penal_coef = penal_coef/4
winning_coef = 1
if ("s" in my_token_type) and ("p" not in op_token_type):
winning_coef += 1
if ("p" in my_token_type) and ("s" not in op_token_type):
winning_coef += 1
if ("r" in my_token_type) and ("p" not in op_token_type):
winning_coef += 1
num_of_s = 0
num_of_p = 0
num_of_r = 0
for types in my_token_type:
if types == "s":
num_of_s += 1
elif types == "p":
num_of_p += 1
elif types == "r":
num_of_r += 1
if num_of_s > 1:
winning_coef /= num_of_s
if num_of_p > 1:
winning_coef /= num_of_p
if num_of_r > 1:
winning_coef /= num_of_r
if enemy_throws_left < my_throws_left:
winning_coef *= (my_throws_left - enemy_throws_left + 1)
elif enemy_throws_left > my_throws_left:
winning_coef /= (enemy_throws_left - my_throws_left + 1)
sum_score = sum_score - enemies_defeated + num_of_my_defeated_throws - my_throws_left
sum_score = sum_score * overlap_adjust_coef * penal_coef * winning_coef
return sum_score
def if_score(player_action, opponent_action, my_token_array, opponent_token_array, enemy_throws_left, my_throw_range, op_throw_range, num_of_my_defeated_throws, enemies_defeated, player_faction, my_throws_left):
penal_coef = 1
enemy_throws_left_if = enemy_throws_left
my_throws_left_if = my_throws_left
token_types =[]
my_throw_range_if = my_throw_range
op_throw_range_if = op_throw_range
for token in my_token_array:
token_types.append(token[0])
if player_action == None:
return 0
if player_action[0] == "THROW":
if enemy_throws_left_if == 0:
penal_coef *= 2
else:
penal_coef = penal_coef/8
if (len(my_token_array) > 3) and (player_action[1] in token_types):
penal_coef /= 100
else:
if player_action[1] == player_action[2]:
penal_coef = 0
my_token_array_if = deepcopy(my_token_array)
opponent_token_array_if = deepcopy(opponent_token_array)
num_of_my_defeated_throws_if = num_of_my_defeated_throws
enemies_defeated_if = enemies_defeated
if player_action[0] == "THROW":
my_throw_range_if += 1
my_throws_left_if -= 1
my_token_array_if.append([player_action[1], (player_action[2][0],player_action[2][1]),[],[]])
else:
for position in my_token_array_if:
if position[1] == player_action[1]:
position[1] = player_action[2]
if opponent_action:
if opponent_action[0] == "THROW":
op_throw_range_if += 1
enemy_throws_left_if -= 1
opponent_token_array_if.append([opponent_action[1],(opponent_action[2][0],opponent_action[2][1]),[],[]]) # a list ['token_type',(r,q),[list_of_goal_pos],[list_of_threat_pos]]
else:
for position in opponent_token_array_if:
if position[1] == opponent_action[1]:
position[1] = opponent_action[2]
# Check for s-p-r dies together situation
my_token_array_if_copy = my_token_array_if.copy()
opponent_token_array_if_copy = opponent_token_array_if.copy()
for i in range(len(my_token_array_if_copy)):
for j in range(len(opponent_token_array_if_copy)):
for k in range(len(my_token_array_if_copy)):
if i != k:
if my_token_array_if_copy[i][1] == my_token_array_if_copy[k][1] == opponent_token_array_if_copy[j][1]:
if (my_token_array_if_copy[i][0] != opponent_token_array_if_copy[j][0]) and (my_token_array_if_copy[k][0] != opponent_token_array_if_copy[j][0]) and (my_token_array_if_copy[i][0] != my_token_array_if_copy[k][0]):
if my_token_array_if_copy[i] in my_token_array_if:
num_of_my_defeated_throws_if += 1
my_token_array_if.remove(my_token_array_if_copy[i])
if my_token_array_if_copy[k] in my_token_array_if:
num_of_my_defeated_throws_if += 1
my_token_array_if.remove(my_token_array_if_copy[k])
if opponent_token_array_if_copy[j] in opponent_token_array_if:
enemies_defeated_if += 1
opponent_token_array_if.remove(opponent_token_array_if_copy[j])
my_token_array_if_copy = my_token_array_if.copy()
opponent_token_array_if_copy = opponent_token_array_if.copy()
for i in range(len(opponent_token_array_if_copy)):
for j in range(len(my_token_array_if_copy)):
for k in range(len(opponent_token_array_if_copy)):
if i != k:
if opponent_token_array_if_copy[i][1] == opponent_token_array_if_copy[k][1] == my_token_array_if_copy[j][1]:
if (opponent_token_array_if_copy[i][0] != my_token_array_if_copy[j][0]) and (opponent_token_array_if_copy[k][0] != my_token_array_if_copy[j][0]) and (opponent_token_array_if_copy[i][0] != opponent_token_array_if_copy[k][0]):
if opponent_token_array_if_copy[i] in opponent_token_array_if:
enemies_defeated_if += 1
opponent_token_array_if.remove(opponent_token_array_if_copy[i])
if opponent_token_array_if_copy[k] in opponent_token_array_if:
enemies_defeated_if += 1
opponent_token_array_if.remove(opponent_token_array_if_copy[k])
if my_token_array_if_copy[j] in my_token_array_if:
num_of_my_defeated_throws_if += 1
my_token_array_if.remove(my_token_array_if_copy[j])
my_token_array_if_copy = my_token_array_if.copy()
opponent_token_array_if_copy = opponent_token_array_if.copy()
for my_token in my_token_array_if_copy:
my_token_type = my_token[0]
my_token_pos = my_token[1]
for enemy_token in opponent_token_array_if_copy:
enemy_token_type = enemy_token[0]
enemy_token_pos = enemy_token[1]
if my_token_pos == enemy_token_pos:
if (enemy_token_type == "r") and (my_token_type == "s"):
if my_token in my_token_array_if:
my_token_array_if.remove(my_token)
penal_coef = penal_coef/3
num_of_my_defeated_throws_if += 1
break
elif (enemy_token_type == "s") and (my_token_type == "p"):
num_of_my_defeated_throws_if += 1
penal_coef = penal_coef/3
if my_token in my_token_array_if:
my_token_array_if.remove(my_token)
break
elif (enemy_token_type == "p") and (my_token_type == "r"):
num_of_my_defeated_throws_if += 1
penal_coef = penal_coef/3
if my_token in my_token_array_if:
my_token_array_if.remove(my_token)
break
elif (enemy_token_type == "s") and (my_token_type == "r"):
enemies_defeated_if += 1
penal_coef = penal_coef * 5
if enemy_token in opponent_token_array_if:
opponent_token_array_if.remove(enemy_token)
break
elif (enemy_token_type == "p") and (my_token_type == "s"):
enemies_defeated_if += 1
penal_coef = penal_coef * 5
if enemy_token in opponent_token_array_if:
opponent_token_array_if.remove(enemy_token)
break
elif (enemy_token_type == "r") and (my_token_type == "p"):
enemies_defeated_if += 1
penal_coef = penal_coef * 5
if enemy_token in opponent_token_array_if:
opponent_token_array_if.remove(enemy_token)
break
my_token_array_if_copy = my_token_array_if.copy()
for token_1 in my_token_array_if_copy:
token_1_type = token_1[0]
token_1_pos = token_1[1]
for token_2 in my_token_array_if_copy:
token_2_type = token_2[0]
token_2_pos = token_2[1]
if token_1_pos == token_2_pos:
if (token_1_type == "r") and (token_2_type == "s"):
num_of_my_defeated_throws_if += 1
penal_coef = penal_coef/3
if token_2 in my_token_array_if:
my_token_array_if.remove(token_2)
break
elif (token_1_type == "s") and (token_2_type == "p"):
num_of_my_defeated_throws_if += 1
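# --- Illustrative sketch (not part of the original file) ---
# The triple loops above detect the situation where one hex holds all three
# symbols, in which case every token on that hex is defeated. Restated as a
# hypothetical standalone check for a single cell:
def cell_is_mutual_elimination(tokens_on_cell):
    """tokens_on_cell: iterable of (symbol, position) pairs sharing one hex.
    True when "r", "p" and "s" are all present, so every token there dies."""
    symbols = {symbol for symbol, _ in tokens_on_cell}
    return symbols == {"r", "p", "s"}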
<filename>SpaDecon/DEC.py
from __future__ import division
import os
#import tensorflow as tf
#tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from . SAE import SAE # load Stacked autoencoder
from . preprocessing import change_to_continuous
from time import time
import numpy as np
from keras.engine.topology import Layer, InputSpec
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, ReduceLROnPlateau,History
from keras.layers import Dense, Input
from keras.models import Model
from keras.optimizers import SGD
from keras import callbacks
from keras.initializers import VarianceScaling
from sklearn.cluster import KMeans
import scanpy as sc
import pandas as pd
from sklearn.metrics import normalized_mutual_info_score,adjusted_rand_score
import keras.backend as K
from scipy.spatial import distance
from scipy.stats import entropy
import warnings
warnings.filterwarnings('ignore')
class ClusteringLayer(Layer): # Re-define a lot of built-in Keras Layer functions
"""
Clustering layer converts input sample (feature) to soft label, i.e. a vector that represents the probability of the
sample belonging to each cluster. The probability is calculated with student's t-distribution.
# Example
```
model.add(ClusteringLayer(n_clusters=10))
```
# Arguments
n_clusters: number of clusters.
weights: list of Numpy arrays with shape `(n_clusters, n_features)` which represents the initial cluster centers.
alpha: parameter in Student's t-distribution. Defaults to 1.0.
# Input shape
2D tensor with shape: `(n_samples, n_features)`.
# Output shape
2D tensor with shape: `(n_samples, n_clusters)`.
"""
def __init__(self, n_clusters, weights=None, alpha=1.0, **kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(ClusteringLayer, self).__init__(**kwargs)
self.n_clusters = n_clusters
self.alpha = alpha
self.initial_weights = weights
self.input_spec = InputSpec(ndim=2)
def build(self, input_shape):
assert len(input_shape) == 2
input_dim = input_shape[1]
self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_dim))
self.clusters = self.add_weight((self.n_clusters, input_dim), initializer='glorot_uniform', name='clustering')
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def call(self, inputs, **kwargs): # The activation function for the clustering layer
""" Student's t-distribution, the same kernel as used in the t-SNE algorithm.
q_ij = (1 + ||x_i - u_j||^2 / alpha)^(-(alpha+1)/2), then normalize it over j.
Arguments:
inputs: the variable containing data, shape=(n_samples, n_features)
Return:
q: student's t-distribution, or soft labels for each sample. shape=(n_samples, n_clusters)
"""
q = 1.0 / (1.0 + (K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters), axis=2) / self.alpha))
q **= (self.alpha + 1.0) / 2.0
q = K.transpose(K.transpose(q) / K.sum(q, axis=1))
return q
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) == 2
return input_shape[0], self.n_clusters
def get_config(self):
config = {'n_clusters': self.n_clusters}
base_config = super(ClusteringLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
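# --- Illustrative sketch (not part of the original file) ---
# The ClusteringLayer above turns an embedding z_i into soft assignments with a
# Student's t kernel: q_ij = (1 + ||z_i - mu_j||^2 / alpha)^(-(alpha+1)/2),
# normalised over the clusters j. The same computation in plain NumPy (np is
# imported above; the function name and shapes here are illustrative only):
def _soft_assign_numpy_sketch(z, centroids, alpha=1.0):
    """z: (n_samples, n_features); centroids: (n_clusters, n_features)."""
    dist_sq = ((z[:, None, :] - centroids[None, :, :]) ** 2).sum(axis=2)
    q = 1.0 / (1.0 + dist_sq / alpha)
    q **= (alpha + 1.0) / 2.0
    return q / q.sum(axis=1, keepdims=True)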
class DEC(object):
def __init__(self,
dims,
x_all,
x_train, # input matrix, row sample, col predictors
y=None, # if provided will trained with supervised
alpha=1.0,
init='glorot_uniform', #initialization method
n_clusters=None, # number of clusters; if provided, the cluster centers will be initialized by K-means
louvain_resolution=1.0, # resolution for louvain
n_neighbors=10, # number of neighbors used to build the graph when initializing centroids with louvain
pretrain_epochs=200, # epoch for autoencoder
ae_weights=None, #ae_
actinlayer1="tanh",# activation for the last layer in encoder, and first layer in the decoder
is_stacked=True,
transfer_feature=None,
model_weights=None,
y_trans=None,
softmax=False,
):
super(DEC, self).__init__()
self.dims = dims
self.x_all=x_all #feature n*p, n:number of cells, p: number of genes
self.x_train = x_train
self.y=y # for supervised
self.y_trans=y_trans
self.input_dim = dims[0]
self.n_stacks = len(self.dims) - 1
self.is_stacked=is_stacked
self.resolution=louvain_resolution
self.alpha = alpha
self.actinlayer1=actinlayer1
self.transfer_feature=transfer_feature
self.model_weights=model_weights
self.softmax=softmax
self.pretrain_epochs=pretrain_epochs
if self.transfer_feature is None:
self.pretrain(n_neighbors=n_neighbors,epochs=self.pretrain_epochs,n_clusters=n_clusters)
else:
self.pretrain_transfer(n_neighbors=n_neighbors,model_weights=self.model_weights,features=transfer_feature,epochs=self.pretrain_epochs,n_clusters=n_clusters,y_trans=self.y_trans)
def pretrain(self, optimizer='adam', epochs=200, n_neighbors=10,batch_size=256,n_clusters=None):
#print("Doing DEC: pretrain")
sae=SAE(dims=self.dims,drop_rate=0.2,batch_size=batch_size,actinlayer1=self.actinlayer1)# batch_size
#print('...Pretraining source network...')
# begin pretraining
t0 = time()
if self.is_stacked:
sae.fit(self.x_all,epochs=epochs)
else:
sae.fit2(self.x_all,epochs=epochs)
self.autoencoder=sae.autoencoders
self.encoder=sae.encoder
#print(' ...Pretraining time: ', time() - t0, 'seconds...')
self.pretrained = True
#build dec model and initialize model
features=self.extract_features(self.x_train)
features=np.asarray(features)
if self.y is None: # Train data not labeled
if isinstance(n_clusters,int): # Number of clusters known, use k-means
print("...number of clusters has been specified, initializing cluster centroids using K-Means")
kmeans = KMeans(n_clusters=n_clusters, n_init=20)
Y_pred_init = kmeans.fit_predict(features)
self.init_pred= np.copy(Y_pred_init)
self.n_clusters=n_clusters
cluster_centers=kmeans.cluster_centers_
self.init_centroid=cluster_centers
else: # Number of clusters unknown, use an unsupervised method
print("...number of clusters is not known, initializing cluster centroids using louvain")
adata=sc.AnnData(features)
sc.pp.neighbors(adata, n_neighbors=n_neighbors)
sc.tl.louvain(adata,resolution=self.resolution)
Y_pred_init=adata.obs['louvain']
self.init_pred=np.asarray(Y_pred_init,dtype=int)
features=pd.DataFrame(features,index=np.arange(0,features.shape[0]))
Group=pd.Series(self.init_pred,index=np.arange(0,features.shape[0]),name="Group")
Mergefeature=pd.concat([features,Group],axis=1)
cluster_centers=np.asarray(Mergefeature.groupby("Group").mean())
self.n_clusters=cluster_centers.shape[0]
self.init_centroid=cluster_centers
print("The shape of cluster_centers",cluster_centers.shape)
else: # train data is labeled
#print("y known, initialize Cluster centroid using y")
# build dec model
features=pd.DataFrame(features,index=np.arange(0,features.shape[0]))
Group=pd.Series(self.y.values,index=np.arange(0,features.shape[0]),name="Group")
Mergefeature=pd.concat([features,Group],axis=1)
cluster_centers=np.asarray(Mergefeature.groupby("Group").mean())
self.n_clusters=cluster_centers.shape[0]
self.init_centroid=cluster_centers
#print("The shape of cluster_center is",cluster_centers.shape)
if not self.softmax: # Use dec method to do clustering
clustering_layer = ClusteringLayer(self.n_clusters, name='clustering')(self.encoder.output)
else: # Use softmax to do clustering
clustering_layer=Dense(self.n_clusters,kernel_initializer="glorot_uniform",name="clustering",activation='softmax')(self.encoder.output)
self.model = Model(inputs=self.encoder.input, outputs=clustering_layer)
def pretrain_transfer(self,features,model_weights,y_trans=None,optimizer="adam",n_neighbors=10,epochs=200,batch_size=32,n_clusters=None):
#y_trans is a numpy array
#print("Doing DEC: pretrain_transfer")
if isinstance(n_clusters,int):
print("...number of clusters has been specified, initializing cluster centroids using K-Means")
kmeans = KMeans(n_clusters=n_clusters, n_init=20)
Y_pred_init = kmeans.fit_predict(features)
self.init_pred= np.copy(Y_pred_init)
self.n_clusters=n_clusters
cluster_centers=kmeans.cluster_centers_
self.init_centroid=[cluster_centers]
else:
#print("The shape of features is",features.shape)
if y_trans is not None and y_trans.shape[0]==features.shape[0]:
#print("The shape of y_trans is",y_trans.shape)
#print("...predicted y_test known, use it to get n_cliusters and init_centroid")
self.init_pred=y_trans
features=pd.DataFrame(features,index=np.arange(0,features.shape[0]))
Group=pd.Series(y_trans,index=np.arange(0,features.shape[0]),name="Group")
Mergefeature=pd.concat([features,Group],axis=1)
cluster_centers=np.asarray(Mergefeature.groupby("Group").mean())
self.n_clusters=cluster_centers.shape[0]
self.init_centroid=cluster_centers
else:
print("...number of clusters is not known, initializing cluster centroids using louvain")
#can be replaced by other clustering methods
adata=sc.AnnData(features)
sc.pp.neighbors(adata, n_neighbors=n_neighbors) #louvain step1
sc.tl.louvain(adata,resolution=self.resolution) #louvain step2
Y_pred_init=adata.obs['louvain']
self.init_pred=np.asarray(Y_pred_init,dtype=int)
features=pd.DataFrame(features,index=np.arange(0,features.shape[0]))
Group=pd.Series(self.init_pred,index=np.arange(0,features.shape[0]),name="Group")
Mergefeature=pd.concat([features,Group],axis=1)
cluster_centers=np.asarray(Mergefeature.groupby("Group").mean())
self.n_clusters=cluster_centers.shape[0]
self.init_centroid=cluster_centers
print("The shape of cluster_centers",cluster_centers.shape[0])
sae=SAE(dims=self.dims,drop_rate=0.2,batch_size=batch_size,actinlayer1=self.actinlayer1)# batch_size
self.autoencoder=sae.autoencoders
self.encoder=sae.encoder
clustering_layer=ClusteringLayer(self.n_clusters, name='clustering')(self.encoder.output) # use dec to do clustering
self.model=Model(self.encoder.input,outputs=clustering_layer)
#print("The length layers of self.model",len(self.model.layers))
for i in range(len(self.model.layers)-2):
self.model.layers[i+1].set_weights(model_weights[i+1])
self.model.get_layer("clustering").set_weights([self.init_centroid])
# fine tuning
def load_weights(self, weights): # load weights of DEC model
self.model.load_weights(weights)
def extract_features(self, x):
return self.encoder.predict(x)
def predict(self, x): # predict cluster labels using the output of clustering layer
q = self.model.predict(x, verbose=0)
return q.argmax(1)
@staticmethod
def target_distribution(q):
weight = q ** 2 / q.sum(0)
return (weight.T / weight.sum(1)).T
def compile(self, optimizer='sgd', loss='kld'):
self.model.compile(optimizer=optimizer, loss=loss)
def fit(self,x, maxiter=2e3, epochs_fit=10,batch_size=256, tol=1e-3): # unsupervised
print("Doing DEC: fit")
# step 1: initialize weights by louvain or K-means
self.model.get_layer(name='clustering').set_weights([self.init_centroid])
y_pred_last = np.copy(self.init_pred)
# Step 2: deep clustering
# logging file
#y_pred_last=self.init_pred
loss = 0
index = 0
index_array = np.arange(x.shape[0])
for ite in range(int(maxiter)):
q = self.model.predict(x, verbose=0)
p = self.target_distribution(q) # update the auxiliary target distribution p
# evaluate the clustering performance
y_pred = q.argmax(1)
# check stop criterion
delta_label = np.sum(y_pred != y_pred_last).astype(np.float32) / y_pred.shape[0]
y_pred_last = np.copy(y_pred)
if ite > 0 and delta_label < tol:
print('delta_label ', delta_label, '< tol ', tol)
print('Reached tolerance threshold. Stopped training.')
break
print("The value of delta_label at iteration",str(ite+1),"is",delta_label,">= tol",tol)
#train on whole dataset on prespecified batch_size
callbacks=[EarlyStopping(monitor='loss',min_delta=10e-4,patience=4,verbose=0,mode='auto')]
self.model.fit(x=x,y=p,epochs=epochs_fit,batch_size=batch_size,callbacks=callbacks,shuffle=True,verbose=False)
y0=pd.Series(y_pred)
print("The final prediction cluster is:")
print(y0.value_counts())
Embeded_z=self.encoder.predict(x)
return Embeded_z,q
#Show the trajectory of the centroid during iterations
def fit_trajectory(self,x, maxiter=2e3, epochs_fit=10,batch_size=256, tol=1e-2, celltypes = None, threshold=1/30): # unsupervised
#print("Doing DEC: fit_trajectory")
#step1 initial weights by louvain,or Kmeans
self.model.get_layer(name='clustering').set_weights([self.init_centroid])
y_pred_last = np.copy(self.init_pred)
# Step 2: deep clustering
# logging file
#y_pred_last=self.init_pred
loss = 0
index = 0
index_array = np.arange(x.shape[0])
trajectory_z=[] #trajectory embedding
trajectory_l=[] #trajectory label
js = []
centroids_first = self.model.layers[-1].get_weights()
centroids_diff_all = []
for i in range(len(centroids_first[0])-1):
for j in range(i+1, len(centroids_first[0])):
centroids_diff_all.append(np.sqrt(((centroids_first[0][i]-centroids_first[0][j])**2).sum()))
print('centroids_diff_all', centroids_diff_all)
print(len(centroids_diff_all))
print(self.init_centroid)
# print(centroids_first)
# self.model.layers[-1].trainable = False
# print(self.model.summary())
# print(self.model.layers[-1].trainable == False)
weights = self.model.get_weights()
for ite in range(int(maxiter)):
old_weights = weights.copy()
weights = self.model.get_weights()
# print(weights)
# print(self.model.layers[-1].trainable == False)
centroids = self.model.layers[-1].get_weights()
# print(centroids)
q = self.model.predict(x, verbose=0)
# for i in range(len(q)):
# if sum(q[i]>threshold)==0:
# continue
# for j in range(len(q[i])):
# #if q[i][j]<0.1:
# if q[i][j]<threshold:
# q[i][j]=0
# q[i] = q[i]/q[i].sum()
p = self.target_distribution(q) # update the auxiliary target distribution p
# evaluate the clustering performance
#kl = np.array([[np.where(p[i]!=0, p[i]*np.log(p[i]/q[i]),0) for i in range(len(p))][j].sum() for j in range(len(p))]).sum()
#print(kl)
# print(entropy(p,q).sum())
#print(q.shape)
#q = pd.DataFrame(q)
#q.columns = list(celltypes)
y_pred = q.argmax(1)
#celltypes = list(np.sort(np.unique(y_pred)))
celltypes = [celltypes[i] for i in list(np.sort(np.unique(y_pred)))]
#print(celltypes)
# check stop criterion
#delta_label = np.sum(y_pred != y_pred_last).astype(np.float32) / y_pred.shape[0]
#y_pred_last = np.copy(y_pred)
#if ite > 0 and delta_label < tol:
# print('delta_label ', delta_label, '< tol ', tol)
# print('Reached tolerance threshold. Stopped training.')
# break
#print("The value of delta_label of current",str(ite+1),"th iteration is",delta_label,">= tol",0.01)
##train on whole dataset on prespecified batch_size
if ite == 0:
q_last = np.copy(q)
js_last = 1000000
callbacks=[EarlyStopping(monitor='loss',min_delta=10e-4,patience=4,verbose=0,mode='auto')]
self.model.fit(x=x,y=p,epochs=epochs_fit,batch_size=batch_size,callbacks=callbacks,shuffle=True,verbose=False)
#if ite < 10:
# js.append(distance.jensenshannon(q_last, q).sum())
# q_last = np.copy(q)
#print(js)
# callbacks=[EarlyStopping(monitor='loss',min_delta=10e-4,patience=4,verbose=0,mode='auto')]
# self.model.fit(x=x,y=p,epochs=epochs_fit,batch_size=batch_size,callbacks=callbacks,shuffle=True,verbose=False)
# continue
if ite>0:
centroids_diff = [np.sqrt(((centroids[0][i]-centroids_first[0][i])**2).sum()) for i in range(len(centroids[0]))]
print('centroids_diff: ', centroids_diff)
#js.append(distance.jensenshannon(q_last, q).sum())
js = distance.jensenshannon(q_last, q).sum()
delta_js = js_last-js
q_last = np.copy(q)
#print(js_last)
#print(js)
js_last = np.copy(js)
#print(js[ite-10:ite-5])
#print(js[ite-5:])
#delta_js = np.mean(js[ite-10:ite-5]) - np.mean(js[ite-5:])
#delta_js = js[ite-1]-js[ite-2]
#if delta_js < 0.001 and delta_js>0 and np.mean(js[ite-2:])<np.mean(js[0:3]):
#if delta_js < 0.01 and delta_js>0 and
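# --- Illustrative sketch (not part of the original file) ---
# DEC.fit() above alternates between computing soft assignments q, sharpening
# them into the auxiliary target p (target_distribution: p_ij proportional to
# q_ij^2 / f_j, with f_j the soft cluster frequency), training on (x, p), and
# stopping once the fraction of samples whose argmax cluster changed drops
# below `tol`. The two bookkeeping steps in plain NumPy (illustrative names):
def _target_distribution_numpy_sketch(q):
    weight = q ** 2 / q.sum(axis=0)              # q_ij^2 / f_j
    return weight / weight.sum(axis=1, keepdims=True)
def _label_change_fraction_sketch(q, y_pred_last):
    y_pred = q.argmax(axis=1)
    return float((y_pred != y_pred_last).mean()), y_pred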
if the etag you provide matches the resource's current etag value.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/users/{userId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_user got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def generate_totp_seed(self, user_id, mfa_totp_device_id, **kwargs):
"""
Generate seed for the MFA TOTP device.
:param str user_id: (required)
The OCID of the user.
:param str mfa_totp_device_id: (required)
The OCID of the MFA TOTP device.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.MfaTotpDevice`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}/actions/generateSeed"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"generate_totp_seed got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id,
"mfaTotpDeviceId": mfa_totp_device_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="MfaTotpDevice")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="MfaTotpDevice")
def get_authentication_policy(self, compartment_id, **kwargs):
"""
Gets the authentication policy for the given tenancy. You must specify your tenant\u2019s OCID as the value for
the compartment ID (remember that the tenancy is simply the root compartment).
:param str compartment_id: (required)
The OCID of the compartment.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.AuthenticationPolicy`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/authenticationPolicies/{compartmentId}"
method = "GET"
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_authentication_policy got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"compartmentId": compartment_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="AuthenticationPolicy")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="AuthenticationPolicy")
def get_compartment(self, compartment_id, **kwargs):
"""
Gets the specified compartment's information.
This operation does not return a list of all the resources inside the compartment. There is no single
API operation that does that. Compartments can contain multiple types of resources (instances, block
storage volumes, etc.). To find out what's in a compartment, you must call the \"List\" operation for
each resource type and specify the compartment's OCID as a query parameter in the request. For example,
call the :func:`list_instances` operation in the Cloud Compute
Service or the :func:`list_volumes` operation in Cloud Block Storage.
:param str compartment_id: (required)
The OCID of the compartment.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.Compartment`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/compartments/{compartmentId}"
method = "GET"
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"compartmentId": compartment_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Compartment")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Compartment")
def get_dynamic_group(self, dynamic_group_id, **kwargs):
"""
Gets the specified dynamic group's information.
:param str dynamic_group_id: (required)
The OCID of the dynamic group.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.DynamicGroup`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/dynamicGroups/{dynamicGroupId}"
method = "GET"
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_dynamic_group got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"dynamicGroupId": dynamic_group_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="DynamicGroup")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="DynamicGroup")
def get_group(self, group_id, **kwargs):
"""
Gets the specified group's information.
This operation does not return a list of all the users in the group.
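# --- Illustrative usage sketch (not part of the generated client above) ---
# As the docstrings describe, every operation accepts an optional per-call
# retry_strategy that overrides the client-level strategy. A hypothetical
# caller (config values are placeholders, not taken from this file):
#
#   import oci
#   config = oci.config.from_file()                       # ~/.oci/config
#   identity = oci.identity.IdentityClient(config)
#   response = identity.get_compartment(
#       compartment_id=config["tenancy"],
#       retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)  # or oci.retry.NoneRetryStrategy()
#   print(response.data.name)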
<reponame>wuyongwen/XX-Net
# -*- coding: utf-8 -*-
"""
port from hyper/http20/stream for async
remove push support
increase init window size to improve performance
~~~~~~~~~~~~~~~~~~~
Objects that make up the stream-level abstraction of hyper's HTTP/2 support.
Conceptually, a single HTTP/2 connection is made up of many streams: each
stream is an independent, bi-directional sequence of HTTP headers and data.
Each stream is identified by a monotonically increasing integer, assigned to
the stream by the endpoint that initiated the stream.
"""
import threading
from hyper.common.headers import HTTPHeaderMap
from hyper.packages.hyperframe.frame import (
FRAME_MAX_LEN, FRAMES, HeadersFrame, DataFrame, PushPromiseFrame,
WindowUpdateFrame, ContinuationFrame, BlockedFrame, RstStreamFrame
)
from hyper.http20.exceptions import ProtocolError, StreamResetError
from hyper.http20.util import h2_safe_headers
from hyper.http20.response import strip_headers
from hyper.common.util import to_host_port_tuple, to_native_string, to_bytestring
import simple_http_client
from http_common import *
# Define a set of states for a HTTP/2 stream.
STATE_IDLE = 0
STATE_OPEN = 1
STATE_HALF_CLOSED_LOCAL = 2
STATE_HALF_CLOSED_REMOTE = 3
STATE_CLOSED = 4
class Stream(object):
"""
A single HTTP/2 stream.
A stream is an independent, bi-directional sequence of HTTP headers and
data. Each stream is identified by a single integer. From a HTTP
perspective, a stream _approximately_ matches a single request-response
pair.
"""
def __init__(self,
logger,
config,
connection,
ip,
stream_id,
task,
send_cb,
close_cb,
encoder,
decoder,
receive_window_manager,
remote_window_size,
max_frame_size):
self.logger = logger
self.config = config
self.connection = connection
self.ip = ip
self.stream_id = stream_id
self.task = task
self.state = STATE_IDLE
self.get_head_time = None
# There are two flow control windows: one for data we're sending,
# one for data being sent to us.
self.receive_window_manager = receive_window_manager
self.remote_window_size = remote_window_size
self.max_frame_size = max_frame_size
# This is the callback handed to the stream by its parent connection.
# It is called when the stream wants to send data. It expects to
# receive a list of frames that will be automatically serialized.
self._send_cb = send_cb
# This is the callback to be called when the stream is closed.
self._close_cb = close_cb
# A reference to the header encoder and decoder objects belonging to
# the parent connection.
self._encoder = encoder
self._decoder = decoder
self.request_headers = HTTPHeaderMap()
# Convert the body to bytes if needed.
self.request_body = to_bytestring(self.task.body)
# request body not send blocked by send window
# the left body will send when send window opened.
self.request_body_left = len(self.request_body)
self.request_body_sended = False
# data list before decode
self.response_header_datas = []
# Set to a key-value set of the response headers once their
# HEADERS..CONTINUATION frame sequence finishes.
self.response_headers = None
# Unconsumed response data chunks
self.response_body = []
self.response_body_len = 0
def start_request(self):
"""
Open the stream. Does this by encoding and sending the headers: no more
calls to ``add_header`` are allowed after this method is called.
The `end` flag controls whether this will be the end of the stream, or
whether data will follow.
"""
# Strip any headers invalid in H2.
#headers = h2_safe_headers(self.request_headers)
host = self.connection.get_host(self.task.host)
self.add_header(":Method", self.task.method)
self.add_header(":Scheme", "https")
self.add_header(":Authority", host)
self.add_header(":Path", self.task.path)
default_headers = (':method', ':scheme', ':authority', ':path')
#headers = h2_safe_headers(self.task.headers)
for name, value in self.task.headers.items():
is_default = to_native_string(name) in default_headers
self.add_header(name, value, replace=is_default)
# Encode the headers.
encoded_headers = self._encoder(self.request_headers)
# It's possible that there is a substantial amount of data here. The
# data needs to go into one HEADERS frame, followed by a number of
# CONTINUATION frames. For now, for ease of implementation, let's just
# assume that's never going to happen (16kB of headers is lots!).
# Additionally, since this is so unlikely, there's no point writing a
# test for this: it's just so simple.
if len(encoded_headers) > FRAME_MAX_LEN: # pragma: no cover
raise ValueError("Header block too large.")
header_frame = HeadersFrame(self.stream_id)
header_frame.data = encoded_headers
# If no data has been provided, this is the end of the stream. Either
# way, due to the restriction above it's definitely the end of the
# headers.
header_frame.flags.add('END_HEADERS')
# Send the header frame.
self.task.set_state("start send header")
self._send_cb(header_frame)
# Transition the stream state appropriately.
self.state = STATE_OPEN
self.task.set_state("start send left body")
threading.Thread(target=self.left_work).start()
def left_work(self):
self.send_left_body()
self.timeout_response()
def add_header(self, name, value, replace=False):
"""
Adds a single HTTP header to the headers to be sent on the request.
"""
if not replace:
self.request_headers[name] = value
else:
self.request_headers.replace(name, value)
def send_left_body(self):
while self.remote_window_size and not self.request_body_sended:
send_size = min(self.remote_window_size, self.request_body_left, self.max_frame_size)
f = DataFrame(self.stream_id)
data_start = len(self.request_body) - self.request_body_left
f.data = self.request_body[data_start:data_start+send_size]
self.remote_window_size -= send_size
self.request_body_left -= send_size
# If the length of the data is less than MAX_CHUNK, we're probably
# at the end of the file. If this is the end of the data, mark it
# as END_STREAM.
if self.request_body_left == 0:
f.flags.add('END_STREAM')
# Send the frame and decrement the flow control window.
self._send_cb(f)
# If no more data is to be sent on this stream, transition our state.
if self.request_body_left == 0:
self.request_body_sended = True
self._close_local()
self.task.set_state("end send left body")
def receive_frame(self, frame):
"""
Handle a frame received on this stream.
called by connection.
"""
# self.logger.debug("stream %d recved frame %r", self.stream_id, frame)
if frame.type == WindowUpdateFrame.type:
self.remote_window_size += frame.window_increment
self.send_left_body()
elif frame.type == HeadersFrame.type:
# Begin the header block for the response headers.
#self.response_header_datas = [frame.data]
self.response_header_datas.append(frame.data)
elif frame.type == PushPromiseFrame.type:
self.logger.error("%s receive PushPromiseFrame:%d", self.ip, frame.stream_id)
elif frame.type == ContinuationFrame.type:
# Continue a header block begun with either HEADERS or PUSH_PROMISE.
self.response_header_datas.append(frame.data)
elif frame.type == DataFrame.type:
# Append the data to the buffer.
if not self.task.finished:
self.task.put_data(frame.data)
if 'END_STREAM' not in frame.flags:
# Increase the window size. Only do this if the data frame contains
# actual data.
# don't do it if stream is closed.
size = frame.flow_controlled_length
increment = self.receive_window_manager._handle_frame(size)
#if increment:
# self.logger.debug("stream:%d frame size:%d increase win:%d", self.stream_id, size, increment)
#content_len = int(self.request_headers.get("Content-Length")[0])
#self.logger.debug("%s get:%d s:%d", self.ip, self.response_body_len, size)
if increment and not self._remote_closed:
w = WindowUpdateFrame(self.stream_id)
w.window_increment = increment
self._send_cb(w)
elif frame.type == BlockedFrame.type:
# If we've been blocked we may want to fixup the window.
increment = self.receive_window_manager._blocked()
if increment:
w = WindowUpdateFrame(self.stream_id)
w.window_increment = increment
self._send_cb(w)
elif frame.type == RstStreamFrame.type:
# RST_STREAM frame sent from server here is not defined in the RFC
inactive_time = time.time() - self.connection.last_active_time
self.logger.debug("%s Stream %d Rest by server, inactive:%d. error code:%d",
self.ip, self.stream_id, inactive_time, frame.error_code)
self.connection.close("RESET")
elif frame.type in FRAMES:
# This frame isn't valid at this point.
#raise ValueError("Unexpected frame %s." % frame)
self.logger.error("%s Unexpected frame %s.", self.ip, frame)
else: # pragma: no cover
# Unknown frames belong to extensions. Just drop it on the
# floor, but log so that users know that something happened.
self.logger.error("%s Received unknown frame, type %d", self.ip, frame.type)
pass
if 'END_HEADERS' in frame.flags:
if self.response_headers is not None:
raise ProtocolError("Too many header blocks.")
# Begin by decoding the header block. If this fails, we need to
# tear down the entire connection.
if len(self.response_header_datas) == 1:
header_data = self.response_header_datas[0]
else:
header_data = b''.join(self.response_header_datas)
try:
headers = self._decoder.decode(header_data)
except Exception as e:
self.logger.exception("decode h2 header %s fail:%r", header_data, e)
raise e
self.response_headers = HTTPHeaderMap(headers)
# We've handled the headers, zero them out.
self.response_header_datas = None
self.get_head_time = time.time()
length = self.response_headers.get("Content-Length", None)
if isinstance(length, list):
length = int(length[0])
if not self.task.finished:
self.task.content_length = length
self.task.set_state("h2_get_head")
self.send_response()
if 'END_STREAM' in frame.flags:
#self.logger.debug("%s Closing remote side of stream:%d", self.ip, self.stream_id)
time_now = time.time()
time_cost = time_now - self.get_head_time
if time_cost > 0 and \
isinstance(self.task.content_length, int) and \
not self.task.finished:
speed = self.task.content_length / time_cost
self.task.set_state("h2_finish[SP:%d]" % speed)
self._close_remote()
self.close("end stream")
if not self.task.finished:
self.connection.continue_timeout = 0
def send_response(self):
if self.task.responsed:
self.logger.error("http2_stream send_response but responsed.%s", self.task.url)
self.close("h2 stream send_response but sended.")
return
self.task.responsed = True
status = int(self.response_headers[b':status'][0])
strip_headers(self.response_headers)
response = simple_http_client.BaseResponse(status=status, headers=self.response_headers)
response.ssl_sock = self.connection.ssl_sock
response.worker = self.connection
response.task = self.task
self.task.queue.put(response)
if status in self.config.http2_status_to_close:
self.connection.close("status %d" % status)
def close(self, reason="close"):
if not self.task.responsed:
self.connection.retry_task_cb(self.task, reason)
else:
self.task.finish()
# empty block means fail or closed.
self._close_remote()
self._close_cb(self.stream_id, reason)
@property
def _local_closed(self):
return self.state in (STATE_CLOSED, STATE_HALF_CLOSED_LOCAL)
@property
def _remote_closed(self):
return self.state in (STATE_CLOSED, STATE_HALF_CLOSED_REMOTE)
@property
def _local_open(self):
return self.state in (STATE_OPEN, STATE_HALF_CLOSED_REMOTE)
def _close_local(self):
self.state = (
STATE_HALF_CLOSED_LOCAL if self.state == STATE_OPEN else STATE_CLOSED)
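# --- Illustrative sketch (not part of the original file) ---
# send_left_body() above slices the request body into DATA frames whose size is
# capped by both the peer's flow-control window and the connection's maximum
# frame size, and marks the final frame with END_STREAM. The chunking rule in
# isolation (hypothetical standalone form):
def _next_chunk_size_sketch(remote_window_size, request_body_left, max_frame_size):
    """Bytes to put in the next DATA frame; 0 means wait for a WINDOW_UPDATE."""
    return min(remote_window_size, request_body_left, max_frame_size)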
<reponame>MikalaiMikalalai/ggrc-core
# -*- coding: utf-8 -*-
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# pylint: disable=invalid-name,too-many-lines
"""Tests for notifications for models with assignable mixin."""
import unittest
from collections import OrderedDict
from datetime import datetime
import ddt
from freezegun import freeze_time
from mock import patch
from ggrc import db
from ggrc.models import Assessment
from ggrc.models import CustomAttributeDefinition
from ggrc.models import CustomAttributeValue
from ggrc.models import Revision
from ggrc.models import all_models
from ggrc.utils import errors
from integration.ggrc import api_helper, generator
from integration.ggrc.models import factories
from integration.ggrc.models.factories import \
CustomAttributeDefinitionFactory as CAD
from integration.ggrc.notifications import TestNotifications
class TestAssignableNotification(TestNotifications):
"""Base class for testing notification creation for assignable mixin."""
def setUp(self):
super(TestAssignableNotification, self).setUp()
self.client.get("/login")
self._fix_notification_init()
factories.AuditFactory(slug="Audit")
class TestAssignableNotificationUsingImports(TestAssignableNotification):
"""Tests for notifications when interacting with objects through imports."""
@patch("ggrc.notifications.common.send_email")
def test_assessment_created_notifications(self, send_email):
"""Test if importing new assessments results in notifications for all."""
self.assertEqual(self._get_notifications().count(), 0)
self.import_file("assessment_template_no_warnings.csv", safe=False)
self.import_file("assessment_with_templates.csv")
titles = [asmt.title for asmt in Assessment.query]
query = self._get_notifications(notif_type="assessment_open")
self.assertEqual(query.count(), 6)
# check email content
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"New assessments were created", content)
for asmt_title in titles:
self.assertIn(asmt_title, content)
@patch("ggrc.notifications.common.send_email")
def test_assessment_updated_notifications(self, send_email):
"""Test if updating an assessment results in a notification."""
self.import_file("assessment_template_no_warnings.csv", safe=False)
self.import_file("assessment_with_templates.csv")
asmts = {asmt.slug: asmt for asmt in Assessment.query}
asmt = Assessment.query.get(asmts["A 1"].id)
asmt_id, asmt_slug = asmt.id, asmt.slug
asmt.status = Assessment.PROGRESS_STATE
db.session.commit()
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
asmt = Assessment.query.get(asmt_id)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt.slug),
(u"Title", u"New Assessment 1 title"),
]))
query = self._get_notifications(notif_type="assessment_updated")
self.assertEqual(query.count(), 1)
# check email content
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"Assessments have been updated", content)
# the assessment updated notification should be sent even if there exists a
# status change notification, regardless of the order of actions
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"Title", u"New Assessment 1 title 2"),
]))
query = self._get_notifications(notif_type="assessment_updated")
self.assertEqual(query.count(), 1)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"State*", Assessment.DONE_STATE),
]))
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"Title", u"New Assessment 1 title 3"),
]))
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertIn(u"Assessments have been updated", content)
@unittest.skip("An issue needs to be fixed.")
@patch("ggrc.notifications.common.send_email")
def test_assessment_ca_updated_notifications(self, send_email):
"""Test if updating assessment custom attr. results in a notification."""
CAD(definition_type="assessment", title="CA_misc_remarks")
self.import_file("assessment_template_no_warnings.csv")
self.import_file("assessment_with_templates.csv")
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
asmts = {asmt.slug: asmt for asmt in Assessment.query}
asmt = Assessment.query.get(asmts["A 1"].id)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt.slug),
(u"CA_misc_remarks", u"CA new value"),
]))
asmt = Assessment.query.get(asmts["A 1"].id)
query = self._get_notifications(notif_type="assessment_updated")
self.assertEqual(query.count(), 1)
# check email content
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"Assessments have been updated", content)
@unittest.skip("An issue needs to be fixed.")
@patch("ggrc.notifications.common.send_email")
def test_assessment_url_updated_notifications(self, send_email):
"""Test if updating assessment URLs results in a notification."""
self.import_file("assessment_template_no_warnings.csv")
self.import_file("assessment_with_templates.csv")
asmts = {asmt.slug: asmt for asmt in Assessment.query}
asmt = Assessment.query.get(asmts["A 1"].id)
asmt_id = asmt.id
asmt.status = Assessment.PROGRESS_STATE
db.session.commit()
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
asmt = Assessment.query.get(asmt_id)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt.slug),
(u"Evidence Url", u"www.foo-url.bar"),
]))
query = self._get_notifications(notif_type="assessment_updated")
self.assertEqual(query.count(), 1)
# check email content
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"Assessments have been updated", content)
@unittest.skip("An issue needs to be fixed.")
@patch("ggrc.notifications.common.send_email")
def test_attaching_assessment_evidence_notifications(self, send_email):
"""Test if attaching assessment evidence results in a notification."""
self.import_file("assessment_template_no_warnings.csv")
self.import_file("assessment_with_templates.csv")
asmts = {asmt.slug: asmt for asmt in Assessment.query}
asmt = Assessment.query.get(asmts["A 1"].id)
asmt_id = asmt.id
asmt.status = Assessment.PROGRESS_STATE
db.session.commit()
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
asmt = Assessment.query.get(asmt_id)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt.slug),
(u"Evidence File", u"https://gdrive.com/qwerty1/view evidence.txt"),
]))
query = self._get_notifications(notif_type="assessment_updated")
self.assertEqual(query.count(), 1)
# check email content
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"Assessments have been updated", content)
@unittest.skip("An issue needs to be fixed.")
@patch("ggrc.notifications.common.send_email")
def test_assessment_person_updated_notifications(self, send_email):
"""Test if updating assessment people results in a notification."""
self.import_file("assessment_template_no_warnings.csv")
self.import_file("assessment_with_templates.csv")
asmts = {asmt.slug: asmt for asmt in Assessment.query}
asmt = Assessment.query.get(asmts["A 1"].id)
asmt_id = asmt.id
asmt.status = Assessment.PROGRESS_STATE
db.session.commit()
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
asmt = Assessment.query.get(asmt_id)
# change assignee
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt.slug),
(u"Assignee*", u"<EMAIL>"),
]))
query = self._get_notifications(notif_type="assessment_updated")
self.assertEqual(query.count(), 1)
# clear notifications
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
asmt = Assessment.query.get(asmt_id)
# change verifier
asmt = Assessment.query.get(asmt_id)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt.slug),
(u"Verifiers", u"<EMAIL>"),
]))
query = self._get_notifications(notif_type="assessment_updated")
self.assertEqual(query.count(), 1)
# check email content
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"Assessments have been updated", content)
@patch("ggrc.notifications.common.send_email")
def test_assessment_state_change_notifications(self, send_email):
"""Test if updating assessment state results in notifications."""
# pylint: disable=too-many-statements
self.import_file("assessment_template_no_warnings.csv", safe=False)
self.import_file("assessment_with_templates.csv")
asmts = {asmt.slug: asmt for asmt in Assessment.query}
asmt = Assessment.query.get(asmts["A 1"].id)
asmt_id, asmt_slug = asmt.id, asmt.slug
# test starting an assessment
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"State*", Assessment.PROGRESS_STATE),
]))
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertRegexpMatches(content, ur"Assessment\s+has\s+been\s+started")
# test submitting assessment for review
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"State*", Assessment.DONE_STATE),
]))
query = self._get_notifications(notif_type="assessment_ready_for_review")
self.assertEqual(query.count(), 1)
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"Assessments in review", content)
# test verifying an assessment
self.assertEqual(self._get_notifications().count(), 0)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"State*", Assessment.FINAL_STATE),
# (will get verified, because there is a verifier assigned)
]))
query = self._get_notifications(notif_type="assessment_verified")
self.assertEqual(query.count(), 1)
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"Verified assessments", content)
# test reopening a verified assessment
self.assertEqual(self._get_notifications().count(), 0)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"State*", Assessment.PROGRESS_STATE),
]))
query = self._get_notifications(notif_type="assessment_reopened")
self.assertEqual(query.count(), 1)
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"Reopened assessments", content)
# sending an assessment back to "in review" (i.e. the undo action)
asmt = Assessment.query.get(asmt_id)
asmt.status = Assessment.VERIFIED_STATE
db.session.commit()
self.assertEqual(self._get_notifications().count(), 0)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"State*", Assessment.DONE_STATE),
]))
query = self._get_notifications()
self.assertEqual(query.count(), 0) # there should be no notification!
# test declining an assessment
asmt = Assessment.query.get(asmt_id)
asmt.status = Assessment.DONE_STATE
db.session.commit()
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"State*", Assessment.PROGRESS_STATE),
]))
query = self._get_notifications(notif_type="assessment_declined")
self.assertEqual(query.count(), 1)
query = self._get_notifications(notif_type="assessment_reopened")
self.assertEqual(query.count(), 1)
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"Declined assessments", content)
self.assertIn(u"Reopened assessments", content)
# directly submitting a not started assessment for review
asmt = Assessment.query.get(asmt_id)
asmt.status = Assessment.START_STATE
db.session.commit()
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"State*", Assessment.DONE_STATE),
]))
query = self._get_notifications(notif_type="assessment_ready_for_review")
self.assertEqual(query.count(), 1)
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"Assessments in review", content)
# directly completing a not started assessment
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"Verifiers", None),
(u"State*", Assessment.START_STATE),
]))
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"State*", Assessment.FINAL_STATE),
]))
query = self._get_notifications(notif_type="assessment_completed")
self.assertEqual(query.count(), 1)
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"Completed assessments", content)
# test reopening a completed assessment
self.assertEqual(self._get_notifications().count(), 0)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"State*", Assessment.PROGRESS_STATE),
]))
query = self._get_notifications(notif_type="assessment_reopened")
self.assertEqual(query.count(), 1)
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"Reopened assessments", content)
# completing an assessment in progress
self.assertEqual(self._get_notifications().count(), 0)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"State*", Assessment.FINAL_STATE),
]))
query = self._get_notifications(notif_type="assessment_completed")
self.assertEqual(query.count(), 1)
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"Completed assessments", content)
@patch("ggrc.notifications.common.send_email")
def test_multiple_assessment_state_changes_notification(self, send_email):
"""Test if several assessment state changes result in a single notification.
    Users should only be notified about the last state change, and not about
every state change that happened.
"""
self.import_file("assessment_template_no_warnings.csv", safe=False)
self.import_file("assessment_with_templates.csv")
asmts = {asmt.slug: asmt for asmt in Assessment.query}
asmt = Assessment.query.get(asmts["A 1"].id)
asmt_slug = asmt.slug
asmt.status = Assessment.START_STATE
db.session.commit()
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
# make multiple state transitions and check that only the last one is
# actually retained
states = (
Assessment.PROGRESS_STATE,
Assessment.DONE_STATE,
Assessment.PROGRESS_STATE,
Assessment.FINAL_STATE,
)
for new_state in states:
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"State*", new_state),
]))
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertNotIn(u"Assessments in review", content)
self.assertNotIn(u"Declined assessments", content)
self.assertNotIn(u"Reopened assessments", content)
self.assertIn(u"Completed assessments", content)
@patch("ggrc.notifications.common.send_email")
def test_assessment_reopen_notifications_on_edit(self, send_email):
"""Test if updating assessment results in reopen notification."""
self.import_file("assessment_template_no_warnings.csv", safe=False)
self.import_file("assessment_with_templates.csv")
asmts = {asmt.slug: asmt for asmt in Assessment.query}
asmt = Assessment.query.get(asmts["A 1"].id)
asmt_id, asmt_slug = asmt.id, asmt.slug
for i, new_state in enumerate(Assessment.DONE_STATES):
asmt = Assessment.query.get(asmt_id)
asmt.status = new_state
db.session.commit()
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
self.import_data(OrderedDict([
(u"object_type", u"Assessment"),
(u"Code*", asmt_slug),
(u"Title", u"New Assessment 1 title - " + unicode(i)),
]))
query = self._get_notifications(notif_type="assessment_reopened")
self.assertEqual(query.count(), 1)
self.client.get("/_notifications/send_daily_digest")
recipient, _, content = send_email.call_args[0]
self.assertEqual(recipient, u"<EMAIL>")
self.assertIn(u"Reopened assessments", content)
@unittest.skip("An issue needs to be fixed.")
@patch("ggrc.notifications.common.send_email")
def test_assessment_reopen_notifications_on_ca_edit(self, send_email):
"""Test if updating assessment's CA value in reopen notification."""
CAD(definition_type="assessment", title="CA_misc_remarks")
self.import_file("assessment_template_no_warnings.csv")
self.import_file("assessment_with_templates.csv")
asmts = {asmt.slug: asmt for asmt in Assessment.query}
asmt = Assessment.query.get(asmts["A 1"].id)
asmt_id, asmt_slug = asmt.id, asmt.slug
from __future__ import absolute_import, division, print_function
from collections import Iterable, defaultdict, deque
from functools import reduce
import numbers
import operator
import numpy as np
import scipy.sparse
try: # Windows compatibility
int = long
except NameError:
pass
class COO(object):
""" A Sparse Multidimensional Array
This is stored in COO format. It depends on NumPy and Scipy.sparse for
computation, but supports arrays of arbitrary dimension.
Parameters
----------
coords: np.ndarray (ndim, nnz)
An array holding the index locations of every value
Should have shape (number of dimensions, number of non-zeros)
data: np.array (nnz,)
An array of Values
shape: tuple (ndim,), optional
The shape of the array
Examples
--------
>>> x = np.eye(4)
>>> x[2, 3] = 5
>>> s = COO(x)
>>> s
<COO: shape=(4, 4), dtype=float64, nnz=5, sorted=True, duplicates=False>
>>> s.data
array([ 1., 1., 1., 5., 1.])
>>> s.coords
array([[0, 1, 2, 2, 3],
[0, 1, 2, 3, 3]], dtype=uint8)
>>> s.dot(s.T).sum(axis=0).todense()
array([ 1., 1., 31., 6.])
Make a sparse array by passing in an array of coordinates and an array of
values.
>>> coords = [[0, 0, 0, 1, 1],
... [0, 1, 2, 0, 3],
... [0, 3, 2, 0, 1]]
>>> data = [1, 2, 3, 4, 5]
>>> y = COO(coords, data, shape=(3, 4, 5))
>>> y
<COO: shape=(3, 4, 5), dtype=int64, nnz=5, sorted=False, duplicates=True>
>>> tensordot(s, y, axes=(0, 1))
<COO: shape=(4, 3, 5), dtype=float64, nnz=6, sorted=False, duplicates=False>
Following scipy.sparse conventions you can also pass these as a tuple with
rows and columns
>>> rows = [0, 1, 2, 3, 4]
>>> cols = [0, 0, 0, 1, 1]
>>> data = [10, 20, 30, 40, 50]
>>> z = COO((data, (rows, cols)))
>>> z.todense()
array([[10, 0],
[20, 0],
[30, 0],
[ 0, 40],
[ 0, 50]])
You can also pass a dictionary or iterable of index/value pairs. Repeated
indices imply summation:
>>> d = {(0, 0, 0): 1, (1, 2, 3): 2, (1, 1, 0): 3}
>>> COO(d)
<COO: shape=(2, 3, 4), dtype=int64, nnz=3, sorted=False, duplicates=False>
>>> L = [((0, 0), 1),
... ((1, 1), 2),
... ((0, 0), 3)]
>>> COO(L).todense()
array([[4, 0],
[0, 2]])
See Also
--------
COO.from_numpy
COO.from_scipy_sparse
"""
__array_priority__ = 12
def __init__(self, coords, data=None, shape=None, has_duplicates=True,
sorted=False, cache=False):
self._cache = None
if cache:
self.enable_caching()
if data is None:
# {(i, j, k): x, (i, j, k): y, ...}
if isinstance(coords, dict):
coords = list(coords.items())
has_duplicates = False
if isinstance(coords, np.ndarray):
result = COO.from_numpy(coords)
self.coords = result.coords
self.data = result.data
self.has_duplicates = result.has_duplicates
self.sorted = result.sorted
self.shape = result.shape
return
# []
if not coords:
data = []
coords = []
            # [((i, j, k), value), ((i, j, k), value), ...]
elif isinstance(coords[0][0], Iterable):
if coords:
assert len(coords[0]) == 2
data = [x[1] for x in coords]
coords = [x[0] for x in coords]
coords = np.asarray(coords).T
# (data, (row, col, slab, ...))
else:
data = coords[0]
coords = np.stack(coords[1], axis=0)
self.data = np.asarray(data)
self.coords = np.asarray(coords)
if self.coords.ndim == 1:
self.coords = self.coords[None, :]
if shape and not np.prod(self.coords.shape):
self.coords = np.zeros((len(shape), 0), dtype=np.uint64)
if shape is None:
if self.coords.nbytes:
shape = tuple((self.coords.max(axis=1) + 1).tolist())
else:
shape = ()
self.shape = tuple(shape)
if self.shape:
dtype = np.min_scalar_type(max(self.shape))
else:
dtype = np.int_
self.coords = self.coords.astype(dtype)
assert not self.shape or len(data) == self.coords.shape[1]
self.has_duplicates = has_duplicates
self.sorted = sorted
def enable_caching(self):
""" Enable caching of reshape, transpose, and tocsr/csc operations
This enables efficient iterative workflows that make heavy use of
csr/csc operations, such as tensordot. This maintains a cache of
recent results of reshape and transpose so that operations like
        tensordot (which uses both internally) can reuse efficiently stored
        representations for repeated use. This can significantly cut down on
computational costs in common numeric algorithms.
However, this also assumes that neither this object, nor the downstream
objects will have their data mutated.
Examples
--------
>>> x.enable_caching() # doctest: +SKIP
>>> csr1 = x.transpose((2, 0, 1)).reshape((100, 120)).tocsr() # doctest: +SKIP
>>> csr2 = x.transpose((2, 0, 1)).reshape((100, 120)).tocsr() # doctest: +SKIP
>>> csr1 is csr2 # doctest: +SKIP
True
"""
self._cache = defaultdict(lambda: deque(maxlen=3))
return self
@classmethod
def from_numpy(cls, x):
if x.shape:
coords = np.where(x)
data = x[coords]
coords = np.vstack(coords)
else:
coords = []
data = x
return cls(coords, data, shape=x.shape, has_duplicates=False,
sorted=True)
def todense(self):
self = self.sum_duplicates()
x = np.zeros(shape=self.shape, dtype=self.dtype)
coords = tuple([self.coords[i, :] for i in range(self.ndim)])
x[coords] = self.data
return x
@classmethod
def from_scipy_sparse(cls, x):
x = scipy.sparse.coo_matrix(x)
coords = np.empty((2, x.nnz), dtype=x.row.dtype)
coords[0, :] = x.row
coords[1, :] = x.col
return COO(coords, x.data, shape=x.shape,
has_duplicates=not x.has_canonical_format,
sorted=x.has_canonical_format)
@property
def dtype(self):
return self.data.dtype
@property
def ndim(self):
return len(self.shape)
@property
def nnz(self):
return self.coords.shape[1]
@property
def nbytes(self):
return self.data.nbytes + self.coords.nbytes
def __sizeof__(self):
return self.nbytes
def __getitem__(self, index):
if not isinstance(index, tuple):
index = (index,)
index = tuple(ind + self.shape[i] if isinstance(ind, numbers.Integral) and ind < 0 else ind
for i, ind in enumerate(index))
if (all(ind == slice(None) or ind == slice(0, d)
for ind, d in zip(index, self.shape))):
return self
mask = np.ones(self.nnz, dtype=bool)
for i, ind in enumerate([i for i in index if i is not None]):
if ind == slice(None, None):
continue
mask &= _mask(self.coords[i], ind)
n = mask.sum()
coords = []
shape = []
i = 0
for ind in index:
if isinstance(ind, numbers.Integral):
i += 1
continue
elif isinstance(ind, slice):
start = ind.start or 0
stop = ind.stop if ind.stop is not None else self.shape[i]
shape.append(min(stop, self.shape[i]) - start)
coords.append(self.coords[i][mask] - start)
i += 1
elif isinstance(ind, list):
old = self.coords[i][mask]
new = np.empty(shape=old.shape, dtype=old.dtype)
for j, item in enumerate(ind):
new[old == item] = j
coords.append(new)
shape.append(len(ind))
i += 1
elif ind is None:
coords.append(np.zeros(n))
shape.append(1)
for j in range(i, self.ndim):
coords.append(self.coords[j][mask])
shape.append(self.shape[j])
coords = np.stack(coords, axis=0)
shape = tuple(shape)
data = self.data[mask]
return COO(coords, data, shape=shape,
has_duplicates=self.has_duplicates,
sorted=self.sorted)
def __str__(self):
return "<COO: shape=%s, dtype=%s, nnz=%d, sorted=%s, duplicates=%s>" % (
self.shape, self.dtype, self.nnz, self.sorted,
self.has_duplicates)
__repr__ = __str__
def reduction(self, method, axis=None, keepdims=False, dtype=None):
if axis is None:
axis = tuple(range(self.ndim))
kwargs = {}
if dtype:
kwargs['dtype'] = dtype
if isinstance(axis, numbers.Integral):
axis = (axis,)
if set(axis) == set(range(self.ndim)):
result = getattr(self.data, method)(**kwargs)
else:
axis = tuple(axis)
neg_axis = list(range(self.ndim))
for ax in axis:
neg_axis.remove(ax)
neg_axis = tuple(neg_axis)
a = self.transpose(axis + neg_axis)
a = a.reshape((np.prod([self.shape[d] for d in axis]),
np.prod([self.shape[d] for d in neg_axis])))
a = a.to_scipy_sparse()
a = getattr(a, method)(axis=0, **kwargs)
if isinstance(a, scipy.sparse.spmatrix):
a = COO.from_scipy_sparse(a)
a.sorted = self.sorted
a.has_duplicates = False
elif isinstance(a, np.matrix):
a = np.asarray(a)[0]
a = COO.from_numpy(a)
a = a.reshape([self.shape[d] for d in neg_axis])
result = a
if keepdims:
result = _keepdims(self, result, axis)
return result
def sum(self, axis=None, keepdims=False, dtype=None, out=None):
return self.reduction('sum', axis=axis, keepdims=keepdims, dtype=dtype)
def max(self, axis=None, keepdims=False, out=None):
x = self.reduction('max', axis=axis, keepdims=keepdims)
# TODO: verify that there are some missing elements in each entry
if isinstance(x, COO):
x.data[x.data < 0] = 0
return x
elif isinstance(x, np.ndarray):
x[x < 0] = 0
return x
else:
return np.max(x, 0)
def transpose(self, axes=None):
if axes is None:
axes = reversed(range(self.ndim))
axes = tuple(axes)
if axes == tuple(range(self.ndim)):
return self
if self._cache is not None:
for ax, value in self._cache['transpose']:
if ax == axes:
return value
shape = tuple(self.shape[ax] for ax in axes)
result = COO(self.coords[axes, :], self.data, shape,
has_duplicates=self.has_duplicates,
cache=self._cache is not None)
if self._cache is not None:
self._cache['transpose'].append((axes, result))
return result
@property
def T(self):
return self.transpose(list(range(self.ndim))[::-1])
def dot(self, other):
return dot(self, other)
def __matmul__(self, other):
try:
return dot(self, other)
except NotImplementedError:
return NotImplemented
def __rmatmul__(self, other):
try:
return dot(other, self)
except NotImplementedError:
return NotImplemented
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kwargs):
return NotImplemented
def linear_loc(self, signed=False):
""" Index location of every piece of data in a flattened array
This is used internally to check for duplicates, re-order, reshape,
etc..
"""
n = reduce(operator.mul, self.shape)
if signed:
n = -n
dtype = np.min_scalar_type(n)
out = np.zeros(self.nnz, dtype=dtype)
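        # (The rest of this method was cut off in this copy. The lines below are
        #  a hedged reconstruction of the usual computation, accumulating
        #  coordinate * stride over all dimensions; they may differ in detail
        #  from the original source.)
        tmp = np.zeros(self.nnz, dtype=dtype)
        strides = 1
        for i, d in enumerate(self.shape[::-1]):
            # out += self.coords[-(i + 1), :] * strides, computed in `dtype`
            np.multiply(self.coords[-(i + 1), :], strides, out=tmp, dtype=dtype)
            np.add(tmp, out, out=out)
            strides *= d
        return out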
<reponame>Kelketek/evennia
"""
OOBHandler - Out Of Band Handler
The OOBHandler.execute_cmd is called by the sessionhandler when it detects
an OOB instruction (exactly how this looks depends on the protocol; at this
point all oob calls should look the same)
The handler provides these pieces of functionality:
function execution - the oob protocol can execute a function directly on
the server. The available functions must be defined
as global functions in settings.OOB_PLUGIN_MODULES.
repeat func execution - the oob protocol can request a given function be
executed repeatedly at a regular interval. This
uses an internal script pool.
tracking - the oob protocol can request Evennia to track changes to
fields on objects, as well as changes in Attributes. This is
done by dynamically adding tracker-objects on entities. The
behaviour of those objects can be customized by adding new
tracker classes in settings.OOB_PLUGIN_MODULES.
What goes into the OOB_PLUGIN_MODULES is a (list of) modules that contain
the working server-side code available to the OOB system: oob functions and
tracker classes.
oob functions have the following call signature:
function(caller, session, *args, **kwargs)
oob trackers should inherit from the TrackerBase class (in this
module) and implement a minimum of the same functionality.
If a function named "oob_error" is given, this will be called with error
messages.
"""
from inspect import isfunction
from twisted.internet.defer import inlineCallbacks
from django.conf import settings
from src.server.models import ServerConfig
from src.server.sessionhandler import SESSIONS
#from src.scripts.scripts import Script
#from src.utils.create import create_script
from src.scripts.tickerhandler import Ticker, TickerPool, TickerHandler
from src.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj
from src.utils import logger
from src.utils.utils import all_from_module, make_iter, to_str
_SA = object.__setattr__
_GA = object.__getattribute__
_DA = object.__delattr__
# load resources from plugin module
_OOB_FUNCS = {}
for mod in make_iter(settings.OOB_PLUGIN_MODULES):
_OOB_FUNCS.update(dict((key.lower(), func) for key, func in all_from_module(mod).items() if isfunction(func)))
# get custom error method or use the default
_OOB_ERROR = _OOB_FUNCS.get("oob_error", None)
if not _OOB_ERROR:
# create default oob error message function
def oob_error(oobhandler, session, errmsg, *args, **kwargs):
"Error wrapper"
session.msg(oob=("err", ("ERROR ", errmsg)))
_OOB_ERROR = oob_error
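# Illustrative sketch (not part of the original module): a minimal OOB plugin
# function following the call signature described in the module docstring.
# A real plugin would be defined in one of the modules listed in
# settings.OOB_PLUGIN_MODULES; the name "oob_echo" is hypothetical.
#
# def oob_echo(oobhandler, session, *args, **kwargs):
#     "Echo the received arguments back to the calling session over OOB."
#     session.msg(oob=("echo", args, kwargs))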
#
# TrackerHandler is assigned to objects that should notify themselves to
# the OOB system when some property changes. This is never assigned manually
# but automatically through the OOBHandler.
#
class TrackerHandler(object):
"""
This object is dynamically assigned to objects whenever one of its fields
are to be tracked. It holds an internal dictionary mapping to the fields
on that object. Each field can be tracked by any number of trackers (each
tied to a different callback).
"""
def __init__(self, obj):
"""
This is initiated and stored on the object as a
property _trackerhandler.
"""
try:
obj = obj.dbobj
except AttributeError:
pass
self.obj = obj
self.ntrackers = 0
# initiate store only with valid on-object fieldnames
self.tracktargets = dict((key, {})
for key in _GA(_GA(self.obj, "_meta"), "get_all_field_names")())
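        # (illustrative, not in the original source) tracktargets now looks like
        #   {"db_key": {}, "db_location": {}, ...}
        # and each inner dict later maps a tracker class name to its instance,
        # e.g. {"ReportFieldTracker": <tracker>}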
def add(self, fieldname, tracker):
"""
Add tracker to the handler. Raises KeyError if fieldname
does not exist.
"""
trackerkey = tracker.__class__.__name__
self.tracktargets[fieldname][trackerkey] = tracker
self.ntrackers += 1
def remove(self, fieldname, trackerclass, *args, **kwargs):
"""
Remove identified tracker from TrackerHandler.
Raises KeyError if tracker is not found.
"""
trackerkey = trackerclass.__name__
tracker = self.tracktargets[fieldname][trackerkey]
try:
tracker.at_remove(*args, **kwargs)
except Exception:
logger.log_trace()
del self.tracktargets[fieldname][trackerkey]
self.ntrackers -= 1
if self.ntrackers <= 0:
# if there are no more trackers, clean this handler
del self
def update(self, fieldname, new_value):
"""
Called by the field when it updates to a new value
"""
for tracker in self.tracktargets[fieldname].values():
try:
tracker.update(new_value)
except Exception:
logger.log_trace()
# On-object Trackers to load with TrackerHandler
class TrackerBase(object):
"""
Base class for OOB Tracker objects. Inherit from this
to define custom trackers.
"""
def __init__(self, *args, **kwargs):
pass
def update(self, *args, **kwargs):
"Called by tracked objects"
pass
def at_remove(self, *args, **kwargs):
"Called when tracker is removed"
pass
class ReportFieldTracker(TrackerBase):
"""
Tracker that passively sends data to a stored sessid whenever
a named database field changes. The TrackerHandler calls this with
the correct arguments.
"""
def __init__(self, oobhandler, fieldname, sessid, *args, **kwargs):
"""
name - name of entity to track, such as "db_key"
sessid - sessid of session to report to
"""
self.oobhandler = oobhandler
self.fieldname = fieldname
self.sessid = sessid
def update(self, new_value, *args, **kwargs):
"Called by cache when updating the tracked entitiy"
# use oobhandler to relay data
try:
# we must never relay objects across the amp, only text data.
new_value = new_value.key
except AttributeError:
new_value = to_str(new_value, force_string=True)
kwargs[self.fieldname] = new_value
# this is a wrapper call for sending oob data back to session
self.oobhandler.msg(self.sessid, "report", *args, **kwargs)
class ReportAttributeTracker(TrackerBase):
"""
Tracker that passively sends data to a stored sessid whenever
the Attribute updates. Since the field here is always "db_key",
we instead store the name of the attribute to return.
"""
def __init__(self, oobhandler, fieldname, sessid, attrname, *args, **kwargs):
"""
attrname - name of attribute to track
sessid - sessid of session to report to
"""
self.oobhandler = oobhandler
self.attrname = attrname
self.sessid = sessid
def update(self, new_value, *args, **kwargs):
"Called by cache when attribute's db_value field updates"
try:
new_value = new_value.dbobj
except AttributeError:
new_value = to_str(new_value, force_string=True)
kwargs[self.attrname] = new_value
# this is a wrapper call for sending oob data back to session
self.oobhandler.msg(self.sessid, "report", *args, **kwargs)
# Ticker of auto-updating objects
class OOBTicker(Ticker):
"""
Version of Ticker that executes an executable rather than trying to call
a hook method.
"""
@inlineCallbacks
def _callback(self):
"See original for more info"
for key, (_, args, kwargs) in self.subscriptions.items():
# args = (sessid, callback_function)
session = SESSIONS.session_from_sessid(args[0])
try:
# execute the oob callback
yield args[1](OOB_HANDLER, session, *args[2:], **kwargs)
except Exception:
logger.log_trace()
class OOBTickerPool(TickerPool):
ticker_class = OOBTicker
class OOBTickerHandler(TickerHandler):
ticker_pool_class = OOBTickerPool
# Main OOB Handler
class OOBHandler(object):
"""
The OOBHandler maintains all dynamic on-object oob hooks. It will store the
    creation instructions and re-apply them at a server reload (but
not after a server shutdown)
"""
def __init__(self):
"""
Initialize handler
"""
self.sessionhandler = SESSIONS
self.oob_tracker_storage = {}
self.tickerhandler = OOBTickerHandler("oob_ticker_storage")
def save(self):
"""
Save the command_storage as a serialized string into a temporary
ServerConf field
"""
if self.oob_tracker_storage:
#print "saved tracker_storage:", self.oob_tracker_storage
ServerConfig.objects.conf(key="oob_tracker_storage",
value=dbserialize(self.oob_tracker_storage))
self.tickerhandler.save()
def restore(self):
"""
        Restore the command_storage from the database and re-initialize the handler from storage. This is
only triggered after a server reload, not after a shutdown-restart
"""
# load stored command instructions and use them to re-initialize handler
tracker_storage = ServerConfig.objects.conf(key="oob_tracker_storage")
if tracker_storage:
self.oob_tracker_storage = dbunserialize(tracker_storage)
for (obj, sessid, fieldname, trackerclass, args, kwargs) in self.oob_tracker_storage.values():
#print "restoring tracking:",obj, sessid, fieldname, trackerclass
self._track(unpack_dbobj(obj), sessid, fieldname, trackerclass, *args, **kwargs)
# make sure to purge the storage
ServerConfig.objects.conf(key="oob_tracker_storage", delete=True)
self.tickerhandler.restore()
def _track(self, obj, sessid, propname, trackerclass, *args, **kwargs):
"""
Create an OOB obj of class _oob_MAPPING[tracker_key] on obj. args,
kwargs will be used to initialize the OOB hook before adding
it to obj.
If propname is not given, but the OOB has a class property
named as propname, this will be used as the property name when assigning
the OOB to obj, otherwise tracker_key is used as the property name.
"""
try:
obj = obj.dbobj
except AttributeError:
pass
if not "_trackerhandler" in _GA(obj, "__dict__"):
# assign trackerhandler to object
_SA(obj, "_trackerhandler", TrackerHandler(obj))
# initialize object
tracker = trackerclass(self, propname, sessid, *args, **kwargs)
_GA(obj, "_trackerhandler").add(propname, tracker)
# store calling arguments as a pickle for retrieval later
obj_packed = pack_dbobj(obj)
storekey = (obj_packed, sessid, propname)
stored = (obj_packed, sessid, propname, trackerclass, args, kwargs)
self.oob_tracker_storage[storekey] = stored
#print "_track:", obj, id(obj), obj.__dict__
def _untrack(self, obj, sessid, propname, trackerclass, *args, **kwargs):
"""
Remove the OOB from obj. If oob implements an
at_delete hook, this will be called with args, kwargs
"""
try:
obj = obj.dbobj
except AttributeError:
pass
try:
# call at_remove hook on the trackerclass
_GA(obj, "_trackerhandler").remove(propname, trackerclass, *args, **kwargs)
except AttributeError:
pass
# remove the pickle from storage
store_key = (pack_dbobj(obj), sessid, propname)
self.oob_tracker_storage.pop(store_key, None)
def get_all_tracked(self, session):
"""
Get the names of all variables this session is tracking.
"""
sessid = session.sessid
return [stored for key, stored in self.oob_tracker_storage.items() if key[1] == sessid]
def track_field(self, obj, sessid, field_name, trackerclass=ReportFieldTracker):
"""
Shortcut wrapper method for specifically tracking a database field.
Takes the tracker class as argument.
"""
        # all database field names on the model carry the "db_" prefix
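        # (The remainder of this method was cut off in this copy. The lines
        #  below are a hedged completion based on the _track() signature above
        #  and may differ from the original source.)
        if not field_name.startswith("db_"):
            field_name = "db_%s" % field_name
        self._track(obj, sessid, field_name, trackerclass)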
Asks user clarifying questions if an invalid number is provided.
Returns None if user says any of the terminal answers."""
answer = await self.ask_freeform_question(
recipient, question_text, require_first_device
)
answer_text = answer
        # This checks whether the answer is a valid candidate for float() by
        # replacing the first comma or decimal point (and a leading negative
        # sign) with a digit and checking that the resulting string is .isnumeric()
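        # (illustrative note, not in the original source) e.g. "-1.5" becomes
        # "11.5" and then "1115", which .isnumeric() accepts, while "1.2.3"
        # becomes "112.3" and is rejected because only the first "." is replaced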
if answer_text and not (
answer_text.replace("-", "1", 1).replace(".", "1", 1).isnumeric()
or answer_text.replace("-", "1", 1).replace(",", "1", 1).isnumeric()
):
# cancel if user replies with any of the terminal answers "stop, cancel, quit, etc. defined above"
if answer.lower() in self.TERMINAL_ANSWERS:
return None
# Check to see if the original question already specified wanting the answer as a decimal.
# If not asks the question again and adds "as a decimal" to clarify
if question_text and "as a decimal" in question_text:
return await self.ask_floatable_question(recipient, question_text)
return await self.ask_floatable_question(
recipient, (question_text or "") + " (as a decimal, ie 1.01 or 2,02)"
)
if answer_text:
return float(answer.replace(",", ".", 1))
return None
async def ask_intable_question(
self,
recipient: str,
question_text: Optional[str] = "How many years old do you wish you were?",
require_first_device: bool = False,
) -> Optional[int]:
"""Asks a question answered with an integer or whole number.
Asks user clarifying questions if an invalid number is provided.
Returns None if user says any of the terminal answers."""
answer = await self.ask_freeform_question(
recipient, question_text, require_first_device
)
if answer and not answer.isnumeric():
# cancel if user replies with any of the terminal answers "stop, cancel, quit, etc. defined above"
if answer.lower() in self.TERMINAL_ANSWERS:
return None
# Check to see if the original question already specified wanting the answer as a decimal.
# If not asks the question again and adds "as a whole number, ie '1' or '2000'" to clarify
if question_text and "as a whole number" in question_text:
return await self.ask_intable_question(recipient, question_text)
return await self.ask_intable_question(
recipient,
(question_text or "") + " (as a whole number, ie '1' or '2000')",
)
if answer:
return int(answer)
return None
async def ask_yesno_question(
self,
recipient: str,
question_text: str = "Are you sure? yes/no",
require_first_device: bool = False,
) -> Optional[bool]:
"""Asks a question that expects a yes or no answer. Returns a Boolean:
True if Yes False if No. None if cancelled"""
# ask the question as a freeform question
answer = await self.ask_freeform_question(
recipient, question_text, require_first_device
)
answer = answer.lower().rstrip(string.punctuation)
# if there is an answer and it is negative or positive
if answer and answer in (self.AFFIRMATIVE_ANSWERS + self.NEGATIVE_ANSWERS):
# return true if it's in affirmative answers otherwise assume it was negative and return false
if answer in self.AFFIRMATIVE_ANSWERS:
return True
return False
# return none if user answers cancel, etc
if answer and answer in self.TERMINAL_ANSWERS:
return None
# if the answer is not a terminal answer but also not a match, add clarifier and ask again
if "Please answer yes or no" not in question_text:
question_text = "Please answer yes or no, or cancel:\n \n" + question_text
return await self.ask_yesno_question(
recipient, question_text, require_first_device
)
async def ask_address_question(
self,
recipient: str,
question_text: str = "What's your shipping address?",
require_first_device: bool = False,
require_confirmation: bool = False,
) -> Optional[str]:
"""Asks user for their address and verifies through the google maps api
Can ask User for confirmation, returns string with formatted address or none"""
# get google maps api key from secrets
api = utils.get_secret("GOOGLE_MAPS_API")
if not api:
logging.error("Error, missing Google Maps API in secrets configuration")
return None
# ask for the address as a freeform question
address = await self.ask_freeform_question(
recipient, question_text, require_first_device
)
# we take the answer provided by the user, format it nicely as a request to google maps' api
# It returns a JSON object from which we can ascertain if the address is valid
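        # (illustrative, not from the original source) the fields used below
        # look roughly like:
        #   {"results": [{"formatted_address": "1600 Amphitheatre Parkway, ...",
        #                 "place_id": "ChIJ..."}]}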
async with self.client_session.get(
"https://maps.googleapis.com/maps/api/geocode/json",
params={"address": address, "key": api},
) as resp:
address_json = await resp.json()
# if google can't find the address results will be empty
if not (address_json["results"]):
# break out if user replied cancel, exit, stop, etc.
if address.lower() in self.TERMINAL_ANSWERS:
return None
# Otherwise, apologize and ask again
await self.send_message(
recipient,
"Sorry, I couldn't find that. \nPlease try again or reply cancel to cancel \n",
)
return await self.ask_address_question(
recipient, question_text, require_first_device, require_confirmation
)
# if maps does return a formatted address
if address_json["results"] and address_json["results"][0]["formatted_address"]:
if require_confirmation:
# Tell user the address we got and ask them to confirm
# Give them a google Maps link so they can check
maybe_address = address_json["results"][0]["formatted_address"]
maps_url = f"https://www.google.com/maps/search/?api=1&query={urllib.parse.quote_plus(maybe_address)}&query_place_id={address_json['results'][0]['place_id']}"
confirmation = await self.ask_yesno_question(
recipient,
f"Got: \n{maybe_address} \n\n{maps_url} \n\nIs this your address? (yes/no)",
require_first_device,
)
# If not, ask again
if not confirmation:
return await self.ask_address_question(
recipient,
question_text,
require_first_device,
require_confirmation,
)
return address_json["results"][0]["formatted_address"]
# If we made it here something unexpected probably went wrong.
# Google returned something but didn't have a formatted address
return None
async def ask_multiple_choice_question( # pylint: disable=too-many-arguments
self,
recipient: str,
question_text: Optional[str],
options: Union[dict[str, str], list[str]],
require_confirmation: bool = True,
require_first_device: bool = False,
) -> Optional[str]:
"""Prompts the user to select from a series of options.
Behaviour alters slightly based on options:
options as list -> we write labels for you with "1,2,3,...."
options as dict -> dict keys are the labels
options as dict with all values "" -> the labels are the options,
and only labels are printed"""
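        # Illustrative usage (hypothetical values, not in the original source):
        #   size = await self.ask_multiple_choice_question(
        #       recipient, "Pick a size:",
        #       {"S": "small", "M": "medium", "L": "large"})
        # which prompts with "Pick a size:" followed by "S) small", "M) medium"
        # and "L) large" on separate lines and expects a reply of S, M or L
        # (case-insensitive).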
## TODO: allow fuzzy answers or lowercase answers. Needs design discussion.
# Check to ensure that user is on their first device as opposed to a linked device
# Important for certain questions involving payment addresses
if require_first_device:
self.requires_first_device[recipient] = True
if question_text is None:
question_text = "Pick one from these options:"
options_text = ""
# User can pass just a list of options and we generate labels for them using enumerate
# User can provide their own labels for the options by passing a dict
# Create a question with just labels by having all values be ""
# This will format the options text and check for a just labels question
if isinstance(options, list):
dict_options: dict[Any, str] = {
str(i): value for i, value in enumerate(options, start=1)
}
else:
dict_options = options
# Put ) between labels and text, if dict is all empty values leave blank
spacer = ") " if any(dict_options.values()) else ""
# We use a generator object to join all the options
# into one text that can be sent to the user
options_text = " \n".join(
f"{label}{spacer}{body}" for label, body in dict_options.items()
)
# for the purposes of making it case insensitive, make sure no options are the same when lowercased
lower_dict_options = {k.lower(): v for (k, v) in dict_options.items()}
if len(lower_dict_options) != len(dict_options):
raise ValueError("Need to ensure unique options when lower-cased!")
# send user the formatted question as a freeform question and process their response
answer = await self.ask_freeform_question(
recipient, question_text + "\n" + options_text, require_first_device
)
# when there is a match
if answer and answer.lower() in lower_dict_options.keys():
# if confirmation is required ask for it as a yes/no question
if require_confirmation:
confirmation_text = (
"You picked: \n"
+ answer
+ spacer
+ lower_dict_options[answer.lower()]
+ "\n\nIs this correct? (yes/no)"
)
confirmation = await self.ask_yesno_question(
recipient, confirmation_text
)
# if no, ask the question again
if not confirmation:
return await self.ask_multiple_choice_question(
recipient,
question_text,
dict_options,
require_confirmation,
require_first_device,
)
# if the answer given does not match a label
if answer and not answer.lower() in lower_dict_options.keys():
# return none and exit if user types cancel, stop, exit, etc...
if answer.lower() in self.TERMINAL_ANSWERS:
return None
# otherwise reminder to type the label exactly as it appears and restate the question
if "Please reply" not in question_text:
question_text = (
"Please reply with just the label exactly | |
to kill me- 'banished'?
O friar, the damned use that word in hell;
Howling attends it! How hast thou the heart,
Being a divine, a ghostly confessor,
A sin-absolver, and my friend profess'd,
To mangle me with that word 'banished'?
Friar. Thou fond mad man, hear me a little speak.
Rom. O, thou wilt speak again of banishment.
Friar. I'll give thee armour to keep off that word;
Adversity's sweet milk, philosophy,
To comfort thee, though thou art banished.
Rom. Yet 'banished'? Hang up philosophy!
Unless philosophy can make a Juliet,
Displant a town, reverse a prince's doom,
It helps not, it prevails not. Talk no more.
Friar. O, then I see that madmen have no ears.
Rom. How should they, when that wise men have no eyes?
Friar. Let me dispute with thee of thy estate.
Rom. Thou canst not speak of that thou dost not feel.
Wert thou as young as I, Juliet thy love,
An hour but married, Tybalt murdered,
Doting like me, and like me banished,
Then mightst thou speak, then mightst thou tear thy hair,
And fall upon the ground, as I do now,
Taking the measure of an unmade grave.
Knock [within].
Friar. Arise; one knocks. Good Romeo, hide thyself.
Rom. Not I; unless the breath of heartsick groans,
Mist-like infold me from the search of eyes. Knock.
Friar. Hark, how they knock! Who's there? Romeo, arise;
Thou wilt be taken.- Stay awhile!- Stand up; Knock.
Run to my study.- By-and-by!- God's will,
What simpleness is this.- I come, I come! Knock.
    Who knocks so hard? Whence come you? What's your will?
Nurse. [within] Let me come in, and you shall know my errand.
    I come from Lady Juliet.
Friar. Welcome then.
Enter Nurse.
Nurse. O holy friar, O, tell me, holy friar
Where is my lady's lord, where's Romeo?
Friar. There on the ground, with his own tears made drunk.
Nurse. O, he is even in my mistress' case,
Just in her case!
Friar. O woeful sympathy!
Piteous predicament!
Nurse. Even so lies she,
Blubb'ring and weeping, weeping and blubbering.
Stand up, stand up! Stand, an you be a man.
For Juliet's sake, for her sake, rise and stand!
Why should you fall into so deep an O?
Rom. (rises) Nurse-
Nurse. Ah sir! ah sir! Well, death's the end of all.
Rom. Spakest thou of Juliet? How is it with her?
Doth not she think me an old murtherer,
Now I have stain'd the childhood of our joy
With blood remov'd but little from her own?
Where is she? and how doth she! and what says
My conceal'd lady to our cancell'd love?
Nurse. O, she says nothing, sir, but weeps and weeps;
And now falls on her bed, and then starts up,
And Tybalt calls; and then on Romeo cries,
And then down falls again.
Rom. As if that name,
Shot from the deadly level of a gun,
Did murther her; as that name's cursed hand
Murder'd her kinsman. O, tell me, friar, tell me,
In what vile part of this anatomy
Doth my name lodge? Tell me, that I may sack
The hateful mansion. [Draws his dagger.]
Friar. Hold thy desperate hand.
Art thou a man? Thy form cries out thou art;
Thy tears are womanish, thy wild acts denote
The unreasonable fury of a beast.
Unseemly woman in a seeming man!
Or ill-beseeming beast in seeming both!
Thou hast amaz'd me. By my holy order,
I thought thy disposition better temper'd.
Hast thou slain Tybalt? Wilt thou slay thyself?
And slay thy lady that in thy life lives,
By doing damned hate upon thyself?
Why railest thou on thy birth, the heaven, and earth?
Since birth and heaven and earth, all three do meet
In thee at once; which thou at once wouldst lose.
Fie, fie, thou shamest thy shape, thy love, thy wit,
Which, like a usurer, abound'st in all,
And usest none in that true use indeed
Which should bedeck thy shape, thy love, thy wit.
Thy noble shape is but a form of wax
Digressing from the valour of a man;
Thy dear love sworn but hollow perjury,
Killing that love which thou hast vow'd to cherish;
Thy wit, that ornament to shape and love,
Misshapen in the conduct of them both,
Like powder in a skilless soldier's flask,
    Is set afire by thine own ignorance,
And thou dismemb'red with thine own defence.
What, rouse thee, man! Thy Juliet is alive,
For whose dear sake thou wast but lately dead.
There art thou happy. Tybalt would kill thee,
But thou slewest Tybalt. There art thou happy too.
The law, that threat'ned death, becomes thy friend
And turns it to exile. There art thou happy.
A pack of blessings light upon thy back;
Happiness courts thee in her best array;
    But, like a misbehav'd and sullen wench,
Thou pout'st upon thy fortune and thy love.
Take heed, take heed, for such die miserable.
Go get thee to thy love, as was decreed,
Ascend her chamber, hence and comfort her.
But look thou stay not till the watch be set,
For then thou canst not pass to Mantua,
Where thou shalt live till we can find a time
To blaze your marriage, reconcile your friends,
Beg pardon of the Prince, and call thee back
With twenty hundred thousand times more joy
Than thou went'st forth in lamentation.
Go before, nurse. Commend me to thy lady,
And bid her hasten all the house to bed,
Which heavy sorrow makes them apt unto.
Romeo is coming.
Nurse. O Lord, I could have stay'd here all the night
To hear good counsel. O, what learning is!
My lord, I'll tell my lady you will come.
Rom. Do so, and bid my sweet prepare to chide.
Nurse. Here is a ring she bid me give you, sir.
Hie you, make haste, for it grows very late. Exit.
Rom. How well my comfort is reviv'd by this!
Friar. Go hence; good night; and here stands all your state:
Either be gone before the watch be set,
Or by the break of day disguis'd from hence.
Sojourn in Mantua. I'll find out your man,
And he shall signify from time to time
Every good hap to you that chances here.
Give me thy hand. 'Tis late. Farewell; good night.
Rom. But that a joy past joy calls out on me,
It were a grief so brief to part with thee.
Farewell.
Exeunt.
Scene IV.
Capulet's house
Enter Old Capulet, his Wife, and Paris.
Cap. Things have fall'n out, sir, so unluckily
That we have had no time to move our daughter.
Look you, she lov'd her kinsman Tybalt dearly,
And so did I. Well, we were born to die.
'Tis very late; she'll not come down to-night.
I promise you, but for your company,
I would have been abed an hour ago.
Par. These times of woe afford no tune to woo.
Madam, good night. Commend me to your daughter.
Lady. I will, and know her mind early to-morrow;
To-night she's mew'd up to her heaviness.
Cap. Sir Paris, I will make a desperate tender
Of my child's love. I think she will be rul'd
In all respects by me; nay more, I doubt it not.
Wife, go you to her ere you go to bed;
Acquaint her here of my son Paris' love
And bid her (mark you me?) on Wednesday next-
But, soft! what day is this?
Par. Monday, my lord.
Cap. Monday! ha, ha! Well, Wednesday is too soon.
Thursday let it be- a Thursday, tell her
She shall be married to this noble earl.
Will you be ready? Do you like this haste?
    We'll
    s1 = np.array([0, 1e-16, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert_allclose(s1, np.array([0, 1e-16, 1, 2, 3]))
assert_allclose(s2, np.array([1, 1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std until the end of partial fits, and
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert scaler_batch.var_ == scaler_incr.var_ # Nones
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
# Test std until the end of partial fits, and
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
def test_standard_scaler_partial_fit_numerical_stability():
    # Test if the incremental computation introduces significant errors
    # for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
    # Regardless of absolute values, they must not differ by more than 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
# NOTE Be aware that for much larger offsets std is very unstable (last
# assert) while mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
        # Regardless of magnitude, they must not differ by more than 6 significant digits
tol = 10 ** (-6)
assert scaler.mean_ is not None
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
@pytest.mark.parametrize("sample_weight", [True, None])
def test_partial_fit_sparse_input(sample_weight):
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
if sample_weight:
sample_weight = rng.rand(X_csc.shape[0])
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(
X, sample_weight=sample_weight).transform(X)
assert_array_equal(X_null.toarray(), X.toarray())
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.toarray(), X_null.toarray())
assert_array_equal(X_orig.toarray(), X.toarray())
@pytest.mark.parametrize("sample_weight", [True, None])
def test_standard_scaler_transform_with_partial_fit(sample_weight):
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
if sample_weight:
sample_weight = rng.rand(X.shape[0])
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
if sample_weight is None:
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
else:
scaled_batch = StandardScaler().fit_transform(
X_sofar, sample_weight=sample_weight[:i + 1])
scaler_incr = scaler_incr.partial_fit(
X[batch], sample_weight=sample_weight[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.finfo(float).eps
assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal
assert_array_less(zero, scaler_incr.scale_ + epsilon)
if sample_weight is None:
# (i+1) because the Scaler has been already fitted
assert (i + 1) == scaler_incr.n_samples_seen_
else:
assert (
np.sum(sample_weight[:i + 1]) ==
pytest.approx(scaler_incr.n_samples_seen_)
)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
with pytest.raises(ValueError):
scaler.fit(X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert scaler.n_samples_seen_ == X.shape[0]
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert X_scaled.min() >= 0.
assert X_scaled.max() <= 1.
assert scaler.n_samples_seen_ == X.shape[0]
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
@pytest.mark.parametrize("sample_weight", [True, None])
def test_scaler_without_centering(sample_weight):
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
if sample_weight:
sample_weight = rng.rand(X.shape[0])
with pytest.raises(ValueError):
StandardScaler().fit(X_csr)
with pytest.raises(ValueError):
StandardScaler().fit(X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(
X, sample_weight=sample_weight)
X_scaled = scaler.transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
scaler_csr = StandardScaler(with_mean=False).fit(
X_csr, sample_weight=sample_weight)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert not np.any(np.isnan(X_csr_scaled.data))
scaler_csc = StandardScaler(with_mean=False).fit(
X_csc, sample_weight=sample_weight)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert not np.any(np.isnan(X_csc_scaled.data))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.n_samples_seen_,
scaler_csr.n_samples_seen_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(scaler.n_samples_seen_,
scaler_csc.n_samples_seen_)
if sample_weight is None:
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_var = \
mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_var, X_scaled.var(axis=0))
# Check that X has not been modified (copy)
assert X_scaled is not X
assert X_csr_scaled is not X_csr
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert X_csr_scaled_back is not X_csr
assert X_csr_scaled_back is not X_csr_scaled
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert X_csc_scaled_back is not X_csc
assert X_csc_scaled_back is not X_csc_scaled
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
@pytest.mark.parametrize("with_mean", [True, False])
@pytest.mark.parametrize("with_std", [True, False])
@pytest.mark.parametrize("array_constructor",
[np.asarray, sparse.csc_matrix, sparse.csr_matrix])
def test_scaler_n_samples_seen_with_nan(with_mean, with_std,
array_constructor):
X = np.array([[0, 1, 3],
[np.nan, 6, 10],
[5, 4, np.nan],
[8, 0, np.nan]],
dtype=np.float64)
X = array_constructor(X)
if sparse.issparse(X) and with_mean:
pytest.skip("'with_mean=True' cannot be used with sparse matrix.")
transformer = StandardScaler(with_mean=with_mean, with_std=with_std)
transformer.fit(X)
assert_array_equal(transformer.n_samples_seen_, np.array([3, 4, 2]))
def _check_identity_scalers_attributes(scaler_1, scaler_2):
assert scaler_1.mean_ is scaler_2.mean_ is None
assert scaler_1.var_ is scaler_2.var_ is None
assert scaler_1.scale_ is scaler_2.scale_ is None
assert scaler_1.n_samples_seen_ == scaler_2.n_samples_seen_
def test_scaler_return_identity():
# test that the scaler return identity when with_mean and with_std are
# False
X_dense = np.array([[0, 1, 3],
[5, 6, 0],
[8, 0, 10]],
dtype=np.float64)
X_csr = sparse.csr_matrix(X_dense)
X_csc = X_csr.tocsc()