#!/usr/bin/python3 -i
#
# Copyright (c) 2015-2021 The Khronos Group Inc.
# Copyright (c) 2015-2021 Valve Corporation
# Copyright (c) 2015-2021 LunarG, Inc.
# Copyright (c) 2015-2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Dustin Graves <[email protected]>
# Author: Mark Lobodzinski <[email protected]>
# Author: Dave Houlton <[email protected]>
import os,re,sys,string,json
import xml.etree.ElementTree as etree
from generator import *
from collections import namedtuple
from common_codegen import *
# This is a workaround to keep the syntax compatible with both Python 2.7 and 3.x.
from io import open
# ParameterValidationGeneratorOptions - subclass of GeneratorOptions.
#
# Adds options used by ParameterValidationOutputGenerator object during Parameter validation layer generation.
#
# Additional members
# prefixText - list of strings to prefix generated header with
# (usually a copyright statement + calling convention macros).
# protectFile - True if multiple inclusion protection should be
# generated (based on the filename) around the entire header.
# protectFeature - True if #ifndef..#endif protection should be
# generated around a feature interface in the header file.
# genFuncPointers - True if function pointer typedefs should be
# generated
# protectProto - If conditional protection should be generated
# around prototype declarations, set to either '#ifdef'
# to require opt-in (#ifdef protectProtoStr) or '#ifndef'
# to require opt-out (#ifndef protectProtoStr). Otherwise
# set to None.
# protectProtoStr - #ifdef/#ifndef symbol to use around prototype
# declarations, if protectProto is set
# apicall - string to use for the function declaration prefix,
# such as APICALL on Windows.
# apientry - string to use for the calling convention macro,
# in typedefs, such as APIENTRY.
# apientryp - string to use for the calling convention macro
# in function pointer typedefs, such as APIENTRYP.
# indentFuncProto - True if prototype declarations should put each
# parameter on a separate line
# indentFuncPointer - True if typedefed function pointers should put each
# parameter on a separate line
# alignFuncParam - if nonzero and parameters are being put on a
# separate line, align parameter names at the specified column
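#
# Illustrative construction (values below are placeholders, not the ones used by the real build scripts;
# beginFile() expects filename to be 'parameter_validation.h' or 'parameter_validation.cpp'):
#   opts = ParameterValidationGeneratorOptions(
#       filename = 'parameter_validation.cpp',
#       directory = gen_dir,
#       apiname = 'vulkan',
#       valid_usage_path = scripts_dir)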
class ParameterValidationGeneratorOptions(GeneratorOptions):
def __init__(self,
conventions = None,
filename = None,
directory = '.',
genpath = None,
apiname = None,
profile = None,
versions = '.*',
emitversions = '.*',
defaultExtensions = None,
addExtensions = None,
removeExtensions = None,
emitExtensions = None,
emitSpirv = None,
sortProcedure = regSortFeatures,
prefixText = "",
apicall = '',
apientry = '',
apientryp = '',
indentFuncProto = True,
indentFuncPointer = False,
alignFuncParam = 0,
expandEnumerants = True,
valid_usage_path = ''):
GeneratorOptions.__init__(self,
conventions = conventions,
filename = filename,
directory = directory,
genpath = genpath,
apiname = apiname,
profile = profile,
versions = versions,
emitversions = emitversions,
defaultExtensions = defaultExtensions,
addExtensions = addExtensions,
removeExtensions = removeExtensions,
emitExtensions = emitExtensions,
emitSpirv = emitSpirv,
sortProcedure = sortProcedure)
self.prefixText = prefixText
self.apicall = apicall
self.apientry = apientry
self.apientryp = apientryp
self.indentFuncProto = indentFuncProto
self.indentFuncPointer = indentFuncPointer
self.alignFuncParam = alignFuncParam
self.expandEnumerants = expandEnumerants
self.valid_usage_path = valid_usage_path
# ParameterValidationOutputGenerator - subclass of OutputGenerator.
# Generates param checker layer code.
#
# ---- methods ----
# ParameterValidationOutputGenerator(errFile, warnFile, diagFile) - args as for
# OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genType(typeinfo,name)
# genStruct(typeinfo,name)
# genGroup(groupinfo,name)
# genEnum(enuminfo, name)
# genCmd(cmdinfo)
class ParameterValidationOutputGenerator(OutputGenerator):
"""Generate Parameter Validation code based on XML element attributes"""
# This is an ordered list of sections in the header file.
ALL_SECTIONS = ['command']
def __init__(self,
errFile = sys.stderr,
warnFile = sys.stderr,
diagFile = sys.stdout):
OutputGenerator.__init__(self, errFile, warnFile, diagFile)
self.INDENT_SPACES = 4
self.declarations = []
inline_custom_source_preamble = """
"""
# These functions have additional, custom-written checks in the utils cpp file. CodeGen will automatically add a call
# to those functions, of the form 'bool manual_PreCallValidate<ApiName>', where the leading 'vk' is dropped from the API name.
# See 'manual_PreCallValidateCreateGraphicsPipelines' as an example.
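# Illustrative: for 'vkCreateSampler' the generated PreCallValidateCreateSampler() gains a call to
# manual_PreCallValidateCreateSampler() whose result is folded into 'skip' (exact call site is
# assumed here; it is emitted by the command-generation code below).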
self.functions_with_manual_checks = [
'vkCreateInstance',
'vkCreateDevice',
'vkCreateQueryPool',
'vkCreateRenderPass',
'vkCreateRenderPass2',
'vkCreateRenderPass2KHR',
'vkCreateBuffer',
'vkCreateImage',
'vkCreatePipelineLayout',
'vkCreateGraphicsPipelines',
'vkCreateComputePipelines',
'vkCreateRayTracingPipelinesNV',
'vkCreateRayTracingPipelinesKHR',
'vkCreateSampler',
'vkCreateDescriptorSetLayout',
'vkFreeDescriptorSets',
'vkUpdateDescriptorSets',
'vkBeginCommandBuffer',
'vkCmdSetViewport',
'vkCmdSetScissor',
'vkCmdSetLineWidth',
'vkCmdDrawIndirect',
'vkCmdDrawIndexedIndirect',
'vkCmdDrawMultiEXT',
'vkCmdDrawMultiIndexedEXT',
'vkCmdClearAttachments',
'vkCmdBindIndexBuffer',
'vkCmdCopyBuffer',
'vkCmdUpdateBuffer',
'vkCmdFillBuffer',
'vkCreateSwapchainKHR',
'vkCreateSharedSwapchainsKHR',
'vkQueuePresentKHR',
'vkCreateDescriptorPool',
'vkCmdDispatch',
'vkCmdDispatchIndirect',
'vkCmdDispatchBaseKHR',
'vkCmdPushDescriptorSetKHR',
'vkCmdSetExclusiveScissorNV',
'vkCmdSetViewportShadingRatePaletteNV',
'vkCmdSetCoarseSampleOrderNV',
'vkCmdDrawMeshTasksNV',
'vkCmdDrawMeshTasksIndirectNV',
'vkCmdDrawMeshTasksIndirectCountNV',
'vkAllocateMemory',
'vkCreateAccelerationStructureNV',
'vkCreateAccelerationStructureKHR',
'vkGetAccelerationStructureHandleNV',
'vkGetPhysicalDeviceImageFormatProperties',
'vkGetPhysicalDeviceImageFormatProperties2',
'vkGetPhysicalDeviceImageFormatProperties2KHR',
'vkCmdBuildAccelerationStructureNV',
'vkCreateFramebuffer',
'vkCmdSetLineStippleEXT',
'vkSetDebugUtilsObjectNameEXT',
'vkSetDebugUtilsObjectTagEXT',
'vkCmdSetViewportWScalingNV',
'vkAcquireNextImageKHR',
'vkAcquireNextImage2KHR',
'vkCmdBindTransformFeedbackBuffersEXT',
'vkCmdBeginTransformFeedbackEXT',
'vkCmdEndTransformFeedbackEXT',
'vkCmdDrawIndirectByteCountEXT',
'vkCreateSamplerYcbcrConversion',
'vkCreateSamplerYcbcrConversionKHR',
'vkImportSemaphoreFdKHR',
'vkCmdBindVertexBuffers',
'vkCreateImageView',
'vkCopyAccelerationStructureToMemoryKHR',
'vkCmdCopyAccelerationStructureToMemoryKHR',
'vkCopyAccelerationStructureKHR',
'vkCmdCopyAccelerationStructureKHR',
'vkCopyMemoryToAccelerationStructureKHR',
'vkCmdCopyMemoryToAccelerationStructureKHR',
'vkCmdDrawIndirectCount',
'vkCmdDrawIndirectCountKHR',
'vkCmdDrawIndexedIndirectCount',
'vkCmdDrawIndexedIndirectCountKHR',
'vkCmdWriteAccelerationStructuresPropertiesKHR',
'vkWriteAccelerationStructuresPropertiesKHR',
'vkGetRayTracingCaptureReplayShaderGroupHandlesKHR',
'vkCmdTraceRaysKHR',
'vkCmdTraceRaysNV',
'vkCmdTraceRaysIndirectKHR',
'vkCmdBuildAccelerationStructureIndirectKHR',
'vkGetDeviceAccelerationStructureCompatibilityKHR',
'vkCmdSetViewportWithCountEXT',
'vkCmdSetScissorWithCountEXT',
'vkCmdBindVertexBuffers2EXT',
'vkCmdCopyBuffer2KHR',
'vkCmdBuildAccelerationStructuresKHR',
'vkCmdBuildAccelerationStructuresIndirectKHR',
'vkBuildAccelerationStructuresKHR',
'vkGetAccelerationStructureBuildSizesKHR',
'vkCmdWriteAccelerationStructuresPropertiesNV',
'vkCreateDisplayModeKHR',
'vkCreatePrivateDataSlotEXT',
'vkCmdSetVertexInputEXT',
'vkCmdPushConstants',
'vkMergePipelineCaches'
]
# Commands to ignore
self.blacklist = [
'vkGetInstanceProcAddr',
'vkGetDeviceProcAddr',
'vkEnumerateInstanceVersion',
'vkEnumerateInstanceLayerProperties',
'vkEnumerateInstanceExtensionProperties',
'vkEnumerateDeviceLayerProperties',
'vkEnumerateDeviceExtensionProperties',
'vkGetDeviceGroupSurfacePresentModes2EXT'
]
# Structure fields to ignore
self.structMemberBlacklist = { 'VkWriteDescriptorSet' : ['dstSet'], 'VkAccelerationStructureGeometryKHR' :['geometry'] }
# Validation conditions for some special case struct members that are conditionally validated
self.structMemberValidationConditions = { 'VkPipelineColorBlendStateCreateInfo' : { 'logicOp' : '{}logicOpEnable == VK_TRUE' } }
# Header version
self.headerVersion = None
# Internal state - accumulators for different inner block text
self.validation = [] # Text comprising the main per-api parameter validation routines
self.stypes = [] # Values from the VkStructureType enumeration
self.structTypes = dict() # Map of Vulkan struct typename to required VkStructureType
self.handleTypes = set() # Set of handle type names
self.commands = [] # List of CommandData records for all Vulkan commands
self.structMembers = [] # List of StructMemberData records for all Vulkan structs
self.validatedStructs = dict() # Map of structs type names to generated validation code for that struct type
self.enumRanges = set() # Set of enum names
self.enum_values_definitions = dict() # [enum, string] containing enumerated type map definitions
self.flag_values_definitions = dict() # [flag, string] containing flag type map definitions
self.stype_version_dict = dict() # Dict mapping a VkStructureType value to the version or extension that introduced it
self.flags = set() # Map of flags typenames
self.flagBits = dict() # Map of flag bits typename to list of values
self.newFlags = set() # Map of flags typenames /defined in the current feature/
self.required_extensions = dict() # Dictionary of required extensions for each item in the current extension
self.extension_type = '' # Type of active feature (extension), device or instance
self.extension_names = dict() # Dictionary of extension names to extension name defines
self.structextends_list = [] # List of extensions which extend another struct
self.struct_feature_protect = dict() # Dictionary of structnames and FeatureExtraProtect strings
self.valid_vuids = set() # Set of all valid VUIDs
self.vuid_dict = dict() # VUID dictionary (from JSON)
self.alias_dict = dict() # Dict of cmd|struct aliases
self.header_file = False # Header file generation flag
self.source_file = False # Source file generation flag
self.instance_extension_list = '' # Comma-separated string of instance extension name defines
self.device_extension_list = '' # Comma-separated string of device extension name defines
self.returnedonly_structs = [] # List of structs with the 'returnedonly' attribute
self.called_types = set() # Set of types called via function/struct - not in list == app never passes in to validate
# Named tuples to store struct and command data
self.CommandParam = namedtuple('CommandParam', ['type', 'name', 'ispointer', 'isstaticarray', 'isbool', 'israngedenum',
'isconst', 'isoptional', 'iscount', 'noautovalidity',
'len', 'extstructs', 'condition', 'cdecl'])
self.CommandData = namedtuple('CommandData', ['name', 'params', 'cdecl', 'extension_type', 'result', 'promotion_info'])
self.StructMemberData = namedtuple('StructMemberData', ['name', 'members'])
#
# Generate Copyright comment block for file
def GenerateCopyright(self):
copyright = '/* *** THIS FILE IS GENERATED - DO NOT EDIT! ***\n'
copyright += ' * See parameter_validation_generator.py for modifications\n'
copyright += ' *\n'
copyright += ' * Copyright (c) 2015-2021 The Khronos Group Inc.\n'
copyright += ' * Copyright (c) 2015-2017 Valve Corporation\n'
copyright += ' * Copyright (c) 2015-2021 LunarG, Inc.\n'
copyright += ' * Copyright (C) 2015-2021 Google Inc.\n'
copyright += ' *\n'
copyright += ' * Licensed under the Apache License, Version 2.0 (the "License");\n'
copyright += ' * you may not use this file except in compliance with the License.\n'
copyright += ' * You may obtain a copy of the License at\n'
copyright += ' *\n'
copyright += ' * http://www.apache.org/licenses/LICENSE-2.0\n'
copyright += ' *\n'
copyright += ' * Unless required by applicable law or agreed to in writing, software\n'
copyright += ' * distributed under the License is distributed on an "AS IS" BASIS,\n'
copyright += ' * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n'
copyright += ' * See the License for the specific language governing permissions and\n'
copyright += ' * limitations under the License.\n'
copyright += ' *\n'
copyright += ' * Author: Mark Lobodzinski <[email protected]>\n'
copyright += ' * Author: Dave Houlton <[email protected]>\n'
copyright += ' */\n\n'
return copyright
#
# Increases the global indent variable
def incIndent(self, indent):
inc = ' ' * self.INDENT_SPACES
if indent:
return indent + inc
return inc
#
# Decreases the global indent variable
def decIndent(self, indent):
if indent and (len(indent) > self.INDENT_SPACES):
return indent[:-self.INDENT_SPACES]
return ''
#
# Walk the JSON-derived dict and find all "vuid" key values
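# Illustrative example (shape of validusage.json is assumed here): for
#   {"validation": {"vkFoo": [{"vuid": "VUID-vkFoo-bar-00001", "text": "..."}]}}
# this generator yields "VUID-vkFoo-bar-00001".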
def ExtractVUIDs(self, d):
if hasattr(d, 'items'):
for k, v in d.items():
if k == "vuid":
yield v
elif isinstance(v, dict):
for s in self.ExtractVUIDs(v):
yield s
elif isinstance (v, list):
for l in v:
for s in self.ExtractVUIDs(l):
yield s
#
# Called at file creation time
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
self.header_file = (genOpts.filename == 'parameter_validation.h')
self.source_file = (genOpts.filename == 'parameter_validation.cpp')
if not self.header_file and not self.source_file:
print("Error: Output Filenames have changed, update generator source.\n")
sys.exit(1)
if self.source_file or self.header_file:
# Output Copyright text
s = self.GenerateCopyright()
write(s, file=self.outFile)
if self.header_file:
return
stype_map = ''
stype_version_dict = dict()
# Create contents of Structs->API version unordered map
root = self.registry.reg
for node in root.findall('feature'):
version_name = node.get('name')
version_name = version_name.replace('VK_', 'VK_API_')
for enum_item in node.iter('enum'):
if enum_item.get('extends') == "VkStructureType":
struct_type_id = enum_item.get('name')
self.stype_version_dict[struct_type_id] = version_name
for extensions in root.findall('extensions'):
for extension in extensions.findall('extension'):
for entry in extension.iterfind('require/enum[@extends="VkStructureType"]'):
alias = entry.get('alias')
if alias is not None and (entry.get('comment') is None or 'typo' not in entry.get('comment')):
self.stype_version_dict[alias] = extension.get('name')
# Build map of structure type names to VkStructureType enum values
# Find all types of category "struct"
for struct in self.registry.tree.iterfind('types/type[@category="struct"]'):
# Check if struct has member named "sType" of type "VkStructureType" which has values defined
stype = struct.find('member[name="sType"][type="VkStructureType"][@values]')
if stype is not None:
# Store VkStructureType value for this type
self.structTypes[struct.get('name')] = stype.get('values')
self.valid_usage_path = genOpts.valid_usage_path
vu_json_filename = os.path.join(self.valid_usage_path, 'validusage.json')
if os.path.isfile(vu_json_filename):
json_file = open(vu_json_filename, 'r', encoding='utf-8')
self.vuid_dict = json.load(json_file)
json_file.close()
if len(self.vuid_dict) == 0:
print("Error: Could not find, or error loading %s/validusage.json\n", vu_json_filename)
sys.exit(1)
#
# Build a set of all vuid text strings found in validusage.json
for json_vuid_string in self.ExtractVUIDs(self.vuid_dict):
self.valid_vuids.add(json_vuid_string)
#
# Headers
write('#include "chassis.h"', file=self.outFile)
self.newline()
write('#include "stateless_validation.h"', file=self.outFile)
self.newline()
#
# Called at end-time for final content output
def endFile(self):
if self.source_file:
# C-specific
self.newline()
# Flag/enum lists are not needed if the app can never pass the type in to be validated,
# but everything must be saved because not all information is known until endFile()
for flag, string in self.flag_values_definitions.items():
if flag == 'VkGeometryInstanceFlagsKHR':
# only called in VkAccelerationStructureInstanceKHR which is never called anywhere explicitly
continue
flagBits = flag.replace('Flags', 'FlagBits')
if flag in self.called_types or flagBits in self.called_types:
write(string, file=self.outFile)
for enum, string in self.enum_values_definitions.items():
if enum in self.called_types:
write(string, file=self.outFile)
self.newline()
self.newline()
api_func = 'bool StatelessValidation::CheckPromotedApiAgainstVulkanVersion(VkInstance instance, const char *api_name, const uint32_t promoted_version) const {\n'
api_func += ' bool skip = false;\n'
api_func += ' if (api_version < promoted_version) {\n'
api_func += ' skip = LogError(instance,\n'
api_func += ' kVUID_PVError_ApiVersionViolation, "Attempted to call %s() with an effective API version of %s "\n'
api_func += ' "but this API was not promoted until version %s.", api_name, StringAPIVersion(api_version).c_str(),\n'
api_func += ' StringAPIVersion(promoted_version).c_str());\n'
api_func += ' }\n'
api_func += ' return skip;\n'
api_func += '}\n\n'
api_func += 'bool StatelessValidation::CheckPromotedApiAgainstVulkanVersion(VkPhysicalDevice pdev, const char *api_name, const uint32_t promoted_version) const {\n'
api_func += ' bool skip = false;\n'
api_func += ' const auto &target_pdev = physical_device_properties_map.find(pdev);\n'
api_func += ' if (target_pdev != physical_device_properties_map.end()) {\n'
api_func += ' auto effective_api_version = std::min(target_pdev->second->apiVersion, api_version);\n'
api_func += ' if (effective_api_version < promoted_version) {\n'
api_func += ' skip = LogError(instance,\n'
api_func += ' kVUID_PVError_ApiVersionViolation, "Attempted to call %s() with an effective API version of %s, "\n'
api_func += ' "which is the minimum of version requested in pApplicationInfo (%s) and supported by this physical device (%s), "\n'
api_func += ' "but this API was not promoted until version %s.", api_name, StringAPIVersion(api_version).c_str(),\n'
api_func += ' StringAPIVersion(target_pdev->second->apiVersion).c_str(), StringAPIVersion(effective_api_version).c_str(),\n'
api_func += ' StringAPIVersion(promoted_version).c_str());\n'
api_func += ' }\n'
api_func += ' }\n'
api_func += ' return skip;\n'
api_func += '}\n'
write(api_func, file=self.outFile)
pnext_handler = 'bool StatelessValidation::ValidatePnextStructContents(const char *api_name, const ParameterName ¶meter_name,\n'
pnext_handler += ' const VkBaseOutStructure* header, const char *pnext_vuid) const {\n'
pnext_handler += ' bool skip = false;\n'
pnext_handler += ' switch(header->sType) {\n'
# Do some processing here to extract data from validatedstructs...
for item in self.structextends_list:
postProcSpec = {}
postProcSpec['ppp'] = '' if not item else '{postProcPrefix}'
postProcSpec['pps'] = '' if not item else '{postProcSuffix}'
postProcSpec['ppi'] = '' if not item else '{postProcInsert}'
pnext_case = '\n'
pnext_check = ''
protect = ''
# Guard struct cases with feature ifdefs, if necessary
if item in self.struct_feature_protect.keys():
protect = self.struct_feature_protect[item]
pnext_case += '#ifdef %s\n' % protect
pnext_case += ' // Validation code for %s structure members\n' % item
pnext_case += ' case %s: { // Covers VUID-%s-sType-sType\n' % (self.structTypes[item], item)
# pNext version/extension-enabled checks
ver_info = ''
struct_type = self.structTypes[item]
if struct_type in self.stype_version_dict.keys():
ver_info = self.stype_version_dict[struct_type]
else:
if struct_type[:-4] in self.stype_version_dict.keys():
ver_info = self.stype_version_dict[struct_type[:-4]]
else:
ver_info = None
if ver_info is not None:
if 'VK_API_VERSION_' in ver_info:
api_version = ver_info
pnext_check += ' if (api_version < %s) {\n' % ver_info
pnext_check += ' skip |= LogError(\n'
pnext_check += ' instance, pnext_vuid,\n'
pnext_check += ' "%%s: Includes a pNext pointer (%%s) to a VkStructureType (%s) which was added in %s but the "\n' % (struct_type, ver_info)
pnext_check += ' "current effective API version is %s.",\n'
pnext_check += ' api_name, parameter_name.get_name().c_str(), StringAPIVersion(api_version).c_str());\n'
pnext_check += ' }\n'
else:
# Dependent on enabled extension
ext_name = ver_info
ext_name_define = self.extension_names[ver_info]
table_type = ''
if ext_name_define in self.instance_extension_list:
table_type = 'instance'
elif ext_name_define in self.device_extension_list:
table_type = 'device'
else:
print("Error in parameter_validation_generator.py CodeGen.")
norm_ext_name = ext_name_define[:-15].lower()
if table_type == 'device':
pnext_check += ' if ((!SupportedByPdev(physical_device, %s)) && !%s_extensions.%s) {\n' % (ext_name_define, table_type, norm_ext_name.lower())
else:
pnext_check += ' if (!%s_extensions.%s) {\n' % (table_type, norm_ext_name.lower())
pnext_check += ' skip |= LogError(\n'
pnext_check += ' instance, pnext_vuid,\n'
pnext_check += ' "%%s: Includes a pNext pointer (%%s) to a VkStructureType (%s), but its parent extension "\n' % struct_type
pnext_check += ' "%s has not been enabled.",\n' % ext_name
pnext_check += ' api_name, parameter_name.get_name().c_str());\n'
pnext_check += ' }\n'
pnext_check += '\n'
expr = self.expandStructCode(item, item, 'structure->', '', ' ', [], postProcSpec)
struct_validation_source = self.ScrubStructCode(expr)
if struct_validation_source != '':
pnext_case += ' %s *structure = (%s *) header;\n' % (item, item)
pnext_case += '%s%s' % (pnext_check, struct_validation_source)
pnext_case += ' } break;\n'
if protect:
pnext_case += '#endif // %s\n' % protect
# Skip functions containing no validation
if struct_validation_source or pnext_check != '':
pnext_handler += pnext_case
else:
pnext_handler += '\n // No Validation code for %s structure members -- Covers VUID-%s-sType-sType\n' % (item, item)
pnext_handler += ' default:\n'
pnext_handler += ' skip = false;\n'
pnext_handler += ' }\n'
pnext_handler += ' return skip;\n'
pnext_handler += '}\n'
write(pnext_handler, file=self.outFile)
self.newline()
ext_template = 'bool StatelessValidation::OutputExtensionError(const std::string &api_name, const std::string &extension_name) const {\n'
ext_template += ' return LogError(instance,\n'
ext_template += ' kVUID_PVError_ExtensionNotEnabled, "Attempted to call %s() but its required extension %s has not been enabled\\n",\n'
ext_template += ' api_name.c_str(), extension_name.c_str());\n'
ext_template += '}\n'
write(ext_template, file=self.outFile)
self.newline()
commands_text = '\n'.join(self.validation)
write(commands_text, file=self.outFile)
self.newline()
if self.header_file:
# Output declarations and record intercepted procedures
write('\n'.join(self.declarations), file=self.outFile)
# Finish processing in superclass
OutputGenerator.endFile(self)
#
# Processing at beginning of each feature or extension
def beginFeature(self, interface, emit):
# Start processing in superclass
OutputGenerator.beginFeature(self, interface, emit)
# C-specific
# Accumulate includes, defines, types, enums, function pointer typedefs, and function prototypes separately for this
# feature. They're only printed in endFeature().
self.headerVersion = None
self.stypes = []
self.commands = []
self.structMembers = []
self.newFlags = set()
self.featureExtraProtect = GetFeatureProtect(interface)
# Get base list of extension dependencies for all items in this extension
base_required_extensions = []
if "VK_VERSION_1" not in self.featureName:
nameElem = interface[0][1]
name = nameElem.get('name')
# Save Name Define to get correct enable name later
self.extension_names[self.featureName] = name
# This extension is the first dependency for this command
base_required_extensions.append(self.featureName)
# Add any defined extension dependencies to the base dependency list for this extension
requires = interface.get('requires')
if requires is not None:
base_required_extensions.extend(requires.split(','))
# Build dictionary of extension dependencies for each item in this extension
self.required_extensions = dict()
for require_element in interface.findall('require'):
# Copy base extension dependency list
required_extensions = list(base_required_extensions)
# Add any additional extension dependencies specified in this require block
additional_extensions = require_element.get('extension')
if additional_extensions:
required_extensions.extend(additional_extensions.split(','))
# Save full extension list for all named items
for element in require_element.findall('*[@name]'):
self.required_extensions[element.get('name')] = required_extensions
# And note if this is an Instance or Device extension
self.extension_type = interface.get('type')
if interface.tag == 'extension':
name_elem = interface[0][1]
name_definition = name_elem.get('name')
if 'EXTENSION_NAME' not in name_definition:
print("Error in vk.xml file -- extension name is not available")
if interface.get('type') == 'instance':
self.instance_extension_list += '%s, ' % name_definition
else:
self.device_extension_list += '%s, ' % name_definition
#
# Called at the end of each extension (feature)
def endFeature(self):
if self.header_file:
return
# C-specific
# Actually write the interface to the output file.
if (self.emit):
# If type declarations are needed by other features based on this one, it may be necessary to suppress the ExtraProtect,
# or move it below the 'for section...' loop.
ifdef = ''
if (self.featureExtraProtect is not None):
ifdef = '#ifdef %s\n' % self.featureExtraProtect
self.validation.append(ifdef)
# Generate the struct member checking code from the captured data
self.processStructMemberData()
# Generate the command parameter checking code from the captured data
self.processCmdData()
# Write the declaration for the HeaderVersion
if self.headerVersion:
write('const uint32_t GeneratedVulkanHeaderVersion = {};'.format(self.headerVersion), file=self.outFile)
# Write the declarations for the VkFlags values combining all flag bits
for flag in sorted(self.newFlags):
flagBits = flag.replace('Flags', 'FlagBits')
if flagBits in self.flagBits:
bits = self.flagBits[flagBits]
decl = 'const {} All{} = {}'.format(flag, flagBits, bits[0])
for bit in bits[1:]:
decl += '|' + bit
decl += ';'
self.flag_values_definitions[flag] = decl
endif = '\n'
if (self.featureExtraProtect is not None):
endif = '#endif // %s\n' % self.featureExtraProtect
self.validation.append(endif)
# Finish processing in superclass
OutputGenerator.endFeature(self)
#
# Type generation
def genType(self, typeinfo, name, alias):
# record the name/alias pair
if alias is not None:
self.alias_dict[name]=alias
OutputGenerator.genType(self, typeinfo, name, alias)
typeElem = typeinfo.elem
# If the type is a struct type, traverse the embedded <member> tags generating a structure. Otherwise, emit the tag text.
category = typeElem.get('category')
if (category == 'struct' or category == 'union'):
self.genStruct(typeinfo, name, alias)
elif (category == 'handle'):
self.handleTypes.add(name)
elif (category == 'bitmask'):
self.flags.add(name)
self.newFlags.add(name)
elif (category == 'define'):
if name == 'VK_HEADER_VERSION':
nameElem = typeElem.find('name')
self.headerVersion = noneStr(nameElem.tail).strip()
#
# Struct parameter check generation.
# This is a special case of the <type> tag where the contents are interpreted as a set of <member> tags instead of freeform C
# type declarations. The <member> tags are just like <param> tags - they are a declaration of a struct or union member.
# Only simple member declarations are supported (no nested structs etc.)
def genStruct(self, typeinfo, typeName, alias):
if not self.source_file:
return
# alias has already been recorded in genType, above
OutputGenerator.genStruct(self, typeinfo, typeName, alias)
conditions = self.structMemberValidationConditions[typeName] if typeName in self.structMemberValidationConditions else None
members = typeinfo.elem.findall('.//member')
if self.featureExtraProtect is not None:
self.struct_feature_protect[typeName] = self.featureExtraProtect
#
# Iterate over members once to get length parameters for arrays
lens = set()
for member in members:
len = self.getLen(member)
if len:
lens.add(len)
#
# Generate member info
membersInfo = []
returned_only = typeinfo.elem.attrib.get('returnedonly') is not None
for member in members:
# Get the member's type and name
info = self.getTypeNameTuple(member)
type = info[0]
name = info[1]
stypeValue = ''
cdecl = self.makeCParamDecl(member, 0)
ispointer = self.paramIsPointer(member)
isconst = True if 'const' in cdecl else False
# Store pointer/array/string info -- Check for parameter name in lens set
iscount = False
if name in lens:
iscount = True
# The pNext members are not tagged as optional, but are treated as optional for parameter NULL checks. Static array
# members are also treated as optional to skip NULL pointer validation, as they won't be NULL.
isstaticarray = self.paramIsStaticArray(member)
isoptional = False
if self.paramIsOptional(member) or (name == 'pNext') or (isstaticarray):
isoptional = True
# Determine if value should be ignored by code generation.
noautovalidity = False
if (member.attrib.get('noautovalidity') is not None) or ((typeName in self.structMemberBlacklist) and (name in self.structMemberBlacklist[typeName])):
noautovalidity = True
# Some types are marked as noautovalidity, but stateless_validation.h will still want them for manual validation
noautovalidity_type_exceptions = [
"VkQueryPipelineStatisticFlags",
"VkBorderColor"
]
# Store all types that are from incoming calls if auto validity
# non-const pointers don't have auto gen code as used for return values
if (noautovalidity == False) or (type in noautovalidity_type_exceptions):
if not returned_only and (not ispointer or isconst):
self.called_types.add(type)
structextends = False
membersInfo.append(self.CommandParam(type=type, name=name,
ispointer=ispointer,
isstaticarray=isstaticarray,
isbool=True if type == 'VkBool32' else False,
israngedenum=True if type in self.enumRanges else False,
isconst=isconst,
isoptional=isoptional,
iscount=iscount,
noautovalidity=noautovalidity,
len=self.getLen(member),
extstructs=self.registry.validextensionstructs[typeName] if name == 'pNext' else None,
condition=conditions[name] if conditions and name in conditions else None,
cdecl=cdecl))
# If this struct extends another, keep its name in list for further processing
if typeinfo.elem.attrib.get('structextends') is not None:
self.structextends_list.append(typeName)
# Returnedonly structs should have most of their members ignored -- on entry, we only care about validating the sType and
# pNext members. Everything else will be overwritten by the callee.
if returned_only:
self.returnedonly_structs.append(typeName)
membersInfo = [m for m in membersInfo if m.name in ('sType', 'pNext')]
self.structMembers.append(self.StructMemberData(name=typeName, members=membersInfo))
#
# Capture group (e.g. C "enum" type) info to be used for param check code generation.
# These are concatenated together with other types.
def genGroup(self, groupinfo, groupName, alias):
if not self.source_file:
return
# record the name/alias pair
if alias is not None:
self.alias_dict[groupName]=alias
OutputGenerator.genGroup(self, groupinfo, groupName, alias)
groupElem = groupinfo.elem
# Store the sType values
if groupName == 'VkStructureType':
for elem in groupElem.findall('enum'):
self.stypes.append(elem.get('name'))
elif 'FlagBits' in groupName:
bits = []
for elem in groupElem.findall('enum'):
if elem.get('supported') != 'disabled':
bits.append(elem.get('name'))
if bits:
self.flagBits[groupName] = bits
else:
# Determine if begin/end ranges are needed (we don't do this for VkStructureType, which has a more finely grained check)
expandName = re.sub(r'([0-9a-z_])([A-Z0-9][^A-Z0-9]?)',r'\1_\2',groupName).upper()
expandPrefix = expandName
expandSuffix = ''
expandSuffixMatch = re.search(r'[A-Z][A-Z]+$',groupName)
if expandSuffixMatch:
expandSuffix = '_' + expandSuffixMatch.group()
# Strip off the suffix from the prefix
expandPrefix = expandName.rsplit(expandSuffix, 1)[0]
isEnum = ('FLAG_BITS' not in expandPrefix)
if isEnum:
self.enumRanges.add(groupName)
# Create definition for a list containing valid enum values for this enumerated type
if self.featureExtraProtect is not None:
enum_entry = '#ifdef %s\n' % self.featureExtraProtect
else:
enum_entry = ''
enum_entry += 'const std::vector<%s> All%sEnums = {' % (groupName, groupName)
for enum in groupElem:
name = enum.get('name')
if name is not None and enum.get('supported') != 'disabled':
enum_entry += '%s, ' % name
enum_entry += '};'
if self.featureExtraProtect is not None:
enum_entry += '\n#endif // %s' % self.featureExtraProtect
self.enum_values_definitions[groupName] = enum_entry
#
# Capture command parameter info to be used for param check code generation.
def genCmd(self, cmdinfo, name, alias):
# record the name/alias pair
if alias is not None:
self.alias_dict[name]=alias
OutputGenerator.genCmd(self, cmdinfo, name, alias)
decls = self.makeCDecls(cmdinfo.elem)
typedef = decls[1]
typedef = typedef.split(')',1)[1]
if self.header_file:
if name not in self.blacklist:
if (self.featureExtraProtect is not None):
self.declarations += [ '#ifdef %s' % self.featureExtraProtect ]
# Strip off 'vk' from API name
decl = '%s%s' % ('bool PreCallValidate', decls[0].split("VKAPI_CALL vk")[1])
decl_terminator = ' const override;'
if 'ValidationCache' in name:
decl_terminator = ' const;'
decl = str(decl).replace(';', decl_terminator)
self.declarations += [ decl ]
if (self.featureExtraProtect is not None):
self.declarations += [ '#endif' ]
if self.source_file:
if name not in self.blacklist:
params = cmdinfo.elem.findall('param')
# Get list of array lengths
lens = set()
for param in params:
len = self.getLen(param)
if len:
lens.add(len)
# Get param info
paramsInfo = []
for param in params:
paramInfo = self.getTypeNameTuple(param)
cdecl = self.makeCParamDecl(param, 0)
ispointer = self.paramIsPointer(param)
isconst = True if 'const' in cdecl else False
# non-const pointers don't have auto gen code as used for return values
if not ispointer or isconst:
self.called_types.add(paramInfo[0])
# Check for parameter name in lens set
iscount = False
if paramInfo[1] in lens:
iscount = True
paramsInfo.append(self.CommandParam(type=paramInfo[0], name=paramInfo[1],
ispointer=ispointer,
isstaticarray=self.paramIsStaticArray(param),
isbool=True if paramInfo[0] == 'VkBool32' else False,
israngedenum=True if paramInfo[0] in self.enumRanges else False,
isconst=isconst,
isoptional=self.paramIsOptional(param),
iscount=iscount,
noautovalidity=True if param.attrib.get('noautovalidity') is not None else False,
len=self.getLen(param),
extstructs=None,
condition=None,
cdecl=cdecl))
# Save return value information, if any
result_type = ''
promotion_info = ''
resultinfo = cmdinfo.elem.find('proto/type')
if (resultinfo is not None and resultinfo.text != 'void'):
result_type = resultinfo.text
if "VK_VERSION" in self.featureName and "VK_VERSION_1_0" != self.featureName:
if ('VkInstance' == paramsInfo[0].type or 'VkPhysicalDevice' == paramsInfo[0].type):
promotion_info = [paramsInfo[0].name, self.featureName]
self.commands.append(self.CommandData(name=name, params=paramsInfo, cdecl=self.makeCDecls(cmdinfo.elem)[0], extension_type=self.extension_type, result=result_type, promotion_info=promotion_info))
#
# Check if the parameter passed in is a pointer
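# Illustrative: for a <param> whose <type> tail is '* ' this returns 1; for a tail such as
# '* const*' (e.g. 'const char* const* ppEnabledExtensionNames') it returns 2; a PFN_ type returns 1.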
def paramIsPointer(self, param):
ispointer = 0
paramtype = param.find('type')
if (paramtype.tail is not None) and ('*' in paramtype.tail):
ispointer = paramtype.tail.count('*')
elif paramtype.text[:4] == 'PFN_':
# Treat function pointer typedefs as a pointer to a single value
ispointer = 1
return ispointer
#
# Check if the parameter passed in is a static array
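# Illustrative: for a member declared as 'float blendConstants[4]' the <name> tail contains '[',
# so this returns 1 (the number of array dimensions).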
def paramIsStaticArray(self, param):
isstaticarray = 0
paramname = param.find('name')
if (paramname.tail is not None) and ('[' in paramname.tail):
isstaticarray = paramname.tail.count('[')
return isstaticarray
#
# Check if the parameter passed in is optional
# Returns a list of Boolean values for comma-separated optional attributes (e.g. optional='false,true')
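# Illustrative: optional="true" returns True, optional="false,true" returns [False, True],
# and a missing optional attribute returns False.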
def paramIsOptional(self, param):
# See if the handle is optional
isoptional = False
# Simple, if it's optional, return true
optString = param.attrib.get('optional')
if optString:
if optString == 'true':
isoptional = True
elif ',' in optString:
opts = []
for opt in optString.split(','):
val = opt.strip()
if val == 'true':
opts.append(True)
elif val == 'false':
opts.append(False)
else:
print('Unrecognized optional attribute value', val)
isoptional = opts
return isoptional
#
# Check if the handle passed in is optional
# Uses the same logic as ValidityOutputGenerator.isHandleOptional
def isHandleOptional(self, param, lenParam):
# Simple, if it's optional, return true
if param.isoptional:
return True
# If no validity is being generated, it usually means that validity is complex and not absolute, so let's say yes.
if param.noautovalidity:
return True
# If the parameter is an array and we haven't already returned, find out if any of the len parameters are optional
if lenParam and lenParam.isoptional:
return True
return False
#
# Retrieve the value of the len tag
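# Illustrative: len="pAllocateInfo::descriptorSetCount" becomes 'pAllocateInfo->descriptorSetCount',
# len="enabledLayerCount,null-terminated" becomes 'enabledLayerCount', and a static array member
# declared with '[4]' returns '4'.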
def getLen(self, param):
result = None
# Default to altlen when available to avoid LaTeX markup
if 'altlen' in param.attrib:
len = param.attrib.get('altlen')
else:
len = param.attrib.get('len')
if len and len != 'null-terminated':
# Only first level is supported for multidimensional arrays. Conveniently, this also strips the trailing
# 'null-terminated' from arrays of strings
len = len.split(',')[0]
# Convert scope notation to pointer access
result = str(len).replace('::', '->')
elif self.paramIsStaticArray(param):
# For static arrays get length from inside []
array_match = re.search(r'\[(\d+)\]', param.find('name').tail)
if array_match:
result = array_match.group(1)
return result
#
# Retrieve the type and name for a parameter
def getTypeNameTuple(self, param):
type = ''
name = ''
for elem in param:
if elem.tag == 'type':
type = noneStr(elem.text)
elif elem.tag == 'name':
name = noneStr(elem.text)
return (type, name)
#
# Find a named parameter in a parameter list
def getParamByName(self, params, name):
for param in params:
if param.name == name:
return param
return None
#
# Get the length parameter record for the specified length expression
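# Illustrative: for length 'rasterizationSamples' the parameter with that exact name is returned;
# for an expression such as '(rasterizationSamples + 31) / 32' the single parameter name found
# inside the expression is returned; more than one match raises an exception.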
def getLenParam(self, params, length):
# First check if any element of params matches length exactly
lenParam = self.getParamByName(params, length)
if not lenParam:
# Otherwise, look for any elements of params that appear within length
len_candidates = [p for p in params if re.search(r'\b{}\b'.format(p.name), length)]
# 0 or 1 matches are expected, >1 would require a special case and/or explicit validation
if len(len_candidates) == 0:
lenParam = None
elif len(len_candidates) == 1:
lenParam = len_candidates[0]
else:
raise Exception('Cannot determine length parameter for len attribute value {}'.format(length))
return lenParam
#
# Convert a vulkan.h command declaration into a parameter_validation.h definition
def getCmdDef(self, cmd):
# Strip the trailing ';' and split into individual lines
lines = cmd.cdecl[:-1].split('\n')
cmd_hdr = '\n'.join(lines)
return cmd_hdr
#
# Generate the code to check for a NULL dereference before calling the
# validation function
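# Illustrative generated output for name 'pCreateInfo->attachmentCount' (one '->', so one guard):
#   if (pCreateInfo != NULL) {
#       <validation expression(s)>
#   }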
def genCheckedLengthCall(self, name, exprs):
count = name.count('->')
if count:
checkedExpr = []
localIndent = ''
elements = name.split('->')
# Open the if expression blocks
for i in range(0, count):
checkedExpr.append(localIndent + 'if ({} != NULL) {{\n'.format('->'.join(elements[0:i+1])))
localIndent = self.incIndent(localIndent)
# Add the validation expression
for expr in exprs:
checkedExpr.append(localIndent + expr)
# Close the if blocks
for i in range(0, count):
localIndent = self.decIndent(localIndent)
checkedExpr.append(localIndent + '}\n')
return [checkedExpr]
# No if statements were required
return exprs
#
# Generate code to check for a specific condition before executing validation code
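# Illustrative: with the condition registered in structMemberValidationConditions above,
# '{}logicOpEnable == VK_TRUE', and prefix 'pCreateInfo->', the validation expressions are wrapped in
#   if (pCreateInfo->logicOpEnable == VK_TRUE)
#   {
#       <validation expression(s)>
#   }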
def genConditionalCall(self, prefix, condition, exprs):
checkedExpr = []
localIndent = ''
formattedCondition = condition.format(prefix)
checkedExpr.append(localIndent + 'if ({})\n'.format(formattedCondition))
checkedExpr.append(localIndent + '{\n')
localIndent = self.incIndent(localIndent)
for expr in exprs:
checkedExpr.append(localIndent + expr)
localIndent = self.decIndent(localIndent)
checkedExpr.append(localIndent + '}\n')
return [checkedExpr]
#
# Get VUID identifier from implicit VUID tag
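# Illustrative: GetVuid('vkCmdDraw', 'commandBuffer-parameter') returns the quoted literal
# '"VUID-vkCmdDraw-commandBuffer-parameter"' when that string (or its alias form) appears in
# validusage.json, and "kVUIDUndefined" otherwise or when the name contains '->'.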
def GetVuid(self, name, suffix):
vuid_string = 'VUID-%s-%s' % (name, suffix)
vuid = "kVUIDUndefined"
if '->' in vuid_string:
return vuid
if vuid_string in self.valid_vuids:
vuid = "\"%s\"" % vuid_string
else:
if name in self.alias_dict:
alias_string = 'VUID-%s-%s' % (self.alias_dict[name], suffix)
if alias_string in self.valid_vuids:
vuid = "\"%s\"" % alias_string
return vuid
#
# Generate the sType check string
def makeStructTypeCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, lenPtrRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec, struct_type_name):
checkExpr = []
stype = self.structTypes[value.type]
vuid_name = struct_type_name if struct_type_name is not None else funcPrintName
stype_vuid = self.GetVuid(value.type, "sType-sType")
param_vuid = self.GetVuid(vuid_name, "%s-parameter" % value.name)
if lenValue:
count_required_vuid = self.GetVuid(vuid_name, "%s-arraylength" % value.len)
# This is an array of struct pointers
if value.ispointer == 2:
checkExpr.append('skip |= validate_struct_pointer_type_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, "{sv}", {pf}{ln}, {pf}{vn}, {sv}, {}, {}, {}, {}, {});\n'.format(
funcPrintName, lenValueRequired, valueRequired, stype_vuid, param_vuid, count_required_vuid, ln=lenValue.name, ldn=lenPrintName, dn=valuePrintName, vn=value.name, sv=stype, pf=prefix, **postProcSpec))
# This is an array with a pointer to a count value
elif lenValue.ispointer:
# When the length parameter is a pointer, there is an extra Boolean parameter in the function call to indicate if it is required
checkExpr.append('skip |= validate_struct_type_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, "{sv}", {pf}{ln}, {pf}{vn}, {sv}, {}, {}, {}, {}, {}, {});\n'.format(
funcPrintName, lenPtrRequired, lenValueRequired, valueRequired, stype_vuid, param_vuid, count_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, sv=stype, pf=prefix, **postProcSpec))
# This is an array with an integer count value
else:
checkExpr.append('skip |= validate_struct_type_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, "{sv}", {pf}{ln}, {pf}{vn}, {sv}, {}, {}, {}, {}, {});\n'.format(
funcPrintName, lenValueRequired, valueRequired, stype_vuid, param_vuid, count_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, sv=stype, pf=prefix, **postProcSpec))
# This is an individual struct
else:
checkExpr.append('skip |= validate_struct_type("{}", {ppp}"{}"{pps}, "{sv}", {}{vn}, {sv}, {}, {}, {});\n'.format(
funcPrintName, valuePrintName, prefix, valueRequired, param_vuid, stype_vuid, vn=value.name, sv=stype, vt=value.type, **postProcSpec))
return checkExpr
#
# Generate the handle check string
def makeHandleCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec):
checkExpr = []
if lenValue:
if lenValue.ispointer:
# This is assumed to be an output array with a pointer to a count value
raise Exception('Unsupported parameter validation case: Output handle array elements are not NULL checked')
else:
count_required_vuid = self.GetVuid(funcPrintName, "%s-arraylength" % (value.len))
# This is an array with an integer count value
checkExpr.append('skip |= validate_handle_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, {pf}{vn}, {}, {}, {});\n'.format(
funcPrintName, lenValueRequired, valueRequired, count_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec))
else:
# This is assumed to be an output handle pointer
raise Exception('Unsupported parameter validation case: Output handles are not NULL checked')
return checkExpr
#
# Generate check string for an array of VkFlags values
def makeFlagsArrayCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec):
checkExpr = []
flagBitsName = value.type.replace('Flags', 'FlagBits')
if flagBitsName not in self.flagBits:
raise Exception('Unsupported parameter validation case: array of reserved VkFlags')
else:
allFlags = 'All' + flagBitsName
checkExpr.append('skip |= validate_flags_array("{}", {ppp}"{}"{pps}, {ppp}"{}"{pps}, "{}", {}, {pf}{}, {pf}{}, {}, {});\n'.format(funcPrintName, lenPrintName, valuePrintName, flagBitsName, allFlags, value.len, value.name, lenValueRequired, valueRequired, pf=prefix, **postProcSpec))
return checkExpr
#
# Generate pNext check string
def makeStructNextCheck(self, prefix, value, funcPrintName, valuePrintName, postProcSpec, struct_type_name):
checkExpr = []
# Generate an array of acceptable VkStructureType values for pNext
extStructCount = 0
extStructVar = 'NULL'
extStructNames = 'NULL'
pNextVuid = self.GetVuid(struct_type_name, "pNext-pNext")
sTypeVuid = self.GetVuid(struct_type_name, "sType-unique")
if value.extstructs:
extStructVar = 'allowed_structs_{}'.format(struct_type_name)
extStructCount = 'ARRAY_SIZE({})'.format(extStructVar)
extStructNames = '"' + ', '.join(value.extstructs) + '"'
checkExpr.append('const VkStructureType {}[] = {{ {} }};\n'.format(extStructVar, ', '.join([self.structTypes[s] for s in value.extstructs])))
checkExpr.append('skip |= validate_struct_pnext("{}", {ppp}"{}"{pps}, {}, {}{}, {}, {}, GeneratedVulkanHeaderVersion, {}, {});\n'.format(
funcPrintName, valuePrintName, extStructNames, prefix, value.name, extStructCount, extStructVar, pNextVuid, sTypeVuid, **postProcSpec))
return checkExpr
#
# Generate the pointer check string
def makePointerCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, lenPtrRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec, struct_type_name):
checkExpr = []
vuid_tag_name = struct_type_name if struct_type_name is not None else funcPrintName
if lenValue:
length_deref = '->' in value.len
count_required_vuid = self.GetVuid(vuid_tag_name, "%s-arraylength" % (value.len))
array_required_vuid = self.GetVuid(vuid_tag_name, "%s-parameter" % (value.name))
# TODO: Remove workaround for missing optional tag in vk.xml
if array_required_vuid == '"VUID-VkFramebufferCreateInfo-pAttachments-parameter"':
return []
# This is an array with a pointer to a count value
if lenValue.ispointer and not length_deref:
# If count and array parameters are optional, there will be no validation
if valueRequired == 'true' or lenPtrRequired == 'true' or lenValueRequired == 'true':
# When the length parameter is a pointer, there is an extra Boolean parameter in the function call to indicate if it is required
checkExpr.append('skip |= validate_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, &{pf}{vn}, {}, {}, {}, {}, {});\n'.format(
funcPrintName, lenPtrRequired, lenValueRequired, valueRequired, count_required_vuid, array_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec))
# This is an array with an integer count value
else:
# If count and array parameters are optional, there will be no validation
if valueRequired == 'true' or lenValueRequired == 'true':
if value.type != 'char':
# A valid VU can't use '->' in the middle so the generated VUID from the spec uses '::' instead
count_required_vuid = self.GetVuid(vuid_tag_name, "%s-arraylength" % (value.len.replace('->', '::')))
checkExpr.append('skip |= validate_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, &{pf}{vn}, {}, {}, {}, {});\n'.format(
funcPrintName, lenValueRequired, valueRequired, count_required_vuid, array_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec))
else:
# Arrays of strings receive special processing
checkExpr.append('skip |= validate_string_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, {pf}{vn}, {}, {}, {}, {});\n'.format(
funcPrintName, lenValueRequired, valueRequired, count_required_vuid, array_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec))
if checkExpr:
if lenValue and length_deref:
# Add checks to ensure the validation call does not dereference a NULL pointer to obtain the count
checkExpr = self.genCheckedLengthCall(value.len, checkExpr)
# This is an individual struct that is not allowed to be NULL
elif not value.isoptional:
# Function pointers need a reinterpret_cast to void*
ptr_required_vuid = self.GetVuid(vuid_tag_name, "%s-parameter" % (value.name))
if value.type[:4] == 'PFN_':
allocator_dict = {'pfnAllocation': '"VUID-VkAllocationCallbacks-pfnAllocation-00632"',
'pfnReallocation': '"VUID-VkAllocationCallbacks-pfnReallocation-00633"',
'pfnFree': '"VUID-VkAllocationCallbacks-pfnFree-00634"',
}
vuid = allocator_dict.get(value.name)
if vuid is not None:
ptr_required_vuid = vuid
checkExpr.append('skip |= validate_required_pointer("{}", {ppp}"{}"{pps}, reinterpret_cast<const void*>({}{}), {});\n'.format(funcPrintName, valuePrintName, prefix, value.name, ptr_required_vuid, **postProcSpec))
else:
checkExpr.append('skip |= validate_required_pointer("{}", {ppp}"{}"{pps}, {}{}, {});\n'.format(funcPrintName, valuePrintName, prefix, value.name, ptr_required_vuid, **postProcSpec))
else:
# Special case for optional internal allocation function pointers.
if (value.type, value.name) == ('PFN_vkInternalAllocationNotification', 'pfnInternalAllocation'):
checkExpr.extend(self.internalAllocationCheck(funcPrintName, prefix, value.name, 'pfnInternalFree', postProcSpec))
elif (value.type, value.name) == ('PFN_vkInternalFreeNotification', 'pfnInternalFree'):
checkExpr.extend(self.internalAllocationCheck(funcPrintName, prefix, value.name, 'pfnInternalAllocation', postProcSpec))
return checkExpr
#
# Generate internal allocation function pointer check.
def internalAllocationCheck(self, funcPrintName, prefix, name, complementaryName, postProcSpec):
checkExpr = []
vuid = '"VUID-VkAllocationCallbacks-pfnInternalAllocation-00635"'
checkExpr.append('if ({}{} != NULL)'.format(prefix, name))
checkExpr.append('{')
local_indent = self.incIndent('')
# Function pointers need a reinterpret_cast to void*
checkExpr.append(local_indent + 'skip |= validate_required_pointer("{}", {ppp}"{}{}"{pps}, reinterpret_cast<const void*>({}{}), {});\n'.format(funcPrintName, prefix, complementaryName, prefix, complementaryName, vuid, **postProcSpec))
checkExpr.append('}\n')
return checkExpr
#
# Process struct member validation code, performing name substitution if required
def processStructMemberCode(self, line, funcName, memberNamePrefix, memberDisplayNamePrefix, postProcSpec):
# Build format specifier list
kwargs = {}
if '{postProcPrefix}' in line:
# If we have a tuple that includes a format string and format parameters, need to use ParameterName class
if type(memberDisplayNamePrefix) is tuple:
kwargs['postProcPrefix'] = 'ParameterName('
else:
kwargs['postProcPrefix'] = postProcSpec['ppp']
if '{postProcSuffix}' in line:
# If we have a tuple that includes a format string and format parameters, need to use ParameterName class
if type(memberDisplayNamePrefix) is tuple:
kwargs['postProcSuffix'] = ', ParameterName::IndexVector{{ {}{} }})'.format(postProcSpec['ppi'], memberDisplayNamePrefix[1])
else:
kwargs['postProcSuffix'] = postProcSpec['pps']
if '{postProcInsert}' in line:
# If we have a tuple that includes a format string and format parameters, need to use ParameterName class
if type(memberDisplayNamePrefix) is tuple:
kwargs['postProcInsert'] = '{}{}, '.format(postProcSpec['ppi'], memberDisplayNamePrefix[1])
else:
kwargs['postProcInsert'] = postProcSpec['ppi']
if '{funcName}' in line:
kwargs['funcName'] = funcName
if '{valuePrefix}' in line:
kwargs['valuePrefix'] = memberNamePrefix
if '{displayNamePrefix}' in line:
# Check for a tuple that includes a format string and format parameters to be used with the ParameterName class
if type(memberDisplayNamePrefix) is tuple:
kwargs['displayNamePrefix'] = memberDisplayNamePrefix[0]
else:
kwargs['displayNamePrefix'] = memberDisplayNamePrefix
if kwargs:
# Need to escape the C++ curly braces
if 'IndexVector' in line:
line = line.replace('IndexVector{ ', 'IndexVector{{ ')
line = line.replace(' }),', ' }}),')
return line.format(**kwargs)
return line
#
# Process struct member validation code, stripping metadata
def ScrubStructCode(self, code):
scrubbed_lines = ''
for line in code:
if 'validate_struct_pnext' in line:
continue
if 'allowed_structs' in line:
continue
if 'xml-driven validation' in line:
continue
line = line.replace('{postProcPrefix}', '')
line = line.replace('{postProcSuffix}', '')
line = line.replace('{postProcInsert}', '')
line = line.replace('{funcName}', '')
line = line.replace('{valuePrefix}', '')
line = line.replace('{displayNamePrefix}', '')
line = line.replace('{IndexVector}', '')
line = line.replace('local_data->', '')
scrubbed_lines += line
return scrubbed_lines
#
# Process struct validation code for inclusion in function or parent struct validation code
def expandStructCode(self, item_type, funcName, memberNamePrefix, memberDisplayNamePrefix, indent, output, postProcSpec):
lines = self.validatedStructs[item_type]
for line in lines:
if output:
output[-1] += '\n'
if type(line) is list:
for sub in line:
output.append(self.processStructMemberCode(indent + sub, funcName, memberNamePrefix, memberDisplayNamePrefix, postProcSpec))
else:
output.append(self.processStructMemberCode(indent + line, funcName, memberNamePrefix, memberDisplayNamePrefix, postProcSpec))
return output
#
# Process struct pointer/array validation code, performing name substitution if required
def expandStructPointerCode(self, prefix, value, lenValue, funcName, valueDisplayName, postProcSpec):
expr = []
expr.append('if ({}{} != NULL)\n'.format(prefix, value.name))
expr.append('{')
indent = self.incIndent(None)
if lenValue:
# Need to process all elements in the array
indexName = value.len.replace('Count', 'Index')
expr[-1] += '\n'
if lenValue.ispointer:
# If the length value is a pointer, de-reference it for the count.
expr.append(indent + 'for (uint32_t {iname} = 0; {iname} < *{}{}; ++{iname})\n'.format(prefix, value.len, iname=indexName))
else:
expr.append(indent + 'for (uint32_t {iname} = 0; {iname} < {}{}; ++{iname})\n'.format(prefix, value.len, iname=indexName))
expr.append(indent + '{')
indent = self.incIndent(indent)
# Prefix for value name to display in error message
if value.ispointer == 2:
memberNamePrefix = '{}{}[{}]->'.format(prefix, value.name, indexName)
memberDisplayNamePrefix = ('{}[%i]->'.format(valueDisplayName), indexName)
else:
memberNamePrefix = '{}{}[{}].'.format(prefix, value.name, indexName)
memberDisplayNamePrefix = ('{}[%i].'.format(valueDisplayName), indexName)
else:
memberNamePrefix = '{}{}->'.format(prefix, value.name)
memberDisplayNamePrefix = '{}->'.format(valueDisplayName)
# Expand the struct validation lines
expr = self.expandStructCode(value.type, funcName, memberNamePrefix, memberDisplayNamePrefix, indent, expr, postProcSpec)
if lenValue:
# Close if and for scopes
indent = self.decIndent(indent)
expr.append(indent + '}\n')
expr.append('}\n')
return expr
#
# Generate the parameter checking code
def genFuncBody(self, funcName, values, valuePrefix, displayNamePrefix, structTypeName):
lines = [] # Generated lines of code
unused = [] # Unused variable names
duplicateCountVuid = [] # prevent duplicate VUs being generated
for value in values:
usedLines = []
lenParam = None
#
# Prefix and suffix for post processing of parameter names for struct members. Arrays of structures need special processing to include the array index in the full parameter name.
postProcSpec = {}
postProcSpec['ppp'] = '' if not structTypeName else '{postProcPrefix}'
postProcSpec['pps'] = '' if not structTypeName else '{postProcSuffix}'
postProcSpec['ppi'] = '' if not structTypeName else '{postProcInsert}'
#
# Generate the full name of the value, which will be printed in the error message, by adding the variable prefix to the value name
valueDisplayName = '{}{}'.format(displayNamePrefix, value.name)
#
# Check for NULL pointers, ignore the in-out count parameters that
# will be validated with their associated array
if (value.ispointer or value.isstaticarray) and not value.iscount:
# Parameters for function argument generation
req = 'true' # Parameter cannot be NULL
cpReq = 'true' # Count pointer cannot be NULL
cvReq = 'true' # Count value cannot be 0
lenDisplayName = None # Name of length parameter to print with validation messages; parameter name with prefix applied
countRequiredVuid = None # If there is a count required VUID to check
# Generate required/optional parameter strings for the pointer and count values
if value.isoptional:
req = 'false'
if value.len:
# The parameter is an array with an explicit count parameter
lenParam = self.getLenParam(values, value.len)
if lenParam:
lenDisplayName = value.len.replace(lenParam.name, displayNamePrefix + lenParam.name)
if lenParam.ispointer:
# Count parameters that are pointers are inout
if type(lenParam.isoptional) is list:
if lenParam.isoptional[0]:
cpReq = 'false'
if lenParam.isoptional[1]:
cvReq = 'false'
else:
if lenParam.isoptional:
cpReq = 'false'
else:
if lenParam.isoptional:
cvReq = 'false'
elif value.noautovalidity:
# Handle edge case where XML expresses a non-optional non-pointer value length with noautovalidity
                        # ex: <param noautovalidity="true" len="commandBufferCount">
vuidNameTag = structTypeName if structTypeName is not None else funcName
countRequiredVuid = self.GetVuid(vuidNameTag, "%s-arraylength" % (lenParam.name))
if countRequiredVuid in duplicateCountVuid:
countRequiredVuid = None
else:
duplicateCountVuid.append(countRequiredVuid)
else:
# Do not generate length checks for constant sized arrays
cpReq = 'false'
cvReq = 'false'
#
# The parameter will not be processed when tagged as 'noautovalidity'
# For the pointer to struct case, the struct pointer will not be validated, but any
# members not tagged as 'noautovalidity' will be validated
# We special-case the custom allocator checks, as they are explicit but can be auto-generated.
AllocatorFunctions = ['PFN_vkAllocationFunction', 'PFN_vkReallocationFunction', 'PFN_vkFreeFunction', 'PFN_vkInternalAllocationNotification', 'PFN_vkInternalFreeNotification']
if value.noautovalidity and value.type not in AllocatorFunctions and not countRequiredVuid:
# Log a diagnostic message when validation cannot be automatically generated and must be implemented manually
self.logMsg('diag', 'ParameterValidation: No validation for {} {}'.format(structTypeName if structTypeName else funcName, value.name))
elif countRequiredVuid:
usedLines.append('skip |= validate_array("{}", {ppp}"{ldn}"{pps}, "", {pf}{ln}, &{pf}{vn}, true, false, {}, kVUIDUndefined);\n'.format(
funcName, countRequiredVuid, pf=valuePrefix, ldn=lenDisplayName, ln=value.len, vn=value.name, **postProcSpec))
else:
if value.type in self.structTypes:
# If this is a pointer to a struct with an sType field, verify the type
usedLines += self.makeStructTypeCheck(valuePrefix, value, lenParam, req, cvReq, cpReq, funcName, lenDisplayName, valueDisplayName, postProcSpec, structTypeName)
# If this is an input handle array that is not allowed to contain NULL handles, verify that none of the handles are VK_NULL_HANDLE
elif value.type in self.handleTypes and value.isconst and not self.isHandleOptional(value, lenParam):
usedLines += self.makeHandleCheck(valuePrefix, value, lenParam, req, cvReq, funcName, lenDisplayName, valueDisplayName, postProcSpec)
elif value.type in self.flags and value.isconst:
usedLines += self.makeFlagsArrayCheck(valuePrefix, value, lenParam, req, cvReq, funcName, lenDisplayName, valueDisplayName, postProcSpec)
elif value.isbool and value.isconst:
usedLines.append('skip |= validate_bool32_array("{}", {ppp}"{}"{pps}, {ppp}"{}"{pps}, {pf}{}, {pf}{}, {}, {});\n'.format(funcName, lenDisplayName, valueDisplayName, value.len, value.name, cvReq, req, pf=valuePrefix, **postProcSpec))
elif value.israngedenum and value.isconst:
enum_value_list = 'All%sEnums' % value.type
usedLines.append('skip |= validate_ranged_enum_array("{}", {ppp}"{}"{pps}, {ppp}"{}"{pps}, "{}", {}, {pf}{}, {pf}{}, {}, {});\n'.format(funcName, lenDisplayName, valueDisplayName, value.type, enum_value_list, value.len, value.name, cvReq, req, pf=valuePrefix, **postProcSpec))
elif value.name == 'pNext':
usedLines += self.makeStructNextCheck(valuePrefix, value, funcName, valueDisplayName, postProcSpec, structTypeName)
else:
usedLines += self.makePointerCheck(valuePrefix, value, lenParam, req, cvReq, cpReq, funcName, lenDisplayName, valueDisplayName, postProcSpec, structTypeName)
# If this is a pointer to a struct (input), see if it contains members that need to be checked
if value.type in self.validatedStructs:
                    if value.isconst or value.type in self.returnedonly_structs:
                        usedLines.append(self.expandStructPointerCode(valuePrefix, value, lenParam, funcName, valueDisplayName, postProcSpec))
# Non-pointer types
else:
                # The parameter will not be processed when tagged as 'noautovalidity'
# For the struct case, the struct type will not be validated, but any
# members not tagged as 'noautovalidity' will be validated
if value.noautovalidity:
# Log a diagnostic message when validation cannot be automatically generated and must be implemented manually
self.logMsg('diag', 'ParameterValidation: No validation for {} {}'.format(structTypeName if structTypeName else funcName, value.name))
else:
vuid_name_tag = structTypeName if structTypeName is not None else funcName
if value.type in self.structTypes:
stype = self.structTypes[value.type]
vuid = self.GetVuid(value.type, "sType-sType")
undefined_vuid = '"kVUIDUndefined"'
usedLines.append('skip |= validate_struct_type("{}", {ppp}"{}"{pps}, "{sv}", &({}{vn}), {sv}, false, kVUIDUndefined, {});\n'.format(
funcName, valueDisplayName, valuePrefix, vuid, vn=value.name, sv=stype, vt=value.type, **postProcSpec))
elif value.type in self.handleTypes:
if not self.isHandleOptional(value, None):
usedLines.append('skip |= validate_required_handle("{}", {ppp}"{}"{pps}, {}{});\n'.format(funcName, valueDisplayName, valuePrefix, value.name, **postProcSpec))
elif value.type in self.flags and value.type.replace('Flags', 'FlagBits') not in self.flagBits:
vuid = self.GetVuid(vuid_name_tag, "%s-zerobitmask" % (value.name))
usedLines.append('skip |= validate_reserved_flags("{}", {ppp}"{}"{pps}, {pf}{}, {});\n'.format(funcName, valueDisplayName, value.name, vuid, pf=valuePrefix, **postProcSpec))
elif value.type in self.flags or value.type in self.flagBits:
if value.type in self.flags:
flagBitsName = value.type.replace('Flags', 'FlagBits')
flagsType = 'kOptionalFlags' if value.isoptional else 'kRequiredFlags'
invalidVuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name))
zeroVuid = self.GetVuid(vuid_name_tag, "%s-requiredbitmask" % (value.name))
elif value.type in self.flagBits:
flagBitsName = value.type
flagsType = 'kOptionalSingleBit' if value.isoptional else 'kRequiredSingleBit'
invalidVuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name))
zeroVuid = invalidVuid
                        allFlagsName = 'All' + flagBitsName
zeroVuidArg = '' if value.isoptional else ', ' + zeroVuid
usedLines.append('skip |= validate_flags("{}", {ppp}"{}"{pps}, "{}", {}, {pf}{}, {}, {}{});\n'.format(funcName, valueDisplayName, flagBitsName, allFlagsName, value.name, flagsType, invalidVuid, zeroVuidArg, pf=valuePrefix, **postProcSpec))
elif value.isbool:
usedLines.append('skip |= validate_bool32("{}", {ppp}"{}"{pps}, {}{});\n'.format(funcName, valueDisplayName, valuePrefix, value.name, **postProcSpec))
elif value.israngedenum:
vuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name))
enum_value_list = 'All%sEnums' % value.type
usedLines.append('skip |= validate_ranged_enum("{}", {ppp}"{}"{pps}, "{}", {}, {}{}, {});\n'.format(funcName, valueDisplayName, value.type, enum_value_list, valuePrefix, value.name, vuid, **postProcSpec))
# If this is a struct, see if it contains members that need to be checked
if value.type in self.validatedStructs:
memberNamePrefix = '{}{}.'.format(valuePrefix, value.name)
memberDisplayNamePrefix = '{}.'.format(valueDisplayName)
usedLines.append(self.expandStructCode(value.type, funcName, memberNamePrefix, memberDisplayNamePrefix, '', [], postProcSpec))
# Append the parameter check to the function body for the current command
if usedLines:
# Apply special conditional checks
if value.condition:
usedLines = self.genConditionalCall(valuePrefix, value.condition, usedLines)
lines += usedLines
elif not value.iscount:
# If no expression was generated for this value, it is unreferenced by the validation function, unless
                # it is an array count, which is indirectly referenced for array validation.
unused.append(value.name)
if not lines:
lines.append('// No xml-driven validation\n')
return lines, unused
#
# Generate the struct member check code from the captured data
def processStructMemberData(self):
indent = self.incIndent(None)
for struct in self.structMembers:
#
            # The lines returned by genFuncBody will be nested in an if check for a NULL pointer, so need their indent incremented
lines, unused = self.genFuncBody('{funcName}', struct.members, '{valuePrefix}', '{displayNamePrefix}', struct.name)
if lines:
self.validatedStructs[struct.name] = lines
#
# Generate the command param check code from the captured data
def processCmdData(self):
indent = self.incIndent(None)
for command in self.commands:
# Skip first parameter if it is a dispatch handle (everything except vkCreateInstance)
startIndex = 0 if command.name == 'vkCreateInstance' else 1
lines, unused = self.genFuncBody(command.name, command.params[startIndex:], '', '', None)
# Cannot validate extension dependencies for device extension APIs having a physical device as their dispatchable object
if (command.name in self.required_extensions) and (self.extension_type != 'device' or command.params[0].type != 'VkPhysicalDevice'):
ext_test = ''
if command.params[0].type in ["VkInstance", "VkPhysicalDevice"] or command.name == 'vkCreateInstance':
ext_table_type = 'instance'
else:
ext_table_type = 'device'
for ext in self.required_extensions[command.name]:
ext_name_define = ''
ext_enable_name = ''
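                    # Find the extension's *_EXTENSION_NAME define in the registry and derive the lowercase member name used in the extension-enable struct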
for extension in self.registry.extensions:
if extension.attrib['name'] == ext:
ext_name_define = extension[0][1].get('name')
ext_enable_name = ext_name_define.lower()
ext_enable_name = re.sub('_extension_name', '', ext_enable_name)
break
ext_test = 'if (!%s_extensions.%s) skip |= OutputExtensionError("%s", %s);\n' % (ext_table_type, ext_enable_name, command.name, ext_name_define)
lines.insert(0, ext_test)
if lines:
func_sig = self.getCmdDef(command) + ' const {\n'
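                # Keep only the portion after 'VKAPI_CALL vk' so func_sig holds just the command name and parameter list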
func_sig = func_sig.split('VKAPI_CALL vk')[1]
cmdDef = 'bool StatelessValidation::PreCallValidate' + func_sig
cmdDef += '%sbool skip = false;\n' % indent
if isinstance(command.promotion_info, list):
version_flag = command.promotion_info[1]
version_id = version_flag.replace('VK_VERSION', 'VK_API_VERSION')
cmdDef += '%s if (CheckPromotedApiAgainstVulkanVersion(%s, "%s", %s)) return true;\n' % (indent, command.promotion_info[0], command.name, version_id)
for line in lines:
if type(line) is list:
for sub in line:
cmdDef += indent + sub
else:
cmdDef += indent + line
# Insert call to custom-written function if present
if command.name in self.functions_with_manual_checks:
# Generate parameter list for manual fcn and down-chain calls
params_text = ''
for param in command.params:
params_text += '%s, ' % param.name
params_text = params_text[:-2] + ');\n'
cmdDef += ' if (!skip) skip |= manual_PreCallValidate'+ command.name[2:] + '(' + params_text
cmdDef += '%sreturn skip;\n' % indent
cmdDef += '}\n'
self.validation.append(cmdDef)
|
py | 1a3a1f6abeae1a833d6b66f9878535e57fe668d6 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# from trainer import Trainer
import pyximport
pyximport.install()
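# pyximport compiles the Cython trainer module on the fly at import time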
from cython_train.trainer_cython import Trainer
from ssd_v2 import SSD300v2
import keras
import argparse
def main():
parser = argparse.ArgumentParser(description="Training ssd model with keras")
parser.add_argument("-c", "--class_number", metavar="class_number",
type=int, default=21,
dest="class_number", help="set the classify number ")
parser.add_argument("-b", "--prior_boxes_ssd300", metavar="prior_boxes_ssd300",
type=str, default='prior_boxes_ssd300.pkl',
dest="prior_boxes_ssd300", help="set the prior boxes file")
parser.add_argument("-t", "--train_file", metavar="train_file",
type=str, default='VOC2007.pkl',
dest="train_file", help="set the train file")
parser.add_argument("-p", "--path_prefix", metavar="path_prefix",
type=str, default='./VOCdevkit/VOC2007/JPEGImages/',
dest="path_prefix", help="set the path prefix")
parser.add_argument("-w", "--weight_file", metavar="weight_file",
type=str, default='weights_SSD300.hdf5',
dest="weight_file", help="set the weight file")
parser.add_argument("-s", "--save_weight_file", metavar="save_weight_file",
type=str,
default='./resource/checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5',
dest="save_weight_file", help="set the save weight file")
parser.add_argument("-n", "--nb_epoch", metavar="nb_epoch",
type=int,
default=100,
dest="nb_epoch", help="set the number of epoch")
args = parser.parse_args()
input_shape = (300, 300, 3)
model = SSD300v2(input_shape, num_classes=args.class_number)
    base_lr = 3e-4
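    # Freeze the early VGG feature-extraction layers so only the SSD-specific layers are updated during training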
trainer = Trainer(class_number=args.class_number,
input_shape=input_shape,
priors_file=args.prior_boxes_ssd300,
train_file=args.train_file,
path_prefix=args.path_prefix,
model=model,
weight_file=args.weight_file,
freeze=('input_1', 'conv1_1', 'conv1_2', 'pool1',
'conv2_1', 'conv2_2', 'pool2',
'conv3_1', 'conv3_2', 'conv3_3', 'pool3'),
save_weight_file=args.save_weight_file,
optim=keras.optimizers.Adam(lr=base_lr),
)
trainer.train(nb_epoch=args.nb_epoch)
if __name__ == "__main__":
main()
|
py | 1a3a1ffdaa1fbfb212713809bb2cd885524e5918 | #!/usr/bin/python
import plistlib
import os
import subprocess
import sys
import json
import CoreFoundation
sys.path.insert(0, '/usr/local/munki')
from munkilib import FoundationPlist
DEBUG = False
# Enable verbose debug output when the script is run with a 'debug' argument
if len(sys.argv) > 1:
if sys.argv[1] == 'debug':
print '**** DEBUGGING ENABLED ****'
DEBUG = True
import pprint
PP = pprint.PrettyPrinter(indent=4)
microsoft_office_config = {}
# Apps to check
apps=['Microsoft Word','Microsoft Excel','Microsoft Outlook','Microsoft PowerPoint','Microsoft OneNote','Microsoft Teams','OneDrive']
for app in apps:
app_path = '/Applications/' + app + '.app/'
if os.path.isdir(app_path):
microsoft_office_config[app] = {}
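        # A Contents/_MASReceipt folder indicates the app was installed from the Mac App Store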
if os.path.exists(app_path + '/Contents/_MASReceipt'):
microsoft_office_config[app]["MAS"] = "True"
else:
microsoft_office_config[app]["MAS"] = "False"
pl = FoundationPlist.readPlist(app_path + '/Contents/Info.plist')
app_version = pl["CFBundleVersion"]
microsoft_office_config[app]["Version"] = app_version
# Check for Licensing Helper file
Licensing_Helper = os.path.isfile("/Library/PrivilegedHelperTools/com.microsoft.office.licensingV2.helper")
if Licensing_Helper:
microsoft_office_config['Licensing_Helper'] = 'Detected'
else:
microsoft_office_config['Licensing_Helper'] = 'Not Detected'
# Check for Retail VL License
Retail_VL_License = os.path.isfile("/Library/Preferences/com.microsoft.office.licensingV2.plist")
if Retail_VL_License:
microsoft_office_config['Retail_VL_License'] = 'Detected'
else:
microsoft_office_config['Retail_VL_License'] = 'Not Detected'
# Check versions for each app
# Check MAS for each app
# Get all users' home folders
cmd = ['dscl', '.', '-readall', '/Users', 'NFSHomeDirectory']
proc = subprocess.Popen(cmd, shell=False, bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(output, unused_error) = proc.communicate()
# Check each home folder for MAU Channel
for user in output.split('\n'):
if 'NFSHomeDirectory' in user and '/var/empty' not in user:
user_name = user.replace("NFSHomeDirectory: /Users/", "")
userpath = user.replace("NFSHomeDirectory: ", "")
# Check each home folder for MAU Version
autoupdate_pref = userpath + '/Library/Preferences/com.microsoft.autoupdate2.plist'
if os.path.isfile(autoupdate_pref):
pl = FoundationPlist.readPlist(autoupdate_pref)
microsoft_office_config["Users"] = {}
microsoft_office_config["Users"][user_name] = {}
microsoft_office_config["Users"][user_name]["MAU_Channel"] = pl["ChannelName"]
# Check each home folder for Office 365 License
office_365_license = userpath + '/Library/Group Containers/UBF8T346G9.Office/com.microsoft.Office365.plist'
if os.path.isfile(office_365_license):
microsoft_office_config["Users"][user_name]["Office_365_License"] = "Detected"
def main():
"""Main"""
# Create cache dir if it does not exist
cachedir = '%s/cache' % os.path.dirname(os.path.realpath(__file__))
if not os.path.exists(cachedir):
os.makedirs(cachedir)
microsoft_office_cache = os.path.join(cachedir, 'microsoft_office.json')
print json.dumps(microsoft_office_config, indent=4)
with open(microsoft_office_cache, 'w') as fp:
json.dump(microsoft_office_config, fp, indent=4)
if __name__ == "__main__":
main()
|
py | 1a3a2017caa21a0f3c0fe7bbda1eff5527e5b9f2 | input = """
1 2 0 0
1 3 0 0
1 4 0 0
1 5 0 0
1 6 0 0
1 7 0 0
1 8 0 0
1 9 0 0
1 10 0 0
1 11 0 0
1 12 0 0
1 13 0 0
1 14 0 0
1 15 0 0
1 16 0 0
1 17 0 0
1 18 0 0
1 19 0 0
1 20 0 0
1 21 0 0
1 22 0 0
1 23 0 0
1 24 0 0
1 25 0 0
1 26 0 0
1 27 0 0
1 28 0 0
1 29 0 0
1 30 0 0
1 31 0 0
1 32 0 0
1 33 0 0
1 34 0 0
1 35 0 0
1 36 0 0
1 37 0 0
1 38 0 0
1 39 0 0
1 40 0 0
1 41 0 0
1 42 0 0
1 43 0 0
1 44 0 0
1 45 0 0
1 46 0 0
1 47 0 0
1 48 0 0
1 49 0 0
1 50 0 0
1 51 0 0
1 52 0 0
1 53 0 0
1 54 0 0
1 55 0 0
1 56 0 0
1 57 0 0
1 58 0 0
1 59 0 0
1 60 0 0
1 61 0 0
1 62 0 0
1 63 0 0
1 64 0 0
1 65 0 0
1 66 0 0
1 67 0 0
1 68 0 0
1 69 0 0
1 70 0 0
1 71 0 0
1 72 0 0
1 73 0 0
1 74 0 0
1 75 0 0
1 76 0 0
1 77 0 0
1 78 0 0
1 79 0 0
1 80 0 0
1 81 0 0
1 82 0 0
1 83 0 0
1 84 0 0
1 85 0 0
1 86 0 0
1 87 0 0
1 88 0 0
1 89 0 0
1 90 0 0
1 91 0 0
1 92 0 0
1 93 0 0
1 94 0 0
1 95 0 0
1 96 0 0
1 97 0 0
1 98 0 0
1 99 0 0
1 100 0 0
1 101 0 0
1 102 0 0
1 103 0 0
1 104 0 0
1 105 0 0
1 106 0 0
1 107 0 0
1 108 0 0
1 109 0 0
1 110 0 0
1 111 0 0
1 112 0 0
1 113 0 0
1 114 0 0
1 115 0 0
1 116 0 0
1 117 0 0
1 118 0 0
1 119 0 0
1 120 0 0
1 121 0 0
1 122 0 0
1 123 0 0
1 124 0 0
1 125 0 0
1 126 0 0
1 127 0 0
1 128 0 0
1 129 0 0
1 130 0 0
1 131 0 0
1 132 0 0
1 133 0 0
1 134 0 0
1 135 0 0
1 136 0 0
1 137 0 0
1 138 0 0
1 139 0 0
1 140 0 0
1 141 0 0
1 142 0 0
1 143 0 0
1 144 0 0
1 145 0 0
1 146 0 0
1 147 0 0
1 148 0 0
1 149 0 0
1 150 0 0
1 151 0 0
1 152 0 0
1 153 0 0
1 154 0 0
1 155 0 0
1 156 0 0
1 157 0 0
1 158 0 0
1 159 0 0
1 160 0 0
1 161 0 0
1 162 0 0
1 163 0 0
1 164 0 0
1 165 0 0
1 166 0 0
1 167 0 0
1 168 0 0
1 169 0 0
1 170 0 0
1 171 0 0
1 172 0 0
1 173 0 0
1 174 0 0
1 175 0 0
1 176 0 0
1 177 0 0
1 178 0 0
1 179 0 0
1 180 0 0
1 181 0 0
1 182 0 0
1 183 0 0
1 184 0 0
1 185 0 0
1 186 0 0
1 187 0 0
1 188 0 0
1 189 0 0
1 190 0 0
1 191 0 0
1 192 0 0
1 193 0 0
1 194 0 0
1 195 0 0
1 196 0 0
1 197 0 0
1 198 0 0
1 199 0 0
1 200 0 0
1 201 2 1 202 203
1 202 2 1 201 203
1 203 0 0
1 204 2 1 205 206
1 205 2 1 204 206
1 206 0 0
1 207 2 1 208 209
1 208 2 1 207 209
1 209 0 0
1 210 2 1 211 212
1 211 2 1 210 212
1 212 0 0
1 213 2 1 214 215
1 214 2 1 213 215
1 215 0 0
1 216 2 1 217 218
1 217 2 1 216 218
1 218 0 0
1 219 2 1 220 221
1 220 2 1 219 221
1 221 0 0
1 222 2 1 223 224
1 223 2 1 222 224
1 224 0 0
1 225 2 1 226 227
1 226 2 1 225 227
1 227 0 0
1 228 2 1 229 230
1 229 2 1 228 230
1 230 0 0
1 231 2 1 232 233
1 232 2 1 231 233
1 233 0 0
1 234 2 1 235 236
1 235 2 1 234 236
1 236 0 0
1 237 2 1 238 239
1 238 2 1 237 239
1 239 0 0
1 240 2 1 241 242
1 241 2 1 240 242
1 242 0 0
1 243 2 1 244 245
1 244 2 1 243 245
1 245 0 0
1 246 2 1 247 248
1 247 2 1 246 248
1 248 0 0
1 249 2 1 250 251
1 250 2 1 249 251
1 251 0 0
1 252 2 1 253 254
1 253 2 1 252 254
1 254 0 0
1 255 2 1 256 257
1 256 2 1 255 257
1 257 0 0
1 258 2 1 259 260
1 259 2 1 258 260
1 260 0 0
1 261 2 1 262 263
1 262 2 1 261 263
1 263 0 0
1 264 2 1 265 266
1 265 2 1 264 266
1 266 0 0
1 267 2 1 268 269
1 268 2 1 267 269
1 269 0 0
1 270 2 1 271 272
1 271 2 1 270 272
1 272 0 0
1 273 2 1 274 275
1 274 2 1 273 275
1 275 0 0
1 276 2 1 277 278
1 277 2 1 276 278
1 278 0 0
1 279 2 1 280 281
1 280 2 1 279 281
1 281 0 0
1 282 2 1 283 284
1 283 2 1 282 284
1 284 0 0
1 285 2 1 286 287
1 286 2 1 285 287
1 287 0 0
1 288 2 1 289 290
1 289 2 1 288 290
1 290 0 0
1 291 2 1 292 293
1 292 2 1 291 293
1 293 0 0
1 294 2 1 295 296
1 295 2 1 294 296
1 296 0 0
1 297 2 1 298 299
1 298 2 1 297 299
1 299 0 0
1 300 2 1 301 302
1 301 2 1 300 302
1 302 0 0
1 303 2 1 304 305
1 304 2 1 303 305
1 305 0 0
1 306 2 1 307 308
1 307 2 1 306 308
1 308 0 0
1 309 2 1 310 311
1 310 2 1 309 311
1 311 0 0
1 312 2 1 313 314
1 313 2 1 312 314
1 314 0 0
1 315 2 1 316 317
1 316 2 1 315 317
1 317 0 0
1 318 2 1 319 320
1 319 2 1 318 320
1 320 0 0
1 321 2 1 322 323
1 322 2 1 321 323
1 323 0 0
1 324 2 1 325 326
1 325 2 1 324 326
1 326 0 0
1 327 2 1 328 329
1 328 2 1 327 329
1 329 0 0
1 330 2 1 331 332
1 331 2 1 330 332
1 332 0 0
1 333 2 1 334 335
1 334 2 1 333 335
1 335 0 0
1 336 2 1 337 338
1 337 2 1 336 338
1 338 0 0
1 339 2 1 340 341
1 340 2 1 339 341
1 341 0 0
1 342 2 1 343 344
1 343 2 1 342 344
1 344 0 0
1 345 2 1 346 347
1 346 2 1 345 347
1 347 0 0
1 348 2 1 349 350
1 349 2 1 348 350
1 350 0 0
1 351 2 1 352 353
1 352 2 1 351 353
1 353 0 0
1 354 2 1 355 356
1 355 2 1 354 356
1 356 0 0
1 357 2 1 358 359
1 358 2 1 357 359
1 359 0 0
1 360 2 1 361 362
1 361 2 1 360 362
1 362 0 0
1 363 2 1 364 365
1 364 2 1 363 365
1 365 0 0
1 366 2 1 367 368
1 367 2 1 366 368
1 368 0 0
1 369 2 1 370 371
1 370 2 1 369 371
1 371 0 0
1 372 2 1 373 374
1 373 2 1 372 374
1 374 0 0
1 375 2 1 376 377
1 376 2 1 375 377
1 377 0 0
1 378 2 1 379 380
1 379 2 1 378 380
1 380 0 0
1 381 2 1 382 383
1 382 2 1 381 383
1 383 0 0
1 384 2 1 385 386
1 385 2 1 384 386
1 386 0 0
1 387 2 1 388 389
1 388 2 1 387 389
1 389 0 0
1 390 2 1 391 392
1 391 2 1 390 392
1 392 0 0
1 393 2 1 394 395
1 394 2 1 393 395
1 395 0 0
1 396 2 1 397 398
1 397 2 1 396 398
1 398 0 0
1 399 2 1 400 401
1 400 2 1 399 401
1 401 0 0
1 402 2 1 403 404
1 403 2 1 402 404
1 404 0 0
1 405 2 1 406 407
1 406 2 1 405 407
1 407 0 0
1 408 2 1 409 410
1 409 2 1 408 410
1 410 0 0
1 411 2 1 412 413
1 412 2 1 411 413
1 413 0 0
1 414 2 1 415 416
1 415 2 1 414 416
1 416 0 0
1 417 2 1 418 419
1 418 2 1 417 419
1 419 0 0
1 420 2 1 421 422
1 421 2 1 420 422
1 422 0 0
1 423 2 1 424 425
1 424 2 1 423 425
1 425 0 0
1 426 2 1 427 428
1 427 2 1 426 428
1 428 0 0
1 429 2 1 430 431
1 430 2 1 429 431
1 431 0 0
1 432 2 1 433 434
1 433 2 1 432 434
1 434 0 0
1 435 2 1 436 437
1 436 2 1 435 437
1 437 0 0
1 438 2 1 439 440
1 439 2 1 438 440
1 440 0 0
1 441 2 1 442 443
1 442 2 1 441 443
1 443 0 0
1 444 2 1 445 446
1 445 2 1 444 446
1 446 0 0
1 447 2 1 448 449
1 448 2 1 447 449
1 449 0 0
1 450 2 1 451 452
1 451 2 1 450 452
1 452 0 0
1 453 2 1 454 455
1 454 2 1 453 455
1 455 0 0
1 456 2 1 457 458
1 457 2 1 456 458
1 458 0 0
1 459 2 1 460 461
1 460 2 1 459 461
1 461 0 0
1 462 2 1 463 464
1 463 2 1 462 464
1 464 0 0
1 465 2 1 466 467
1 466 2 1 465 467
1 467 0 0
1 468 2 1 469 470
1 469 2 1 468 470
1 470 0 0
1 471 1 0 468
1 472 1 0 465
1 473 1 0 462
1 474 1 0 459
1 475 1 0 456
1 476 1 0 453
1 477 1 0 450
1 478 1 0 447
1 479 1 0 444
1 480 1 0 441
1 481 1 0 438
1 482 1 0 435
1 483 1 0 432
1 484 1 0 429
1 485 1 0 426
1 486 1 0 423
1 487 1 0 420
1 488 1 0 417
1 489 1 0 414
1 490 1 0 411
1 491 1 0 408
1 492 1 0 405
1 493 1 0 402
1 494 1 0 399
1 495 1 0 396
1 496 1 0 393
1 497 1 0 390
1 498 1 0 387
1 499 1 0 384
1 500 1 0 381
1 471 1 0 378
1 472 1 0 375
1 473 1 0 372
1 474 1 0 369
1 475 1 0 366
1 476 1 0 363
1 477 1 0 360
1 478 1 0 357
1 479 1 0 354
1 480 1 0 351
1 481 1 0 348
1 482 1 0 345
1 483 1 0 342
1 484 1 0 339
1 485 1 0 336
1 486 1 0 333
1 487 1 0 330
1 488 1 0 327
1 489 1 0 324
1 490 1 0 321
1 491 1 0 318
1 492 1 0 315
1 493 1 0 312
1 494 1 0 309
1 495 1 0 306
1 496 1 0 303
1 497 1 0 300
1 498 1 0 297
1 499 1 0 294
1 500 1 0 291
1 471 1 0 288
1 472 1 0 285
1 473 1 0 282
1 474 1 0 279
1 475 1 0 276
1 476 1 0 273
1 477 1 0 270
1 478 1 0 267
1 479 1 0 264
1 480 1 0 261
1 481 1 0 258
1 482 1 0 255
1 483 1 0 252
1 484 1 0 249
1 485 1 0 246
1 486 1 0 243
1 487 1 0 240
1 488 1 0 237
1 489 1 0 234
1 490 1 0 231
1 491 1 0 228
1 492 1 0 225
1 493 1 0 222
1 494 1 0 219
1 495 1 0 216
1 496 1 0 213
1 497 1 0 210
1 498 1 0 207
1 499 1 0 204
1 500 1 0 201
1 1 1 1 500
1 1 1 1 499
1 1 1 1 498
1 1 1 1 497
1 1 1 1 496
1 1 1 1 495
1 1 1 1 494
1 1 1 1 493
1 1 1 1 492
1 1 1 1 491
1 1 1 1 490
1 1 1 1 489
1 1 1 1 488
1 1 1 1 487
1 1 1 1 486
1 1 1 1 485
1 1 1 1 484
1 1 1 1 483
1 1 1 1 482
1 1 1 1 481
1 1 1 1 480
1 1 1 1 479
1 1 1 1 478
1 1 1 1 477
1 1 1 1 476
1 1 1 1 475
1 1 1 1 474
1 1 1 1 473
1 1 1 1 472
1 1 1 1 471
1 1 2 0 468 378
1 1 2 0 468 288
1 1 2 0 465 375
1 1 2 0 465 285
1 1 2 0 462 372
1 1 2 0 462 282
1 1 2 0 459 369
1 1 2 0 459 279
1 1 2 0 456 366
1 1 2 0 456 276
1 1 2 0 453 363
1 1 2 0 453 273
1 1 2 0 450 360
1 1 2 0 450 270
1 1 2 0 447 357
1 1 2 0 447 267
1 1 2 0 444 354
1 1 2 0 444 264
1 1 2 0 441 351
1 1 2 0 441 261
1 1 2 0 438 348
1 1 2 0 438 258
1 1 2 0 435 345
1 1 2 0 435 255
1 1 2 0 432 342
1 1 2 0 432 252
1 1 2 0 429 339
1 1 2 0 429 249
1 1 2 0 426 336
1 1 2 0 426 246
1 1 2 0 423 333
1 1 2 0 423 243
1 1 2 0 420 330
1 1 2 0 420 240
1 1 2 0 417 327
1 1 2 0 417 237
1 1 2 0 414 324
1 1 2 0 414 234
1 1 2 0 411 321
1 1 2 0 411 231
1 1 2 0 408 318
1 1 2 0 408 228
1 1 2 0 405 315
1 1 2 0 405 225
1 1 2 0 402 312
1 1 2 0 402 222
1 1 2 0 399 309
1 1 2 0 399 219
1 1 2 0 396 306
1 1 2 0 396 216
1 1 2 0 393 303
1 1 2 0 393 213
1 1 2 0 390 300
1 1 2 0 390 210
1 1 2 0 387 297
1 1 2 0 387 207
1 1 2 0 384 294
1 1 2 0 384 204
1 1 2 0 381 291
1 1 2 0 381 201
1 1 2 0 378 468
1 1 2 0 378 288
1 1 2 0 375 465
1 1 2 0 375 285
1 1 2 0 372 462
1 1 2 0 372 282
1 1 2 0 369 459
1 1 2 0 369 279
1 1 2 0 366 456
1 1 2 0 366 276
1 1 2 0 363 453
1 1 2 0 363 273
1 1 2 0 360 450
1 1 2 0 360 270
1 1 2 0 357 447
1 1 2 0 357 267
1 1 2 0 354 444
1 1 2 0 354 264
1 1 2 0 351 441
1 1 2 0 351 261
1 1 2 0 348 438
1 1 2 0 348 258
1 1 2 0 345 435
1 1 2 0 345 255
1 1 2 0 342 432
1 1 2 0 342 252
1 1 2 0 339 429
1 1 2 0 339 249
1 1 2 0 336 426
1 1 2 0 336 246
1 1 2 0 333 423
1 1 2 0 333 243
1 1 2 0 330 420
1 1 2 0 330 240
1 1 2 0 327 417
1 1 2 0 327 237
1 1 2 0 324 414
1 1 2 0 324 234
1 1 2 0 321 411
1 1 2 0 321 231
1 1 2 0 318 408
1 1 2 0 318 228
1 1 2 0 315 405
1 1 2 0 315 225
1 1 2 0 312 402
1 1 2 0 312 222
1 1 2 0 309 399
1 1 2 0 309 219
1 1 2 0 306 396
1 1 2 0 306 216
1 1 2 0 303 393
1 1 2 0 303 213
1 1 2 0 300 390
1 1 2 0 300 210
1 1 2 0 297 387
1 1 2 0 297 207
1 1 2 0 294 384
1 1 2 0 294 204
1 1 2 0 291 381
1 1 2 0 291 201
1 1 2 0 288 468
1 1 2 0 288 378
1 1 2 0 285 465
1 1 2 0 285 375
1 1 2 0 282 462
1 1 2 0 282 372
1 1 2 0 279 459
1 1 2 0 279 369
1 1 2 0 276 456
1 1 2 0 276 366
1 1 2 0 273 453
1 1 2 0 273 363
1 1 2 0 270 450
1 1 2 0 270 360
1 1 2 0 267 447
1 1 2 0 267 357
1 1 2 0 264 444
1 1 2 0 264 354
1 1 2 0 261 441
1 1 2 0 261 351
1 1 2 0 258 438
1 1 2 0 258 348
1 1 2 0 255 435
1 1 2 0 255 345
1 1 2 0 252 432
1 1 2 0 252 342
1 1 2 0 249 429
1 1 2 0 249 339
1 1 2 0 246 426
1 1 2 0 246 336
1 1 2 0 243 423
1 1 2 0 243 333
1 1 2 0 240 420
1 1 2 0 240 330
1 1 2 0 237 417
1 1 2 0 237 327
1 1 2 0 234 414
1 1 2 0 234 324
1 1 2 0 231 411
1 1 2 0 231 321
1 1 2 0 228 408
1 1 2 0 228 318
1 1 2 0 225 405
1 1 2 0 225 315
1 1 2 0 222 402
1 1 2 0 222 312
1 1 2 0 219 399
1 1 2 0 219 309
1 1 2 0 216 396
1 1 2 0 216 306
1 1 2 0 213 393
1 1 2 0 213 303
1 1 2 0 210 390
1 1 2 0 210 300
1 1 2 0 207 387
1 1 2 0 207 297
1 1 2 0 204 384
1 1 2 0 204 294
1 1 2 0 201 381
1 1 2 0 201 291
1 1 2 0 468 429
1 1 2 0 468 417
1 1 2 0 468 408
1 1 2 0 468 405
1 1 2 0 468 396
1 1 2 0 465 444
1 1 2 0 465 441
1 1 2 0 465 399
1 1 2 0 465 396
1 1 2 0 465 393
1 1 2 0 465 381
1 1 2 0 462 459
1 1 2 0 462 453
1 1 2 0 462 423
1 1 2 0 462 411
1 1 2 0 462 408
1 1 2 0 462 402
1 1 2 0 459 453
1 1 2 0 459 435
1 1 2 0 459 432
1 1 2 0 459 405
1 1 2 0 459 399
1 1 2 0 456 447
1 1 2 0 456 438
1 1 2 0 456 432
1 1 2 0 456 399
1 1 2 0 456 390
1 1 2 0 453 411
1 1 2 0 453 402
1 1 2 0 453 399
1 1 2 0 450 438
1 1 2 0 450 435
1 1 2 0 450 423
1 1 2 0 450 414
1 1 2 0 450 390
1 1 2 0 447 441
1 1 2 0 447 417
1 1 2 0 447 396
1 1 2 0 447 390
1 1 2 0 447 387
1 1 2 0 447 384
1 1 2 0 444 441
1 1 2 0 444 420
1 1 2 0 444 399
1 1 2 0 441 438
1 1 2 0 441 408
1 1 2 0 441 399
1 1 2 0 441 390
1 1 2 0 441 387
1 1 2 0 441 381
1 1 2 0 438 420
1 1 2 0 438 417
1 1 2 0 438 405
1 1 2 0 438 387
1 1 2 0 438 384
1 1 2 0 435 426
1 1 2 0 435 420
1 1 2 0 435 411
1 1 2 0 435 402
1 1 2 0 435 390
1 1 2 0 432 411
1 1 2 0 432 408
1 1 2 0 432 402
1 1 2 0 432 393
1 1 2 0 429 417
1 1 2 0 429 396
1 1 2 0 429 390
1 1 2 0 426 423
1 1 2 0 426 399
1 1 2 0 426 396
1 1 2 0 423 420
1 1 2 0 420 405
1 1 2 0 420 399
1 1 2 0 417 405
1 1 2 0 414 408
1 1 2 0 414 405
1 1 2 0 414 393
1 1 2 0 411 402
1 1 2 0 408 402
1 1 2 0 408 396
1 1 2 0 408 384
1 1 2 0 405 399
1 1 2 0 405 384
1 1 2 0 402 399
1 1 2 0 402 393
1 1 2 0 399 396
1 1 2 0 399 393
1 1 2 0 396 393
1 1 2 0 396 387
1 1 2 0 390 387
1 1 2 0 378 339
1 1 2 0 378 327
1 1 2 0 378 318
1 1 2 0 378 315
1 1 2 0 378 306
1 1 2 0 375 354
1 1 2 0 375 351
1 1 2 0 375 309
1 1 2 0 375 306
1 1 2 0 375 303
1 1 2 0 375 291
1 1 2 0 372 369
1 1 2 0 372 363
1 1 2 0 372 333
1 1 2 0 372 321
1 1 2 0 372 318
1 1 2 0 372 312
1 1 2 0 369 363
1 1 2 0 369 345
1 1 2 0 369 342
1 1 2 0 369 315
1 1 2 0 369 309
1 1 2 0 366 357
1 1 2 0 366 348
1 1 2 0 366 342
1 1 2 0 366 309
1 1 2 0 366 300
1 1 2 0 363 321
1 1 2 0 363 312
1 1 2 0 363 309
1 1 2 0 360 348
1 1 2 0 360 345
1 1 2 0 360 333
1 1 2 0 360 324
1 1 2 0 360 300
1 1 2 0 357 351
1 1 2 0 357 327
1 1 2 0 357 306
1 1 2 0 357 300
1 1 2 0 357 297
1 1 2 0 357 294
1 1 2 0 354 351
1 1 2 0 354 330
1 1 2 0 354 309
1 1 2 0 351 348
1 1 2 0 351 318
1 1 2 0 351 309
1 1 2 0 351 300
1 1 2 0 351 297
1 1 2 0 351 291
1 1 2 0 348 330
1 1 2 0 348 327
1 1 2 0 348 315
1 1 2 0 348 297
1 1 2 0 348 294
1 1 2 0 345 336
1 1 2 0 345 330
1 1 2 0 345 321
1 1 2 0 345 312
1 1 2 0 345 300
1 1 2 0 342 321
1 1 2 0 342 318
1 1 2 0 342 312
1 1 2 0 342 303
1 1 2 0 339 327
1 1 2 0 339 306
1 1 2 0 339 300
1 1 2 0 336 333
1 1 2 0 336 309
1 1 2 0 336 306
1 1 2 0 333 330
1 1 2 0 330 315
1 1 2 0 330 309
1 1 2 0 327 315
1 1 2 0 324 318
1 1 2 0 324 315
1 1 2 0 324 303
1 1 2 0 321 312
1 1 2 0 318 312
1 1 2 0 318 306
1 1 2 0 318 294
1 1 2 0 315 309
1 1 2 0 315 294
1 1 2 0 312 309
1 1 2 0 312 303
1 1 2 0 309 306
1 1 2 0 309 303
1 1 2 0 306 303
1 1 2 0 306 297
1 1 2 0 300 297
1 1 2 0 288 249
1 1 2 0 288 237
1 1 2 0 288 228
1 1 2 0 288 225
1 1 2 0 288 216
1 1 2 0 285 264
1 1 2 0 285 261
1 1 2 0 285 219
1 1 2 0 285 216
1 1 2 0 285 213
1 1 2 0 285 201
1 1 2 0 282 279
1 1 2 0 282 273
1 1 2 0 282 243
1 1 2 0 282 231
1 1 2 0 282 228
1 1 2 0 282 222
1 1 2 0 279 273
1 1 2 0 279 255
1 1 2 0 279 252
1 1 2 0 279 225
1 1 2 0 279 219
1 1 2 0 276 267
1 1 2 0 276 258
1 1 2 0 276 252
1 1 2 0 276 219
1 1 2 0 276 210
1 1 2 0 273 231
1 1 2 0 273 222
1 1 2 0 273 219
1 1 2 0 270 258
1 1 2 0 270 255
1 1 2 0 270 243
1 1 2 0 270 234
1 1 2 0 270 210
1 1 2 0 267 261
1 1 2 0 267 237
1 1 2 0 267 216
1 1 2 0 267 210
1 1 2 0 267 207
1 1 2 0 267 204
1 1 2 0 264 261
1 1 2 0 264 240
1 1 2 0 264 219
1 1 2 0 261 258
1 1 2 0 261 228
1 1 2 0 261 219
1 1 2 0 261 210
1 1 2 0 261 207
1 1 2 0 261 201
1 1 2 0 258 240
1 1 2 0 258 237
1 1 2 0 258 225
1 1 2 0 258 207
1 1 2 0 258 204
1 1 2 0 255 246
1 1 2 0 255 240
1 1 2 0 255 231
1 1 2 0 255 222
1 1 2 0 255 210
1 1 2 0 252 231
1 1 2 0 252 228
1 1 2 0 252 222
1 1 2 0 252 213
1 1 2 0 249 237
1 1 2 0 249 216
1 1 2 0 249 210
1 1 2 0 246 243
1 1 2 0 246 219
1 1 2 0 246 216
1 1 2 0 243 240
1 1 2 0 240 225
1 1 2 0 240 219
1 1 2 0 237 225
1 1 2 0 234 228
1 1 2 0 234 225
1 1 2 0 234 213
1 1 2 0 231 222
1 1 2 0 228 222
1 1 2 0 228 216
1 1 2 0 228 204
1 1 2 0 225 219
1 1 2 0 225 204
1 1 2 0 222 219
1 1 2 0 222 213
1 1 2 0 219 216
1 1 2 0 219 213
1 1 2 0 216 213
1 1 2 0 216 207
1 1 2 0 210 207
0
201 col(29,3)
204 col(28,3)
207 col(27,3)
210 col(26,3)
213 col(25,3)
216 col(24,3)
219 col(23,3)
222 col(22,3)
225 col(21,3)
228 col(20,3)
231 col(19,3)
234 col(18,3)
237 col(17,3)
240 col(16,3)
243 col(15,3)
246 col(14,3)
249 col(13,3)
252 col(12,3)
255 col(11,3)
258 col(10,3)
261 col(9,3)
264 col(8,3)
267 col(7,3)
270 col(6,3)
273 col(5,3)
276 col(4,3)
279 col(3,3)
282 col(2,3)
285 col(1,3)
288 col(0,3)
291 col(29,2)
294 col(28,2)
297 col(27,2)
300 col(26,2)
303 col(25,2)
306 col(24,2)
309 col(23,2)
312 col(22,2)
315 col(21,2)
318 col(20,2)
321 col(19,2)
324 col(18,2)
327 col(17,2)
330 col(16,2)
333 col(15,2)
336 col(14,2)
339 col(13,2)
342 col(12,2)
345 col(11,2)
348 col(10,2)
351 col(9,2)
354 col(8,2)
357 col(7,2)
360 col(6,2)
363 col(5,2)
366 col(4,2)
369 col(3,2)
372 col(2,2)
375 col(1,2)
378 col(0,2)
381 col(29,1)
384 col(28,1)
387 col(27,1)
390 col(26,1)
393 col(25,1)
396 col(24,1)
399 col(23,1)
402 col(22,1)
405 col(21,1)
408 col(20,1)
411 col(19,1)
414 col(18,1)
417 col(17,1)
420 col(16,1)
423 col(15,1)
426 col(14,1)
429 col(13,1)
432 col(12,1)
435 col(11,1)
438 col(10,1)
441 col(9,1)
444 col(8,1)
447 col(7,1)
450 col(6,1)
453 col(5,1)
456 col(4,1)
459 col(3,1)
462 col(2,1)
465 col(1,1)
468 col(0,1)
0
B+
0
B-
1
0
1
"""
output = """
"""
|
py | 1a3a2025ad21b549bc628df6d2f7899519499f18 | from setuptools import setup
from oioswift import __version__
setup(
name='oioswift',
version=__version__,
author='OpenIO',
author_email='[email protected]',
description='OpenIO Swift Gateway',
url='https://github.com/open-io/oio-swift',
license='Apache License (2.0)',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Information Technology',
'Operating System :: OS Independent',
],
packages=[
'oioswift',
'oioswift.common',
'oioswift.common.middleware',
'oioswift.common.middleware.crypto',
'oioswift.proxy',
'oioswift.proxy.controllers'],
entry_points={
'paste.app_factory': [
'main=oioswift.server:app_factory',
],
'paste.filter_factory': [
'autocontainer=oioswift.common.middleware.autocontainer:filter_factory',
'encryption=oioswift.common.middleware.crypto:filter_factory',
'hashedcontainer=oioswift.common.middleware.hashedcontainer:filter_factory',
'healthcheck=oioswift.common.middleware.healthcheck:filter_factory',
'keymaster=oioswift.common.middleware.crypto.keymaster:filter_factory',
'regexcontainer=oioswift.common.middleware.regexcontainer:filter_factory',
'versioned_writes=oioswift.common.middleware.versioned_writes:filter_factory',
'container_hierarchy=oioswift.common.middleware.container_hierarchy:filter_factory',
'copy=oioswift.common.middleware.copy:filter_factory',
'verb_acl=oioswift.common.middleware.verb_acl:filter_factory',
'tempauth=oioswift.common.middleware.tempauth:filter_factory',
],
},
scripts=[
'bin/oioswift-proxy-server',
],
install_requires=['swift>=2.13.0', 'oio>=4.2.0']
)
|
py | 1a3a204aac23314b4ad60cd184b371cae5cef3e7 | import json
import re
from datetime import datetime, timedelta, date
from itertools import groupby, dropwhile, izip_longest
import requests
from cabot.cabotapp import alert
from cabot.cabotapp.utils import cabot_needs_setup
from dateutil.relativedelta import relativedelta
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.db import transaction
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.timezone import utc
from django.views.generic import (
DetailView, CreateView, UpdateView, ListView, DeleteView, TemplateView, View)
from django.shortcuts import redirect, render
from alert import AlertPlugin, AlertPluginUserData
from models import (
StatusCheck, GraphiteStatusCheck, JenkinsStatusCheck, HttpStatusCheck, ICMPStatusCheck,
StatusCheckResult, UserProfile, Service, Instance, Shift, get_duty_officers)
from tasks import run_status_check as _run_status_check
from .graphite import get_data, get_matching_metrics
class LoginRequiredMixin(object):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
@login_required
def subscriptions(request):
""" Simple list of all checks """
services = Service.objects.all()
users = User.objects.filter(is_active=True)
return render(request, 'cabotapp/subscriptions.html', {
'services': services,
'users': users,
'duty_officers': get_duty_officers(),
})
@login_required
def run_status_check(request, pk):
"""Runs a specific check"""
_run_status_check(check_or_id=pk)
return HttpResponseRedirect(reverse('check', kwargs={'pk': pk}))
def duplicate_icmp_check(request, pk):
pc = StatusCheck.objects.get(pk=pk)
npk = pc.duplicate()
return HttpResponseRedirect(reverse('update-icmp-check', kwargs={'pk': npk}))
def duplicate_instance(request, pk):
instance = Instance.objects.get(pk=pk)
new_instance = instance.duplicate()
return HttpResponseRedirect(reverse('update-instance', kwargs={'pk': new_instance}))
def duplicate_http_check(request, pk):
pc = StatusCheck.objects.get(pk=pk)
npk = pc.duplicate()
return HttpResponseRedirect(reverse('update-http-check', kwargs={'pk': npk}))
def duplicate_graphite_check(request, pk):
pc = StatusCheck.objects.get(pk=pk)
npk = pc.duplicate()
return HttpResponseRedirect(reverse('update-graphite-check', kwargs={'pk': npk}))
def duplicate_jenkins_check(request, pk):
pc = StatusCheck.objects.get(pk=pk)
npk = pc.duplicate()
return HttpResponseRedirect(reverse('update-jenkins-check', kwargs={'pk': npk}))
class StatusCheckResultDetailView(LoginRequiredMixin, DetailView):
model = StatusCheckResult
context_object_name = 'result'
class SymmetricalForm(forms.ModelForm):
symmetrical_fields = () # Iterable of 2-tuples (field, model)
def __init__(self, *args, **kwargs):
super(SymmetricalForm, self).__init__(*args, **kwargs)
if self.instance and self.instance.pk:
for field in self.symmetrical_fields:
self.fields[field].initial = getattr(
self.instance, field).all()
def save(self, commit=True):
instance = super(SymmetricalForm, self).save(commit=False)
if commit:
instance.save()
if instance.pk:
for field in self.symmetrical_fields:
setattr(instance, field, self.cleaned_data[field])
self.save_m2m()
return instance
base_widgets = {
'name': forms.TextInput(attrs={
'style': 'width:30%',
}),
'importance': forms.RadioSelect(),
}
class StatusCheckForm(SymmetricalForm):
symmetrical_fields = ('service_set', 'instance_set')
service_set = forms.ModelMultipleChoiceField(
queryset=Service.objects.all(),
required=False,
help_text='Link to service(s).',
widget=forms.SelectMultiple(
attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
},
)
)
instance_set = forms.ModelMultipleChoiceField(
queryset=Instance.objects.all(),
required=False,
help_text='Link to instance(s).',
widget=forms.SelectMultiple(
attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
},
)
)
class GraphiteStatusCheckForm(StatusCheckForm):
class Meta:
model = GraphiteStatusCheck
fields = (
'name',
'metric',
'check_type',
'value',
'frequency',
'active',
'importance',
'expected_num_hosts',
'allowed_num_failures',
'debounce',
)
widgets = dict(**base_widgets)
widgets.update({
'value': forms.TextInput(attrs={
'style': 'width: 100px',
'placeholder': 'threshold value',
}),
'metric': forms.TextInput(attrs={
'style': 'width: 100%',
'placeholder': 'graphite metric key'
}),
'check_type': forms.Select(attrs={
'data-rel': 'chosen',
})
})
class ICMPStatusCheckForm(StatusCheckForm):
class Meta:
model = ICMPStatusCheck
fields = (
'name',
'frequency',
'importance',
'active',
'debounce',
)
widgets = dict(**base_widgets)
class HttpStatusCheckForm(StatusCheckForm):
class Meta:
model = HttpStatusCheck
fields = (
'name',
'endpoint',
'username',
'password',
'text_match',
'status_code',
'timeout',
'verify_ssl_certificate',
'frequency',
'importance',
'active',
'debounce',
)
widgets = dict(**base_widgets)
widgets.update({
'endpoint': forms.TextInput(attrs={
'style': 'width: 100%',
'placeholder': 'https://www.arachnys.com',
}),
'username': forms.TextInput(attrs={
'style': 'width: 30%',
}),
'password': forms.PasswordInput(attrs={
'style': 'width: 30%',
}),
'text_match': forms.TextInput(attrs={
'style': 'width: 100%',
'placeholder': '[Aa]rachnys\s+[Rr]ules',
}),
'status_code': forms.TextInput(attrs={
'style': 'width: 20%',
'placeholder': '200',
}),
})
def clean_password(self):
new_password_value = self.cleaned_data['password']
if new_password_value == '':
new_password_value = self.initial.get('password')
return new_password_value
class JenkinsStatusCheckForm(StatusCheckForm):
class Meta:
model = JenkinsStatusCheck
fields = (
'name',
'importance',
'debounce',
'max_queued_build_time',
)
widgets = dict(**base_widgets)
class InstanceForm(SymmetricalForm):
symmetrical_fields = ('service_set',)
service_set = forms.ModelMultipleChoiceField(
queryset=Service.objects.all(),
required=False,
help_text='Link to service(s).',
widget=forms.SelectMultiple(
attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
},
)
)
class Meta:
model = Instance
template_name = 'instance_form.html'
fields = (
'name',
'address',
'users_to_notify',
'status_checks',
'service_set',
)
widgets = {
'name': forms.TextInput(attrs={'style': 'width: 70%;'}),
'address': forms.TextInput(attrs={'style': 'width: 70%;'}),
'status_checks': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'service_set': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'alerts': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'users_to_notify': forms.CheckboxSelectMultiple(),
}
def __init__(self, *args, **kwargs):
ret = super(InstanceForm, self).__init__(*args, **kwargs)
self.fields['users_to_notify'].queryset = User.objects.filter(
is_active=True).order_by('first_name', 'last_name')
return ret
class ServiceForm(forms.ModelForm):
class Meta:
model = Service
template_name = 'service_form.html'
fields = (
'name',
'url',
'users_to_notify',
'status_checks',
'instances',
'alerts',
'alerts_enabled',
'hackpad_id',
'runbook_link'
)
widgets = {
'name': forms.TextInput(attrs={'style': 'width: 70%;'}),
'url': forms.TextInput(attrs={'style': 'width: 70%;'}),
'status_checks': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'instances': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'alerts': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'users_to_notify': forms.CheckboxSelectMultiple(),
'hackpad_id': forms.TextInput(attrs={'style': 'width:70%;'}),
'runbook_link': forms.TextInput(attrs={'style': 'width:70%;'}),
}
def __init__(self, *args, **kwargs):
ret = super(ServiceForm, self).__init__(*args, **kwargs)
self.fields['users_to_notify'].queryset = User.objects.filter(
is_active=True).order_by('first_name', 'last_name')
return ret
def clean_hackpad_id(self):
value = self.cleaned_data['hackpad_id']
if not value:
return ''
for pattern in settings.RECOVERY_SNIPPETS_WHITELIST:
if re.match(pattern, value):
return value
raise ValidationError('Please specify a valid JS snippet link')
def clean_runbook_link(self):
value = self.cleaned_data['runbook_link']
if not value:
return ''
try:
URLValidator()(value)
return value
except ValidationError:
raise ValidationError('Please specify a valid runbook link')
class StatusCheckReportForm(forms.Form):
service = forms.ModelChoiceField(
queryset=Service.objects.all(),
widget=forms.HiddenInput
)
checks = forms.ModelMultipleChoiceField(
queryset=StatusCheck.objects.all(),
widget=forms.SelectMultiple(
attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
},
)
)
date_from = forms.DateField(label='From', widget=forms.DateInput(attrs={'class': 'datepicker'}))
date_to = forms.DateField(label='To', widget=forms.DateInput(attrs={'class': 'datepicker'}))
def get_report(self):
checks = self.cleaned_data['checks']
now = timezone.now()
for check in checks:
# Group results of the check by status (failed alternating with succeeded),
# take time of the first one in each group (starting from a failed group),
# split them into pairs and form the list of problems.
results = check.statuscheckresult_set.filter(
time__gte=self.cleaned_data['date_from'],
time__lt=self.cleaned_data['date_to'] + timedelta(days=1)
).order_by('time')
groups = dropwhile(lambda item: item[0], groupby(results, key=lambda r: r.succeeded))
times = [next(group).time for succeeded, group in groups]
pairs = izip_longest(*([iter(times)] * 2))
check.problems = [(start, end, (end or now) - start) for start, end in pairs]
if results:
check.success_rate = results.filter(succeeded=True).count() / float(len(results)) * 100
return checks
class CheckCreateView(LoginRequiredMixin, CreateView):
template_name = 'cabotapp/statuscheck_form.html'
def form_valid(self, form):
form.instance.created_by = self.request.user
return super(CheckCreateView, self).form_valid(form)
def get_initial(self):
if self.initial:
initial = self.initial
else:
initial = {}
metric = self.request.GET.get('metric')
if metric:
initial['metric'] = metric
service_id = self.request.GET.get('service')
instance_id = self.request.GET.get('instance')
if service_id:
try:
service = Service.objects.get(id=service_id)
initial['service_set'] = [service]
except Service.DoesNotExist:
pass
if instance_id:
try:
instance = Instance.objects.get(id=instance_id)
initial['instance_set'] = [instance]
except Instance.DoesNotExist:
pass
return initial
def get_success_url(self):
if self.request.GET.get('service'):
return reverse('service', kwargs={'pk': self.request.GET.get('service')})
if self.request.GET.get('instance'):
return reverse('instance', kwargs={'pk': self.request.GET.get('instance')})
return reverse('checks')
class CheckUpdateView(LoginRequiredMixin, UpdateView):
template_name = 'cabotapp/statuscheck_form.html'
def get_success_url(self):
return reverse('check', kwargs={'pk': self.object.id})
class ICMPCheckCreateView(CheckCreateView):
model = ICMPStatusCheck
form_class = ICMPStatusCheckForm
class ICMPCheckUpdateView(CheckUpdateView):
model = ICMPStatusCheck
form_class = ICMPStatusCheckForm
class GraphiteCheckUpdateView(CheckUpdateView):
model = GraphiteStatusCheck
form_class = GraphiteStatusCheckForm
class GraphiteCheckCreateView(CheckCreateView):
model = GraphiteStatusCheck
form_class = GraphiteStatusCheckForm
class HttpCheckCreateView(CheckCreateView):
model = HttpStatusCheck
form_class = HttpStatusCheckForm
class HttpCheckUpdateView(CheckUpdateView):
model = HttpStatusCheck
form_class = HttpStatusCheckForm
class JenkinsCheckCreateView(CheckCreateView):
model = JenkinsStatusCheck
form_class = JenkinsStatusCheckForm
def form_valid(self, form):
form.instance.frequency = 1
return super(JenkinsCheckCreateView, self).form_valid(form)
class JenkinsCheckUpdateView(CheckUpdateView):
model = JenkinsStatusCheck
form_class = JenkinsStatusCheckForm
def form_valid(self, form):
form.instance.frequency = 1
return super(JenkinsCheckUpdateView, self).form_valid(form)
class StatusCheckListView(LoginRequiredMixin, ListView):
model = StatusCheck
context_object_name = 'checks'
def get_queryset(self):
return StatusCheck.objects.all().order_by('name').prefetch_related('service_set', 'instance_set')
class StatusCheckDeleteView(LoginRequiredMixin, DeleteView):
model = StatusCheck
success_url = reverse_lazy('checks')
context_object_name = 'check'
template_name = 'cabotapp/statuscheck_confirm_delete.html'
class StatusCheckDetailView(LoginRequiredMixin, DetailView):
model = StatusCheck
context_object_name = 'check'
template_name = 'cabotapp/statuscheck_detail.html'
def render_to_response(self, context, *args, **kwargs):
if context is None:
context = {}
context['checkresults'] = self.object.statuscheckresult_set.order_by(
'-time_complete')[:100]
return super(StatusCheckDetailView, self).render_to_response(context, *args, **kwargs)
class UserProfileUpdateView(LoginRequiredMixin, View):
model = AlertPluginUserData
def get(self, *args, **kwargs):
return HttpResponseRedirect(reverse('update-alert-user-data', args=(self.kwargs['pk'], u'General')))
class UserProfileUpdateAlert(LoginRequiredMixin, View):
template = loader.get_template('cabotapp/alertpluginuserdata_form.html')
model = AlertPluginUserData
def get(self, request, pk, alerttype):
try:
profile = UserProfile.objects.get(user=pk)
except UserProfile.DoesNotExist:
user = User.objects.get(id=pk)
profile = UserProfile(user=user)
profile.save()
profile.user_data()
if alerttype == u'General':
form = GeneralSettingsForm(initial={
'first_name': profile.user.first_name,
'last_name': profile.user.last_name,
'email_address': profile.user.email,
'enabled': profile.user.is_active,
})
else:
plugin_userdata = self.model.objects.get(title=alerttype, user=profile)
form_model = get_object_form(type(plugin_userdata))
form = form_model(instance=plugin_userdata)
return render(request, self.template.template.name, {
'form': form,
'alert_preferences': profile.user_data(),
})
def post(self, request, pk, alerttype):
profile = UserProfile.objects.get(user=pk)
success = False
if alerttype == u'General':
form = GeneralSettingsForm(request.POST)
if form.is_valid():
profile.user.first_name = form.cleaned_data['first_name']
profile.user.last_name = form.cleaned_data['last_name']
profile.user.is_active = form.cleaned_data['enabled']
profile.user.email = form.cleaned_data['email_address']
profile.user.save()
success = True
else:
plugin_userdata = self.model.objects.get(title=alerttype, user=profile)
form_model = get_object_form(type(plugin_userdata))
form = form_model(request.POST, instance=plugin_userdata)
if form.is_valid():
form.save()
success = True
if success:
messages.add_message(request, messages.SUCCESS, 'Updated Successfully', extra_tags='success')
else:
messages.add_message(request, messages.ERROR, 'Error Updating Profile', extra_tags='danger')
return HttpResponseRedirect(reverse('update-alert-user-data', args=(self.kwargs['pk'], alerttype)))
class PluginSettingsView(LoginRequiredMixin, View):
template = loader.get_template('cabotapp/plugin_settings_form.html')
model = AlertPlugin
def get(self, request, plugin_name):
if plugin_name == u'global':
form = CoreSettingsForm()
alert_test_form = AlertTestForm()
else:
plugin = self.model.objects.get(title=plugin_name)
form_model = get_object_form(type(plugin))
form = form_model(instance=plugin)
alert_test_form = AlertTestPluginForm(initial = {
'alert_plugin': plugin
})
return render(request, self.template.template.name, {
'form': form,
'plugins': AlertPlugin.objects.all(),
'plugin_name': plugin_name,
'alert_test_form': alert_test_form
})
def post(self, request, plugin_name):
if plugin_name == u'global':
form = CoreSettingsForm(request.POST)
else:
plugin = self.model.objects.get(title=plugin_name)
form_model = get_object_form(type(plugin))
form = form_model(request.POST, instance=plugin)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS, 'Updated Successfully', extra_tags='success')
else:
messages.add_message(request, messages.ERROR, 'Error Updating Plugin', extra_tags='danger')
return HttpResponseRedirect(reverse('plugin-settings', args=(plugin_name,)))
def get_object_form(model_type):
class AlertPreferencesForm(forms.ModelForm):
class Meta:
model = model_type
fields = '__all__'
def is_valid(self):
return True
return AlertPreferencesForm
class AlertTestForm(forms.Form):
action = reverse_lazy('alert-test')
service = forms.ModelChoiceField(
queryset=Service.objects.all(),
widget=forms.Select(attrs={
'data-rel': 'chosen',
})
)
STATUS_CHOICES = (
(Service.PASSING_STATUS, 'Passing'),
(Service.WARNING_STATUS, 'Warning'),
(Service.ERROR_STATUS, 'Error'),
(Service.CRITICAL_STATUS, 'Critical'),
)
old_status = forms.ChoiceField(
choices=STATUS_CHOICES,
initial=Service.PASSING_STATUS,
widget=forms.Select(attrs={
'data-rel': 'chosen',
})
)
new_status = forms.ChoiceField(
choices=STATUS_CHOICES,
initial=Service.ERROR_STATUS,
widget=forms.Select(attrs={
'data-rel': 'chosen',
})
)
class AlertTestPluginForm(AlertTestForm):
action = reverse_lazy('alert-test-plugin')
service = None
alert_plugin = forms.ModelChoiceField(
queryset=AlertPlugin.objects.filter(enabled=True),
widget=forms.HiddenInput
)
class AlertTestView(LoginRequiredMixin, View):
def trigger_alert_to_user(self, service, user, old_status, new_status):
"""
Clear out all service users and duty shifts, and disable all fallback users.
Then add a single shift for this user, and add this user to users-to-notify.
This should ensure we never alert anyone except the user triggering the alert test.
"""
with transaction.atomic():
sid = transaction.savepoint()
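            # Everything below runs inside a savepoint and is rolled back after the test alert fires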
service.update_status()
service.status_checks.update(active=False)
service.overall_status = new_status
service.old_overall_status = old_status
service.last_alert_sent = None
check = StatusCheck(name='ALERT_TEST')
check.save()
StatusCheckResult.objects.create(
status_check=check,
time=timezone.now(),
time_complete=timezone.now(),
succeeded=new_status == Service.PASSING_STATUS)
check.last_run = timezone.now()
check.save()
service.status_checks.add(check)
service.users_to_notify.clear()
service.users_to_notify.add(user)
service.unexpired_acknowledgements().delete()
Shift.objects.update(deleted=True)
UserProfile.objects.update(fallback_alert_user=False)
Shift(
start=timezone.now() - timedelta(days=1),
end=timezone.now() + timedelta(days=1),
uid='test-shift',
last_modified=timezone.now(),
user=user
).save()
service.alert()
transaction.savepoint_rollback(sid)
def post(self, request):
form = AlertTestForm(request.POST)
if form.is_valid():
data = form.clean()
service = data['service']
self.trigger_alert_to_user(service, request.user, data['old_status'], data['new_status'])
return JsonResponse({"result": "ok"})
return JsonResponse({"result": "error"}, status=400)
class AlertTestPluginView(AlertTestView):
def post(self, request):
form = AlertTestPluginForm(request.POST)
if form.is_valid():
data = form.clean()
with transaction.atomic():
sid = transaction.savepoint()
service = Service.objects.create(
name='test-alert-service'
)
service.alerts.add(data['alert_plugin'])
self.trigger_alert_to_user(service, request.user, data['old_status'], data['new_status'])
transaction.savepoint_rollback(sid)
return JsonResponse({"result": "ok"})
return JsonResponse({"result": "error"}, status=400)
class CoreSettingsForm(forms.Form):
pass
class GeneralSettingsForm(forms.Form):
first_name = forms.CharField(label='First name', max_length=30, required=False)
last_name = forms.CharField(label='Last name', max_length=30, required=False)
email_address = forms.CharField(label='Email Address', max_length=75,
required=False) # We use 75 and not the 254 because Django 1.6.8 only supports
# 75. See commit message for details.
enabled = forms.BooleanField(label='Enabled', required=False)
class InstanceListView(LoginRequiredMixin, ListView):
model = Instance
context_object_name = 'instances'
def get_queryset(self):
return Instance.objects.all().order_by('name').prefetch_related('status_checks')
class ServiceListView(LoginRequiredMixin, ListView):
model = Service
context_object_name = 'services'
def get_queryset(self):
return Service.objects.all().order_by('name').prefetch_related('status_checks')
class InstanceDetailView(LoginRequiredMixin, DetailView):
model = Instance
context_object_name = 'instance'
def get_context_data(self, **kwargs):
context = super(InstanceDetailView, self).get_context_data(**kwargs)
date_from = date.today() - relativedelta(day=1)
context['report_form'] = StatusCheckReportForm(initial={
'checks': self.object.status_checks.all(),
'service': self.object,
'date_from': date_from,
'date_to': date_from + relativedelta(months=1) - relativedelta(days=1)
})
return context
class ServiceDetailView(LoginRequiredMixin, DetailView):
model = Service
context_object_name = 'service'
def get_context_data(self, **kwargs):
context = super(ServiceDetailView, self).get_context_data(**kwargs)
date_from = date.today() - relativedelta(day=1)
context['report_form'] = StatusCheckReportForm(initial={
'alerts': self.object.alerts.all(),
'checks': self.object.status_checks.all(),
'service': self.object,
'date_from': date_from,
'date_to': date_from + relativedelta(months=1) - relativedelta(days=1)
})
return context
class InstanceCreateView(LoginRequiredMixin, CreateView):
model = Instance
form_class = InstanceForm
def form_valid(self, form):
ret = super(InstanceCreateView, self).form_valid(form)
if self.object.status_checks.filter(polymorphic_ctype__model='icmpstatuscheck').count() == 0:
self.generate_default_ping_check(self.object)
return ret
def generate_default_ping_check(self, obj):
pc = ICMPStatusCheck(
name="Default Ping Check for %s" % obj.name,
frequency=5,
importance=Service.ERROR_STATUS,
debounce=0,
created_by=None,
)
pc.save()
obj.status_checks.add(pc)
def get_success_url(self):
return reverse('instance', kwargs={'pk': self.object.id})
def get_initial(self):
if self.initial:
initial = self.initial
else:
initial = {}
service_id = self.request.GET.get('service')
if service_id:
try:
service = Service.objects.get(id=service_id)
initial['service_set'] = [service]
except Service.DoesNotExist:
pass
return initial
@login_required
def acknowledge_alert(request, pk):
service = Service.objects.get(pk=pk)
service.acknowledge_alert(user=request.user)
return HttpResponseRedirect(reverse('service', kwargs={'pk': pk}))
@login_required
def remove_acknowledgement(request, pk):
service = Service.objects.get(pk=pk)
service.remove_acknowledgement(user=request.user)
return HttpResponseRedirect(reverse('service', kwargs={'pk': pk}))
class ServiceCreateView(LoginRequiredMixin, CreateView):
model = Service
form_class = ServiceForm
def __init__(self, *args, **kwargs):
super(ServiceCreateView, self).__init__(*args, **kwargs)
def get_success_url(self):
return reverse('service', kwargs={'pk': self.object.id})
class InstanceUpdateView(LoginRequiredMixin, UpdateView):
model = Instance
form_class = InstanceForm
def get_success_url(self):
return reverse('instance', kwargs={'pk': self.object.id})
class ServiceUpdateView(LoginRequiredMixin, UpdateView):
model = Service
form_class = ServiceForm
def get_success_url(self):
return reverse('service', kwargs={'pk': self.object.id})
class ServiceDeleteView(LoginRequiredMixin, DeleteView):
model = Service
success_url = reverse_lazy('services')
context_object_name = 'service'
template_name = 'cabotapp/service_confirm_delete.html'
class InstanceDeleteView(LoginRequiredMixin, DeleteView):
model = Instance
success_url = reverse_lazy('instances')
context_object_name = 'instance'
template_name = 'cabotapp/instance_confirm_delete.html'
class ShiftListView(LoginRequiredMixin, ListView):
model = Shift
context_object_name = 'shifts'
def get_queryset(self):
return Shift.objects.filter(
end__gt=datetime.utcnow().replace(tzinfo=utc),
deleted=False).order_by('start')
class StatusCheckReportView(LoginRequiredMixin, TemplateView):
template_name = 'cabotapp/statuscheck_report.html'
def get_context_data(self, **kwargs):
form = StatusCheckReportForm(self.request.GET)
if form.is_valid():
return {'checks': form.get_report(), 'service': form.cleaned_data['service']}
class SetupForm(forms.Form):
username = forms.CharField(label='Username', max_length=100, required=True)
email = forms.EmailField(label='Email', max_length=200, required=False)
password = forms.CharField(label='Password', required=True, widget=forms.PasswordInput())
class SetupView(View):
template = loader.get_template('cabotapp/setup.html')
def get(self, request):
if not cabot_needs_setup():
return redirect('login')
form = SetupForm(initial={
'username': 'admin',
})
return HttpResponse(self.template.render({'form': form}, request))
def post(self, request):
if not cabot_needs_setup():
return redirect('login')
form = SetupForm(request.POST)
if form.is_valid():
get_user_model().objects.create_superuser(
username=form.cleaned_data['username'],
email=form.cleaned_data['email'],
password=form.cleaned_data['password'],
)
return redirect('login')
return HttpResponse(self.template.render({'form': form}, request), status=400)
# Misc JSON api and other stuff
def checks_run_recently(request):
"""
Report whether the check runners are alive by looking for any StatusCheckResult completed within the last 10 minutes.
"""
ten_mins = datetime.utcnow().replace(tzinfo=utc) - timedelta(minutes=10)
most_recent = StatusCheckResult.objects.filter(time_complete__gte=ten_mins)
if most_recent.exists():
return HttpResponse('Checks running')
return HttpResponse('Checks not running')
def about(request):
""" Very simple about page """
from cabot import version
return render(request, 'cabotapp/about.html', {
'cabot_version': version,
})
def jsonify(d):
return HttpResponse(json.dumps(d), content_type='application/json')
@login_required
def graphite_api_data(request):
metric = request.GET.get('metric')
if request.GET.get('frequency'):
mins_to_check = int(request.GET.get('frequency'))
else:
mins_to_check = None
data = None
matching_metrics = None
try:
data = get_data(metric, mins_to_check)
except requests.exceptions.RequestException as e:
pass
if not data:
try:
matching_metrics = get_matching_metrics(metric)
except requests.exceptions.RequestException as e:
return jsonify({'status': 'error', 'message': str(e)})
matching_metrics = {'metrics': matching_metrics}
return jsonify({'status': 'ok', 'data': data, 'matchingMetrics': matching_metrics})
|
py | 1a3a20b86b4f9cb627f08b6816347396ea438bde | #!/usr/bin/env python
__copyright__ = 'Copyright 2013-2014, http://radical.rutgers.edu'
__license__ = 'MIT'
import os
import sys
verbose = os.environ.get('RADICAL_PILOT_VERBOSE', 'REPORT')
os.environ['RADICAL_PILOT_VERBOSE'] = verbose
import radical.pilot as rp
import radical.utils as ru
# ------------------------------------------------------------------------------
#
# READ the RADICAL-Pilot documentation: https://radicalpilot.readthedocs.io/
#
# ------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
#
if __name__ == '__main__':
# we use a reporter class for nicer output
report = ru.Reporter(name='radical.pilot')
report.title('Getting Started (RP version %s)' % rp.version)
# use the resource specified as argument, fall back to localhost
if len(sys.argv) > 2: report.exit('Usage:\t%s [resource]\n\n' % sys.argv[0])
elif len(sys.argv) == 2: resource = sys.argv[1]
else : resource = 'local.localhost'
# Create a new session. No need to try/except this: if session creation
# fails, there is not much we can do anyways...
session = rp.Session()
# all other pilot code is now tried/excepted. If an exception is caught, we
# can rely on the session object to exist and be valid, and we can thus tear
# the whole RP stack down via a 'session.close()' call in the 'finally'
# clause...
try:
# read the config used for resource details
report.info('read config')
config = ru.read_json('%s/config.json' % os.path.dirname(os.path.abspath(__file__)))
report.ok('>>ok\n')
report.header('submit pilots')
# Add a Pilot Manager. Pilot managers manage one or more ComputePilots.
pmgr = rp.PilotManager(session=session)
# Define an [n]-core local pilot that runs for [x] minutes
# Here we use a dict to initialize the description object
pd_init = {
'resource' : resource,
'runtime' : 15, # pilot runtime (min)
'exit_on_error' : True,
'project' : config[resource].get('project', None),
'queue' : config[resource].get('queue', None),
'access_schema' : config[resource].get('schema', None),
'cores' : config[resource].get('cores', 1),
'gpus' : config[resource].get('gpus', 0),
}
pdesc = rp.ComputePilotDescription(pd_init)
# Launch the pilot.
pilot = pmgr.submit_pilots(pdesc)
report.header('submit units')
# Register the ComputePilot in a UnitManager object.
umgr = rp.UnitManager(session=session)
umgr.add_pilots(pilot)
# Create a workload of ComputeUnits.
# Each compute unit runs a specific `echo` command
n = 128 # number of units to run
report.info('create %d unit description(s)\n\t' % n)
cuds = list()
for i in range(0, n):
# create a new CU description, and fill it.
# Here we don't use dict initialization.
cud = rp.ComputeUnitDescription()
cud.environment = {'TEST' : 'jabberwocky'}
cud.executable = '/bin/echo'
cud.arguments = ['$RP_UNIT_ID greets $TEST']
cuds.append(cud)
report.progress()
report.ok('>>ok\n')
# Submit the previously created ComputeUnit descriptions to the
# PilotManager. This will trigger the selected scheduler to start
# assigning ComputeUnits to the ComputePilots.
units = umgr.submit_units(cuds)
# Wait for all compute units to reach a final state (DONE, CANCELED or FAILED).
report.header('gather results')
umgr.wait_units()
report.info('\n')
for unit in units:
report.plain(' * %s: %s, exit: %3s, out: %s\n'
% (unit.uid, unit.state[:4],
unit.exit_code, unit.stdout.strip()[:35]))
except Exception as e:
# Something unexpected happened in the pilot code above
report.error('caught Exception: %s\n' % e)
raise
except (KeyboardInterrupt, SystemExit):
# the callback called sys.exit(), and we can here catch the
# corresponding KeyboardInterrupt exception for shutdown. We also catch
# SystemExit (which gets raised if the main threads exits for some other
# reason).
report.warn('exit requested\n')
finally:
# always clean up the session, no matter if we caught an exception or
# not. This will kill all remaining pilots.
report.header('finalize')
session.close()
report.header()
# ------------------------------------------------------------------------------
|
py | 1a3a241b29e3df2f7dc08f8dc07a3b4c74bf0ed4 | """
File: 1514.py
Title: Path with Maximum Probability
Difficulty: Medium
URL: https://leetcode.com/problems/path-with-maximum-probability/
"""
import heapq
import unittest
from collections import defaultdict, deque
from typing import List
class Solution:
def maxProbability(self,
n: int,
edges: List[List[int]],
probs: List[float],
start: int,
end: int) -> float:
adjacents = defaultdict(dict)
for edge, prob in zip(edges, probs):
a, b = edge
adjacents[a][b] = prob
adjacents[b][a] = prob
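# Dijkstra-style search that maximises the product of edge probabilities.
# heapq is a min-heap, so probabilities are stored negated; the start node has probability 1, stored as -1.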
heap = [(-1, start)]
visited = [False] * n
while heap:
neg_prob, here = heapq.heappop(heap)
if visited[here]:
continue
if here == end:
return -neg_prob
visited[here] = True
for there in adjacents[here]:
if not visited[there]:
there_prob = neg_prob * adjacents[here][there]
heapq.heappush(heap, (there_prob, there))
return 0.0
class SolutionTestCase(unittest.TestCase):
def test_example1(self):
# Input
n = 3
edges = [[0, 1], [1, 2], [0, 2]]
probs = [0.5, 0.5, 0.2]
start = 0
end = 2
# Output
output = 0.25000
solution = Solution()
self.assertEqual(solution.maxProbability(n, edges, probs, start, end),
output)
def test_example2(self):
# Input
n = 3
edges = [[0, 1], [1, 2], [0, 2]]
probs = [0.5, 0.5, 0.3]
start = 0
end = 2
# Output
output = 0.30000
solution = Solution()
self.assertEqual(solution.maxProbability(n, edges, probs, start, end),
output)
def test_example3(self):
# Input
n = 3
edges = [[0, 1]]
probs = [0.5]
start = 0
end = 2
# Output
output = 0.00000
solution = Solution()
self.assertEqual(solution.maxProbability(n, edges, probs, start, end),
output)
if __name__ == "__main__":
unittest.main()
|
py | 1a3a250b7bdf379dbcdc0c09ffa219d7d8aa2278 | """arikefoods URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('allauth.urls')),
path('', include('home.urls')),
path('food_menu/', include('menu.urls')),
path('', include('order.urls')),
path('checkout/', include('checkout.urls')),
path('', include('recipe_blog.urls')),
path('feedback/', include('feedback.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
py | 1a3a2676f22bd8ef1c8f63624fc09674876c9e9b | # Generated by Django 1.11.15 on 2018-08-17 18:14
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('credentials', '0003_auto_20170525_1109'),
]
operations = [
migrations.CreateModel(
name='NotifyCredentialsConfig',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('arguments', models.TextField(blank=True, default='', help_text='Useful for manually running a Jenkins job. Specify like "--start-date=2018 --courses A B".')),
('changed_by', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='Changed by')),
],
options={
'verbose_name': 'notify_credentials argument',
},
),
]
|
py | 1a3a26f6dc04bd10f1fa64936f3005016c5fc005 | from direct.interval.IntervalGlobal import *
from otp.nametag.NametagConstants import *
from panda3d.core import *
from random import choice
FlippyWheelbarrowPies = [
[1.16, 11.24, 7.0, 246.8, 351.25, 0.0, 1.6, 1.4, 1.8],
[2.27, 8.02, 6.35, 104.04, 311.99, 9.46, 1.35, 1.35, 1],
[-1.23, 7.33, 6.88, 276.34, 28.61, 350.54, 1.41, 1.41, 1.6],
[0.27, 8.24, 6.42, 198.15, 351.87, 355.24, 1.93, 2, 2],
[0.06, 5.23, 6.78, 63.43, 355.91, 15.26, 1.3, 1.6, 1.8],
[-0.81, 11.37, 6.82, 326.31, 5.19, 19.98, 1.76, 1.86, 1.5],
[1.35, 10.09, 5.92, 35.54, 353.66, 343.3, 1.5, 1.9, 1.8],
[1.9, 5.59, 6.5, 75.96, 326.31, 8, 1.76, 1.56, 1.5],
[-1.74, 5.42, 6.28, 327.53, 318.81, 4.76, 1.8, 2, 2],
[-1.55, 9.22, 5.72, 266.53, 341.57, 0.0, 2.09, 1.68, 1.81]]
IntroMusic = 'phase_4/audio/bgm/EE_Intro.ogg'
AnnouncementMusic = 'phase_4/audio/bgm/EE_Announcement.ogg'
VictoryMusic = 'phase_4/audio/bgm/EE_Celebration.ogg'
SadMusic = 'phase_4/audio/bgm/EE_DiesandPies.ogg'
CreditsMusic = 'phase_4/audio/bgm/EE_Theme.ogg'
SurleeTips = [
'Always watch all sides of you, the Cogs are sneaky and love to backstab.',
"Make sure to not only pie the cogs, but your fellow toons as well! There's lots of Laff to go around.",
"Mover and Shakers give tremors as they walk -- You'll need to hit them from a distance.",
'Come on, get more pies! Fight for the town!',
'The bigger a Cog is, the faster they walk and the more they talk.',
"Don't let them take away our fun! Stop them!",
"The Cog's business is too boring to bear. Don't let them talk to you.",
"That's what I'm talking about. Keep at it!",
"Flippy, we need more pies over here. They're flying out quick.",
"Doctor Dimm, have you had any luck on Slappy's stand?",
'Keep a close eye on your pie count, it can run out fast.']
BalloonBasePosition = [
-15, 33, 1.1]
BalloonElectionPosition = [166.5, 64.0, 53.0]
BalloonScale = 2.5
FlippyGibPies = [
"Let 'em fly!",
"Wow, I've never seen someone carry so many pies.",
'Come back any time.',
'Ready for WAR?',
'Let the pies fly!',
'Clobber the competition! Try not to hit him too hard, though.',
'Are you really going to eat that many pies, __NAME__?',
'Oof, I better start baking more pies!']
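# Note: choice() is evaluated once at import time, so FlippyGibPiesChoice (and SlappySpeeches below) stays fixed until the module is reloaded.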
FlippyGibPiesChoice = choice(FlippyGibPies)
FlippyDelayResponse = 1.0
FlippyPhraseIds = [
[100, 101, 102, 103, 104, 105],
[107, 108],
[200, 201, 202, 206, 207],
[203, 204, 205],
[208, 209],
[301],
[500, 21002],
[505, 506, 507, 5602],
[508, 511, 1001, 1003, 1005, 1006, 1126, 1127, 5603, 1106, 1107, 1108, 1109, 1110, 1124, 1125, 1126, 1127, 1128],
[509],
[510],
[600, 601, 602, 603],
[700, 701, 702, 704, 705, 706, 707],
[703],
[800, 801, 802, 803, 804],
[807],
[808],
[901],
[900, 902, 903, 904, 905],
[1200],
[1500],
[1501],
[1508],
[1415],
[1520, 1527],
[1526],
[1112, 1114, 1554, 1555, 1556, 1557, 1559, 812],
[1558],
[5400],
[5401, 5407, 5408, 5409],
[5402],
[5404],
[5405],
[5500],
[5501],
[5502],
[5503],
[5504],
[5505],
[5506],
[5507],
[5600, 5601],
[10100],
[10101, 10102],
[10103, 10104],
[10105],
[5700],
[1130, 1131, 1132, 1133, 1136],
[5406],
[5411],
[1600],
[1605],
[1104, 1105]]
FlippyPhrases = [
'Hey there, __NAME__! How are you doing?',
"I'm here, present and accounted for!",
'Alrighty, catcha later!',
'Thanks! To you as well.',
'Leaving so soon?',
"Ha, that's a funny phrase. Owooo!",
'No problem.',
'You betcha!',
'I would if I could, but I should stay here in case new toons come along.',
'Thanks for the offer, but I think I have things under control.',
"Sorry, I'm not allowed to make friends over the election period. :(",
"Right back 'atcha!",
'Aw, shucks. I like yours too!',
'Not sure if I should consider that a compliement or...',
"No problem. It's all good.",
'Huh. I forget.',
'Good, because I can only respond to SpeedChat. Haha!',
"It's probably the leftover ingredients from all of those pies. Pee-yew!",
"I'm sorry. Did I do something wrong? :(",
"I haven't gotten any in a while. I guess you could say that the election is my ToonTask!",
'All the cream pies we need!',
'Oh? No problem, just grab some from the wheelbarrow.',
'Totally! Throw is my favorite kind of gag.',
"Uh oh, that's no good. You should find an ice cream cone around here.",
"I'm wide open, pass it here!",
'Sorry, only pies here.',
'...like, the gear? What have gears ever done to you? :(',
'Hmm, good idea. Pies are going so fast that we might have to switch to cupcakes by the time of the election.',
"Toontown... Offline? I've heard Toons say that a few times, but I can never figure out what they mean.",
'Hmm, well I did spot a butterfly over there.',
'Oof, plenty of times at first. Karts are tricky to get used to.',
"I do, actually! I don't use it often.",
"Hiya, viewers! Don't forget to Flip for Flippy!",
':D',
':C',
':)',
':(',
':P',
':O',
'>:C',
'>:)',
"I'm doing pretty great! And you?",
"I'm not allowed to vote, silly!",
"That's the spirit!",
'Slappy is pretty fun, too. Great balloon. Though... See that plane stuck up there...?',
'Me too. Alec did a great job, and I hear there are more coming.',
'Ooooh, I just love that word! Good to see it catching on.',
"Don't worry, I have time.",
'At least you have me to keep you company.',
"I probably should. There's way too many butterflies here!",
'Please, take as many as you want!',
'I think Slappy has some over at his stand.',
"We're already in Toontown Central!"]
SlappySpeech1 = [
'Hiya! Up for a ride?',
'Off we goooo!',
"In case you didn't get it back there, that was a pun.",
'"Up" for a ride. Get it?',
'Haha! I quack myself up.',
'That was another pun!',
"Do you know any good puns? I'm full of them.",
"That wasn't a pun, though. I should have had one there. It was fitting.",
"Oh man, we're almost back already?",
'Well, at least we had a WHALE of a time!',
"Err- no. Wrong pun. That one didn't make sense.",
"I'll CATCHA later! Get it, because of the whale pun? It makes sense now. I planned that."]
SlappySpeech2 = [
'Hello! Want a ride, I assume?',
"Good! It would be kind of weird if you didn't.",
"I take it you're a balloon fanatic like myself, eh?",
"No? Oh. I don't see how you can't be.",
"Just look at this thing. It's a 500 pound bag floating in the sky!",
"If that isn't amazing, I don't know what is.",
"Small balloons, too. You know, I've always wanted to be a balloon salesman.",
"I'd get my own little cart and everything!",
'They soar through the skies, going beyond what we know.',
"Maybe even into another world. Who knows what they'd see on the outskirts of Toontown?",
"I've always wondered what kind of mysteries lie out there. The balloons know.",
"D'awh, here already. I was just about to get into the history of balloons. Come back any time!"]
SlappySpeech3 = [
'Hey there! Yep, just hop on in!',
"You know, some may consider it rude to jump into someone else's balloon without permission.",
"In fact, I'm going to have to ask you to step out now.",
"Yeah, just right off the side there. It's not too high up yet.",
"I'm joking! I'm joking. Don't jump out, the ride is free.",
'Can you see your house from up here?',
"I can't. This cardboard hill is in the way.",
"I've always wondered why they put those up. Why not enjoy the scenery?",
'Not to mention the Jellybeans they could have saved by not buying paint.',
'It seems counterproductive to me. Those are definitely getting torn down.',
'That is, if I get elected. Hey, are you voting for me?',
"Nonono, don't tell me. I want to be surprised. Remember this free balloon ride at the polls, though!"]
SlappySpeech5 = [
'Oooh, look who it is!',
'I was wondering when you would come by for a ride.',
'How are things going? Having fun with this election excitement?',
"I know I certainly am. I've been on hundreds of these balloon rides, and they never get old.",
'You get used to the air sickness after a while.',
'Woah, look over there! You can see some of the grey!',
'The grey is just one of those many things in Toontown that bewilders me.',
'An undrawn area, just waiting for color. Can you imagine the creativity?',
"It's an unexplored blank canvas of imagination.",
"You know what? You and I -- after this election, we're going to go out there.",
'You and I will figure out the secrets of the grey, unleash the creativity it holds. I promise you on that.',
"We'll find out what it is, for not only Toontown but for the whole Tooniverse. Make sure you hold me to it! "]
SlappySpeechChoices = [
SlappySpeech1, SlappySpeech2, SlappySpeech3, SlappySpeech5]
SlappySpeeches = choice(SlappySpeechChoices)
NumBalloonPaths = 1
def generateFlightPaths(balloon):
flightPaths = []
flightPaths.append(Sequence(Wait(0.5),
balloon.balloon.posHprInterval(1.5, Point3(-19, 35, 3), (0, 2, 2)),
balloon.balloon.posHprInterval(1.5, Point3(-23, 38, 5), (0, -2, -2)),
balloon.balloon.posHprInterval(8.0, Point3(-53, 75, 24), (0, 0, 0)),
balloon.balloon.posHprInterval(0.5, Point3(-54, 76, 25), (5, 2, 2)),
balloon.balloon.posHprInterval(11.0, Point3(-105, 33, 54), (180, -2, -2)),
balloon.balloon.posHprInterval(0.5, Point3(-106, 34, 55), (175, -4, 0)),
balloon.balloon.posHprInterval(10.0, Point3(-100, -60, 54), (0, 2, -2)),
balloon.balloon.posHprInterval(0.5, Point3(-97.5, -59.5, 54), (-2, -2, 2)),
balloon.balloon.posHprInterval(18.0, Point3(60, -10, 54), (-70, 0, 0)),
balloon.balloon.posHprInterval(0.5, Point3(62, -11, 54), (-65, -2, 2)),
balloon.balloon.posHprInterval(15.0, Point3(-15, 33, 1.1), (0, 0, 0))))
return flightPaths
def generateToonFlightPaths(balloon):
toonFlightPaths = []
toonFlightPaths.append(Sequence(Wait(0.5), base.localAvatar.posInterval(1.5, Point3(-19, 35, 3)), base.localAvatar.posInterval(1.5, Point3(-23, 38, 5)), base.localAvatar.posInterval(8.0, Point3(-53, 75, 24)), base.localAvatar.posInterval(0.5, Point3(-54, 76, 25)), base.localAvatar.posInterval(11.0, Point3(-105, 33, 54)), base.localAvatar.posInterval(0.5, Point3(-106, 34, 55)), base.localAvatar.posInterval(10.0, Point3(-100, -60, 54)), base.localAvatar.posInterval(0.5, Point3(-99, -59, 53)), base.localAvatar.posInterval(18.0, Point3(60, -10, 54)), base.localAvatar.posInterval(0.5, Point3(62, -11, 54)), base.localAvatar.posInterval(15.0, Point3(-15, 33, 1.1))))
return toonFlightPaths
def generateSpeechSequence(balloon):
speechSequence = Sequence(Func(balloon.slappy.setChatAbsolute, SlappySpeeches[0], CFSpeech | CFTimeout), Wait(4), Func(balloon.slappy.setChatAbsolute, SlappySpeeches[1], CFSpeech | CFTimeout), Wait(6), Func(balloon.slappy.setChatAbsolute, SlappySpeeches[2], CFSpeech | CFTimeout), Wait(4), Func(balloon.slappy.setChatAbsolute, SlappySpeeches[3], CFSpeech | CFTimeout), Wait(6), Func(balloon.slappy.setChatAbsolute, SlappySpeeches[4], CFSpeech | CFTimeout), Wait(10), Func(balloon.slappy.setChatAbsolute, SlappySpeeches[5], CFSpeech | CFTimeout), Wait(6), Func(balloon.slappy.setChatAbsolute, SlappySpeeches[6], CFSpeech | CFTimeout), Wait(10), Func(balloon.slappy.setChatAbsolute, SlappySpeeches[7], CFSpeech | CFTimeout), Wait(6), Func(balloon.slappy.setChatAbsolute, SlappySpeeches[8], CFSpeech | CFTimeout), Wait(7), Func(balloon.slappy.setChatAbsolute, SlappySpeeches[9], CFSpeech | CFTimeout), Wait(5), Func(balloon.slappy.setChatAbsolute, SlappySpeeches[10], CFSpeech | CFTimeout), Wait(6), Func(balloon.slappy.setChatAbsolute, SlappySpeeches[11], CFSpeech | CFTimeout))
return speechSequence |
py | 1a3a2783767e4a7e9fe2614e0bc48b04cf8a46c7 | # Generated by Django 2.2.13 on 2020-07-21 19:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pipeline', '0034_auto_20200721_1753'),
]
operations = [
migrations.CreateModel(
name='PostSecondaryInstitution',
fields=[
('location_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='pipeline.Location')),
('institution_type', models.CharField(blank=True, max_length=255, null=True)),
('economic_development_region', models.CharField(blank=True, max_length=255, null=True)),
],
bases=('pipeline.location',),
),
]
|
py | 1a3a2813fd94ced5d033bb187275a7905b675ca2 | #coding=utf8
import traceback
from extensions.database import db
from extensions.hueyext import hueyapp
from extensions.celeryext import celeryapp
from models.asyncmodel import Async
from models.warehouse import Warehouse, Area, Workarea, Location
from models.inv import Good, Category, Inv
from models.auth import Partner, Seq
from models.stockout import Stockout, StockoutLine
from blueprints.stockout.action import StockoutAction
from utils.upload import get_file_content
from utils.functions import clear_empty
from utils.base import DictNone
#
#@hueyapp.task()
@celeryapp.task
def import_stockout(company_code, warehouse_code, owner_code, args, task_id, user_code=None, user_name=None):
ret = import_stockout_sync(company_code, warehouse_code, owner_code, args, task_id, user_code=user_code, user_name=user_name)
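# Close the scoped session so the worker does not carry a stale connection into the next task.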
db.session.close()
return ret
def import_stockout_sync(company_code, warehouse_code, owner_code, args, task_id, user_code=None, user_name=None):
task = Async.query.get(task_id)
print('handle async task_id ==> ', task_id, task.async_id)
task.get_file()
content = get_file_content(task.link)
success = True
exc_info = ''
try:
order_dict = DictNone()
task.code = 'stockout'
for row in content:
d = DictNone(clear_empty(row))
if not d.erp_order_code:
continue
# Create the order header the first time this erp_order_code appears (codes that already exist are skipped)
if d.erp_order_code not in order_dict:
if Stockout.query.filter_by(company_code=company_code, warehouse_code=warehouse_code, owner_code=owner_code, \
erp_order_code=d.erp_order_code).count() > 0:
continue
order = Stockout(company_code=company_code, warehouse_code=warehouse_code, owner_code=owner_code,
source='import', user_code=user_code, user_name=user_name)
order_dict[d.erp_order_code] = order
order.erp_order_code = d.erp_order_code
order.order_code = Seq.make_order_code('C', company_code, warehouse_code, owner_code)
order.xtype = d.xtype or 'B2B'
order.order_type = d.order_type
order.date_planned = d.date_planned
order.source = 'custom'
order.remark = d.remark or ''
order.partner_code = d.partner_code or ''
order.partner_name = d.partner_name or ''
order.sender_info = {'name': d.sender, 'tel': d.sender_tel, 'address': d.sender_address}
order.receiver_info = {'name': d.receiver, 'tel': d.receiver_tel, 'address': d.receiver_address}
order.supplier_info = {'supplier_code': d.supplier_code}
order.express_info = {'express_code': d.express_code}
order.invoice_info = {'invoice': d.invoice}
# order.JSON = {'custom1': d.custom1, 'custom2': d.custom2, 'custom3': d.custom3, 'custom4': d.custom4}
db.session.add(order)
else:
order = order_dict[d.erp_order_code]
if not d.sku or not d.qty:
continue
line = StockoutLine(company_code=company_code, warehouse_code=warehouse_code, owner_code=owner_code)
line.erp_order_code = order.erp_order_code
line.order_code = order.order_code
line.sku = d.sku
line.barcode = d.barcode or d.sku
line.name = d.name or d.sku
line.qty = int(d.qty)
line.remark = d.remark or ''
line.supplier_code = d.supplier_code or ''
# line.supplier_code = d.supplier_code or ''
# line.quality_type = d.quality_type or 'ZP'
# line.product_date = d.product_date or None
# line.expire_date = d.expire_date or None
# line.batch_code = d.batch_code or ''
# line.virtual_warehouse = d.virtual_warehouse or ''
# line.spec = d.spec or ''
line.style = d.style or ''
line.color = d.color or ''
line.size = d.size or ''
line.unit = d.unit or ''
# line.JSON = {'custom1': d.custom1, 'custom2': d.custom2, 'custom3': d.custom3, 'custom4': d.custom4}
db.session.add(line)
line.stockout = order
db.session.flush()
exc_info = 'save stockout: %s'%len(content)
except:
exc_info = traceback.format_exc()
success = False
if success:
db.session.commit()
task.state = 'done'
task.exc_info = 'SUCCESS'
else:
db.session.rollback()
task.state = 'fail'
task.exc_info = exc_info[-1500:]
print(exc_info)
db.session.commit()
|
py | 1a3a2942e6544224d4762106c960f4da206b31ed | # -*- coding:utf-8 -*-
from __future__ import absolute_import
"""
Word-vector benchmark on a ~20K vocabulary.
Word vectors:
- size: 19527 x 300D
- source: [Chinese-Word-Vectors: sgns.sikuquanshu.word.bz2](https://github.com/Embedding/Chinese-Word-Vectors)
Benchmark results:
- faiss: load index, 0.82s; search 100 times by word, 1.08s; search 100 times by vec, 1.06s
- gensim: load index, 5.80s; search 100 times by word, 1.64s; search 100 times by vec, 1.62s
"""
import bz2
import logging
import pickle
import time
import gensim
import numpy as np
import os
from pyxtools import global_init_logger
from pyxtools.faiss_tools import faiss
class BasicBenchmark(object):
""" Basic Class """
def __init__(self, similar_top_n: int = 20):
""" init """
self.logger = logging.getLogger(self.__class__.__name__)
self.similar_top_n = similar_top_n
self.dimension = None
self.result_dict = {}
self.word_vec_model_file = "vec.model"
self._word_vec_dict = {}
def prepare(self):
""" Preparation work. """
self._global_prepare()
def _global_prepare(self):
""" """
if not os.path.exists(self.word_vec_model_file):
with open(self.word_vec_model_file, "wb") as fw:
with bz2.BZ2File('./sgns.sikuquanshu.word.bz2', 'rb') as fr:
fw.write(fr.read())
@staticmethod
def get_word_list() -> [str]:
""" Words used for the similarity queries. """
return ["计", "算", "机", "词", "向", "量", "囧"]
def run(self):
# prepare
self.prepare()
# init
time_start = time.time()
self.init()
self.logger.info("Init: cost {} s!".format(time.time() - time_start))
# search similar words
time_start = time.time()
for i in range(100):
self.search()
for word in self.get_word_list():
result_list = self.result_dict[word]
self.logger.info("{}>>\n{}".format(
word, "\n".join([result for result in result_list])
))
self.logger.info("Search 100 times by word: cost {} s!".format(time.time() - time_start))
# search similar words by vec
self.result_dict.clear()
time_start = time.time()
for i in range(100):
self.vec_search()
for word in self.get_word_list():
result_list = self.result_dict[word]
self.logger.info("{}>>\n{}".format(
word, "\n".join([result for result in result_list])
))
self.logger.info("Search 100 times by vec: cost {} s!".format(time.time() - time_start))
def init(self):
raise NotImplementedError
def search(self):
raise NotImplementedError
def vec_search(self, ):
raise NotImplementedError
def save_result_dict(self, word: str, result: str):
if word not in self.result_dict:
self.result_dict[word] = [result]
else:
result_list = self.result_dict[word]
if result not in result_list:
self.result_dict[word].append(result)
def load_pre_trained_model(self, ):
""" Return the pre-trained word2vec model. """
gensim_model = gensim.models.KeyedVectors.load_word2vec_format(self.word_vec_model_file, binary=False)
self.dimension = gensim_model.vector_size
return gensim_model
class GensimBenchmark(BasicBenchmark):
""" Gensim """
def __init__(self):
super(GensimBenchmark, self).__init__()
self._model = None
def init(self):
self._model = self.load_pre_trained_model()
for word in self.get_word_list():
self._word_vec_dict[word] = self._model.get_vector(word)
def search(self):
for word in self.get_word_list():
result = ", ".join([item[0] for item in self._model.similar_by_word(word, topn=self.similar_top_n)])
self.save_result_dict(word, result)
def vec_search(self):
""" Search using the raw word vector instead of the word itself. """
for word in self.get_word_list():
word_vec = self._word_vec_dict[word]
result = ", ".join(
[item[0] for item in self._model.similar_by_word(word_vec, topn=self.similar_top_n + 1)[1:]]
)
self.save_result_dict(word, result)
class FaissBenchmark(BasicBenchmark):
""" Faiss """
def __init__(self):
super(FaissBenchmark, self).__init__()
self._model = None
self._word_detail_info = None
self.faiss_index_file = "./faiss.index"
self.faiss_index_detail_pkl = "./faiss.pkl"
def prepare(self):
""" Convert the gensim-format model into a Faiss index. """
super(FaissBenchmark, self).prepare()
# turn model from gensim to faiss index
if os.path.exists(self.faiss_index_file) and os.path.exists(self.faiss_index_detail_pkl):
return
# load model to dict
self.logger.info("loading model...")
time_start = time.time()
gensim_model = self.load_pre_trained_model()
model_size = len(gensim_model.vocab)
self.dimension = gensim_model.vector_size
feature = np.zeros(shape=(model_size, self.dimension), dtype=np.float32)
word_list = [word for word in gensim_model.vocab]
for i, word in enumerate(word_list):
feature[i] = gensim_model.get_vector(word) # not normed
self.logger.info("success to load index! Cost {} seconds!".format(time.time() - time_start))
# train faiss index
index_factory = "Flat"
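# L2-normalise every vector so that nearest-neighbour search on the Flat index ranks results by cosine similarity.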
normed_feature = feature / np.linalg.norm(feature, axis=1, keepdims=True)
faiss_index = faiss.index_factory(self.dimension, index_factory)
self.logger.info("training index...")
time_start = time.time()
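# A 'Flat' index needs no training (train() is effectively a no-op here), but calling it keeps the code valid for other index_factory strings.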
faiss_index.train(normed_feature) # nb * d
faiss_index.add(normed_feature)
self.logger.info("success to train index! Cost {} seconds!".format(time.time() - time_start))
# save in file
faiss.write_index(faiss_index, self.faiss_index_file)
with open(self.faiss_index_detail_pkl, "wb") as f:
pickle.dump((word_list, feature), f)
def init(self):
""" load model """
self._model = faiss.read_index(self.faiss_index_file)
with open(self.faiss_index_detail_pkl, "rb") as f:
self._word_detail_info = pickle.load(f)
self.dimension = self._word_detail_info[1].shape[-1]
for word in self.get_word_list():
self._word_vec_dict[word] = self._word_detail_info[1][self._word_detail_info[0].index(word)]
def _search_by_vec(self, feature_list, ):
""" Vector search. """
normed_feature_list = feature_list / np.linalg.norm(feature_list, axis=1, keepdims=True)
length = normed_feature_list.shape[0]
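# Request top_n + 1 neighbours: the closest hit is the query vector itself and is dropped by the callers.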
distance_list, indices = self._model.search(normed_feature_list, self.similar_top_n + 1)
distance_list = distance_list.reshape((length, self.similar_top_n + 1))
indices = indices.reshape((length, self.similar_top_n + 1))
return distance_list, indices
def search(self):
""" search similar words """
# Get the query word vectors
word_list = self.get_word_list()
word_feature_list = np.zeros(shape=(len(word_list), self.dimension), dtype=np.float32)
for i, word in enumerate(word_list):
word_feature_list[i] = self._word_detail_info[1][self._word_detail_info[0].index(word)]
# search
_, indices_arr = self._search_by_vec(word_feature_list)
# show result
for i, word in enumerate(word_list):
result = ", ".join([self._word_detail_info[0][word_index] for word_index in indices_arr[i][1:]])
self.save_result_dict(word, result)
def vec_search(self):
""" Search using the raw word vector instead of the word itself. """
# 获取查询词向量
word_list = self.get_word_list()
word_feature_list = np.zeros(shape=(len(word_list), self.dimension), dtype=np.float32)
for i, word in enumerate(word_list):
word_feature_list[i] = self._word_vec_dict[word]
# search
_, indices_arr = self._search_by_vec(word_feature_list)
# show result
for i, word in enumerate(word_list):
result = ", ".join([self._word_detail_info[0][word_index] for word_index in indices_arr[i][1:]])
self.save_result_dict(word, result)
if __name__ == '__main__':
# global logger
global_init_logger()
# benchmark
for method_cls in [FaissBenchmark, GensimBenchmark, ]:
method_cls().run()
|
py | 1a3a2a2b0a093f228ef66263035599da4da29189 | # Imports
from datetime import timedelta
from typing import List, Tuple
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
import pandas as pd
import pyarrow as pa
import pytest
from hypothesis import given, settings
from fletcher._algorithms import (
_extract_data_buffer_as_np_array,
_merge_valid_bitmaps,
max_op,
min_op,
np_ufunc_op,
prod_op,
sum_op,
)
from fletcher.algorithms.utils.chunking import (
_calculate_chunk_offsets,
_combined_in_chunk_offsets,
_in_chunk_offsets,
)
def _is_na(a):
return (a is pa.NA) or (a is None) or (np.isnan(a))
def assert_allclose_na(a, b):
"""assert_allclose with a broader NA/nan/None definition."""
if _is_na(a) and _is_na(b):
pass
else:
npt.assert_allclose(a, b)
@pytest.mark.parametrize(
"op, pandas_op", [(sum_op, pd.Series.sum), (prod_op, pd.Series.prod)]
)
@settings(deadline=timedelta(milliseconds=1000))
@given(
data=st.lists(st.one_of(st.floats(max_value=10.0, min_value=-10), st.none())),
skipna=st.booleans(),
)
def test_reduce_op(data, skipna, op, pandas_op):
arrow = pa.array(data, type=pa.float64(), from_pandas=True)
pandas = pd.Series(data, dtype=float)
assert_allclose_na(op(arrow, skipna), pandas_op(pandas, skipna=skipna))
# Split in the middle and check whether this still works
if len(data) > 2:
arrow = pa.chunked_array(
[
pa.array(data[: len(data) // 2], type=pa.float64(), from_pandas=True),
pa.array(data[len(data) // 2 :], type=pa.float64(), from_pandas=True),
]
)
assert_allclose_na(op(arrow, skipna), pandas_op(pandas, skipna=skipna))
@pytest.mark.parametrize(
"op, pandas_op", [(min_op, pd.Series.min), (max_op, pd.Series.max)]
)
@settings(deadline=timedelta(milliseconds=1000))
@given(
data=st.lists(st.one_of(st.floats(max_value=10.0), st.none())), skipna=st.booleans()
)
def test_reduce_op_no_identity(data, skipna, op, pandas_op):
arrow = pa.array(data, type=pa.float64(), from_pandas=True)
pandas = pd.Series(data, dtype=float)
should_raise = arrow.null_count == len(arrow) and (skipna or len(arrow) == 0)
if should_raise:
with pytest.raises(ValueError):
assert_allclose_na(op(arrow, skipna), pandas_op(pandas, skipna=skipna))
else:
assert_allclose_na(op(arrow, skipna), pandas_op(pandas, skipna=skipna))
# Split in the middle and check whether this still works
if len(data) > 2:
arrow = pa.chunked_array(
[
pa.array(data[: len(data) // 2], type=pa.float64(), from_pandas=True),
pa.array(data[len(data) // 2 :], type=pa.float64(), from_pandas=True),
]
)
if should_raise:
with pytest.raises(ValueError):
assert_allclose_na(op(arrow, skipna), pandas_op(pandas, skipna=skipna))
else:
assert_allclose_na(op(arrow, skipna), pandas_op(pandas, skipna=skipna))
def test_calculate_chunk_offsets():
arr = pa.chunked_array([[1, 1, 1]])
npt.assert_array_equal(_calculate_chunk_offsets(arr), np.array([0]))
arr = pa.chunked_array([[1], [1, 1]])
npt.assert_array_equal(_calculate_chunk_offsets(arr), np.array([0, 1]))
arr = pa.chunked_array([[1, 1], [1]])
npt.assert_array_equal(_calculate_chunk_offsets(arr), np.array([0, 2]))
def check_valid_in_offsets(
arr: pa.ChunkedArray, in_offsets: List[Tuple[int, int, int]]
) -> None:
if arr.num_chunks == 0:
assert in_offsets == []
return
# We always start at the beginning
assert in_offsets[0][0] == 0
assert in_offsets[0][1] == 0
# Overall, the chunk offsets must have the same length as the array
assert sum(x[2] for x in in_offsets) == len(arr)
@given(data=st.lists(st.lists(st.integers(min_value=0, max_value=10))))
def test_in_chunk_offsets(data: List[List[int]]):
arr = pa.chunked_array(data, type=pa.int64())
# Simple case: Passing in the actual chunk offsets should yield a valid selection
offsets = list(_calculate_chunk_offsets(arr))
in_offsets = _in_chunk_offsets(arr, offsets)
check_valid_in_offsets(arr, in_offsets)
def test_combined_in_chunk_offsets():
a = pa.chunked_array([[]])
b = pa.chunked_array([[]])
in_a_offsets, in_b_offsets = _combined_in_chunk_offsets(a, b)
assert in_a_offsets == [(0, 0, 0)]
assert in_b_offsets == [(0, 0, 0)]
a = pa.chunked_array([[1]])
b = pa.chunked_array([[2]])
in_a_offsets, in_b_offsets = _combined_in_chunk_offsets(a, b)
assert in_a_offsets == [(0, 0, 1)]
assert in_b_offsets == [(0, 0, 1)]
a = pa.chunked_array([[1, 2], [3, 4, 5]])
b = pa.chunked_array([[1], [2, 3], [4, 5]])
in_a_offsets, in_b_offsets = _combined_in_chunk_offsets(a, b)
assert in_a_offsets == [(0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 2)]
assert in_b_offsets == [(0, 0, 1), (1, 0, 1), (1, 1, 1), (2, 0, 2)]
@pytest.mark.parametrize("data", [[1, 2, 4, 5], [1.0, 0.5, 4.0, 5.0]])
def test_extract_data_buffer_as_np_array(data):
arr = pa.array(data)
result = _extract_data_buffer_as_np_array(arr)
expected = np.array(data)
npt.assert_array_equal(result, expected)
result = _extract_data_buffer_as_np_array(arr[2:4])
expected = np.array(data[2:4])
npt.assert_array_equal(result, expected)
def assert_content_equals_array(result, expected):
"""Assert that the result is an Arrow structure and the content matches an array."""
assert isinstance(result, (pa.Array, pa.ChunkedArray))
if isinstance(result, pa.ChunkedArray):
result = pa.concat_arrays(result.iterchunks())
assert result.equals(expected)
def check_np_ufunc(a, b, expected):
result = np_ufunc_op(a, b, np.ndarray.__add__)
assert_content_equals_array(result, expected)
result = np_ufunc_op(b, a, np.ndarray.__add__)
assert_content_equals_array(result, expected)
def test_np_ufunc_op_chunked_chunked():
a = pa.chunked_array([[1, 2], [3, None, None]])
b = pa.chunked_array([[1], [2, 3], [4, None]])
expected = pa.array([2, 4, 6, None, None])
check_np_ufunc(a, b, expected)
def test_np_ufunc_op_chunked_flat():
a = pa.chunked_array([[1, 2], [3, None, None]])
b = pa.array([1, 2, 3, 4, None])
expected = pa.array([2, 4, 6, None, None])
check_np_ufunc(a, b, expected)
def test_np_ufunc_op_chunked_np_array():
a = pa.chunked_array([[1, 2], [3, None]])
b = np.array([1, 2, 3, 4])
expected = pa.array([2, 4, 6, None])
check_np_ufunc(a, b, expected)
def test_np_ufunc_op_chunked_scalar():
a = pa.chunked_array([[1, 2], [3, None]])
b = 4
expected = pa.array([5, 6, 7, None])
check_np_ufunc(a, b, expected)
def test_np_ufunc_op_flat_flat():
a = pa.array([1, 2, 3, None, None])
b = pa.array([1, 2, 3, 4, None])
expected = pa.array([2, 4, 6, None, None])
check_np_ufunc(a, b, expected)
def test_np_ufunc_op_flat_np_array():
a = pa.array([1, 2, 3, None])
b = np.array([1, 2, 3, 4])
expected = pa.array([2, 4, 6, None])
check_np_ufunc(a, b, expected)
def test_np_ufunc_op_flat_scalar():
a = pa.array([1, 2, 3, None])
b = 4
expected = pa.array([5, 6, 7, None])
check_np_ufunc(a, b, expected)
def test_merge_valid_bitmaps():
a = pa.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
b = pa.array([1, 1, 1, None, None, None, 1, 1, 1])
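# Arrow validity bitmaps are bit-packed LSB-first: nine all-valid slots give 0xFF for the first eight values plus 0x01 for the ninth.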
expected = np.array([0xFF, 0x1], dtype=np.uint8)
result = _merge_valid_bitmaps(a, a)
npt.assert_array_equal(result, expected)
expected = np.array([0xC7, 0x1], dtype=np.uint8)
result = _merge_valid_bitmaps(a, b)
npt.assert_array_equal(result, expected)
expected = np.array([0x1], dtype=np.uint8)
result = _merge_valid_bitmaps(a.slice(8, 1), a.slice(8, 1))
npt.assert_array_equal(result, expected)
expected = np.array([0xF], dtype=np.uint8)
result = _merge_valid_bitmaps(a.slice(0, 4), a.slice(0, 4))
npt.assert_array_equal(result, expected)
expected = np.array([0x7], dtype=np.uint8)
result = _merge_valid_bitmaps(a.slice(0, 4), b.slice(0, 4))
npt.assert_array_equal(result, expected)
expected = np.array([0xF], dtype=np.uint8)
result = _merge_valid_bitmaps(a.slice(5, 4), a.slice(5, 4))
npt.assert_array_equal(result, expected)
expected = np.array([0xE], dtype=np.uint8)
result = _merge_valid_bitmaps(a.slice(5, 4), b.slice(5, 4))
npt.assert_array_equal(result, expected)
expected = np.array([0x3], dtype=np.uint8)
result = _merge_valid_bitmaps(a.slice(5, 2), a.slice(5, 2))
npt.assert_array_equal(result, expected)
expected = np.array([0x2], dtype=np.uint8)
result = _merge_valid_bitmaps(a.slice(5, 2), b.slice(5, 2))
npt.assert_array_equal(result, expected)
expected = np.array([0x3], dtype=np.uint8)
result = _merge_valid_bitmaps(a.slice(5, 2), a.slice(3, 2))
npt.assert_array_equal(result, expected)
expected = np.array([0x0], dtype=np.uint8)
result = _merge_valid_bitmaps(a.slice(5, 2), b.slice(3, 2))
npt.assert_array_equal(result, expected)
|
py | 1a3a2a98e89c3ee8a69a023e04140f2d3face4d2 | # Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for constructing federated aggregation.
For example uses of the symbols in this module, see
[Tuning recommended aggregations for learning](
https://www.tensorflow.org/federated/tutorials/tuning_recommended_aggregators)
tutorial, and for details of the design and how to implement new aggregations,
see [Implementing Custom Aggregations](
https://www.tensorflow.org/federated/tutorials/custom_aggregators) tutorial.
"""
from tensorflow_federated.python.aggregators.differential_privacy import DifferentiallyPrivateFactory
from tensorflow_federated.python.aggregators.encoded import EncodedSumFactory
from tensorflow_federated.python.aggregators.factory import AggregationFactory
from tensorflow_federated.python.aggregators.factory import UnweightedAggregationFactory
from tensorflow_federated.python.aggregators.factory import WeightedAggregationFactory
from tensorflow_federated.python.aggregators.mean import MeanFactory
from tensorflow_federated.python.aggregators.mean import UnweightedMeanFactory
from tensorflow_federated.python.aggregators.quantile_estimation import PrivateQuantileEstimationProcess
from tensorflow_federated.python.aggregators.robust import clipping_factory
from tensorflow_federated.python.aggregators.robust import zeroing_factory
from tensorflow_federated.python.aggregators.sampling import UnweightedReservoirSamplingFactory
from tensorflow_federated.python.aggregators.secure import SecureSumFactory
from tensorflow_federated.python.aggregators.sum_factory import SumFactory
|
py | 1a3a2b0bba9f130977723b63e47b742cd692be40 | """
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
"""
from markdown.test_tools import TestCase
class TestFootnotes(TestCase):
default_kwargs = {'extensions': ['footnotes']}
maxDiff = None
def test_basic_footnote(self):
self.assertMarkdownRenders(
self.dedent(
"""
paragraph[^1]
[^1]: A Footnote
"""
),
'<p>paragraph<sup id="fnref:1"><a class="footnote-ref" href="#fn:1">1</a></sup></p>\n'
'<div class="footnote">\n'
'<hr />\n'
'<ol>\n'
'<li id="fn:1">\n'
'<p>A Footnote <a class="footnote-backref" href="#fnref:1"'
' title="Jump back to footnote 1 in the text">↩</a></p>\n'
'</li>\n'
'</ol>\n'
'</div>'
)
def test_multiple_footnotes(self):
self.assertMarkdownRenders(
self.dedent(
"""
foo[^1]
bar[^2]
[^1]: Footnote 1
[^2]: Footnote 2
"""
),
'<p>foo<sup id="fnref:1"><a class="footnote-ref" href="#fn:1">1</a></sup></p>\n'
'<p>bar<sup id="fnref:2"><a class="footnote-ref" href="#fn:2">2</a></sup></p>\n'
'<div class="footnote">\n'
'<hr />\n'
'<ol>\n'
'<li id="fn:1">\n'
'<p>Footnote 1 <a class="footnote-backref" href="#fnref:1"'
' title="Jump back to footnote 1 in the text">↩</a></p>\n'
'</li>\n'
'<li id="fn:2">\n'
'<p>Footnote 2 <a class="footnote-backref" href="#fnref:2"'
' title="Jump back to footnote 2 in the text">↩</a></p>\n'
'</li>\n'
'</ol>\n'
'</div>'
)
def test_multiple_footnotes_multiline(self):
self.assertMarkdownRenders(
self.dedent(
"""
foo[^1]
bar[^2]
[^1]: Footnote 1
line 2
[^2]: Footnote 2
"""
),
'<p>foo<sup id="fnref:1"><a class="footnote-ref" href="#fn:1">1</a></sup></p>\n'
'<p>bar<sup id="fnref:2"><a class="footnote-ref" href="#fn:2">2</a></sup></p>\n'
'<div class="footnote">\n'
'<hr />\n'
'<ol>\n'
'<li id="fn:1">\n'
'<p>Footnote 1\nline 2 <a class="footnote-backref" href="#fnref:1"'
' title="Jump back to footnote 1 in the text">↩</a></p>\n'
'</li>\n'
'<li id="fn:2">\n'
'<p>Footnote 2 <a class="footnote-backref" href="#fnref:2"'
' title="Jump back to footnote 2 in the text">↩</a></p>\n'
'</li>\n'
'</ol>\n'
'</div>'
)
def test_footnote_multi_line(self):
self.assertMarkdownRenders(
self.dedent(
"""
paragraph[^1]
[^1]: A Footnote
line 2
"""
),
'<p>paragraph<sup id="fnref:1"><a class="footnote-ref" href="#fn:1">1</a></sup></p>\n'
'<div class="footnote">\n'
'<hr />\n'
'<ol>\n'
'<li id="fn:1">\n'
'<p>A Footnote\nline 2 <a class="footnote-backref" href="#fnref:1"'
' title="Jump back to footnote 1 in the text">↩</a></p>\n'
'</li>\n'
'</ol>\n'
'</div>'
)
def test_footnote_multi_line_lazy_indent(self):
self.assertMarkdownRenders(
self.dedent(
"""
paragraph[^1]
[^1]: A Footnote
line 2
"""
),
'<p>paragraph<sup id="fnref:1"><a class="footnote-ref" href="#fn:1">1</a></sup></p>\n'
'<div class="footnote">\n'
'<hr />\n'
'<ol>\n'
'<li id="fn:1">\n'
'<p>A Footnote\nline 2 <a class="footnote-backref" href="#fnref:1"'
' title="Jump back to footnote 1 in the text">↩</a></p>\n'
'</li>\n'
'</ol>\n'
'</div>'
)
def test_footnote_multi_line_complex(self):
self.assertMarkdownRenders(
self.dedent(
"""
paragraph[^1]
[^1]:
A Footnote
line 2
* list item
> blockquote
"""
),
'<p>paragraph<sup id="fnref:1"><a class="footnote-ref" href="#fn:1">1</a></sup></p>\n'
'<div class="footnote">\n'
'<hr />\n'
'<ol>\n'
'<li id="fn:1">\n'
'<p>A Footnote\nline 2</p>\n'
'<ul>\n<li>list item</li>\n</ul>\n'
'<blockquote>\n<p>blockquote</p>\n</blockquote>\n'
'<p><a class="footnote-backref" href="#fnref:1"'
' title="Jump back to footnote 1 in the text">↩</a></p>\n'
'</li>\n'
'</ol>\n'
'</div>'
)
def test_footnote_multiple_complex(self):
self.assertMarkdownRenders(
self.dedent(
"""
foo[^1]
bar[^2]
[^1]:
A Footnote
line 2
* list item
> blockquote
[^2]: Second footnote
paragraph 2
"""
),
'<p>foo<sup id="fnref:1"><a class="footnote-ref" href="#fn:1">1</a></sup></p>\n'
'<p>bar<sup id="fnref:2"><a class="footnote-ref" href="#fn:2">2</a></sup></p>\n'
'<div class="footnote">\n'
'<hr />\n'
'<ol>\n'
'<li id="fn:1">\n'
'<p>A Footnote\nline 2</p>\n'
'<ul>\n<li>list item</li>\n</ul>\n'
'<blockquote>\n<p>blockquote</p>\n</blockquote>\n'
'<p><a class="footnote-backref" href="#fnref:1"'
' title="Jump back to footnote 1 in the text">↩</a></p>\n'
'</li>\n'
'<li id="fn:2">\n'
'<p>Second footnote</p>\n'
'<p>paragraph 2 <a class="footnote-backref" href="#fnref:2"'
' title="Jump back to footnote 2 in the text">↩</a></p>\n'
'</li>\n'
'</ol>\n'
'</div>'
)
def test_footnote_multiple_complex_no_blank_line_between(self):
self.assertMarkdownRenders(
self.dedent(
"""
foo[^1]
bar[^2]
[^1]:
A Footnote
line 2
* list item
> blockquote
[^2]: Second footnote
paragraph 2
"""
),
'<p>foo<sup id="fnref:1"><a class="footnote-ref" href="#fn:1">1</a></sup></p>\n'
'<p>bar<sup id="fnref:2"><a class="footnote-ref" href="#fn:2">2</a></sup></p>\n'
'<div class="footnote">\n'
'<hr />\n'
'<ol>\n'
'<li id="fn:1">\n'
'<p>A Footnote\nline 2</p>\n'
'<ul>\n<li>list item</li>\n</ul>\n'
'<blockquote>\n<p>blockquote</p>\n</blockquote>\n'
'<p><a class="footnote-backref" href="#fnref:1"'
' title="Jump back to footnote 1 in the text">↩</a></p>\n'
'</li>\n'
'<li id="fn:2">\n'
'<p>Second footnote</p>\n'
'<p>paragraph 2 <a class="footnote-backref" href="#fnref:2"'
' title="Jump back to footnote 2 in the text">↩</a></p>\n'
'</li>\n'
'</ol>\n'
'</div>'
)
def test_backlink_text(self):
"""Test backlink configuration."""
self.assertMarkdownRenders(
'paragraph[^1]\n\n[^1]: A Footnote',
'<p>paragraph<sup id="fnref:1"><a class="footnote-ref" href="#fn:1">1</a></sup></p>\n'
'<div class="footnote">\n'
'<hr />\n'
'<ol>\n'
'<li id="fn:1">\n'
'<p>A Footnote <a class="footnote-backref" href="#fnref:1"'
' title="Jump back to footnote 1 in the text">back</a></p>\n'
'</li>\n'
'</ol>\n'
'</div>',
extension_configs={'footnotes': {'BACKLINK_TEXT': 'back'}}
)
def test_footnote_separator(self):
"""Test separator configuration."""
self.assertMarkdownRenders(
'paragraph[^1]\n\n[^1]: A Footnote',
'<p>paragraph<sup id="fnref-1"><a class="footnote-ref" href="#fn-1">1</a></sup></p>\n'
'<div class="footnote">\n'
'<hr />\n'
'<ol>\n'
'<li id="fn-1">\n'
'<p>A Footnote <a class="footnote-backref" href="#fnref-1"'
' title="Jump back to footnote 1 in the text">↩</a></p>\n'
'</li>\n'
'</ol>\n'
'</div>',
extension_configs={'footnotes': {'SEPARATOR': '-'}}
)
|
py | 1a3a2b293650433b0d7cacb0faeba913c9351b36 | from __future__ import print_function
import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteAuxvSupport(gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
AUXV_SUPPORT_FEATURE_NAME = "qXfer:auxv:read"
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def has_auxv_support(self):
inferior_args = ["message:main entered", "sleep:5"]
procs = self.prep_debug_monitor_and_inferior(
inferior_args=inferior_args)
# Don't do anything until we match the launched inferior main entry output.
# Then immediately interrupt the process.
# This prevents auxv data being asked for before it's ready and leaves
# us in a stopped state.
self.test_sequence.add_log_lines([
# Start the inferior...
"read packet: $c#63",
# ... match output....
{"type": "output_match", "regex": self.maybe_strict_output_regex(
r"message:main entered\r\n")},
], True)
# ... then interrupt.
self.add_interrupt_packets()
self.add_qSupported_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
features = self.parse_qSupported_response(context)
return self.AUXV_SUPPORT_FEATURE_NAME in features and features[
self.AUXV_SUPPORT_FEATURE_NAME] == "+"
def get_raw_auxv_data(self):
# Start up llgs and inferior, and check for auxv support.
if not self.has_auxv_support():
self.skipTest("auxv data not supported")
# Grab pointer size for target. We'll assume that is equivalent to an unsigned long on the target.
# Auxv is specified in terms of pairs of unsigned longs.
self.reset_test_sequence()
self.add_process_info_collection_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
proc_info = self.parse_process_info_response(context)
self.assertIsNotNone(proc_info)
self.assertTrue("ptrsize" in proc_info)
word_size = int(proc_info["ptrsize"])
OFFSET = 0
LENGTH = 0x400
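# Read up to 0x400 bytes of auxv data starting at offset 0; the test below assumes the whole blob comes back in a single 'l' response.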
# Grab the auxv data.
self.reset_test_sequence()
self.test_sequence.add_log_lines(
[
"read packet: $qXfer:auxv:read::{:x},{:x}:#00".format(
OFFSET,
LENGTH),
{
"direction": "send",
"regex": re.compile(
r"^\$([^E])(.*)#[0-9a-fA-F]{2}$",
re.MULTILINE | re.DOTALL),
"capture": {
1: "response_type",
2: "content_raw"}}],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Ensure we end up with all auxv data in one packet.
# FIXME don't assume it all comes back in one packet.
self.assertEqual(context.get("response_type"), "l")
# Decode binary data.
content_raw = context.get("content_raw")
self.assertIsNotNone(content_raw)
return (word_size, self.decode_gdbremote_binary(content_raw))
def supports_auxv(self):
# When non-auxv platforms support llgs, skip the test on platforms
# that don't support auxv.
self.assertTrue(self.has_auxv_support())
#
# We skip the "supports_auxv" test on debugserver. The rest of the tests
# appropriately skip the auxv tests if the support flag is not present
# in the qSupported response, so the debugserver test bits are still there
# in case debugserver code one day does have auxv support and thus those
# tests don't get skipped.
#
@skipIfWindows # no auxv support.
@llgs_test
def test_supports_auxv_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.supports_auxv()
def auxv_data_is_correct_size(self):
(word_size, auxv_data) = self.get_raw_auxv_data()
self.assertIsNotNone(auxv_data)
# Ensure auxv data is a multiple of 2*word_size (there should be two
# unsigned long fields per auxv entry).
self.assertEqual(len(auxv_data) % (2 * word_size), 0)
# print("auxv contains {} entries".format(len(auxv_data) / (2*word_size)))
@debugserver_test
def test_auxv_data_is_correct_size_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.auxv_data_is_correct_size()
@skipIfWindows
@expectedFailureNetBSD
@llgs_test
def test_auxv_data_is_correct_size_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.auxv_data_is_correct_size()
def auxv_keys_look_valid(self):
(word_size, auxv_data) = self.get_raw_auxv_data()
self.assertIsNotNone(auxv_data)
# Grab endian.
self.reset_test_sequence()
self.add_process_info_collection_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
endian = process_info.get("endian")
self.assertIsNotNone(endian)
auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data)
self.assertIsNotNone(auxv_dict)
# Verify keys look reasonable.
for auxv_key in auxv_dict:
self.assertTrue(auxv_key >= 1)
self.assertTrue(auxv_key <= 1000)
# print("auxv dict: {}".format(auxv_dict))
@debugserver_test
def test_auxv_keys_look_valid_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.auxv_keys_look_valid()
@skipIfWindows
@expectedFailureNetBSD
@llgs_test
def test_auxv_keys_look_valid_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.auxv_keys_look_valid()
def auxv_chunked_reads_work(self):
# Verify that multiple smaller offset,length reads of auxv data
# return the same data as a single larger read.
# Grab the auxv data with a single large read here.
(word_size, auxv_data) = self.get_raw_auxv_data()
self.assertIsNotNone(auxv_data)
# Grab endian.
self.reset_test_sequence()
self.add_process_info_collection_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
endian = process_info.get("endian")
self.assertIsNotNone(endian)
auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data)
self.assertIsNotNone(auxv_dict)
iterated_auxv_data = self.read_binary_data_in_chunks(
"qXfer:auxv:read::", 2 * word_size)
self.assertIsNotNone(iterated_auxv_data)
auxv_dict_iterated = self.build_auxv_dict(
endian, word_size, iterated_auxv_data)
self.assertIsNotNone(auxv_dict_iterated)
# Verify both types of data collection returned same content.
self.assertEqual(auxv_dict_iterated, auxv_dict)
@debugserver_test
def test_auxv_chunked_reads_work_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.auxv_chunked_reads_work()
@skipIfWindows
@expectedFailureNetBSD
@llgs_test
def test_auxv_chunked_reads_work_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.auxv_chunked_reads_work()
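# Editorial note: the helper below is an illustrative sketch added for this document,
# not part of the upstream lldb test suite. It shows one way the raw auxv payload
# (pairs of unsigned words, as verified by auxv_data_is_correct_size above) could be
# decoded, assuming the payload is available as a bytes object. The tests themselves
# rely on the framework's build_auxv_dict() helper instead.
import struct
def _illustrative_decode_auxv_pairs(auxv_bytes, word_size=8, little_endian=True):
    """Decode raw auxv bytes into a list of (key, value) pairs."""
    fmt = {4: "I", 8: "Q"}[word_size]  # unsigned 32- or 64-bit word
    byte_order = "<" if little_endian else ">"
    pair_size = 2 * word_size
    pairs = []
    for offset in range(0, len(auxv_bytes) - pair_size + 1, pair_size):
        key, value = struct.unpack_from(byte_order + fmt + fmt, auxv_bytes, offset)
        pairs.append((key, value))
    return pairs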
|
py | 1a3a2bc4aac8f2a9636e322026f110532a792b57 | # coding: utf-8
"""
grafeas.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1beta1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1beta1NoteKind(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
NOTE_KIND_UNSPECIFIED = "NOTE_KIND_UNSPECIFIED"
VULNERABILITY = "VULNERABILITY"
BUILD = "BUILD"
IMAGE = "IMAGE"
PACKAGE = "PACKAGE"
DEPLOYMENT = "DEPLOYMENT"
DISCOVERY = "DISCOVERY"
ATTESTATION = "ATTESTATION"
INTOTO = "INTOTO"
SBOM = "SBOM"
SPDX_PACKAGE = "SPDX_PACKAGE"
SPDX_FILE = "SPDX_FILE"
SPDX_RELATIONSHIP = "SPDX_RELATIONSHIP"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""V1beta1NoteKind - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(V1beta1NoteKind, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1NoteKind):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a3a2cf146d0b356666881f4c3443c8d224ddcdd | import time
import atexit
import logging
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
from app import app
from app.helper.feed import getFeed
@app.before_first_request
def init():
scheduler = BackgroundScheduler()
scheduler.start()
scheduler.add_job(
func=getFeed,
trigger=IntervalTrigger(minutes=30),
id="get_rss_fed",
name="Get Feed every 30 minutes",
replace_existing=True,
)
atexit.register(lambda: scheduler.shutdown())
logging.basicConfig()
logging.getLogger("apscheduler").setLevel(logging.DEBUG)
|
py | 1a3a2d1fd9ef484fc0d00b0aadb9d02d722af795 | import os
from unittest.mock import patch
from ..decorators import messagebox_on_error, die_on_error
def test_messagebox_on_error():
os.environ['GLUE_TESTING'] = 'False'
def failing_function():
raise ValueError("Dialog failure")
def working_function():
pass
@messagebox_on_error('An error occurred')
def decorated_failing_function():
failing_function()
@messagebox_on_error('An error occurred')
def decorated_working_function():
working_function()
# Test decorator
with patch('qtpy.QtWidgets.QMessageBox') as mb:
decorated_failing_function()
assert mb.call_args[0][2] == 'An error occurred\nDialog failure'
with patch('qtpy.QtWidgets.QMessageBox') as mb:
decorated_working_function()
assert mb.call_count == 0
# Test context manager
with patch('qtpy.QtWidgets.QMessageBox') as mb:
with messagebox_on_error('An error occurred'):
failing_function()
assert mb.call_args[0][2] == 'An error occurred\nDialog failure'
with patch('qtpy.QtWidgets.QMessageBox') as mb:
with messagebox_on_error('An error occurred'):
working_function()
assert mb.call_count == 0
os.environ['GLUE_TESTING'] = 'True'
def test_die_on_error():
os.environ['GLUE_TESTING'] = 'False'
def failing_function():
raise ValueError("Dialog failure")
def working_function():
pass
@die_on_error('An error occurred')
def decorated_failing_function():
failing_function()
@die_on_error('An error occurred')
def decorated_working_function():
working_function()
# Test decorator
with patch('sys.exit') as exit:
with patch('qtpy.QtWidgets.QMessageBox') as mb:
decorated_failing_function()
assert mb.call_args[0][2] == 'An error occurred\nDialog failure'
assert exit.called_once_with(1)
with patch('sys.exit') as exit:
with patch('qtpy.QtWidgets.QMessageBox') as mb:
decorated_working_function()
assert mb.call_count == 0
assert exit.call_count == 0
# Test context manager
with patch('sys.exit') as exit:
with patch('qtpy.QtWidgets.QMessageBox') as mb:
with die_on_error('An error occurred'):
failing_function()
assert mb.call_args[0][2] == 'An error occurred\nDialog failure'
assert exit.called_once_with(1)
with patch('sys.exit') as exit:
with patch('qtpy.QtWidgets.QMessageBox') as mb:
with die_on_error('An error occurred'):
working_function()
assert mb.call_count == 0
assert exit.call_count == 0
os.environ['GLUE_TESTING'] = 'True'
|
py | 1a3a2e503f85cebe33164f900024788884271b7a | # Generated by Django 2.1.10 on 2019-07-24 18:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nodasf', '0006_county_image'),
]
operations = [
migrations.AddField(
model_name='congressdistrict',
name='image',
field=models.ImageField(blank=True, default='', upload_to='media/stock'),
),
]
|
py | 1a3a2ee562ab1097161b7630633d804ddaa42171 | # -*- coding: utf-8 -*-
"""This file contains a parser for the Google Drive snapshots.
The Google Drive snapshots are stored in SQLite database files named
snapshot.db.
"""
from __future__ import unicode_literals
from dfdatetime import posix_time as dfdatetime_posix_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class GoogleDriveSnapshotCloudEntryEventData(events.EventData):
"""Google Drive snapshot cloud entry event data.
Attributes:
    document_type (int): document type.
path (str): path of the file.
shared (bool): True if the file is shared, False if the file is private.
size (int): size of the file.
url (str): URL of the file.
"""
DATA_TYPE = 'gdrive:snapshot:cloud_entry'
def __init__(self):
"""Initializes event data."""
super(GoogleDriveSnapshotCloudEntryEventData, self).__init__(
data_type=self.DATA_TYPE)
self.document_type = None
self.path = None
self.shared = None
self.size = None
self.url = None
class GoogleDriveSnapshotLocalEntryEventData(events.EventData):
"""Google Drive snapshot local entry event data.
Attributes:
path (str): path of the file.
size (int): size of the file.
"""
DATA_TYPE = 'gdrive:snapshot:local_entry'
def __init__(self):
"""Initializes event data."""
super(GoogleDriveSnapshotLocalEntryEventData, self).__init__(
data_type=self.DATA_TYPE)
self.path = None
self.size = None
class GoogleDrivePlugin(interface.SQLitePlugin):
"""SQLite plugin for Google Drive snapshot.db files."""
NAME = 'google_drive'
DESCRIPTION = 'Parser for Google Drive SQLite database files.'
# Define the needed queries.
QUERIES = [
(('SELECT cloud_entry.resource_id, cloud_entry.filename, '
'cloud_entry.modified, cloud_entry.created, cloud_entry.size, '
'cloud_entry.doc_type, cloud_entry.shared, cloud_entry.checksum, '
'cloud_entry.url, cloud_relations.parent_resource_id '
'FROM cloud_entry, cloud_relations '
'WHERE cloud_relations.child_resource_id = cloud_entry.resource_id '
'AND cloud_entry.modified IS NOT NULL;'),
'ParseCloudEntryRow'),
(('SELECT inode_number, filename, modified, checksum, size '
'FROM local_entry WHERE modified IS NOT NULL;'),
'ParseLocalEntryRow')]
# The required tables.
REQUIRED_TABLES = frozenset([
'cloud_entry', 'cloud_relations', 'local_entry', 'local_relations',
'mapping', 'overlay_status'])
SCHEMAS = [{
'cloud_entry': (
'CREATE TABLE cloud_entry (resource_id TEXT, filename TEXT, '
'modified INTEGER, created INTEGER, acl_role INTEGER, doc_type '
'INTEGER, removed INTEGER, url TEXT, size INTEGER, checksum TEXT, '
'shared INTEGER, PRIMARY KEY (resource_id))'),
'cloud_relations': (
'CREATE TABLE cloud_relations (child_resource_id TEXT, '
'parent_resource_id TEXT, UNIQUE (child_resource_id, '
'parent_resource_id), FOREIGN KEY (child_resource_id) REFERENCES '
'cloud_entry(resource_id), FOREIGN KEY (parent_resource_id) '
'REFERENCES cloud_entry(resource_id))'),
'local_entry': (
'CREATE TABLE local_entry (inode_number INTEGER, filename TEXT, '
'modified INTEGER, checksum TEXT, size INTEGER, PRIMARY KEY '
'(inode_number))'),
'local_relations': (
'CREATE TABLE local_relations (child_inode_number INTEGER, '
'parent_inode_number INTEGER, UNIQUE (child_inode_number), FOREIGN '
'KEY (parent_inode_number) REFERENCES local_entry(inode_number), '
'FOREIGN KEY (child_inode_number) REFERENCES '
'local_entry(inode_number))'),
'mapping': (
'CREATE TABLE mapping (inode_number INTEGER, resource_id TEXT, '
'UNIQUE (inode_number), FOREIGN KEY (inode_number) REFERENCES '
'local_entry(inode_number), FOREIGN KEY (resource_id) REFERENCES '
'cloud_entry(resource_id))'),
'overlay_status': (
'CREATE TABLE overlay_status (path TEXT, overlay_status INTEGER, '
'PRIMARY KEY (path))')}]
# Queries used to build cache.
LOCAL_PATH_CACHE_QUERY = (
'SELECT local_relations.child_inode_number, '
'local_relations.parent_inode_number, local_entry.filename '
'FROM local_relations, local_entry '
'WHERE local_relations.child_inode_number = local_entry.inode_number')
CLOUD_PATH_CACHE_QUERY = (
'SELECT cloud_entry.filename, cloud_entry.resource_id, '
'cloud_relations.parent_resource_id AS parent '
'FROM cloud_entry, cloud_relations '
'WHERE cloud_entry.doc_type = 0 '
'AND cloud_entry.resource_id = cloud_relations.child_resource_id')
def GetLocalPath(self, inode, cache, database):
"""Return local path for a given inode.
Args:
inode: The inode number for the file.
cache (SQLiteCache): cache.
database (SQLiteDatabase): database.
Returns:
A full path, including the filename of the given inode value.
"""
local_path = cache.GetResults('local_path')
if not local_path:
results = database.Query(self.LOCAL_PATH_CACHE_QUERY)
cache.CacheQueryResults(
results, 'local_path', 'child_inode_number',
('parent_inode_number', 'filename'))
local_path = cache.GetResults('local_path')
parent, path = local_path.get(inode, [None, None])
# TODO: Read the local_sync_root from the sync_config.db and use that
# for a root value.
root_value = '%local_sync_root%/'
if not path:
return root_value
paths = []
while path:
paths.append(path)
parent, path = local_path.get(parent, [None, None])
if not paths:
return root_value
# Paths are built top level to root so we need to reverse the list to
# represent them in the traditional order.
paths.reverse()
return root_value + '/'.join(paths)
def GetCloudPath(self, resource_id, cache, database):
"""Return cloud path given a resource id.
Args:
resource_id: The resource_id for the file.
cache: The local cache object.
database: A database object (instance of SQLiteDatabase).
Returns:
A full path to the resource value.
"""
cloud_path = cache.GetResults('cloud_path')
if not cloud_path:
results = database.Query(self.CLOUD_PATH_CACHE_QUERY)
cache.CacheQueryResults(
results, 'cloud_path', 'resource_id', ('filename', 'parent'))
cloud_path = cache.GetResults('cloud_path')
if resource_id == 'folder:root':
return '/'
paths = []
parent_path, parent_id = cloud_path.get(resource_id, ['', ''])
while parent_path:
if parent_path == 'folder:root':
break
paths.append(parent_path)
parent_path, parent_id = cloud_path.get(parent_id, ['', ''])
if not paths:
return '/'
# Paths are built top level to root so we need to reverse the list to
# represent them in the traditional order.
paths.reverse()
return '/{0:s}/'.format('/'.join(paths))
def ParseCloudEntryRow(
self, parser_mediator, query, row, cache=None, database=None,
**unused_kwargs):
"""Parses a cloud entry row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
cache (SQLiteCache): cache.
database (SQLiteDatabase): database.
"""
query_hash = hash(query)
parent_resource_id = self._GetRowValue(
query_hash, row, 'parent_resource_id')
filename = self._GetRowValue(query_hash, row, 'filename')
cloud_path = self.GetCloudPath(parent_resource_id, cache, database)
cloud_filename = '{0:s}{1:s}'.format(cloud_path, filename)
event_data = GoogleDriveSnapshotCloudEntryEventData()
event_data.document_type = self._GetRowValue(query_hash, row, 'doc_type')
event_data.path = cloud_filename
event_data.query = query
event_data.shared = bool(self._GetRowValue(query_hash, row, 'shared'))
event_data.size = self._GetRowValue(query_hash, row, 'size')
event_data.url = self._GetRowValue(query_hash, row, 'url')
timestamp = self._GetRowValue(query_hash, row, 'modified')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'created')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseLocalEntryRow(
self, parser_mediator, query, row, cache=None, database=None,
**unused_kwargs):
"""Parses a local entry row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
cache (Optional[SQLiteCache]): cache.
database (Optional[SQLiteDatabase]): database.
"""
query_hash = hash(query)
inode_number = self._GetRowValue(query_hash, row, 'inode_number')
local_path = self.GetLocalPath(inode_number, cache, database)
event_data = GoogleDriveSnapshotLocalEntryEventData()
event_data.path = local_path
event_data.query = query
event_data.size = self._GetRowValue(query_hash, row, 'size')
timestamp = self._GetRowValue(query_hash, row, 'modified')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
sqlite.SQLiteParser.RegisterPlugin(GoogleDrivePlugin)
|
py | 1a3a302ca8ea9e3c9f998c3e303d48e4bd177b45 | import os
import pytest
from conda_build import api
from .utils import fail_dir, metadata_dir
@pytest.mark.parametrize("pkg_format,pkg_ext", [(None, ".tar.bz2"), ("2", ".conda")])
def test_conda_pkg_format(
pkg_format, pkg_ext, testing_config, testing_workdir, monkeypatch, capfd
):
"""Conda package format "2" builds .conda packages."""
# Build the "entry_points" recipe, which contains a test pass for package.
recipe = os.path.join(metadata_dir, "entry_points")
# These variables are defined solely for testing purposes,
# so they can be checked within build scripts
testing_config.activate = True
testing_config.conda_pkg_format = pkg_format
monkeypatch.setenv("CONDA_TEST_VAR", "conda_test")
monkeypatch.setenv("CONDA_TEST_VAR_2", "conda_test_2")
output_file, = api.get_output_file_paths(recipe, config=testing_config)
assert output_file.endswith(pkg_ext)
api.build(recipe, config=testing_config)
assert os.path.exists(output_file)
out, err = capfd.readouterr()
# Verify that test pass ran through api
assert "Manual entry point" in out
assert "TEST END: %s" % output_file in out
|
py | 1a3a30a4899d1d14fb4e1c1ee6fdd72771b03e54 | from openpyxl import load_workbook
from docx import Document
from docx.oxml.ns import qn
import os
# Set the document font
def set_font(document):
document.styles['Normal'].font.name = u'宋体'
document.styles['Normal']._element.rPr.rFonts.set(qn('w:eastAsia'), u'宋体')
def get_ws(file_path):
    # Read the Excel .xlsx file
    wb = load_workbook(file_path) # open the existing workbook
    ws = wb.active # operate on the first worksheet by default
return ws
def get_title(ws):
title = []
for col_index in range(ws.max_column):
title.append(ws.cell(row=1, column=col_index+1).value)
return title
def print_title(title):
print("表头字段如下:")
for t in title:
print(t,end=" ")
print()
def enter_choice():
optional = '是否YyNn'
sure = '是Yy'
while True:
try:
choice = input("是否采用与字段内容无关的数值递增的文件命名方式?(是/否)(y/n):\n")
            if choice not in optional: # if the input is not one of the allowed characters
raise ValueError("需输入'是'、'否'、'y'、'n'中的一个字符")
break
except Exception as err:
print("输入不符合要求:{}\n请重新输入".format(repr(err)))
if choice in sure:
return True
else:
return False
def enter_name_rules(title):
while True:
try:
print("请输入命名字段")
name_title = input()
if name_title not in title:
raise ValueError("请原样输入表头中的一个字段")
name_rules = title.index(name_title)
break
except Exception as err:
print(err)
return name_rules
# Convert the Excel rows to Word tables
def excel_to_table(ws,name_rules,default_value,save_dir='ExcelToWordResult'):
    '''
    :param ws: workbook object returned by load_workbook
    :param name_rules: naming rule (index of the column used for file names)
    :param default_value: default value of the naming rule
    :return:
    '''
    # Get the number of rows and columns
if not os.path.exists(save_dir):
os.makedirs(save_dir)
row_num = ws.max_row
column_num = ws.max_column
    # for row in ws.rows: # ws.rows is a tuple containing the cells of each row
# for ceil in row:
# print(ceil.value)
    # Write the Word files
    for row_index in range(1,row_num): # skip the header row and write each record
        # Create a Word document
document = Document()
        # Set the document font
set_font(document)
        # Add a table to the Word document
tbobj = document.add_table(rows=2, cols=column_num, style="Table Grid")
        # Add the header row and the record
for col_index in range(column_num):
            tbobj.cell(0, col_index).text = str(ws.cell(row=1, column=col_index+1).value) # add the header
            tbobj.cell(1, col_index).text = str(ws.cell(row=row_index+1, column=col_index+1).value) # add the record
        if name_rules == default_value: # default naming: incrementing number
filename = str(row_index) + '.docx'
            # file name
else:
filename = str(ws.cell(row=row_index+1, column=name_rules+1).value) + '.docx'
        # Save the file
save_path = save_dir + '\\' + filename
        try: # file I/O involved, handle exceptions
document.save(save_path)
            # Print a message with the file storage path
            current_path = os.getcwd() # get the current path
print("当前路径是{}".format(current_path))
print("{} 存储成功".format(save_path))
except Exception as err:
print(err)
print("文件存储失败")
def main(file_path):
    ws = get_ws(file_path) # get the workbook object
    title = get_title(ws) # get its header fields
print_title(title)
    choice = enter_choice() # user decides whether to use incrementing numeric file naming
    default_value = -1 # default value for the naming rule
if choice:
name_rules = default_value
else:
name_rules = enter_name_rules(title)
    excel_to_table(ws, name_rules, default_value) # write each record to a Word file as a table
if __name__ == "__main__":
file_path = '学生信息表.xlsx'
main(file_path) |
py | 1a3a31f7104038e95603784abaed8bea8bad9317 | import datetime
import inspect
import json
import logging
import logging.config
import os
import pathlib
from types import ModuleType
from typing import Any, Callable, ContextManager, List, Optional, Union
import dotenv
import orjson # type: ignore
import sentry_sdk
import structlog
import platform
import tempfile
from structlog_sentry_logger import structlog_sentry
ROOT_DIR = pathlib.Path("/tmp" if platform.system() == "Darwin" else tempfile.gettempdir())
LOG_DATA_DIR = ROOT_DIR / ".logs"
LOG_DATA_DIR.mkdir(exist_ok=True)
DATETIME_FORMAT = "iso"
_CONFIGS = {"USE_ORJSON": True}
def _toggle_json_library(use_orjson: bool = True) -> None:
_CONFIGS["USE_ORJSON"] = use_orjson
def get_namespaced_module_name(__file__: Union[pathlib.Path, str]) -> str:
fully_qualified_path = pathlib.Path(__file__).resolve()
prefix_dir = str(ROOT_DIR) if str(ROOT_DIR) in str(fully_qualified_path) else "/"
namespaces = fully_qualified_path.relative_to(prefix_dir).with_suffix("").parts
return ".".join(namespaces)
def get_caller_name(prev_stack_frame: inspect.FrameInfo) -> str:
deduced_calling_module = deduce_module(prev_stack_frame)
return (
deduced_calling_module.__name__
if deduced_calling_module
and not is_caller_main(deduced_calling_module.__name__)
else get_namespaced_module_name(prev_stack_frame.filename)
)
def deduce_module(prev_stack_frame: inspect.FrameInfo) -> Optional[ModuleType]:
return inspect.getmodule(prev_stack_frame[0])
def get_caller_name_from_frames(stack_frames: List[inspect.FrameInfo]) -> str:
prev_stack_frame = stack_frames[1] if __file__.endswith(".py") else stack_frames[0]
return get_caller_name(prev_stack_frame)
def get_logger(name: Optional[str] = None) -> Any:
"""
Convenience function that returns a logger
Returns: A proxy that creates a correctly configured logger bound to
the __name__ of the calling module
"""
del name
stack_frames = inspect.stack()
caller_name = get_caller_name_from_frames(stack_frames)
if not structlog.is_configured():
timestamper = structlog.processors.TimeStamper(fmt=DATETIME_FORMAT)
set_logging_config(caller_name, timestamper)
set_structlog_config(timestamper)
logger = structlog.get_logger(caller_name)
logger.setLevel(logging.DEBUG)
return logger
getLogger = get_logger
"""
CamelCase alias for `structlog_sentry_logger.get_logger`.
"""
def get_config_dict() -> dict:
"""
Convenience function to get the local logging configuration dictionary,
e.g., to help configure loggers from other libraries.
Returns: The logging configuration dictionary that would be used to
configure the Python logging library component of the logger
"""
stack_frames = inspect.stack()
caller_name = get_caller_name_from_frames(stack_frames)
timestamper = structlog.processors.TimeStamper(fmt=DATETIME_FORMAT)
return get_logging_config(caller_name, timestamper)
def is_caller_main(caller_name: str) -> bool:
return caller_name == "__main__"
def get_logging_config(
module_name: str, timestamper: structlog.processors.TimeStamper
) -> dict:
handlers = get_handlers(module_name)
return {
"version": 1,
"disable_existing_loggers": False,
"formatters": (get_formatters(timestamper)),
"handlers": handlers,
"loggers": {
"": {
"handlers": list(handlers.keys()),
"level": "WARNING",
"propagate": True,
}
},
}
def set_logging_config(
module_name: str, timestamper: structlog.processors.TimeStamper
) -> None:
config_dict = get_logging_config(module_name, timestamper)
logging.config.dictConfig(config_dict)
def get_formatters(timestamper: structlog.processors.TimeStamper) -> dict:
pre_chain = [
# Add the log level and a timestamp to the event_dict if the log
# entry is not from structlog.
structlog.stdlib.add_log_level,
timestamper,
structlog.stdlib.add_logger_name,
]
return {
"plain": {
"()": structlog.stdlib.ProcessorFormatter,
"processor": structlog.processors.JSONRenderer(
serializer=serializer,
option=orjson.OPT_NON_STR_KEYS | orjson.OPT_SORT_KEYS,
),
"foreign_pre_chain": pre_chain,
},
"colored": {
"()": structlog.stdlib.ProcessorFormatter,
"processor": structlog.dev.ConsoleRenderer(colors=True),
"format": "%(message)s [in %(funcName)s]",
"foreign_pre_chain": pre_chain,
},
}
def serializer(
*args: Any,
default: Optional[Callable[[Any], Any]] = None,
option: Optional[int] = orjson.OPT_NON_STR_KEYS | orjson.OPT_SORT_KEYS,
) -> str:
if _CONFIGS["USE_ORJSON"]:
return orjson.dumps(*args, default=default, option=option).decode() # type: ignore[misc]
return json.dumps(*args, sort_keys=True)
def get_handlers(module_name: str) -> dict:
default_key = "default"
base_handlers = {
default_key: {
"level": "DEBUG",
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout",
}
}
if _ENV_VARS_REQUIRED_BY_LIBRARY[get_handlers] in os.environ:
# Prettify stdout/stderr streams
base_handlers[default_key]["formatter"] = "colored"
# Add filename handler
file_timestamp = datetime.datetime.utcnow().isoformat().replace(":", "-")
log_file_name = f"{file_timestamp}_{module_name}.jsonl"
log_file_path = LOG_DATA_DIR / log_file_name
base_handlers["filename"] = {
"level": "DEBUG",
"class": "logging.handlers.RotatingFileHandler",
"filename": str(log_file_path),
# 1 MB
"maxBytes": 1 << 20, # type: ignore[dict-item]
"backupCount": 3, # type: ignore[dict-item]
"formatter": "plain",
}
else:
base_handlers[default_key]["formatter"] = "plain"
return base_handlers
def set_structlog_config(timestamper: structlog.processors.TimeStamper) -> None:
structlog_processors = [
timestamper,
structlog.processors.StackInfoRenderer(),
add_severity_field_from_level_if_in_cloud_environment,
]
stdlib_log_compatibility_processors = [
structlog.stdlib.filter_by_level,
structlog.stdlib.add_log_level,
structlog.stdlib.add_logger_name,
structlog.stdlib.PositionalArgumentsFormatter(),
SentryBreadcrumbJsonProcessor(level=logging.ERROR, tag_keys="__all__"),
]
# Note: MUST come last!
format_wrapper_processer = [structlog.stdlib.ProcessorFormatter.wrap_for_formatter]
structlog.configure(
processors=(
stdlib_log_compatibility_processors # type: ignore[arg-type]
+ structlog_processors
+ format_wrapper_processer # type: ignore[arg-type,operator]
),
# See [Performance](https://www.structlog.org/en/stable/performance.html)
# for an in-depth explanation of the below settings
context_class=dict,
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
def add_severity_field_from_level_if_in_cloud_environment(
logger: Any, # pylint: disable=unused-argument
method: str, # pylint: disable=unused-argument
event_dict: structlog.types.EventDict,
) -> structlog.types.EventDict:
"""A custom processor for structlog for Cloud Logging compatibility
Since Cloud Logging infers log levels from the `severity` key, simply duplicates
`level` to the `severity` field in the logger's event dictionary.
"""
if (
is_cloud_logging_compatibility_mode_requested()
or is_probably_in_cloud_environment()
):
cloud_logging_log_level_key, python_log_level_key = "severity", "level"
if cloud_logging_log_level_key in event_dict:
# Dogfood by instantiating a local logger with own library.
# Note: NO infinite loop since the below log message does *NOT* use
# `severity` as a key in the emitted event.
local_logger = get_logger()
local_logger.warning(
"Existing log value being overwritten",
src_key=python_log_level_key,
dest_key=cloud_logging_log_level_key,
old_value=event_dict[cloud_logging_log_level_key],
new_value=event_dict[python_log_level_key],
logger_name=logger.name,
)
event_dict[cloud_logging_log_level_key] = event_dict[python_log_level_key]
return event_dict
def is_cloud_logging_compatibility_mode_requested() -> bool:
return (
_ENV_VARS_REQUIRED_BY_LIBRARY[is_cloud_logging_compatibility_mode_requested]
in os.environ
)
def is_probably_in_cloud_environment() -> bool:
"""Returns True if it is *likely* (but not guaranteed) logging is occurring in the context of a Cloud Logging environment"""
for env_var in [
# GKE
# There are no GKE-specific environment variable that definitively imply we are
# running in GKE... Falling back to detecting Kubernetes-injected environment
# variables since those are the only ones present in GKE pods that *could* imply
# we are running in GKE.
# Kubernetes
# see: https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables
"KUBERNETES_SERVICE_HOST",
# Cloud Function
# see: https://cloud.google.com/functions/docs/configuring/env-var#runtime_environment_variables_set_automatically
"GCP_PROJECT",
# GAE
# see: https://cloud.google.com/functions/docs/configuring/env-var#runtime_environment_variables_set_automatically
"GOOGLE_CLOUD_PROJECT",
]:
if env_var in os.environ:
return True
return False
_ENV_VARS_REQUIRED_BY_LIBRARY = {
get_handlers: "STRUCTLOG_SENTRY_LOGGER_LOCAL_DEVELOPMENT_LOGGING_MODE_ON",
is_cloud_logging_compatibility_mode_requested: "STRUCTLOG_SENTRY_LOGGER_CLOUD_LOGGING_COMPATIBILITY_MODE_ON",
sentry_sdk.init: "SENTRY_DSN",
}
class SentryBreadcrumbJsonProcessor(structlog_sentry.SentryJsonProcessor):
"""
Addresses: `SentryJsonProcessor breaks logging breadcrumbs #25`_
(source_)
.. _`SentryJsonProcessor breaks logging breadcrumbs #25`: https://github.com/kiwicom/structlog-sentry/issues/25
.. _`source`: https://github.com/kiwicom/structlog-sentry/issues/25#issuecomment-660292563
"""
def __init__( # pylint: disable=too-many-arguments
self,
breadcrumb_level: int = logging.INFO,
level: int = logging.WARNING,
active: bool = True,
as_extra: bool = True,
tag_keys: Union[List[str], str] = None,
) -> None:
self.breadcrumb_level = breadcrumb_level
super().__init__(
level=level, active=active, as_extra=as_extra, tag_keys=tag_keys
)
@staticmethod
def save_breadcrumb(logger: Any, event_dict: structlog.types.EventDict) -> None:
data = event_dict.copy() # type: ignore[attr-defined]
data.pop("event")
data.pop("logger", None)
data.pop("level", None)
data.pop("timestamp", None)
breadcrumb = {
"ty": "log",
"level": event_dict["level"].lower(),
"category": event_dict.get("logger") or logger.name,
"message": event_dict["event"],
"data": data,
}
sentry_sdk.add_breadcrumb(breadcrumb, hint={"event_dict": event_dict})
def __call__(
self, logger: Any, method: str, event_dict: structlog.types.EventDict
) -> structlog.types.EventDict:
do_breadcrumb = (
getattr(logging, event_dict["level"].upper()) >= self.breadcrumb_level
)
if do_breadcrumb:
self.save_breadcrumb(logger, event_dict)
return super().__call__(logger=logger, method=method, event_dict=event_dict)
def _load_library_specific_env_vars() -> None:
# Inject into the environment ONLY the env vars required by the library;
# we manually update/add to the the environment ONLY the keys in a user's `.env` for
# which the library is inspecting (i.e., the set intersection between the
# aforementioned), and only if they weren't already defined in the environment.
users_dotenv_values = dotenv.dotenv_values(dotenv.find_dotenv())
legal_env_vars_keys = (
_ENV_VARS_REQUIRED_BY_LIBRARY.values() & users_dotenv_values.keys()
)
for k in legal_env_vars_keys:
v = users_dotenv_values[k]
# Any env-var-to-add already defined in the environment will take precedent over
# what is defined in a user's `.env` file.
if k not in os.environ and v is not None:
os.environ[k] = v
def _init_sentry() -> ContextManager[Any]:
# Note: if DSN isn't defined, will silently not transmit telemetry
return sentry_sdk.init() # pylint: disable=abstract-class-instantiated
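# Editorial note: the lines below are an illustrative usage sketch added for this
# document, not part of the library module itself. Assuming the package exposes
# get_logger() and get_config_dict() at its top level, a caller would typically do:
#
#     from structlog_sentry_logger import get_logger, get_config_dict
#
#     LOGGER = get_logger()
#     LOGGER.info("job finished", duration_s=1.23, records_processed=42)
#
#     # Optionally reuse the same handlers/formatters for third-party loggers:
#     # logging.config.dictConfig(get_config_dict())
#
# Output is JSON lines on stdout by default, or colorized console output plus a
# rotating .jsonl file under LOG_DATA_DIR when
# STRUCTLOG_SENTRY_LOGGER_LOCAL_DEVELOPMENT_LOGGING_MODE_ON is set (see get_handlers()).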
|
py | 1a3a31f9d7809d12b5019a6e79d7ab2139466622 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlIntervaltree(PerlPackage):
"""Set::IntervalTree uses Interval Trees to store and efficiently look up
ranges using a range-based lookup."""
homepage = "https://metacpan.org/release/Set-IntervalTree"
url = "https://cpan.metacpan.org/authors/id/B/BE/BENBOOTH/Set-IntervalTree-0.10.tar.gz"
version('0.10', '42efe9369f1b30e7fd04e10c07226b06')
depends_on('perl-extutils-makemaker', type='build')
|
py | 1a3a32882c34e0cfa689cf052144bcbb555bd8df | from machine import mem32
# import time
import sys
import uasyncio
from i2c_responder_base import I2CResponderBase
import calc_icmpv6_chksum
class I2CResponder(I2CResponderBase):
"""Implementation of a (polled) Raspberry Pico I2C Responder.
    Subclass of the original I2CResponder class, which has been renamed
    I2CResponderBase. See that class for more info.
    This new version of I2CResponder implements a protocol which both Controller
    and Responder must adhere to in order to send longer messages.
Created: March 30, 2022 By: D. Garrett
"""
VERSION = "2.0.1"
def __init__(self, i2c_device_id=0, sda_gpio=0, scl_gpio=1, responder_address=0x41):
"""Initialize.
Args:
i2c_device_id (int, optional): The internal Pico I2C device to use (0 or 1).
sda_gpio (int, optional): The gpio number of the pin to use for SDA.
scl_gpio (int, optional): The gpio number of the pin to use for SCL.
responder_address (int, required): The I2C address to assign to this Responder.
"""
super().__init__(i2c_device_id=i2c_device_id, sda_gpio=sda_gpio,
scl_gpio=scl_gpio, responder_address=responder_address)
"""
    Send a long message to the Controller,
    16 bytes at a time.
    First send the 4-byte length of the message,
    then send the message body in blocks of up to 16 bytes.
"""
async def send_msg(self, msg):
# send length of message
# UTF8 may have multibyte characters
buff = bytearray(msg.encode('utf8'))
rem_bytes = len(buff)
len_buff = bytearray(rem_bytes.to_bytes(4,sys.byteorder))
await self.send_bytes(len_buff)
# print("sending: " + str(len_buff))
# send message
msg_pos = 0
# if controller no longer requesting input
# stop sending data
while rem_bytes > 0: # and self.read_is_pending():
if rem_bytes <= 16:
await self.send_bytes(buff[msg_pos:])
return
await self.send_bytes(buff[msg_pos:msg_pos+16])
msg_pos += 16
rem_bytes -= 16
"""
    Send a block of up to 16 bytes of data.
"""
async def send_bytes(self,buffer_out):
for value in buffer_out:
# loop (polling) until the Controller issues an I2C READ.
while not self.read_is_pending():
await uasyncio.sleep_ms(0)
# stop sending if controller no longer soliciting input
# if not self.read_is_pending():
# return
self.put_read_data(value)
"""
Read a long message from the Controller.
    Send an acknowledgment to the Controller indicating
    whether the receive was successful.
    If the receive failed, retry (up to 8 attempts in this
    implementation); once the retries are exhausted, send 2
    to tell the Controller the error is permanent and it should
    not bother to resend.
    If the receive failed, return an empty string;
    otherwise return the string received.
"""
async def rcv_msg(self):
if not self.write_data_is_available():
return ""
retry = 8
ok = False
while not ok and retry > 0:
b_array, ok = await self.rcv_block()
retry = retry - 1
if retry > 0:
# Controller will resend if not okay
await self.send_ack(int(ok))
if not ok:
"""
print("receive error... ",end="")
print((5-retry))
print("received: ", end="")
print(b_array)
"""
# await uasyncio.sleep_ms(0)
else:
# permanent error - don't resend
print("***** permanent receive error *****")
await self.send_ack(2)
if ok:
# don't try to decode invalid receive.
# may result in decode error.
return b_array.decode('utf8')
else:
return ""
"""
    Send a 2-byte int acknowledgement of the receive status
    to the Controller.
1 = message received ok and checksum matched
0 = message not received ok, resend
2 = message not received ok, but don't resend
"""
async def send_ack(self, ok):
b = bytearray(ok.to_bytes(2,sys.byteorder))
await self.send_bytes(b)
"""
    Receive a byte array where the first two bytes
    of the input stream contain the msg length and the
    next two contain a checksum.
    Return the byte array of data and True/False indicating
    whether the checksum matched.
"""
async def rcv_block(self):
# read length of message and checksum
data = self.get_write_data(max_size=4)
n_bytes = int.from_bytes(bytes(data[0:2]),sys.byteorder)
chksum = int.from_bytes(bytes(data[2:4]),sys.byteorder)
"""
print("rcv bytes: ",end="")
print(n_bytes, end="")
print(", checksum: ",end="")
print(chksum)
"""
r = await self.rcv_bytes(n_bytes)
# print("returning results")
# r = bytearray(data)
cs = calc_icmpv6_chksum.calc_icmpv6_chksum(r)
# wait until all sent data is received
# and controller issues a read for the ack
while not self.read_is_pending():
if self.write_data_is_available():
self.get_write_data(max_size=16)
return r, cs == chksum
"""
    Receive bytes in blocks of 16 bytes or less until
    rem_bytes of data have been received or the read "times out".
    Here, "times out" means no bytes are received for 50
    consecutive 5 ms polls (roughly 250 ms).
    Returns a bytearray of the received data.
"""
async def rcv_bytes(self, rem_bytes):
data = bytearray(rem_bytes)
data_offset = 0
wait_cnt = 0
empty = []
while rem_bytes > 0:
if self.write_data_is_available():
b = self.get_write_data(max_size=16)
else:
b = empty
if len(b) == 0:
print("+",end="")
await uasyncio.sleep_ms(5)
wait_cnt = wait_cnt + 1
if wait_cnt > 50:
# time out receive - exit early
# print("i2c_responder.rcv_msg() tired of waiting, exiting before EOD")
return data[:data_offset]
else:
wait_cnt = 0
r_cnt = len(b)
rem_bytes = rem_bytes - r_cnt
for i in range(r_cnt):
data[data_offset] = b[i]
data_offset = data_offset + 1
if rem_bytes > 0 and r_cnt != 16:
# received a short block
print("**** <16 bytes in block: ", end="")
print(len(b))
return data[:data_offset]
"""
print("v2 rcvd '", end="")
print(bytearray(b),end="")
print("' blk remain: ",end="")
print(rem_bytes)
"""
return data
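# Editorial note: the demo below is an illustrative usage sketch added for this
# document, not part of the original module. It shows a Responder-side polling loop
# for the framing protocol implemented above (sends are prefixed with a 4-byte
# length; receives carry a 2-byte length plus a 2-byte checksum header and are
# acknowledged with 0/1/2). The GPIO pins and I2C address are placeholder assumptions.
async def _demo_responder_loop():
    responder = I2CResponder(0, sda_gpio=0, scl_gpio=1, responder_address=0x41)
    while True:
        msg = await responder.rcv_msg()  # returns "" if nothing valid arrived
        if msg:
            await responder.send_msg("echo: " + msg)
        await uasyncio.sleep_ms(10)
# To run on the device: uasyncio.run(_demo_responder_loop())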
|
py | 1a3a329f9677b88eb93ed33b2ed8e958a32b948c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from unittest import TestCase, skip
from docutils.core import Publisher
from docutils import io
from m2rr import prolog, convert
class RendererTestBase(TestCase):
def conv(self, src, **kwargs):
out = convert(src, **kwargs)
self.check_rst(out)
return out
def conv_no_check(self, src, **kwargs):
out = convert(src, **kwargs)
return out
def check_rst(self, rst):
pub = Publisher(reader=None, parser=None, writer=None, settings=None,
source_class=io.StringInput,
destination_class=io.StringOutput)
pub.set_components(reader_name='standalone',
parser_name='restructuredtext',
writer_name='pseudoxml')
pub.process_programmatic_settings(
settings_spec=None,
settings_overrides={'output_encoding': 'unicode'},
config_section=None,
)
pub.set_source(rst, source_path=None)
pub.set_destination(destination=None, destination_path=None)
output = pub.publish(enable_exit_status=False)
self.assertLess(pub.document.reporter.max_level, 0)
return output, pub
class TestBasic(RendererTestBase):
def test_fail_rst(self):
with self.assertRaises(AssertionError):
            # This check should fail and report a warning
self.check_rst('```')
def test_simple_paragraph(self):
src = 'this is a sentence.\n'
out = self.conv(src)
self.assertEqual(out, '\n' + src)
def test_multiline_paragraph(self):
src = '\n'.join([
'first sentence.',
'second sentence.',
])
out = self.conv(src)
self.assertEqual(out, '\n' + src + '\n')
def test_multi_paragraph(self):
src = '\n'.join([
'first paragraph.',
'',
'second paragraph.',
])
out = self.conv(src)
self.assertEqual(out, '\n' + src + '\n')
def test_hr(self):
src = 'a\n\n---\n\nb'
out = self.conv(src)
self.assertEqual(out, '\na\n\n----\n\nb\n')
def test_linebreak(self):
src = 'abc def \nghi'
out = self.conv(src)
self.assertEqual(
out,
prolog + '\nabc def\\ :raw-html-m2rr:`<br>`\nghi' + '\n',
)
class TestInlineMarkdown(RendererTestBase):
def test_inline_code(self):
src = '`a`'
out = self.conv(src)
self.assertEqual(out.replace('\n', ''), '``a``')
def test_inline_code_with_backticks(self):
src = '```a``a```'
out = self.conv(src)
self.assertEqual(out.strip(),
'.. role:: raw-html-m2rr(raw)\n'
' :format: html\n\n\n'
':raw-html-m2rr:`<code class="docutils literal">'
'<span class="pre">a``a</span></code>`'
)
def test_strikethrough(self):
src = ('~~a~~')
self.conv(src)
def test_emphasis(self):
src = '*a*'
out = self.conv(src)
self.assertEqual(out.replace('\n', ''), '*a*')
def test_emphasis_(self):
src = '_a_'
out = self.conv(src)
self.assertEqual(out.replace('\n', ''), '*a*')
def test_emphasis_no_(self):
src = '_a_'
out = self.conv(src, no_underscore_emphasis=True)
self.assertEqual(out.replace('\n', ''), '_a_')
def test_double_emphasis(self):
src = '**a**'
out = self.conv(src)
self.assertEqual(out.replace('\n', ''), '**a**')
def test_double_emphasis__(self):
src = '__a__'
out = self.conv(src)
self.assertEqual(out.replace('\n', ''), '**a**')
def test_emphasis_no__(self):
src = '__a__'
out = self.conv(src, no_underscore_emphasis=True)
self.assertEqual(out.replace('\n', ''), '__a__')
def test_autolink(self):
src = 'link to http://example.com/ in sentence.'
out = self.conv(src)
self.assertEqual(out, '\n' + src + '\n')
def test_link(self):
src = 'this is a [link](http://example.com/).'
out = self.conv(src)
self.assertEqual(
out, '\nthis is a `link <http://example.com/>`_.\n')
def test_anonymous_link(self):
src = 'this is a [link](http://example.com/).'
out = self.conv(src, anonymous_references=True)
self.assertEqual(
out, '\nthis is a `link <http://example.com/>`__.\n')
def test_link_with_rel_link_enabled(self):
src = 'this is a [link](http://example.com/).'
out = self.conv_no_check(
src,
parse_relative_links=True
)
self.assertEqual(
out, '\nthis is a `link <http://example.com/>`_.\n')
def test_anonymous_link_with_rel_link_enabled(self):
src = 'this is a [link](http://example.com/).'
out = self.conv_no_check(
src,
parse_relative_links=True,
anonymous_references=True
)
self.assertEqual(
out, '\nthis is a `link <http://example.com/>`__.\n')
def test_anchor(self):
src = 'this is an [anchor](#anchor).'
out = self.conv_no_check(
src,
parse_relative_links=True
)
self.assertEqual(
out, '\nthis is an :ref:`anchor <anchor>`.\n')
def test_relative_link(self):
src = 'this is a [relative link](a_file.md).'
out = self.conv_no_check(
src,
parse_relative_links=True
)
self.assertEqual(
out, '\nthis is a :doc:`relative link <a_file>`.\n')
def test_relative_link_with_anchor(self):
src = 'this is a [relative link](a_file.md#anchor).'
out = self.conv_no_check(
src,
parse_relative_links=True
)
self.assertEqual(
out, '\nthis is a :doc:`relative link <a_file>`.\n')
def test_link_title(self):
src = 'this is a [link](http://example.com/ "example").'
out = self.conv(src)
self.assertEqual(
out,
'.. role:: raw-html-m2rr(raw)\n'
' :format: html\n\n\n'
'this is a :raw-html-m2rr:'
'`<a href="http://example.com/" title="example">link</a>`.\n'
)
def test_image_link(self):
src = '[](link_target_url)'
out = self.conv(src)
self.assertEqual(
out,
'\n\n.. image:: image_taget_url\n'
' :target: link_target_url\n :alt: Alt Text\n\n',
)
def test_rest_role(self):
src = 'a :code:`some code` inline.'
out = self.conv(src)
self.assertEqual(out, '\n' + src + '\n')
def test_rest_role2(self):
src = 'a `some code`:code: inline.'
out = self.conv(src)
self.assertEqual(out, '\n' + src + '\n')
def test_rest_link(self):
src = 'a `RefLink <http://example.com>`_ here.'
out = self.conv(src)
self.assertEqual(out, '\n' + src + '\n')
def test_rest_link_and_role(self):
src = 'a :code:`a` and `RefLink <http://example.com>`_ here.'
out = self.conv(src)
self.assertEqual(out, '\n' + src + '\n')
def test_rest_link_and_role2(self):
src = 'a `a`:code: and `RefLink <http://example.com>`_ here.'
out = self.conv(src)
self.assertEqual(out, '\n' + src + '\n')
def test_rest_role_incomplete(self):
src = 'a co:`de` and `RefLink <http://example.com>`_ here.'
out = self.conv(src)
self.assertEqual(
out,
'\na co:\\ ``de`` and `RefLink <http://example.com>`_ here.\n',
)
def test_rest_role_incomplete2(self):
src = 'a `RefLink <http://example.com>`_ and co:`de` here.'
out = self.conv(src)
self.assertEqual(
out,
'\na `RefLink <http://example.com>`_ and co:\\ ``de`` here.\n',
)
def test_rest_role_with_code(self):
src = 'a `code` and :code:`rest` here.'
out = self.conv(src)
self.assertEqual(out, '\na ``code`` and :code:`rest` here.\n')
def test_rest2_role_with_code(self):
src = 'a `code` and `rest`:code: here.'
out = self.conv(src)
self.assertEqual(out, '\na ``code`` and `rest`:code: here.\n')
def test_code_with_rest_role(self):
src = 'a :code:`rest` and `code` here.'
out = self.conv(src)
self.assertEqual(out, '\na :code:`rest` and ``code`` here.\n')
def test_code_with_rest_role2(self):
src = 'a `rest`:code: and `code` here.'
out = self.conv(src)
self.assertEqual(out, '\na `rest`:code: and ``code`` here.\n')
def test_rest_link_with_code(self):
src = 'a `RefLink <a>`_ and `code` here.'
out = self.conv(src)
self.assertEqual(out, '\na `RefLink <a>`_ and ``code`` here.\n')
def test_code_with_rest_link(self):
src = 'a `code` and `RefLink <a>`_ here.'
out = self.conv(src)
self.assertEqual(out, '\na ``code`` and `RefLink <a>`_ here.\n')
def test_inline_math(self):
src = 'this is `$E = mc^2$` inline math.'
out = self.conv(src)
self.assertEqual(out, '\nthis is :math:`E = mc^2` inline math.\n')
def test_disable_inline_math(self):
src = 'this is `$E = mc^2$` inline math.'
out = self.conv(src, disable_inline_math=True)
self.assertEqual(out, '\nthis is ``$E = mc^2$`` inline math.\n')
def test_inline_html(self):
src = 'this is <s>html</s>.'
out = self.conv(src)
self.assertEqual(
out, prolog + '\nthis is :raw-html-m2rr:`<s>html</s>`.\n')
def test_block_html(self):
src = '<h1>title</h1>'
out = self.conv(src)
self.assertEqual(out, '\n\n.. raw:: html\n\n <h1>title</h1>\n\n')
class TestBlockQuote(RendererTestBase):
def test_block_quote(self):
src = '> q1\n> q2'
out = self.conv(src)
self.assertEqual(out, '\n..\n\n q1\n q2\n\n')
def test_block_quote_nested(self):
src = '> q1\n> > q2'
out = self.conv(src)
# one extra empty line is inserted, but still valid rst anyway
self.assertEqual(out, '\n..\n\n q1\n\n ..\n\n q2\n\n')
@skip('markdown does not support dedent in block quote')
def test_block_quote_nested_2(self):
src = '> q1\n> > q2\n> q3'
out = self.conv(src)
self.assertEqual(out, '\n..\n\n q1\n\n ..\n q2\n\n q3\n\n')
class TestCodeBlock(RendererTestBase):
def test_plain_code_block(self):
src = '\n'.join([
'```',
'pip install sphinx',
'```',
])
out = self.conv(src)
# Default to text block if non specified
self.assertEqual(out, '\n.. code-block:: text\n\n\
pip install sphinx\n')
def test_plain_code_block_tilda(self):
src = '\n'.join([
'~~~',
'pip install sphinx',
'~~~',
])
out = self.conv(src)
self.assertEqual(out, '\n.. code-block:: text\n\n pip install \
sphinx\n')
def test_code_block_math(self):
src = '\n'.join([
'```math',
'E = mc^2',
'```',
])
out = self.conv(src)
self.assertEqual(out, '\n.. math::\n\n E = mc^2\n')
def test_plain_code_block_indent(self):
src = '\n'.join([
'```',
'pip install sphinx',
' new line',
'```',
])
out = self.conv(src)
self.assertEqual(
out,
'\n.. code-block:: text\n\n pip install \
sphinx\n new line\n',
)
def test_python_code_block(self):
src = '\n'.join([
'```python',
'print(1)',
'```',
])
out = self.conv(src)
self.assertEqual(out, '\n.. code-block:: python\n\n print(1)\n')
def test_python_code_block_indent(self):
src = '\n'.join([
'```python',
'def a(i):',
' print(i)',
'```',
])
out = self.conv(src)
self.assertEqual(
out,
'\n.. code-block:: python\n\n def a(i):\n print(i)\n',
)
class TestImage(RendererTestBase):
def test_image(self):
src = ''
out = self.conv(src)
# first and last newline is inserted by paragraph
self.assertEqual(
out,
'\n\n.. image:: a.png\n :target: a.png\n :alt: alt text\n\n',
)
def test_image_title(self):
src = ''
self.conv(src)
# title is not supported now
class TestHeading(RendererTestBase):
def test_heading(self):
src = '# head 1'
out = self.conv(src)
self.assertEqual(out, '\nhead 1\n' + '=' * 6 + '\n')
def test_heading_multibyte(self):
src = '# マルチバイト文字\n'
out = self.conv(src)
self.assertEqual(out, '\nマルチバイト文字\n' + '=' * 16 + '\n')
class TestList(RendererTestBase):
def test_ul(self):
src = '* list'
out = self.conv(src)
self.assertEqual(out, '\n\n* list\n')
def test_ol(self):
src = '1. list'
out = self.conv(src)
self.assertEqual(out, '\n\n#. list\n')
def test_nested_ul(self):
src = '\n'.join([
'* list 1',
'* list 2',
' * list 2.1',
' * list 2.2',
'* list 3',
])
out = self.conv(src)
self.assertEqual(
out,
'\n\n* list 1\n'
'* list 2\n\n'
' * list 2.1\n'
' * list 2.2\n\n'
'* list 3\n',
)
def test_nested_ul_2(self):
src = '\n'.join([
'* list 1',
'* list 2',
' * list 2.1',
' * list 2.2',
' * list 2.2.1',
' * list 2.2.2',
'* list 3',
])
out = self.conv(src)
self.assertEqual(
out,
'\n\n* list 1\n'
'* list 2\n\n'
' * list 2.1\n'
' * list 2.2\n\n'
' * list 2.2.1\n'
' * list 2.2.2\n\n'
'* list 3\n'
)
def test_nested_ol(self):
src = '\n'.join([
'1. list 1',
'2. list 2',
' 2. list 2.1',
' 3. list 2.2',
'3. list 3',
])
out = self.conv(src)
self.assertEqual(
out,
'\n\n#. list 1\n'
'#. list 2\n'
'\n'
' #. list 2.1\n'
' #. list 2.2\n'
'\n'
'#. list 3\n',
)
def test_nested_ol_2(self):
src = '\n'.join([
'1. list 1',
'2. list 2',
' 3. list 2.1',
' 4. list 2.2',
' 5. list 2.2.1',
' 6. list 2.2.2',
'7. list 3',
])
out = self.conv(src)
self.assertEqual(
out,
'\n'.join([
'\n\n#. list 1',
'#. list 2',
'',
' #. list 2.1',
' #. list 2.2',
'',
' #. list 2.2.1',
' #. list 2.2.2',
'',
'#. list 3\n',
])
)
def test_nested_mixed_1(self):
src = '\n'.join([
'1. list 1',
'2. list 2',
' * list 2.1',
' * list 2.2',
' 1. list 2.2.1',
' 2. list 2.2.2',
'7. list 3',
])
out = self.conv(src)
self.assertEqual(
out,
'\n'.join([
'\n\n#. list 1',
'#. list 2',
'',
' * list 2.1',
' * list 2.2',
'',
' #. list 2.2.1',
' #. list 2.2.2',
'',
'#. list 3\n',
])
)
def test_nested_multiline_1(self):
src = '\n'.join([
'* list 1',
' list 1 cont',
'* list 2',
' list 2 cont',
' * list 2.1',
' list 2.1 cont',
' * list 2.2',
' list 2.2 cont',
' * list 2.2.1',
' * list 2.2.2',
'* list 3',
])
out = self.conv(src)
self.assertEqual(
out,
'\n'.join([
'\n\n* list 1',
' list 1 cont',
'* list 2',
' list 2 cont',
'',
' * list 2.1',
' list 2.1 cont',
' * list 2.2',
' list 2.2 cont',
'',
' * list 2.2.1',
' * list 2.2.2',
'',
'* list 3\n',
])
)
def test_nested_multiline_2(self):
src = '\n'.join([
'1. list 1',
' list 1 cont',
'1. list 2',
' list 2 cont',
' 1. list 2.1',
' list 2.1 cont',
' 1. list 2.2',
' list 2.2 cont',
' 1. list 2.2.1',
' 1. list 2.2.2',
'1. list 3',
])
out = self.conv(src)
self.assertEqual(
out,
'\n'.join([
'\n\n#. list 1',
' list 1 cont',
'#. list 2',
' list 2 cont',
'',
' #. list 2.1',
' list 2.1 cont',
' #. list 2.2',
' list 2.2 cont',
'',
' #. list 2.2.1',
' #. list 2.2.2',
'',
'#. list 3\n',
])
)
def test_nested_multiline_3(self):
src = '\n'.join([
'1. list 1',
' list 1 cont',
'1. list 2',
' list 2 cont',
' * list 2.1',
' list 2.1 cont',
' * list 2.2',
' list 2.2 cont',
' 1. list 2.2.1',
' 1. list 2.2.2',
'1. list 3',
])
out = self.conv(src)
self.assertEqual(
out,
'\n'.join([
'\n\n#. list 1',
' list 1 cont',
'#. list 2',
' list 2 cont',
'',
' * list 2.1',
' list 2.1 cont',
' * list 2.2',
' list 2.2 cont',
'',
' #. list 2.2.1',
' #. list 2.2.2',
'',
'#. list 3\n',
])
)
class TestConplexText(RendererTestBase):
def test_code(self):
src = '''
some sentence
```python
print(1)
```
some sentence
# title
```python
print(1)
```
---
end
'''
self.conv(src)
class TestTable(RendererTestBase):
def test_table(self):
src = '''h1 | h2 | h3\n--- | --- | ---\n1 | 2 | 3\n4 | 5 | 6'''
out = self.conv(src)
self.assertEqual(out, '\n'.join([
'',
'.. list-table::',
' :header-rows: 1',
'',
' * - h1',
' - h2',
' - h3',
' * - 1',
' - 2',
' - 3',
' * - 4',
' - 5',
' - 6',
'',
'',
]))
class TestFootNote(RendererTestBase):
def test_footnote(self):
src = '\n'.join([
'This is a[^1] footnote[^2] ref[^ref] with rst [#a]_.',
'',
'[^1]: note 1',
'[^2]: note 2',
'[^ref]: note ref',
'.. [#a] note rst',
])
out = self.conv(src)
self.assertEqual(out, '\n'.join([
'',
'This is a\\ [#fn-1]_ '
'footnote\\ [#fn-2]_ ref\\ [#fn-ref]_ with rst [#a]_.',
'',
'.. [#a] note rst', # one empty line inserted...
'',
'.. [#fn-1] note 1',
'.. [#fn-2] note 2',
'.. [#fn-ref] note ref',
'',
]))
def test_sphinx_ref(self):
src = 'This is a sphinx [ref]_ global ref.\n\n.. [ref] ref text'
out = self.conv(src)
self.assertEqual(out, '\n' + src)
class TestDirective(RendererTestBase):
def test_comment_oneline(self):
src = '.. a'
out = self.conv(src)
self.assertEqual(out, '\n.. a')
def test_comment_indented(self):
src = ' .. a'
out = self.conv(src)
self.assertEqual(out, '\n .. a')
def test_comment_newline(self):
src = '..\n\n comment\n\nnewline'
out = self.conv(src)
self.assertEqual(out, '\n..\n\n comment\n\nnewline\n')
def test_comment_multiline(self):
comment = (
'.. this is comment.\n'
' this is also comment.\n'
'\n'
'\n'
' comment may include empty line.\n'
'\n\n')
src = comment + '`eoc`'
out = self.conv(src)
self.assertEqual(out, '\n' + comment + '``eoc``\n')
class TestRestCode(RendererTestBase):
def test_rest_code_block_empty(self):
src = '\n\n::\n\n'
out = self.conv(src)
self.assertEqual(out, '\n\n')
def test_eol_marker(self):
src = 'a::\n\n code\n'
out = self.conv(src)
self.assertEqual(out, '\na:\n\n.. code-block:: text\n\n code\n')
def test_eol_marker_remove(self):
src = 'a ::\n\n code\n'
out = self.conv(src)
self.assertEqual(out, '\na\n\n.. code-block:: text\n\n code\n')
|
py | 1a3a32f6c90ab449ab0b7efe2b64638ce6a87a49 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteTablesOperations:
"""RouteTablesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.RouteTable":
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "models.RouteTable",
**kwargs
) -> "models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RouteTable')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
parameters: "models.RouteTable",
**kwargs
) -> AsyncLROPoller["models.RouteTable"]:
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route table operation.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.RouteTable
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "models.TagsObject",
**kwargs
) -> "models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
route_table_name: str,
parameters: "models.TagsObject",
**kwargs
) -> AsyncLROPoller["models.RouteTable"]:
"""Updates a route table tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to update route table tags.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.RouteTableListResult"]:
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["models.RouteTableListResult"]:
"""Gets all route tables in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'} # type: ignore
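if __name__ == "__main__":
    # Hedged usage sketch, not part of the generated SDK: these operations are normally
    # reached through the async NetworkManagementClient rather than instantiated directly.
    # The import paths, the resource group / route table names and the
    # AZURE_SUBSCRIPTION_ID environment variable below are illustrative assumptions.
    import asyncio
    import os
    async def _demo():
        from azure.identity.aio import DefaultAzureCredential
        from azure.mgmt.network.v2018_07_01.aio import NetworkManagementClient
        credential = DefaultAzureCredential()
        client = NetworkManagementClient(credential, os.environ["AZURE_SUBSCRIPTION_ID"])
        # Simple GET (see RouteTablesOperations.get above).
        table = await client.route_tables.get("my-rg", "my-route-table")
        print(table.name)
        # A long-running delete returns an AsyncLROPoller (see begin_delete above).
        poller = await client.route_tables.begin_delete("my-rg", "my-route-table")
        await poller.result()
        await client.close()
        await credential.close()
    asyncio.run(_demo())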
|
py | 1a3a341873efaa53386ef9cb51c8c5362081b89a | # flake8: noqa
import base64
import collections
import datetime
import inspect
import os
import os.path as osp
import pickle
import re
import subprocess
import sys
import cloudpickle
import dateutil.tz
import numpy as np
from garage.core import Serializable
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def flatten(l):
return [item for sublist in l for item in sublist]
class BinaryOp(Serializable):
def __init__(self):
Serializable.quick_init(self, locals())
def rdiv(self, a, b):
return b / a
# def __init__(self, opname, a, b):
# self.opname = opname
# self.a = a
# self.b = b
class VariantDict(AttrDict):
def __init__(self, d, hidden_keys):
super(VariantDict, self).__init__(d)
self._hidden_keys = hidden_keys
def dump(self):
return {k: v for k, v in self.items() if k not in self._hidden_keys}
class VariantGenerator:
"""
Usage:
vg = VariantGenerator()
vg.add("param1", [1, 2, 3])
vg.add("param2", ['x', 'y'])
vg.variants() => # all combinations of [1,2,3] x ['x','y']
Supports noncyclic dependency among parameters:
vg = VariantGenerator()
vg.add("param1", [1, 2, 3])
vg.add("param2", lambda param1: [param1+1, param1+2])
vg.variants() => # ..
"""
def __init__(self):
self._variants = []
self._populate_variants()
self._hidden_keys = []
for k, vs, cfg in self._variants:
if cfg.get('hide', False):
self._hidden_keys.append(k)
def add(self, key, vals, **kwargs):
self._variants.append((key, vals, kwargs))
def _populate_variants(self):
methods = inspect.getmembers(
self.__class__,
predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x))
methods = [
x[1].__get__(self, self.__class__) for x in methods
if getattr(x[1], '__is_variant', False)
]
for m in methods:
self.add(m.__name__, m, **getattr(m, '__variant_config', dict()))
def variants(self, randomized=False):
ret = list(self.ivariants())
if randomized:
np.random.shuffle(ret)
return list(map(self.variant_dict, ret))
def variant_dict(self, variant):
return VariantDict(variant, self._hidden_keys)
def to_name_suffix(self, variant):
suffix = []
for k, vs, cfg in self._variants:
if not cfg.get('hide', False):
suffix.append(k + '_' + str(variant[k]))
return '_'.join(suffix)
def ivariants(self):
dependencies = list()
for key, vals, _ in self._variants:
if hasattr(vals, '__call__'):
args = inspect.getfullargspec(vals).args
if hasattr(vals, 'im_self') or hasattr(vals, '__self__'):
# remove the first 'self' parameter
args = args[1:]
dependencies.append((key, set(args)))
else:
dependencies.append((key, set()))
sorted_keys = []
# topo sort all nodes
while len(sorted_keys) < len(self._variants):
# get all nodes with zero in-degree
free_nodes = [k for k, v in dependencies if not v]
if not free_nodes:
error_msg = 'Invalid parameter dependency: \n'
for k, v in dependencies:
if v:
error_msg += k + ' depends on ' + ' & '.join(v) + '\n'
raise ValueError(error_msg)
dependencies = [(k, v) for k, v in dependencies
if k not in free_nodes]
# remove the free nodes from the remaining dependencies
for _, v in dependencies:
v.difference_update(free_nodes)
sorted_keys += free_nodes
return self._ivariants_sorted(sorted_keys)
def _ivariants_sorted(self, sorted_keys):
if not sorted_keys:
yield dict()
else:
first_keys = sorted_keys[:-1]
first_variants = self._ivariants_sorted(first_keys)
last_key = sorted_keys[-1]
last_vals = [v for k, v, _ in self._variants if k == last_key][0]
if hasattr(last_vals, '__call__'):
last_val_keys = inspect.getfullargspec(last_vals).args
if hasattr(last_vals, 'im_self') or hasattr(
last_vals, '__self__'):
last_val_keys = last_val_keys[1:]
else:
last_val_keys = None
for variant in first_variants:
if hasattr(last_vals, '__call__'):
last_variants = last_vals(
**{k: variant[k]
for k in last_val_keys})
for last_choice in last_variants:
yield AttrDict(variant, **{last_key: last_choice})
else:
for last_choice in last_vals:
yield AttrDict(variant, **{last_key: last_choice})
def variant(*args, **kwargs):
def _variant(fn):
fn.__is_variant = True
fn.__variant_config = kwargs
return fn
    if len(args) == 1 and callable(args[0]):
return _variant(args[0])
return _variant
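def _demo_variant_generator():
    """Hedged illustration, not part of garage: enumerate hyperparameter variants.
    ``batch_size`` depends on ``seed``, exercising the acyclic-dependency resolution
    described in the ``VariantGenerator`` docstring above; the class and value
    choices here are arbitrary assumptions.
    """
    class DemoVG(VariantGenerator):
        @variant
        def seed(self):
            return [1, 2, 3]
        @variant
        def batch_size(self, seed):
            return [32 * seed, 64 * seed]
    return DemoVG().variants()  # 3 seeds x 2 batch sizes -> 6 variant dicts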
def query_yes_no(question, default='yes'):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
if default is None:
prompt = ' [y/n] '
elif default == 'yes':
prompt = ' [Y/n] '
elif default == 'no':
prompt = ' [y/N] '
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
exp_count = 0
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
def run_experiment(method_call=None,
batch_tasks=None,
exp_prefix='experiment',
exp_name=None,
log_dir=None,
script='garage.experiment.experiment_wrapper',
python_command='python',
dry=False,
env=None,
variant=None,
use_tf=False,
use_gpu=False,
pre_commands=None,
**kwargs):
"""Serialize the method call and run the experiment using the
specified mode.
Args:
method_call (callable): A method call.
batch_tasks (list[dict]): A batch of method calls.
exp_prefix (str): Name prefix for the experiment.
exp_name (str): Name of the experiment.
log_dir (str): Log directory for the experiment.
script (str): The name of the entrance point python script.
python_command (str): Python command to run the experiment.
dry (bool): Whether to do a dry-run, which only prints the
commands without executing them.
env (dict): Extra environment variables.
variant (dict): If provided, should be a dictionary of parameters.
        use_tf (bool): Whether the launched task uses TensorFlow; together
            with use_gpu this controls the CUDA-related environment flags.
use_gpu (bool): Whether the launched task is running on GPU.
This triggers a few configuration changes including certain
environment flags.
pre_commands (str): Pre commands to run the experiment.
"""
if method_call is None and batch_tasks is None:
raise Exception(
'Must provide at least either method_call or batch_tasks')
for task in (batch_tasks or [method_call]):
if not hasattr(task, '__call__'):
raise ValueError('batch_tasks should be callable')
# ensure variant exists
if variant is None:
variant = dict()
if batch_tasks is None:
batch_tasks = [
dict(
kwargs,
pre_commands=pre_commands,
method_call=method_call,
exp_name=exp_name,
log_dir=log_dir,
env=env,
variant=variant)
]
global exp_count
if use_tf:
if not use_gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = ''
else:
os.unsetenv('CUDA_VISIBLE_DEVICES')
for task in batch_tasks:
call = task.pop('method_call')
data = base64.b64encode(cloudpickle.dumps(call)).decode('utf-8')
task['args_data'] = data
exp_count += 1
if task.get('exp_name', None) is None:
task['exp_name'] = '{}_{}_{:04n}'.format(exp_prefix, timestamp,
exp_count)
if task.get('log_dir', None) is None:
task['log_dir'] = (
'{log_dir}/local/{exp_prefix}/{exp_name}'.format(
log_dir=osp.join(os.getcwd(), 'data'),
exp_prefix=exp_prefix.replace('_', '-'),
exp_name=task['exp_name']))
if task.get('variant', None) is not None:
variant = task.pop('variant')
if 'exp_name' not in variant:
variant['exp_name'] = task['exp_name']
task['variant_data'] = base64.b64encode(
pickle.dumps(variant)).decode('utf-8')
elif 'variant' in task:
del task['variant']
task['env'] = task.get('env', dict()) or dict()
task['env']['GARAGE_USE_GPU'] = str(use_gpu)
task['env']['GARAGE_USE_TF'] = str(use_tf)
for task in batch_tasks:
env = task.pop('env', None)
command = to_local_command(
task, python_command=python_command, script=script)
print(command)
if dry:
return
try:
if env is None:
env = dict()
subprocess.call(command, shell=True, env=dict(os.environ, **env))
except Exception as e:
print(e)
if isinstance(e, KeyboardInterrupt):
raise
_find_unsafe = re.compile(r'[a-zA-Z0-9_^@%+=:,./-]').search
def _shellquote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
def _to_param_val(v):
if v is None:
return ''
elif isinstance(v, list):
return ' '.join(map(_shellquote, list(map(str, v))))
else:
return _shellquote(str(v))
def to_local_command(params,
python_command='python',
script='garage.experiment.experiment_wrapper'):
command = python_command + ' -m ' + script
garage_env = eval(os.environ.get('GARAGE_ENV', '{}'))
for k, v in garage_env.items():
command = '{}={} '.format(k, v) + command
pre_commands = params.pop('pre_commands', None)
post_commands = params.pop('post_commands', None)
if pre_commands is not None or post_commands is not None:
print('Not executing the pre_commands: ', pre_commands,
', nor post_commands: ', post_commands)
for k, v in params.items():
if isinstance(v, dict):
for nk, nv in v.items():
if str(nk) == '_name':
command += ' --{} {}'.format(k, _to_param_val(nv))
else:
command += \
' --{}_{} {}'.format(k, nk, _to_param_val(nv))
else:
command += ' --{} {}'.format(k, _to_param_val(v))
return command
def concretize(obj):
if isinstance(obj, dict):
# make sure that there's no hidden caveat
ret = dict()
for k, v in obj.items():
ret[concretize(k)] = concretize(v)
return ret
elif isinstance(obj, (list, tuple)):
return obj.__class__(list(map(concretize, obj)))
else:
return obj
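if __name__ == '__main__':
    # Hedged usage sketch, not part of the library: serialize a callable and print the
    # command that would launch it. With dry=True nothing is executed, so this only
    # assumes that garage and its dependencies are importable; the task and variant
    # values are arbitrary.
    def demo_task(variant_data=None):
        print('running demo task')
    run_experiment(
        method_call=demo_task,
        exp_prefix='demo',
        variant={'seed': 1, 'batch_size': 32},
        dry=True,
    )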
|
py | 1a3a3679dac3e038051840870d0e1b94b7d14832 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2common import transport
from st2common.models.db.sensor import sensor_type_access
from st2common.persistence.base import ContentPackResource
__all__ = ["SensorType"]
class SensorType(ContentPackResource):
impl = sensor_type_access
publisher = None
@classmethod
def _get_impl(cls):
return cls.impl
@classmethod
def _get_publisher(cls):
if not cls.publisher:
cls.publisher = transport.reactor.SensorCUDPublisher()
return cls.publisher
|
py | 1a3a36f6cf0ce7ef2a2ca282d35d2a24fc80bd7c | # -*- coding: utf-8 -*-
# Copyright 2019-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from http import HTTPStatus
from typing import TYPE_CHECKING, List, Optional, Tuple
from urllib import parse as urlparse
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.api.filtering import Filter
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_integer,
parse_json_object_from_request,
parse_string,
)
from synapse.http.site import SynapseRequest
from synapse.rest.admin._base import (
admin_patterns,
assert_requester_is_admin,
assert_user_is_admin,
)
from synapse.storage.databases.main.room import RoomSortOrder
from synapse.types import JsonDict, RoomAlias, RoomID, UserID, create_requester
from synapse.util import json_decoder
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class ShutdownRoomRestServlet(RestServlet):
"""Shuts down a room by removing all local users from the room and blocking
all future invites and joins to the room. Any local aliases will be repointed
to a new room created by `new_room_user_id` and kicked users will be auto
joined to the new room.
"""
PATTERNS = admin_patterns("/shutdown_room/(?P<room_id>[^/]+)")
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.auth = hs.get_auth()
self.room_shutdown_handler = hs.get_room_shutdown_handler()
async def on_POST(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
await assert_user_is_admin(self.auth, requester.user)
content = parse_json_object_from_request(request)
assert_params_in_dict(content, ["new_room_user_id"])
ret = await self.room_shutdown_handler.shutdown_room(
room_id=room_id,
new_room_user_id=content["new_room_user_id"],
new_room_name=content.get("room_name"),
message=content.get("message"),
requester_user_id=requester.user.to_string(),
block=True,
)
return (200, ret)
class DeleteRoomRestServlet(RestServlet):
"""Delete a room from server.
It is a combination and improvement of shutdown and purge room.
Shuts down a room by removing all local users from the room.
Blocking all future invites and joins to the room is optional.
If desired any local aliases will be repointed to a new room
created by `new_room_user_id` and kicked users will be auto-
joined to the new room.
If 'purge' is true, it will remove all traces of a room from the database.
"""
PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]+)/delete$")
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.auth = hs.get_auth()
self.room_shutdown_handler = hs.get_room_shutdown_handler()
self.pagination_handler = hs.get_pagination_handler()
async def on_POST(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
await assert_user_is_admin(self.auth, requester.user)
content = parse_json_object_from_request(request)
block = content.get("block", False)
if not isinstance(block, bool):
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"Param 'block' must be a boolean, if given",
Codes.BAD_JSON,
)
purge = content.get("purge", True)
if not isinstance(purge, bool):
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"Param 'purge' must be a boolean, if given",
Codes.BAD_JSON,
)
force_purge = content.get("force_purge", False)
if not isinstance(force_purge, bool):
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"Param 'force_purge' must be a boolean, if given",
Codes.BAD_JSON,
)
ret = await self.room_shutdown_handler.shutdown_room(
room_id=room_id,
new_room_user_id=content.get("new_room_user_id"),
new_room_name=content.get("room_name"),
message=content.get("message"),
requester_user_id=requester.user.to_string(),
block=block,
)
# Purge room
if purge:
await self.pagination_handler.purge_room(room_id, force=force_purge)
return (200, ret)
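# Hedged client-side sketch, not part of Synapse: what a call to the delete-room admin
# API served above might look like. The homeserver URL, admin access token, room id and
# the use of the `requests` library are illustrative assumptions.
def _example_delete_room(base_url, admin_token, room_id):
    import requests
    resp = requests.post(
        "%s/_synapse/admin/v1/rooms/%s/delete" % (base_url, room_id),
        headers={"Authorization": "Bearer %s" % admin_token},
        json={
            "new_room_user_id": "@admin:example.com",  # optional: kicked users are moved here
            "block": True,  # forbid future invites and joins
            "purge": True,  # remove all traces of the room from the database
        },
    )
    resp.raise_for_status()
    # Response includes kicked_users, failed_to_kick_users, local_aliases, new_room_id.
    return resp.json()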
class ListRoomRestServlet(RestServlet):
"""
List all rooms that are known to the homeserver. Results are returned
in a dictionary containing room information. Supports pagination.
"""
PATTERNS = admin_patterns("/rooms$")
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore()
self.auth = hs.get_auth()
self.admin_handler = hs.get_admin_handler()
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
await assert_user_is_admin(self.auth, requester.user)
# Extract query parameters
start = parse_integer(request, "from", default=0)
limit = parse_integer(request, "limit", default=100)
order_by = parse_string(request, "order_by", default=RoomSortOrder.NAME.value)
if order_by not in (
RoomSortOrder.ALPHABETICAL.value,
RoomSortOrder.SIZE.value,
RoomSortOrder.NAME.value,
RoomSortOrder.CANONICAL_ALIAS.value,
RoomSortOrder.JOINED_MEMBERS.value,
RoomSortOrder.JOINED_LOCAL_MEMBERS.value,
RoomSortOrder.VERSION.value,
RoomSortOrder.CREATOR.value,
RoomSortOrder.ENCRYPTION.value,
RoomSortOrder.FEDERATABLE.value,
RoomSortOrder.PUBLIC.value,
RoomSortOrder.JOIN_RULES.value,
RoomSortOrder.GUEST_ACCESS.value,
RoomSortOrder.HISTORY_VISIBILITY.value,
RoomSortOrder.STATE_EVENTS.value,
):
raise SynapseError(
400,
"Unknown value for order_by: %s" % (order_by,),
errcode=Codes.INVALID_PARAM,
)
search_term = parse_string(request, "search_term")
if search_term == "":
raise SynapseError(
400,
"search_term cannot be an empty string",
errcode=Codes.INVALID_PARAM,
)
direction = parse_string(request, "dir", default="f")
if direction not in ("f", "b"):
raise SynapseError(
400, "Unknown direction: %s" % (direction,), errcode=Codes.INVALID_PARAM
)
reverse_order = True if direction == "b" else False
# Return list of rooms according to parameters
rooms, total_rooms = await self.store.get_rooms_paginate(
start, limit, order_by, reverse_order, search_term
)
response = {
# next_token should be opaque, so return a value the client can parse
"offset": start,
"rooms": rooms,
"total_rooms": total_rooms,
}
# Are there more rooms to paginate through after this?
if (start + limit) < total_rooms:
# There are. Calculate where the query should start from next time
# to get the next part of the list
response["next_batch"] = start + limit
# Is it possible to paginate backwards? Check if we currently have an
# offset
if start > 0:
if start > limit:
# Going back one iteration won't take us to the start.
# Calculate new offset
response["prev_batch"] = start - limit
else:
response["prev_batch"] = 0
return 200, response
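# Hedged client-side sketch, not part of Synapse: paging through the room list served
# above by following "next_batch" until it is no longer returned. The URL, token and
# the `requests` library are illustrative assumptions.
def _example_list_all_rooms(base_url, admin_token):
    import requests
    rooms, start = [], 0
    while start is not None:
        resp = requests.get(
            "%s/_synapse/admin/v1/rooms" % base_url,
            headers={"Authorization": "Bearer %s" % admin_token},
            params={"from": start, "limit": 100},
        )
        resp.raise_for_status()
        body = resp.json()
        rooms.extend(body["rooms"])
        start = body.get("next_batch")  # absent on the last page
    return rooms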
class RoomRestServlet(RestServlet):
"""Get room details.
TODO: Add on_POST to allow room creation without joining the room
"""
PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]+)$")
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.auth = hs.get_auth()
self.store = hs.get_datastore()
async def on_GET(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
ret = await self.store.get_room_with_stats(room_id)
if not ret:
raise NotFoundError("Room not found")
members = await self.store.get_users_in_room(room_id)
ret["joined_local_devices"] = await self.store.count_devices_by_users(members)
return (200, ret)
class RoomMembersRestServlet(RestServlet):
"""
Get members list of a room.
"""
PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]+)/members")
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.auth = hs.get_auth()
self.store = hs.get_datastore()
async def on_GET(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
ret = await self.store.get_room(room_id)
if not ret:
raise NotFoundError("Room not found")
members = await self.store.get_users_in_room(room_id)
ret = {"members": members, "total": len(members)}
return 200, ret
class RoomStateRestServlet(RestServlet):
"""
Get full state within a room.
"""
PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]+)/state")
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.clock = hs.get_clock()
self._event_serializer = hs.get_event_client_serializer()
async def on_GET(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
await assert_user_is_admin(self.auth, requester.user)
ret = await self.store.get_room(room_id)
if not ret:
raise NotFoundError("Room not found")
event_ids = await self.store.get_current_state_ids(room_id)
events = await self.store.get_events(event_ids.values())
now = self.clock.time_msec()
room_state = await self._event_serializer.serialize_events(
events.values(),
now,
# We don't bother bundling aggregations in when asked for state
# events, as clients won't use them.
bundle_aggregations=False,
)
ret = {"state": room_state}
return 200, ret
class JoinRoomAliasServlet(RestServlet):
PATTERNS = admin_patterns("/join/(?P<room_identifier>[^/]*)")
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.auth = hs.get_auth()
self.room_member_handler = hs.get_room_member_handler()
self.admin_handler = hs.get_admin_handler()
self.state_handler = hs.get_state_handler()
async def on_POST(
self, request: SynapseRequest, room_identifier: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
await assert_user_is_admin(self.auth, requester.user)
content = parse_json_object_from_request(request)
assert_params_in_dict(content, ["user_id"])
target_user = UserID.from_string(content["user_id"])
if not self.hs.is_mine(target_user):
raise SynapseError(400, "This endpoint can only be used with local users")
if not await self.admin_handler.get_user(target_user):
raise NotFoundError("User not found")
if RoomID.is_valid(room_identifier):
room_id = room_identifier
try:
remote_room_hosts = [
x.decode("ascii") for x in request.args[b"server_name"]
] # type: Optional[List[str]]
except Exception:
remote_room_hosts = None
elif RoomAlias.is_valid(room_identifier):
handler = self.room_member_handler
room_alias = RoomAlias.from_string(room_identifier)
room_id, remote_room_hosts = await handler.lookup_room_alias(room_alias)
else:
raise SynapseError(
400, "%s was not legal room ID or room alias" % (room_identifier,)
)
fake_requester = create_requester(
target_user, authenticated_entity=requester.authenticated_entity
)
# send invite if room has "JoinRules.INVITE"
room_state = await self.state_handler.get_current_state(room_id)
join_rules_event = room_state.get((EventTypes.JoinRules, ""))
if join_rules_event:
if not (join_rules_event.content.get("join_rule") == JoinRules.PUBLIC):
# update_membership with an action of "invite" can raise a
# ShadowBanError. This is not handled since it is assumed that
# an admin isn't going to call this API with a shadow-banned user.
await self.room_member_handler.update_membership(
requester=requester,
target=fake_requester.user,
room_id=room_id,
action="invite",
remote_room_hosts=remote_room_hosts,
ratelimit=False,
)
await self.room_member_handler.update_membership(
requester=fake_requester,
target=fake_requester.user,
room_id=room_id,
action="join",
remote_room_hosts=remote_room_hosts,
ratelimit=False,
)
return 200, {"room_id": room_id}
class MakeRoomAdminRestServlet(RestServlet):
"""Allows a server admin to get power in a room if a local user has power in
a room. Will also invite the user if they're not in the room and it's a
private room. Can specify another user (rather than the admin user) to be
granted power, e.g.:
    POST /_synapse/admin/v1/rooms/<room_id_or_alias>/make_room_admin
{
"user_id": "@foo:example.com"
}
"""
PATTERNS = admin_patterns("/rooms/(?P<room_identifier>[^/]*)/make_room_admin")
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.auth = hs.get_auth()
self.room_member_handler = hs.get_room_member_handler()
self.event_creation_handler = hs.get_event_creation_handler()
self.state_handler = hs.get_state_handler()
self.is_mine_id = hs.is_mine_id
async def on_POST(self, request, room_identifier):
requester = await self.auth.get_user_by_req(request)
await assert_user_is_admin(self.auth, requester.user)
content = parse_json_object_from_request(request, allow_empty_body=True)
# Resolve to a room ID, if necessary.
if RoomID.is_valid(room_identifier):
room_id = room_identifier
elif RoomAlias.is_valid(room_identifier):
room_alias = RoomAlias.from_string(room_identifier)
room_id, _ = await self.room_member_handler.lookup_room_alias(room_alias)
room_id = room_id.to_string()
else:
raise SynapseError(
400, "%s was not legal room ID or room alias" % (room_identifier,)
)
# Which user to grant room admin rights to.
user_to_add = content.get("user_id", requester.user.to_string())
# Figure out which local users currently have power in the room, if any.
room_state = await self.state_handler.get_current_state(room_id)
if not room_state:
raise SynapseError(400, "Server not in room")
create_event = room_state[(EventTypes.Create, "")]
power_levels = room_state.get((EventTypes.PowerLevels, ""))
if power_levels is not None:
# We pick the local user with the highest power.
user_power = power_levels.content.get("users", {})
admin_users = [
user_id for user_id in user_power if self.is_mine_id(user_id)
]
admin_users.sort(key=lambda user: user_power[user])
if not admin_users:
raise SynapseError(400, "No local admin user in room")
admin_user_id = None
for admin_user in reversed(admin_users):
if room_state.get((EventTypes.Member, admin_user)):
admin_user_id = admin_user
break
if not admin_user_id:
raise SynapseError(
400,
"No local admin user in room",
)
pl_content = power_levels.content
else:
# If there is no power level events then the creator has rights.
pl_content = {}
admin_user_id = create_event.sender
if not self.is_mine_id(admin_user_id):
raise SynapseError(
400,
"No local admin user in room",
)
# Grant the user power equal to the room admin by attempting to send an
# updated power level event.
new_pl_content = dict(pl_content)
new_pl_content["users"] = dict(pl_content.get("users", {}))
new_pl_content["users"][user_to_add] = new_pl_content["users"][admin_user_id]
fake_requester = create_requester(
admin_user_id,
authenticated_entity=requester.authenticated_entity,
)
try:
await self.event_creation_handler.create_and_send_nonmember_event(
fake_requester,
event_dict={
"content": new_pl_content,
"sender": admin_user_id,
"type": EventTypes.PowerLevels,
"state_key": "",
"room_id": room_id,
},
)
except AuthError:
# The admin user we found turned out not to have enough power.
raise SynapseError(
400, "No local admin user in room with power to update power levels."
)
# Now we check if the user we're granting admin rights to is already in
# the room. If not and it's not a public room we invite them.
member_event = room_state.get((EventTypes.Member, user_to_add))
is_joined = False
if member_event:
is_joined = member_event.content["membership"] in (
Membership.JOIN,
Membership.INVITE,
)
if is_joined:
return 200, {}
join_rules = room_state.get((EventTypes.JoinRules, ""))
is_public = False
if join_rules:
is_public = join_rules.content.get("join_rule") == JoinRules.PUBLIC
if is_public:
return 200, {}
await self.room_member_handler.update_membership(
fake_requester,
target=UserID.from_string(user_to_add),
room_id=room_id,
action=Membership.INVITE,
)
return 200, {}
class ForwardExtremitiesRestServlet(RestServlet):
"""Allows a server admin to get or clear forward extremities.
Clearing does not require restarting the server.
Clear forward extremities:
DELETE /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
Get forward_extremities:
GET /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
"""
PATTERNS = admin_patterns("/rooms/(?P<room_identifier>[^/]*)/forward_extremities")
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.auth = hs.get_auth()
self.room_member_handler = hs.get_room_member_handler()
self.store = hs.get_datastore()
async def resolve_room_id(self, room_identifier: str) -> str:
"""Resolve to a room ID, if necessary."""
if RoomID.is_valid(room_identifier):
resolved_room_id = room_identifier
elif RoomAlias.is_valid(room_identifier):
room_alias = RoomAlias.from_string(room_identifier)
room_id, _ = await self.room_member_handler.lookup_room_alias(room_alias)
resolved_room_id = room_id.to_string()
else:
raise SynapseError(
400, "%s was not legal room ID or room alias" % (room_identifier,)
)
if not resolved_room_id:
raise SynapseError(
400, "Unknown room ID or room alias %s" % room_identifier
)
return resolved_room_id
async def on_DELETE(self, request, room_identifier):
requester = await self.auth.get_user_by_req(request)
await assert_user_is_admin(self.auth, requester.user)
room_id = await self.resolve_room_id(room_identifier)
deleted_count = await self.store.delete_forward_extremities_for_room(room_id)
return 200, {"deleted": deleted_count}
async def on_GET(self, request, room_identifier):
requester = await self.auth.get_user_by_req(request)
await assert_user_is_admin(self.auth, requester.user)
room_id = await self.resolve_room_id(room_identifier)
extremities = await self.store.get_forward_extremities_for_room(room_id)
return 200, {"count": len(extremities), "results": extremities}
class RoomEventContextServlet(RestServlet):
"""
Provide the context for an event.
This API is designed to be used when system administrators wish to look at
an abuse report and understand what happened during and immediately prior
to this event.
"""
PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]*)/context/(?P<event_id>[^/]*)$")
def __init__(self, hs):
super().__init__()
self.clock = hs.get_clock()
self.room_context_handler = hs.get_room_context_handler()
self._event_serializer = hs.get_event_client_serializer()
self.auth = hs.get_auth()
async def on_GET(self, request, room_id, event_id):
requester = await self.auth.get_user_by_req(request, allow_guest=False)
await assert_user_is_admin(self.auth, requester.user)
limit = parse_integer(request, "limit", default=10)
# picking the API shape for symmetry with /messages
filter_str = parse_string(request, b"filter", encoding="utf-8")
if filter_str:
filter_json = urlparse.unquote(filter_str)
event_filter = Filter(
json_decoder.decode(filter_json)
) # type: Optional[Filter]
else:
event_filter = None
results = await self.room_context_handler.get_event_context(
requester,
room_id,
event_id,
limit,
event_filter,
use_admin_priviledge=True,
)
if not results:
raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
time_now = self.clock.time_msec()
results["events_before"] = await self._event_serializer.serialize_events(
results["events_before"], time_now
)
results["event"] = await self._event_serializer.serialize_event(
results["event"], time_now
)
results["events_after"] = await self._event_serializer.serialize_events(
results["events_after"], time_now
)
results["state"] = await self._event_serializer.serialize_events(
results["state"], time_now
)
return 200, results
|
py | 1a3a37d18ed5555b6972bbecfeb918b6e1bd9793 | import smtplib
from smtplib import SMTPServerDisconnected
from email.message import EmailMessage
import mimetypes
import os
import logging
class MailClient(object):
"""
Example mail client using SMTPlib
Uses config
"""
def __init__(self, config=None, logger=None):
self.mailserver = None
self.logger = logger if logger else logging.getLogger("MailClient")
self.C = config
self.fromaddr = self.C["mail.connection.user"]
self.connect()
def connect(self):
self.mailserver = smtplib.SMTP(self.C["mail.connection.host"], self.C["mail.connection.port"])
self.mailserver.ehlo()
self.mailserver.starttls()
self.mailserver.login(self.fromaddr, self.C["mail.connection.passwd"])
self.logger.info("self.Connected successfully to mail server.")
@staticmethod
def add_attachment(msg, fpath):
"""
Liberated from docs
"""
ctype, encoding = mimetypes.guess_type(fpath)
if ctype is None or encoding is not None:
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
with open(fpath, "rb") as f:
msg.add_attachment(f.read(), maintype=maintype, subtype=subtype, filename=os.path.basename(fpath))
def compose_mail(self, title, body, attachments=None, to=None):
msg = EmailMessage()
msg.set_content(body)
msg["To"] = to if to else ", ".join(self.C["mail.recipients"])
msg["From"] = self.fromaddr
msg["Subject"] = title
if attachments:
if not isinstance(attachments, list):
attachments = [attachments]
self.logger.info("Found {} attachment. Processing".format(len(attachments)))
for attachment in attachments:
self.logger.info("Attaching \"{}\"".format(attachment))
self.add_attachment(msg, attachment)
self.logger.debug("Attached \"{}\"".format(attachment))
return msg
def send(self, msg):
try:
self.mailserver.send_message(msg)
self.logger.info("Mail sent to the {} recipients".format(len(self.C["mail.recipients"])))
except SMTPServerDisconnected:
self.logger.warning("Mail server disconnected. Reconnecting.")
self.connect()
self.send(msg)
if __name__ == '__main__':
from src.config.config import Config
c = Config("mail.yaml")
m = MailClient(config=c)
mail = m.compose_mail("test mail",
"this is a test mail. \n Please ignore the content",
attachments=["attachments/1.txt", "attachments/2.txt"])
m.send(mail)
|
py | 1a3a38b44011478686dc83d1c82949cbbab58086 | yahoo = search.Yahoo()
def singlescan(url):
"""instance to scan single targeted domain"""
if urlparse(url).query != '':
@@ -67,7 +67,7 @@ def singleScan(url):
return vulnerables
def initparser():
"""initialize parser arguments"""
global parser
@@ -80,7 +80,7 @@ def initParser():
if __name__ == "__main__":
initparser()
args = parser.parse_args()
# find random SQLi by dork
@@ -109,8 +109,14 @@ def initParser():
exit(0)
io.stdout("scanning server information")
vulnerableurls = [result[0] for result in vulnerables]
table_data = serverinfo.check(vulnerableurls)
# add db name to info
for result, info in zip(vulnerables, table_data):
info.insert(1, result[1]) # database name
io.fullprint(table_data)
# do reverse domain of given site
@@ -141,7 +147,7 @@ def initParser():
vulnerables = []
for domain in domains:
vulnerables_temp = singlescan(domain)
if vulnerables_temp:
vulnerables += vulnerables_temp
@@ -151,13 +157,18 @@ def initParser():
exit(0)
io.stdout("scanning server information")
vulnerableurls = [result[0] for result in vulnerables]
table_data = serverinfo.check(vulnerableurls)
# add db name to info
for result, info in zip(vulnerables, table_data):
info.insert(1, result[1]) # database name
io.fullprint(table_data)
# scan SQLi of given site
elif args.target:
vulnerables = singlescan(args.target)
if not vulnerables:
exit(0)
@@ -166,9 +177,9 @@ def initParser():
io.stdout("getting server info of domains can take a few mins")
table_data = serverinfo.check([args.target])
io.printserverinfo(table_data)
print "" # give space between two table
io.normalprint(vulnerables)
# print help message, if no parameter is provided
|
py | 1a3a3904da552b504ba1080f23fa59ec937d94c8 | from django.shortcuts import render, HttpResponse
from posts.models import Post
# Create your views here.
def index(request):
posts = Post.objects.all().order_by('-registered_at')[:5]
context = {
'posts' : posts
}
return render(request, 'home/index.html', context) |
py | 1a3a3982731a4be31da1d5f74b89af30c6b79588 | from elegantrl.agents.AgentSAC import AgentSAC
from elegantrl.agents.net import Critic, ActorSAC, ActorFixSAC, CriticREDQ
import torch
import numpy as np
from copy import deepcopy
class AgentREDQ(AgentSAC): # [ElegantRL.2021.11.11]
"""
    Bases: ``AgentSAC``
    Randomized Ensemble Double Q-learning algorithm. “Randomized Ensembled Double Q-Learning: Learning Fast Without A Model”, Xinyue Chen et al., 2021.
:param net_dim[int]: the dimension of networks (the width of neural networks)
:param state_dim[int]: the dimension of state (the number of state vector)
:param action_dim[int]: the dimension of action (the number of discrete action)
    :param reward_scale: scale the reward to get an appropriately scaled Q value
:param gamma: the discount factor of Reinforcement Learning
:param learning_rate: learning rate of optimizer
:param if_per_or_gae: PER (off-policy) or GAE (on-policy) for sparse reward
:param env_num: the env number of VectorEnv. env_num == 1 means don't use VectorEnv
:param gpu_id: the gpu_id of the training device. Use CPU when cuda is not available.
:param G: Update to date ratio
:param M: subset size of critics
:param N: ensemble number of critics
"""
def __init__(self, net_dim, state_dim, action_dim, gpu_id=0, args=None):
self.ClassCri = Critic
self.get_obj_critic = self.get_obj_critic_raw
self.ClassAct = ActorSAC
self.if_use_cri_target = True
self.if_use_act_target = False
self.alpha_log = None
self.alpha_optim = None
self.target_entropy = None
self.obj_critic = (-np.log(0.5)) ** 0.5 # for reliable_lambda
self.act_class = getattr(self, "act_class", ActorFixSAC)
self.cri_class = getattr(self, "cri_class", CriticREDQ)
super().__init__(net_dim, state_dim, action_dim, gpu_id, args)
self.obj_c = (-np.log(0.5)) ** 0.5 # for reliable_lambda
def init(
self,
net_dim=256,
state_dim=8,
action_dim=2,
reward_scale=1.0,
gamma=0.99,
learning_rate=3e-4,
if_per_or_gae=False,
env_num=1,
gpu_id=0,
G=20,
M=2,
N=10,
):
self.gamma = gamma
self.state_dim = state_dim
self.action_dim = action_dim
self.reward_scale = reward_scale
self.traj_list = [[] for _ in range(env_num)]
self.G = G
self.M = M
self.N = N
self.device = torch.device(
f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu"
)
self.cri_list = [
self.ClassCri(net_dim, state_dim, action_dim).to(self.device)
for i in range(self.N)
]
self.act = self.ClassAct(net_dim, state_dim, action_dim).to(self.device)
self.cri_target_list = [deepcopy(self.cri_list[i]) for i in range(N)]
self.cri_optim_list = [
torch.optim.Adam(self.cri_list[i].parameters(), learning_rate)
for i in range(self.N)
]
self.act_optim = torch.optim.Adam(self.act.parameters(), learning_rate)
assert isinstance(if_per_or_gae, bool)
if env_num == 1:
self.explore_env = self.explore_one_env
else:
self.explore_env = self.explore_vec_env
self.alpha_log = torch.zeros(
1, requires_grad=True, device=self.device
) # trainable parameter
self.alpha_optim = torch.optim.Adam([self.alpha_log], lr=learning_rate)
self.target_entropy = np.log(action_dim)
self.criterion = torch.nn.MSELoss()
def get_obj_critic_raw(self, buffer, batch_size):
with torch.no_grad():
reward, mask, action, state, next_s = buffer.sample_batch(batch_size)
next_a, next_log_prob = self.act_target.get_action_logprob(
next_s
) # stochastic policy
next_q = self.cri_target.get_q_min(next_s, next_a)
alpha = self.alpha_log.exp().detach()
q_label = reward + mask * (next_q + next_log_prob * alpha)
qs = self.cri.get_q_values(state, action)
obj_critic = self.criterion(qs, q_label * torch.ones_like(qs))
return obj_critic, state
def get_obj_critic_per(self, buffer, batch_size):
with torch.no_grad():
reward, mask, action, state, next_s, is_weights = buffer.sample_batch(
batch_size
)
next_a, next_log_prob = self.act_target.get_action_logprob(
next_s
) # stochastic policy
next_q = self.cri_target.get_q_min(next_s, next_a)
alpha = self.alpha_log.exp().detach()
q_label = reward + mask * (next_q + next_log_prob * alpha)
qs = self.cri.get_q_values(state, action)
td_error = self.criterion(qs, q_label * torch.ones_like(qs)).mean(dim=1)
obj_critic = (td_error * is_weights).mean()
buffer.td_error_update(td_error.detach())
return obj_critic, state
def get_obj_critic_raw_(self, buffer, batch_size, alpha):
"""
Calculate the loss of networks with **uniform sampling**.
:param buffer: the ReplayBuffer instance that stores the trajectories.
:param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
:param alpha: the trade-off coefficient of entropy regularization.
:return: the loss of the network and states.
"""
with torch.no_grad():
batch = buffer.sample_batch(batch_size)
state = torch.Tensor(batch["obs1"]).to(self.device)
next_s = torch.Tensor(batch["obs2"]).to(self.device)
action = torch.Tensor(batch["acts"]).to(self.device)
reward = torch.Tensor(batch["rews"]).unsqueeze(1).to(self.device)
mask = torch.Tensor(batch["done"]).unsqueeze(1).to(self.device)
# state, next_s, actions, reward, mask = buffer.sample_batch(batch_size)
# print(batch_size,reward.shape,mask.shape,action.shape, state.shape, next_s.shape)
next_a, next_log_prob = self.act.get_action_logprob(
next_s
) # stochastic policy
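            # REDQ target: randomly pick M of the N target critics and take the element-wise min of their
            # Q estimates; alpha * next_log_prob is then subtracted to form the bootstrap value.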
g = torch.Generator()
g.manual_seed(torch.randint(high=10000000, size=(1,))[0].item())
a = torch.randperm(self.N, generator=g)
# a = np.random.choice(self.N, self.M, replace=False)
# print(a[:M])
q_tmp = [self.cri_target_list[a[j]](next_s, next_a) for j in range(self.M)]
q_prediction_next_cat = torch.cat(q_tmp, 1)
min_q, min_indices = torch.min(q_prediction_next_cat, dim=1, keepdim=True)
next_q_with_log_prob = min_q - alpha * next_log_prob
y_q = reward + (1 - mask) * self.gamma * next_q_with_log_prob
q_values = [
self.cri_list[j](state, action) for j in range(self.N)
] # todo ensemble
q_values_cat = torch.cat(q_values, dim=1)
y_q = y_q.expand(-1, self.N) if y_q.shape[1] == 1 else y_q
obj_critic = self.criterion(q_values_cat, y_q) * self.N
return obj_critic, state
# return y_q, state,action
def select_actions_(self, state, size, env):
"""
Select continuous actions for exploration
:param state: states.shape==(batch_size, state_dim, )
:return: actions.shape==(batch_size, action_dim, ), -1 < action < +1
"""
state = state.to(self.device)
actions = self.act.get_action(state)
return actions.detach().cpu()
def cri_multi_train_(self, k):
q_values = self.cri_list[k](self.state, self.action)
obj = self.criterion(q_values, self.y_q)
self.cri_optim_list[k].zero_grad()
obj.backward()
self.cri_optim_list[k].step()
def update_net_(self, buffer, batch_size, soft_update_tau):
# buffer.update_now_len()
"""
Update the neural networks by sampling batch data from ``ReplayBuffer``.
:param buffer: the ReplayBuffer instance that stores the trajectories.
:param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
:param soft_update_tau: the soft update parameter.
:return: a tuple of the log information.
"""
for i in range(self.G):
alpha = self.alpha_log.cpu().exp().item()
"""objective of critic (loss function of critic)"""
obj_critic, state = self.get_obj_critic(buffer, batch_size, alpha)
# self.y_q, self.state,self.action = self.get_obj_critic(buffer, batch_size, alpha)
for q_i in range(self.N):
self.cri_optim_list[q_i].zero_grad()
obj_critic.backward()
if ((i + 1) % self.G == 0) or i == self.G - 1:
a_noise_pg, logprob = self.act.get_action_logprob(
state
) # policy gradient
"""objective of alpha (temperature parameter automatic adjustment)"""
cri_tmp = []
for j in range(self.N):
self.cri_list[j].requires_grad_(False)
cri_tmp.append(self.cri_list[j](state, a_noise_pg))
q_value_pg = torch.cat(cri_tmp, 1)
q_value_pg = torch.mean(q_value_pg, dim=1, keepdim=True)
obj_actor = (-q_value_pg + logprob * alpha).mean() # todo ensemble
self.act_optim.zero_grad()
obj_actor.backward()
for j in range(self.N):
self.cri_list[j].requires_grad_(True)
obj_alpha = -(self.alpha_log * (logprob - 1).detach()).mean()
self.optim_update(self.alpha_optim, obj_alpha)
for q_i in range(self.N):
self.cri_optim_list[q_i].step()
if ((i + 1) % self.G == 0) or i == self.G - 1:
self.act_optim.step()
for q_i in range(self.N):
self.soft_update(
self.cri_target_list[q_i], self.cri_list[q_i], soft_update_tau
)
return obj_actor, alpha
|
py | 1a3a39da8ad6c25d198a38e8c7ecadf295767291 | from asyncore import dispatcher_with_send
from concurrent.futures import thread
import imp
import multiprocessing
from queue import Queue
from neuron import States
import wirelessNode
import neuronNetworks
from configure import config
from multiprocessing import Process
from multiprocessing import Value
import numpy as np
class Model(object):
def __init__(self):
self.wn = wirelessNode.wirelessNetwork()
self.nn = neuronNetworks.neuronNetwork()
self.wn.setNN(self.nn)
self.complete = True
def setWeight(self, srcID, desID, weight):
self.nn.setConnectWeight(srcID, desID, weight)
def setSelfWeight(self, nodeID, weight):
self.nn.setNodeSelfWeight(nodeID, weight)
def addEdge(self, srcID, desID):
self.nn.addConnect(srcID, desID)
def delEdge(self, srcID, desID):
self.nn.delConnect(srcID, desID)
def getNodeState(self, nodeID):
return self.nn.getNodeState(nodeID)
def getConnectToNode(self, nodeID):
return self.nn.getConnectToNode(nodeID)
def isComplete(self):
if self.runFlag.value == 0:
return False
else:
return True
def recordStructure(self, stream):
self.nn.recordStructure(stream)
    # When this function is called, the worker runs again (the test is restarted)
def resetResult(self):
self.startTest()
def setLastLoss(self, minValue):
self.nn.setLastLoss(minValue)
def getResult(self):
return self.result.value
def setModelID(self, id):
self.id = id
def startTest(self):
self.runFlag = Value('b', False)
self.result = Value('f', 1)
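        # runFlag and result are shared-memory Values: the child process sets runFlag to 1 when finished
        # and stores the root-mean-square of the network's result in result.value.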
self.t = Process(target=self.doTest, args=(self.nn, self.wn, self.runFlag, self.result))
self.t.start()
return True
def doTest(self, nn, wn, runFlag, result):
        # While not complete, keep working until the test finishes; after that, complete is changed to True
timeSec = 0
slotPerSec = config.slotPerSec
nn.resetResult()
while nn.isComplete() == False:
for i in range(slotPerSec):
wn.timeLapse(timeSec, i)
timeSec = timeSec + 1
runResult = nn.getResult()
value = np.array(runResult)
result.value = np.sqrt(((value) ** 2).mean())
runFlag.value = 1
|
py | 1a3a3a5a19ebb776f90a7e379fe0566f5a7df493 | """
Run a large scale benchmark.
We measure: {dataset, encoder, model, train and test accuracy measures, train and test runtimes, feature count}.
Note: A reasonably recent version of sklearn is required to run GradientBoostingClassifier and MLPClassifier.
"""
import os
import warnings
import pandas as pd
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import category_encoders
from examples.benchmarking_large import arff_loader
from examples.benchmarking_large.util import train_model, train_encoder
# The settings are taken from:
# Data-driven advice for applying machine learning to bioinformatics problems, Olson et al.
# Following models have high variance of results: SGD, SVC and DecisionTree. That is not a big deal.
# Just be careful during result interpretation.
# Also, following models are slow because of their configuration: GradientBoosting and RandomForest.
# SGD and DecisionTree benefit from stronger regularization.
models = [SGDClassifier(loss='modified_huber', max_iter=50, tol=1e-3),
LogisticRegression(C=1.5, penalty='l1', fit_intercept=True),
SVC(kernel='poly', probability=True, C=0.01, gamma=0.1, degree=3, coef0=10.0),
KNeighborsClassifier(),
GaussianNB(),
DecisionTreeClassifier(max_depth=4),
GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=500, max_depth=3, max_features='log2'),
RandomForestClassifier(n_estimators=500, max_features=0.25, criterion='entropy'),
MLPClassifier()]
# We use Arff datasets on GitHub. But once OpenML loader will be part of scikit-learn:
# https://github.com/scikit-learn/scikit-learn/pull/11419
# the plan is to move on OpenML.
# We ignore datasets without any polynomial feature.
# We also ignore 'splice.arff', 'anneal.arff', 'anneal.orig.arff' due to high runtime.
# Datasets sensitive to amount of regularization are:
# breast.cancer.arff Medium impact
# bridges.version1.arff
# bridges.version2.arff
# car.arff
# colic.arff
# cylinder.bands.arff Medium impact
# flags.arff Large impact
# heart.c.arff
# hepatitis.arff
# hypothyroid.arff
# kr.vs.kp.arff
# labor.arff Large impact
# lymph.arff
# nursery.arff
# postoperative.patient.data.arff Large impact
# primary.tumor.arff
# solar.flare1.arff Medium impact
# solar.flare2.arff Medium impact
# soybean.arff Large impact
# sick.arff
# spectrometer.arff Large impact
# sponge.arff Large impact
# tic-tac-toe.arff
# trains.arff Medium impact (note that this is a tiny dataset -> with high variance)
datasets = [#'audiology.arff', 'autos.arff', 'breast.cancer.arff', 'bridges.version1.arff', 'bridges.version2.arff', 'car.arff',
# 'colic.arff',
'credit.a.arff', 'credit.g.arff', 'cylinder.bands.arff', 'flags.arff', 'heart.c.arff', 'heart.h.arff',
'hepatitis.arff', 'hypothyroid.arff', 'kr.vs.kp.arff', 'labor.arff', 'lymph.arff', 'mushroom.arff', 'nursery.arff',
'postoperative.patient.data.arff', 'primary.tumor.arff', 'sick.arff', 'solar.flare1.arff', 'solar.flare2.arff',
'soybean.arff', 'spectrometer.arff', 'sponge.arff', 'tic-tac-toe.arff', 'trains.arff', 'vote.arff', 'vowel.arff']
# datasets = ['postoperative.patient.data.arff']
# datasets = ['amazon.csv', 'carvana.csv', 'erasmus.csv', 'internetusage.csv', 'ipumsla97small.csv', 'kobe.csv', 'pbcseq.csv', 'phpvcoG8S.csv', 'westnile.csv']
# We ignore encoders {BackwardDifferenceEncoder, HelmertEncoder, PolynomialEncoder and SumEncoder} because of:
# https://github.com/scikit-learn-contrib/categorical-encoding/issues/91
encoders = [ category_encoders.TargetEncoderV2()]
# Initialization
if os.path.isfile('./output/result.csv'):
os.remove('./output/result.csv')
# Ok...
warnings.filterwarnings('ignore')
# Loop over datasets, then over encoders, and finally, over the models
for dataset_name in datasets:
X, y, fold_count = arff_loader.load(dataset_name)
non_numeric = list(X.select_dtypes(exclude=[np.number]).columns.values)
for encoder in encoders:
print("Encoding:", dataset_name, y.name, encoder.__class__.__name__)
folds, fit_encoder_time, score_encoder_time = train_encoder(X, y, fold_count, encoder)
for model in models:
print('Evaluating:', dataset_name, encoder.__class__.__name__, model.__class__.__name__)
scores, fit_model_time, score_model_time = train_model(folds, model)
# Log into csv
result = pd.DataFrame([dataset_name, y.name, encoder.__class__.__name__, model.__class__.__name__, X.shape[1],
folds[0][0].shape[1], fit_encoder_time, score_encoder_time, fit_model_time, score_model_time]
+ list(scores)).T
if not os.path.isfile('./output/result.csv'):
result.to_csv('./output/result.csv',
header=['dataset', 'target', 'encoder', 'model', 'input_features', 'output_features', 'fit_encoder_time',
'score_encoder_time', 'fit_model_time', 'score_model_time', 'test_matthews', 'train_matthews',
'test_auc', 'train_auc', 'test_brier', 'train_brier'], index=False)
else:
result.to_csv('./output/result.csv', mode='a', header=False, index=False)
print('Finished. The result was stored into ./output/result.csv.')
|
py | 1a3a3aa39ebe459d08ab6a435a19ca79622f0593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Django settings for scrapy_joy project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$k70*9=58#2%p(!b_1ox*!96^&vuvwz)3oq8&-yvofetyjyy)#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'xadmin',
'crispy_forms',
# 'reversion',
'kombu.transport.django',
'djcelery',
'dynamic_scraper',
'debug_toolbar',
'scrapy_joy',
'open_news',
'open_loan',
'open_insurance',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'scrapy_joy.urls'
WSGI_APPLICATION = 'scrapy_joy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'scrapy_joy2',
'USER': 'root',
'PASSWORD': '123456',
'HOST': '127.0.0.1',
'PORT': '3306',
'OPTIONS': {'init_command': 'SET storage_engine=INNODB;'}
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'zh_cn'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
DATE_FORMAT = 'Y-m-d'
DATETIME_FORMAT = 'Y-m-d H:i'
TIME_FORMAT = 'H:i'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'templates'),
)
# **** Email sending settings ****
EMAIL_HOST = 'smtp.163.com'
EMAIL_PORT = 25
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'dafcnranfmcvwrds'
EMAIL_SUBJECT_PREFIX = u'[Kaisa利率]'
DEFAULT_FROM_EMAIL = u'Kaisa利率 <[email protected]>'
SERVER_EMAIL = '[email protected]'
HOST_NAME = 'http://127.0.0.1:8000'
# **** cacheops cache settings ****
CACHEOPS_REDIS = {
'host': 'localhost',
'port': 6379,
'db': 1,
'socket_timeout': 3,
'password': '',
}
CACHEOPS_DEFAULTS = {
'timeout': 60*60
}
CACHEOPS = {
'auth.user': {'ops': 'get', 'timeout': 60*15},
'auth.*': {'ops': ('fetch', 'get')},
'auth.permission': {'ops': 'all'},
'*.*': {},
}
DEBUG_TOOLBAR_PATCH_SETTINGS = False
INTERNAL_IPS = '127.0.0.1'
# django-celery settings
import djcelery
djcelery.setup_loader()
BROKER_HOST = "localhost"
BROKER_PORT = 5672
BROKER_BACKEND = "django"
BROKER_USER = "guest"
BROKER_PASSWORD = "guest"
BROKER_VHOST = "/"
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
try:
from local_settings import *
except:
pass |
py | 1a3a3ac204551e622b08f2b152baf2baa2b08a5a | from .passive_components import Filter
from .active_components import Amplifier
VALID_PASSIVE = [
'Filter',
'Attenuator',
'Mixer',
'Coupler',
'Tap',
'Splitter',
]
VALID_ACTIVE = [
'Amplifier',
'ActiveMixer',
'Switch',
]
VALID_COMPONENTS = VALID_PASSIVE + VALID_ACTIVE
def component_builder(comp_dict):
"""
This function builds an actual component object from a dictionary as parsed from the xml_parser
Args:
comp_dict (dict): Component dictionary
Returns:
comp (Component): Component object of the correct type
"""
uid = comp_dict['uid']
name = comp_dict['name']
comp_type = comp_dict['type']
if comp_type in VALID_COMPONENTS:
# valid component
classHandle = globals()[comp_type] # get handle to class name
compObj = classHandle(uid, name) # create instance of the component class
# add all parameters to the component object
params_dict = comp_dict['params']
for key, val in params_dict.items():
compObj.add_parameter(**val)
return compObj
else:
raise Exception("Invalid component type ({}). Valid components: {}".format(comp_type, VALID_COMPONENTS))
|
py | 1a3a3ad18869dc874318d11b740904124e299621 | from fractions import Fraction
from unittest import TestCase
from musurgia.fractaltree.fractaltree import FractalTree
class Test(TestCase):
def setUp(self) -> None:
self.ft = FractalTree(proportions=[1, 2, 3], tree_permutation_order=[3, 1, 2], value=10)
def test_0(self):
with self.assertRaises(Exception):
self.ft.get_layer(1)
def test_1(self):
self.assertEqual([self.ft], self.ft.get_layer(0))
def test_2(self):
self.ft.add_layer()
result = self.ft.get_children()
self.assertEqual(result, self.ft.get_layer(1))
def test_3(self):
for i in range(3):
self.ft.add_layer()
result = self.ft.get_children()
self.assertEqual(result, self.ft.get_layer(1))
def test_4(self):
for i in range(3):
self.ft.add_layer()
result = [child.get_children() for child in self.ft.get_children()]
self.assertEqual(result, self.ft.get_layer(2))
def test_5(self):
for i in range(3):
self.ft.add_layer()
result = self.ft.get_leaves()
self.assertEqual(result, self.ft.get_layer(3))
def test_6(self):
for i in range(3):
self.ft.add_layer()
with self.assertRaises(ValueError):
self.ft.get_layer(4)
def test_7(self):
self.ft.add_layer()
self.ft.add_layer(lambda n: True if n.fractal_order > 1 else False)
self.ft.add_layer(lambda n: True if n.fractal_order > 1 else False)
self.ft.add_layer(lambda n: True if n.fractal_order > 1 else False)
self.ft.add_layer(lambda n: True if n.fractal_order > 1 else False)
result = [[['1.1'], [['1.2.1'], ['1.2.2.1', '1.2.2.2', '1.2.2.3'], ['1.2.3.1', '1.2.3.2', '1.2.3.3']],
[['1.3.1.1', '1.3.1.2', '1.3.1.3'], ['1.3.2'], ['1.3.3.1', '1.3.3.2', '1.3.3.3']]], '2',
[[['3.1.1'], ['3.1.2.1', '3.1.2.2', '3.1.2.3'], ['3.1.3.1', '3.1.3.2', '3.1.3.3']],
[['3.2.1.1', '3.2.1.2', '3.2.1.3'], ['3.2.2'], ['3.2.3.1', '3.2.3.2', '3.2.3.3']], ['3.3']]]
self.assertEqual(result, [name for name in self.ft.get_layer(4, key='name')])
def test_7_1(self):
self.ft.add_layer()
self.ft.add_layer()
self.assertEqual([10], self.ft.get_layer(0, key='value'))
def test_7_2(self):
self.ft.add_layer()
self.ft.add_layer()
result = [Fraction(5, 1), Fraction(5, 3), Fraction(10, 3)]
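        # Layer 1 splits the root value 10 by proportions [1, 2, 3] reordered by tree_permutation_order [3, 1, 2]: 10*3/6, 10*1/6, 10*2/6.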
self.assertEqual(result, self.ft.get_layer(1, key='value'))
def test_7_3(self):
self.ft.add_layer()
self.ft.add_layer()
result = [[Fraction(5, 6), Fraction(5, 3), Fraction(5, 2)], [Fraction(5, 6), Fraction(5, 18), Fraction(5, 9)],
[Fraction(10, 9), Fraction(5, 3), Fraction(5, 9)]]
self.assertEqual(result, self.ft.get_layer(2, key='value'))
def test_get_layer_key_lambda(self):
self.ft.add_layer()
self.ft.add_layer()
result = [[0.83, 1.67, 2.5], [0.83, 0.28, 0.56], [1.11, 1.67, 0.56]]
self.assertEqual(result, self.ft.get_layer(2, key=lambda node: round(float(node.value), 2)))
|
py | 1a3a3b1da92e65dc39e7d3bb038cfc0db1125fa9 | """Test generating the Xibo API."""
from meetup2xibo.updater.xibo_api import XiboApi
from meetup2xibo.updater.xibo_event import XiboEvent
from meetup2xibo.updater.http_response_error import XiboApiError
from requests_toolbelt.utils import dump
import json
import os
import pytest
SAMPLE_URL = "https://example.com/api"
SAMPLE_XIBO_EVENT_COLUMNS = {
'dataSetColumnId_1': "Nova Labs Open House",
'dataSetColumnId_2': "Orange Bay",
'dataSetColumnId_3': "zvbxrpl2",
'dataSetColumnId_4': "2019-02-26 15:00:00",
'dataSetColumnId_5': "2019-02-26 17:00:00"
}
SAMPLE_ABOUT_JSON = json.loads("""{
"sourceUrl": null,
"version": "1.8.12"
}""")
SAMPLE_JSON_LIST_0 = json.loads("[]")
SAMPLE_JSON_LIST_1 = json.loads("[111]")
SAMPLE_JSON_LIST_2 = json.loads("[211, 222]")
SAMPLE_JSON_LIST_3 = json.loads("[311, 322, 333]")
SAMPLE_XIBO_PAGE_LENGTH = 3
REAL_XIBO_PAGE_LENGTH = 50
def save_json(the_json, path):
"""Save JSON to a file."""
pretty_json = json.dumps(the_json, indent = 4, sort_keys = True)
with path.with_suffix(".json").open("w") as f:
print(pretty_json, file = f)
def save_response(response, path):
"""Save an HTTP response to the path."""
with path.with_suffix(".txt").open("w") as f:
data = dump.dump_response(response)
print(data.decode('utf-8'), file = f)
def test_bad_status(xibo_session, xibo_api_url_builder):
"""Test raising a Xibo API error for a bad HTTP response status."""
bad_about_url = xibo_api_url_builder.about_url() + "x"
xibo_api = XiboApi(xibo_session, xibo_api_url_builder, SAMPLE_XIBO_PAGE_LENGTH)
with pytest.raises(XiboApiError, match=r'.*HTTP status is \d+, not ok.*'):
xibo_api.get_response(bad_about_url)
@pytest.mark.skip(reason="Not authorized to use this API service")
def test_about_response(module_file_path, xibo_session, xibo_api_url_builder):
"""Save response from an "about" request to Xibo."""
xibo_api = XiboApi(xibo_session, xibo_api_url_builder, SAMPLE_XIBO_PAGE_LENGTH)
xibo_json = xibo_api.get_about()
save_json(xibo_json, module_file_path)
def test_get_xibo_api_version(mocker):
"""Testing getting the Xibo API version number."""
xibo_api = XiboApi(None, None, SAMPLE_XIBO_PAGE_LENGTH)
xibo_api.get_about = mocker.Mock(return_value = SAMPLE_ABOUT_JSON)
assert xibo_api.get_xibo_api_version() == "1.8.12"
def test_get_datasets_by_code_response(module_file_path, xibo_session, xibo_api_url_builder):
"""Save response from a "dataset" request to Xibo."""
dataset_code = os.getenv("EVENT_DATASET_CODE")
if not dataset_code:
pytest.skip("Define environment variable EVENT_DATASET_CODE")
xibo_api = XiboApi(xibo_session, xibo_api_url_builder, SAMPLE_XIBO_PAGE_LENGTH)
xibo_json = xibo_api.get_datasets_by_code(dataset_code)
save_json(xibo_json, module_file_path)
def test_get_dataset_column_response(module_file_path, xibo_session, xibo_api_url_builder):
"""Save response from a "dataset column" request to Xibo."""
dataset_id = os.getenv("EVENT_DATASET_ID")
if not dataset_id:
pytest.skip("Define environment variable EVENT_DATASET_ID")
xibo_api = XiboApi(xibo_session, xibo_api_url_builder, REAL_XIBO_PAGE_LENGTH)
xibo_json = xibo_api.get_dataset_column_by_id(dataset_id)
save_json(list(xibo_json), module_file_path)
def test_get_response(module_file_path, xibo_session, xibo_api_url_builder):
"""Save response from a "dataset data" request to Xibo."""
dataset_id = os.getenv("EVENT_DATASET_ID")
if not dataset_id:
pytest.skip("Define environment variable EVENT_DATASET_ID")
xibo_api = XiboApi(xibo_session, xibo_api_url_builder, SAMPLE_XIBO_PAGE_LENGTH)
url = xibo_api_url_builder.dataset_data_url(dataset_id)
response = xibo_api.get_response(url, start = 100, length = 7)
save_response(response, module_file_path)
def test_get_dataset_data(module_file_path, xibo_session, xibo_api_url_builder):
"""Save JSON from a "dataset data" request to Xibo."""
dataset_id = os.getenv("EVENT_DATASET_ID")
if not dataset_id:
pytest.skip("Define environment variable EVENT_DATASET_ID")
xibo_api = XiboApi(xibo_session, xibo_api_url_builder, REAL_XIBO_PAGE_LENGTH)
xibo_json = xibo_api.get_dataset_data_by_id(dataset_id)
save_json(list(xibo_json), module_file_path)
def test_delete_row_response(module_file_path, xibo_session, xibo_api_url_builder):
"""Save response from a "dataset data delete" request to Xibo."""
row_id = os.getenv("DELETE_ROW_ID")
if not row_id:
pytest.skip("Environment variable DELETE_ROW_ID is not defined")
dataset_id = os.getenv("EVENT_DATASET_ID")
if not dataset_id:
pytest.skip("Define environment variable EVENT_DATASET_ID")
xibo_api = XiboApi(xibo_session, xibo_api_url_builder, SAMPLE_XIBO_PAGE_LENGTH)
response = xibo_api.delete_dataset_data_by_id(dataset_id, row_id)
save_response(response, module_file_path)
def test_insert_dataset_data_response(module_file_path, xibo_session, xibo_api_url_builder):
"""Save response from a "dataset data insert" request to Xibo."""
dataset_id = os.getenv("EVENT_DATASET_ID")
if not dataset_id:
pytest.skip("Define environment variable EVENT_DATASET_ID")
xibo_api = XiboApi(xibo_session, xibo_api_url_builder, SAMPLE_XIBO_PAGE_LENGTH)
response = xibo_api.insert_dataset_data(dataset_id, SAMPLE_XIBO_EVENT_COLUMNS)
save_response(response, module_file_path)
def test_update_dataset_data_response(module_file_path, xibo_session, xibo_api_url_builder):
"""Save response from a "dataset data update" request to Xibo."""
row_id = os.getenv("UPDATE_ROW_ID")
if not row_id:
pytest.skip("Environment variable UPDATE_ROW_ID is not defined")
dataset_id = os.getenv("EVENT_DATASET_ID")
if not dataset_id:
pytest.skip("Define environment variable EVENT_DATASET_ID")
xibo_api = XiboApi(xibo_session, xibo_api_url_builder, SAMPLE_XIBO_PAGE_LENGTH)
response = xibo_api.update_dataset_data(dataset_id, row_id, SAMPLE_XIBO_EVENT_COLUMNS)
save_response(response, module_file_path)
def test_get_paged_json_0(mocker):
"""Test getting 0 paged JSON results."""
xibo_api = XiboApi(None, None, SAMPLE_XIBO_PAGE_LENGTH)
xibo_api.get_json = mocker.Mock(return_value = SAMPLE_JSON_LIST_0)
results = xibo_api.get_paged_json(SAMPLE_URL)
assert list(results) == SAMPLE_JSON_LIST_0
xibo_api.get_json.assert_called_once_with(SAMPLE_URL, start = 0, length = SAMPLE_XIBO_PAGE_LENGTH)
def test_get_paged_json_1(mocker):
"""Test getting 1 paged JSON result."""
xibo_api = XiboApi(None, None, SAMPLE_XIBO_PAGE_LENGTH)
xibo_api.get_json = mocker.Mock(return_value = SAMPLE_JSON_LIST_1)
results = xibo_api.get_paged_json(SAMPLE_URL)
assert list(results) == SAMPLE_JSON_LIST_1
xibo_api.get_json.assert_called_once_with(SAMPLE_URL, start = 0, length = SAMPLE_XIBO_PAGE_LENGTH)
def test_get_paged_json_2(mocker):
"""Test getting 2 paged JSON results."""
xibo_api = XiboApi(None, None, SAMPLE_XIBO_PAGE_LENGTH)
xibo_api.get_json = mocker.Mock(return_value = SAMPLE_JSON_LIST_2)
results = xibo_api.get_paged_json(SAMPLE_URL)
assert list(results) == SAMPLE_JSON_LIST_2
xibo_api.get_json.assert_called_once_with(SAMPLE_URL, start = 0, length = SAMPLE_XIBO_PAGE_LENGTH)
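# A page as long as the configured page length triggers a follow-up request;
# a shorter or empty page ends the paging loop, as the two-page tests below verify.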
def test_get_paged_json_3(mocker):
"""Test getting 3 paged JSON results, requiring two pages."""
return_values = [SAMPLE_JSON_LIST_3, SAMPLE_JSON_LIST_0]
expected_calls = [
mocker.call(SAMPLE_URL, start = 0, length = SAMPLE_XIBO_PAGE_LENGTH),
mocker.call(SAMPLE_URL, start = SAMPLE_XIBO_PAGE_LENGTH, length = SAMPLE_XIBO_PAGE_LENGTH),
]
xibo_api = XiboApi(None, None, SAMPLE_XIBO_PAGE_LENGTH)
xibo_api.get_json = mocker.Mock(side_effect = return_values)
results = xibo_api.get_paged_json(SAMPLE_URL)
assert list(results) == SAMPLE_JSON_LIST_3
assert xibo_api.get_json.call_args_list == expected_calls
def test_get_paged_json_4(mocker):
"""Test getting 4 paged JSON results, requiring two pages."""
return_values = [SAMPLE_JSON_LIST_3, SAMPLE_JSON_LIST_1]
expected_calls = [
mocker.call(SAMPLE_URL, start = 0, length = SAMPLE_XIBO_PAGE_LENGTH),
mocker.call(SAMPLE_URL, start = SAMPLE_XIBO_PAGE_LENGTH, length = SAMPLE_XIBO_PAGE_LENGTH),
]
xibo_api = XiboApi(None, None, SAMPLE_XIBO_PAGE_LENGTH)
xibo_api.get_json = mocker.Mock(side_effect = return_values)
results = xibo_api.get_paged_json(SAMPLE_URL)
assert list(results) == SAMPLE_JSON_LIST_3 + SAMPLE_JSON_LIST_1
assert xibo_api.get_json.call_args_list == expected_calls
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 autoindent
|
py | 1a3a3b1f1a9d70de5cee8ad8ec73e87ecc979180 | """SFP - Simple Functional Programming."""
from functools import reduce
def tail(iterable):
"""Get tail of a iterable is everything except the first element."""
r = [x for x in iterable]
return r[1:]
def pipe(*args):
"""All the arguments given to this function will be passed as param to
`reduce` and it will return a function with all closures set to pipe in."""
return reduce(_pipe, args)
def _pipe(curr, prev):
"""Callback to `reduce` function."""
return lambda x: prev(curr(x))
def compose(*args):
"""Composes all the given function in one."""
return reduce(lambda fun, tion: lambda arg: fun(tion(arg)),
args,
lambda arg: arg)
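# Minimal usage sketch (illustrative only):
#   double = lambda x: x * 2
#   inc = lambda x: x + 1
#   pipe(double, inc)(3)     # -> inc(double(3)) == 7
#   compose(double, inc)(3)  # -> double(inc(3)) == 8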
|
py | 1a3a3dcdc3550fd5e3257b579bd966922f1accb6 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'meuSite.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | 1a3a3dee63379fcc88483acf53619ced9badf877 | from dataclasses import dataclass, field
from .base import GameServerPacket
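# NOTE: Int8 is assumed to be defined by this project's own packet/data-type helpers; it is not imported in this snippet.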
@dataclass
class ActionFailed(GameServerPacket):
type: Int8 = field(default=37, init=False, repr=False)
|
py | 1a3a3fe4eacdc4264039dc54d319498859454d31 | import unittest
import openfigi
class MyTestCase(unittest.TestCase):
def test_wkn_ticker_anonymous(self):
"""Get an ETF by WKN and check if response makes sense"""
ofg = openfigi.OpenFigi()
ofg.enqueue_request(id_type='ID_WERTPAPIER', id_value='A0YEDG')
response = ofg.fetch_response()
self.assertTrue(type(response) is list)
self.assertTrue(len(response) > 0)
self.assertTrue(type(response[0]) is dict)
self.assertTrue('data' in response[0].keys())
self.assertTrue(len(response[0]['data']) > 0)
if __name__ == '__main__':
unittest.main()
|
py | 1a3a400db1b16960977a3822c23767ffb95d86b2 | import base64
import datetime
import hashlib
import json
from urllib.parse import parse_qs, urlencode, urlparse
from django.contrib.auth import get_user_model
from django.test import RequestFactory, TestCase
from django.urls import reverse
from django.utils import timezone
from django.utils.crypto import get_random_string
from oauthlib.oauth2.rfc6749 import errors as oauthlib_errors
from oauth2_provider.models import (
get_access_token_model, get_application_model,
get_grant_model, get_refresh_token_model
)
from oauth2_provider.settings import oauth2_settings
from oauth2_provider.views import ProtectedResourceView
from .utils import get_basic_auth_header
Application = get_application_model()
AccessToken = get_access_token_model()
Grant = get_grant_model()
RefreshToken = get_refresh_token_model()
UserModel = get_user_model()
# mocking a protected resource view
class ResourceView(ProtectedResourceView):
def get(self, request, *args, **kwargs):
return "This is a protected resource"
class BaseTest(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.test_user = UserModel.objects.create_user("test_user", "[email protected]", "123456")
self.dev_user = UserModel.objects.create_user("dev_user", "[email protected]", "123456")
oauth2_settings.ALLOWED_REDIRECT_URI_SCHEMES = ["http", "custom-scheme"]
self.application = Application(
name="Test Application",
redirect_uris=(
"http://localhost http://example.com http://example.org custom-scheme://example.com"
),
user=self.dev_user,
client_type=Application.CLIENT_CONFIDENTIAL,
authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,
)
self.application.save()
oauth2_settings._SCOPES = ["read", "write"]
oauth2_settings._DEFAULT_SCOPES = ["read", "write"]
def tearDown(self):
self.application.delete()
self.test_user.delete()
self.dev_user.delete()
class TestRegressionIssue315(BaseTest):
"""
Test to avoid regression for the issue 315: request object
was being reassigned when getting AuthorizationView
"""
def test_request_is_not_overwritten(self):
self.client.login(username="test_user", password="123456")
query_string = urlencode({
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
assert "request" not in response.context_data
class TestAuthorizationCodeView(BaseTest):
def test_skip_authorization_completely(self):
"""
If application.skip_authorization = True, should skip the authorization page.
"""
self.client.login(username="test_user", password="123456")
self.application.skip_authorization = True
self.application.save()
query_string = urlencode({
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_pre_auth_invalid_client(self):
"""
Test error for an invalid client_id with response_type: code
"""
self.client.login(username="test_user", password="123456")
query_string = urlencode({
"client_id": "fakeclientid",
"response_type": "code",
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.context_data["url"],
"?error=invalid_request&error_description=Invalid+client_id+parameter+value."
)
def test_pre_auth_valid_client(self):
"""
Test response for a valid client_id with response_type: code
"""
self.client.login(username="test_user", password="123456")
query_string = urlencode({
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# check form is in context and form params are valid
self.assertIn("form", response.context)
form = response.context["form"]
self.assertEqual(form["redirect_uri"].value(), "http://example.org")
self.assertEqual(form["state"].value(), "random_state_string")
self.assertEqual(form["scope"].value(), "read write")
self.assertEqual(form["client_id"].value(), self.application.client_id)
def test_pre_auth_valid_client_custom_redirect_uri_scheme(self):
"""
Test response for a valid client_id with response_type: code
using a non-standard, but allowed, redirect_uri scheme.
"""
self.client.login(username="test_user", password="123456")
query_string = urlencode({
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "custom-scheme://example.com",
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# check form is in context and form params are valid
self.assertIn("form", response.context)
form = response.context["form"]
self.assertEqual(form["redirect_uri"].value(), "custom-scheme://example.com")
self.assertEqual(form["state"].value(), "random_state_string")
self.assertEqual(form["scope"].value(), "read write")
self.assertEqual(form["client_id"].value(), self.application.client_id)
def test_pre_auth_approval_prompt(self):
tok = AccessToken.objects.create(
user=self.test_user, token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write"
)
self.client.login(username="test_user", password="123456")
query_string = urlencode({
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"approval_prompt": "auto",
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
# user already authorized the application, but with different scopes: prompt them.
tok.scope = "read"
tok.save()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_pre_auth_approval_prompt_default(self):
self.assertEqual(oauth2_settings.REQUEST_APPROVAL_PROMPT, "force")
AccessToken.objects.create(
user=self.test_user, token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write"
)
self.client.login(username="test_user", password="123456")
query_string = urlencode({
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_pre_auth_approval_prompt_default_override(self):
oauth2_settings.REQUEST_APPROVAL_PROMPT = "auto"
AccessToken.objects.create(
user=self.test_user, token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write"
)
self.client.login(username="test_user", password="123456")
query_string = urlencode({
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_pre_auth_default_redirect(self):
"""
Test for default redirect uri if omitted from query string with response_type: code
"""
self.client.login(username="test_user", password="123456")
query_string = urlencode({
"client_id": self.application.client_id,
"response_type": "code",
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
form = response.context["form"]
self.assertEqual(form["redirect_uri"].value(), "http://localhost")
    def test_pre_auth_forbidden_redirect(self):
"""
Test error when passing a forbidden redirect_uri in query string with response_type: code
"""
self.client.login(username="test_user", password="123456")
query_string = urlencode({
"client_id": self.application.client_id,
"response_type": "code",
"redirect_uri": "http://forbidden.it",
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_pre_auth_wrong_response_type(self):
"""
Test error when passing a wrong response_type in query string
"""
self.client.login(username="test_user", password="123456")
query_string = urlencode({
"client_id": self.application.client_id,
"response_type": "WRONG",
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.assertIn("error=unsupported_response_type", response["Location"])
def test_code_post_auth_allow(self):
"""
Test authorization code is given for an allowed request with response_type: code
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.org?", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
self.assertIn("code=", response["Location"])
def test_code_post_auth_deny(self):
"""
Test error when resource owner deny access
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": False,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("error=access_denied", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
def test_code_post_auth_deny_no_state(self):
"""
Test optional state when resource owner deny access
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": False,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("error=access_denied", response["Location"])
self.assertNotIn("state", response["Location"])
def test_code_post_auth_bad_responsetype(self):
"""
        Test the error response for an otherwise allowed request with an unsupported response_type
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "UNKNOWN",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.org?error", response["Location"])
def test_code_post_auth_forbidden_redirect_uri(self):
"""
        Test the error response for an otherwise allowed request with a forbidden redirect_uri
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://forbidden.it",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 400)
def test_code_post_auth_malicious_redirect_uri(self):
"""
Test validation of a malicious redirect_uri
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "/../",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 400)
def test_code_post_auth_allow_custom_redirect_uri_scheme(self):
"""
Test authorization code is given for an allowed request with response_type: code
using a non-standard, but allowed, redirect_uri scheme.
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "custom-scheme://example.com",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("custom-scheme://example.com?", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
self.assertIn("code=", response["Location"])
def test_code_post_auth_deny_custom_redirect_uri_scheme(self):
"""
Test error when resource owner deny access
using a non-standard, but allowed, redirect_uri scheme.
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "custom-scheme://example.com",
"response_type": "code",
"allow": False,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("custom-scheme://example.com?", response["Location"])
self.assertIn("error=access_denied", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
def test_code_post_auth_redirection_uri_with_querystring(self):
"""
Tests that a redirection uri with query string is allowed
and query string is retained on redirection.
See http://tools.ietf.org/html/rfc6749#section-3.1.2
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.com?foo=bar",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.com?foo=bar", response["Location"])
self.assertIn("code=", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
def test_code_post_auth_failing_redirection_uri_with_querystring(self):
"""
Test that in case of error the querystring of the redirection uri is preserved
See https://github.com/jazzband/django-oauth-toolkit/issues/238
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.com?foo=bar",
"response_type": "code",
"allow": False,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.com?", response["Location"])
self.assertIn("error=access_denied", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
self.assertIn("foo=bar", response["Location"])
def test_code_post_auth_fails_when_redirect_uri_path_is_invalid(self):
"""
Tests that a redirection uri is matched using scheme + netloc + path
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.com/a?foo=bar",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 400)
class TestAuthorizationCodeTokenView(BaseTest):
def get_auth(self):
"""
Helper method to retrieve a valid authorization code
"""
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
return query_dict["code"].pop()
def generate_pkce_codes(self, algorithm, length=43):
"""
Helper method to generate pkce codes
"""
code_verifier = get_random_string(length)
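        # Per RFC 7636, the S256 challenge is the base64url-encoded SHA-256 hash of the verifier
        # with "=" padding stripped; for "plain", the challenge is the verifier itself.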
if algorithm == "S256":
code_challenge = base64.urlsafe_b64encode(
hashlib.sha256(code_verifier.encode()).digest()
).decode().rstrip("=")
else:
code_challenge = code_verifier
return code_verifier, code_challenge
def get_pkce_auth(self, code_challenge, code_challenge_method):
"""
Helper method to retrieve a valid authorization code using pkce
"""
oauth2_settings.PKCE_REQUIRED = True
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
"code_challenge": code_challenge,
"code_challenge_method": code_challenge_method,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
oauth2_settings.PKCE_REQUIRED = False
return query_dict["code"].pop()
def test_basic_auth(self):
"""
Request an access token using basic authentication for client authentication
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(content["scope"], "read write")
self.assertEqual(content["expires_in"], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_refresh(self):
"""
Request an access token using a refresh token
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
# make a second token request to be sure the previous refresh token remains valid, see #65
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org"
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
"scope": content["scope"],
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("access_token" in content)
# check refresh token cannot be used twice
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("invalid_grant" in content.values())
def test_refresh_with_grace_period(self):
"""
        Request an access token using a refresh token, with a grace period that allows the refresh token to be reused
"""
oauth2_settings.REFRESH_TOKEN_GRACE_PERIOD_SECONDS = 120
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
# make a second token request to be sure the previous refresh token remains valid, see #65
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org"
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
"scope": content["scope"],
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("access_token" in content)
first_access_token = content["access_token"]
first_refresh_token = content["refresh_token"]
# check access token returns same data if used twice, see #497
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("access_token" in content)
self.assertEqual(content["access_token"], first_access_token)
# refresh token should be the same as well
self.assertTrue("refresh_token" in content)
self.assertEqual(content["refresh_token"], first_refresh_token)
oauth2_settings.REFRESH_TOKEN_GRACE_PERIOD_SECONDS = 0
def test_refresh_invalidates_old_tokens(self):
"""
Ensure existing refresh tokens are cleaned up when issuing new ones
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
rt = content["refresh_token"]
at = content["access_token"]
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": rt,
"scope": content["scope"],
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
refresh_token = RefreshToken.objects.filter(token=rt).first()
self.assertIsNotNone(refresh_token.revoked)
self.assertFalse(AccessToken.objects.filter(token=at).exists())
def test_refresh_no_scopes(self):
"""
Request an access token using a refresh token without passing any scope
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("access_token" in content)
def test_refresh_bad_scopes(self):
"""
Request an access token using a refresh token and wrong scopes
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
"scope": "read write nuke",
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
def test_refresh_fail_repeating_requests(self):
"""
Try refreshing an access token with the same refresh token more than once
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
"scope": content["scope"],
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
def test_refresh_repeating_requests(self):
"""
Trying to refresh an access token with the same refresh token more than
once succeeds within the grace period and fails outside of it
"""
oauth2_settings.REFRESH_TOKEN_GRACE_PERIOD_SECONDS = 120
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
"scope": content["scope"],
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
# try refreshing outside the refresh window, see #497
rt = RefreshToken.objects.get(token=content["refresh_token"])
self.assertIsNotNone(rt.revoked)
rt.revoked = timezone.now() - datetime.timedelta(minutes=10) # instead of mocking out datetime
rt.save()
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
oauth2_settings.REFRESH_TOKEN_GRACE_PERIOD_SECONDS = 0
def test_refresh_repeating_requests_non_rotating_tokens(self):
"""
Try refreshing an access token with the same refresh token more than once when not rotating tokens.
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
"scope": content["scope"],
}
oauth2_settings.ROTATE_REFRESH_TOKEN = False
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
oauth2_settings.ROTATE_REFRESH_TOKEN = True
def test_basic_auth_bad_authcode(self):
"""
Request an access token using a bad authorization code
"""
self.client.login(username="test_user", password="123456")
token_request_data = {
"grant_type": "authorization_code",
"code": "BLAH",
"redirect_uri": "http://example.org"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
def test_basic_auth_bad_granttype(self):
"""
Request an access token using a bad grant_type string
"""
self.client.login(username="test_user", password="123456")
token_request_data = {
"grant_type": "UNKNOWN",
"code": "BLAH",
"redirect_uri": "http://example.org"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
def test_basic_auth_grant_expired(self):
"""
Request an access token using an expired grant token
"""
self.client.login(username="test_user", password="123456")
g = Grant(
application=self.application, user=self.test_user, code="BLAH",
expires=timezone.now(), redirect_uri="", scope="")
g.save()
token_request_data = {
"grant_type": "authorization_code",
"code": "BLAH",
"redirect_uri": "http://example.org"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
def test_basic_auth_bad_secret(self):
"""
Request an access token using basic authentication with a wrong client secret
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org"
}
auth_headers = get_basic_auth_header(self.application.client_id, "BOOM!")
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 401)
def test_basic_auth_wrong_auth_type(self):
"""
Request an access token using an Authorization header scheme other than Basic for client authentication
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org"
}
user_pass = "{0}:{1}".format(self.application.client_id, self.application.client_secret)
auth_string = base64.b64encode(user_pass.encode("utf-8"))
auth_headers = {
"HTTP_AUTHORIZATION": "Wrong " + auth_string.decode("utf-8"),
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 401)
def test_request_body_params(self):
"""
Request an access token passing the client credentials in the request body
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client_id": self.application.client_id,
"client_secret": self.application.client_secret,
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(content["scope"], "read write")
self.assertEqual(content["expires_in"], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_public(self):
"""
Request an access token using client_type: public
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client_id": self.application.client_id
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(content["scope"], "read write")
self.assertEqual(content["expires_in"], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_public_pkce_S256_authorize_get(self):
"""
Request an access token using client_type: public
and PKCE enabled. Tests that the authorize GET request is successful
for the S256 algorithm
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("S256")
oauth2_settings.PKCE_REQUIRED = True
query_string = urlencode({
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
"code_challenge": code_challenge,
"code_challenge_method": "S256"
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
oauth2_settings.PKCE_REQUIRED = False
def test_public_pkce_plain_authorize_get(self):
"""
Request an access token using client_type: public
and PKCE enabled. Tests that the authorize GET request is successful
for the plain algorithm
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("plain")
oauth2_settings.PKCE_REQUIRED = True
query_string = urlencode({
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
"code_challenge": code_challenge,
"code_challenge_method": "plain"
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
print(code_challenge)
print(response.context_data)
print(url)
self.assertEqual(response.status_code, 200)
oauth2_settings.PKCE_REQUIRED = False
def test_public_pkce_S256(self):
"""
Request an access token using client_type: public
and PKCE enabled with the S256 algorithm
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("S256")
authorization_code = self.get_pkce_auth(code_challenge, "S256")
oauth2_settings.PKCE_REQUIRED = True
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client_id": self.application.client_id,
"code_verifier": code_verifier
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(content["scope"], "read write")
self.assertEqual(content["expires_in"], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
oauth2_settings.PKCE_REQUIRED = False
def test_public_pkce_plain(self):
"""
Request an access token using client_type: public
and PKCE enabled with the plain algorithm
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("plain")
authorization_code = self.get_pkce_auth(code_challenge, "plain")
oauth2_settings.PKCE_REQUIRED = True
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client_id": self.application.client_id,
"code_verifier": code_verifier
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
print(response.content)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(content["scope"], "read write")
self.assertEqual(content["expires_in"], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
oauth2_settings.PKCE_REQUIRED = False
def test_public_pkce_invalid_algorithm(self):
"""
Request an access token using client_type: public
and PKCE enabled with an invalid algorithm
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("invalid")
oauth2_settings.PKCE_REQUIRED = True
query_string = urlencode({
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
"code_challenge": code_challenge,
"code_challenge_method": "invalid",
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.assertIn("error=invalid_request", response["Location"])
oauth2_settings.PKCE_REQUIRED = False
def test_public_pkce_missing_code_challenge(self):
"""
Request an access token using client_type: public
and PKCE enabled but with the code_challenge missing
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.skip_authorization = True
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("S256")
oauth2_settings.PKCE_REQUIRED = True
query_string = urlencode({
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
"code_challenge_method": "S256"
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.assertIn("error=invalid_request", response["Location"])
oauth2_settings.PKCE_REQUIRED = False
def test_public_pkce_missing_code_challenge_method(self):
"""
Request an access token using client_type: public
and PKCE enabled but with the code_challenge_method missing
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("S256")
oauth2_settings.PKCE_REQUIRED = True
query_string = urlencode({
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
"code_challenge": code_challenge
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
oauth2_settings.PKCE_REQUIRED = False
def test_public_pkce_S256_invalid_code_verifier(self):
"""
Request an access token using client_type: public
and PKCE enabled with the S256 algorithm and an invalid code_verifier
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("S256")
authorization_code = self.get_pkce_auth(code_challenge, "S256")
oauth2_settings.PKCE_REQUIRED = True
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client_id": self.application.client_id,
"code_verifier": "invalid"
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 400)
oauth2_settings.PKCE_REQUIRED = False
def test_public_pkce_plain_invalid_code_verifier(self):
"""
Request an access token using client_type: public
and PKCE enabled with the plain algorithm and an invalid code_verifier
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("plain")
authorization_code = self.get_pkce_auth(code_challenge, "plain")
oauth2_settings.PKCE_REQUIRED = True
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client_id": self.application.client_id,
"code_verifier": "invalid"
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 400)
oauth2_settings.PKCE_REQUIRED = False
def test_public_pkce_S256_missing_code_verifier(self):
"""
Request an access token using client_type: public
and PKCE enabled with the S256 algorithm and the code_verifier missing
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("S256")
authorization_code = self.get_pkce_auth(code_challenge, "S256")
oauth2_settings.PKCE_REQUIRED = True
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client_id": self.application.client_id
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 400)
oauth2_settings.PKCE_REQUIRED = False
def test_public_pkce_plain_missing_code_verifier(self):
"""
Request an access token using client_type: public
and PKCE enabled with the plain algorithm and the code_verifier missing
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("plain")
authorization_code = self.get_pkce_auth(code_challenge, "plain")
oauth2_settings.PKCE_REQUIRED = True
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client_id": self.application.client_id
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 400)
oauth2_settings.PKCE_REQUIRED = False
def test_malicious_redirect_uri(self):
"""
Request an access token using client_type: public and ensure redirect_uri is
properly validated.
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "/../",
"client_id": self.application.client_id
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 400)
data = response.json()
self.assertEqual(data["error"], "invalid_request")
self.assertEqual(data["error_description"], oauthlib_errors.MismatchingRedirectURIError.description)
def test_code_exchange_succeed_when_redirect_uri_match(self):
"""
Tests that the code exchange succeeds when the redirect uri matches the one used for the code request
"""
self.client.login(username="test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org?foo=bar",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
authorization_code = query_dict["code"].pop()
# exchange authorization code for a valid access token
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org?foo=bar"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(content["scope"], "read write")
self.assertEqual(content["expires_in"], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_code_exchange_fails_when_redirect_uri_does_not_match(self):
"""
Tests that the code exchange fails when the redirect uri does not match the one used for the code request
"""
self.client.login(username="test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org?foo=bar",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
authorization_code = query_dict["code"].pop()
# exchange authorization code for a valid access token
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org?foo=baraa"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
data = response.json()
self.assertEqual(data["error"], "invalid_request")
self.assertEqual(data["error_description"], oauthlib_errors.MismatchingRedirectURIError.description)
def test_code_exchange_succeed_when_redirect_uri_match_with_multiple_query_params(self):
"""
Tests that the code exchange succeeds when the redirect uri, including multiple query parameters, matches the one used for the code request
"""
self.client.login(username="test_user", password="123456")
self.application.redirect_uris = "http://localhost http://example.com?foo=bar"
self.application.save()
# retrieve a valid authorization code
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.com?bar=baz&foo=bar",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
authorization_code = query_dict["code"].pop()
# exchange authorization code for a valid access token
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.com?bar=baz&foo=bar"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(content["scope"], "read write")
self.assertEqual(content["expires_in"], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
class TestAuthorizationCodeProtectedResource(BaseTest):
def test_resource_access_allowed(self):
self.client.login(username="test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
authorization_code = query_dict["code"].pop()
# exchange authorization code for a valid access token
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org"
}
auth_headers = get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
access_token = content["access_token"]
# use token to access the resource
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + access_token,
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.test_user
view = ResourceView.as_view()
response = view(request)
self.assertEqual(response, "This is a protected resource")
def test_resource_access_deny(self):
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + "faketoken",
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.test_user
view = ResourceView.as_view()
response = view(request)
self.assertEqual(response.status_code, 403)
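# Hedged sketch: get_basic_auth_header() is imported from this suite's test utilities
# and is not shown here. A minimal equivalent builds the extra-headers dict that
# Django's test client expands into the request via **auth_headers:
def _example_basic_auth_header(client_id, client_secret):
    import base64
    user_pass = "{0}:{1}".format(client_id, client_secret)
    auth_string = base64.b64encode(user_pass.encode("utf-8")).decode("utf-8")
    return {"HTTP_AUTHORIZATION": "Basic " + auth_string}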
class TestDefaultScopes(BaseTest):
def test_pre_auth_default_scopes(self):
"""
Test response for a valid client_id with response_type: code using default scopes
"""
self.client.login(username="test_user", password="123456")
oauth2_settings._DEFAULT_SCOPES = ["read"]
query_string = urlencode({
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"redirect_uri": "http://example.org",
})
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# check form is in context and form params are valid
self.assertIn("form", response.context)
form = response.context["form"]
self.assertEqual(form["redirect_uri"].value(), "http://example.org")
self.assertEqual(form["state"].value(), "random_state_string")
self.assertEqual(form["scope"].value(), "read")
self.assertEqual(form["client_id"].value(), self.application.client_id)
oauth2_settings._DEFAULT_SCOPES = ["read", "write"]
|
py | 1a3a40feb6e362d6cd5ffe5672ba8bb5ab87ab79 | import numpy as np
import scipy.sparse as sp
import tensorflow as tf
from keras import backend as K
modes = {
'S': 1, # Single (rank(A)=2, rank(B)=2)
'M': 2, # Mixed (rank(A)=2, rank(B)=3)
'iM': 3, # Inverted mixed (rank(A)=3, rank(B)=2)
'B': 4, # Batch (rank(A)=3, rank(B)=3)
'UNK': -1 # Unknown
}
################################################################################
# Ops for convolutions / Laplacians
################################################################################
def filter_dot(fltr, features):
"""
Performs the multiplication of a graph filter (N x N) with the node features,
automatically dealing with single, mixed, and batch modes.
:param fltr: the graph filter(s) (N x N in single and mixed mode,
batch x N x N in batch mode).
:param features: the node features (N x F in single mode, batch x N x F in
mixed and batch mode).
:return: the filtered features.
"""
if len(K.int_shape(features)) == 2:
# Single mode
return K.dot(fltr, features)
else:
if len(K.int_shape(fltr)) == 3:
# Batch mode
return K.batch_dot(fltr, features)
else:
# Mixed mode
return mixed_mode_dot(fltr, features)
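# Minimal usage sketch for filter_dot (shape values are illustrative, not part of the
# library API): single mode takes an (N x N) filter and (N x F) features; batch mode
# adds a leading batch dimension to both.
def _example_filter_dot():
    fltr = K.constant(np.eye(4, dtype=np.float32))                     # (N, N)
    feats = K.constant(np.arange(12, dtype=np.float32).reshape(4, 3))  # (N, F)
    single = filter_dot(fltr, feats)                                   # -> (N, F)
    fltr_b = K.constant(np.ones((2, 4, 4), dtype=np.float32))          # (batch, N, N)
    feats_b = K.constant(np.ones((2, 4, 3), dtype=np.float32))         # (batch, N, F)
    batch = filter_dot(fltr_b, feats_b)                                # -> (batch, N, F)
    return single, batch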
def normalize_A(A):
"""
Computes symmetric normalization of A, dealing with sparse A and batch mode
automatically.
:param A: Tensor or SparseTensor with rank k = {2, 3}.
:return: SparseTensor of rank k.
"""
D = degrees(A)
D = tf.sqrt(D)[:, None] + K.epsilon()
if K.ndim(A) == 3:
# Batch mode
output = (A / D) / transpose(D, perm=(0, 2, 1))
else:
# Single mode
output = (A / D) / transpose(D)
return output
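# Minimal sketch of normalize_A on a small dense adjacency (values are illustrative):
# the result corresponds to the symmetric normalization D^(-1/2) * A * D^(-1/2), with
# K.epsilon() added to the square-rooted degrees to avoid division by zero.
def _example_normalize_A():
    A = K.constant(np.array([[0., 1., 1.],
                             [1., 0., 0.],
                             [1., 0., 0.]], dtype=np.float32))
    return normalize_A(A)   # entry (0, 1) becomes ~1 / (sqrt(2) * sqrt(1))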
def degrees(A):
"""
Computes the degrees of each node in A, dealing with sparse A and batch mode
automatically.
:param A: Tensor or SparseTensor with rank k = {2, 3}.
:return: Tensor or SparseTensor of rank k - 1.
"""
if K.is_sparse(A):
D = tf.sparse.reduce_sum(A, axis=-1)
else:
D = tf.reduce_sum(A, axis=-1)
return D
def degree_matrix(A, return_sparse_batch=False):
"""
Computes the degree matrix of A, dealing with sparse A and batch mode
automatically.
:param A: Tensor or SparseTensor with rank k = {2, 3}.
:param return_sparse_batch: if operating in batch mode, return a
SparseTensor. Note that the sparse degree tensor returned by this function
cannot be used for sparse matrix multiplication afterwards.
:return: SparseTensor of rank k.
"""
D = degrees(A)
batch_mode = K.ndim(D) == 2
N = tf.shape(D)[-1]
batch_size = tf.shape(D)[0] if batch_mode else 1
inner_index = tf.tile(tf.stack([tf.range(N)] * 2, axis=1), (batch_size, 1))
if batch_mode:
if return_sparse_batch:
outer_index = repeat(
tf.range(batch_size), tf.ones(batch_size) * tf.cast(N, tf.float32)
)
indices = tf.concat([outer_index[:, None], inner_index], 1)
dense_shape = (batch_size, N, N)
else:
return tf.linalg.diag(D)
else:
indices = inner_index
dense_shape = (N, N)
indices = tf.cast(indices, tf.int64)
values = tf.reshape(D, (-1, ))
return tf.SparseTensor(indices, values, dense_shape)
################################################################################
# Scipy to tf.sparse conversion
################################################################################
def sp_matrix_to_sp_tensor_value(x):
"""
Converts a Scipy sparse matrix to a tf.SparseTensorValue
:param x: a Scipy sparse matrix
:return: tf.SparseTensorValue
"""
if not hasattr(x, 'tocoo'):
try:
x = sp.coo_matrix(x)
except:
raise TypeError('x must be convertible to scipy.coo_matrix')
else:
x = x.tocoo()
return tf.SparseTensorValue(
indices=np.array([x.row, x.col]).T,
values=x.data,
dense_shape=x.shape
)
def sp_matrix_to_sp_tensor(x):
"""
Converts a Scipy sparse matrix to a tf.SparseTensor
:param x: a Scipy sparse matrix
:return: tf.SparseTensor
"""
if not hasattr(x, 'tocoo'):
try:
x = sp.coo_matrix(x)
except:
raise TypeError('x must be convertible to scipy.coo_matrix')
else:
x = x.tocoo()
return tf.SparseTensor(
indices=np.array([x.row, x.col]).T,
values=x.data,
dense_shape=x.shape
)
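# Minimal usage sketch: converting a Scipy sparse adjacency so it can be fed to the
# sparse-aware ops in this module (values are illustrative).
def _example_sp_matrix_to_sp_tensor():
    A_sp = sp.csr_matrix(np.array([[0., 1.],
                                   [1., 0.]]))
    return sp_matrix_to_sp_tensor(A_sp)   # tf.SparseTensor with two non-zero entries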
################################################################################
# Matrix multiplication
################################################################################
def matmul_A_B(A, B):
"""
Computes A * B, dealing with sparsity and single/batch/mixed modes
automatically. Mixed mode multiplication also works when A has rank 3 and
B has rank 2. Sparse multiplication does not work with batch mode.
:param A: Tensor or SparseTensor with rank 2 or 3.
:param B: Tensor or SparseTensor with rank 2 or 3.
:return: Tensor or SparseTensor with rank 2 or 3, the product A * B.
"""
mode = autodetect_mode(A, B)
if mode == modes['S']:
# Single mode
output = single_mode_dot(A, B)
elif mode == modes['M']:
# Mixed mode
output = mixed_mode_dot(A, B)
elif mode == modes['iM']:
# Inverted mixed (rank(A)=3, rank(B)=2)
# Works only with dense tensors
output = K.dot(A, B)
elif mode == modes['B']:
# Batch mode
# Works only with dense tensors
output = K.batch_dot(A, B)
else:
raise ValueError('A and B must have rank 2 or 3.')
return output
def matmul_AT_B_A(A, B):
"""
Computes A.T * B * A, dealing with sparsity and single/batch/mixed modes
automatically. Mixed mode multiplication also works when A has rank 3 and
B has rank 2. Sparse multiplication does not work with batch mode.
:param A: Tensor or SparseTensor with rank 2 or 3.
:param B: Tensor or SparseTensor with rank 2 or 3.
:return: Tensor or SparseTensor with rank 2 or 3, the product A.T * B * A.
"""
mode = autodetect_mode(A, B)
if mode == modes['S']:
# Single (rank(A)=2, rank(B)=2)
output = single_mode_dot(single_mode_dot(transpose(A), B), A)
elif mode == modes['M']:
# Mixed (rank(A)=2, rank(B)=3)
output = mixed_mode_dot(transpose(A), B)
if K.is_sparse(A):
output = transpose(
mixed_mode_dot(transpose(A), transpose(output, (0, 2, 1))),
(0, 2, 1)
)
else:
output = K.dot(output, A)
elif mode == modes['iM']:
# Inverted mixed (rank(A)=3, rank(B)=2)
# Works only with dense tensors
output = mixed_mode_dot(B, A)
output = K.batch_dot(transpose(A, (0, 2, 1)), output)
elif mode == modes['B']:
# Batch (rank(A)=3, rank(B)=3)
# Works only with dense tensors
output = K.batch_dot(
K.batch_dot(
transpose(A, (0, 2, 1)),
B
),
A
)
else:
raise ValueError('A and B must have rank 2 or 3.')
return output
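# Minimal usage sketch (illustrative shapes, not a library example): a common use of
# matmul_AT_B_A is coarsening an adjacency matrix with a cluster-assignment matrix S,
# i.e. A_pooled = S^T * A * S.
def _example_matmul_AT_B_A():
    S = K.constant(np.full((4, 2), 0.5, dtype=np.float32))   # (N, K) assignments
    A = K.constant(np.eye(4, dtype=np.float32))              # (N, N) adjacency
    return matmul_AT_B_A(S, A)                               # -> (K, K)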
def matmul_AT_B(A, B):
"""
Computes A.T * B, dealing with sparsity and single/batch/mixed modes
automatically. Mixed mode multiplication also works when A has rank 3 and
B has rank 2. Sparse multiplication does not work with batch mode.
:param A: Tensor or SparseTensor with rank 2 or 3.
:param B: Tensor or SparseTensor with rank 2 or 3.
:return: Tensor or SparseTensor with rank 2 or 3, the product A.T * B.
"""
mode = autodetect_mode(A, B)
if mode == modes['S']:
# Single (rank(A)=2, rank(B)=2)
output = single_mode_dot(transpose(A), B)
elif mode == modes['M']:
# Mixed (rank(A)=2, rank(B)=3)
output = mixed_mode_dot(transpose(A), B)
elif mode == modes['iM']:
# Inverted mixed (rank(A)=3, rank(B)=2)
# Works only with dense tensors
output = K.dot(transpose(A, (0, 2, 1)), B)
elif mode == modes['B']:
# Batch (rank(A)=3, rank(B)=3)
# Works only with dense tensors
output = K.batch_dot(transpose(A, (0, 2, 1)), B)
else:
raise ValueError('A and B must have rank 2 or 3.')
return output
def matmul_A_BT(A, B):
"""
Computes A * B.T, dealing with sparsity and single/batch/mixed modes
automatically. Mixed mode multiplication also works when A has rank 3 and
B has rank 2. Sparse multiplication does not work with batch mode.
:param A: Tensor or SparseTensor with rank 2 or 3.
:param B: Tensor or SparseTensor with rank 2 or 3.
:return: Tensor or SparseTensor with rank 2 or 3, the product A * B.T.
"""
mode = autodetect_mode(A, B)
if mode == modes['S']:
# Single (rank(A)=2, rank(B)=2)
output = single_mode_dot(A, transpose(B))
elif mode == modes['M']:
# Mixed (rank(A)=2, rank(B)=3)
output = mixed_mode_dot(A, transpose(B, (0, 2, 1)))
elif mode == modes['iM']:
# Inverted mixed (rank(A)=3, rank(B)=2)
# Works only with dense tensors
output = K.dot(A, transpose(B))
elif mode == modes['B']:
# Batch (rank(A)=3, rank(B)=3)
# Works only with dense tensors
output = K.batch_dot(A, transpose(B, (0, 2, 1)))
else:
raise ValueError('A and B must have rank 2 or 3.')
return output
################################################################################
# Ops related to the modes of operation (single, mixed, batch)
################################################################################
def autodetect_mode(A, X):
"""
Return a code identifying the mode of operation (single, mixed, batch),
given A and X. See the modes variable for meaning of codes.
:param A: Tensor.
:param X: Tensor.
:return: mode of operation.
"""
if K.ndim(X) == 2:
if K.ndim(A) == 2:
return modes['S']
elif K.ndim(A) == 3:
return modes['iM']
else:
return modes['UNK']
elif K.ndim(X) == 3:
if K.ndim(A) == 2:
return modes['M']
elif K.ndim(A) == 3:
return modes['B']
else:
return modes['UNK']
else:
return modes['UNK']
def single_mode_dot(A, B):
"""
Dot product between two rank 2 matrices. Deals automatically with either A
or B being sparse.
:param A: rank 2 Tensor or SparseTensor.
:param B: rank 2 Tensor or SparseTensor.
:return: rank 2 Tensor or SparseTensor.
"""
a_sparse = K.is_sparse(A)
b_sparse = K.is_sparse(B)
if a_sparse and b_sparse:
raise ValueError('Sparse x Sparse matmul is not implemented yet.')
elif a_sparse:
output = tf.sparse_tensor_dense_matmul(A, B)
elif b_sparse:
output = transpose(
tf.sparse_tensor_dense_matmul(
transpose(B), transpose(A)
)
)
else:
output = tf.matmul(A, B)
return output
def mixed_mode_dot(A, B):
"""
Computes the equivalent of `tf.einsum('ij,bjk->bik', A, B)`, but
works for both dense and sparse input filters.
:param A: rank 2 Tensor or SparseTensor.
:param B: rank 3 Tensor or SparseTensor.
:return: rank 3 Tensor or SparseTensor.
"""
s_0_, s_1_, s_2_ = K.int_shape(B)
B_T = transpose(B, (1, 2, 0))
B_T = reshape(B_T, (s_1_, -1))
output = single_mode_dot(A, B_T)
output = reshape(output, (s_1_, s_2_, -1))
output = transpose(output, (2, 0, 1))
return output
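# Minimal sketch: for dense inputs, mixed_mode_dot(A, B) matches
# tf.einsum('ij,bjk->bik', A, B); the transpose/reshape round-trip above exists so the
# same product also works when A is a SparseTensor.
def _example_mixed_mode_dot():
    A = K.constant(np.eye(3, dtype=np.float32))                  # (N, N)
    B = K.constant(np.ones((2, 3, 4), dtype=np.float32))         # (batch, N, F)
    return mixed_mode_dot(A, B), tf.einsum('ij,bjk->bik', A, B)  # same values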
################################################################################
# Wrappers for automatic switching between dense and sparse ops
################################################################################
def transpose(A, perm=None, name=None):
"""
Transposes A according to perm, dealing with sparse A automatically.
:param A: Tensor or SparseTensor with rank k.
:param perm: permutation indices of size k.
:param name: name for the operation.
:return: Tensor or SparseTensor with rank k.
"""
if K.is_sparse(A):
transpose_op = tf.sparse.transpose
else:
transpose_op = tf.transpose
if perm is None:
perm = (1, 0) # Make explicit so that shape will always be preserved
return transpose_op(A, perm=perm, name=name)
def reshape(A, shape=None, name=None):
"""
Reshapes A according to shape, dealing with sparse A automatically.
:param A: Tensor or SparseTensor.
:param shape: new shape.
:param name: name for the operation.
:return: Tensor or SparseTensor.
"""
if K.is_sparse(A):
reshape_op = tf.sparse.reshape
else:
reshape_op = tf.reshape
return reshape_op(A, shape=shape, name=name)
################################################################################
# Misc ops
################################################################################
def matrix_power(x, k):
"""
Computes the k-th power of a square matrix.
:param x: a square matrix (Tensor or SparseTensor)
:param k: exponent
:return: matrix of same type and dtype as the input
"""
if K.ndim(x) != 2:
raise ValueError('x must have rank 2.')
sparse = K.is_sparse(x)
if sparse:
x_dense = tf.sparse.to_dense(x)
else:
x_dense = x
x_k = x_dense
for _ in range(k - 1):
x_k = K.dot(x_k, x_dense)
if sparse:
return tf.contrib.layers.dense_to_sparse(x_k)
else:
return x_k
def repeat(x, repeats):
"""
Repeats elements of a Tensor (equivalent to np.repeat, but only for 1D
tensors).
:param x: rank 1 tensor;
:param repeats: rank 1 tensor with same shape as x, the number of
repetitions for each element;
:return: rank 1 tensor, of shape `(sum(repeats), )`.
"""
x = tf.expand_dims(x, 1)
max_repeats = tf.reduce_max(repeats)
tile_repeats = [1, max_repeats]
arr_tiled = tf.tile(x, tile_repeats)
mask = tf.less(tf.range(max_repeats), tf.expand_dims(repeats, 1))
result = tf.reshape(tf.boolean_mask(arr_tiled, mask), [-1])
return result
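# Minimal sketch: repeat() behaves like np.repeat restricted to 1-D tensors, e.g.
# repeating [1, 2, 3] with repeats [2, 1, 3] yields [1, 1, 2, 3, 3, 3].
def _example_repeat():
    x = tf.constant([1, 2, 3])
    repeats = tf.constant([2, 1, 3])
    return repeat(x, repeats)   # -> [1, 1, 2, 3, 3, 3]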
def segment_top_k(x, I, ratio, top_k_var):
"""
Returns indices to get the top K values in x segment-wise, according to
the segments defined in I. K is not fixed, but it is defined as a ratio of
the number of elements in each segment.
:param x: a rank 1 tensor;
:param I: a rank 1 tensor with segment IDs for x;
:param ratio: float, ratio of elements to keep for each segment;
:param top_k_var: a tf.Variable created without shape validation (i.e.,
`tf.Variable(0.0, validate_shape=False)`);
:return: a rank 1 tensor containing the indices to get the top K values of
each segment in x.
"""
num_nodes = tf.segment_sum(tf.ones_like(I), I) # Number of nodes in each graph
cumsum = tf.cumsum(num_nodes) # Cumulative number of nodes (A, A+B, A+B+C)
cumsum_start = cumsum - num_nodes # Start index of each graph
n_graphs = tf.shape(num_nodes)[0] # Number of graphs in batch
max_n_nodes = tf.reduce_max(num_nodes) # Order of biggest graph in batch
batch_n_nodes = tf.shape(I)[0] # Number of overall nodes in batch
to_keep = tf.ceil(ratio * tf.cast(num_nodes, tf.float32))
to_keep = tf.cast(to_keep, tf.int32) # Nodes to keep in each graph
index = tf.range(batch_n_nodes)
index = (index - tf.gather(cumsum_start, I)) + (I * max_n_nodes)
y_min = tf.reduce_min(x)
dense_y = tf.ones((n_graphs * max_n_nodes,))
# subtract 1 to ensure that filler values do not get picked
dense_y = dense_y * tf.cast(y_min - 1, tf.float32)
# top_k_var is a variable with unknown shape defined elsewhere
dense_y = tf.assign(top_k_var, dense_y, validate_shape=False)
dense_y = tf.scatter_update(dense_y, index, x)
dense_y = tf.reshape(dense_y, (n_graphs, max_n_nodes))
perm = tf.argsort(dense_y, direction='DESCENDING')
perm = perm + cumsum_start[:, None]
perm = tf.reshape(perm, (-1,))
to_rep = tf.tile(tf.constant([1., 0.]), (n_graphs,))
rep_times = tf.reshape(tf.concat((to_keep[:, None], (max_n_nodes - to_keep)[:, None]), -1), (-1,))
mask = repeat(to_rep, rep_times)
perm = tf.boolean_mask(perm, mask)
return perm
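# Plain-NumPy rendering of the same semantics (keep the indices of the top
# ceil(ratio * n_i) values of x within each segment of I, assuming sorted segment ids
# as required by tf.segment_sum above). Hypothetical reference only, not used by the
# TF implementation.
def _segment_top_k_reference(x, I, ratio):
    x, I = np.asarray(x), np.asarray(I)
    keep = []
    for g in np.unique(I):
        idx = np.where(I == g)[0]
        k = int(np.ceil(ratio * len(idx)))
        keep.append(idx[np.argsort(-x[idx])][:k])
    return np.concatenate(keep)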
|
py | 1a3a4101fcd7b7936c6b81b2ec712d7a60210d20 | # Generated by Django 2.0 on 2017-12-05 00:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import organizations.base
import organizations.fields
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [
migrations.CreateModel(
name="Organization",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"name",
models.CharField(
help_text="The name of the organization", max_length=200
),
),
("is_active", models.BooleanField(default=True)),
(
"created",
organizations.fields.AutoCreatedField(
default=django.utils.timezone.now, editable=False
),
),
(
"modified",
organizations.fields.AutoLastModifiedField(
default=django.utils.timezone.now, editable=False
),
),
(
"slug",
organizations.fields.SlugField(
editable=True,
help_text="The name in all lowercase, suitable for URL identification",
max_length=200,
populate_from="name",
unique=True,
),
),
],
options={
"verbose_name": "organization",
"verbose_name_plural": "organizations",
"ordering": ["name"],
"abstract": False,
},
bases=(organizations.base.UnicodeMixin, models.Model),
),
migrations.CreateModel(
name="OrganizationOwner",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
organizations.fields.AutoCreatedField(
default=django.utils.timezone.now, editable=False
),
),
(
"modified",
organizations.fields.AutoLastModifiedField(
default=django.utils.timezone.now, editable=False
),
),
(
"organization",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="owner",
to="organizations.Organization",
),
),
],
options={
"verbose_name": "organization owner",
"verbose_name_plural": "organization owners",
"abstract": False,
},
bases=(organizations.base.UnicodeMixin, models.Model),
),
migrations.CreateModel(
name="OrganizationUser",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
organizations.fields.AutoCreatedField(
default=django.utils.timezone.now, editable=False
),
),
(
"modified",
organizations.fields.AutoLastModifiedField(
default=django.utils.timezone.now, editable=False
),
),
("is_admin", models.BooleanField(default=False)),
(
"organization",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="organization_users",
to="organizations.Organization",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="organizations_organizationuser",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "organization user",
"verbose_name_plural": "organization users",
"ordering": ["organization", "user"],
"abstract": False,
},
bases=(organizations.base.UnicodeMixin, models.Model),
),
migrations.AddField(
model_name="organizationowner",
name="organization_user",
field=models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to="organizations.OrganizationUser",
),
),
migrations.AddField(
model_name="organization",
name="users",
field=models.ManyToManyField(
related_name="organizations_organization",
through="organizations.OrganizationUser",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterUniqueTogether(
name="organizationuser", unique_together={("user", "organization")}
),
]
|
py | 1a3a42085e176e8d21d806fa4fcbe909ad123fc8 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from six.moves import range
import json, os
from semantic_version import Version
import frappe
import requests
import subprocess # nosec
from frappe.utils import cstr
from frappe.utils.gitutils import get_app_branch
from frappe import _, safe_decode
import git
def get_change_log(user=None):
if not user: user = frappe.session.user
last_known_versions = frappe._dict(json.loads(frappe.db.get_value("User",
user, "last_known_versions") or "{}"))
current_versions = get_versions()
if not last_known_versions:
update_last_known_versions()
return []
change_log = []
def set_in_change_log(app, opts, change_log):
from_version = last_known_versions.get(app, {}).get("version") or "0.0.1"
to_version = opts["version"]
if from_version != to_version:
app_change_log = get_change_log_for_app(app, from_version=from_version, to_version=to_version)
if app_change_log:
change_log.append({
"title": opts["title"],
"description": opts["description"],
"version": to_version,
"change_log": app_change_log
})
for app, opts in current_versions.items():
if app != "frappe":
set_in_change_log(app, opts, change_log)
if "frappe" in current_versions:
set_in_change_log("frappe", current_versions["frappe"], change_log)
return change_log
def get_change_log_for_app(app, from_version, to_version):
change_log_folder = os.path.join(frappe.get_app_path(app), "change_log")
if not os.path.exists(change_log_folder):
return
from_version = Version(from_version)
to_version = Version(to_version)
# remove pre-release part
to_version.prerelease = None
major_version_folders = ["v{0}".format(i) for i in range(from_version.major, to_version.major + 1)]
app_change_log = []
for folder in os.listdir(change_log_folder):
if folder in major_version_folders:
for file in os.listdir(os.path.join(change_log_folder, folder)):
version = Version(os.path.splitext(file)[0][1:].replace("_", "."))
if from_version < version <= to_version:
file_path = os.path.join(change_log_folder, folder, file)
content = frappe.read_file(file_path)
app_change_log.append([version, content])
app_change_log = sorted(app_change_log, key=lambda d: d[0], reverse=True)
# convert version to string and send
return [[cstr(d[0]), d[1]] for d in app_change_log]
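# Illustrative sketch of the layout and filename convention assumed above: one folder
# per major version and one markdown file per release, with underscores in place of
# dots (e.g. myapp/change_log/v12/v12_1_0.md, where "myapp" is a hypothetical app).
# The filename-to-Version step in isolation:
def _example_change_log_filename_to_version(filename="v12_1_0.md"):
    return Version(os.path.splitext(filename)[0][1:].replace("_", "."))   # -> 12.1.0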
@frappe.whitelist()
def update_last_known_versions():
frappe.db.set_value("User", frappe.session.user, "last_known_versions",
json.dumps(get_versions()), update_modified=False)
@frappe.whitelist()
def get_versions():
"""Get versions of all installed apps.
Example:
{
"frappe": {
"title": "Frappe Framework",
"version": "5.0.0"
}
}"""
versions = {}
for app in frappe.get_installed_apps(sort=True):
app_hooks = frappe.get_hooks(app_name=app)
versions[app] = {
"title": app_hooks.get("app_title")[0],
"description": app_hooks.get("app_description")[0],
"branch": get_app_branch(app)
}
if versions[app]['branch'] != 'master':
branch_version = app_hooks.get('{0}_version'.format(versions[app]['branch']))
if branch_version:
versions[app]['branch_version'] = branch_version[0] + ' ({0})'.format(get_app_last_commit_ref(app))
try:
versions[app]["version"] = frappe.get_attr(app + ".__version__")
except AttributeError:
versions[app]["version"] = '0.0.1'
return versions
def get_app_branch(app):
'''Returns branch of an app'''
try:
result = subprocess.check_output('cd ../apps/{0} && git rev-parse --abbrev-ref HEAD'.format(app),
shell=True)
result = safe_decode(result)
result = result.strip()
return result
except Exception as e:
return ''
def get_app_last_commit_ref(app):
try:
result = subprocess.check_output('cd ../apps/{0} && git rev-parse --short=7 HEAD'.format(app),
shell=True)
result = safe_decode(result)
result = result.strip()
return result
except Exception as e:
return ''
def check_for_update():
updates = frappe._dict(major=[], minor=[], patch=[])
apps = get_versions()
for app in apps:
app_details = check_release_on_github(app)
if not app_details: continue
github_version, org_name = app_details
# Get local instance's current version or the app
branch_version = apps[app]['branch_version'].split(' ')[0] if apps[app].get('branch_version', '') else ''
instance_version = Version(branch_version or apps[app].get('version'))
# Compare and popup update message
for update_type in updates:
if github_version.__dict__[update_type] > instance_version.__dict__[update_type]:
updates[update_type].append(frappe._dict(
current_version = str(instance_version),
available_version = str(github_version),
org_name = org_name,
app_name = app,
title = apps[app]['title'],
))
break
if github_version.__dict__[update_type] < instance_version.__dict__[update_type]: break
add_message_to_redis(updates)
def parse_latest_non_beta_release(response):
"""
Parses the response JSON for all the releases and returns the latest non-prerelease release
Parameters
response (list): response object returned by github
Returns
json : json object pertaining to the latest non-beta release
"""
for release in response:
if release['prerelease']: continue
return release
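# Minimal usage sketch with a hand-written stand-in for the GitHub /releases payload
# (only the two fields read above are included; GitHub returns releases newest first):
def _example_parse_latest_non_beta_release():
    releases = [
        {"tag_name": "v13.0.0-beta.1", "prerelease": True},
        {"tag_name": "v12.2.0", "prerelease": False},
        {"tag_name": "v12.1.0", "prerelease": False},
    ]
    return parse_latest_non_beta_release(releases)   # -> the v12.2.0 entry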
def check_release_on_github(app):
# Check if repo remote is on github
from subprocess import CalledProcessError
try:
remote_url = subprocess.check_output("cd ../apps/{} && git ls-remote --get-url".format(app), shell=True).decode()
except CalledProcessError:
# Passing this since some apps may not have git initialized in them
return None
if isinstance(remote_url, bytes):
remote_url = remote_url.decode()
if "github.com" not in remote_url:
return None
# Get latest version from github
if 'https' not in remote_url:
return None
org_name = remote_url.split('/')[3]
r = requests.get('https://api.github.com/repos/{}/{}/releases'.format(org_name, app))
if r.status_code == 200 and r.json():
latest_non_beta_release = parse_latest_non_beta_release(r.json())
return Version(latest_non_beta_release['tag_name'].strip('v')), org_name
else:
# In case of an improper response or if there are no releases
return None
def add_message_to_redis(update_json):
# "update-message" will store the update message string
# "update-user-set" will be a set of users
cache = frappe.cache()
cache.set_value("update-info", json.dumps(update_json))
user_list = [x.name for x in frappe.get_all("User", filters={"enabled": True})]
system_managers = [user for user in user_list if 'System Manager' in frappe.get_roles(user)]
cache.sadd("update-user-set", *system_managers)
@frappe.whitelist()
def show_update_popup():
cache = frappe.cache()
user = frappe.session.user
update_info = cache.get_value("update-info")
if not update_info:
return
updates = json.loads(update_info)
current_versions = get_versions()
# Check if user is in the set of users to send the update message to
update_message = ""
if cache.sismember("update-user-set", user):
for update_type in updates:
release_links = ""
for app in updates[update_type]:
app = frappe._dict(app)
release_links += "<a href='https://github.com/{org_name}/{app_name}/releases/tag/v{available_version}'><b>{title}</b>: v{available_version}</a><br>".format(
available_version = app.available_version,
org_name = app.org_name,
app_name = app.app_name,
title = app.title
)
if release_links:
update_message += _("New {} releases for the following apps are available".format(update_type)) + ":<br><br>{}".format(release_links)
if update_message:
frappe.msgprint(update_message, title=_("New updates are available"), indicator='green')
cache.srem("update-user-set", user)
|
py | 1a3a423d2a900386c2a9b8781596f965b3b8bea4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import vim
import re
import os
import os.path
from functools import wraps
from .utils import *
from .explorer import *
from .manager import *
from .mru import *
from .devicons import (
webDevIconsGetFileTypeSymbol,
webDevIconsStrLen,
webDevIconsBytesLen,
matchaddDevIconsDefault,
matchaddDevIconsExact,
matchaddDevIconsExtension,
)
#*****************************************************
# BufferExplorer
#*****************************************************
class BufferExplorer(Explorer):
def __init__(self):
self._prefix_length = 0
self._max_bufname_len = 0
def getContent(self, *args, **kwargs):
mru_bufnrs = []
for num in reversed(lfEval("g:Lf_MruBufnrs")):
if num not in mru_bufnrs:
mru_bufnrs.append(int(num))
for num in reversed(mru_bufnrs):
mru.setBufferTimestamp(num)
lfCmd("let g:Lf_MruBufnrs = []")
if "--all" not in kwargs.get("arguments", {}):
if "--tabpage" not in kwargs.get("arguments", {}):
buffers = {b.number: b for b in vim.buffers
if lfEval("buflisted(%d)" % b.number) == '1'}
else:
buffers = {w.buffer.number: w.buffer for w in vim.current.tabpage.windows
if lfEval("buflisted(%d)" % w.buffer.number) == '1'}
else:
if "--tabpage" not in kwargs.get("arguments", {}):
buffers = {b.number: b for b in vim.buffers
if os.path.basename(b.name) != "LeaderF"}
else:
buffers = {w.buffer.number: w.buffer for w in vim.current.tabpage.windows
if os.path.basename(w.buffer.name) != "LeaderF"}
# e.g., 12 u %a+- aaa.txt
bufnr_len = len(lfEval("bufnr('$')"))
self._prefix_length = bufnr_len + 8
if lfEval("get(g:, 'Lf_ShowDevIcons', 1)") == '1':
self._prefix_length += webDevIconsStrLen()
self._max_bufname_len = max([int(lfEval("strdisplaywidth('%s')"
% escQuote(getBasename(buffers[nr].name))))
for nr in mru.getMruBufnrs() if nr in buffers] + [len('[No Name]')] or [0])
bufnames = []
for nr in mru.getMruBufnrs():
if nr in buffers:
buf_name = buffers[nr].name
if not buf_name:
buf_name = "[No Name]"
if lfEval("g:Lf_ShowRelativePath") == '1':
buf_name = lfRelpath(buf_name)
basename = getBasename(buf_name)
dirname = getDirname(buf_name)
space_num = self._max_bufname_len \
- int(lfEval("strdisplaywidth('%s')" % escQuote(basename)))
if lfEval("get(g:, 'Lf_ShowDevIcons', 1)") == '1':
icon = webDevIconsGetFileTypeSymbol(basename)
else:
icon = ''
# e.g., 12 u %a+- aaa.txt
buf_name = '{:{width}d} {:1s} {:1s}{:1s}{:1s}{:1s} {}{}{} "{}"'.format(nr,
'' if buffers[nr].options["buflisted"] else 'u',
'%' if int(lfEval("bufnr('%')")) == nr
else '#' if int(lfEval("bufnr('#')")) == nr else '',
'a' if lfEval("bufwinnr(%d)" % nr) != '-1' else 'h',
'+' if buffers[nr].options["modified"] else '',
'-' if not buffers[nr].options["modifiable"] else '',
icon, basename, ' ' * space_num,
dirname if dirname else '.' + os.sep,
width=bufnr_len)
bufnames.append(buf_name)
del buffers[nr]
elif lfEval("bufnr(%d)" % nr) == '-1':
mru.delMruBufnr(nr)
return bufnames
def getStlCategory(self):
return 'Buffer'
def getStlCurDir(self):
return escQuote(lfEncode(os.getcwd()))
def supportsNameOnly(self):
return True
def getPrefixLength(self):
return self._prefix_length
def getMaxBufnameLen(self):
return self._max_bufname_len
#*****************************************************
# BufExplManager
#*****************************************************
class BufExplManager(Manager):
def __init__(self):
super(BufExplManager, self).__init__()
def _getExplClass(self):
return BufferExplorer
def _defineMaps(self):
lfCmd("call leaderf#Buffer#Maps()")
def _acceptSelection(self, *args, **kwargs):
if len(args) == 0:
return
line = args[0]
buf_number = int(re.sub(r"^.*?(\d+).*$", r"\1", line))
if kwargs.get("mode", '') == 't':
buf_name = lfEval("bufname(%s)" % buf_number)
lfCmd("tab drop %s" % escSpecial(buf_name))
else:
if lfEval("get(g:, 'Lf_JumpToExistingWindow', 0)") == '1':
buf_name = lfEval("bufname(%s)" % buf_number)
lfCmd("hide drop %s" % escSpecial(buf_name))
else:
lfCmd("hide buffer %d" % buf_number)
def _getDigest(self, line, mode):
"""
        Specify which part of the line is to be processed and highlighted.
Args:
mode: 0, return the full path
1, return the name only
2, return the directory name
"""
if not line:
return ''
prefix_len = self._getExplorer().getPrefixLength()
if mode == 0:
return line[prefix_len:]
elif mode == 1:
buf_number = int(re.sub(r"^.*?(\d+).*$", r"\1", line))
basename = getBasename(vim.buffers[buf_number].name)
return basename if basename else "[No Name]"
else:
start_pos = line.find(' "')
return line[start_pos+2 : -1]
def _getDigestStartPos(self, line, mode):
"""
return the start position of the digest returned by _getDigest()
Args:
            mode: 0, return the start position of full path
                  1, return the start position of name only
                  2, return the start position of directory name
"""
if not line:
return 0
prefix_len = self._getExplorer().getPrefixLength() - webDevIconsStrLen() + webDevIconsBytesLen()
if mode == 0:
return prefix_len
elif mode == 1:
return prefix_len
else:
buf_number = int(re.sub(r"^.*?(\d+).*$", r"\1", line))
basename = getBasename(vim.buffers[buf_number].name)
space_num = self._getExplorer().getMaxBufnameLen() \
- int(lfEval("strdisplaywidth('%s')" % escQuote(basename)))
return prefix_len + lfBytesLen(basename) + space_num + 2
def _createHelp(self):
help = []
help.append('" <CR>/<double-click>/o : open file under cursor')
help.append('" x : open file under cursor in a horizontally split window')
help.append('" v : open file under cursor in a vertically split window')
help.append('" t : open file under cursor in a new tabpage')
help.append('" d : wipe out buffer under cursor')
help.append('" D : delete buffer under cursor')
help.append('" i/<Tab> : switch to input mode')
help.append('" q : quit')
help.append('" <F1> : toggle this help')
help.append('" ---------------------------------------------------------')
return help
def _afterEnter(self):
super(BufExplManager, self)._afterEnter()
winid = None
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_bufNumber'', ''^\s*\zs\d\+'')')"""
% self._getInstance().getPopupWinId())
id = int(lfEval("matchid"))
self._match_ids.append(id)
lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_bufIndicators'', ''^\s*\d\+\s*\zsu\=\s*[#%%]\=...'')')"""
% self._getInstance().getPopupWinId())
id = int(lfEval("matchid"))
self._match_ids.append(id)
lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_bufModified'', ''^\s*\d\+\s*u\=\s*[#%%]\=.+\s*\zs.*$'')')"""
% self._getInstance().getPopupWinId())
id = int(lfEval("matchid"))
self._match_ids.append(id)
lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_bufNomodifiable'', ''^\s*\d\+\s*u\=\s*[#%%]\=..-\s*\zs.*$'')')"""
% self._getInstance().getPopupWinId())
id = int(lfEval("matchid"))
self._match_ids.append(id)
lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_bufDirname'', '' \zs".*"$'')')"""
% self._getInstance().getPopupWinId())
id = int(lfEval("matchid"))
self._match_ids.append(id)
winid = self._getInstance().getPopupWinId()
else:
id = int(lfEval("matchadd('Lf_hl_bufNumber', '^\s*\zs\d\+')"))
self._match_ids.append(id)
id = int(lfEval("matchadd('Lf_hl_bufIndicators', '^\s*\d\+\s*\zsu\=\s*[#%]\=...')"))
self._match_ids.append(id)
id = int(lfEval("matchadd('Lf_hl_bufModified', '^\s*\d\+\s*u\=\s*[#%]\=.+\s*\zs.*$')"))
self._match_ids.append(id)
id = int(lfEval("matchadd('Lf_hl_bufNomodifiable', '^\s*\d\+\s*u\=\s*[#%]\=..-\s*\zs.*$')"))
self._match_ids.append(id)
id = int(lfEval('''matchadd('Lf_hl_bufDirname', ' \zs".*"$')'''))
self._match_ids.append(id)
# devicons
if lfEval("get(g:, 'Lf_ShowDevIcons', 1)") == '1':
self._match_ids.extend(matchaddDevIconsExtension(r'__icon__\ze\s\+\S\+\.__name__\($\|\s\)', winid))
self._match_ids.extend(matchaddDevIconsExact(r'__icon__\ze\s\+__name__\($\|\s\)', winid))
self._match_ids.extend(matchaddDevIconsDefault(r'__icon__\ze\s\+\S\+\($\|\s\)', winid))
def _beforeExit(self):
super(BufExplManager, self)._beforeExit()
def deleteBuffer(self, wipe=0):
instance = self._getInstance()
if self._inHelpLines():
return
if instance.getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'setlocal modifiable')" % instance.getPopupWinId())
else:
lfCmd("setlocal modifiable")
line = instance._buffer_object[instance.window.cursor[0] - 1]
if len(self._content) > 0:
self._content.remove(line)
self._getInstance().setStlTotal(len(self._content)//self._getUnit())
self._getInstance().setStlResultsCount(len(self._content)//self._getUnit())
buf_number = int(re.sub(r"^.*?(\d+).*$", r"\1", line))
lfCmd("confirm %s %d" % ('bw' if wipe else 'bd', buf_number))
del instance._buffer_object[instance.window.cursor[0] - 1]
if instance.getWinPos() == 'popup':
instance.refreshPopupStatusline()
lfCmd("call win_execute(%d, 'setlocal nomodifiable')" % instance.getPopupWinId())
else:
lfCmd("setlocal nomodifiable")
def _previewInPopup(self, *args, **kwargs):
line = args[0]
buf_number = int(re.sub(r"^.*?(\d+).*$", r"\1", line))
self._createPopupPreview(vim.buffers[buf_number].name, buf_number, 0)
#*****************************************************
# bufExplManager is a singleton
#*****************************************************
bufExplManager = BufExplManager()
__all__ = ['bufExplManager']
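def _example_extract_bufnr():
    # A minimal sketch (not part of LeaderF's API): the managers above recover the
    # buffer number from a rendered buffer line with this regex; the sample line is
    # a made-up illustration of the format documented in BufferExplorer.getContent().
    line = ' 12 u %a+- aaa.txt        "plugin/"'
    return int(re.sub(r"^.*?(\d+).*$", r"\1", line))  # -> 12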
|
py | 1a3a429f08b31da66f209b9859ca53996ab20713 | #! /usr/bin/python
# -*- coding: utf-8 -*-
import base64
import gzip
import json
import math
import os
import pickle
import re
import shutil
# import ast
import sys
import tarfile
import time
import zipfile
import cloudpickle
import h5py
import numpy as np
import scipy.io as sio
from six.moves import cPickle
import progressbar
import tensorflow as tf
import tensorlayer as tl
from tensorflow.python.keras.saving import model_config as model_config_lib
from tensorflow.python.platform import gfile
from tensorflow.python.util import serialization
from tensorflow.python.util.tf_export import keras_export
from tensorlayer import logging, nlp, utils, visualize
import cloudpickle
import base64
from tensorflow.python.keras.saving import model_config as model_config_lib
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util import serialization
import json
import datetime
# from six.moves import zip
if sys.version_info[0] == 2:
from urllib import urlretrieve
else:
from urllib.request import urlretrieve
# import tensorflow.contrib.eager.python.saver as tfes
# TODO: tf2.0 not stable, cannot import tensorflow.contrib.eager.python.saver
__all__ = [
'assign_weights',
'del_file',
'del_folder',
'download_file_from_google_drive',
'exists_or_mkdir',
'file_exists',
'folder_exists',
'load_and_assign_npz',
'load_and_assign_npz_dict',
'load_ckpt',
'load_cropped_svhn',
'load_file_list',
'load_folder_list',
'load_npy_to_any',
'load_npz',
'maybe_download_and_extract',
'natural_keys',
'npz_to_W_pdf',
'read_file',
'save_any_to_npy',
'save_ckpt',
'save_npz',
'save_npz_dict',
'tf_variables_to_numpy',
'assign_tf_variable',
'save_weights_to_hdf5',
'load_hdf5_to_weights_in_order',
'load_hdf5_to_weights',
'save_hdf5_graph',
'load_hdf5_graph',
# 'net2static_graph',
'static_graph2net',
# 'save_pkl_graph',
# 'load_pkl_graph',
]
def func2str(expr):
b = cloudpickle.dumps(expr)
s = base64.b64encode(b).decode()
return s
def str2func(s):
b = base64.b64decode(s)
expr = cloudpickle.loads(b)
return expr
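def _example_func2str_roundtrip():
    # A minimal sketch (not part of the original API): round-trip a callable through
    # func2str()/str2func(). It assumes cloudpickle can serialise the callable, which
    # holds for plain lambdas and module-level functions.
    double = lambda x: x * 2
    encoded = func2str(double)   # base64 text, safe to embed in hdf5 attributes or JSON
    restored = str2func(encoded)
    assert restored(21) == 42
    return encoded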
# def net2static_graph(network):
# saved_file = dict()
# # if network._NameNone is True:
# # saved_file.update({"name": None})
# # else:
# # saved_file.update({"name": network.name})
# # if not isinstance(network.inputs, list):
# # saved_file.update({"inputs": network.inputs._info[0].name})
# # else:
# # saved_inputs = []
# # for saved_input in network.inputs:
# # saved_inputs.append(saved_input._info[0].name)
# # saved_file.update({"inputs": saved_inputs})
# # if not isinstance(network.outputs, list):
# # saved_file.update({"outputs": network.outputs._info[0].name})
# # else:
# # saved_outputs = []
# # for saved_output in network.outputs:
# # saved_outputs.append(saved_output._info[0].name)
# # saved_file.update({"outputs": saved_outputs})
# saved_file.update({"config": network.config})
#
# return saved_file
@keras_export('keras.models.save_model')
def save_keras_model(model):
# f.attrs['keras_model_config'] = json.dumps(
# {
# 'class_name': model.__class__.__name__,
# 'config': model.get_config()
# },
# default=serialization.get_json_type).encode('utf8')
#
# f.flush()
return json.dumps(
{
'class_name': model.__class__.__name__,
'config': model.get_config()
}, default=serialization.get_json_type
).encode('utf8')
@keras_export('keras.models.load_model')
def load_keras_model(model_config):
custom_objects = {}
if model_config is None:
raise ValueError('No model found in config.')
model_config = json.loads(model_config.decode('utf-8'))
model = model_config_lib.model_from_config(model_config, custom_objects=custom_objects)
return model
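def _example_keras_config_roundtrip():
    # A minimal sketch (assumes a working tf.keras installation): serialise a small
    # Keras model's architecture with save_keras_model() and rebuild it with
    # load_keras_model(). Only the architecture survives the round trip; the rebuilt
    # model starts from freshly initialised weights.
    m = tf.keras.Sequential([tf.keras.layers.Dense(4, activation='relu', input_shape=(8,))])
    config_bytes = save_keras_model(m)
    return load_keras_model(config_bytes)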
def save_hdf5_graph(network, filepath='model.hdf5', save_weights=False, customized_data=None):
"""Save the architecture of TL model into a hdf5 file. Support saving model weights.
Parameters
-----------
network : TensorLayer Model.
The network to save.
filepath : str
The name of model file.
save_weights : bool
Whether to save model weights.
customized_data : dict
The user customized meta data.
Examples
--------
>>> # Save the architecture (with parameters)
>>> tl.files.save_hdf5_graph(network, filepath='model.hdf5', save_weights=True)
>>> # Save the architecture (without parameters)
>>> tl.files.save_hdf5_graph(network, filepath='model.hdf5', save_weights=False)
>>> # Load the architecture in another script (no parameters restore)
>>> net = tl.files.load_hdf5_graph(filepath='model.hdf5', load_weights=False)
>>> # Load the architecture in another script (restore parameters)
>>> net = tl.files.load_hdf5_graph(filepath='model.hdf5', load_weights=True)
"""
if network.outputs is None:
raise RuntimeError("save_hdf5_graph not support dynamic mode yet")
logging.info("[*] Saving TL model into {}, saving weights={}".format(filepath, save_weights))
model_config = network.config # net2static_graph(network)
model_config["version_info"]["save_date"] = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc
).isoformat()
model_config_str = str(model_config)
customized_data_str = str(customized_data)
# version_info = {
# "tensorlayer_version": tl.__version__,
# "backend": "tensorflow",
# "backend_version": tf.__version__,
# "training_device": "gpu",
# "save_date": datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
# }
# version_info_str = str(version_info)
with h5py.File(filepath, 'w') as f:
f.attrs["model_config"] = model_config_str.encode('utf8')
f.attrs["customized_data"] = customized_data_str.encode('utf8')
# f.attrs["version_info"] = version_info_str.encode('utf8')
if save_weights:
_save_weights_to_hdf5_group(f, network.all_layers)
f.flush()
logging.info("[*] Saved TL model into {}, saving weights={}".format(filepath, save_weights))
def generate_func(args):
for key in args:
if isinstance(args[key], tuple) and args[key][0] == 'is_Func':
fn = str2func(args[key][1])
args[key] = fn
# if key in ['act']:
# # fn_dict = args[key]
# # module_path = fn_dict['module_path']
# # func_name = fn_dict['func_name']
# # lib = importlib.import_module(module_path)
# # fn = getattr(lib, func_name)
# # args[key] = fn
# fn = str2func(args[key])
# args[key] = fn
# elif key in ['fn']:
# fn = str2func(args[key])
# args[key] = fn
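def _example_generate_func():
    # A minimal sketch (hypothetical args dict, not a real layer config): generate_func()
    # rewrites ('is_Func', <base64 payload>) entries in place into callables.
    args = {'act': ('is_Func', func2str(lambda x: x + 1)), 'n_units': 16}
    generate_func(args)
    return args['act'](1)  # -> 2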
def eval_layer(layer_kwargs):
layer_class = layer_kwargs.pop('class')
args = layer_kwargs['args']
layer_type = args.pop('layer_type')
if layer_type == "normal":
generate_func(args)
return eval('tl.layers.' + layer_class)(**args)
elif layer_type == "layerlist":
ret_layer = []
layers = args["layers"]
for layer_graph in layers:
ret_layer.append(eval_layer(layer_graph))
args['layers'] = ret_layer
return eval('tl.layers.' + layer_class)(**args)
elif layer_type == "modellayer":
M = static_graph2net(args['model'])
args['model'] = M
return eval('tl.layers.' + layer_class)(**args)
elif layer_type == "keraslayer":
M = load_keras_model(args['fn'])
input_shape = args.pop('keras_input_shape')
_ = M(np.random.random(input_shape).astype(np.float32))
args['fn'] = M
args['fn_weights'] = M.trainable_variables
return eval('tl.layers.' + layer_class)(**args)
else:
raise RuntimeError("Unknown layer type.")
def static_graph2net(model_config):
layer_dict = {}
model_name = model_config["name"]
inputs_tensors = model_config["inputs"]
outputs_tensors = model_config["outputs"]
all_args = model_config["model_architecture"]
for idx, layer_kwargs in enumerate(all_args):
layer_class = layer_kwargs["class"] # class of current layer
prev_layers = layer_kwargs.pop("prev_layer") # name of previous layers
net = eval_layer(layer_kwargs)
if layer_class in tl.layers.inputs.__all__:
net = net._nodes[0].out_tensors[0]
if prev_layers is not None:
for prev_layer in prev_layers:
if not isinstance(prev_layer, list):
output = net(layer_dict[prev_layer])
layer_dict[output._info[0].name] = output
else:
list_layers = [layer_dict[layer] for layer in prev_layer]
output = net(list_layers)
layer_dict[output._info[0].name] = output
else:
layer_dict[net._info[0].name] = net
if not isinstance(inputs_tensors, list):
model_inputs = layer_dict[inputs_tensors]
else:
model_inputs = []
for inputs_tensor in inputs_tensors:
model_inputs.append(layer_dict[inputs_tensor])
if not isinstance(outputs_tensors, list):
model_outputs = layer_dict[outputs_tensors]
else:
model_outputs = []
for outputs_tensor in outputs_tensors:
model_outputs.append(layer_dict[outputs_tensor])
from tensorlayer.models import Model
M = Model(inputs=model_inputs, outputs=model_outputs, name=model_name)
logging.info("[*] Load graph finished")
return M
def load_hdf5_graph(filepath='model.hdf5', load_weights=False):
"""Restore TL model archtecture from a a pickle file. Support loading model weights.
Parameters
-----------
filepath : str
The name of model file.
load_weights : bool
Whether to load model weights.
Returns
--------
network : TensorLayer Model.
Examples
--------
- see ``tl.files.save_hdf5_graph``
"""
logging.info("[*] Loading TL model from {}, loading weights={}".format(filepath, load_weights))
f = h5py.File(filepath, 'r')
model_config_str = f.attrs["model_config"].decode('utf8')
model_config = eval(model_config_str)
# version_info_str = f.attrs["version_info"].decode('utf8')
# version_info = eval(version_info_str)
version_info = model_config["version_info"]
backend_version = version_info["backend_version"]
tensorlayer_version = version_info["tensorlayer_version"]
if backend_version != tf.__version__:
logging.warning(
"Saved model uses tensorflow version {}, but now you are using tensorflow version {}".format(
backend_version, tf.__version__
)
)
if tensorlayer_version != tl.__version__:
logging.warning(
"Saved model uses tensorlayer version {}, but now you are using tensorlayer version {}".format(
tensorlayer_version, tl.__version__
)
)
M = static_graph2net(model_config)
if load_weights:
if not ('layer_names' in f.attrs.keys()):
raise RuntimeError("Saved model does not contain weights.")
M.load_weights(filepath=filepath)
f.close()
logging.info("[*] Loaded TL model from {}, loading weights={}".format(filepath, load_weights))
return M
# def load_pkl_graph(name='model.pkl'):
# """Restore TL model archtecture from a a pickle file. No parameters be restored.
#
# Parameters
# -----------
# name : str
# The name of graph file.
#
# Returns
# --------
# network : TensorLayer Model.
#
# Examples
# --------
# >>> # It is better to use load_hdf5_graph
# """
# logging.info("[*] Loading TL graph from {}".format(name))
# with open(name, 'rb') as file:
# saved_file = pickle.load(file)
#
# M = static_graph2net(saved_file)
#
# return M
#
#
# def save_pkl_graph(network, name='model.pkl'):
# """Save the architecture of TL model into a pickle file. No parameters be saved.
#
# Parameters
# -----------
# network : TensorLayer layer
# The network to save.
# name : str
# The name of graph file.
#
# Example
# --------
# >>> # It is better to use save_hdf5_graph
# """
# if network.outputs is None:
# raise AssertionError("save_graph not support dynamic mode yet")
#
# logging.info("[*] Saving TL graph into {}".format(name))
#
# saved_file = net2static_graph(network)
#
# with open(name, 'wb') as file:
# pickle.dump(saved_file, file, protocol=pickle.HIGHEST_PROTOCOL)
# logging.info("[*] Saved graph")
# Load dataset functions
def load_mnist_dataset(shape=(-1, 784), path='data'):
"""Load the original mnist.
Automatically download MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 digit images respectively.
Parameters
----------
shape : tuple
The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)).
path : str
The path that the data is downloaded to.
Returns
-------
X_train, y_train, X_val, y_val, X_test, y_test: tuple
        Return the split training/validation/test sets respectively.
Examples
--------
>>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1,784), path='datasets')
>>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
"""
return _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/')
def load_fashion_mnist_dataset(shape=(-1, 784), path='data'):
"""Load the fashion mnist.
Automatically download fashion-MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 fashion images respectively, `examples <http://marubon-ds.blogspot.co.uk/2017/09/fashion-mnist-exploring.html>`__.
Parameters
----------
shape : tuple
The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)).
path : str
The path that the data is downloaded to.
Returns
-------
X_train, y_train, X_val, y_val, X_test, y_test: tuple
        Return the split training/validation/test sets respectively.
Examples
--------
>>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_fashion_mnist_dataset(shape=(-1,784), path='datasets')
>>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_fashion_mnist_dataset(shape=(-1, 28, 28, 1))
"""
return _load_mnist_dataset(
shape, path, name='fashion_mnist', url='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
)
def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/'):
"""A generic function to load mnist-like dataset.
Parameters:
----------
shape : tuple
The shape of digit images.
path : str
The path that the data is downloaded to.
name : str
        The dataset name you want to use (the default is 'mnist').
    url : str
        The url of the dataset (the default is 'http://yann.lecun.com/exdb/mnist/').
"""
path = os.path.join(path, name)
# Define functions for loading mnist-like data's images and labels.
# For convenience, they also download the requested files if needed.
def load_mnist_images(path, filename):
filepath = maybe_download_and_extract(filename, path, url)
logging.info(filepath)
# Read the inputs in Yann LeCun's binary format.
with gzip.open(filepath, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
# The inputs are vectors now, we reshape them to monochrome 2D images,
# following the shape convention: (examples, channels, rows, columns)
data = data.reshape(shape)
# The inputs come as bytes, we convert them to float32 in range [0,1].
# (Actually to range [0, 255/256], for compatibility to the version
# provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
return data / np.float32(256)
def load_mnist_labels(path, filename):
filepath = maybe_download_and_extract(filename, path, url)
# Read the labels in Yann LeCun's binary format.
with gzip.open(filepath, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
# The labels are vectors of integers now, that's exactly what we want.
return data
# Download and read the training and test set images and labels.
logging.info("Load or Download {0} > {1}".format(name.upper(), path))
X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz')
y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz')
X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz')
y_test = load_mnist_labels(path, 't10k-labels-idx1-ubyte.gz')
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
# We just return all the arrays in order, as expected in main().
# (It doesn't matter how we do this as long as we can read them again.)
X_train = np.asarray(X_train, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.int32)
X_val = np.asarray(X_val, dtype=np.float32)
y_val = np.asarray(y_val, dtype=np.int32)
X_test = np.asarray(X_test, dtype=np.float32)
y_test = np.asarray(y_test, dtype=np.int32)
return X_train, y_train, X_val, y_val, X_test, y_test
def load_cifar10_dataset(shape=(-1, 32, 32, 3), path='data', plotable=False):
"""Load CIFAR-10 dataset.
It consists of 60000 32x32 colour images in 10 classes, with
6000 images per class. There are 50000 training images and 10000 test images.
The dataset is divided into five training batches and one test batch, each with
10000 images. The test batch contains exactly 1000 randomly-selected images from
each class. The training batches contain the remaining images in random order,
but some training batches may contain more images from one class than another.
Between them, the training batches contain exactly 5000 images from each class.
Parameters
----------
    shape : tuple
The shape of digit images e.g. (-1, 3, 32, 32) and (-1, 32, 32, 3).
path : str
        The path that the data is downloaded to, the default is ``data/cifar10/``.
plotable : boolean
Whether to plot some image examples, False as default.
Examples
--------
>>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))
References
----------
- `CIFAR website <https://www.cs.toronto.edu/~kriz/cifar.html>`__
- `Data download link <https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz>`__
- `<https://teratail.com/questions/28932>`__
"""
path = os.path.join(path, 'cifar10')
logging.info("Load or Download cifar10 > {}".format(path))
# Helper function to unpickle the data
def unpickle(file):
fp = open(file, 'rb')
if sys.version_info.major == 2:
data = pickle.load(fp)
elif sys.version_info.major == 3:
data = pickle.load(fp, encoding='latin-1')
fp.close()
return data
filename = 'cifar-10-python.tar.gz'
url = 'https://www.cs.toronto.edu/~kriz/'
# Download and uncompress file
maybe_download_and_extract(filename, path, url, extract=True)
# Unpickle file and fill in data
X_train = None
y_train = []
for i in range(1, 6):
data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', "data_batch_{}".format(i)))
if i == 1:
X_train = data_dic['data']
else:
X_train = np.vstack((X_train, data_dic['data']))
y_train += data_dic['labels']
test_data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', "test_batch"))
X_test = test_data_dic['data']
y_test = np.array(test_data_dic['labels'])
if shape == (-1, 3, 32, 32):
X_test = X_test.reshape(shape)
X_train = X_train.reshape(shape)
elif shape == (-1, 32, 32, 3):
X_test = X_test.reshape(shape, order='F')
X_train = X_train.reshape(shape, order='F')
X_test = np.transpose(X_test, (0, 2, 1, 3))
X_train = np.transpose(X_train, (0, 2, 1, 3))
else:
X_test = X_test.reshape(shape)
X_train = X_train.reshape(shape)
y_train = np.array(y_train)
if plotable:
if sys.platform.startswith('darwin'):
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
logging.info('\nCIFAR-10')
fig = plt.figure(1)
logging.info('Shape of a training image: X_train[0] %s' % X_train[0].shape)
plt.ion() # interactive mode
count = 1
for _ in range(10): # each row
for _ in range(10): # each column
_ = fig.add_subplot(10, 10, count)
if shape == (-1, 3, 32, 32):
# plt.imshow(X_train[count-1], interpolation='nearest')
plt.imshow(np.transpose(X_train[count - 1], (1, 2, 0)), interpolation='nearest')
# plt.imshow(np.transpose(X_train[count-1], (2, 1, 0)), interpolation='nearest')
elif shape == (-1, 32, 32, 3):
plt.imshow(X_train[count - 1], interpolation='nearest')
# plt.imshow(np.transpose(X_train[count-1], (1, 0, 2)), interpolation='nearest')
else:
raise Exception("Do not support the given 'shape' to plot the image examples")
                plt.gca().xaxis.set_major_locator(plt.NullLocator())    # hide the axis ticks
plt.gca().yaxis.set_major_locator(plt.NullLocator())
count = count + 1
plt.draw() # interactive mode
plt.pause(3) # interactive mode
logging.info("X_train: %s" % X_train.shape)
logging.info("y_train: %s" % y_train.shape)
logging.info("X_test: %s" % X_test.shape)
logging.info("y_test: %s" % y_test.shape)
X_train = np.asarray(X_train, dtype=np.float32)
X_test = np.asarray(X_test, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.int32)
y_test = np.asarray(y_test, dtype=np.int32)
return X_train, y_train, X_test, y_test
def load_cropped_svhn(path='data', include_extra=True):
"""Load Cropped SVHN.
The Cropped Street View House Numbers (SVHN) Dataset contains 32x32x3 RGB images.
Digit '1' has label 1, '9' has label 9 and '0' has label 0 (the original dataset uses 10 to represent '0'), see `ufldl website <http://ufldl.stanford.edu/housenumbers/>`__.
Parameters
----------
path : str
The path that the data is downloaded to.
include_extra : boolean
If True (default), add extra images to the training set.
Returns
-------
X_train, y_train, X_test, y_test: tuple
        Return the split training/test sets respectively.
Examples
---------
>>> X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False)
>>> tl.vis.save_images(X_train[0:100], [10, 10], 'svhn.png')
"""
start_time = time.time()
path = os.path.join(path, 'cropped_svhn')
logging.info("Load or Download Cropped SVHN > {} | include extra images: {}".format(path, include_extra))
url = "http://ufldl.stanford.edu/housenumbers/"
np_file = os.path.join(path, "train_32x32.npz")
if file_exists(np_file) is False:
filename = "train_32x32.mat"
filepath = maybe_download_and_extract(filename, path, url)
mat = sio.loadmat(filepath)
X_train = mat['X'] / 255.0 # to [0, 1]
X_train = np.transpose(X_train, (3, 0, 1, 2))
y_train = np.squeeze(mat['y'], axis=1)
y_train[y_train == 10] = 0 # replace 10 to 0
np.savez(np_file, X=X_train, y=y_train)
del_file(filepath)
else:
v = np.load(np_file, allow_pickle=True)
X_train = v['X']
y_train = v['y']
logging.info(" n_train: {}".format(len(y_train)))
np_file = os.path.join(path, "test_32x32.npz")
if file_exists(np_file) is False:
filename = "test_32x32.mat"
filepath = maybe_download_and_extract(filename, path, url)
mat = sio.loadmat(filepath)
X_test = mat['X'] / 255.0
X_test = np.transpose(X_test, (3, 0, 1, 2))
y_test = np.squeeze(mat['y'], axis=1)
y_test[y_test == 10] = 0
np.savez(np_file, X=X_test, y=y_test)
del_file(filepath)
else:
v = np.load(np_file, allow_pickle=True)
X_test = v['X']
y_test = v['y']
logging.info(" n_test: {}".format(len(y_test)))
if include_extra:
logging.info(" getting extra 531131 images, please wait ...")
np_file = os.path.join(path, "extra_32x32.npz")
if file_exists(np_file) is False:
logging.info(" the first time to load extra images will take long time to convert the file format ...")
filename = "extra_32x32.mat"
filepath = maybe_download_and_extract(filename, path, url)
mat = sio.loadmat(filepath)
X_extra = mat['X'] / 255.0
X_extra = np.transpose(X_extra, (3, 0, 1, 2))
y_extra = np.squeeze(mat['y'], axis=1)
y_extra[y_extra == 10] = 0
np.savez(np_file, X=X_extra, y=y_extra)
del_file(filepath)
else:
v = np.load(np_file, allow_pickle=True)
X_extra = v['X']
y_extra = v['y']
# print(X_train.shape, X_extra.shape)
logging.info(" adding n_extra {} to n_train {}".format(len(y_extra), len(y_train)))
t = time.time()
X_train = np.concatenate((X_train, X_extra), 0)
y_train = np.concatenate((y_train, y_extra), 0)
# X_train = np.append(X_train, X_extra, axis=0)
# y_train = np.append(y_train, y_extra, axis=0)
logging.info(" added n_extra {} to n_train {} took {}s".format(len(y_extra), len(y_train), time.time() - t))
else:
logging.info(" no extra images are included")
logging.info(" image size: %s n_train: %d n_test: %d" % (str(X_train.shape[1:4]), len(y_train), len(y_test)))
logging.info(" took: {}s".format(int(time.time() - start_time)))
return X_train, y_train, X_test, y_test
def load_ptb_dataset(path='data'):
"""Load Penn TreeBank (PTB) dataset.
It is used in many LANGUAGE MODELING papers,
including "Empirical Evaluation and Combination of Advanced Language
Modeling Techniques", "Recurrent Neural Network Regularization".
It consists of 929k training words, 73k validation words, and 82k test
words. It has 10k words in its vocabulary.
Parameters
----------
path : str
        The path that the data is downloaded to, the default is ``data/ptb/``.
Returns
--------
train_data, valid_data, test_data : list of int
The training, validating and testing data in integer format.
vocab_size : int
The vocabulary size.
Examples
--------
>>> train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()
References
---------------
- ``tensorflow.models.rnn.ptb import reader``
- `Manual download <http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz>`__
Notes
------
- If you want to get the raw data, see the source code.
"""
path = os.path.join(path, 'ptb')
logging.info("Load or Download Penn TreeBank (PTB) dataset > {}".format(path))
    # Maybe download and uncompress the tar, or load existing files
filename = 'simple-examples.tgz'
url = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/'
maybe_download_and_extract(filename, path, url, extract=True)
data_path = os.path.join(path, 'simple-examples', 'data')
train_path = os.path.join(data_path, "ptb.train.txt")
valid_path = os.path.join(data_path, "ptb.valid.txt")
test_path = os.path.join(data_path, "ptb.test.txt")
word_to_id = nlp.build_vocab(nlp.read_words(train_path))
train_data = nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id)
valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id)
test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id)
vocab_size = len(word_to_id)
# logging.info(nlp.read_words(train_path)) # ... 'according', 'to', 'mr.', '<unk>', '<eos>']
# logging.info(train_data) # ... 214, 5, 23, 1, 2]
# logging.info(word_to_id) # ... 'beyond': 1295, 'anti-nuclear': 9599, 'trouble': 1520, '<eos>': 2 ... }
# logging.info(vocabulary) # 10000
# exit()
return train_data, valid_data, test_data, vocab_size
def load_matt_mahoney_text8_dataset(path='data'):
"""Load Matt Mahoney's dataset.
Download a text file from Matt Mahoney's website
if not present, and make sure it's the right size.
Extract the first file enclosed in a zip file as a list of words.
This dataset can be used for Word Embedding.
Parameters
----------
path : str
        The path that the data is downloaded to, the default is ``data/mm_test8/``.
Returns
--------
list of str
The raw text data e.g. [.... 'their', 'families', 'who', 'were', 'expelled', 'from', 'jerusalem', ...]
Examples
--------
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> print('Data size', len(words))
"""
path = os.path.join(path, 'mm_test8')
logging.info("Load or Download matt_mahoney_text8 Dataset> {}".format(path))
filename = 'text8.zip'
url = 'http://mattmahoney.net/dc/'
maybe_download_and_extract(filename, path, url, expected_bytes=31344016)
with zipfile.ZipFile(os.path.join(path, filename)) as f:
word_list = f.read(f.namelist()[0]).split()
for idx, _ in enumerate(word_list):
word_list[idx] = word_list[idx].decode()
return word_list
def load_imdb_dataset(
path='data', nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2,
index_from=3
):
"""Load IMDB dataset.
Parameters
----------
path : str
        The path that the data is downloaded to, the default is ``data/imdb/``.
nb_words : int
Number of words to get.
skip_top : int
Top most frequent words to ignore (they will appear as oov_char value in the sequence data).
maxlen : int
Maximum sequence length. Any longer sequence will be truncated.
seed : int
Seed for reproducible data shuffling.
start_char : int
The start of a sequence will be marked with this character. Set to 1 because 0 is usually the padding character.
oov_char : int
Words that were cut out because of the num_words or skip_top limit will be replaced with this character.
index_from : int
Index actual words with this index and higher.
Examples
--------
>>> X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(
... nb_words=20000, test_split=0.2)
>>> print('X_train.shape', X_train.shape)
(20000,) [[1, 62, 74, ... 1033, 507, 27],[1, 60, 33, ... 13, 1053, 7]..]
>>> print('y_train.shape', y_train.shape)
(20000,) [1 0 0 ..., 1 0 1]
References
-----------
- `Modified from keras. <https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py>`__
"""
path = os.path.join(path, 'imdb')
filename = "imdb.pkl"
url = 'https://s3.amazonaws.com/text-datasets/'
maybe_download_and_extract(filename, path, url)
if filename.endswith(".gz"):
f = gzip.open(os.path.join(path, filename), 'rb')
else:
f = open(os.path.join(path, filename), 'rb')
X, labels = cPickle.load(f)
f.close()
np.random.seed(seed)
np.random.shuffle(X)
np.random.seed(seed)
np.random.shuffle(labels)
if start_char is not None:
X = [[start_char] + [w + index_from for w in x] for x in X]
elif index_from:
X = [[w + index_from for w in x] for x in X]
if maxlen:
new_X = []
new_labels = []
for x, y in zip(X, labels):
if len(x) < maxlen:
new_X.append(x)
new_labels.append(y)
X = new_X
labels = new_labels
if not X:
raise Exception(
'After filtering for sequences shorter than maxlen=' + str(maxlen) + ', no sequence was kept. '
'Increase maxlen.'
)
if not nb_words:
nb_words = max([max(x) for x in X])
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X]
else:
nX = []
for x in X:
nx = []
for w in x:
if (w >= nb_words or w < skip_top):
nx.append(w)
nX.append(nx)
X = nX
X_train = np.array(X[:int(len(X) * (1 - test_split))])
y_train = np.array(labels[:int(len(X) * (1 - test_split))])
X_test = np.array(X[int(len(X) * (1 - test_split)):])
y_test = np.array(labels[int(len(X) * (1 - test_split)):])
return X_train, y_train, X_test, y_test
def load_nietzsche_dataset(path='data'):
"""Load Nietzsche dataset.
Parameters
----------
path : str
        The path that the data is downloaded to, the default is ``data/nietzsche/``.
Returns
--------
str
The content.
Examples
--------
>>> see tutorial_generate_text.py
>>> words = tl.files.load_nietzsche_dataset()
>>> words = basic_clean_str(words)
>>> words = words.split()
"""
logging.info("Load or Download nietzsche dataset > {}".format(path))
path = os.path.join(path, 'nietzsche')
filename = "nietzsche.txt"
url = 'https://s3.amazonaws.com/text-datasets/'
filepath = maybe_download_and_extract(filename, path, url)
with open(filepath, "r") as f:
words = f.read()
return words
def load_wmt_en_fr_dataset(path='data'):
"""Load WMT'15 English-to-French translation dataset.
It will download the data from the WMT'15 Website (10^9-French-English corpus), and the 2013 news test from the same site as development set.
Returns the directories of training data and test data.
Parameters
----------
path : str
        The path that the data is downloaded to, the default is ``data/wmt_en_fr/``.
References
----------
- Code modified from /tensorflow/models/rnn/translation/data_utils.py
Notes
-----
Usually, it will take a long time to download this dataset.
"""
path = os.path.join(path, 'wmt_en_fr')
# URLs for WMT data.
_WMT_ENFR_TRAIN_URL = "http://www.statmt.org/wmt10/"
_WMT_ENFR_DEV_URL = "http://www.statmt.org/wmt15/"
def gunzip_file(gz_path, new_path):
"""Unzips from gz_path into new_path."""
logging.info("Unpacking %s to %s" % (gz_path, new_path))
with gzip.open(gz_path, "rb") as gz_file:
with open(new_path, "wb") as new_file:
for line in gz_file:
new_file.write(line)
def get_wmt_enfr_train_set(path):
"""Download the WMT en-fr training corpus to directory unless it's there."""
filename = "training-giga-fren.tar"
maybe_download_and_extract(filename, path, _WMT_ENFR_TRAIN_URL, extract=True)
train_path = os.path.join(path, "giga-fren.release2.fixed")
gunzip_file(train_path + ".fr.gz", train_path + ".fr")
gunzip_file(train_path + ".en.gz", train_path + ".en")
return train_path
def get_wmt_enfr_dev_set(path):
"""Download the WMT en-fr training corpus to directory unless it's there."""
filename = "dev-v2.tgz"
dev_file = maybe_download_and_extract(filename, path, _WMT_ENFR_DEV_URL, extract=False)
dev_name = "newstest2013"
dev_path = os.path.join(path, "newstest2013")
if not (gfile.Exists(dev_path + ".fr") and gfile.Exists(dev_path + ".en")):
logging.info("Extracting tgz file %s" % dev_file)
with tarfile.open(dev_file, "r:gz") as dev_tar:
fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr")
en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en")
fr_dev_file.name = dev_name + ".fr" # Extract without "dev/" prefix.
en_dev_file.name = dev_name + ".en"
dev_tar.extract(fr_dev_file, path)
dev_tar.extract(en_dev_file, path)
return dev_path
logging.info("Load or Download WMT English-to-French translation > {}".format(path))
train_path = get_wmt_enfr_train_set(path)
dev_path = get_wmt_enfr_dev_set(path)
return train_path, dev_path
def load_flickr25k_dataset(tag='sky', path="data", n_threads=50, printable=False):
"""Load Flickr25K dataset.
Returns a list of images by a given tag from Flick25k dataset,
it will download Flickr25k from `the official website <http://press.liacs.nl/mirflickr/mirdownload.html>`__
at the first time you use it.
Parameters
------------
tag : str or None
What images to return.
- If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search <https://www.flickr.com/search/>`__.
- If you want to get all images, set to ``None``.
path : str
        The path that the data is downloaded to, the default is ``data/flickr25k/``.
n_threads : int
The number of thread to read image.
printable : boolean
        Whether to print information when reading images, default is ``False``.
Examples
-----------
Get images with tag of sky
>>> images = tl.files.load_flickr25k_dataset(tag='sky')
Get all images
>>> images = tl.files.load_flickr25k_dataset(tag=None, n_threads=100, printable=True)
"""
path = os.path.join(path, 'flickr25k')
filename = 'mirflickr25k.zip'
url = 'http://press.liacs.nl/mirflickr/mirflickr25k/'
# download dataset
if folder_exists(os.path.join(path, "mirflickr")) is False:
logging.info("[*] Flickr25k is nonexistent in {}".format(path))
maybe_download_and_extract(filename, path, url, extract=True)
del_file(os.path.join(path, filename))
# return images by the given tag.
# 1. image path list
folder_imgs = os.path.join(path, "mirflickr")
path_imgs = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False)
path_imgs.sort(key=natural_keys)
# 2. tag path list
folder_tags = os.path.join(path, "mirflickr", "meta", "tags")
path_tags = load_file_list(path=folder_tags, regx='\\.txt', printable=False)
path_tags.sort(key=natural_keys)
# 3. select images
if tag is None:
logging.info("[Flickr25k] reading all images")
else:
logging.info("[Flickr25k] reading images with tag: {}".format(tag))
images_list = []
for idx, _v in enumerate(path_tags):
tags = read_file(os.path.join(folder_tags, path_tags[idx])).split('\n')
# logging.info(idx+1, tags)
if tag is None or tag in tags:
images_list.append(path_imgs[idx])
images = visualize.read_images(images_list, folder_imgs, n_threads=n_threads, printable=printable)
return images
def load_flickr1M_dataset(tag='sky', size=10, path="data", n_threads=50, printable=False):
"""Load Flick1M dataset.
Returns a list of images by a given tag from Flickr1M dataset,
it will download Flickr1M from `the official website <http://press.liacs.nl/mirflickr/mirdownload.html>`__
at the first time you use it.
Parameters
------------
tag : str or None
What images to return.
- If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search <https://www.flickr.com/search/>`__.
- If you want to get all images, set to ``None``.
size : int
        An integer between 1 and 10: 1 means 100k images ... 5 means 500k images, 10 means all 1 million images. Default is 10.
path : str
        The path that the data is downloaded to, the default is ``data/flickr1M/``.
n_threads : int
The number of thread to read image.
printable : boolean
        Whether to print information when reading images, default is ``False``.
Examples
----------
Use 200k images
>>> images = tl.files.load_flickr1M_dataset(tag='zebra', size=2)
Use 1 Million images
>>> images = tl.files.load_flickr1M_dataset(tag='zebra')
"""
path = os.path.join(path, 'flickr1M')
logging.info("[Flickr1M] using {}% of images = {}".format(size * 10, size * 100000))
images_zip = [
'images0.zip', 'images1.zip', 'images2.zip', 'images3.zip', 'images4.zip', 'images5.zip', 'images6.zip',
'images7.zip', 'images8.zip', 'images9.zip'
]
tag_zip = 'tags.zip'
url = 'http://press.liacs.nl/mirflickr/mirflickr1m/'
# download dataset
for image_zip in images_zip[0:size]:
image_folder = image_zip.split(".")[0]
# logging.info(path+"/"+image_folder)
if folder_exists(os.path.join(path, image_folder)) is False:
# logging.info(image_zip)
logging.info("[Flickr1M] {} is missing in {}".format(image_folder, path))
maybe_download_and_extract(image_zip, path, url, extract=True)
del_file(os.path.join(path, image_zip))
# os.system("mv {} {}".format(os.path.join(path, 'images'), os.path.join(path, image_folder)))
shutil.move(os.path.join(path, 'images'), os.path.join(path, image_folder))
else:
logging.info("[Flickr1M] {} exists in {}".format(image_folder, path))
# download tag
if folder_exists(os.path.join(path, "tags")) is False:
logging.info("[Flickr1M] tag files is nonexistent in {}".format(path))
maybe_download_and_extract(tag_zip, path, url, extract=True)
del_file(os.path.join(path, tag_zip))
else:
logging.info("[Flickr1M] tags exists in {}".format(path))
# 1. image path list
images_list = []
images_folder_list = []
for i in range(0, size):
images_folder_list += load_folder_list(path=os.path.join(path, 'images%d' % i))
images_folder_list.sort(key=lambda s: int(s.split('/')[-1])) # folder/images/ddd
for folder in images_folder_list[0:size * 10]:
tmp = load_file_list(path=folder, regx='\\.jpg', printable=False)
tmp.sort(key=lambda s: int(s.split('.')[-2])) # ddd.jpg
images_list.extend([os.path.join(folder, x) for x in tmp])
# 2. tag path list
tag_list = []
tag_folder_list = load_folder_list(os.path.join(path, "tags"))
# tag_folder_list.sort(key=lambda s: int(s.split("/")[-1])) # folder/images/ddd
tag_folder_list.sort(key=lambda s: int(os.path.basename(s)))
for folder in tag_folder_list[0:size * 10]:
tmp = load_file_list(path=folder, regx='\\.txt', printable=False)
tmp.sort(key=lambda s: int(s.split('.')[-2])) # ddd.txt
tmp = [os.path.join(folder, s) for s in tmp]
tag_list += tmp
# 3. select images
logging.info("[Flickr1M] searching tag: {}".format(tag))
select_images_list = []
for idx, _val in enumerate(tag_list):
tags = read_file(tag_list[idx]).split('\n')
if tag in tags:
select_images_list.append(images_list[idx])
logging.info("[Flickr1M] reading images with tag: {}".format(tag))
images = visualize.read_images(select_images_list, '', n_threads=n_threads, printable=printable)
return images
def load_cyclegan_dataset(filename='summer2winter_yosemite', path='data'):
"""Load images from CycleGAN's database, see `this link <https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/>`__.
Parameters
------------
filename : str
The dataset you want, see `this link <https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/>`__.
path : str
        The path that the data is downloaded to, the default is `data/cyclegan`.
Examples
---------
>>> im_train_A, im_train_B, im_test_A, im_test_B = load_cyclegan_dataset(filename='summer2winter_yosemite')
"""
path = os.path.join(path, 'cyclegan')
url = 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/'
if folder_exists(os.path.join(path, filename)) is False:
logging.info("[*] {} is nonexistent in {}".format(filename, path))
maybe_download_and_extract(filename + '.zip', path, url, extract=True)
del_file(os.path.join(path, filename + '.zip'))
def load_image_from_folder(path):
path_imgs = load_file_list(path=path, regx='\\.jpg', printable=False)
return visualize.read_images(path_imgs, path=path, n_threads=10, printable=False)
im_train_A = load_image_from_folder(os.path.join(path, filename, "trainA"))
im_train_B = load_image_from_folder(os.path.join(path, filename, "trainB"))
im_test_A = load_image_from_folder(os.path.join(path, filename, "testA"))
im_test_B = load_image_from_folder(os.path.join(path, filename, "testB"))
def if_2d_to_3d(images): # [h, w] --> [h, w, 3]
for i, _v in enumerate(images):
if len(images[i].shape) == 2:
images[i] = images[i][:, :, np.newaxis]
images[i] = np.tile(images[i], (1, 1, 3))
return images
im_train_A = if_2d_to_3d(im_train_A)
im_train_B = if_2d_to_3d(im_train_B)
im_test_A = if_2d_to_3d(im_test_A)
im_test_B = if_2d_to_3d(im_test_B)
return im_train_A, im_train_B, im_test_A, im_test_B
def download_file_from_google_drive(ID, destination):
"""Download file from Google Drive.
See ``tl.files.load_celebA_dataset`` for example.
Parameters
--------------
ID : str
The driver ID.
destination : str
The destination for save file.
"""
try:
from tqdm import tqdm
except ImportError as e:
print(e)
raise ImportError("Module tqdm not found. Please install tqdm via pip or other package managers.")
try:
import requests
except ImportError as e:
print(e)
raise ImportError("Module requests not found. Please install requests via pip or other package managers.")
def save_response_content(response, destination, chunk_size=32 * 1024):
total_size = int(response.headers.get('content-length', 0))
with open(destination, "wb") as f:
for chunk in tqdm(response.iter_content(chunk_size), total=total_size, unit='B', unit_scale=True,
desc=destination):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={'id': ID}, stream=True)
token = get_confirm_token(response)
if token:
params = {'id': ID, 'confirm': token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
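def _example_download_file_from_google_drive():
    # A minimal usage sketch: "FILE_ID_PLACEHOLDER" is not a real Drive ID, substitute
    # the ID of a publicly shared file. Requires the `requests` and `tqdm` packages and
    # network access; nothing is downloaded unless this function is called.
    exists_or_mkdir('data')
    download_file_from_google_drive("FILE_ID_PLACEHOLDER", os.path.join('data', 'example.bin'))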
def load_celebA_dataset(path='data'):
"""Load CelebA dataset
Return a list of image path.
Parameters
-----------
path : str
        The path that the data is downloaded to, the default is ``data/celebA/``.
"""
data_dir = 'celebA'
filename, drive_id = "img_align_celeba.zip", "0B7EVK8r0v71pZjFTYXZWM3FlRnM"
save_path = os.path.join(path, filename)
image_path = os.path.join(path, data_dir)
if os.path.exists(image_path):
logging.info('[*] {} already exists'.format(save_path))
else:
exists_or_mkdir(path)
download_file_from_google_drive(drive_id, save_path)
zip_dir = ''
with zipfile.ZipFile(save_path) as zf:
zip_dir = zf.namelist()[0]
zf.extractall(path)
os.remove(save_path)
os.rename(os.path.join(path, zip_dir), image_path)
data_files = load_file_list(path=image_path, regx='\\.jpg', printable=False)
for i, _v in enumerate(data_files):
data_files[i] = os.path.join(image_path, data_files[i])
return data_files
def load_voc_dataset(path='data', dataset='2012', contain_classes_in_person=False):
"""Pascal VOC 2007/2012 Dataset.
It has 20 objects:
aeroplane, bicycle, bird, boat, bottle, bus, car, cat, chair, cow, diningtable, dog, horse, motorbike, person, pottedplant, sheep, sofa, train, tvmonitor
    and 3 additional classes for person: head, hand, foot.
Parameters
-----------
path : str
        The path that the data is downloaded to, the default is ``data/VOC``.
dataset : str
The VOC dataset version, `2012`, `2007`, `2007test` or `2012test`. We usually train model on `2007+2012` and test it on `2007test`.
contain_classes_in_person : boolean
Whether include head, hand and foot annotation, default is False.
Returns
---------
imgs_file_list : list of str
Full paths of all images.
imgs_semseg_file_list : list of str
Full paths of all maps for semantic segmentation. Note that not all images have this map!
imgs_insseg_file_list : list of str
Full paths of all maps for instance segmentation. Note that not all images have this map!
imgs_ann_file_list : list of str
Full paths of all annotations for bounding box and object class, all images have this annotations.
classes : list of str
Classes in order.
classes_in_person : list of str
Classes in person.
classes_dict : dictionary
Class label to integer.
n_objs_list : list of int
Number of objects in all images in ``imgs_file_list`` in order.
objs_info_list : list of str
Darknet format for the annotation of all images in ``imgs_file_list`` in order. ``[class_id x_centre y_centre width height]`` in ratio format.
objs_info_dicts : dictionary
The annotation of all images in ``imgs_file_list``, ``{imgs_file_list : dictionary for annotation}``,
format from `TensorFlow/Models/object-detection <https://github.com/tensorflow/models/blob/master/object_detection/create_pascal_tf_record.py>`__.
Examples
----------
>>> imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list,
>>> classes, classes_in_person, classes_dict,
>>> n_objs_list, objs_info_list, objs_info_dicts = tl.files.load_voc_dataset(dataset="2012", contain_classes_in_person=False)
>>> idx = 26
>>> print(classes)
['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
>>> print(classes_dict)
{'sheep': 16, 'horse': 12, 'bicycle': 1, 'bottle': 4, 'cow': 9, 'sofa': 17, 'car': 6, 'dog': 11, 'cat': 7, 'person': 14, 'train': 18, 'diningtable': 10, 'aeroplane': 0, 'bus': 5, 'pottedplant': 15, 'tvmonitor': 19, 'chair': 8, 'bird': 2, 'boat': 3, 'motorbike': 13}
>>> print(imgs_file_list[idx])
data/VOC/VOC2012/JPEGImages/2007_000423.jpg
>>> print(n_objs_list[idx])
2
>>> print(imgs_ann_file_list[idx])
data/VOC/VOC2012/Annotations/2007_000423.xml
>>> print(objs_info_list[idx])
14 0.173 0.461333333333 0.142 0.496
14 0.828 0.542666666667 0.188 0.594666666667
>>> ann = tl.prepro.parse_darknet_ann_str_to_list(objs_info_list[idx])
>>> print(ann)
[[14, 0.173, 0.461333333333, 0.142, 0.496], [14, 0.828, 0.542666666667, 0.188, 0.594666666667]]
>>> c, b = tl.prepro.parse_darknet_ann_list_to_cls_box(ann)
>>> print(c, b)
[14, 14] [[0.173, 0.461333333333, 0.142, 0.496], [0.828, 0.542666666667, 0.188, 0.594666666667]]
References
-------------
- `Pascal VOC2012 Website <http://host.robots.ox.ac.uk/pascal/VOC/voc2012/#devkit>`__.
- `Pascal VOC2007 Website <http://host.robots.ox.ac.uk/pascal/VOC/voc2007/>`__.
"""
import xml.etree.ElementTree as ET
try:
import lxml.etree as etree
except ImportError as e:
print(e)
raise ImportError("Module lxml not found. Please install lxml via pip or other package managers.")
path = os.path.join(path, 'VOC')
def _recursive_parse_xml_to_dict(xml):
"""Recursively parses XML contents to python dict.
We assume that `object` tags are the only ones that can appear
multiple times at the same level of a tree.
Args:
xml: xml tree obtained by parsing XML file contents using lxml.etree
Returns:
Python dictionary holding XML contents.
"""
if not xml:
# if xml is not None:
return {xml.tag: xml.text}
result = {}
for child in xml:
child_result = _recursive_parse_xml_to_dict(child)
if child.tag != 'object':
result[child.tag] = child_result[child.tag]
else:
if child.tag not in result:
result[child.tag] = []
result[child.tag].append(child_result[child.tag])
return {xml.tag: result}
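    def _example_recursive_parse():
        # A minimal sketch (toy annotation, not a real VOC file): leaf tags become
        # key/value pairs and repeated 'object' tags are collected into a list.
        node = etree.fromstring(
            "<annotation><filename>a.jpg</filename><object><name>dog</name></object></annotation>"
        )
        return _recursive_parse_xml_to_dict(node)
        # -> {'annotation': {'filename': 'a.jpg', 'object': [{'name': 'dog'}]}}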
if dataset == "2012":
url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/"
tar_filename = "VOCtrainval_11-May-2012.tar"
extracted_filename = "VOC2012" # "VOCdevkit/VOC2012"
logging.info(" [============= VOC 2012 =============]")
elif dataset == "2012test":
extracted_filename = "VOC2012test" # "VOCdevkit/VOC2012"
logging.info(" [============= VOC 2012 Test Set =============]")
logging.info(
" \nAuthor: 2012test only have person annotation, so 2007test is highly recommended for testing !\n"
)
time.sleep(3)
if os.path.isdir(os.path.join(path, extracted_filename)) is False:
logging.info("For VOC 2012 Test data - online registration required")
logging.info(
" Please download VOC2012test.tar from: \n register: http://host.robots.ox.ac.uk:8080 \n voc2012 : http://host.robots.ox.ac.uk:8080/eval/challenges/voc2012/ \ndownload: http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2012test.tar"
)
logging.info(" unzip VOC2012test.tar,rename the folder to VOC2012test and put it into %s" % path)
exit()
# # http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2012test.tar
# url = "http://host.robots.ox.ac.uk:8080/eval/downloads/"
# tar_filename = "VOC2012test.tar"
elif dataset == "2007":
url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/"
tar_filename = "VOCtrainval_06-Nov-2007.tar"
extracted_filename = "VOC2007"
logging.info(" [============= VOC 2007 =============]")
elif dataset == "2007test":
# http://host.robots.ox.ac.uk/pascal/VOC/voc2007/index.html#testdata
# http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar
url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/"
tar_filename = "VOCtest_06-Nov-2007.tar"
extracted_filename = "VOC2007test"
logging.info(" [============= VOC 2007 Test Set =============]")
else:
raise Exception("Please set the dataset aug to 2012, 2012test or 2007.")
# download dataset
if dataset != "2012test":
_platform = sys.platform
if folder_exists(os.path.join(path, extracted_filename)) is False:
logging.info("[VOC] {} is nonexistent in {}".format(extracted_filename, path))
maybe_download_and_extract(tar_filename, path, url, extract=True)
del_file(os.path.join(path, tar_filename))
if dataset == "2012":
if _platform == "win32":
os.system("mv {}\VOCdevkit\VOC2012 {}\VOC2012".format(path, path))
else:
os.system("mv {}/VOCdevkit/VOC2012 {}/VOC2012".format(path, path))
elif dataset == "2007":
if _platform == "win32":
os.system("mv {}\VOCdevkit\VOC2007 {}\VOC2007".format(path, path))
else:
os.system("mv {}/VOCdevkit/VOC2007 {}/VOC2007".format(path, path))
elif dataset == "2007test":
if _platform == "win32":
os.system("mv {}\VOCdevkit\VOC2007 {}\VOC2007test".format(path, path))
else:
os.system("mv {}/VOCdevkit/VOC2007 {}/VOC2007test".format(path, path))
del_folder(os.path.join(path, 'VOCdevkit'))
# object classes(labels) NOTE: YOU CAN CUSTOMIZE THIS LIST
classes = [
"aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog",
"horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"
]
if contain_classes_in_person:
classes_in_person = ["head", "hand", "foot"]
else:
classes_in_person = []
classes += classes_in_person # use extra 3 classes for person
classes_dict = utils.list_string_to_dict(classes)
logging.info("[VOC] object classes {}".format(classes_dict))
# 1. image path list
# folder_imgs = path+"/"+extracted_filename+"/JPEGImages/"
folder_imgs = os.path.join(path, extracted_filename, "JPEGImages")
imgs_file_list = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False)
logging.info("[VOC] {} images found".format(len(imgs_file_list)))
imgs_file_list.sort(
key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000027.jpg --> 2007000027
imgs_file_list = [os.path.join(folder_imgs, s) for s in imgs_file_list]
# logging.info('IM',imgs_file_list[0::3333], imgs_file_list[-1])
if dataset != "2012test":
# ======== 2. semantic segmentation maps path list
# folder_semseg = path+"/"+extracted_filename+"/SegmentationClass/"
folder_semseg = os.path.join(path, extracted_filename, "SegmentationClass")
imgs_semseg_file_list = load_file_list(path=folder_semseg, regx='\\.png', printable=False)
logging.info("[VOC] {} maps for semantic segmentation found".format(len(imgs_semseg_file_list)))
imgs_semseg_file_list.sort(
key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000032.png --> 2007000032
imgs_semseg_file_list = [os.path.join(folder_semseg, s) for s in imgs_semseg_file_list]
# logging.info('Semantic Seg IM',imgs_semseg_file_list[0::333], imgs_semseg_file_list[-1])
# ======== 3. instance segmentation maps path list
# folder_insseg = path+"/"+extracted_filename+"/SegmentationObject/"
folder_insseg = os.path.join(path, extracted_filename, "SegmentationObject")
imgs_insseg_file_list = load_file_list(path=folder_insseg, regx='\\.png', printable=False)
logging.info("[VOC] {} maps for instance segmentation found".format(len(imgs_semseg_file_list)))
imgs_insseg_file_list.sort(
key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000032.png --> 2007000032
imgs_insseg_file_list = [os.path.join(folder_insseg, s) for s in imgs_insseg_file_list]
# logging.info('Instance Seg IM',imgs_insseg_file_list[0::333], imgs_insseg_file_list[-1])
else:
imgs_semseg_file_list = []
imgs_insseg_file_list = []
# 4. annotations for bounding box and object class
# folder_ann = path+"/"+extracted_filename+"/Annotations/"
folder_ann = os.path.join(path, extracted_filename, "Annotations")
imgs_ann_file_list = load_file_list(path=folder_ann, regx='\\.xml', printable=False)
logging.info(
"[VOC] {} XML annotation files for bounding box and object class found".format(len(imgs_ann_file_list))
)
imgs_ann_file_list.sort(
key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000027.xml --> 2007000027
imgs_ann_file_list = [os.path.join(folder_ann, s) for s in imgs_ann_file_list]
# logging.info('ANN',imgs_ann_file_list[0::3333], imgs_ann_file_list[-1])
if dataset == "2012test": # remove unused images in JPEG folder
imgs_file_list_new = []
for ann in imgs_ann_file_list:
ann = os.path.split(ann)[-1].split('.')[0]
for im in imgs_file_list:
if ann in im:
imgs_file_list_new.append(im)
break
imgs_file_list = imgs_file_list_new
logging.info("[VOC] keep %d images" % len(imgs_file_list_new))
# parse XML annotations
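    # convert() below maps a VOC bounding box given as (xmin, xmax, ymin, ymax) to the
    # normalised darknet format (x_center, y_center, width, height), with every value
    # scaled into [0, 1] by the image width/height.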
def convert(size, box):
dw = 1. / size[0]
dh = 1. / size[1]
x = (box[0] + box[1]) / 2.0
y = (box[2] + box[3]) / 2.0
w = box[1] - box[0]
h = box[3] - box[2]
x = x * dw
w = w * dw
y = y * dh
h = h * dh
return x, y, w, h
def convert_annotation(file_name):
"""Given VOC2012 XML Annotations, returns number of objects and info."""
in_file = open(file_name)
out_file = ""
tree = ET.parse(in_file)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
n_objs = 0
for obj in root.iter('object'):
if dataset != "2012test":
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in classes or int(difficult) == 1:
continue
else:
cls = obj.find('name').text
if cls not in classes:
continue
cls_id = classes.index(cls)
xmlbox = obj.find('bndbox')
b = (
float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),
float(xmlbox.find('ymax').text)
)
bb = convert((w, h), b)
out_file += str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n'
n_objs += 1
if cls in "person":
for part in obj.iter('part'):
cls = part.find('name').text
if cls not in classes_in_person:
continue
cls_id = classes.index(cls)
xmlbox = part.find('bndbox')
b = (
float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)
)
bb = convert((w, h), b)
# out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
out_file += str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n'
n_objs += 1
in_file.close()
return n_objs, out_file
logging.info("[VOC] Parsing xml annotations files")
n_objs_list = []
objs_info_list = [] # Darknet Format list of string
objs_info_dicts = {}
for idx, ann_file in enumerate(imgs_ann_file_list):
n_objs, objs_info = convert_annotation(ann_file)
n_objs_list.append(n_objs)
objs_info_list.append(objs_info)
with tf.io.gfile.GFile(ann_file, 'r') as fid:
xml_str = fid.read()
xml = etree.fromstring(xml_str)
data = _recursive_parse_xml_to_dict(xml)['annotation']
objs_info_dicts.update({imgs_file_list[idx]: data})
return imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list, classes, classes_in_person, classes_dict, n_objs_list, objs_info_list, objs_info_dicts
def load_mpii_pose_dataset(path='data', is_16_pos_only=False):
"""Load MPII Human Pose Dataset.
Parameters
-----------
path : str
The path that the data is downloaded to.
is_16_pos_only : boolean
        If True, only return people that contain 16 pose keypoints. (Usually used for single-person pose estimation.)
Returns
----------
img_train_list : list of str
The image directories of training data.
ann_train_list : list of dict
The annotations of training data.
img_test_list : list of str
The image directories of testing data.
ann_test_list : list of dict
The annotations of testing data.
Examples
--------
>>> import pprint
>>> import tensorlayer as tl
>>> img_train_list, ann_train_list, img_test_list, ann_test_list = tl.files.load_mpii_pose_dataset()
>>> image = tl.vis.read_image(img_train_list[0])
>>> tl.vis.draw_mpii_pose_to_image(image, ann_train_list[0], 'image.png')
>>> pprint.pprint(ann_train_list[0])
References
-----------
- `MPII Human Pose Dataset. CVPR 14 <http://human-pose.mpi-inf.mpg.de>`__
- `MPII Human Pose Models. CVPR 16 <http://pose.mpi-inf.mpg.de>`__
- `MPII Human Shape, Poselet Conditioned Pictorial Structures and etc <http://pose.mpi-inf.mpg.de/#related>`__
    - `MPII Keypoints and ID <http://human-pose.mpi-inf.mpg.de/#download>`__
"""
path = os.path.join(path, 'mpii_human_pose')
logging.info("Load or Download MPII Human Pose > {}".format(path))
# annotation
url = "http://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/"
tar_filename = "mpii_human_pose_v1_u12_2.zip"
extracted_filename = "mpii_human_pose_v1_u12_2"
if folder_exists(os.path.join(path, extracted_filename)) is False:
logging.info("[MPII] (annotation) {} is nonexistent in {}".format(extracted_filename, path))
maybe_download_and_extract(tar_filename, path, url, extract=True)
del_file(os.path.join(path, tar_filename))
# images
url = "http://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/"
tar_filename = "mpii_human_pose_v1.tar.gz"
extracted_filename2 = "images"
if folder_exists(os.path.join(path, extracted_filename2)) is False:
logging.info("[MPII] (images) {} is nonexistent in {}".format(extracted_filename, path))
maybe_download_and_extract(tar_filename, path, url, extract=True)
del_file(os.path.join(path, tar_filename))
# parse annotation, format see http://human-pose.mpi-inf.mpg.de/#download
logging.info("reading annotations from mat file ...")
# mat = sio.loadmat(os.path.join(path, extracted_filename, "mpii_human_pose_v1_u12_1.mat"))
# def fix_wrong_joints(joint): # https://github.com/mitmul/deeppose/blob/master/datasets/mpii_dataset.py
# if '12' in joint and '13' in joint and '2' in joint and '3' in joint:
# if ((joint['12'][0] < joint['13'][0]) and
# (joint['3'][0] < joint['2'][0])):
# joint['2'], joint['3'] = joint['3'], joint['2']
# if ((joint['12'][0] > joint['13'][0]) and
# (joint['3'][0] > joint['2'][0])):
# joint['2'], joint['3'] = joint['3'], joint['2']
# return joint
ann_train_list = []
ann_test_list = []
img_train_list = []
img_test_list = []
def save_joints():
# joint_data_fn = os.path.join(path, 'data.json')
# fp = open(joint_data_fn, 'w')
mat = sio.loadmat(os.path.join(path, extracted_filename, "mpii_human_pose_v1_u12_1.mat"))
for _, (anno, train_flag) in enumerate( # all images
zip(mat['RELEASE']['annolist'][0, 0][0], mat['RELEASE']['img_train'][0, 0][0])):
img_fn = anno['image']['name'][0, 0][0]
train_flag = int(train_flag)
# print(i, img_fn, train_flag) # DEBUG print all images
if train_flag:
img_train_list.append(img_fn)
ann_train_list.append([])
else:
img_test_list.append(img_fn)
ann_test_list.append([])
head_rect = []
if 'x1' in str(anno['annorect'].dtype):
head_rect = zip(
[x1[0, 0] for x1 in anno['annorect']['x1'][0]], [y1[0, 0] for y1 in anno['annorect']['y1'][0]],
[x2[0, 0] for x2 in anno['annorect']['x2'][0]], [y2[0, 0] for y2 in anno['annorect']['y2'][0]]
)
else:
head_rect = [] # TODO
if 'annopoints' in str(anno['annorect'].dtype):
annopoints = anno['annorect']['annopoints'][0]
head_x1s = anno['annorect']['x1'][0]
head_y1s = anno['annorect']['y1'][0]
head_x2s = anno['annorect']['x2'][0]
head_y2s = anno['annorect']['y2'][0]
for annopoint, head_x1, head_y1, head_x2, head_y2 in zip(annopoints, head_x1s, head_y1s, head_x2s,
head_y2s):
# if annopoint != []:
# if len(annopoint) != 0:
if annopoint.size:
head_rect = [
float(head_x1[0, 0]),
float(head_y1[0, 0]),
float(head_x2[0, 0]),
float(head_y2[0, 0])
]
# joint coordinates
annopoint = annopoint['point'][0, 0]
j_id = [str(j_i[0, 0]) for j_i in annopoint['id'][0]]
x = [x[0, 0] for x in annopoint['x'][0]]
y = [y[0, 0] for y in annopoint['y'][0]]
joint_pos = {}
for _j_id, (_x, _y) in zip(j_id, zip(x, y)):
joint_pos[int(_j_id)] = [float(_x), float(_y)]
# joint_pos = fix_wrong_joints(joint_pos)
# visibility list
if 'is_visible' in str(annopoint.dtype):
vis = [v[0] if v.size > 0 else [0] for v in annopoint['is_visible'][0]]
vis = dict([(k, int(v[0])) if len(v) > 0 else v for k, v in zip(j_id, vis)])
else:
vis = None
# if len(joint_pos) == 16:
                        if (not is_16_pos_only) or len(joint_pos) == 16:
# only use image with 16 key points / or use all
data = {
'filename': img_fn,
'train': train_flag,
'head_rect': head_rect,
'is_visible': vis,
'joint_pos': joint_pos
}
# print(json.dumps(data), file=fp) # py3
if train_flag:
ann_train_list[-1].append(data)
else:
ann_test_list[-1].append(data)
# def write_line(datum, fp):
# joints = sorted([[int(k), v] for k, v in datum['joint_pos'].items()])
# joints = np.array([j for i, j in joints]).flatten()
#
# out = [datum['filename']]
# out.extend(joints)
# out = [str(o) for o in out]
# out = ','.join(out)
#
# print(out, file=fp)
# def split_train_test():
# # fp_test = open('data/mpii/test_joints.csv', 'w')
# fp_test = open(os.path.join(path, 'test_joints.csv'), 'w')
# # fp_train = open('data/mpii/train_joints.csv', 'w')
# fp_train = open(os.path.join(path, 'train_joints.csv'), 'w')
# # all_data = open('data/mpii/data.json').readlines()
# all_data = open(os.path.join(path, 'data.json')).readlines()
# N = len(all_data)
# N_test = int(N * 0.1)
# N_train = N - N_test
#
# print('N:{}'.format(N))
# print('N_train:{}'.format(N_train))
# print('N_test:{}'.format(N_test))
#
# np.random.seed(1701)
# perm = np.random.permutation(N)
# test_indices = perm[:N_test]
# train_indices = perm[N_test:]
#
# print('train_indices:{}'.format(len(train_indices)))
# print('test_indices:{}'.format(len(test_indices)))
#
# for i in train_indices:
# datum = json.loads(all_data[i].strip())
# write_line(datum, fp_train)
#
# for i in test_indices:
# datum = json.loads(all_data[i].strip())
# write_line(datum, fp_test)
save_joints()
# split_train_test() #
# read images dir
logging.info("reading images list ...")
img_dir = os.path.join(path, extracted_filename2)
_img_list = load_file_list(path=os.path.join(path, extracted_filename2), regx='\\.jpg', printable=False)
# ann_list = json.load(open(os.path.join(path, 'data.json')))
for i, im in enumerate(img_train_list):
if im not in _img_list:
print('missing training image {} in {} (remove from img(ann)_train_list)'.format(im, img_dir))
# img_train_list.remove(im)
del img_train_list[i]
del ann_train_list[i]
for i, im in enumerate(img_test_list):
if im not in _img_list:
print('missing testing image {} in {} (remove from img(ann)_test_list)'.format(im, img_dir))
# img_test_list.remove(im)
            del img_test_list[i]
            del ann_test_list[i]
# check annotation and images
n_train_images = len(img_train_list)
n_test_images = len(img_test_list)
n_images = n_train_images + n_test_images
logging.info("n_images: {} n_train_images: {} n_test_images: {}".format(n_images, n_train_images, n_test_images))
n_train_ann = len(ann_train_list)
n_test_ann = len(ann_test_list)
n_ann = n_train_ann + n_test_ann
logging.info("n_ann: {} n_train_ann: {} n_test_ann: {}".format(n_ann, n_train_ann, n_test_ann))
n_train_people = len(sum(ann_train_list, []))
n_test_people = len(sum(ann_test_list, []))
n_people = n_train_people + n_test_people
logging.info("n_people: {} n_train_people: {} n_test_people: {}".format(n_people, n_train_people, n_test_people))
# add path to all image file name
for i, value in enumerate(img_train_list):
img_train_list[i] = os.path.join(img_dir, value)
for i, value in enumerate(img_test_list):
img_test_list[i] = os.path.join(img_dir, value)
return img_train_list, ann_train_list, img_test_list, ann_test_list
def save_npz(save_list=None, name='model.npz'):
"""Input parameters and the file name, save parameters into .npz file. Use tl.utils.load_npz() to restore.
Parameters
----------
save_list : list of tensor
A list of parameters (tensor) to be saved.
name : str
The name of the `.npz` file.
Examples
--------
Save model to npz
>>> tl.files.save_npz(network.all_weights, name='model.npz')
Load model from npz (Method 1)
>>> load_params = tl.files.load_npz(name='model.npz')
>>> tl.files.assign_weights(load_params, network)
Load model from npz (Method 2)
>>> tl.files.load_and_assign_npz(name='model.npz', network=network)
References
----------
`Saving dictionary using numpy <http://stackoverflow.com/questions/22315595/saving-dictionary-of-header-information-using-numpy-savez>`__
"""
logging.info("[*] Saving TL weights into %s" % name)
if save_list is None:
save_list = []
save_list_var = tf_variables_to_numpy(save_list)
np.savez(name, params=save_list_var)
save_list_var = None
del save_list_var
logging.info("[*] Saved")
def load_npz(path='', name='model.npz'):
"""Load the parameters of a Model saved by tl.files.save_npz().
Parameters
----------
path : str
Folder path to `.npz` file.
name : str
The name of the `.npz` file.
Returns
--------
list of array
A list of parameters in order.
Examples
--------
- See ``tl.files.save_npz``
References
----------
- `Saving dictionary using numpy <http://stackoverflow.com/questions/22315595/saving-dictionary-of-header-information-using-numpy-savez>`__
"""
d = np.load(os.path.join(path, name), allow_pickle=True)
return d['params']
def assign_params(**kwargs):
raise Exception("please change assign_params --> assign_weights")
def assign_weights(weights, network):
"""Assign the given parameters to the TensorLayer network.
Parameters
----------
weights : list of array
A list of model weights (array) in order.
network : :class:`Layer`
The network to be assigned.
Returns
--------
1) list of operations if in graph mode
A list of tf ops in order that assign weights. Support sess.run(ops) manually.
2) list of tf variables if in eager mode
A list of tf variables (assigned weights) in order.
Examples
--------
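    >>> # Restore weights previously saved with ``tl.files.save_npz`` into a built
    >>> # TensorLayer model (``network`` is assumed to exist already).
    >>> weights = tl.files.load_npz(name='model.npz')
    >>> tl.files.assign_weights(weights, network)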
References
----------
- `Assign value to a TensorFlow variable <http://stackoverflow.com/questions/34220532/how-to-assign-value-to-a-tensorflow-variable>`__
"""
ops = []
for idx, param in enumerate(weights):
ops.append(network.all_weights[idx].assign(param))
return ops
def load_and_assign_npz(name=None, network=None):
"""Load model from npz and assign to a network.
Parameters
-------------
name : str
The name of the `.npz` file.
network : :class:`Model`
The network to be assigned.
Examples
--------
- See ``tl.files.save_npz``
"""
if network is None:
raise ValueError("network is None.")
if not os.path.exists(name):
logging.error("file {} doesn't exist.".format(name))
return False
else:
weights = load_npz(name=name)
assign_weights(weights, network)
logging.info("[*] Load {} SUCCESS!".format(name))
def save_npz_dict(save_list=None, name='model.npz'):
"""Input parameters and the file name, save parameters as a dictionary into .npz file.
Use ``tl.files.load_and_assign_npz_dict()`` to restore.
Parameters
----------
save_list : list of parameters
A list of parameters (tensor) to be saved.
name : str
The name of the `.npz` file.
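    Examples
    --------
    >>> # Sketch: save named weights and restore them later (``network`` is assumed
    >>> # to be a built TensorLayer model).
    >>> tl.files.save_npz_dict(network.all_weights, name='model_dict.npz')
    >>> tl.files.load_and_assign_npz_dict(name='model_dict.npz', network=network)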
"""
if save_list is None:
save_list = []
save_list_names = [tensor.name for tensor in save_list]
save_list_var = tf_variables_to_numpy(save_list)
save_var_dict = {save_list_names[idx]: val for idx, val in enumerate(save_list_var)}
np.savez(name, **save_var_dict)
save_list_var = None
save_var_dict = None
del save_list_var
del save_var_dict
logging.info("[*] Model saved in npz_dict %s" % name)
def load_and_assign_npz_dict(name='model.npz', network=None, skip=False):
"""Restore the parameters saved by ``tl.files.save_npz_dict()``.
Parameters
-------------
name : str
The name of the `.npz` file.
network : :class:`Model`
The network to be assigned.
skip : boolean
        If True, loaded weights whose names are not found in the network's weights will be skipped.
        If False, an error will be raised when a mismatch is found. Default is False.
"""
if not os.path.exists(name):
logging.error("file {} doesn't exist.".format(name))
return False
weights = np.load(name, allow_pickle=True)
if len(weights.keys()) != len(set(weights.keys())):
raise Exception("Duplication in model npz_dict %s" % name)
net_weights_name = [w.name for w in network.all_weights]
for key in weights.keys():
if key not in net_weights_name:
if skip:
logging.warning("Weights named '%s' not found in network. Skip it." % key)
else:
raise RuntimeError(
"Weights named '%s' not found in network. Hint: set argument skip=Ture "
"if you want to skip redundant or mismatch weights." % key
)
else:
assign_tf_variable(network.all_weights[net_weights_name.index(key)], weights[key])
logging.info("[*] Model restored from npz_dict %s" % name)
def save_ckpt(sess=None, mode_name='model.ckpt', save_dir='checkpoint', var_list=None, global_step=None, printable=False):
"""Save parameters into `ckpt` file.
Parameters
------------
    sess : Session
        TensorFlow Session (required to save variables in graph mode).
    mode_name : str
        The name of the model, default is ``model.ckpt``.
save_dir : str
The path / file directory to the `ckpt`, default is ``checkpoint``.
var_list : list of tensor
The parameters / variables (tensor) to be saved. If empty, save all global variables (default).
global_step : int or None
Step number.
printable : boolean
Whether to print all parameters information.
See Also
--------
load_ckpt
"""
if var_list is None:
if sess is None:
# FIXME: not sure whether global variables can be accessed in eager mode
raise ValueError(
"If var_list is None, sess must be specified. "
"In eager mode, can not access global variables easily. "
)
var_list = []
ckpt_file = os.path.join(save_dir, mode_name)
if var_list == []:
var_list = tf.global_variables()
logging.info("[*] save %s n_weights: %d" % (ckpt_file, len(var_list)))
if printable:
for idx, v in enumerate(var_list):
logging.info(" param {:3}: {:15} {}".format(idx, v.name, str(v.get_shape())))
if sess:
# graph mode
saver = tf.train.Saver(var_list)
saver.save(sess, ckpt_file, global_step=global_step)
else:
# eager mode
# saver = tfes.Saver(var_list)
# saver.save(ckpt_file, global_step=global_step)
# TODO: tf2.0 not stable, cannot import tensorflow.contrib.eager.python.saver
pass
def load_ckpt(sess=None, mode_name='model.ckpt', save_dir='checkpoint', var_list=None, is_latest=True, printable=False):
"""Load parameters from `ckpt` file.
Parameters
------------
sess : Session
TensorFlow Session.
mode_name : str
The name of the model, default is ``model.ckpt``.
save_dir : str
The path / file directory to the `ckpt`, default is ``checkpoint``.
var_list : list of tensor
        The parameters / variables (tensor) to be restored. If empty, load all global variables (default).
is_latest : boolean
        Whether to load the latest `ckpt`; if False, load the `ckpt` with the name of ``mode_name``.
printable : boolean
Whether to print all parameters information.
Examples
----------
- Save all global parameters.
>>> tl.files.save_ckpt(sess=sess, mode_name='model.ckpt', save_dir='model', printable=True)
- Save specific parameters.
>>> tl.files.save_ckpt(sess=sess, mode_name='model.ckpt', var_list=net.all_params, save_dir='model', printable=True)
- Load latest ckpt.
>>> tl.files.load_ckpt(sess=sess, var_list=net.all_params, save_dir='model', printable=True)
- Load specific ckpt.
>>> tl.files.load_ckpt(sess=sess, mode_name='model.ckpt', var_list=net.all_params, save_dir='model', is_latest=False, printable=True)
"""
# if sess is None:
# raise ValueError("session is None.")
if var_list is None:
if sess is None:
# FIXME: not sure whether global variables can be accessed in eager mode
raise ValueError(
"If var_list is None, sess must be specified. "
"In eager mode, can not access global variables easily. "
)
var_list = []
if is_latest:
ckpt_file = tf.train.latest_checkpoint(save_dir)
else:
ckpt_file = os.path.join(save_dir, mode_name)
if not var_list:
var_list = tf.global_variables()
logging.info("[*] load %s n_weights: %d" % (ckpt_file, len(var_list)))
if printable:
for idx, v in enumerate(var_list):
logging.info(" weights {:3}: {:15} {}".format(idx, v.name, str(v.get_shape())))
try:
if sess:
# graph mode
saver = tf.train.Saver(var_list)
saver.restore(sess, ckpt_file)
else:
# eager mode
# saver = tfes.Saver(var_list)
# saver.restore(ckpt_file)
# TODO: tf2.0 not stable, cannot import tensorflow.contrib.eager.python.saver
pass
except Exception as e:
logging.info(e)
logging.info("[*] load ckpt fail ...")
def save_any_to_npy(save_dict=None, name='file.npy'):
"""Save variables to `.npy` file.
Parameters
------------
    save_dict : dictionary
The variables to be saved.
name : str
File name.
Examples
---------
>>> tl.files.save_any_to_npy(save_dict={'data': ['a','b']}, name='test.npy')
>>> data = tl.files.load_npy_to_any(name='test.npy')
>>> print(data)
{'data': ['a','b']}
"""
if save_dict is None:
save_dict = {}
np.save(name, save_dict)
def load_npy_to_any(path='', name='file.npy'):
"""Load `.npy` file.
Parameters
------------
path : str
Path to the file (optional).
name : str
File name.
Examples
---------
- see tl.files.save_any_to_npy()
"""
file_path = os.path.join(path, name)
try:
return np.load(file_path, allow_pickle=True).item()
    except Exception:
        return np.load(file_path, allow_pickle=True)
def file_exists(filepath):
"""Check whether a file exists by given file path."""
return os.path.isfile(filepath)
def folder_exists(folderpath):
"""Check whether a folder exists by given folder path."""
return os.path.isdir(folderpath)
def del_file(filepath):
"""Delete a file by given file path."""
os.remove(filepath)
def del_folder(folderpath):
"""Delete a folder by given folder path."""
shutil.rmtree(folderpath)
def read_file(filepath):
"""Read a file and return a string.
Examples
---------
>>> data = tl.files.read_file('data.txt')
"""
with open(filepath, 'r') as afile:
return afile.read()
def load_file_list(path=None, regx='\.jpg', printable=True, keep_prefix=False):
r"""Return a file list in a folder by given a path and regular expression.
Parameters
----------
path : str or None
A folder path, if `None`, use the current directory.
regx : str
The regx of file name.
printable : boolean
        Whether to print the file information.
keep_prefix : boolean
Whether to keep path in the file name.
Examples
----------
>>> file_list = tl.files.load_file_list(path=None, regx='w1pre_[0-9]+\.(npz)')
"""
if path is None:
path = os.getcwd()
file_list = os.listdir(path)
return_list = []
for _, f in enumerate(file_list):
if re.search(regx, f):
return_list.append(f)
# return_list.sort()
if keep_prefix:
for i, f in enumerate(return_list):
return_list[i] = os.path.join(path, f)
if printable:
logging.info('Match file list = %s' % return_list)
logging.info('Number of files = %d' % len(return_list))
return return_list
def load_folder_list(path=""):
"""Return a folder list in a folder by given a folder path.
Parameters
----------
path : str
A folder path.
"""
return [os.path.join(path, o) for o in os.listdir(path) if os.path.isdir(os.path.join(path, o))]
def exists_or_mkdir(path, verbose=True):
"""Check a folder by given name, if not exist, create the folder and return False,
if directory exists, return True.
Parameters
----------
path : str
A folder path.
verbose : boolean
If True (default), prints results.
Returns
--------
boolean
        True if the folder already exists; otherwise returns False and creates the folder.
Examples
--------
>>> tl.files.exists_or_mkdir("checkpoints/train")
"""
if not os.path.exists(path):
if verbose:
logging.info("[*] creates %s ..." % path)
os.makedirs(path)
return False
else:
if verbose:
logging.info("[!] %s exists ..." % path)
return True
def maybe_download_and_extract(filename, working_directory, url_source, extract=False, expected_bytes=None):
"""Checks if file exists in working_directory otherwise tries to dowload the file,
and optionally also tries to extract the file if format is ".zip" or ".tar"
Parameters
-----------
filename : str
        The name of the (to be) downloaded file.
    working_directory : str
        A folder path to search for the file in and download the file to.
    url_source : str
        The URL to download the file from.
    extract : boolean
        If True, tries to uncompress the downloaded file if it is a ".tar.gz/.tar.bz2" or ".zip" file; default is False.
    expected_bytes : int or None
        If set, tries to verify that the downloaded file is of the specified size, otherwise raises an Exception; the default is None, which means no check is performed.
Returns
----------
str
        File path of the downloaded (uncompressed) file.
Examples
--------
>>> down_file = tl.files.maybe_download_and_extract(filename='train-images-idx3-ubyte.gz',
... working_directory='data/',
... url_source='http://yann.lecun.com/exdb/mnist/')
>>> tl.files.maybe_download_and_extract(filename='ADEChallengeData2016.zip',
... working_directory='data/',
... url_source='http://sceneparsing.csail.mit.edu/data/',
... extract=True)
"""
# We first define a download function, supporting both Python 2 and 3.
def _download(filename, working_directory, url_source):
progress_bar = progressbar.ProgressBar()
def _dlProgress(count, blockSize, totalSize, pbar=progress_bar):
if (totalSize != 0):
if not pbar.max_value:
totalBlocks = math.ceil(float(totalSize) / float(blockSize))
pbar.max_value = int(totalBlocks)
pbar.update(count, force=True)
filepath = os.path.join(working_directory, filename)
logging.info('Downloading %s...\n' % filename)
urlretrieve(url_source + filename, filepath, reporthook=_dlProgress)
exists_or_mkdir(working_directory, verbose=False)
filepath = os.path.join(working_directory, filename)
if not os.path.exists(filepath):
_download(filename, working_directory, url_source)
statinfo = os.stat(filepath)
    logging.info('Successfully downloaded %s %s bytes.' % (filename, statinfo.st_size))
if (not (expected_bytes is None) and (expected_bytes != statinfo.st_size)):
raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?')
if (extract):
if tarfile.is_tarfile(filepath):
logging.info('Trying to extract tar file')
tarfile.open(filepath, 'r').extractall(working_directory)
logging.info('... Success!')
elif zipfile.is_zipfile(filepath):
logging.info('Trying to extract zip file')
with zipfile.ZipFile(filepath) as zf:
zf.extractall(working_directory)
logging.info('... Success!')
else:
logging.info("Unknown compression_format only .tar.gz/.tar.bz2/.tar and .zip supported")
return filepath
def natural_keys(text):
"""Sort list of string with number in human order.
Examples
----------
>>> l = ['im1.jpg', 'im31.jpg', 'im11.jpg', 'im21.jpg', 'im03.jpg', 'im05.jpg']
>>> l.sort(key=tl.files.natural_keys)
    ['im1.jpg', 'im03.jpg', 'im05.jpg', 'im11.jpg', 'im21.jpg', 'im31.jpg']
    >>> l.sort() # that is what we don't want
    ['im03.jpg', 'im05.jpg', 'im1.jpg', 'im11.jpg', 'im21.jpg', 'im31.jpg']
References
----------
- `link <http://nedbatchelder.com/blog/200712/human_sorting.html>`__
"""
# - alist.sort(key=natural_keys) sorts in human order
# http://nedbatchelder.com/blog/200712/human_sorting.html
# (See Toothy's implementation in the comments)
def atoi(text):
return int(text) if text.isdigit() else text
return [atoi(c) for c in re.split('(\d+)', text)]
# Visualizing npz files
def npz_to_W_pdf(path=None, regx='w1pre_[0-9]+\.(npz)'):
r"""Convert the first weight matrix of `.npz` file to `.pdf` by using `tl.visualize.W()`.
Parameters
----------
path : str
A folder path to `npz` files.
regx : str
Regx for the file name.
Examples
---------
Convert the first weight matrix of w1_pre...npz file to w1_pre...pdf.
>>> tl.files.npz_to_W_pdf(path='/Users/.../npz_file/', regx='w1pre_[0-9]+\.(npz)')
"""
file_list = load_file_list(path=path, regx=regx)
for f in file_list:
W = load_npz(path, f)[0]
logging.info("%s --> %s" % (f, f.split('.')[0] + '.pdf'))
visualize.draw_weights(W, second=10, saveable=True, name=f.split('.')[0], fig_idx=2012)
def tf_variables_to_numpy(variables):
"""Convert TF tensor or a list of tensors into a list of numpy array"""
if not isinstance(variables, list):
var_list = [variables]
else:
var_list = variables
results = [v.numpy() for v in var_list]
return results
def assign_tf_variable(variable, value):
"""Assign value to a TF variable"""
variable.assign(value)
def _save_weights_to_hdf5_group(f, layers):
"""
Save layer/model weights into hdf5 group recursively.
Parameters
----------
f: hdf5 group
A hdf5 group created by h5py.File() or create_group().
layers: list
A list of layers to save weights.
"""
f.attrs['layer_names'] = [layer.name.encode('utf8') for layer in layers]
for layer in layers:
g = f.create_group(layer.name)
if isinstance(layer, tl.models.Model):
_save_weights_to_hdf5_group(g, layer.all_layers)
elif isinstance(layer, tl.layers.ModelLayer):
_save_weights_to_hdf5_group(g, layer.model.all_layers)
elif isinstance(layer, tl.layers.LayerList):
_save_weights_to_hdf5_group(g, layer.layers)
elif isinstance(layer, tl.layers.Layer):
if layer.all_weights is not None:
weight_values = tf_variables_to_numpy(layer.all_weights)
weight_names = [w.name.encode('utf8') for w in layer.all_weights]
else:
weight_values = []
weight_names = []
g.attrs['weight_names'] = weight_names
for name, val in zip(weight_names, weight_values):
val_dataset = g.create_dataset(name, val.shape, dtype=val.dtype)
if not val.shape:
# scalar
val_dataset[()] = val
else:
val_dataset[:] = val
else:
raise Exception("Only layer or model can be saved into hdf5.")
def _load_weights_from_hdf5_group_in_order(f, layers):
"""
Load layer weights from a hdf5 group sequentially.
Parameters
----------
f: hdf5 group
A hdf5 group created by h5py.File() or create_group().
layers: list
A list of layers to load weights.
"""
layer_names = [n.decode('utf8') for n in f.attrs["layer_names"]]
for idx, name in enumerate(layer_names):
g = f[name]
layer = layers[idx]
if isinstance(layer, tl.models.Model):
_load_weights_from_hdf5_group_in_order(g, layer.all_layers)
elif isinstance(layer, tl.layers.ModelLayer):
_load_weights_from_hdf5_group_in_order(g, layer.model.all_layers)
elif isinstance(layer, tl.layers.LayerList):
_load_weights_from_hdf5_group_in_order(g, layer.layers)
elif isinstance(layer, tl.layers.Layer):
weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
for iid, w_name in enumerate(weight_names):
assign_tf_variable(layer.all_weights[iid], np.asarray(g[w_name]))
else:
raise Exception("Only layer or model can be saved into hdf5.")
if idx == len(layers) - 1:
break
def _load_weights_from_hdf5_group(f, layers, skip=False):
"""
Load layer weights from a hdf5 group by layer name.
Parameters
----------
f: hdf5 group
A hdf5 group created by h5py.File() or create_group().
layers: list
A list of layers to load weights.
skip : boolean
If 'skip' == True, loaded layer whose name is not found in 'layers' will be skipped. If 'skip' is False,
error will be raised when mismatch is found. Default False.
"""
layer_names = [n.decode('utf8') for n in f.attrs["layer_names"]]
layer_index = {layer.name: layer for layer in layers}
for idx, name in enumerate(layer_names):
if name not in layer_index.keys():
if skip:
logging.warning("Layer named '%s' not found in network. Skip it." % name)
else:
raise RuntimeError(
"Layer named '%s' not found in network. Hint: set argument skip=Ture "
"if you want to skip redundant or mismatch Layers." % name
)
else:
g = f[name]
layer = layer_index[name]
if isinstance(layer, tl.models.Model):
_load_weights_from_hdf5_group(g, layer.all_layers, skip)
elif isinstance(layer, tl.layers.ModelLayer):
_load_weights_from_hdf5_group(g, layer.model.all_layers, skip)
elif isinstance(layer, tl.layers.LayerList):
_load_weights_from_hdf5_group(g, layer.layers, skip)
elif isinstance(layer, tl.layers.Layer):
weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
for iid, w_name in enumerate(weight_names):
# FIXME : this is only for compatibility
if isinstance(layer, tl.layers.BatchNorm) and np.asarray(g[w_name]).ndim > 1:
assign_tf_variable(layer.all_weights[iid], np.asarray(g[w_name]).squeeze())
continue
assign_tf_variable(layer.all_weights[iid], np.asarray(g[w_name]))
else:
raise Exception("Only layer or model can be saved into hdf5.")
def save_weights_to_hdf5(filepath, network):
"""Input filepath and save weights in hdf5 format.
Parameters
----------
filepath : str
Filename to which the weights will be saved.
network : Model
TL model.
Returns
-------
"""
logging.info("[*] Saving TL weights into %s" % filepath)
with h5py.File(filepath, 'w') as f:
_save_weights_to_hdf5_group(f, network.all_layers)
logging.info("[*] Saved")
def load_hdf5_to_weights_in_order(filepath, network):
"""Load weights sequentially from a given file of hdf5 format
Parameters
----------
filepath : str
Filename to which the weights will be loaded, should be of hdf5 format.
network : Model
TL model.
    Notes
    -----
    If the file contains more weights than given 'weights', then the redundant ones will be ignored
    if all previous weights match perfectly.
Returns
-------
"""
f = h5py.File(filepath, 'r')
try:
layer_names = [n.decode('utf8') for n in f.attrs["layer_names"]]
except Exception:
raise NameError(
"The loaded hdf5 file needs to have 'layer_names' as attributes. "
"Please check whether this hdf5 file is saved from TL."
)
if len(network.all_layers) != len(layer_names):
logging.warning(
"Number of weights mismatch."
"Trying to load a saved file with " + str(len(layer_names)) + " layers into a model with " +
str(len(network.all_layers)) + " layers."
)
_load_weights_from_hdf5_group_in_order(f, network.all_layers)
f.close()
logging.info("[*] Load %s SUCCESS!" % filepath)
def load_hdf5_to_weights(filepath, network, skip=False):
"""Load weights by name from a given file of hdf5 format
Parameters
----------
filepath : str
Filename to which the weights will be loaded, should be of hdf5 format.
network : Model
TL model.
skip : bool
If 'skip' == True, loaded weights whose name is not found in 'weights' will be skipped. If 'skip' is False,
error will be raised when mismatch is found. Default False.
Returns
-------
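    Examples
    --------
    >>> # Sketch: save weights by layer name and load them back (``network`` is assumed
    >>> # to be a built TensorLayer model).
    >>> tl.files.save_weights_to_hdf5('model.h5', network)
    >>> tl.files.load_hdf5_to_weights('model.h5', network, skip=True)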
"""
f = h5py.File(filepath, 'r')
try:
layer_names = [n.decode('utf8') for n in f.attrs["layer_names"]]
except Exception:
raise NameError(
"The loaded hdf5 file needs to have 'layer_names' as attributes. "
"Please check whether this hdf5 file is saved from TL."
)
net_index = {layer.name: layer for layer in network.all_layers}
if len(network.all_layers) != len(layer_names):
logging.warning(
"Number of weights mismatch."
"Trying to load a saved file with " + str(len(layer_names)) + " layers into a model with " +
str(len(network.all_layers)) + " layers."
)
# check mismatch form network weights to hdf5
for name in net_index.keys():
if name not in layer_names:
logging.warning("Network layer named '%s' not found in loaded hdf5 file. It will be skipped." % name)
# load weights from hdf5 to network
_load_weights_from_hdf5_group(f, network.all_layers, skip)
f.close()
logging.info("[*] Load %s SUCCESS!" % filepath)
|
py | 1a3a42a15aeb0ac858f8f48fb1ab85e16a9c47af | import logging
from typing import Dict
from synch.factory import get_reader, get_writer
from synch.settings import Settings
logger = logging.getLogger("synch.replication.etl")
def etl_full(
alias: str, schema: str, tables_pk: Dict, renew=False, full=True
):
"""
    Full ETL of the configured tables for the given source alias into ClickHouse.
    When ``full`` is False, only the tables listed in ``tables_pk`` are processed.
"""
reader = get_reader(alias)
source_db_database = Settings.get_source_db_database(alias, schema)
schema = source_db_database.get("database")
writer = get_writer()
if not writer.check_database_exists(schema):
if source_db_database.get("auto_create") is not False:
writer.create_database(schema, Settings.cluster_name())
else:
logger.warning(
f"Can't etl since no database {schema} found in ClickHouse and auto_create=false"
)
exit(-1)
for table in source_db_database.get("tables"):
if not full:
if table['table'] not in list(tables_pk.keys()):
continue
if table.get("auto_full_etl") is False:
continue
table_name = table.get("table")
pk = tables_pk.get(table_name)
writer = get_writer(table.get("clickhouse_engine"))
if not pk and not renew:
logger.warning(f"No pk found in {schema}.{table_name}, skip")
continue
elif isinstance(pk, tuple):
pk = f"({','.join(pk)})"
if renew:
drop_sql = f"drop table if exists {schema}.{table_name}"
writer.execute(drop_sql)
logger.info(f"drop table success:{schema}.{table_name}")
if not writer.check_table_exists(schema, table_name):
sign_column = table.get("sign_column")
version_column = table.get("version_column")
order_by = table.get("order_by")
writer.execute(
writer.get_table_create_sql(
reader,
schema,
table_name,
pk,
table.get("partition_by"),
table.get("engine_settings"),
sign_column=sign_column,
version_column=version_column,
order_by=order_by,
)
)
if Settings.is_cluster():
for w in get_writer(choice=False):
w.execute(
w.get_distributed_table_create_sql(
schema, table_name, Settings.get("clickhouse.distributed_suffix")
)
)
if reader.fix_column_type and not table.get("skip_decimal"):
writer.fix_table_column_type(reader, schema, table_name)
full_insert_sql = writer.get_full_insert_sql(reader, schema, table_name, sign_column)
logger.info(f"{full_insert_sql}")
writer.execute(full_insert_sql)
logger.info(f"full data etl for {schema}.{table_name} success")
else:
logger.debug(
f"{schema}.{table_name} exists, skip, or use --renew force etl with drop old tables"
)
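if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module. The alias, schema and
    # primary-key mapping below are hypothetical and must match the synch config file,
    # which is assumed to have been loaded into Settings elsewhere (e.g. by the CLI).
    etl_full("mysql_db", "test", tables_pk={"users": "id"}, renew=False, full=False)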
|
py | 1a3a4434fccb34bb14f4adef8b909639a1e620f5 | #! /usr/bin/env python3
import struct
import enum
def printMessage(s):
return ' '.join("{:02x}".format(c) for c in s)
class MessageType(enum.Enum):
Text = 0
Numeric = 1
Logic = 2
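# Wire format handled below (inferred from the struct pack/unpack calls):
#   uint16  payloadSize   little-endian, counts every byte after this field
#   4 bytes header        0x01 0x00 0x81 0x9e
#   uint8   mailSize      length of the mailbox name, including the trailing NUL
#   bytes   mail          ASCII mailbox name, NUL-terminated
#   uint16  valueSize     length of the value field
#   bytes   value         1 byte (Logic), 4-byte float (Numeric), or NUL-terminated ASCII (Text)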
def decodeMessage(s, msgType):
payloadSize = struct.unpack_from('<H', s, 0)[0]
if payloadSize < 5: # includes the mailSize
raise BufferError('Payload size is too small')
a, b, c, d = struct.unpack_from('<4B', s, 2)
if a != 1 or b != 0 or c != 0x81 or d != 0x9e:
raise BufferError('Header is not correct. Expecting 01 00 81 9e')
mailSize = struct.unpack_from('<B', s, 6)[0]
if payloadSize < (5 + mailSize): # includes the valueSize
raise BufferError('Payload size is too small')
mailBytes = struct.unpack_from('<' + str(mailSize) + 's', s, 7)[0]
mail = mailBytes.decode('ascii')[:-1]
valueSize = struct.unpack_from('<H', s, 7 + mailSize)[0]
if payloadSize < (7 + mailSize + valueSize): # includes the valueSize
raise BufferError('Payload size does not match the packet')
if msgType == MessageType.Logic:
if valueSize != 1:
raise BufferError('Value size is not one byte required for Logic Type')
valueBytes = struct.unpack_from('<B', s, 9 + mailSize)[0]
value = True if valueBytes != 0 else False
elif msgType == MessageType.Numeric:
if valueSize != 4:
raise BufferError('Value size is not four bytes required for Numeric Type')
value = struct.unpack_from('<f', s, 9 + mailSize)[0]
else:
valueBytes = struct.unpack_from('<' + str(valueSize) + 's', s, 9 + mailSize)[0]
value = valueBytes.decode('ascii')[:-1]
remnant = None
if len(s) > (payloadSize + 2):
remnant = s[(payloadSize) + 2:]
return (mail, value, remnant)
def encodeMessage(msgType, mail, value):
mail = mail + '\x00'
mailBytes = mail.encode('ascii')
mailSize = len(mailBytes)
fmt = '<H4BB' + str(mailSize) + 'sH'
if msgType == MessageType.Logic:
valueSize = 1
valueBytes = 1 if value is True else 0
fmt += 'B'
elif msgType == MessageType.Numeric:
valueSize = 4
valueBytes = float(value)
fmt += 'f'
else:
value = value + '\x00'
valueBytes = value.encode('ascii')
valueSize = len(valueBytes)
fmt += str(valueSize) + 's'
payloadSize = 7 + mailSize + valueSize
s = struct.pack(fmt, payloadSize, 0x01, 0x00, 0x81, 0x9e, mailSize, mailBytes, valueSize, valueBytes)
return s
if __name__ == "__main__":
s = encodeMessage(MessageType.Text, 'abc', 'Hello')
print(printMessage(s))
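    # Round-trip sketch: decode the packet we just built (expects 'abc', 'Hello', None).
    mail, value, remnant = decodeMessage(s, MessageType.Text)
    print(mail, value, remnant)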
|
py | 1a3a44a704717b9eb2511b19b0f212915f0e4c28 | def KDistanceUtil(root,k,nodes):
if root is None:
return
if k == 0:
nodes.append(root.data)
else:
KDistanceUtil(root.left, k-1,nodes)
KDistanceUtil(root.right, k-1,nodes)
def KDistance(root, k):
nodes = []
KDistanceUtil(root,k,nodes)
return nodes
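if __name__ == "__main__":
    # Usage sketch with a minimal node class; KDistance only assumes that nodes expose
    # .data, .left and .right (the original file does not define a node type itself).
    class Node:
        def __init__(self, data, left=None, right=None):
            self.data = data
            self.left = left
            self.right = right
    root = Node(1, Node(2, Node(4), Node(5)), Node(3))
    print(KDistance(root, 2))  # -> [4, 5]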
|
py | 1a3a44f1d11b5fc091d8ff724424b4059054a46a | from spacy.lang.en import English
from spacy.tokens import Span
nlp = English()
# Define the method
def to_html(span, tag):
# Wrap the span text in a HTML tag and return it
return f"<{tag}>{span.text}</{tag}>"
# Register the Span method extension "to_html" with the method to_html
Span.set_extension("to_html", method=to_html)
# Process the text and call the to_html method on the span with the tag name "strong"
doc = nlp("Hello world, this is a sentence.")
span = doc[0:2]
print(span._.to_html("strong"))
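# Expected output: <strong>Hello world</strong>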
|
py | 1a3a486a361a6cc6108ab131988ef6cdbf48cb20 | # Electrum - Lightweight Bitcoin Client
# Copyright (c) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import dns
import json
import traceback
import sys
from .address import Address
from . import dnssec
from .util import FileImportFailed, FileImportFailedEncrypted
class Contacts(dict):
def __init__(self, storage):
self.storage = storage
d = self.storage.get('contacts', {})
try:
self.update(d)
except:
return
# backward compatibility
for k, v in self.items():
_type, n = v
if _type == 'address' and Address.is_valid(n):
self.pop(k)
self[n] = ('address', k)
def save(self):
self.storage.put('contacts', dict(self))
def import_file(self, path):
try:
with open(path, 'r') as f:
d = self._validate(json.loads(f.read()))
except json.decoder.JSONDecodeError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailedEncrypted()
except BaseException:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed()
self.update(d)
self.save()
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self.save()
def pop(self, key):
if key in self.keys():
dict.pop(self, key)
self.save()
def resolve(self, k):
if Address.is_valid(k):
return {
'address': Address.from_string(k),
'type': 'address'
}
if k in self.keys():
_type, addr = self[k]
if _type == 'address':
return {
'address': addr,
'type': 'contact'
}
out = self.resolve_openalias(k)
if out:
address, name, validated = out
return {
'address': address,
'name': name,
'type': 'openalias',
'validated': validated
}
raise Exception("Invalid Bitcoin address or alias", k)
def resolve_openalias(self, url):
# support email-style addresses, per the OA standard
url = url.replace('@', '.')
records, validated = dnssec.query(url, dns.rdatatype.TXT)
prefix = 'btc'
for record in records:
string = record.strings[0]
if string.startswith('oa1:' + prefix):
address = self.find_regex(string, r'recipient_address=([A-Za-z0-9]+)')
name = self.find_regex(string, r'recipient_name=([^;]+)')
if not name:
name = address
if not address:
continue
return Address.from_string(address), name, validated
def find_regex(self, haystack, needle):
regex = re.compile(needle)
try:
return regex.search(haystack).groups()[0]
except AttributeError:
return None
def _validate(self, data):
for k,v in list(data.items()):
if k == 'contacts':
return self._validate(v)
if not Address.is_valid(k):
data.pop(k)
else:
_type,_ = v
if _type != 'address':
data.pop(k)
return data
|
py | 1a3a4890438ae6a577d83439092f49db8b800e93 | from __future__ import absolute_import, division, print_function
import copy
from databroker import Header
# do this as a weird import to get the py2 shim
from databroker._core import SimpleNamespace
def test_header_dict_conformance(db):
db.prepare_hook = lambda name, doc: copy.deepcopy(doc)
# TODO update this if / when we add conformance testing to
# validate attrs in Header
target = {'start': {'uid': 'start'},
'stop': {'uid': 'stop', 'start_uid': 'start'},
'ext': SimpleNamespace()}
h = Header(db, **target)
# hack the descriptor lookup/cache mechanism
target['descriptors'] = [{'uid': 'desc', 'start_uid': 'start'}]
h._cache['desc'] = [{'uid': 'desc', 'start_uid': 'start'}]
assert len(h) == len(target)
assert set(h) == set(target)
assert set(h.keys()) == set(target.keys())
for k, v in h.items():
assert v == target[k]
assert v == h[k]
# this is a dumb test
assert len(list(h.values())) == len(h)
n, d = h.to_name_dict_pair()
assert n == 'header'
assert d == target
|
py | 1a3a49931548e04f0d363a18a2b313b8b0a85cf0 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import json
import numpy as np
from scipy import misc as scp_misc
import tensorflow as tf
import facenet
import align.detect_face as detect_face
# from PIL import Image
def initialize_mtcnn(gpu_memory_fraction):
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
return pnet, rnet, onet
def align_image(input_image, output_image, pnet, rnet, onet, image_size=182, margin=44, random_order=True,
gpu_memory_fraction=1.0, debug=False, just_count=False):
minsize = 20 # minimum size of face
threshold = [0.7, 0.7, 0.9] # three steps's threshold
factor = 0.709 # scale factor
if not os.path.exists(output_image):
try:
img = scp_misc.imread(input_image)
except (IOError, ValueError, IndexError) as e:
errorMessage = '{}: {}'.format(input_image, e)
if debug:
print(errorMessage)
else:
            if img.ndim < 2:
                if debug:
                    print('Unable to align "%s"' % input_image)
                return False, 0
if img.ndim == 2:
img = facenet.to_rgb(img)
img = img[:, :, 0:3]
bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0]
if just_count == True:
return True, nrof_faces
if nrof_faces > 0:
det = bounding_boxes[:, 0:4]
img_size = np.asarray(img.shape)[0:2]
if nrof_faces > 1:
det = np.squeeze(det)
counter = 0
scaled_list = []
for d in det:
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(d[0] - margin / 2, 0)
bb[1] = np.maximum(d[1] - margin / 2, 0)
bb[2] = np.minimum(d[2] + margin / 2, img_size[1])
bb[3] = np.minimum(d[3] + margin / 2, img_size[0])
cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
scaled = scp_misc.imresize(cropped, (image_size, image_size), interp='bilinear')
filename = "{}_{}.jpg".format(output_image.split(".")[0] + "image", str(counter))
scp_misc.imsave(filename, scaled)
scaled_list.append(scaled)
counter = counter +1
return True, scaled_list
if nrof_faces == 1:
det = np.squeeze(det)
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
scaled = scp_misc.imresize(cropped, (image_size, image_size), interp='bilinear')
scp_misc.imsave(output_image, scaled)
return True, scaled
else:
if debug:
print('Unable to align "%s"' % input_image)
return False, 1
def main(args):
# TODO Check why this was previously being initialised inside the image loop
file_to_facecount = dict()
pnet, rnet, onet = initialize_mtcnn(0.8)
for filename in os.listdir(args.input_dir):
input_image = filename
output_image = filename
if os.path.isfile(os.path.join(args.input_dir, input_image)) == False:
continue
input_image = os.path.join(args.input_dir, input_image)
output_image = os.path.join(args.output_dir, output_image)
_, result = align_image(input_image, output_image, pnet, rnet, onet, image_size=args.image_size, margin=args.margin, random_order=args.random_order,
gpu_memory_fraction=args.gpu_memory_fraction, debug=False, just_count=args.just_count)
if args.just_count == True:
file_to_facecount[filename] = result
if args.just_count:
json.dump(file_to_facecount, open(os.path.join(args.output_dir, args.count_file), "w"))
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', type=str, help='Directory with unaligned images.')
parser.add_argument('output_dir', type=str, help='Directory with aligned face thumbnails.')
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=182)
parser.add_argument('--margin', type=int,
help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
parser.add_argument('--random_order',
help='Shuffles the order of images to enable alignment using multiple processes.',
action='store_true')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.',
default=1.0)
parser.add_argument('--has_classes', dest='has_classes', action='store_true',
help='Input folder is split into class subfolders, and these should be replicated',
default=True)
parser.add_argument('--no_classes', dest='has_classes', action='store_false',
help='Input folder is split into class subfolders, and these should be replicated',
default=True)
parser.add_argument('--just_count', dest='just_count', action='store_true',
help='Just save out a JSON mapping filenames to counts of faces found',
default=False)
parser.add_argument('--count_file', type=str,
help='Where to save counts of faces',
default="face_counts.json")
return parser.parse_args(argv)
if __name__ == "__main__":
main(parse_arguments(sys.argv[1:]))
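# Example invocation (hypothetical script name; the input/output folders must already exist):
#   python align_faces.py data/raw data/aligned --image_size 182 --margin 44
#   python align_faces.py data/raw data/aligned --just_count --count_file face_counts.json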
py | 1a3a4aba544357acab869f5e3824056b6fc5ac92 |
from sqlalchemy.testing import eq_, assert_raises, \
assert_raises_message, is_
from sqlalchemy.ext import declarative as decl
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import relationship, create_session, class_mapper, \
configure_mappers, clear_mappers, \
polymorphic_union, deferred, Session
from sqlalchemy.ext.declarative import declared_attr, AbstractConcreteBase, \
ConcreteBase, has_inherited_table
from sqlalchemy.testing import fixtures
Base = None
class DeclarativeTestBase(fixtures.TestBase, testing.AssertsExecutionResults):
def setup(self):
global Base
Base = decl.declarative_base(testing.db)
def teardown(self):
Session.close_all()
clear_mappers()
Base.metadata.drop_all()
class DeclarativeInheritanceTest(DeclarativeTestBase):
def test_we_must_copy_mapper_args(self):
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator,
'polymorphic_identity': 'person'}
class Engineer(Person):
primary_language = Column(String(50))
assert 'inherits' not in Person.__mapper_args__
assert class_mapper(Engineer).polymorphic_identity is None
assert class_mapper(Engineer).polymorphic_on is Person.__table__.c.type
def test_we_must_only_copy_column_mapper_args(self):
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
a = Column(Integer)
b = Column(Integer)
c = Column(Integer)
d = Column(Integer)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator,
'polymorphic_identity': 'person',
'version_id_col': 'a',
'column_prefix': 'bar',
'include_properties': ['id', 'a', 'b'],
}
assert class_mapper(Person).version_id_col == 'a'
assert class_mapper(Person).include_properties == set(['id', 'a', 'b'])
def test_custom_join_condition(self):
class Foo(Base):
__tablename__ = 'foo'
id = Column('id', Integer, primary_key=True)
class Bar(Foo):
__tablename__ = 'bar'
id = Column('id', Integer, primary_key=True)
foo_id = Column('foo_id', Integer)
__mapper_args__ = {'inherit_condition': foo_id == Foo.id}
# compile succeeds because inherit_condition is honored
configure_mappers()
def test_joined(self):
class Company(Base, fixtures.ComparableEntity):
__tablename__ = 'companies'
id = Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column('name', String(50))
employees = relationship('Person')
class Person(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
company_id = Column('company_id', Integer,
ForeignKey('companies.id'))
name = Column('name', String(50))
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'polymorphic_identity': 'engineer'}
id = Column('id', Integer, ForeignKey('people.id'),
primary_key=True)
primary_language = Column('primary_language', String(50))
class Manager(Person):
__tablename__ = 'managers'
__mapper_args__ = {'polymorphic_identity': 'manager'}
id = Column('id', Integer, ForeignKey('people.id'),
primary_key=True)
golf_swing = Column('golf_swing', String(50))
Base.metadata.create_all()
sess = create_session()
c1 = Company(name='MegaCorp, Inc.',
employees=[Engineer(name='dilbert',
primary_language='java'), Engineer(name='wally',
primary_language='c++'), Manager(name='dogbert',
golf_swing='fore!')])
c2 = Company(name='Elbonia, Inc.',
employees=[Engineer(name='vlad',
primary_language='cobol')])
sess.add(c1)
sess.add(c2)
sess.flush()
sess.expunge_all()
eq_(sess.query(Company).filter(Company.employees.of_type(Engineer).
any(Engineer.primary_language
== 'cobol')).first(), c2)
# ensure that the Manager mapper was compiled with the Manager id
# column as higher priority. this ensures that "Manager.id"
# is appropriately treated as the "id" column in the "manager"
# table (reversed from 0.6's behavior.)
eq_(
Manager.id.property.columns,
[Manager.__table__.c.id, Person.__table__.c.id]
)
# assert that the "id" column is available without a second
# load. as of 0.7, the ColumnProperty tests all columns
        # in its list to see which is present in the row.
sess.expunge_all()
def go():
assert sess.query(Manager).filter(Manager.name == 'dogbert'
).one().id
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
assert sess.query(Person).filter(Manager.name == 'dogbert'
).one().id
self.assert_sql_count(testing.db, go, 1)
def test_add_subcol_after_the_fact(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column('name', String(50))
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'polymorphic_identity': 'engineer'}
id = Column('id', Integer, ForeignKey('people.id'),
primary_key=True)
Engineer.primary_language = Column('primary_language',
String(50))
Base.metadata.create_all()
sess = create_session()
e1 = Engineer(primary_language='java', name='dilbert')
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Person).first(),
Engineer(primary_language='java', name='dilbert'))
def test_add_parentcol_after_the_fact(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'polymorphic_identity': 'engineer'}
primary_language = Column(String(50))
id = Column('id', Integer, ForeignKey('people.id'),
primary_key=True)
Person.name = Column('name', String(50))
Base.metadata.create_all()
sess = create_session()
e1 = Engineer(primary_language='java', name='dilbert')
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Person).first(),
Engineer(primary_language='java', name='dilbert'))
def test_add_sub_parentcol_after_the_fact(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'polymorphic_identity': 'engineer'}
primary_language = Column(String(50))
id = Column('id', Integer, ForeignKey('people.id'),
primary_key=True)
class Admin(Engineer):
__tablename__ = 'admins'
__mapper_args__ = {'polymorphic_identity': 'admin'}
workstation = Column(String(50))
id = Column('id', Integer, ForeignKey('engineers.id'),
primary_key=True)
Person.name = Column('name', String(50))
Base.metadata.create_all()
sess = create_session()
e1 = Admin(primary_language='java', name='dilbert',
workstation='foo')
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Person).first(), Admin(primary_language='java',
name='dilbert', workstation='foo'))
def test_subclass_mixin(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column('id', Integer, primary_key=True)
name = Column('name', String(50))
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class MyMixin(object):
pass
class Engineer(MyMixin, Person):
__tablename__ = 'engineers'
__mapper_args__ = {'polymorphic_identity': 'engineer'}
id = Column('id', Integer, ForeignKey('people.id'),
primary_key=True)
primary_language = Column('primary_language', String(50))
assert class_mapper(Engineer).inherits is class_mapper(Person)
def test_with_undefined_foreignkey(self):
class Parent(Base):
__tablename__ = 'parent'
id = Column('id', Integer, primary_key=True)
tp = Column('type', String(50))
__mapper_args__ = dict(polymorphic_on=tp)
class Child1(Parent):
__tablename__ = 'child1'
id = Column('id', Integer, ForeignKey('parent.id'),
primary_key=True)
related_child2 = Column('c2', Integer,
ForeignKey('child2.id'))
__mapper_args__ = dict(polymorphic_identity='child1')
# no exception is raised by the ForeignKey to "child2" even
# though child2 doesn't exist yet
class Child2(Parent):
__tablename__ = 'child2'
id = Column('id', Integer, ForeignKey('parent.id'),
primary_key=True)
related_child1 = Column('c1', Integer)
__mapper_args__ = dict(polymorphic_identity='child2')
sa.orm.configure_mappers() # no exceptions here
def test_foreign_keys_with_col(self):
"""Test that foreign keys that reference a literal 'id' subclass
'id' attribute behave intuitively.
See [ticket:1892].
"""
class Booking(Base):
__tablename__ = 'booking'
id = Column(Integer, primary_key=True)
class PlanBooking(Booking):
__tablename__ = 'plan_booking'
id = Column(Integer, ForeignKey(Booking.id),
primary_key=True)
# referencing PlanBooking.id gives us the column
# on plan_booking, not booking
class FeatureBooking(Booking):
__tablename__ = 'feature_booking'
id = Column(Integer, ForeignKey(Booking.id),
primary_key=True)
plan_booking_id = Column(Integer,
ForeignKey(PlanBooking.id))
plan_booking = relationship(PlanBooking,
backref='feature_bookings')
assert FeatureBooking.__table__.c.plan_booking_id.\
references(PlanBooking.__table__.c.id)
assert FeatureBooking.__table__.c.id.\
references(Booking.__table__.c.id)
def test_single_colsonbase(self):
"""test single inheritance where all the columns are on the base
class."""
class Company(Base, fixtures.ComparableEntity):
__tablename__ = 'companies'
id = Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column('name', String(50))
employees = relationship('Person')
class Person(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
company_id = Column('company_id', Integer,
ForeignKey('companies.id'))
name = Column('name', String(50))
discriminator = Column('type', String(50))
primary_language = Column('primary_language', String(50))
golf_swing = Column('golf_swing', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__mapper_args__ = {'polymorphic_identity': 'engineer'}
class Manager(Person):
__mapper_args__ = {'polymorphic_identity': 'manager'}
Base.metadata.create_all()
sess = create_session()
c1 = Company(name='MegaCorp, Inc.',
employees=[Engineer(name='dilbert',
primary_language='java'), Engineer(name='wally',
primary_language='c++'), Manager(name='dogbert',
golf_swing='fore!')])
c2 = Company(name='Elbonia, Inc.',
employees=[Engineer(name='vlad',
primary_language='cobol')])
sess.add(c1)
sess.add(c2)
sess.flush()
sess.expunge_all()
eq_(sess.query(Person).filter(Engineer.primary_language
== 'cobol').first(), Engineer(name='vlad'))
eq_(sess.query(Company).filter(Company.employees.of_type(Engineer).
any(Engineer.primary_language
== 'cobol')).first(), c2)
def test_single_colsonsub(self):
"""test single inheritance where the columns are local to their
class.
this is a newer usage.
"""
class Company(Base, fixtures.ComparableEntity):
__tablename__ = 'companies'
id = Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column('name', String(50))
employees = relationship('Person')
class Person(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
company_id = Column(Integer, ForeignKey('companies.id'))
name = Column(String(50))
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__mapper_args__ = {'polymorphic_identity': 'engineer'}
primary_language = Column(String(50))
class Manager(Person):
__mapper_args__ = {'polymorphic_identity': 'manager'}
golf_swing = Column(String(50))
# we have here a situation that is somewhat unique. the Person
# class is mapped to the "people" table, but it was mapped when
# the table did not include the "primary_language" or
# "golf_swing" columns. declarative will also manipulate the
# exclude_properties collection so that sibling classes don't
# cross-pollinate.
assert Person.__table__.c.company_id is not None
assert Person.__table__.c.golf_swing is not None
assert Person.__table__.c.primary_language is not None
assert Engineer.primary_language is not None
assert Manager.golf_swing is not None
assert not hasattr(Person, 'primary_language')
assert not hasattr(Person, 'golf_swing')
assert not hasattr(Engineer, 'golf_swing')
assert not hasattr(Manager, 'primary_language')
Base.metadata.create_all()
sess = create_session()
e1 = Engineer(name='dilbert', primary_language='java')
e2 = Engineer(name='wally', primary_language='c++')
m1 = Manager(name='dogbert', golf_swing='fore!')
c1 = Company(name='MegaCorp, Inc.', employees=[e1, e2, m1])
e3 = Engineer(name='vlad', primary_language='cobol')
c2 = Company(name='Elbonia, Inc.', employees=[e3])
sess.add(c1)
sess.add(c2)
sess.flush()
sess.expunge_all()
eq_(sess.query(Person).filter(Engineer.primary_language
== 'cobol').first(), Engineer(name='vlad'))
eq_(sess.query(Company).filter(Company.employees.of_type(Engineer).
any(Engineer.primary_language
== 'cobol')).first(), c2)
eq_(sess.query(Engineer).filter_by(primary_language='cobol'
).one(), Engineer(name='vlad', primary_language='cobol'))
@testing.skip_if(lambda: testing.against('oracle'),
"Test has an empty insert in it at the moment")
def test_columns_single_inheritance_conflict_resolution(self):
"""Test that a declared_attr can return the existing column and it will
be ignored. this allows conditional columns to be added.
See [ticket:2472].
"""
class Person(Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
class Engineer(Person):
"""single table inheritance"""
@declared_attr
def target_id(cls):
return cls.__table__.c.get('target_id',
Column(Integer, ForeignKey('other.id'))
)
@declared_attr
def target(cls):
return relationship("Other")
class Manager(Person):
"""single table inheritance"""
@declared_attr
def target_id(cls):
return cls.__table__.c.get('target_id',
Column(Integer, ForeignKey('other.id'))
)
@declared_attr
def target(cls):
return relationship("Other")
class Other(Base):
__tablename__ = 'other'
id = Column(Integer, primary_key=True)
is_(
Engineer.target_id.property.columns[0],
Person.__table__.c.target_id
)
is_(
Manager.target_id.property.columns[0],
Person.__table__.c.target_id
)
# do a brief round trip on this
Base.metadata.create_all()
session = Session()
o1, o2 = Other(), Other()
session.add_all([
Engineer(target=o1),
Manager(target=o2),
Manager(target=o1)
])
session.commit()
eq_(session.query(Engineer).first().target, o1)
def test_joined_from_single(self):
class Company(Base, fixtures.ComparableEntity):
__tablename__ = 'companies'
id = Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column('name', String(50))
employees = relationship('Person')
class Person(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
company_id = Column(Integer, ForeignKey('companies.id'))
name = Column(String(50))
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Manager(Person):
__mapper_args__ = {'polymorphic_identity': 'manager'}
golf_swing = Column(String(50))
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'polymorphic_identity': 'engineer'}
id = Column(Integer, ForeignKey('people.id'),
primary_key=True)
primary_language = Column(String(50))
assert Person.__table__.c.golf_swing is not None
assert not Person.__table__.c.has_key('primary_language')
assert Engineer.__table__.c.primary_language is not None
assert Engineer.primary_language is not None
assert Manager.golf_swing is not None
assert not hasattr(Person, 'primary_language')
assert not hasattr(Person, 'golf_swing')
assert not hasattr(Engineer, 'golf_swing')
assert not hasattr(Manager, 'primary_language')
Base.metadata.create_all()
sess = create_session()
e1 = Engineer(name='dilbert', primary_language='java')
e2 = Engineer(name='wally', primary_language='c++')
m1 = Manager(name='dogbert', golf_swing='fore!')
c1 = Company(name='MegaCorp, Inc.', employees=[e1, e2, m1])
e3 = Engineer(name='vlad', primary_language='cobol')
c2 = Company(name='Elbonia, Inc.', employees=[e3])
sess.add(c1)
sess.add(c2)
sess.flush()
sess.expunge_all()
eq_(sess.query(Person).with_polymorphic(Engineer).
filter(Engineer.primary_language
== 'cobol').first(), Engineer(name='vlad'))
eq_(sess.query(Company).filter(Company.employees.of_type(Engineer).
any(Engineer.primary_language
== 'cobol')).first(), c2)
eq_(sess.query(Engineer).filter_by(primary_language='cobol'
).one(), Engineer(name='vlad', primary_language='cobol'))
def test_single_from_joined_colsonsub(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(50))
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Manager(Person):
__tablename__ = 'manager'
__mapper_args__ = {'polymorphic_identity': 'manager'}
id = Column(Integer, ForeignKey('people.id'), primary_key=True)
golf_swing = Column(String(50))
class Boss(Manager):
boss_name = Column(String(50))
is_(
Boss.__mapper__.column_attrs['boss_name'].columns[0],
Manager.__table__.c.boss_name
)
def test_polymorphic_on_converted_from_inst(self):
class A(Base):
__tablename__ = 'A'
id = Column(Integer, primary_key=True)
discriminator = Column(String)
@declared_attr
def __mapper_args__(cls):
return {
'polymorphic_identity': cls.__name__,
'polymorphic_on': cls.discriminator
}
class B(A):
pass
is_(B.__mapper__.polymorphic_on, A.__table__.c.discriminator)
def test_add_deferred(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
Person.name = deferred(Column(String(10)))
Base.metadata.create_all()
sess = create_session()
p = Person(name='ratbert')
sess.add(p)
sess.flush()
sess.expunge_all()
eq_(sess.query(Person).all(), [Person(name='ratbert')])
sess.expunge_all()
person = sess.query(Person).filter(Person.name == 'ratbert'
).one()
assert 'name' not in person.__dict__
def test_single_fksonsub(self):
"""test single inheritance with a foreign key-holding column on
a subclass.
"""
class Person(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(50))
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__mapper_args__ = {'polymorphic_identity': 'engineer'}
primary_language_id = Column(Integer,
ForeignKey('languages.id'))
primary_language = relationship('Language')
class Language(Base, fixtures.ComparableEntity):
__tablename__ = 'languages'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(50))
assert not hasattr(Person, 'primary_language_id')
Base.metadata.create_all()
sess = create_session()
java, cpp, cobol = Language(name='java'), Language(name='cpp'), \
Language(name='cobol')
e1 = Engineer(name='dilbert', primary_language=java)
e2 = Engineer(name='wally', primary_language=cpp)
e3 = Engineer(name='vlad', primary_language=cobol)
sess.add_all([e1, e2, e3])
sess.flush()
sess.expunge_all()
eq_(sess.query(Person).filter(Engineer.primary_language.has(
Language.name
== 'cobol')).first(), Engineer(name='vlad',
primary_language=Language(name='cobol')))
eq_(sess.query(Engineer).filter(Engineer.primary_language.has(
Language.name
== 'cobol')).one(), Engineer(name='vlad',
primary_language=Language(name='cobol')))
eq_(sess.query(Person).join(Engineer.primary_language).order_by(
Language.name).all(),
[Engineer(name='vlad',
primary_language=Language(name='cobol')),
Engineer(name='wally', primary_language=Language(name='cpp'
)), Engineer(name='dilbert',
primary_language=Language(name='java'))])
def test_single_three_levels(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
name = Column(String(50))
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__mapper_args__ = {'polymorphic_identity': 'engineer'}
primary_language = Column(String(50))
class JuniorEngineer(Engineer):
__mapper_args__ = \
{'polymorphic_identity': 'junior_engineer'}
nerf_gun = Column(String(50))
class Manager(Person):
__mapper_args__ = {'polymorphic_identity': 'manager'}
golf_swing = Column(String(50))
assert JuniorEngineer.nerf_gun
assert JuniorEngineer.primary_language
assert JuniorEngineer.name
assert Manager.golf_swing
assert Engineer.primary_language
assert not hasattr(Engineer, 'golf_swing')
assert not hasattr(Engineer, 'nerf_gun')
assert not hasattr(Manager, 'nerf_gun')
assert not hasattr(Manager, 'primary_language')
def test_single_detects_conflict(self):
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
name = Column(String(50))
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__mapper_args__ = {'polymorphic_identity': 'engineer'}
primary_language = Column(String(50))
# test sibling col conflict
def go():
class Manager(Person):
__mapper_args__ = {'polymorphic_identity': 'manager'}
golf_swing = Column(String(50))
primary_language = Column(String(50))
assert_raises(sa.exc.ArgumentError, go)
# test parent col conflict
def go():
class Salesman(Person):
__mapper_args__ = {'polymorphic_identity': 'manager'}
name = Column(String(50))
assert_raises(sa.exc.ArgumentError, go)
def test_single_no_special_cols(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column('id', Integer, primary_key=True)
name = Column('name', String(50))
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
def go():
class Engineer(Person):
__mapper_args__ = {'polymorphic_identity': 'engineer'}
primary_language = Column('primary_language',
String(50))
foo_bar = Column(Integer, primary_key=True)
assert_raises_message(sa.exc.ArgumentError,
'place primary key', go)
def test_single_no_table_args(self):
class Person(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column('id', Integer, primary_key=True)
name = Column('name', String(50))
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
def go():
class Engineer(Person):
__mapper_args__ = {'polymorphic_identity': 'engineer'}
primary_language = Column('primary_language',
String(50))
# this should be on the Person class, as this is single
# table inheritance, which is why we test that this
# throws an exception!
__table_args__ = {'mysql_engine': 'InnoDB'}
assert_raises_message(sa.exc.ArgumentError,
'place __table_args__', go)
@testing.emits_warning("This declarative")
def test_dupe_name_in_hierarchy(self):
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
a_1 = A
class A(a_1):
__tablename__ = 'b'
id = Column(Integer(), ForeignKey(a_1.id), primary_key=True)
assert A.__mapper__.inherits is a_1.__mapper__
class OverlapColPrecedenceTest(DeclarativeTestBase):
"""test #1892 cases when declarative does column precedence."""
def _run_test(self, Engineer, e_id, p_id):
p_table = Base.metadata.tables['person']
e_table = Base.metadata.tables['engineer']
assert Engineer.id.property.columns[0] is e_table.c[e_id]
assert Engineer.id.property.columns[1] is p_table.c[p_id]
def test_basic(self):
class Person(Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
class Engineer(Person):
__tablename__ = 'engineer'
id = Column(Integer, ForeignKey('person.id'), primary_key=True)
self._run_test(Engineer, "id", "id")
def test_alt_name_base(self):
class Person(Base):
__tablename__ = 'person'
id = Column("pid", Integer, primary_key=True)
class Engineer(Person):
__tablename__ = 'engineer'
id = Column(Integer, ForeignKey('person.pid'), primary_key=True)
self._run_test(Engineer, "id", "pid")
def test_alt_name_sub(self):
class Person(Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
class Engineer(Person):
__tablename__ = 'engineer'
id = Column("eid", Integer, ForeignKey('person.id'),
primary_key=True)
self._run_test(Engineer, "eid", "id")
def test_alt_name_both(self):
class Person(Base):
__tablename__ = 'person'
id = Column("pid", Integer, primary_key=True)
class Engineer(Person):
__tablename__ = 'engineer'
id = Column("eid", Integer, ForeignKey('person.pid'),
primary_key=True)
self._run_test(Engineer, "eid", "pid")
from test.orm.test_events import _RemoveListeners
class ConcreteInhTest(_RemoveListeners, DeclarativeTestBase):
def _roundtrip(self, Employee, Manager, Engineer, Boss,
polymorphic=True, explicit_type=False):
Base.metadata.create_all()
sess = create_session()
e1 = Engineer(name='dilbert', primary_language='java')
e2 = Engineer(name='wally', primary_language='c++')
m1 = Manager(name='dogbert', golf_swing='fore!')
e3 = Engineer(name='vlad', primary_language='cobol')
b1 = Boss(name="pointy haired")
if polymorphic:
for obj in [e1, e2, m1, e3, b1]:
if explicit_type:
eq_(obj.type, obj.__mapper__.polymorphic_identity)
else:
assert_raises_message(
AttributeError,
"does not implement attribute .?'type' "
"at the instance level.",
getattr, obj, "type"
)
else:
assert "type" not in Engineer.__dict__
assert "type" not in Manager.__dict__
assert "type" not in Boss.__dict__
sess.add_all([e1, e2, m1, e3, b1])
sess.flush()
sess.expunge_all()
if polymorphic:
eq_(sess.query(Employee).order_by(Employee.name).all(),
[Engineer(name='dilbert'), Manager(name='dogbert'),
Boss(name='pointy haired'), Engineer(name='vlad'), Engineer(name='wally')])
else:
eq_(sess.query(Engineer).order_by(Engineer.name).all(),
[Engineer(name='dilbert'), Engineer(name='vlad'),
Engineer(name='wally')])
eq_(sess.query(Manager).all(), [Manager(name='dogbert')])
eq_(sess.query(Boss).all(), [Boss(name='pointy haired')])
def test_explicit(self):
engineers = Table('engineers', Base.metadata, Column('id',
Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('primary_language', String(50)))
managers = Table('managers', Base.metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('golf_swing', String(50))
)
boss = Table('boss', Base.metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('golf_swing', String(50))
)
punion = polymorphic_union({
'engineer': engineers,
'manager': managers,
'boss': boss}, 'type', 'punion')
class Employee(Base, fixtures.ComparableEntity):
__table__ = punion
__mapper_args__ = {'polymorphic_on': punion.c.type}
class Engineer(Employee):
__table__ = engineers
__mapper_args__ = {'polymorphic_identity': 'engineer',
'concrete': True}
class Manager(Employee):
__table__ = managers
__mapper_args__ = {'polymorphic_identity': 'manager',
'concrete': True}
class Boss(Manager):
__table__ = boss
__mapper_args__ = {'polymorphic_identity': 'boss',
'concrete': True}
self._roundtrip(Employee, Manager, Engineer, Boss)
def test_concrete_inline_non_polymorphic(self):
"""test the example from the declarative docs."""
class Employee(Base, fixtures.ComparableEntity):
__tablename__ = 'people'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(50))
class Engineer(Employee):
__tablename__ = 'engineers'
__mapper_args__ = {'concrete': True}
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
primary_language = Column(String(50))
name = Column(String(50))
class Manager(Employee):
__tablename__ = 'manager'
__mapper_args__ = {'concrete': True}
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
golf_swing = Column(String(50))
name = Column(String(50))
class Boss(Manager):
__tablename__ = 'boss'
__mapper_args__ = {'concrete': True}
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
golf_swing = Column(String(50))
name = Column(String(50))
self._roundtrip(Employee, Manager, Engineer, Boss, polymorphic=False)
def test_abstract_concrete_extension(self):
class Employee(AbstractConcreteBase, Base, fixtures.ComparableEntity):
pass
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(50))
golf_swing = Column(String(40))
__mapper_args__ = {
'polymorphic_identity': 'manager',
'concrete': True}
class Boss(Manager):
__tablename__ = 'boss'
employee_id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(50))
golf_swing = Column(String(40))
__mapper_args__ = {
'polymorphic_identity': 'boss',
'concrete': True}
class Engineer(Employee):
__tablename__ = 'engineer'
employee_id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(50))
primary_language = Column(String(40))
__mapper_args__ = {'polymorphic_identity': 'engineer',
'concrete': True}
self._roundtrip(Employee, Manager, Engineer, Boss)
def test_concrete_extension(self):
class Employee(ConcreteBase, Base, fixtures.ComparableEntity):
__tablename__ = 'employee'
employee_id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(50))
__mapper_args__ = {
'polymorphic_identity': 'employee',
'concrete': True}
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(50))
golf_swing = Column(String(40))
__mapper_args__ = {
'polymorphic_identity': 'manager',
'concrete': True}
class Boss(Manager):
__tablename__ = 'boss'
employee_id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(50))
golf_swing = Column(String(40))
__mapper_args__ = {
'polymorphic_identity': 'boss',
'concrete': True}
class Engineer(Employee):
__tablename__ = 'engineer'
employee_id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(50))
primary_language = Column(String(40))
__mapper_args__ = {'polymorphic_identity': 'engineer',
'concrete': True}
self._roundtrip(Employee, Manager, Engineer, Boss)
def test_has_inherited_table_doesnt_consider_base(self):
class A(Base):
__tablename__ = 'a'
id = Column(Integer, primary_key=True)
assert not has_inherited_table(A)
class B(A):
__tablename__ = 'b'
id = Column(Integer, ForeignKey('a.id'), primary_key=True)
assert has_inherited_table(B)
def test_has_inherited_table_in_mapper_args(self):
class Test(Base):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
type = Column(String(20))
@declared_attr
def __mapper_args__(cls):
if not has_inherited_table(cls):
ret = {
'polymorphic_identity': 'default',
'polymorphic_on': cls.type,
}
else:
ret = {'polymorphic_identity': cls.__name__}
return ret
class PolyTest(Test):
__tablename__ = 'poly_test'
id = Column(Integer, ForeignKey(Test.id), primary_key=True)
configure_mappers()
assert Test.__mapper__.polymorphic_on is Test.__table__.c.type
assert PolyTest.__mapper__.polymorphic_on is Test.__table__.c.type
def test_ok_to_override_type_from_abstract(self):
class Employee(AbstractConcreteBase, Base, fixtures.ComparableEntity):
pass
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(50))
golf_swing = Column(String(40))
@property
def type(self):
return "manager"
__mapper_args__ = {
'polymorphic_identity': "manager",
'concrete': True}
class Boss(Manager):
__tablename__ = 'boss'
employee_id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(50))
golf_swing = Column(String(40))
@property
def type(self):
return "boss"
__mapper_args__ = {
'polymorphic_identity': "boss",
'concrete': True}
class Engineer(Employee):
__tablename__ = 'engineer'
employee_id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(50))
primary_language = Column(String(40))
@property
def type(self):
return "engineer"
__mapper_args__ = {'polymorphic_identity': "engineer",
'concrete': True}
self._roundtrip(Employee, Manager, Engineer, Boss, explicit_type=True)
|
py | 1a3a4bd466cb105a1707cecc23cd4c5eb9b07cda | import os
from alipay import AliPay
from django.conf import settings
from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from orders.models import OrderInfo
# Create your views here.
# PUT /payment/status/?<payment result data>
from payment.models import Payment
class PaymentStatusView(APIView):
permission_classes = [IsAuthenticated]
def put(self,request):
"""
保存支付结果
1.获取支付结果数据并进行签名认证
2.校验订单是否有效
3.保存支付结果并修改订单支付状态
4.返回支付交易编号
"""
data = request.query_params.dict()
signature = data.pop('sign')
alipay = AliPay(
            appid=settings.ALIPAY_APPID,  # application appid
            app_notify_url=None,  # default callback url
            app_private_key_path=os.path.join(settings.BASE_DIR, 'apps/payment/keys/app_private_key.pem'),
            # path to the site's private key file
            alipay_public_key_path=os.path.join(settings.BASE_DIR, 'apps/payment/keys/alipay_public_key.pem'),
            # path to the Alipay public key file
            sign_type="RSA2",  # RSA or RSA2
            debug=settings.ALIPAY_DEBUG  # defaults to False
)
success = alipay.verify(data, signature)
if not success:
return Response({'message':'非法操作'},status=status.HTTP_403_FORBIDDEN)
try:
order = OrderInfo.objects.get(order_id=data.get('out_trade_no'),
user=request.user,
pay_method=OrderInfo.PAY_METHODS_ENUM['ALIPAY'],
status=OrderInfo.ORDER_STATUS_ENUM['UNPAID']
)
except OrderInfo.DoesNotExist:
return Response({'message': '无效的订单id'}, status=status.HTTP_400_BAD_REQUEST)
trade_id = data.get('trade_no')
Payment.objects.create(
order = order,
trade_id=trade_id
)
order.status = OrderInfo.ORDER_STATUS_ENUM['UNSEND']
order.save()
return Response({'trade_id':trade_id})
# GET /orders/(?P<order_id>\d+)/payment/
class PaymentView(APIView):
permission_classes = [IsAuthenticated]
def get(self,request,order_id):
"""
获取支付宝支付网址
1.获取order_id并校验订单是否有效
2.组织支付宝支付网址和参数
3.返回支付宝支付网址
"""
user = request.user
try:
order = OrderInfo.objects.get(order_id=order_id,
user=user,
pay_method=OrderInfo.PAY_METHODS_ENUM['ALIPAY'],
status=OrderInfo.ORDER_STATUS_ENUM['UNPAID']
)
except OrderInfo.DoesNotExist:
return Response({'message': '无效的订单id'}, status=status.HTTP_400_BAD_REQUEST)
        # initialize the Alipay SDK client
        alipay = AliPay(
            appid=settings.ALIPAY_APPID,  # application appid
            app_notify_url=None,  # default callback url
            app_private_key_path=os.path.join(settings.BASE_DIR, 'apps/payment/keys/app_private_key.pem'),
            # path to the site's private key file
            alipay_public_key_path=os.path.join(settings.BASE_DIR, 'apps/payment/keys/alipay_public_key.pem'),
            # path to the Alipay public key file
            sign_type="RSA2",  # RSA or RSA2
            debug=settings.ALIPAY_DEBUG  # defaults to False
)
        # build the payment parameters
        # desktop web payment: redirect the client to https://openapi.alipaydev.com/gateway.do? + order_string
total_pay = order.total_amount # Decimal
order_string = alipay.api_alipay_trade_page_pay(
            out_trade_no=order_id,  # order id
            total_amount=str(total_pay),
            subject='闫氏商城%s' % order_id,  # order subject
            return_url="http://www.meiduo.site:8080/pay_success.html",  # return url after payment
)
alipay_url = settings.ALIPAY_URL + '?' + order_string
return Response({'alipay_url': alipay_url})
|
py | 1a3a4c9705fb93bd67b195b9d7d46e3072a994e3 | # coding: utf-8
"""
Openmoney API
Openmoney API # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.namespaces_get import NamespacesGet # noqa: E501
from swagger_client.rest import ApiException
class TestNamespacesGet(unittest.TestCase):
"""NamespacesGet unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNamespacesGet(self):
"""Test NamespacesGet"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.namespaces_get.NamespacesGet() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a3a4cbd2815a0e335156f4315914864b72c1294 | # coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class ResearchResourceTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'translated_title': 'translated-title'
}
def __init__(self, title=None, translated_title=None): # noqa: E501
"""ResearchResourceTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this ResearchResourceTitleV30Rc2. # noqa: E501
:return: The title of this ResearchResourceTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this ResearchResourceTitleV30Rc2.
:param title: The title of this ResearchResourceTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def translated_title(self):
"""Gets the translated_title of this ResearchResourceTitleV30Rc2. # noqa: E501
:return: The translated_title of this ResearchResourceTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this ResearchResourceTitleV30Rc2.
:param translated_title: The translated_title of this ResearchResourceTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResearchResourceTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResearchResourceTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a3a4cc4d5a04f18bb6e3f317835b06eb7ecf4e1 | """Core module. Provides the basic operations needed in sympy.
"""
from basic import Basic, Atom, S, C
from expr import Expr
from sympify import sympify
from symbol import Symbol, Wild, symbols, var
from numbers import Number, Real, Rational, Integer, NumberSymbol,\
RealNumber, igcd, ilcm, seterr
from power import Pow, integer_nthroot
from mul import Mul
from add import Add
from relational import Rel, Eq, Ne, Lt, Le, Gt, Ge, \
Equality, Inequality, Unequality, StrictInequality
from multidimensional import vectorize
from function import Lambda, WildFunction, Derivative, diff, FunctionClass, \
Function, expand, PoleError, expand_mul, expand_log, expand_func,\
expand_trig, expand_complex
from sets import Set, Interval, Union, EmptySet
from evalf import PrecisionExhausted, N
from containers import Tuple
# expose singletons like exp, log, oo, I, etc.
for _n, _cls in Basic.singleton.items():
exec '%s = _cls()' % (_n)
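
# Minimal usage sketch of a few names re-exported by this module, assuming this
# vintage of the library is importable; guarded so nothing runs on import.
if __name__ == "__main__":
    x = Symbol('x')
    expr = (x + 1)**2
    print(expand(expr))                      # x**2 + 2*x + 1
    print(Rational(1, 3) + Rational(1, 6))   # 1/2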
|
py | 1a3a4ce51efd88bec962c1c79065988736ed361d | """ Implement a custom JSON encoder """
from json import JSONEncoder
from datetime import datetime
from decimal import Decimal
class EnhancedEncoder(JSONEncoder):
"""
    Enhanced encoder to encode datetime and Decimal objects
"""
def default(self, o):
"""
        Override JSONEncoder's default() to deal with datetime and Decimal values
        :param o: object value to be encoded
        :return: a JSON-serializable value for datetime and Decimal objects; other types defer to the base encoder
"""
if isinstance(o, datetime):
return o.strftime("%Y-%m-%d %H:%M:%S")
elif isinstance(o, Decimal):
return float(o)
else:
return JSONEncoder.default(self, o)
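
# Minimal usage sketch, assuming the encoder is passed to json.dumps via the
# standard `cls` argument; the payload below is hypothetical example data.
if __name__ == "__main__":
    import json

    payload = {
        "created_at": datetime(2021, 1, 1, 12, 30, 0),
        "price": Decimal("19.99"),
    }
    # datetime serializes as "2021-01-01 12:30:00", Decimal as 19.99
    print(json.dumps(payload, cls=EnhancedEncoder))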
|
py | 1a3a4d836f58fc5366ae246c1379ea6c81c6b37b | from itertools import chain
import glob
import torch
from PIL import Image
from os import path
from torch.utils.data import Dataset
class SegmentationDataset(Dataset):
_EXTENSIONS = ["*.jpg", "*.jpeg", "*.png"]
def __init__(self, in_dir, transform):
super(SegmentationDataset, self).__init__()
self.in_dir = in_dir
self.transform = transform
# Find all images
self.images = []
for img_path in chain(*(glob.iglob(path.join(self.in_dir, ext)) for ext in SegmentationDataset._EXTENSIONS)):
_, name_with_ext = path.split(img_path)
idx, _ = path.splitext(name_with_ext)
self.images.append({
"idx": idx,
"path": img_path
})
def __len__(self):
return len(self.images)
def __getitem__(self, item):
# Load image
with Image.open(self.images[item]["path"]) as img_raw:
size = img_raw.size
img = self.transform(img_raw.convert(mode="RGB"))
return {"img": img, "meta": {"idx": self.images[item]["idx"], "size": size}}
def segmentation_collate(items):
imgs = torch.stack([item["img"] for item in items])
metas = [item["meta"] for item in items]
return {"img": imgs, "meta": metas}
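
# Minimal usage sketch, assuming torchvision is available for the transform and
# that "./images" is a hypothetical directory of .jpg/.jpeg/.png files.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from torchvision import transforms

    transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
    ])
    dataset = SegmentationDataset("./images", transform)
    loader = DataLoader(dataset, batch_size=4, collate_fn=segmentation_collate)
    for batch in loader:
        # batch["img"] is a (B, 3, 256, 256) tensor, batch["meta"] a list of dicts
        print(batch["img"].shape, batch["meta"][0]["idx"])
        break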
|
py | 1a3a4ddacaaa2c1bc4dcc4a95a2c4cbdc6a6c211 | import tests.periodicities.period_test as per
per.buildModel((30 , 'B' , 50));
|
py | 1a3a4e79359244cab729f78e39a8ea5a9724c587 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.translation_service import TranslationServiceClient
from .services.translation_service import TranslationServiceAsyncClient
from .types.translation_service import BatchDocumentInputConfig
from .types.translation_service import BatchDocumentOutputConfig
from .types.translation_service import BatchTranslateDocumentMetadata
from .types.translation_service import BatchTranslateDocumentRequest
from .types.translation_service import BatchTranslateDocumentResponse
from .types.translation_service import BatchTranslateMetadata
from .types.translation_service import BatchTranslateResponse
from .types.translation_service import BatchTranslateTextRequest
from .types.translation_service import CreateGlossaryMetadata
from .types.translation_service import CreateGlossaryRequest
from .types.translation_service import DeleteGlossaryMetadata
from .types.translation_service import DeleteGlossaryRequest
from .types.translation_service import DeleteGlossaryResponse
from .types.translation_service import DetectedLanguage
from .types.translation_service import DetectLanguageRequest
from .types.translation_service import DetectLanguageResponse
from .types.translation_service import DocumentInputConfig
from .types.translation_service import DocumentOutputConfig
from .types.translation_service import DocumentTranslation
from .types.translation_service import GcsDestination
from .types.translation_service import GcsSource
from .types.translation_service import GetGlossaryRequest
from .types.translation_service import GetSupportedLanguagesRequest
from .types.translation_service import Glossary
from .types.translation_service import GlossaryInputConfig
from .types.translation_service import InputConfig
from .types.translation_service import ListGlossariesRequest
from .types.translation_service import ListGlossariesResponse
from .types.translation_service import OutputConfig
from .types.translation_service import SupportedLanguage
from .types.translation_service import SupportedLanguages
from .types.translation_service import TranslateDocumentRequest
from .types.translation_service import TranslateDocumentResponse
from .types.translation_service import TranslateTextGlossaryConfig
from .types.translation_service import TranslateTextRequest
from .types.translation_service import TranslateTextResponse
from .types.translation_service import Translation
__all__ = (
'TranslationServiceAsyncClient',
'BatchDocumentInputConfig',
'BatchDocumentOutputConfig',
'BatchTranslateDocumentMetadata',
'BatchTranslateDocumentRequest',
'BatchTranslateDocumentResponse',
'BatchTranslateMetadata',
'BatchTranslateResponse',
'BatchTranslateTextRequest',
'CreateGlossaryMetadata',
'CreateGlossaryRequest',
'DeleteGlossaryMetadata',
'DeleteGlossaryRequest',
'DeleteGlossaryResponse',
'DetectLanguageRequest',
'DetectLanguageResponse',
'DetectedLanguage',
'DocumentInputConfig',
'DocumentOutputConfig',
'DocumentTranslation',
'GcsDestination',
'GcsSource',
'GetGlossaryRequest',
'GetSupportedLanguagesRequest',
'Glossary',
'GlossaryInputConfig',
'InputConfig',
'ListGlossariesRequest',
'ListGlossariesResponse',
'OutputConfig',
'SupportedLanguage',
'SupportedLanguages',
'TranslateDocumentRequest',
'TranslateDocumentResponse',
'TranslateTextGlossaryConfig',
'TranslateTextRequest',
'TranslateTextResponse',
'Translation',
'TranslationServiceClient',
)
|
py | 1a3a4f8df249632d321772c42a67f4496742c8c2 | #!/usr/bin/env python
###############################################################################
# Copyright (C) 1994 - 2009, Performance Dynamics Company #
# #
# This software is licensed as described in the file COPYING, which #
# you should have received as part of this distribution. The terms #
# are also available at http://www.perfdynamics.com/Tools/copyright.html. #
# #
# You may opt to use, copy, modify, merge, publish, distribute and/or sell #
# copies of the Software, and permit persons to whom the Software is #
# furnished to do so, under the terms of the COPYING file. #
# #
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY #
# KIND, either express or implied. #
###############################################################################
#
# dbc.c - Teradata DBC-10/12 performance model
#
# PDQ calculation of optimal parallel configuration.
#
# $Id: dbc.py,v 4.3 2009/03/26 02:55:32 pfeller Exp $
#
#---------------------------------------------------------------------
import pdq
#---------------------------------------------------------------------
def itoa(n, s):
sign = n
if (sign < 0):
n = -n
i = 0
while (n > 0):
# generate digits in reverse order
        s[i] = chr(ord('0') + (n % 10))
        i += 1
        n //= 10
if (sign < 0):
s[i] = '-'
i += 1
s[i] = '\0'
# reverse
l = len(s)
j = l - 1
for i in range(l):
c = s[i]
s[i] = s[j]
s[j] = c
i += 1
j -= 1
if i >= j:
break
#---------------------------------------------------------------------
think = 10.0
importrs = 300
Sifp = 0.10
Samp = 0.60
Sdsu = 1.20
Nifp = 15
Namp = 50
Ndsu = 100
pdq.Init("Teradata DBC-10/12")
# Create parallel centers
for k in range(Nifp):
name = "IFP%d" % k
nodes = pdq.CreateNode(name, pdq.CEN, pdq.FCFS)
for k in range(Namp):
name = "AMP%d" % k
nodes = pdq.CreateNode(name, pdq.CEN, pdq.FCFS)
for k in range(Ndsu):
name = "DSU%d" % k
nodes = pdq.CreateNode(name, pdq.CEN, pdq.FCFS)
streams = pdq.CreateClosed("query", pdq.TERM, importrs, think)
# pdq.SetGraph("query", 100) - unsupported call
for k in range(Nifp):
name = "IFP%d" % k
pdq.SetDemand(name, "query", Sifp / Nifp)
for k in range(Namp):
name = "AMP%d" % k
pdq.SetDemand(name, "query", Samp / Namp)
for k in range(Ndsu):
name = "DSU%d" % k
pdq.SetDemand(name, "query", Sdsu / Ndsu)
# 300 nodes takes about a minute to solve on a PowerMac
print("Solving ... ")
pdq.Solve(pdq.EXACT)
print("Done.\n")
# pdq.PrintXLS()
pdq.Report()
|
py | 1a3a4ff92432dda2666e361829d4e18bd0936257 | from panda3d.core import *
from direct.distributed.MsgTypes import *
from direct.directnotify import DirectNotifyGlobal
import LoginBase
from direct.distributed.PyDatagram import PyDatagram
class LoginGSAccount(LoginBase.LoginBase):
def __init__(self, cr):
LoginBase.LoginBase.__init__(self, cr)
def createAccount(self, loginName, password, data):
self.loginName = loginName
self.password = password
self.createFlag = 1
self.cr.freeTimeExpiresAt = -1
self.cr.setIsPaid(1)
return None
def authorize(self, loginName, password):
self.loginName = loginName
self.password = password
self.createFlag = 0
self.cr.freeTimeExpiresAt = -1
self.cr.setIsPaid(1)
return None
def supportsRelogin(self):
return 1
def sendLoginMsg(self):
DISLID = config.GetInt('fake-DISL-PlayerAccountId', 0)
if not DISLID:
NameStringId = 'DISLID_%s' % self.loginName
DISLID = config.GetInt(NameStringId, 0)
cr = self.cr
datagram = PyDatagram()
datagram.addUint16(CLIENT_LOGIN)
datagram.addString(self.loginName)
if cr.connectMethod != cr.CM_HTTP:
datagram.addUint32(cr.tcpConn.getAddress().getIp())
else:
datagram.addUint32(0)
datagram.addUint16(5150)
datagram.addString(cr.serverVersion)
datagram.addUint32(cr.hashVal)
datagram.addString(self.password)
datagram.addBool(self.createFlag)
datagram.addString(cr.validateDownload)
datagram.addString(cr.wantMagicWords)
datagram.addUint32(DISLID)
datagram.addString(config.GetString('otp-whitelist', 'YES'))
cr.send(datagram)
def resendPlayToken(self):
pass
def requestPwdReminder(self, email = None, acctName = None):
return 0
def getAccountData(self, loginName, password):
return 'Unsupported'
def supportsParentPassword(self):
return 1
def authenticateParentPassword(self, loginName, password, parentPassword):
return (password == parentPassword, None)
def authenticateParentUsernameAndPassword(self, loginName, password, parentUsername, parentPassword):
return (password == parentPassword, None)
def supportsAuthenticateDelete(self):
return 1
def authenticateDelete(self, loginName, password):
return (password == self.cr.password, None)
def enableSecretFriends(self, loginName, password, parentPassword, enable = 1):
return (password == parentPassword, None)
|
py | 1a3a5162125876af8109f030e53070c2a3ce5b26 | from django.contrib import admin
from .models import DMVModel, BankAccountModel
# Register your models here.
admin.site.register(DMVModel)
admin.site.register(BankAccountModel) |
py | 1a3a51a8f8d87113dc7b5c0baa6a863e0f8e119a | __author__ = 'Sergey Matyunin'
import numpy as np
def interp2linear(z, xi, yi, extrapval=np.nan):
"""
Linear interpolation equivalent to interp2(z, xi, yi,'linear') in MATLAB
@param z: function defined on square lattice [0..width(z))X[0..height(z))
@param xi: matrix of x coordinates where interpolation is required
@param yi: matrix of y coordinates where interpolation is required
@param extrapval: value for out of range positions. default is numpy.nan
@return: interpolated values in [xi,yi] points
@raise Exception:
"""
x = xi.copy()
y = yi.copy()
nrows, ncols = z.shape
if nrows < 2 or ncols < 2:
raise Exception("z shape is too small")
if not x.shape == y.shape:
raise Exception("sizes of X indexes and Y-indexes must match")
# find x values out of range
x_bad = ( (x < 0) | (x > ncols - 1))
if x_bad.any():
x[x_bad] = 0
# find y values out of range
y_bad = ((y < 0) | (y > nrows - 1))
if y_bad.any():
y[y_bad] = 0
# linear indexing. z must be in 'C' order
ndx = np.floor(y) * ncols + np.floor(x)
ndx = ndx.astype('int32')
# fix parameters on x border
d = (x == ncols - 1)
x = (x - np.floor(x))
if d.any():
x[d] += 1
ndx[d] -= 1
# fix parameters on y border
d = (y == nrows - 1)
y = (y - np.floor(y))
if d.any():
y[d] += 1
ndx[d] -= ncols
# interpolate
one_minus_t = 1 - y
z = z.ravel()
f = (z[ndx] * one_minus_t + z[ndx + ncols] * y ) * (1 - x) + (
z[ndx + 1] * one_minus_t + z[ndx + ncols + 1] * y) * x
# Set out of range positions to extrapval
if x_bad.any():
f[x_bad] = extrapval
if y_bad.any():
f[y_bad] = extrapval
return f
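
# Minimal usage sketch: bilinear interpolation of a small 3x4 grid at two query
# points (values computed from the grid itself, no external data assumed).
if __name__ == "__main__":
    z = np.arange(12, dtype=float).reshape(3, 4)
    xi = np.array([[0.5, 1.5]])
    yi = np.array([[0.5, 1.0]])
    # expected result: [[2.5, 5.5]]
    print(interp2linear(z, xi, yi))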
|
py | 1a3a5214b4abeadc00942cb8089b25ff5bb6e41d | from textwrap import wrap
def box(s, width=25):
a = wrap(s, width)
return ['+' + '-'*(width+2) + '+'] + \
['| ' + l.ljust(width) + ' |' for l in a] + \
['+' + '-'*(width+2) + '+']
def fillBoxes(boxes, maxWidth):
s = ['']
start = 0
for b in boxes:
# locate start line
for x in range(start, len(s)):
if len(s[start]) + len(b[0]) < maxWidth:
break
start += 1
print 'Adding',len(s),start,len(b)
if len(b) > len(s)-start:
s += ['' for i in range(len(b))]
p = len(s[start])
for l in range(len(b)):
if len(s[start+l]) < p:
s[start+l] += ' '*p
s[start+l] += b[l]
return '\n'.join(s).strip()
b2 = box('1this is a very long string that needs to be wrapped into shorted lines', 45)
b1 = box('2this is a very long string that needs to be wrapped into shorted lines')
b3 = box('3this is a very long string that needs to be wrapped into shorted lines', 45)
b4 = box('4this is a very long string that needs to be wrapped into shorted lines')
b5 = box('5this is a very long string that needs to be wrapped into shorted lines')
b6 = box(' '.join([str(i).zfill(2) for i in range(100)]), 29)
print fillBoxes([b1,b2,b3,b4,b5,b6], 150)
|
py | 1a3a525b1a37a3411bd34f21ae53c91463571e17 | import logging
from hazelcast.cluster import ClusterService, RandomLoadBalancer
from hazelcast.config import ClientConfig
from hazelcast.connection import ConnectionManager, Heartbeat
from hazelcast.invocation import InvocationService, ListenerService
from hazelcast.lifecycle import LifecycleService, LIFECYCLE_STATE_SHUTTING_DOWN, LIFECYCLE_STATE_SHUTDOWN
from hazelcast.partition import PartitionService
from hazelcast.proxy import ProxyManager, MAP_SERVICE, QUEUE_SERVICE, LIST_SERVICE, SET_SERVICE, MULTI_MAP_SERVICE, \
REPLICATED_MAP_SERVICE, ATOMIC_LONG_SERVICE, ATOMIC_REFERENCE_SERVICE, RINGBUFFER_SERIVCE, COUNT_DOWN_LATCH_SERVICE, \
TOPIC_SERVICE, RELIABLE_TOPIC_SERVICE, SEMAPHORE_SERVICE, LOCK_SERVICE, ID_GENERATOR_SERVICE, \
ID_GENERATOR_ATOMIC_LONG_PREFIX, \
EXECUTOR_SERVICE
from hazelcast.reactor import AsyncoreReactor
from hazelcast.serialization import SerializationServiceV1
from hazelcast.transaction import TWO_PHASE, TransactionManager
from hazelcast.util import LockReferenceIdGenerator
class HazelcastClient(object):
"""
Hazelcast Client.
"""
logger = logging.getLogger("HazelcastClient")
_config = None
def __init__(self, config=None):
self.config = config or ClientConfig()
self.lifecycle = LifecycleService(self.config)
self.reactor = AsyncoreReactor()
self.connection_manager = ConnectionManager(self, self.reactor.new_connection)
self.heartbeat = Heartbeat(self)
self.invoker = InvocationService(self)
self.listener = ListenerService(self)
self.cluster = ClusterService(self.config, self)
self.partition_service = PartitionService(self)
self.proxy = ProxyManager(self)
self.load_balancer = RandomLoadBalancer(self.cluster)
self.serialization_service = SerializationServiceV1(serialization_config=self.config.serialization_config)
self.transaction_manager = TransactionManager(self)
self.lock_reference_id_generator = LockReferenceIdGenerator()
self._start()
def _start(self):
self.reactor.start()
try:
self.cluster.start()
self.heartbeat.start()
self.partition_service.start()
except:
self.reactor.shutdown()
raise
self.logger.info("Client started.")
def get_atomic_long(self, name):
"""
Creates cluster-wide :class:`~hazelcast.proxy.atomic_long.AtomicLong`.
:param name: (str), name of the AtomicLong proxy.
:return: (:class:`~hazelcast.proxy.atomic_long.AtomicLong`), AtomicLong proxy for the given name.
"""
return self.proxy.get_or_create(ATOMIC_LONG_SERVICE, name)
def get_atomic_reference(self, name):
"""
Creates cluster-wide :class:`~hazelcast.proxy.atomic_reference.AtomicReference`.
:param name: (str), name of the AtomicReference proxy.
:return: (:class:`~hazelcast.proxy.atomic_reference.AtomicReference`), AtomicReference proxy for the given name.
"""
return self.proxy.get_or_create(ATOMIC_REFERENCE_SERVICE, name)
def get_count_down_latch(self, name):
"""
Creates cluster-wide :class:`~hazelcast.proxy.count_down_latch.CountDownLatch`.
:param name: (str), name of the CountDownLatch proxy.
:return: (:class:`~hazelcast.proxy.count_down_latch.CountDownLatch`), CountDownLatch proxy for the given name.
"""
return self.proxy.get_or_create(COUNT_DOWN_LATCH_SERVICE, name)
def get_executor(self, name):
"""
Creates cluster-wide :class:`~hazelcast.proxy.executor.Executor`.
:param name: (str), name of the Executor proxy.
:return: (:class:`~hazelcast.proxy.executor.Executor`), Executor proxy for the given name.
"""
return self.proxy.get_or_create(EXECUTOR_SERVICE, name)
def get_id_generator(self, name):
"""
Creates cluster-wide :class:`~hazelcast.proxy.id_generator.IdGenerator`.
:param name: (str), name of the IdGenerator proxy.
:return: (:class:`~hazelcast.proxy.id_generator.IdGenerator`), IdGenerator proxy for the given name.
"""
atomic_long = self.get_atomic_long(ID_GENERATOR_ATOMIC_LONG_PREFIX + name)
return self.proxy.get_or_create(ID_GENERATOR_SERVICE, name, atomic_long=atomic_long)
def get_queue(self, name):
"""
Returns the distributed queue instance with the specified name.
:param name: (str), name of the distributed queue.
:return: (:class:`~hazelcast.proxy.queue.Queue`), distributed queue instance with the specified name.
"""
return self.proxy.get_or_create(QUEUE_SERVICE, name)
def get_list(self, name):
"""
Returns the distributed list instance with the specified name.
:param name: (str), name of the distributed list.
:return: (:class:`~hazelcast.proxy.list.List`), distributed list instance with the specified name.
"""
return self.proxy.get_or_create(LIST_SERVICE, name)
def get_lock(self, name):
"""
Returns the distributed lock instance with the specified name.
:param name: (str), name of the distributed lock.
:return: (:class:`~hazelcast.proxy.lock.Lock`), distributed lock instance with the specified name.
"""
return self.proxy.get_or_create(LOCK_SERVICE, name)
def get_map(self, name):
"""
Returns the distributed map instance with the specified name.
:param name: (str), name of the distributed map.
:return: (:class:`~hazelcast.proxy.map.Map`), distributed map instance with the specified name.
"""
return self.proxy.get_or_create(MAP_SERVICE, name)
def get_multi_map(self, name):
"""
Returns the distributed MultiMap instance with the specified name.
:param name: (str), name of the distributed MultiMap.
:return: (:class:`~hazelcast.proxy.multi_map.MultiMap`), distributed MultiMap instance with the specified name.
"""
return self.proxy.get_or_create(MULTI_MAP_SERVICE, name)
def get_reliable_topic(self, name):
"""
Returns the :class:`~hazelcast.proxy.reliable_topic.ReliableTopic` instance with the specified name.
:param name: (str), name of the ReliableTopic.
:return: (:class:`~hazelcast.proxy.reliable_topic.ReliableTopic`), the ReliableTopic.
"""
return self.proxy.get_or_create(RELIABLE_TOPIC_SERVICE, name)
def get_replicated_map(self, name):
"""
Returns the distributed ReplicatedMap instance with the specified name.
:param name: (str), name of the distributed ReplicatedMap.
:return: (:class:`~hazelcast.proxy.replicated_map.ReplicatedMap`), distributed ReplicatedMap instance with the specified name.
"""
return self.proxy.get_or_create(REPLICATED_MAP_SERVICE, name)
def get_ringbuffer(self, name):
"""
Returns the distributed RingBuffer instance with the specified name.
:param name: (str), name of the distributed RingBuffer.
:return: (:class:`~hazelcast.proxy.ringbuffer.RingBuffer`), distributed RingBuffer instance with the specified name.
"""
return self.proxy.get_or_create(RINGBUFFER_SERIVCE, name)
def get_semaphore(self, name):
"""
Returns the distributed Semaphore instance with the specified name.
:param name: (str), name of the distributed Semaphore.
:return: (:class:`~hazelcast.proxy.semaphore.Semaphore`), distributed Semaphore instance with the specified name.
"""
return self.proxy.get_or_create(SEMAPHORE_SERVICE, name)
def get_set(self, name):
"""
Returns the distributed Set instance with the specified name.
:param name: (str), name of the distributed Set.
:return: (:class:`~hazelcast.proxy.set.Set`), distributed Set instance with the specified name.
"""
return self.proxy.get_or_create(SET_SERVICE, name)
def get_topic(self, name):
"""
Returns the :class:`~hazelcast.proxy.topic.Topic` instance with the specified name.
:param name: (str), name of the Topic.
:return: (:class:`~hazelcast.proxy.topic.Topic`), the Topic.
"""
return self.proxy.get_or_create(TOPIC_SERVICE, name)
def new_transaction(self, timeout=120, durability=1, type=TWO_PHASE):
"""
Creates a new :class:`~hazelcast.transaction.Transaction` associated with the current thread using default or given options.
:param timeout: (long), the timeout in seconds determines the maximum lifespan of a transaction. So if a
transaction is configured with a timeout of 2 minutes, then it will automatically rollback if it hasn't
committed yet.
:param durability: (int), the durability is the number of machines that can take over if a member fails during a
transaction commit or rollback
:param type: (Transaction Type), the transaction type which can be :const:`~hazelcast.transaction.TWO_PHASE` or :const:`~hazelcast.transaction.ONE_PHASE`
:return: (:class:`~hazelcast.transaction.Transaction`), new Transaction associated with the current thread.
"""
return self.transaction_manager.new_transaction(timeout, durability, type)
def shutdown(self):
"""
Shuts down this HazelcastClient.
"""
if self.lifecycle.is_live:
self.lifecycle.fire_lifecycle_event(LIFECYCLE_STATE_SHUTTING_DOWN)
self.partition_service.shutdown()
self.heartbeat.shutdown()
self.cluster.shutdown()
self.reactor.shutdown()
self.lifecycle.fire_lifecycle_event(LIFECYCLE_STATE_SHUTDOWN)
self.logger.info("Client shutdown.")
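# A minimal usage sketch, kept as comments so importing this module stays side
# effect free; it assumes the enclosing client class defined above is constructed
# against a reachable cluster, and the map/transaction names below are made up
# for illustration:
#   my_map = client.get_map("my-distributed-map")
#   my_map.put("key", "value")
#   tx = client.new_transaction(timeout=60, durability=1)
#   tx.begin()
#   try:
#       # ... transactional work ...
#       tx.commit()
#   except Exception:
#       tx.rollback()
#   client.shutdown()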
|
py | 1a3a52ebff4e517485e0b41bfd65a297a76bd6f7 | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.list_tokens_forwarding_automations_ri import ListTokensForwardingAutomationsRI
globals()['ListTokensForwardingAutomationsRI'] = ListTokensForwardingAutomationsRI
class ListTokensForwardingAutomationsRData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'offset': (int,), # noqa: E501
'limit': (int,), # noqa: E501
'total': (int,), # noqa: E501
'items': ([ListTokensForwardingAutomationsRI],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'offset': 'offset', # noqa: E501
'limit': 'limit', # noqa: E501
'total': 'total', # noqa: E501
'items': 'items', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, offset, limit, total, items, *args, **kwargs): # noqa: E501
"""ListTokensForwardingAutomationsRData - a model defined in OpenAPI
Args:
offset (int): The starting index of the response items, i.e. where the response should start listing the returned items.
limit (int): Defines how many items should be returned in the response per page basis.
total (int): Defines the total number of items returned in the response.
items ([ListTokensForwardingAutomationsRI]):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.offset = offset
self.limit = limit
self.total = total
self.items = items
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, offset, limit, total, items, *args, **kwargs): # noqa: E501
"""ListTokensForwardingAutomationsRData - a model defined in OpenAPI
Args:
offset (int): The starting index of the response items, i.e. where the response should start listing the returned items.
limit (int): Defines how many items should be returned in the response per page basis.
total (int): Defines the total number of items returned in the response.
items ([ListTokensForwardingAutomationsRI]):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.offset = offset
self.limit = limit
self.total = total
self.items = items
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
py | 1a3a53383d86bd29388775da2632e61a0a3d10d9 | from django import forms
from .configs import CNT_CHOICES
class IntegerRecordForm(forms.Form):
content_type = forms.ChoiceField(widget=forms.HiddenInput(), required=True, choices=CNT_CHOICES)
value = forms.IntegerField(
label='Значение',
widget=forms.NumberInput(
attrs={'class':'form-control', 'placeholder':'Введите значение'}
)
)
class FloatRecordForm(forms.Form):
content_type = forms.ChoiceField(widget=forms.HiddenInput(), required=True, choices=CNT_CHOICES)
value = forms.FloatField(
label='Значение',
widget=forms.NumberInput(
attrs={'class':'form-control', 'placeholder':'Введите значение'}
)
)
class TextRecordForm(forms.Form):
content_type = forms.ChoiceField(widget=forms.HiddenInput(), required=True, choices=CNT_CHOICES)
value = forms.CharField(
label='Сообщение',
max_length=400,
widget=forms.Textarea(
attrs={'class':'form-control', 'placeholder':'Введите значение',
'rows': 2}
)
) |
py | 1a3a5476e7502f41e9a5588c98800f8841677c8c | from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers, exceptions
import pyotp
from ..fields import UUIDField
from ..models import Google_Authenticator
from ..utils import decrypt_with_db_secret
class ActivateGASerializer(serializers.Serializer):
google_authenticator_id = UUIDField(required=True)
google_authenticator_token = serializers.CharField(max_length=6, min_length=6, required=True)
def validate(self, attrs: dict) -> dict:
google_authenticator_id = attrs.get('google_authenticator_id', '')
google_authenticator_token = attrs.get('google_authenticator_token', '').strip()
if not google_authenticator_token.isdigit():
msg = _('GA Tokens only contain digits.')
raise exceptions.ValidationError(msg)
try:
google_authenticator = Google_Authenticator.objects.get(pk=google_authenticator_id, user=self.context['request'].user, active=False)
except Google_Authenticator.DoesNotExist:
msg = "NO_PERMISSION_OR_NOT_EXIST"
raise exceptions.ValidationError(msg)
decrypted_ga_secret = decrypt_with_db_secret(google_authenticator.secret)
totp = pyotp.TOTP(decrypted_ga_secret.encode())
if not totp.verify(google_authenticator_token):
msg = _("GA Token incorrect.")
raise exceptions.ValidationError(msg)
attrs['google_authenticator'] = google_authenticator
return attrs
|
py | 1a3a54d34a44c58d8f41d96439efcc492c6e102f | """
ASGI config for hoodprject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hoodprject.settings')
application = get_asgi_application()
|
py | 1a3a559f261dabfdfc5724252b19dacff81dec97 | import yaml
d = {'subcommand': 'lottery', 'platform': 'local', 'display_output_location': False, 'num_workers': 0, 'gpu': '6',
'replicate': 2, 'default_hparams': 'mnist_lenet_300_100', 'quiet': False, 'evaluate_only_at_end': False,
'levels': 0, 'rewinding_steps': None, 'pretrain': False, 'dataset_name': 'fashionmnist', 'batch_size': 128,
'do_not_augment': False, 'transformation_seed': None, 'subsample_fraction': None, 'random_labels_fraction': None,
'unsupervised_labels': None, 'blur_factor': None, 'model_name': 'mnist_lenet_300_100',
'model_init': 'kaiming_normal', 'batchnorm_init': 'uniform', 'batchnorm_frozen': False, 'output_frozen': False,
'others_frozen': False, 'others_frozen_exceptions': None, 'optimizer_name': 'sgd', 'lr': 0.1,
'training_steps': '40ep', 'data_order_seed': None, 'momentum': 0.0, 'nesterov_momentum': 0.0,
'milestone_steps': None, 'gamma': None, 'warmup_steps': None, 'weight_decay': None, 'apex_fp16': False,
'pruning_strategy': 'sparse_global', 'pruning_fraction': 0.2, 'pruning_layers_to_ignore': 'fc.weight'}
with open(r'./myyaml.yaml', 'w') as file:
    yaml.dump(d, file)
|
py | 1a3a5618f0963e8ab23ff3b5f6be867876162baa | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from alpharotate.utils.pretrain_zoo import PretrainModelZoo
from configs._base_.models.retinanet_r50_fpn import *
from configs._base_.datasets.dota_detection import *
from configs._base_.schedules.schedule_1x import *
# schedule
BATCH_SIZE = 1
GPU_GROUP = "0"
NUM_GPU = len(GPU_GROUP.strip().split(','))
LR = 1e-3
SAVE_WEIGHTS_INTE = 5000
DECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE
MAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH
WARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)
# dataset
DATASET_NAME = 'Total_Text'
IMG_SHORT_SIDE_LEN = 512
IMG_MAX_LENGTH = 512
CLASS_NUM = 1
# model
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
# loss
CLS_WEIGHT = 1.0
REG_WEIGHT = 0.01
POINT_SAMPLING_NUM = 12
# post-processing
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 20
FILTERED_SCORE = 0.05
VIS_SCORE = 0.75
VERSION = 'RetinaNet_Total-Text_RIDet_1x_20210519'
"""
FLOPs: 489267589; Trainable params: 33244941
"""
|
py | 1a3a572f525afc257c904d53aca4803762014bbf | #!/usr/bin/python
import sys, os, csv
from options import Program_Options
import benchmark, planners, translation, hypothesis
def custom_partition( s, sep ) :
i = 0
while i < len(s) :
if s[i] == sep : break
i = i + 1
if i == len(s) : return (None,None,None)
if i == 0 : return ( None, s[i], s[i+1:] )
return ( s[:i-1], s[i], s[i+1:] )
def load_hypotheses() :
hyps = []
instream = open( 'hyps.dat' )
for line in instream :
line = line.strip()
H = hypothesis.Probabilistic()
H.atoms = [ tok.strip() for tok in line.split(',') ]
H.check_if_actual()
hyps.append( H )
instream.close()
return hyps
def write_report( experiment, hyps ) :
outstream = open( 'report.txt', 'w' )
print >> outstream, "Experiment=%s"%experiment
print >> outstream, "Num_Hyp=%d"%len(hyps)
for hyp in hyps :
print >> outstream, "Hyp_Atoms=%s"%",".join( hyp.atoms )
if hyp.test_failed :
print >> outstream, "Hyp_Test_Failed=True"
else :
print >> outstream, "Hyp_Test_Failed=False"
print >> outstream, "Hyp_Cost_O=%f"%hyp.cost_O
print >> outstream, "Hyp_Cost_Not_O=%f"%hyp.cost_Not_O
print >> outstream, "Hyp_Prob_O=%f"%hyp.Probability_O
print >> outstream, "Hyp_Prob_Not_O=%f"%hyp.Probability_Not_O
print >> outstream, "Hyp_Plan_Time_O=%f"%hyp.Plan_Time_O
print >> outstream, "Hyp_Plan_Time_Not_O=%f"%hyp.Plan_Time_Not_O
print >> outstream, "Hyp_Trans_Time=%f"%hyp.trans_time
print >> outstream, "Hyp_Plan_Time=%f"%hyp.plan_time
print >> outstream, "Hyp_Test_Time=%f"%hyp.total_time
print >> outstream, "Hyp_Is_True=%s"%hyp.is_true
outstream.close()
def main() :
print sys.argv
options = Program_Options( sys.argv[1:] )
if options.greedy :
planners.LAMA.greedy = True
hyps = load_hypotheses()
hyp_time_bounds = [ options.max_time / len(hyps) for h in hyps ]
for i in range( 0, len(hyps) ) :
hyps[i].test(i, hyp_time_bounds[i], options.max_memory, options.optimal)
if hyps[i].cost_O == 1e7 and hyps[i].cost_Not_O == 1e7 :
hyps[i].test_failed = True
remainder = hyp_time_bounds[i] - hyps[i].total_time
if remainder > 0 :
extra = remainder / (len(hyps)-i)
for j in range(i+1,len(hyps)) :
hyp_time_bounds[j] += extra
write_report(options.exp_file, hyps)
# pack logs, csvs and report.txt
cmd = 'tar jcvf results.tar.bz2 *.pddl *.log report.txt obs.dat hyps.dat prob-*-PR'
os.system( cmd )
cmd = 'rm -rf *.log report.txt *.res *.csv *.res.* *.pddl *.dat prob-*-PR'
os.system( cmd )
if __name__ == '__main__' :
main()
|
py | 1a3a57f84a3593bc5cb667efc5af75d68828dc68 | #!/usr/bin/env python
# encoding: utf-8
import argparse
from zstacklib import *
start_time = datetime.now()
# set default value
file_root = "files/appliancevm"
pip_url = "https=//pypi.python.org/simple/"
proxy = ""
sproxy = ""
chroot_env = 'false'
zstack_repo = 'false'
post_url = ""
chrony_servers = None
pkg_appliancevm = ""
virtualenv_version = "12.1.1"
remote_user = "root"
remote_pass = None
remote_port = None
# get parameter from shell
parser = argparse.ArgumentParser(description='Deploy appliancevm to management node')
parser.add_argument('-i', type=str, help="""specify inventory host file
default=/etc/ansible/hosts""")
parser.add_argument('--private-key', type=str, help='use this file to authenticate the connection')
parser.add_argument('-e', type=str, help='set additional variables as key=value or YAML/JSON')
args = parser.parse_args()
argument_dict = eval(args.e)
locals().update(argument_dict)
# update the variable from shell arguments
virtenv_path = "%s/virtualenv/appliancevm/" % zstack_root
appliancevm_root = "%s/appliancevm/package" % zstack_root
# create log
logger_dir = "/var/log/zstack/"
create_log(logger_dir)
host_post_info = HostPostInfo()
host_post_info.host_inventory = args.i
host_post_info.host = host
host_post_info.post_url = post_url
host_post_info.chrony_servers = chrony_servers
host_post_info.private_key = args.private_key
host_post_info.remote_user = remote_user
host_post_info.remote_pass = remote_pass
host_post_info.remote_port = remote_port
if remote_pass is not None and remote_user != 'root':
host_post_info.become = True
# include zstacklib.py
(distro, distro_version, distro_release) = get_remote_host_info(host_post_info)
zstacklib_args = ZstackLibArgs()
zstacklib_args.distro = distro
zstacklib_args.distro_release = distro_release
zstacklib_args.distro_version = distro_version
zstacklib_args.zstack_repo = zstack_repo
zstacklib_args.yum_server = yum_server
zstacklib_args.zstack_root = zstack_root
zstacklib_args.host_post_info = host_post_info
zstacklib_args.pip_url = pip_url
zstacklib_args.trusted_host = trusted_host
zstacklib = ZstackLib(zstacklib_args)
# name: judge this process is init install or upgrade
if file_dir_exist("path=" + appliancevm_root, host_post_info):
init_install = False
else:
init_install = True
# name: create root directories
command = 'mkdir -p %s %s' % (appliancevm_root, virtenv_path)
run_remote_command(command, host_post_info)
run_remote_command("rm -rf %s/*" % appliancevm_root, host_post_info)
# name: copy zstacklib and install
copy_arg = CopyArg()
copy_arg.src = "files/zstacklib/%s" % pkg_zstacklib
copy_arg.dest = "%s/%s" % (appliancevm_root, pkg_zstacklib)
copy_zstacklib = copy(copy_arg, host_post_info)
# name: copy appliancevm and install
copy_arg = CopyArg()
copy_arg.src = "%s/%s" % (file_root, pkg_appliancevm)
copy_arg.dest = "%s/%s" % (appliancevm_root, pkg_appliancevm)
copy_appliancevm = copy(copy_arg, host_post_info)
# name: copy bootstrap script
copy_arg = CopyArg()
copy_arg.src = "%s/zstack-appliancevm-bootstrap.py" % file_root
copy_arg.dest = '/sbin/zstack-appliancevm-bootstrap.py'
copy_arg.args = "mode=0777"
copy(copy_arg, host_post_info)
# name: copy appliancevm service file
copy_arg = CopyArg()
copy_arg.src = "%s/zstack-appliancevm" % file_root
copy_arg.dest = "/etc/init.d/"
copy_arg.args = "mode=755"
copy(copy_arg, host_post_info)
# name: install virtualenv
virtual_env_status = check_and_install_virtual_env(virtualenv_version, trusted_host, pip_url, host_post_info)
if virtual_env_status is False:
command = "rm -rf %s && rm -rf %s" % (virtenv_path, appliancevm_root)
run_remote_command(command, host_post_info)
sys.exit(1)
# name: make sure virtualenv has been setup
command = "[ -f %s/bin/python ] || virtualenv %s " % (virtenv_path, virtenv_path)
run_remote_command(command, host_post_info)
if distro in RPM_BASED_OS:
if zstack_repo != 'false':
# name: install appliance vm related packages on RedHat based OS from user defined repo
command = ("pkg_list=`rpm -q iputils tcpdump ethtool | grep \"not installed\" | awk '{ print $2 }'` && for pkg"
" in $pkg_list; do yum --disablerepo=* --enablerepo=%s install -y $pkg; done;") % zstack_repo
run_remote_command(command, host_post_info)
else:
# name: install appliance vm related packages on RedHat based OS
for pkg in ['iputils', 'tcpdump', 'ethtool']:
yum_install_package("openssh-clients", host_post_info)
if distro_version >= 7:
# name: workaround RHEL7 iptables service issue
command = 'mkdir -p /var/lock/subsys/'
run_remote_command(command, host_post_info)
# name: remove RHEL7 firewalld
yum_remove_package("firewalld", host_post_info)
# name: copy iptables initial rules in RedHat
copy_arg = CopyArg()
copy_arg.src = "%s/iptables" % file_root
copy_arg.dest = "/etc/sysconfig/iptables"
iptables_copy_result = copy(copy_arg, host_post_info)
if chroot_env == 'false':
if iptables_copy_result != "changed:False":
service_status("iptables", "state=restarted enabled=yes", host_post_info)
else:
# name: enable appliancevm service for RedHat on chroot
service_status("zstack-appliancevm", "enabled=yes state=stopped", host_post_info)
elif distro in DEB_BASED_OS:
install_pkg_list = ['iputils-arping', 'tcpdump', 'ethtool']
apt_install_packages(install_pkg_list, host_post_info)
# name: copy iptables initial rules in Debian
copy_arg = CopyArg()
copy_arg.src = "%s/iptables" % file_root
copy_arg.dest = "/etc/iptables"
copy(copy_arg, host_post_info)
# name: copy iptables initial start script in Debian
copy_arg = CopyArg()
copy_arg.src = "%s/iptables.up" % file_root
copy_arg.dest = "/etc/network/if-pre-up.d/iptables.up"
copy_arg.args = "mode=0777"
iptables_script_result = copy(copy_arg, host_post_info)
if iptables_script_result == "status:changed":
command = "/etc/network/if-pre-up.d/iptables.up"
run_remote_command(command, host_post_info)
# name: enable appliancevm service for Debian -1
command = "sed -i '/zstack-appliancevm start/d' /etc/rc.local"
run_remote_command(command, host_post_info)
# name: enable appliancevm service for Debian -2
update_arg = "insertbefore='^exit 0' line='/etc/init.d/zstack-appliancevm start\n'"
update_file("/etc/rc.local", update_arg, host_post_info)
# name: restore iptables
command = '/etc/network/if-pre-up.d/iptables.up'
run_remote_command(command, host_post_info)
else:
error("unsupported OS!")
# name: install zstacklib
if copy_zstacklib != "changed:False":
agent_install_arg = AgentInstallArg(trusted_host, pip_url, virtenv_path, init_install)
agent_install_arg.agent_name = "appliancevm"
agent_install_arg.agent_root = appliancevm_root
agent_install_arg.pkg_name = pkg_zstacklib
agent_install(agent_install_arg, host_post_info)
# name: install appliancevm
if copy_appliancevm != "changed:False":
agent_install_arg = AgentInstallArg(trusted_host, pip_url, virtenv_path, init_install)
agent_install_arg.agent_name = "appliancevm"
agent_install_arg.agent_root = appliancevm_root
agent_install_arg.pkg_name = pkg_appliancevm
agent_install(agent_install_arg, host_post_info)
if chroot_env == 'false':
# name: restart appliancevm
if distro in RPM_BASED_OS:
command = "service zstack-appliancevm stop && service zstack-appliancevm start && chkconfig zstack-appliancevm on"
elif distro in DEB_BASED_OS:
command = "update-rc.d zstack-appliancevm start 97 3 4 5 . stop 3 0 1 2 6 . && service zstack-appliancevm stop && service zstack-appliancevm start"
run_remote_command(command, host_post_info)
else:
if distro in RPM_BASED_OS:
# name: restart iptables
service_status("iptables", "state=restarted enabled=yes", host_post_info)
host_post_info.start_time = start_time
handle_ansible_info("SUCC: Deploy appliancevm successful", host_post_info, "INFO")
sys.exit(0)
|
py | 1a3a584309705420336639bab7187cab8ac83586 | from typing import Union
from pydantic.types import UUID5
from account.models import JWTModel
import uuid
from time import time
from datetime import datetime, timedelta
from pathlib import Path
from config.conf import JWT_KEY_PATH, JWT_CERT_PATH
from cryptography.x509 import load_pem_x509_certificate
from fastapi import HTTPException
import jwt
class JWT:
rsa_crt_path: Path = JWT_CERT_PATH
rsa_JWT_KEY_PATH: Path = JWT_KEY_PATH
JWT_NAMESPACE: uuid.UUID = uuid.UUID("69d3e8f4-0872-4f7f-9f35-d2ee437e0887")
@classmethod
def jti(cls, uid: str) -> str:
now = round(time() * 1000)
return str(uuid.uuid5(cls.JWT_NAMESPACE, str(uid) + str(now)))
@classmethod
def base_payload(cls, duration: int) -> dict:
now = datetime.utcnow()
nbf = {"nbf": now}
iat = {"iat": now}
exp = {"exp": now + timedelta(days=duration)}
payload = {**nbf, **iat, **exp}
return payload
@classmethod
def create(cls, user: dict, duration=30) -> str:
try:
jti = {"jti": cls.jti(user["uid"])}
key = cls.rsa_JWT_KEY_PATH.read_text()
payload = cls.base_payload(duration)
payload = {**payload, **user, **jti}
token = jwt.encode(payload, key, algorithm="RS256")
return token
except Exception as e:
raise HTTPException(500, "JWT error DAG: " + str(e))
@classmethod
def verify(cls, token: str) -> JWTModel:
try:
crt = cls.rsa_crt_path.read_text()
cert_obj = load_pem_x509_certificate(crt.encode())
public_key = cert_obj.public_key()
# private_key = cert_obj.private_key()
decoded = jwt.decode(token, public_key, algorithms=["RS256"])
return JWTModel(**decoded)
except Exception as e:
raise HTTPException(500, "JWT verify error DAG: " + str(e))
|
py | 1a3a58bbd949bf007b126deff53f7a42a212bad3 | # -*- coding: utf-8 -*-
"""Panagrams.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1r71Y6g3hvbecy-FOcnET1GW-8b-RPFg2
"""
l = input().lower()
s = 'abcdefghijklmnopqrstuvwxyz'
for i in s:
if i not in l:
print('No',end='')
break
else:
print('Yes',end='') |
py | 1a3a59b109897306b0d8c4454649dddebc75a27c | import mdtraj as md
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
import scipy.optimize
import unyt as u
class BondCalculator:
def __init__(self, traj, T):
self.traj = traj
graph = traj.top.to_bondgraph()
bonds = self.identify_bonds(graph)
angles = self.identify_angles(graph)
bond_params = dict()
angle_params = dict()
for bond_type, pairs in bonds.items():
bond_lengths, bond_prob = self.calc_lengths(pairs, range=[0, 1.0])
params = self.calc_parameters(bond_lengths, bond_prob)
k = 2 * u.kb * (T*u.K) / (params[0] * u.nm)**2 * u.Na
l0 = params[1] * u.nm
bond_params[bond_type]= {"k": k, "x0": l0}
for angle_type, triplets in angles.items():
bond_angles, angle_prob = self.calc_angles(triplets, range=[0, 2*np.pi])
params = self.calc_parameters(bond_angles, angle_prob)
k = 2 * u.kb * (T*u.K) / (params[0] * u.rad)**2 * u.Na
t0 = params[1] * u.rad
angle_params[angle_type]= {"k": k, "x0": t0}
self.bond_params = bond_params
self.angle_params = angle_params
def identify_bonds(self, graph):
all_bonds = [edge for edge in graph.edges]
bonds = defaultdict(list)
for bond in all_bonds:
index = tuple(sorted([bond[0].name, bond[1].name]))
pair = tuple([particle.index for particle in bond])
bonds[index].append(pair)
return bonds
def identify_angles(self, graph):
angle_subgraph = nx.Graph()
angle_subgraph.add_edge(0, 1)
angle_subgraph.add_edge(1, 2)
matcher = nx.algorithms.isomorphism.GraphMatcher(graph, angle_subgraph)
all_angles = []
for m in matcher.subgraph_isomorphisms_iter():
all_angles.append(tuple(k for k in m.keys()))
angles = defaultdict(list)
for angle in all_angles:
index = tuple(particle.name for particle in angle)
if angle[0].name < angle[2].name:
index = tuple(reversed(index))
triplet = tuple(particle.index for particle in angle)
angles[index].append(triplet)
return angles
def calc_lengths(self, pairs, range=None):
quantity = md.compute_distances(self.traj, pairs)
hist, edges = np.histogram(quantity, density=True, range=range, bins=200)
bins = (edges[1:]+edges[:-1]) * 0.5
return bins, hist
def calc_angles(self, triplets, range=None):
quantity = md.compute_angles(self.traj, triplets)
hist, edges = np.histogram(quantity, density=True, range=range, bins=200)
bins = (edges[1:]+edges[:-1]) * 0.5
hist /= np.sin(bins)
hist /= np.sum(hist)*(bins[1]-bins[0])
return bins, hist
def cost_function(self, args, x, y):
w, x0 = args
return np.sum((self.gaussian(w, x0, x) - y)**2)
def gaussian(self, w, x0, x):
return ((w * np.sqrt(np.pi / 2))**-1)*(np.exp(-2 * (x - x0)**2 / (w**2)))
def calc_parameters(self, x, y):
res = scipy.optimize.minimize(lambda args: self.cost_function(args, x, y), [np.ptp(x)/10, x[np.argmax(y)]])
return res.x
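# A hedged usage sketch, kept as comments: the file names and temperature are
# assumptions for illustration, and the loaded topology must contain bonds for
# top.to_bondgraph() to produce a useful graph.
#   traj = md.load("traj.dcd", top="system.pdb")
#   calc = BondCalculator(traj, T=300)
#   print(calc.bond_params)
#   print(calc.angle_params)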
|
py | 1a3a59f2299b6f0a41f7bb394caee86baf06025b | class Observation:
__observation: list
def __init__(self, observation: list):
self.__observation = observation
def get_observation(self):
return self.__observation
def set_observation(self, observation: list):
self.__observation = observation
|
py | 1a3a5a005cc3762602ebb4c7f31afe125e0816e4 | #!/usr/bin/env python2.7
# coding: utf-8
import mongo
from user import User
from blockly import Blockly
import time
import os
import sys
import traceback
reload(sys)
sys.setdefaultencoding(sys.getfilesystemencoding())
MIN_GAP = 5
GENERATOR_PATH = "../compiler/generator.py"
PYTHON_CMD = "python2"
while True:
try:
for blo in Blockly.objects(enabled=True):
try:
lastexecution, timesexecuted = blo.lastexecution, blo.timesexecuted
if time.time() - lastexecution < MIN_GAP:
continue
f = open('/tmp/flockly.xml', 'wb')
print blo.content
f.write(blo.content)
f.close()
f = open('./flockly.py', 'wb')
f.write("# coding: utf-8\nfrom fbapi import *\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nsys.tracebacklimit=0\nfrom datetime import datetime\n")
f.close()
os.system("echo [SYSTEM] Generating code > /tmp/flockly.run.log")
os.system(GENERATOR_PATH + " /tmp/flockly.xml >> ./flockly.py 2>>/tmp/flockly.run.log")
os.system("echo [SYSTEM] Generated code: >> /tmp/flockly.run.log")
os.system("cat ./flockly.py >> /tmp/flockly.run.log")
os.system("printf \"[SYSTEM] Started \" >> /tmp/flockly.run.log")
os.system("date >> /tmp/flockly.run.log")
os.system("echo [SYSTEM] Running >> /tmp/flockly.run.log")
os.system("timeout -s KILL 30 " + PYTHON_CMD + " -u ./flockly.py " + blo.userid + " " + str(blo.id) + " 1>>/tmp/flockly.run.log 2>&1")
os.system("printf \"[SYSTEM] End \" >> /tmp/flockly.run.log")
os.system("date >> /tmp/flockly.run.log")
os.unlink('./flockly.py')
blo.lastexecution = int(time.time())
blo.timesexecuted = blo.timesexecuted + 1
blo.logs.append(open('/tmp/flockly.run.log', 'rb').read(102400))
if len(blo.logs) > 5:
blo.logs = blo.logs[-5:]
blo.save()
except Exception as e:
print >>sys.stderr, e
print >>sys.stderr, traceback.format_exc()
finally:
time.sleep(1)
except Exception as e:
print >>sys.stderr, e
print >>sys.stderr, traceback.format_exc()
finally:
time.sleep(1)
|
py | 1a3a5a824259175f6fa77a3a9b5f6ad368f107f5 | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from __future__ import unicode_literals
import re
from ..._env import E2E_FIXTURE_NAME, deserialize_data
CONFIG_MESSAGE_PATTERN = 'DDEV_E2E_START_MESSAGE (.+) DDEV_E2E_END_MESSAGE'
def parse_config_from_result(env, result):
if 'NO E2E FIXTURE AVAILABLE' in result.stdout:
return None, None, 'The environment fixture `{}` does not exist.'.format(E2E_FIXTURE_NAME)
if '{}: platform mismatch'.format(env) in result.stdout:
return None, None, 'The environment `{}` does not support this platform.'.format(env)
decoded = parse_encoded_config_data(result.stdout)
if decoded is None:
return (
None,
None,
(
'{}\n{}\nUnable to parse configuration. Try recreating your env to get the '
'latest version of the dev package.'.format(result.stdout, result.stderr)
),
)
config = decoded['config']
metadata = decoded['metadata']
if config is None:
return None, None, 'The environment fixture `{}` did not yield any configuration.'.format(E2E_FIXTURE_NAME)
return config, metadata, None
def parse_encoded_config_data(output):
match = re.search(CONFIG_MESSAGE_PATTERN, output)
if match:
return deserialize_data(match.group(1))
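# Illustrative note (the payload shown is made up, not real fixture output): the
# E2E fixture is expected to print a single line of the form
#   DDEV_E2E_START_MESSAGE <serialized data> DDEV_E2E_END_MESSAGE
# and parse_encoded_config_data() extracts the middle group and hands it to
# deserialize_data(); when no such line is present it implicitly returns None.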
|
py | 1a3a5b75db896766896e2c2389d277695d5c9eb8 | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls')),
]
|
py | 1a3a5c2d8a056ea3057b01299c5f3a0ddb5d968e | # -*- coding: utf-8 -*-
import numpy as np
"""
This script is for outputting PC1/PC2/PC3 data from preprocd_dataset.npz
of MD 1000K-LCx3 samples
"""
def makePC123(dtsetfile, outfile, grpname):
dtset= np.load(dtsetfile, allow_pickle=True)
    # the allow_pickle option is required because numpy 1.16.3 and later disable pickle loading by default
dts= dtset['dataset']
dataset0=[]
for dt in dts:
dt0=dt['inputs/0']
dataset0.append(dt0)
dim0=len(dataset0)
dim1=len(dataset0[0])
dim2=len(dataset0[0][0])
with open(outfile, 'w') as f1:
for dt64 in dataset0:
for dt in dt64:
wdt=str(dt[0])+" "+str(dt[1])+" "+str(dt[2])+"\n"
f1.write(wdt)
print(f'Saved PC1/PC2/PC3 data of {grpname}: Shape= {dim0} x {dim1} x {dim2}')
if __name__ == '__main__':
mdfolder="/home/okugawa/HDNNP/Si-190808-md"
outfolder=mdfolder+"/result-LC/PC123/"
grps=['1000K0.99', '1000K1.0', '1000K1.01']
for grp in grps:
for j in range(1,11):
grpname=grp+"-"+str(j)
dtsetdir=mdfolder+"/"+grp+"/"+str(j)
dtsetfile=dtsetdir+"/data/CrystalSi64/preprocd_dataset.npz"
outfile=outfolder+grpname+"-PC123.txt"
makePC123(dtsetfile, outfile, grpname)
|
py | 1a3a5c6d3e21f31be080736961830c11de78062e | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup for pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import find_packages
from setuptools import setup
from setuptools.dist import Distribution
__version__ = '1.2b1'
class BinaryDistribution(Distribution):
"""This class is needed in order to create OS specific wheels."""
def has_ext_modules(self):
return True
setup(
name='tensorflow-compression',
version=__version__,
description=('Data compression in TensorFlow'),
url='https://tensorflow.github.io/compression/',
author='Google LLC',
# Contained modules and scripts.
packages=find_packages(),
install_requires=[
'scipy >= 1.0.0',
'tensorflow >= 1.13.0',
],
# Add in any packaged data.
include_package_data=True,
zip_safe=False,
distclass=BinaryDistribution,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
project_urls={
'Documentation': 'https://tensorflow.github.io/compression/docs/api_docs/python/tfc.html',
'Discussion': 'https://groups.google.com/forum/#!forum/tensorflow-compression',
'Source': 'https://github.com/tensorflow/compression',
'Tracker': 'https://github.com/tensorflow/compression/issues',
},
license='Apache 2.0',
keywords='compression data-compression tensorflow machine-learning python deep-learning deep-neural-networks neural-network ml',
)
|
py | 1a3a5d3d3780d517082afed317d090ff66e6ac8c | import time
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from transformers import BartTokenizer, BartForConditionalGeneration
import torch
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Device: {device}")
# Load Model
pretrained = "sshleifer/distilbart-xsum-12-6"
model = BartForConditionalGeneration.from_pretrained(pretrained)
tokenizer = BartTokenizer.from_pretrained(pretrained)
# Switch to cuda, eval mode, and FP16 for faster inference
if device == "cuda":
model = model.half()
model.to(device)
model.eval()
# Define app
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server
controls = dbc.Card(
[
dbc.FormGroup(
[
dbc.Label("Output Length (# Tokens)"),
dcc.Slider(
id="max-length",
min=10,
max=50,
value=30,
marks={i: str(i) for i in range(10, 51, 10)},
),
]
),
dbc.FormGroup(
[
dbc.Label("Beam Size"),
dcc.Slider(
id="num-beams",
min=2,
max=6,
value=4,
marks={i: str(i) for i in [2, 4, 6]},
),
]
),
dbc.FormGroup(
[
dbc.Spinner(
[
dbc.Button("Summarize", id="button-run"),
html.Div(id="time-taken"),
]
)
]
),
],
body=True,
style={"height": "275px"},
)
# Define Layout
app.layout = dbc.Container(
fluid=True,
children=[
html.H1("Dash Automatic Summarization (with DistilBART)"),
html.Hr(),
dbc.Row(
[
dbc.Col(
width=5,
children=[
controls,
dbc.Card(
body=True,
children=[
dbc.FormGroup(
[
dbc.Label("Summarized Content"),
dcc.Textarea(
id="summarized-content",
style={
"width": "100%",
"height": "calc(75vh - 275px)",
},
),
]
)
],
),
],
),
dbc.Col(
width=7,
children=[
dbc.Card(
body=True,
children=[
dbc.FormGroup(
[
dbc.Label("Original Text (Paste here)"),
dcc.Textarea(
id="original-text",
style={"width": "100%", "height": "75vh"},
),
]
)
],
)
],
),
]
),
],
)
@app.callback(
[Output("summarized-content", "value"), Output("time-taken", "children")],
[
Input("button-run", "n_clicks"),
Input("max-length", "value"),
Input("num-beams", "value"),
],
[State("original-text", "value")],
)
def summarize(n_clicks, max_len, num_beams, original_text):
if original_text is None or original_text == "":
return "", "Did not run"
t0 = time.time()
inputs = tokenizer.batch_encode_plus(
[original_text], max_length=1024, return_tensors="pt"
)
inputs = inputs.to(device)
# Generate Summary
summary_ids = model.generate(
inputs["input_ids"],
num_beams=num_beams,
max_length=max_len,
early_stopping=True,
)
out = [
tokenizer.decode(
g, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
for g in summary_ids
]
t1 = time.time()
time_taken = f"Summarized on {device} in {t1-t0:.2f}s"
return out[0], time_taken
if __name__ == "__main__":
app.run_server(debug=True)
|
py | 1a3a5dc378138a65309520696af74dc8f91f1a46 | import requests
import json
import configparser as cfg
class telegram_chatbot():
def __init__(self, config):
self.token = self.read_token_from_config_file(config)
self.base = "https://api.telegram.org/bot{}/".format(self.token)
def get_updates(self, offset=None):
url = self.base + "getUpdates?timeout=100"
if offset:
url = url + "&offset={}".format(offset + 1)
r = requests.get(url)
return json.loads(r.content)
def send_message(self, msg, chat_id):
url = self.base + "sendMessage?chat_id={}&text={}".format(chat_id, msg)
if msg is not None:
requests.get(url)
def read_token_from_config_file(self, config):
parser = cfg.ConfigParser()
parser.read(config)
return parser.get('creds', 'token')
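# A minimal polling-loop sketch, kept as comments; the config file name and the
# echo reply are assumptions for illustration:
#   bot = telegram_chatbot("config.cfg")
#   last_update_id = None
#   while True:
#       updates = bot.get_updates(offset=last_update_id)
#       for update in updates.get("result", []):
#           last_update_id = update["update_id"]
#           chat_id = update["message"]["chat"]["id"]
#           bot.send_message("You said: " + update["message"].get("text", ""), chat_id)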
|
py | 1a3a5e9db0a4872a969d1deaa54607c027ed3607 | from discord.ext.commands import Cog
class Cancer(Cog):
def __init__(self, bot):
self.bot = bot
self.ok_list = [198101180180594688, 246291440106340352]
@Cog.listener()
async def on_member_join(self, member):
if member.guild.id not in self.ok_list:
return
await member.guild.system_channel.send("yes " + member.mention)
@Cog.listener()
async def on_member_remove(self, member):
if member.guild.id not in self.ok_list:
return
await member.guild.system_channel.send("no " + member.mention)
@Cog.listener()
async def on_guild_emojis_update(self, guild, before, after):
if guild.id not in self.ok_list:
return
await guild.system_channel.send("the emojis were updated")
def setup(bot):
bot.add_cog(Cancer(bot))
|
py | 1a3a5f7e53c9034c1bdd167f6123bd52d67a3ed0 | import pandas as pd
train = pd.read_csv('../data/train_mapped.tsv', sep='\t', header=0)
data = pd.DataFrame(columns=['SentenceId','Phrase', 'Sentiment'])
temp = list(train['SentenceId'])
count = 1
for index, row in train.iterrows():
if row['SentenceId'] == count:
data = data.append(row[['SentenceId', 'Phrase', 'Sentiment']])
count += 1
# if count == 2628 or count == 2746 or count == 4044 or count == 4365:
# count += 1
if count not in temp:
print(count)
count += 1
data = data.reset_index()
data = data.drop('index', axis=1)
print(len(data))
data.to_csv('../data/train_extract.tsv', sep='\t', index=False)
|
py | 1a3a601f2b1c79903f440ac71deb4dc5f6bf5deb | """
"""
from __future__ import division
from datetime import date
import logging
from date_helper import *
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def check_date_objects(date1, date2):
    if not (isinstance(date1, date) and isinstance(date2, date)):
        raise InputError("Dates must be instances of the datetime.date class")
class Error(Exception):
"""Base class for exceptions in this module.
"""
pass
class InputError(Error):
"""Exception raised for errors in parameters.
"""
pass
def _days_30_360_main(i_year, i_month, i_day, f_year, f_month, f_day):
"""Base formula calculation for the numerator and denominator of day count 30/360.
"""
num = 360 * (f_year - i_year) + 30 * (f_month - i_month) + (f_day - i_day)
den = 360
log = "[%(num)r/%(den)r]" % {'num':num, 'den':den}
logger.debug(log)
return num / den
def _daycount_act_act_ISDA(i_date, f_date):
"""Return factor to apply for interests between i_date and f_date.
:i_date: initial date.
:f_date: final date.
*i_date* and *f_date* must be instances of datetime.date class from datetime module
act/act, ISDA
Days in a month: actual
Days in a year: actual
Flavor: ISDA
This method splits up the actual number of days falling in leap years and in non-leap years.
The year fraction is the sum of the actual number of days falling in leap years divided by 366 and the actual number of days falling in non-leap years divided by 365.
"""
days_in_commons, days_in_leaps = _days_in_leap_and_common_years(i_date, f_date)
if days_in_commons == 0:
num = days_in_leaps
den = 366
elif days_in_leaps == 0:
num = days_in_commons
den = 365
else:
num = (366 * days_in_commons) + (365 * days_in_leaps)
den = 133590 #least common multiple between 366 and 365
log = "%(f_name)r(%(i_date)r, %(f_date)r)" % {'f_name':'daycount_act_act_ISDA', 'i_date':i_date, 'f_date':f_date}
logger.debug(log)
log = "[%(num)r/%(den)r]" % {'num':num, 'den':den}
logger.debug(log)
return num / den
def _daycount_act_act_Euro(i_date, f_date):
"""Return factor to apply for interests between i_date and f_date.
:i_date: initial date.
:f_date: final date.
*i_date* and *f_date* must be instances of datetime.date class from the datetime module
act/act, Euro, AFB
Days in a month: actual
Days in a year: actual
This method first calculates the number of full years counting backwards from the second date.
For any resulting stub periods, the numerator is the actual number of days in the period, the denominator being 365 or 366 depending on whether February 29th falls in the stub period.
"""
# delta = f_date - i_date
# days1 = delta.days
#
# log = "%(f_name)r(%(i_date)r, %(f_date)r)" % {'f_name':'daycount_act_act_Euro', 'i_date':i_date, 'f_date':f_date}
# logger.debug(log)
# log = "[%(num)r/%(den)r]" % {'num':num, 'den':den}
# logger.debug(log)
# return num / den
def _daycount_act_365_Fixed(i_date, f_date):
"""Return factor to apply for interests between i_date and f_date.
:i_date: initial date.
:f_date: final date.
*i_date* and *f_date* must be instances of datetime.date class from the datetime module
act/365, act/365 fixed
Days in a month: actual
Days in a year: 365 Always
Flavor: Fixed
    The year fraction is the actual number of days between the two dates divided by a fixed denominator of 365, regardless of leap years.
"""
delta = f_date - i_date
num = delta.days
den = 365
log = "%(f_name)r(%(i_date)r, %(f_date)r)" % {'f_name':'daycount_act_365_Fixed', 'i_date':i_date, 'f_date':f_date}
logger.debug(log)
log = "[%(num)r/%(den)r]" % {'num':num, 'den':den}
logger.debug(log)
return num / den
def _daycount_30_360(i_date, f_date):
"""Return factor to apply for interests between i_date and f_date.
:i_date: initial date.
:f_date: final date.
*i_date* and *f_date* must be instances of datetime.date class from the datetime module
Days in a month: 30
Days in a year: 360
Flavor: None
"""
i_year = i_date.year
i_month = i_date.month
i_day = i_date.day
f_year = f_date.year
f_month = f_date.month
f_day = f_date.day
log = "%(f_name)r(%(i_date)r, %(f_date)r)" % {'f_name':'daycount_30_360', 'i_date':i_date, 'f_date':f_date}
logger.debug(log)
factor = _days_30_360_main(i_year, i_month, i_day, f_year, f_month, f_day)
return factor
def _daycount_30_360_US(i_date, f_date):
"""Return factor to apply for interests between i_date and f_date.
:i_date: initial date.
:f_date: final date.
*i_date* and *f_date* must be instances of datetime.date class from the datetime module
Days in a month: 30
Days in a year: 360
Flavor: US
"""
i_year = i_date.year
i_month = i_date.month
i_day = i_date.day
f_year = f_date.year
f_month = f_date.month
f_day = f_date.day
if (i_date.month == 2 and _is_end_of_month(i_date)) and (f_date.month == 2 and _is_end_of_month(f_date)):
f_day = 30
if (i_date.month == 2 and _is_end_of_month(i_date)):
i_day = 30
if (f_day == 31) and (i_day in [30, 31]):
f_day = 30
if (i_day == 31):
i_day = 30
log = "%(f_name)r(%(i_date)r, %(f_date)r)" % {'f_name':'daycount_30_360_US', 'i_date':i_date, 'f_date':f_date}
logger.debug(log)
factor = _days_30_360_main(i_year, i_month, i_day, f_year, f_month, f_day)
return factor
class InterestFactor(object):
""".
Usage::
>>> date1 = date(2012, 2, 5)
>>> date2 = date(2012, 4, 6)
>>> myCounter = DayCounter(30, 360, 'fixed')
>>> myCounter.count(date1, date2)
>>>
"""
def __init__(self, dim=30, diy=360, flavor=None):
"""Constructor.
"""
self.dim = dim
self.diy = diy
self.flavor = flavor
method = '_'.join([str(self.dim), str(self.diy), str(self.flavor)])
        try:
            self.factor = self._methods[method]
        except KeyError:
            raise InputError("Unsupported day count convention: %s" % method)
def __repr__(self):
"""Representation.
"""
return "interestFactor(dim=%(dim)r, diy=%(diy)r, flavor=%(flavor)r)" % {'dim':self.dim, 'diy':self.diy, 'flavor':self.flavor}
_methods = {
'30_360_None': _daycount_30_360,
'30_360_US': _daycount_30_360_US,
        'act_365_Fixed': _daycount_act_365_Fixed,
'act_act_ISDA': _daycount_act_act_ISDA,
'act_act_Euro': _daycount_act_act_Euro,
}
if __name__ == '__main__':
date1 = date(2012, 2, 5)
date2 = date(2012, 4, 6)
days360 = InterestFactor(30, 360)
print(days360)
print(days360.factor(date1, date2))
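    # A further example, hedged: act/act ISDA splits the accrual between leap
    # and common years, so a period spanning the 2011/2012 year end mixes /365
    # and /366 denominators; the exact split depends on how date_helper counts
    # the days, so the factor is simply printed here rather than asserted.
    actact = InterestFactor('act', 'act', 'ISDA')
    print(actact)
    print(actact.factor(date(2011, 12, 30), date(2012, 1, 2)))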
|
py | 1a3a61a334af6211ae7f3f0ebee70c4b469c066f | # How do you find the elements that occur most often in a sequence?
# Question: suppose you have a list of words and you want to find out which word appears most frequently.
words = [
'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',
'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',
'eyes', "don't", 'look', 'around', 'the', 'eyes', 'look', 'into',
'my', 'eyes', "you're", 'under'
]
print(words)
from collections import Counter
word_counts = Counter(words)
# The 3 words that appear most frequently
top_three = word_counts.most_common(3)
print(top_three)
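# Counter objects also support in-place updates, which is convenient when the
# words arrive in batches (the extra words below are made up for illustration):
more_words = ['why', 'are', 'you', 'not', 'looking', 'in', 'my', 'eyes']
word_counts.update(more_words)
print(word_counts.most_common(3))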
|
py | 1a3a61d2a8fbb74e6ff0651530b7018d931ae31f | # %% [markdown]
# ##
import os
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import Parallel, delayed
from sklearn.exceptions import ConvergenceWarning
from sklearn.manifold import MDS, TSNE, Isomap
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.utils.testing import ignore_warnings
from tqdm.autonotebook import tqdm
from umap import UMAP
from graspy.embed import (
AdjacencySpectralEmbed,
ClassicalMDS,
LaplacianSpectralEmbed,
OmnibusEmbed,
select_dimension,
selectSVD,
)
from graspy.plot import pairplot
from graspy.simulations import sbm
from graspy.utils import (
augment_diagonal,
binarize,
pass_to_ranks,
symmetrize,
to_laplace,
)
from src.align import Procrustes
from src.cluster import MaggotCluster, get_paired_inds
from src.data import load_metagraph
from src.graph import preprocess
from src.hierarchy import signal_flow
from src.io import savecsv, savefig
from src.visualization import (
CLASS_COLOR_DICT,
add_connections,
adjplot,
barplot_text,
draw_networkx_nice,
gridmap,
matrixplot,
palplot,
screeplot,
set_axes_equal,
stacked_barplot,
)
warnings.filterwarnings(action="ignore", category=ConvergenceWarning)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
rc_dict = {
"axes.spines.right": False,
"axes.spines.top": False,
"axes.formatter.limits": (-3, 3),
"figure.figsize": (6, 3),
"figure.dpi": 100,
}
for key, val in rc_dict.items():
mpl.rcParams[key] = val
context = sns.plotting_context(context="talk", font_scale=1, rc=rc_dict)
sns.set_context(context)
np.random.seed(8888)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def stashcsv(df, name, **kws):
savecsv(df, name, foldername=FNAME, **kws)
graph_type = "G"
def plot_pairs(
X, labels, model=None, left_pair_inds=None, right_pair_inds=None, equal=False
):
n_dims = X.shape[1]
fig, axs = plt.subplots(
n_dims, n_dims, sharex=False, sharey=False, figsize=(20, 20)
)
data = pd.DataFrame(data=X)
data["label"] = labels
for i in range(n_dims):
for j in range(n_dims):
ax = axs[i, j]
ax.axis("off")
if i < j:
sns.scatterplot(
data=data,
x=j,
y=i,
ax=ax,
alpha=0.7,
linewidth=0,
s=8,
legend=False,
hue="label",
palette=CLASS_COLOR_DICT,
)
if left_pair_inds is not None and right_pair_inds is not None:
add_connections(
data.iloc[left_pair_inds, j],
data.iloc[right_pair_inds, j],
data.iloc[left_pair_inds, i],
data.iloc[right_pair_inds, i],
ax=ax,
)
plt.tight_layout()
return fig, axs
def preprocess_adjs(adjs, method="ase"):
adjs = [pass_to_ranks(a) for a in adjs]
adjs = [a + 1 / a.size for a in adjs]
if method == "ase":
adjs = [augment_diagonal(a) for a in adjs]
elif method == "lse":
adjs = [to_laplace(a) for a in adjs]
return adjs
def omni(
adjs,
n_components=4,
remove_first=None,
concat_graphs=True,
concat_directed=True,
method="ase",
):
adjs = preprocess_adjs(adjs, method=method)
omni = OmnibusEmbed(n_components=n_components, check_lcc=False, n_iter=10)
embed = omni.fit_transform(adjs)
if concat_directed:
embed = np.concatenate(
embed, axis=-1
) # this is for left/right latent positions
if remove_first is not None:
embed = embed[remove_first:]
if concat_graphs:
embed = np.concatenate(embed, axis=0)
return embed
def ipsi_omni(adj, lp_inds, rp_inds, co_adj=None, n_components=4, method="ase"):
ll_adj = adj[np.ix_(lp_inds, lp_inds)]
rr_adj = adj[np.ix_(rp_inds, rp_inds)]
ipsi_adjs = [ll_adj, rr_adj]
if co_adj is not None:
co_ll_adj = co_adj[np.ix_(lp_inds, lp_inds)]
co_rr_adj = co_adj[np.ix_(rp_inds, rp_inds)]
ipsi_adjs += [co_ll_adj, co_rr_adj]
out_ipsi, in_ipsi = omni(
ipsi_adjs,
n_components=n_components,
concat_directed=False,
concat_graphs=False,
method=method,
)
left_embed = np.concatenate((out_ipsi[0], in_ipsi[0]), axis=1)
right_embed = np.concatenate((out_ipsi[1], in_ipsi[1]), axis=1)
ipsi_embed = np.concatenate((left_embed, right_embed), axis=0)
return ipsi_embed
def contra_omni(adj, lp_inds, rp_inds, co_adj=None, n_components=4, method="ase"):
lr_adj = adj[np.ix_(lp_inds, rp_inds)]
rl_adj = adj[np.ix_(rp_inds, lp_inds)]
contra_adjs = [lr_adj, rl_adj]
if co_adj is not None:
co_lr_adj = co_adj[np.ix_(lp_inds, rp_inds)]
co_rl_adj = co_adj[np.ix_(rp_inds, lp_inds)]
contra_adjs += [co_lr_adj, co_rl_adj]
out_contra, in_contra = omni(
contra_adjs,
n_components=n_components,
concat_directed=False,
concat_graphs=False,
method=method,
)
left_embed = np.concatenate((out_contra[0], in_contra[1]), axis=1)
right_embed = np.concatenate((out_contra[1], in_contra[0]), axis=1)
contra_embed = np.concatenate((left_embed, right_embed), axis=0)
return contra_embed
def lateral_omni(adj, lp_inds, rp_inds, n_components=4, method="ase"):
ipsi_embed = ipsi_omni(
adj, lp_inds, rp_inds, n_components=n_components, method=method
)
contra_embed = contra_omni(
adj, lp_inds, rp_inds, n_components=n_components, method=method
)
embed = np.concatenate((ipsi_embed, contra_embed), axis=1)
return embed
def multi_lateral_omni(adjs, lp_inds, rp_inds, n_components=4):
ipsi_adjs = []
for a in adjs:
ll_adj = a[np.ix_(lp_inds, lp_inds)]
rr_adj = a[np.ix_(rp_inds, rp_inds)]
ipsi_adjs.append(ll_adj)
ipsi_adjs.append(rr_adj)
ipsi_embed = omni(ipsi_adjs, concat_graphs=False, n_components=n_components)
left = []
right = []
for i, e in enumerate(ipsi_embed):
if i % 2 == 0:
left.append(e)
else:
right.append(e)
left = np.concatenate(left, axis=1)
right = np.concatenate(right, axis=1)
ipsi_embed = np.concatenate((left, right), axis=0)
contra_adjs = []
for a in adjs:
lr_adj = a[np.ix_(lp_inds, rp_inds)]
rl_adj = a[np.ix_(rp_inds, lp_inds)]
contra_adjs.append(lr_adj)
contra_adjs.append(rl_adj)
contra_embed = omni(contra_adjs, concat_graphs=False, n_components=n_components)
left = []
right = []
for i, e in enumerate(contra_embed):
if i % 2 == 0:
left.append(e)
else:
right.append(e)
left = np.concatenate(left, axis=1)
right = np.concatenate(right, axis=1)
contra_embed = np.concatenate((left, right), axis=0)
embed = np.concatenate((ipsi_embed, contra_embed), axis=1)
return embed
def reg_lateral_omni(adj, base_adj, lp_inds, rp_inds, n_components=4):
base_ll_adj = base_adj[np.ix_(lp_inds, lp_inds)]
base_rr_adj = base_adj[np.ix_(rp_inds, rp_inds)]
ll_adj = adj[np.ix_(lp_inds, lp_inds)]
rr_adj = adj[np.ix_(rp_inds, rp_inds)]
ipsi_adjs = [base_ll_adj, base_rr_adj, ll_adj, rr_adj]
ipsi_embed = omni(ipsi_adjs, remove_first=2, n_components=n_components)
base_lr_adj = base_adj[np.ix_(lp_inds, rp_inds)]
base_rl_adj = base_adj[np.ix_(rp_inds, lp_inds)]
lr_adj = adj[np.ix_(lp_inds, rp_inds)]
rl_adj = adj[np.ix_(rp_inds, lp_inds)]
contra_adjs = [base_lr_adj, base_rl_adj, lr_adj, rl_adj]
contra_embed = omni(contra_adjs, remove_first=2, n_components=n_components)
embed = np.concatenate((ipsi_embed, contra_embed), axis=1)
return embed
def quick_embed_viewer(
embed, labels=None, lp_inds=None, rp_inds=None, left_right_indexing=False
):
if left_right_indexing:
lp_inds = np.arange(len(embed) // 2)
rp_inds = np.arange(len(embed) // 2) + len(embed) // 2
fig, axs = plt.subplots(3, 2, figsize=(20, 30))
cmds = ClassicalMDS(n_components=2)
cmds_euc = cmds.fit_transform(embed)
plot_df = pd.DataFrame(data=cmds_euc)
plot_df["labels"] = labels
plot_kws = dict(
x=0,
y=1,
hue="labels",
palette=CLASS_COLOR_DICT,
legend=False,
s=20,
linewidth=0.5,
alpha=0.7,
)
ax = axs[0, 0]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("CMDS o euclidean")
cmds = ClassicalMDS(n_components=2, dissimilarity="precomputed")
pdist = symmetrize(pairwise_distances(embed, metric="cosine"))
cmds_cos = cmds.fit_transform(pdist)
plot_df[0] = cmds_cos[:, 0]
plot_df[1] = cmds_cos[:, 1]
ax = axs[0, 1]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("CMDS o cosine")
tsne = TSNE(metric="euclidean")
tsne_euc = tsne.fit_transform(embed)
plot_df[0] = tsne_euc[:, 0]
plot_df[1] = tsne_euc[:, 1]
ax = axs[1, 0]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("TSNE o euclidean")
tsne = TSNE(metric="precomputed")
tsne_cos = tsne.fit_transform(pdist)
plot_df[0] = tsne_cos[:, 0]
plot_df[1] = tsne_cos[:, 1]
ax = axs[1, 1]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("TSNE o cosine")
umap = UMAP(metric="euclidean", n_neighbors=30, min_dist=1)
umap_euc = umap.fit_transform(embed)
plot_df[0] = umap_euc[:, 0]
plot_df[1] = umap_euc[:, 1]
ax = axs[2, 0]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("UMAP o euclidean")
umap = UMAP(metric="cosine", n_neighbors=30, min_dist=1)
umap_cos = umap.fit_transform(embed)
plot_df[0] = umap_cos[:, 0]
plot_df[1] = umap_cos[:, 1]
ax = axs[2, 1]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("UMAP o cosine")
def umapper(embed, metric="euclidean", n_neighbors=30, min_dist=1, **kws):
umap = UMAP(metric=metric, n_neighbors=n_neighbors, min_dist=min_dist)
umap_euc = umap.fit_transform(embed)
plot_df = pd.DataFrame(data=umap_euc)
plot_df["labels"] = labels
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
plot_kws = dict(
x=0,
y=1,
hue="labels",
palette=CLASS_COLOR_DICT,
legend=False,
s=20,
linewidth=0.5,
alpha=0.7,
)
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
left_right_indexing = True
if left_right_indexing:
tlp_inds = np.arange(len(embed) // 2)
trp_inds = np.arange(len(embed) // 2) + len(embed) // 2
add_connections(
plot_df.iloc[tlp_inds, 0],
plot_df.iloc[trp_inds, 0],
plot_df.iloc[tlp_inds, 1],
plot_df.iloc[trp_inds, 1],
ax=ax,
)
return fig, ax
# %% [markdown]
# ## Load and preprocess data
VERSION = "2020-04-23"
graph_type = "G"
master_mg = load_metagraph(graph_type, version="2020-04-23")
mg = preprocess(
master_mg,
threshold=0,
sym_threshold=False,
remove_pdiff=True,
binarize=False,
weight="weight",
)
meta = mg.meta
degrees = mg.calculate_degrees()
quant_val = np.quantile(degrees["Total edgesum"], 0.05)
# remove low degree neurons
idx = meta[degrees["Total edgesum"] > quant_val].index
print(quant_val)
mg = mg.reindex(idx, use_ids=True)
# remove center neurons # FIXME
idx = mg.meta[mg.meta["hemisphere"].isin(["L", "R"])].index
mg = mg.reindex(idx, use_ids=True)
idx = mg.meta[mg.meta["Pair"].isin(mg.meta.index)].index
mg = mg.reindex(idx, use_ids=True)
mg = mg.make_lcc()
mg.calculate_degrees(inplace=True)
meta = mg.meta
meta["pair_td"] = meta["Pair ID"].map(meta.groupby("Pair ID")["Total degree"].mean())
mg = mg.sort_values(["pair_td", "Pair ID"], ascending=False)
meta["inds"] = range(len(meta))
adj = mg.adj.copy()
lp_inds, rp_inds = get_paired_inds(meta)
left_inds = meta[meta["left"]]["inds"]
print(len(mg))
# %% [markdown]
# ## Plot the ipsilateral connectomes
if meta["pair_td"].max() > 0:
meta["pair_td"] = -meta["pair_td"]
ll_adj = adj[np.ix_(lp_inds, lp_inds)]
rr_adj = adj[np.ix_(rp_inds, rp_inds)]
left_meta = meta.iloc[lp_inds]
right_meta = meta.iloc[rp_inds]
plot_kws = dict(
plot_type="scattermap",
sort_class="merge_class",
item_order=["pair_td", "Pair ID"],
colors="merge_class",
palette=CLASS_COLOR_DICT,
ticks=False,
class_order="pair_td",
sizes=(1, 1),
gridline_kws=dict(linewidth=0.2, color="grey", linestyle="--"),
)
plot_adjs = False
if plot_adjs:
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
_, _, top, _ = adjplot(ll_adj, ax=axs[0], meta=left_meta, **plot_kws)
top.set_title(r"L $\to$ L")
_, _, top, _ = adjplot(rr_adj, ax=axs[1], meta=right_meta, **plot_kws)
top.set_title(r"R $\to$ R")
plt.tight_layout()
stashfig("ipsilateral-adj")
lr_adj = adj[np.ix_(lp_inds, rp_inds)]
rl_adj = adj[np.ix_(rp_inds, lp_inds)]
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
_, _, top, _ = adjplot(lr_adj, ax=axs[0], meta=left_meta, **plot_kws)
top.set_title(r"L $\to$ R")
_, _, top, _ = adjplot(rl_adj, ax=axs[1], meta=right_meta, **plot_kws)
top.set_title(r"R $\to$ L")
plt.tight_layout()
stashfig("contralateral-adj")
# %% [markdown]
# ## Load the 4-color graphs
graph_types = ["Gad", "Gaa", "Gdd", "Gda"]
adjs = []
for g in graph_types:
temp_mg = load_metagraph(g, version=VERSION)
temp_mg.reindex(mg.meta.index, use_ids=True)
temp_adj = temp_mg.adj
adjs.append(temp_adj)
# %% [markdown]
# ## simple demo of "in" vs "out" latent positions
# blocks 0, 1 differ only in their inputs, not their outputs
B = np.array(
[
[0.1, 0.1, 0.2, 0.05],
[0.1, 0.1, 0.2, 0.05],
[0.35, 0.15, 0.1, 0.1],
[0.1, 0.05, 0.3, 0.4],
]
)
sns.heatmap(B, square=True, annot=True)
sbm_sample, sbm_labels = sbm([100, 100, 100, 100], B, directed=True, return_labels=True)
ase = AdjacencySpectralEmbed()
out_embed, in_embed = ase.fit_transform(sbm_sample)
pairplot(out_embed, sbm_labels) # don't see separation between [0, 1]
pairplot(in_embed, sbm_labels) # do see separation between [0, 1]
# from this we can conclude that the "right" embedding or right singular vectors are the
# ones corresponding to input
# (out, in)
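# A small quantitative check of the conclusion above (a sketch, not from the
# original analysis; assumes scikit-learn's silhouette_score is available):
# blocks 0 and 1 differ only in their inputs, so the in-embedding should
# separate them much better than the out-embedding.
from sklearn.metrics import silhouette_score
first_two = sbm_labels < 2
print("silhouette (out):", silhouette_score(out_embed[first_two], sbm_labels[first_two]))
print("silhouette (in):", silhouette_score(in_embed[first_two], sbm_labels[first_two]))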
# %% [markdown]
# ## Options for the embedding
# - ASE and procrustes (not shown here)
# - Bilateral OMNI on G, SVD
# - Bilateral OMNI on each of the 4-colors, concatenated, SVD
# - Bilateral OMNI on each of the 4-colors, with regularization, concatenated, SVD
# - Bilateral OMNI jointly with all 4-colors
n_omni_components = 8  # this is used for all of the embeddings initially
n_svd_components = 16 # this is for the last step
def svd(X, n_components=n_svd_components):
return selectSVD(X, n_components=n_components, algorithm="full")[0]
# %% [markdown]
# ## only contra
# just_contra_embed = omni(
# [full_adjs[0], full_adjs[2]],
# n_components=n_omni_components,
# remove_first=None,
# concat_graphs=True,
# concat_directed=True,
# method="ase",
# )
# svd_contra_embed = svd(just_contra_embed)
# %% [markdown]
# # Omni of contra/ipsi together
full_adjs = [
adj[np.ix_(lp_inds, lp_inds)],
adj[np.ix_(lp_inds, rp_inds)],
adj[np.ix_(rp_inds, rp_inds)],
adj[np.ix_(rp_inds, lp_inds)],
]
out_embed, in_embed = omni(
full_adjs,
n_components=n_omni_components,
remove_first=None,
concat_graphs=False,
concat_directed=False,
method="ase",
)
# ipsi out, contra out, ipsi in, contra in
left_embed = np.concatenate(
(out_embed[0], out_embed[1], in_embed[0], in_embed[3]), axis=1
)
right_embed = np.concatenate(
(out_embed[2], out_embed[3], in_embed[2], in_embed[1]), axis=1
)
omni_naive_embed = np.concatenate((left_embed, right_embed), axis=0)
ase_naive_embed = svd(omni_naive_embed)
# ##
# out_embed, in_embed = omni(
# full_adjs,
# n_components=n_omni_components,
# remove_first=None,
# concat_graphs=False,
# concat_directed=False,
# method="lse",
# )
# # ipsi out, contra out, ipsi in, contra in
# left_embed = np.concatenate(
# (out_embed[0], out_embed[1], in_embed[0], in_embed[3]), axis=1
# )
# right_embed = np.concatenate(
# (out_embed[2], out_embed[3], in_embed[2], in_embed[1]), axis=1
# )
# omni_naive_embed = np.concatenate((left_embed, right_embed), axis=0)
# lse_naive_embed = svd(omni_naive_embed)
# %% [markdown]
# ## Bilateral OMNI on G, SVD
omni_flat_embed = lateral_omni(
adj, lp_inds, rp_inds, n_components=n_omni_components, method="ase"
)
ase_flat_embed = svd(omni_flat_embed)
# %% [markdown]
# ## just compare
# %% [markdown]
# ## Bilateral OMNI on each of the 4-colors, concatenated, SVD
omni_multi_embed = []
for a in adjs:
omni_multi_embed.append(
lateral_omni(a, lp_inds, rp_inds, n_components=n_omni_components)
)
omni_multi_embed = np.concatenate(omni_multi_embed, axis=1)
ase_multi_embed = svd(omni_multi_embed)
# %% [markdown]
# ## Bilateral OMNI on each of the 4-colors, with regularization, concatenated, SVD
omni_reg_embed = []
for a in adjs:
omni_reg_embed.append(
reg_lateral_omni(a, adj, lp_inds, rp_inds, n_components=n_omni_components)
)
omni_reg_embed = np.concatenate(omni_reg_embed, axis=1)
ase_reg_embed = svd(omni_reg_embed)
# %% [markdown]
# ## Bilateral OMNI on all 4-colors
adjs_and_sum = adjs + [adj]
omni_joint_embed = multi_lateral_omni(
adjs_and_sum, lp_inds, rp_inds, n_components=n_omni_components
)
ase_joint_embed = svd(omni_joint_embed)
# %% [markdown]
# ## Compute neighbors at K
new_lp_inds = np.arange(len(mg) // 2)
new_rp_inds = np.arange(len(mg) // 2) + len(mg) // 2
def compute_neighbors_at_k(X, left_inds, right_inds, k_max=10, metric="euclidean"):
nn = NearestNeighbors(radius=0, n_neighbors=k_max + 1, metric=metric)
nn.fit(X)
neigh_dist, neigh_inds = nn.kneighbors(X)
is_neighbor_mat = np.zeros((X.shape[0], k_max), dtype=bool)
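    # neigh_inds[:, 0] is each node itself, so checking the first k + 2 entries
    # below asks whether a node's pair partner is among its top k + 1 non-self
    # neighbors (reported as K = k + 1 in the output Series)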
for left_ind, right_ind in zip(left_inds, right_inds):
left_neigh_inds = neigh_inds[left_ind]
right_neigh_inds = neigh_inds[right_ind]
for k in range(k_max):
if right_ind in left_neigh_inds[: k + 2]:
is_neighbor_mat[left_ind, k] = True
if left_ind in right_neigh_inds[: k + 2]:
is_neighbor_mat[right_ind, k] = True
neighbors_at_k = np.sum(is_neighbor_mat, axis=0) / is_neighbor_mat.shape[0]
neighbors_at_k = pd.Series(data=neighbors_at_k, index=np.arange(1, k_max + 1))
neighbors_at_k.name = "p_at_k"
return neighbors_at_k
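# Sanity-check sketch for the pair-recovery metric above (illustrative only,
# not part of the original analysis): if the "left" and "right" points are
# near-duplicates of each other, every pair should be recovered at K = 1,
# so p_at_k should be 1.0 for every K.
_toy = np.random.RandomState(0).normal(size=(10, 3))
_toy_X = np.concatenate((_toy, _toy + 1e-6), axis=0)
print(compute_neighbors_at_k(_toy_X, np.arange(10), np.arange(10) + 10, k_max=3))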
# names = ["flat", "multi", "joint", "reg", "naive"]
# embeds = [
# ase_flat_embed,
# ase_multi_embed,
# ase_joint_embed,
# ase_reg_embed,
# ase_naive_embed,
# ]
names = ["iso", "aniso", "multi"]
embeds = [ase_naive_embed, ase_flat_embed, ase_multi_embed]
dims = np.arange(1, 16)
dfs = []
for d in dims:
for name, embed in zip(names, embeds):
p_at_k = compute_neighbors_at_k(embed[:, :d], new_lp_inds, new_rp_inds)
neighbor_df = p_at_k.to_frame()
neighbor_df.reset_index(inplace=True)
neighbor_df.rename(columns={"index": "K"}, inplace=True)
neighbor_df["method"] = name
neighbor_df["d"] = d
dfs.append(neighbor_df)
neighbor_df = pd.concat(dfs, ignore_index=True)
# %% [markdown]
# ## Plot nearest neighbor results
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
k = 5
sns.lineplot(
data=neighbor_df[neighbor_df["K"] == k],
x="d",
y="p_at_k",
hue="method",
style="method",
# style_order=["reg", "joint", "multi", "flat"],
)
ax.set_ylabel(f"P @ K = {k}")
ax.set_xlabel("# dimensions")
stashfig(f"p_at_k={k}_embed-iso-aniso-multi")
# %% [markdown]
# ## Look at the best one! (ish)
new_meta = meta.iloc[np.concatenate((lp_inds, rp_inds), axis=0)].copy()
labels = new_meta["merge_class"].values
plot_pairs(
ase_flat_embed[:, :8],
labels,
left_pair_inds=new_lp_inds,
right_pair_inds=new_rp_inds,
)
stashfig("ase-flat-pairs")
quick_embed_viewer(
ase_flat_embed[:, :8], labels=labels, lp_inds=new_lp_inds, rp_inds=new_rp_inds
)
stashfig("ase-flat-manifold")
# %% [markdown]
# ## Now, try to do a similar quantification but for classes
# KC
# MBON
# MBIN
# ORN
# UPN
# some of the antennal lobe stuff
def class_neighbors_at_k(X, labels, target, k_max=10, metric="euclidean"):
nn = NearestNeighbors(radius=0, n_neighbors=k_max + 1, metric=metric)
nn.fit(X)
neigh_dist, neigh_inds = nn.kneighbors(X)
neigh_inds = neigh_inds[:, 1:] # remove self as neighbor
mask = labels == target
target_inds = np.arange(len(X))[mask]
target_neigh_inds = neigh_inds[mask]
p_nearby = []
neighbors_in_target = np.isin(target_neigh_inds, target_inds)
for k in np.arange(1, k_max + 1):
p_nearby_at_k = neighbors_in_target[:, :k].sum() / (k * len(target_inds))
p_nearby.append(p_nearby_at_k)
p_nearby = np.array(p_nearby)
neighbor_df = pd.DataFrame(data=p_nearby, index=np.arange(1, k_max + 1))
neighbor_df.index.name = "K"
neighbor_df.rename(columns={0: target}, inplace=True)
return neighbor_df
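# Illustrative sketch (not part of the original analysis): a tight, isolated
# "target" cluster of 5 points should give p_at_k close to 1 for K <= 4 and
# then decay once K exceeds the number of other target members.
_rng = np.random.RandomState(0)
_toy_X = np.concatenate(
    (_rng.normal(scale=1e-3, size=(5, 2)), 10 + _rng.normal(size=(20, 2))), axis=0
)
_toy_labels = np.array(["target"] * 5 + ["other"] * 20)
print(class_neighbors_at_k(_toy_X, _toy_labels, "target", k_max=6))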
new_meta = meta.iloc[np.concatenate((lp_inds, rp_inds), axis=0)].copy()
labels = new_meta["merge_class"].values
k_max = 10
embed_df = []
for name, embed in zip(names, embeds):
neighbor_df = []
for d in np.arange(1, 16):
X = embed[:, :d]
class1 = new_meta["class1"].values
neighbors = []
for target in ["uPN", "sens-ORN"]:
neighbors.append(class_neighbors_at_k(X, labels, target))
for target in ["KC", "mPN", "MBON", "MBIN"]:
neighbors.append(class_neighbors_at_k(X, class1, target))
neighbors = pd.concat(neighbors, ignore_index=False, axis=1)
neighbors = neighbors.reset_index()
neighbors = neighbors.melt(value_name="p_at_k", var_name="class", id_vars=["K"])
neighbors["d"] = d
neighbor_df.append(neighbors)
neighbor_df = pd.concat(neighbor_df, axis=0)
neighbor_df["method"] = name
embed_df.append(neighbor_df)
embed_df = pd.concat(embed_df, axis=0)
# k = 5
# temp_df = embed_df[embed_df["K"] == k]
# fig, axs = plt.subplots(2, 2, figsize=(20, 10), sharex=True, sharey=True)
# axs = axs.ravel()
# for i, name in enumerate(names):
# ax = axs[i]
# plot_df = temp_df[temp_df["method"] == name]
# sns.lineplot(data=plot_df, x="d", y="p_at_k", hue="class", ax=ax)
# ax.set_title(name)
# ax.get_legend().remove()
# plt.tight_layout()
# ax.legend(bbox_to_anchor=(1, 1), loc="upper left")
# hard to compare directly on the above
# %% [markdown]
# ##
# fix d
# one plot for each class
# line for each of the embeddings
k = 5
plot_df = embed_df[embed_df["K"] == k]
# plot_df = plot_df[plot_df["d"] == d]
classes = ["uPN", "sens-ORN", "KC", "mPN", "MBON", "MBIN"]
fig, axs = plt.subplots(2, 3, figsize=(20, 10), sharex=True, sharey=True)
axs = axs.ravel()
for i, cell_class in enumerate(classes):
ax = axs[i]
temp_df = plot_df[plot_df["class"] == cell_class]
sns.lineplot(
data=temp_df,
x="d",
y="p_at_k",
hue="method",
ax=ax,
style="method",
# style_order=["reg", "joint", "multi", "flat"],
)
ax.set_title(cell_class)
axs[0].set_ylabel(f"Prop. @ K = {k}")
axs[3].set_ylabel(f"Prop. @ K = {k}")
plt.tight_layout()
stashfig(f"embed-class-knn-k={k}")
# %%
# # Notes
# I like aniso better than iso
# not sure about reg or not
# for sides, we have {iso, aniso}
# for method, we have {lse, ase}
# for color, we have {flat, multi (separate), joint (omni), reg (multi but with G)}
# there seems to be no single embedding that is winning at everything.
n_levels = 12
metric = "bic"
bic_ratio = 1
d = 8
basename = f"aniso-omni-bic_ratio={bic_ratio}-d={d}"
mc = MaggotCluster(
"0",
adj=adj,
n_init=25,
meta=new_meta,
stashfig=stashfig,
min_clusters=1,
max_clusters=3,
X=ase_flat_embed[:, :d],
bic_ratio=bic_ratio,
reembed=False,
min_split=4,
)
for i in range(n_levels):
for j, node in enumerate(mc.get_lowest_level()):
node.fit_candidates(show_plot=False)
for j, node in enumerate(mc.get_lowest_level()):
node.select_model(k=None, metric=metric)
mc.collect_labels()
n_levels = mc.height
fig, axs = plt.subplots(1, n_levels, figsize=(10 * n_levels, 40))
for i in range(n_levels):
ax = axs[i]
stacked_barplot(
mc.meta[f"lvl{i}_labels_side"],
mc.meta["merge_class"],
category_order=np.unique(mc.meta[f"lvl{i}_labels_side"].values),
color_dict=CLASS_COLOR_DICT,
norm_bar_width=False,
ax=ax,
)
ax.set_yticks([])
ax.get_legend().remove()
plt.tight_layout()
stashfig(f"count-barplot-lvl{i}" + basename)
plt.close()
fig, axs = plt.subplots(1, n_levels, figsize=(10 * n_levels, 40))
for i in range(n_levels):
ax = axs[i]
stacked_barplot(
mc.meta[f"lvl{i}_labels_side"],
mc.meta["merge_class"],
category_order=np.unique(mc.meta[f"lvl{i}_labels_side"].values),
color_dict=CLASS_COLOR_DICT,
norm_bar_width=True,
ax=ax,
)
ax.set_yticks([])
ax.get_legend().remove()
plt.tight_layout()
stashfig(f"prop-barplot-lvl{i}" + basename)
plt.close()
inds = np.concatenate((lp_inds, rp_inds))
new_adj = adj[np.ix_(inds, inds)]
new_meta = mc.meta
new_meta["sf"] = -signal_flow(new_adj)
for l in range(n_levels):
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
sort_class = [f"lvl{i}_labels" for i in range(l)]
sort_class += [f"lvl{l}_labels_side"]
adjplot(
new_adj,
meta=new_meta,
sort_class=sort_class,
item_order="merge_class",
plot_type="scattermap",
class_order="sf",
sizes=(0.5, 1),
ticks=False,
colors="merge_class",
ax=ax,
palette=CLASS_COLOR_DICT,
gridline_kws=dict(linewidth=0.2, color="grey", linestyle="--"),
)
stashfig(f"adj-lvl{l}" + basename)
plt.close()
pairs = np.unique(new_meta["Pair ID"])
p_same_clusters = []
for l in range(n_levels):
n_same = 0
for p in pairs:
if new_meta[new_meta["Pair ID"] == p][f"lvl{l}_labels"].nunique() == 1:
n_same += 1
p_same = n_same / len(pairs)
print(p_same)
p_same_clusters.append(p_same)
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
sns.lineplot(x=range(n_levels), y=p_same_clusters, ax=ax)
sns.scatterplot(x=range(n_levels), y=p_same_clusters, ax=ax)
ax.set_ylabel("P same cluster")
ax.set_xlabel("Level")
stashfig("p_in_same_cluster" + basename)
n_clusters = []
for l in range(n_levels):
n_clusters.append(new_meta[f"lvl{l}_labels"].nunique())
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
sns.lineplot(x=range(n_levels), y=n_clusters, ax=ax)
sns.scatterplot(x=range(n_levels), y=n_clusters, ax=ax)
ax.set_ylabel("Clusters per side")
ax.set_xlabel("Level")
stashfig("n_cluster" + basename)
size_dfs = []
for l in range(n_levels):
sizes = new_meta.groupby(f"lvl{l}_labels_side").size().values
sizes = pd.DataFrame(data=sizes, columns=["Size"])
sizes["Level"] = l
size_dfs.append(sizes)
size_df = pd.concat(size_dfs)
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
sns.stripplot(data=size_df, x="Level", y="Size", ax=ax, jitter=0.45, alpha=0.5)
ax.set_yscale("log")
stashfig("log-sizes" + basename)
# %% [markdown]
# ## some other kind of visualization
import networkx as nx
import colorcet as cc
def to_minigraph(
adj,
labels,
drop_neg=True,
remove_diag=True,
size_scaler=1,
use_counts=False,
use_weights=True,
color_map=None,
):
# convert the adjacency and a partition to a minigraph based on SBM probs
prob_df = get_blockmodel_df(
adj, labels, return_counts=use_counts, use_weights=use_weights
)
if drop_neg and ("-1" in prob_df.index):
prob_df.drop("-1", axis=0, inplace=True)
prob_df.drop("-1", axis=1, inplace=True)
if remove_diag:
adj = prob_df.values
adj -= np.diag(np.diag(adj))
prob_df = pd.DataFrame(data=adj, index=prob_df.index, columns=prob_df.columns)
g = nx.from_pandas_adjacency(prob_df, create_using=nx.DiGraph())
uni_labels, counts = np.unique(labels, return_counts=True)
# add size attribute base on number of vertices
size_map = dict(zip(uni_labels, size_scaler * counts))
nx.set_node_attributes(g, size_map, name="Size")
# add signal flow attribute (for the minigraph itself)
mini_adj = nx.to_numpy_array(g, nodelist=uni_labels)
node_signal_flow = signal_flow(mini_adj)
sf_map = dict(zip(uni_labels, node_signal_flow))
nx.set_node_attributes(g, sf_map, name="Signal Flow")
# add spectral properties
# sym_adj = symmetrize(mini_adj)
# n_components = 10
# latent = AdjacencySpectralEmbed(n_components=n_components).fit_transform(sym_adj)
# for i in range(n_components):
# latent_dim = latent[:, i]
# lap_map = dict(zip(uni_labels, latent_dim))
# nx.set_node_attributes(g, lap_map, name=f"AdjEvec-{i}")
# add spring layout properties
pos = nx.spring_layout(g)
spring_x = {}
spring_y = {}
for key, val in pos.items():
spring_x[key] = val[0]
spring_y[key] = val[1]
nx.set_node_attributes(g, spring_x, name="Spring-x")
nx.set_node_attributes(g, spring_y, name="Spring-y")
# add colors
if color_map is None:
color_map = dict(zip(uni_labels, cc.glasbey_light))
nx.set_node_attributes(g, color_map, name="Color")
return g
from src.visualization import draw_networkx_nice
from src.utils import get_blockmodel_df
for l in range(n_levels):
labels = new_meta[f"lvl{l}_labels_side"].values
# block_df = get_blockmodel_df(new_adj, labels, return_counts=False, use_weights=True)
mini_g = to_minigraph(new_adj, labels, use_counts=True, use_weights=True)
draw_networkx_nice(
mini_g,
"Spring-x",
"Signal Flow",
colors="Color",
sizes="Size",
weight_scale=1 / 1000,
)
# %%
from src.visualization import plot_neurons
from src.pymaid import start_instance
lvl = 4
uni_labels = np.unique(new_meta[f"lvl{lvl}_labels"])
start_instance()
for label in uni_labels:
plot_neurons(new_meta, f"lvl{lvl}_labels", label=label, barplot=True)
stashfig(f"label{label}_lvl{lvl}" + basename)
# %% [markdown]
# ## Do the distance thing for Michael
d = 12
X = ase_flat_embed[:, :d]
n_pairs = len(X) // 2
new_lp_inds = np.arange(n_pairs)
new_rp_inds = np.arange(n_pairs).copy() + n_pairs
left_X = X[new_lp_inds]
right_X = X[new_rp_inds]
left_meta = meta.iloc[lp_inds]
right_meta = meta.iloc[rp_inds]
# get nearest right neighbor for everyone on the left
def rank_neighbors(source_X, target_X, metric="euclidean"):
n_target = len(target_X)
n_source = len(source_X)
nn = NearestNeighbors(radius=0, n_neighbors=n_target, metric=metric)
nn.fit(target_X)
neigh_dist, neigh_inds = nn.kneighbors(source_X)
source_rank_neighbors = np.empty((n_source, n_target), dtype=int)
for i in range(n_source):
source_rank_neighbors[i, neigh_inds[i]] = np.arange(1, n_target + 1, dtype=int)
return source_rank_neighbors
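# Quick illustration of the rank convention (a sketch, not from the original
# analysis): rank 1 means "nearest neighbor in the other hemisphere", so a
# single source at 0.1 against targets at 0, 1, 2 should get ranks [1, 2, 3].
print(rank_neighbors(np.array([[0.1]]), np.array([[0.0], [1.0], [2.0]])))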
left_neighbors = rank_neighbors(left_X, right_X)
right_neighbors = rank_neighbors(right_X, left_X)
left_df = pd.DataFrame(
index=left_meta.index, columns=right_meta.index, data=left_neighbors
)
stashcsv(left_df, f"left_rank_neighbors_on_right-aniso_omni-d={d}")
right_df = pd.DataFrame(
index=right_meta.index, columns=left_meta.index, data=right_neighbors
)
stashcsv(right_df, f"right_rank_neighbors_on_left-aniso_omni-d={d}")
# %% [markdown]
# ##
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
sns.distplot(
np.diag(left_neighbors), bins=np.arange(0, n_pairs, 1), kde=False, norm_hist=True
)
sns.distplot(
np.diag(right_neighbors), bins=np.arange(0, n_pairs, 1), kde=False, norm_hist=True
)
ax.set_xlim((0, 20))
ax.set_xticks(np.arange(0, 20, 2))
# ax.xaxis.set_major_locator(plt.IndexLocator(1, 2))
# %%
|
py | 1a3a6202e71aae4470b2d9f75e338976e4e2d5f5 | """
Mask R-CNN
Multi-GPU Support for Keras.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
Ideas and a small code snippets from these sources:
https://github.com/fchollet/keras/issues/2436
https://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012
https://github.com/avolkov1/keras_experiments/blob/master/keras_exp/multigpu/
https://github.com/fchollet/keras/blob/master/keras/utils/training_utils.py
"""
# import tensorflow as tf
# use the TF1 compatibility API because TF2 does not support placeholders
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import keras.backend as K
import keras.layers as KL
import keras.models as KM
class ParallelModel(KM.Model):
"""Subclasses the standard Keras Model and adds multi-GPU support.
It works by creating a copy of the model on each GPU. Then it slices
the inputs and sends a slice to each copy of the model, and then
merges the outputs together and applies the loss on the combined
outputs.
"""
def __init__(self, keras_model, gpu_count):
"""Class constructor.
keras_model: The Keras model to parallelize
gpu_count: Number of GPUs. Must be > 1
"""
self.inner_model = keras_model
self.gpu_count = gpu_count
merged_outputs = self.make_parallel()
super(ParallelModel, self).__init__(inputs=self.inner_model.inputs,
outputs=merged_outputs)
def __getattribute__(self, attrname):
"""Redirect loading and saving methods to the inner model. That's where
the weights are stored."""
if 'load' in attrname or 'save' in attrname:
return getattr(self.inner_model, attrname)
return super(ParallelModel, self).__getattribute__(attrname)
def summary(self, *args, **kwargs):
"""Override summary() to display summaries of both, the wrapper
and inner models."""
super(ParallelModel, self).summary(*args, **kwargs)
self.inner_model.summary(*args, **kwargs)
def make_parallel(self):
"""Creates a new wrapper model that consists of multiple replicas of
the original model placed on different GPUs.
"""
# Slice inputs. Slice inputs on the CPU to avoid sending a copy
# of the full inputs to all GPUs. Saves on bandwidth and memory.
input_slices = {name: tf.split(x, self.gpu_count)
for name, x in zip(self.inner_model.input_names,
self.inner_model.inputs)}
output_names = self.inner_model.output_names
outputs_all = []
for i in range(len(self.inner_model.outputs)):
outputs_all.append([])
# Run the model call() on each GPU to place the ops there
for i in range(self.gpu_count):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i):
# Run a slice of inputs through this replica
zipped_inputs = zip(self.inner_model.input_names,
self.inner_model.inputs)
inputs = [
KL.Lambda(lambda s: input_slices[name][i],
output_shape=lambda s: (None,) + s[1:])(tensor)
for name, tensor in zipped_inputs]
# Create the model replica and get the outputs
outputs = self.inner_model(inputs)
if not isinstance(outputs, list):
outputs = [outputs]
# Save the outputs for merging back together later
for l, o in enumerate(outputs):
outputs_all[l].append(o)
# Merge outputs on CPU
with tf.device('/cpu:0'):
merged = []
for outputs, name in zip(outputs_all, output_names):
# Concatenate or average outputs?
# Outputs usually have a batch dimension and we concatenate
# across it. If they don't, then the output is likely a loss
# or a metric value that gets averaged across the batch.
# Keras expects losses and metrics to be scalars.
if K.int_shape(outputs[0]) == ():
# Average
m = KL.Lambda(lambda o: tf.add_n(o) / len(outputs), name=name)(outputs)
else:
# Concatenate
m = KL.Concatenate(axis=0, name=name)(outputs)
merged.append(m)
return merged
if __name__ == "__main__":
# Testing code below. It creates a simple model to train on MNIST and
# tries to run it on 2 GPUs. It saves the graph so it can be viewed
# in TensorBoard. Run it as:
#
# python3 parallel_model.py
import os
import numpy as np
import keras.optimizers
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
GPU_COUNT = 2
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
def build_model(x_train, num_classes):
# Reset default graph. Keras leaves old ops in the graph,
# which are ignored for execution but clutter graph
# visualization in TensorBoard.
tf.reset_default_graph()
inputs = KL.Input(shape=x_train.shape[1:], name="input_image")
x = KL.Conv2D(32, (3, 3), activation='relu', padding="same",
name="conv1")(inputs)
x = KL.Conv2D(64, (3, 3), activation='relu', padding="same",
name="conv2")(x)
x = KL.MaxPooling2D(pool_size=(2, 2), name="pool1")(x)
x = KL.Flatten(name="flat1")(x)
x = KL.Dense(128, activation='relu', name="dense1")(x)
x = KL.Dense(num_classes, activation='softmax', name="dense2")(x)
return KM.Model(inputs, x, "digit_classifier_model")
# Load MNIST Data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train, -1).astype('float32') / 255
x_test = np.expand_dims(x_test, -1).astype('float32') / 255
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
# Build data generator and model
datagen = ImageDataGenerator()
model = build_model(x_train, 10)
# Add multi-GPU support.
model = ParallelModel(model, GPU_COUNT)
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=5.0)
model.compile(loss='sparse_categorical_crossentropy',
optimizer=optimizer, metrics=['accuracy'])
model.summary()
# Train
model.fit_generator(
datagen.flow(x_train, y_train, batch_size=64),
steps_per_epoch=50, epochs=10, verbose=1,
validation_data=(x_test, y_test),
callbacks=[keras.callbacks.TensorBoard(log_dir=MODEL_DIR,
write_graph=True)]
)
|