MomentFactory/Omniverse-MVR-GDTF-converter/src/usd-plugins/fileFormat/mvrFileFormat/mvrParser/MVRParser.h
#pragma once
#include "Fixture.h"
#include "../gdtfParser/ModelSpecification.h"
#include <vector>
#include <stack>
#include <string>
#include <map>
#include <memory>
#include <stdexcept>
namespace miniz_cpp2
{
class zip_file;
}
using ZipFile = miniz_cpp2::zip_file;
namespace MVR {
enum class FileType
{
GDTF,
MODEL,
XML,
UNKNOWN
};
struct File
{
std::string name;
std::string content;
};
class MVRParser
{
public:
MVRParser() = default;
~MVRParser() = default;
std::vector<LayerSpecification> ParseMVRFile(const std::string& path);
inline bool HasError() const { return !m_Errors.empty(); }
std::string PopError()
{
if (!HasError())
{
throw std::runtime_error("Error stack is empty.");
}
auto msg = m_Errors.top();
m_Errors.pop();
return msg;
}
bool HasGDTFSpecification(const std::string& name) const;
GDTF::GDTFSpecification GetGDTFSpecification(const std::string& name);
private:
const std::string m_SceneDescriptionFileName = "GeneralSceneDescription.xml";
std::string m_TargetPath;
std::stack<std::string> m_Errors;
std::vector<LayerSpecification> m_Layers;
std::map<std::string, GDTF::GDTFSpecification> m_GDTFSpecifications;
// File handling
void HandleZipFile(std::shared_ptr<ZipFile> zipFile);
void HandleXML(const File& file);
// Utilities
bool FileExists(const std::string& path) const;
std::string GetFileExtension(const std::string& path);
FileType GetFileTypeFromExtension(const std::string& extension);
std::vector<std::string> StringSplit(const std::string& input, const char delimiter);
};
}
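// Usage sketch (hypothetical caller, not part of this header): only the API
// declared above is assumed, and LayerSpecification is taken to live in the
// MVR namespace since it is used unqualified in this file.
//
//   MVR::MVRParser parser;
//   std::vector<MVR::LayerSpecification> layers = parser.ParseMVRFile("show.mvr");
//   while (parser.HasError())
//   {
//       // Drain the error stack, most recent message first.
//       printf("MVR parse error: %s\n", parser.PopError().c_str());
//   }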
MomentFactory/Omniverse-MVR-GDTF-converter/src/usd-plugins/fileFormat/gdtfFileFormat/api.h
// Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_GDTF_API_H_
#define OMNI_GDTF_API_H_
#include "pxr/base/arch/export.h"
#if defined(PXR_STATIC)
# define GDTF_API
# define GDTF_API_TEMPLATE_CLASS(...)
# define GDTF_API_TEMPLATE_STRUCT(...)
# define GDTF_LOCAL
#else
# if defined(GDTFFILEFORMAT_EXPORTS)
# define GDTF_API ARCH_EXPORT
# define GDTF_API_TEMPLATE_CLASS(...) ARCH_EXPORT_TEMPLATE(class, __VA_ARGS__)
# define GDTF_API_TEMPLATE_STRUCT(...) ARCH_EXPORT_TEMPLATE(struct, __VA_ARGS__)
# else
# define GDTF_API ARCH_IMPORT
# define GDTF_API_TEMPLATE_CLASS(...) ARCH_IMPORT_TEMPLATE(class, __VA_ARGS__)
# define GDTF_API_TEMPLATE_STRUCT(...) ARCH_IMPORT_TEMPLATE(struct, __VA_ARGS__)
# endif
# define GDTF_LOCAL ARCH_HIDDEN
#endif
#endif
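// Usage sketch: these macros implement the usual export/import pattern for a
// shared-library USD plugin. A class declared in this module would use the
// macro like so (the class name here is hypothetical):
//
//   class GDTF_API GdtfFileFormat : public SdfFileFormat { /* ... */ };
//
// When the gdtfFileFormat module itself is built, GDTFFILEFORMAT_EXPORTS is
// defined and GDTF_API expands to ARCH_EXPORT; in consuming code it expands
// to ARCH_IMPORT. GDTF_LOCAL (ARCH_HIDDEN) keeps a symbol out of the
// shared-library interface entirely.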
MomentFactory/Omniverse-MVR-GDTF-converter/src/usd-plugins/fileFormat/gdtfFileFormat/tinyxml2.h
/*
Original code by Lee Thomason (www.grinninglizard.com)
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any
damages arising from the use of this software.
Permission is granted to anyone to use this software for any
purpose, including commercial applications, and to alter it and
redistribute it freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product documentation
would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and
must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source
distribution.
*/
#ifndef TINYXML2_INCLUDED
#define TINYXML2_INCLUDED
#if defined(ANDROID_NDK) || defined(__BORLANDC__) || defined(__QNXNTO__)
# include <ctype.h>
# include <limits.h>
# include <stdio.h>
# include <stdlib.h>
# include <string.h>
# if defined(__PS3__)
# include <stddef.h>
# endif
#else
# include <cctype>
# include <climits>
# include <cstdio>
# include <cstdlib>
# include <cstring>
#endif
#include <stdint.h>
/*
TODO: intern strings instead of allocation.
*/
/*
gcc:
g++ -Wall -DTINYXML2_DEBUG tinyxml2.cpp xmltest.cpp -o gccxmltest.exe
Formatting, Artistic Style:
AStyle.exe --style=1tbs --indent-switches --break-closing-brackets --indent-preprocessor tinyxml2.cpp tinyxml2.h
*/
#if defined( _DEBUG ) || defined (__DEBUG__)
# ifndef TINYXML2_DEBUG
# define TINYXML2_DEBUG
# endif
#endif
#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable: 4251)
#endif
#ifdef _WIN32
# ifdef TINYXML2_EXPORT
# define TINYXML2_LIB __declspec(dllexport)
# elif defined(TINYXML2_IMPORT)
# define TINYXML2_LIB __declspec(dllimport)
# else
# define TINYXML2_LIB
# endif
#elif __GNUC__ >= 4
# define TINYXML2_LIB __attribute__((visibility("default")))
#else
# define TINYXML2_LIB
#endif
#if !defined(TIXMLASSERT)
#if defined(TINYXML2_DEBUG)
# if defined(_MSC_VER)
# // "(void)0," is for suppressing C4127 warning in "assert(false)", "assert(true)" and the like
# define TIXMLASSERT( x ) do { if ( !((void)0,(x))) { __debugbreak(); } } while(false)
# elif defined (ANDROID_NDK)
# include <android/log.h>
# define TIXMLASSERT( x ) do { if ( !(x)) { __android_log_assert( "assert", "grinliz", "ASSERT in '%s' at %d.", __FILE__, __LINE__ ); } } while(false)
# else
# include <assert.h>
# define TIXMLASSERT assert
# endif
#else
# define TIXMLASSERT( x ) do {} while(false)
#endif
#endif
/* Versioning, past 1.0.14:
http://semver.org/
*/
static const int TIXML2_MAJOR_VERSION = 9;
static const int TIXML2_MINOR_VERSION = 0;
static const int TIXML2_PATCH_VERSION = 0;
#define TINYXML2_MAJOR_VERSION 9
#define TINYXML2_MINOR_VERSION 0
#define TINYXML2_PATCH_VERSION 0
// A fixed element depth limit is problematic. There needs to be a
// limit to avoid a stack overflow. However, that limit varies per
// system, and the capacity of the stack. On the other hand, it's a trivial
// attack that can result from ill, malicious, or even correctly formed XML,
// so there needs to be a limit in place.
static const int TINYXML2_MAX_ELEMENT_DEPTH = 500;
namespace tinyxml2
{
class XMLDocument;
class XMLElement;
class XMLAttribute;
class XMLComment;
class XMLText;
class XMLDeclaration;
class XMLUnknown;
class XMLPrinter;
/*
A class that wraps strings. Normally stores the start and end
pointers into the XML file itself, and will apply normalization
and entity translation if actually read. Can also store (and memory
manage) a traditional char[]
It isn't clear why TINYXML2_LIB is needed, but it seems to fix #719
*/
class TINYXML2_LIB StrPair
{
public:
enum Mode {
NEEDS_ENTITY_PROCESSING = 0x01,
NEEDS_NEWLINE_NORMALIZATION = 0x02,
NEEDS_WHITESPACE_COLLAPSING = 0x04,
TEXT_ELEMENT = NEEDS_ENTITY_PROCESSING | NEEDS_NEWLINE_NORMALIZATION,
TEXT_ELEMENT_LEAVE_ENTITIES = NEEDS_NEWLINE_NORMALIZATION,
ATTRIBUTE_NAME = 0,
ATTRIBUTE_VALUE = NEEDS_ENTITY_PROCESSING | NEEDS_NEWLINE_NORMALIZATION,
ATTRIBUTE_VALUE_LEAVE_ENTITIES = NEEDS_NEWLINE_NORMALIZATION,
COMMENT = NEEDS_NEWLINE_NORMALIZATION
};
StrPair() : _flags( 0 ), _start( 0 ), _end( 0 ) {}
~StrPair();
void Set( char* start, char* end, int flags ) {
TIXMLASSERT( start );
TIXMLASSERT( end );
Reset();
_start = start;
_end = end;
_flags = flags | NEEDS_FLUSH;
}
const char* GetStr();
bool Empty() const {
return _start == _end;
}
void SetInternedStr( const char* str ) {
Reset();
_start = const_cast<char*>(str);
}
void SetStr( const char* str, int flags=0 );
char* ParseText( char* in, const char* endTag, int strFlags, int* curLineNumPtr );
char* ParseName( char* in );
void TransferTo( StrPair* other );
void Reset();
private:
void CollapseWhitespace();
enum {
NEEDS_FLUSH = 0x100,
NEEDS_DELETE = 0x200
};
int _flags;
char* _start;
char* _end;
StrPair( const StrPair& other ); // not supported
void operator=( const StrPair& other ); // not supported, use TransferTo()
};
/*
A dynamic array of Plain Old Data. Doesn't support constructors, etc.
Has a small initial memory pool, so that low or no usage will not
cause a call to new/delete
*/
template <class T, int INITIAL_SIZE>
class DynArray
{
public:
DynArray() :
_mem( _pool ),
_allocated( INITIAL_SIZE ),
_size( 0 )
{
}
~DynArray() {
if ( _mem != _pool ) {
delete [] _mem;
}
}
void Clear() {
_size = 0;
}
void Push( T t ) {
TIXMLASSERT( _size < INT_MAX );
EnsureCapacity( _size+1 );
_mem[_size] = t;
++_size;
}
T* PushArr( int count ) {
TIXMLASSERT( count >= 0 );
TIXMLASSERT( _size <= INT_MAX - count );
EnsureCapacity( _size+count );
T* ret = &_mem[_size];
_size += count;
return ret;
}
T Pop() {
TIXMLASSERT( _size > 0 );
--_size;
return _mem[_size];
}
void PopArr( int count ) {
TIXMLASSERT( _size >= count );
_size -= count;
}
bool Empty() const {
return _size == 0;
}
T& operator[](int i) {
TIXMLASSERT( i>= 0 && i < _size );
return _mem[i];
}
const T& operator[](int i) const {
TIXMLASSERT( i>= 0 && i < _size );
return _mem[i];
}
const T& PeekTop() const {
TIXMLASSERT( _size > 0 );
return _mem[ _size - 1];
}
int Size() const {
TIXMLASSERT( _size >= 0 );
return _size;
}
int Capacity() const {
TIXMLASSERT( _allocated >= INITIAL_SIZE );
return _allocated;
}
void SwapRemove(int i) {
TIXMLASSERT(i >= 0 && i < _size);
TIXMLASSERT(_size > 0);
_mem[i] = _mem[_size - 1];
--_size;
}
const T* Mem() const {
TIXMLASSERT( _mem );
return _mem;
}
T* Mem() {
TIXMLASSERT( _mem );
return _mem;
}
private:
DynArray( const DynArray& ); // not supported
void operator=( const DynArray& ); // not supported
void EnsureCapacity( int cap ) {
TIXMLASSERT( cap > 0 );
if ( cap > _allocated ) {
TIXMLASSERT( cap <= INT_MAX / 2 );
const int newAllocated = cap * 2;
T* newMem = new T[newAllocated];
TIXMLASSERT( newAllocated >= _size );
memcpy( newMem, _mem, sizeof(T)*_size ); // warning: not using constructors, only works for PODs
if ( _mem != _pool ) {
delete [] _mem;
}
_mem = newMem;
_allocated = newAllocated;
}
}
T* _mem;
T _pool[INITIAL_SIZE];
int _allocated; // objects allocated
int _size; // number objects in use
};
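// Illustration: DynArray is the internal POD vector used throughout this
// header (block pointers, the unlinked-node list). The initial pool lives
// inline in the object, so small arrays never touch the heap.
//
//   DynArray<int, 10> arr;     // first 10 elements use the inline pool
//   arr.Push( 1 );
//   arr.Push( 2 );
//   int top = arr.Pop();       // top == 2; asserts (in debug) when empty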
/*
Parent virtual class of a pool for fast allocation
and deallocation of objects.
*/
class MemPool
{
public:
MemPool() {}
virtual ~MemPool() {}
virtual int ItemSize() const = 0;
virtual void* Alloc() = 0;
virtual void Free( void* ) = 0;
virtual void SetTracked() = 0;
};
/*
Template child class to create pools of the correct type.
*/
template< int ITEM_SIZE >
class MemPoolT : public MemPool
{
public:
MemPoolT() : _blockPtrs(), _root(0), _currentAllocs(0), _nAllocs(0), _maxAllocs(0), _nUntracked(0) {}
~MemPoolT() {
MemPoolT< ITEM_SIZE >::Clear();
}
void Clear() {
// Delete the blocks.
while( !_blockPtrs.Empty()) {
Block* lastBlock = _blockPtrs.Pop();
delete lastBlock;
}
_root = 0;
_currentAllocs = 0;
_nAllocs = 0;
_maxAllocs = 0;
_nUntracked = 0;
}
virtual int ItemSize() const {
return ITEM_SIZE;
}
int CurrentAllocs() const {
return _currentAllocs;
}
virtual void* Alloc() {
if ( !_root ) {
// Need a new block.
Block* block = new Block;
_blockPtrs.Push( block );
Item* blockItems = block->items;
for( int i = 0; i < ITEMS_PER_BLOCK - 1; ++i ) {
blockItems[i].next = &(blockItems[i + 1]);
}
blockItems[ITEMS_PER_BLOCK - 1].next = 0;
_root = blockItems;
}
Item* const result = _root;
TIXMLASSERT( result != 0 );
_root = _root->next;
++_currentAllocs;
if ( _currentAllocs > _maxAllocs ) {
_maxAllocs = _currentAllocs;
}
++_nAllocs;
++_nUntracked;
return result;
}
virtual void Free( void* mem ) {
if ( !mem ) {
return;
}
--_currentAllocs;
Item* item = static_cast<Item*>( mem );
#ifdef TINYXML2_DEBUG
memset( item, 0xfe, sizeof( *item ) );
#endif
item->next = _root;
_root = item;
}
void Trace( const char* name ) {
printf( "Mempool %s watermark=%d [%dk] current=%d size=%d nAlloc=%d blocks=%d\n",
name, _maxAllocs, _maxAllocs * ITEM_SIZE / 1024, _currentAllocs,
ITEM_SIZE, _nAllocs, _blockPtrs.Size() );
}
void SetTracked() {
--_nUntracked;
}
int Untracked() const {
return _nUntracked;
}
// This number is perf sensitive. 4k seems like a good tradeoff on my machine.
// The test file is large, 170k.
// Release: VS2010 gcc(no opt)
// 1k: 4000
// 2k: 4000
// 4k: 3900 21000
// 16k: 5200
// 32k: 4300
// 64k: 4000 21000
// Declared public because some compilers do not accept the use of
// ITEMS_PER_BLOCK in the private section when ITEMS_PER_BLOCK itself is private
enum { ITEMS_PER_BLOCK = (4 * 1024) / ITEM_SIZE };
private:
MemPoolT( const MemPoolT& ); // not supported
void operator=( const MemPoolT& ); // not supported
union Item {
Item* next;
char itemData[ITEM_SIZE];
};
struct Block {
Item items[ITEMS_PER_BLOCK];
};
DynArray< Block*, 10 > _blockPtrs;
Item* _root;
int _currentAllocs;
int _nAllocs;
int _maxAllocs;
int _nUntracked;
};
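// Illustration: MemPoolT is an internal detail (XMLDocument owns one pool per
// node type; see the bottom of this header), but the mechanics are simple:
// fixed-size slots carved out of 4k blocks and chained on a free list.
//
//   MemPoolT<40> pool;         // 40-byte slots; ITEMS_PER_BLOCK = 4096/40
//   void* a = pool.Alloc();    // pops the head of the free list
//   void* b = pool.Alloc();
//   pool.Free(a);              // pushes the slot back; no memory is released
//   pool.Free(b);
//   // pool.Clear(), or the destructor, deletes the underlying blocks.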
/**
Implements the interface to the "Visitor pattern" (see the Accept() method.)
If you call the Accept() method, you must pass it an XMLVisitor
class to handle callbacks. For nodes that contain other nodes (Document, Element)
you will get called with a VisitEnter/VisitExit pair. Nodes that are always leafs
are simply called with Visit().
If you return 'true' from a Visit method, recursive parsing will continue. If you return
false, <b>no children of this node or its siblings</b> will be visited.
All flavors of Visit methods have a default implementation that returns 'true' (continue
visiting). You need to only override methods that are interesting to you.
Generally Accept() is called on the XMLDocument, although all nodes support visiting.
You should never change the document from a callback.
@sa XMLNode::Accept()
*/
class TINYXML2_LIB XMLVisitor
{
public:
virtual ~XMLVisitor() {}
/// Visit a document.
virtual bool VisitEnter( const XMLDocument& /*doc*/ ) {
return true;
}
/// Visit a document.
virtual bool VisitExit( const XMLDocument& /*doc*/ ) {
return true;
}
/// Visit an element.
virtual bool VisitEnter( const XMLElement& /*element*/, const XMLAttribute* /*firstAttribute*/ ) {
return true;
}
/// Visit an element.
virtual bool VisitExit( const XMLElement& /*element*/ ) {
return true;
}
/// Visit a declaration.
virtual bool Visit( const XMLDeclaration& /*declaration*/ ) {
return true;
}
/// Visit a text node.
virtual bool Visit( const XMLText& /*text*/ ) {
return true;
}
/// Visit a comment node.
virtual bool Visit( const XMLComment& /*comment*/ ) {
return true;
}
/// Visit an unknown node.
virtual bool Visit( const XMLUnknown& /*unknown*/ ) {
return true;
}
};
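// Sketch of a concrete visitor: prints every element name, indented by depth.
// Any subclass follows this shape; override only the callbacks you need, and
// 'doc' below stands for an already-parsed XMLDocument.
//
//   class ElementLister : public XMLVisitor
//   {
//   public:
//       ElementLister() : _depth( 0 ) {}
//       virtual bool VisitEnter( const XMLElement& element, const XMLAttribute* ) {
//           printf( "%*s%s\n", _depth * 2, "", element.Name() );
//           ++_depth;
//           return true;    // true: keep descending into children
//       }
//       virtual bool VisitExit( const XMLElement& ) {
//           --_depth;
//           return true;
//       }
//   private:
//       int _depth;
//   };
//
//   ElementLister lister;
//   doc.Accept( &lister );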
// WARNING: must match XMLDocument::_errorNames[]
enum XMLError {
XML_SUCCESS = 0,
XML_NO_ATTRIBUTE,
XML_WRONG_ATTRIBUTE_TYPE,
XML_ERROR_FILE_NOT_FOUND,
XML_ERROR_FILE_COULD_NOT_BE_OPENED,
XML_ERROR_FILE_READ_ERROR,
XML_ERROR_PARSING_ELEMENT,
XML_ERROR_PARSING_ATTRIBUTE,
XML_ERROR_PARSING_TEXT,
XML_ERROR_PARSING_CDATA,
XML_ERROR_PARSING_COMMENT,
XML_ERROR_PARSING_DECLARATION,
XML_ERROR_PARSING_UNKNOWN,
XML_ERROR_EMPTY_DOCUMENT,
XML_ERROR_MISMATCHED_ELEMENT,
XML_ERROR_PARSING,
XML_CAN_NOT_CONVERT_TEXT,
XML_NO_TEXT_NODE,
XML_ELEMENT_DEPTH_EXCEEDED,
XML_ERROR_COUNT
};
/*
Utility functionality.
*/
class TINYXML2_LIB XMLUtil
{
public:
static const char* SkipWhiteSpace( const char* p, int* curLineNumPtr ) {
TIXMLASSERT( p );
while( IsWhiteSpace(*p) ) {
if (curLineNumPtr && *p == '\n') {
++(*curLineNumPtr);
}
++p;
}
TIXMLASSERT( p );
return p;
}
static char* SkipWhiteSpace( char* const p, int* curLineNumPtr ) {
return const_cast<char*>( SkipWhiteSpace( const_cast<const char*>(p), curLineNumPtr ) );
}
// Anything in the high order range of UTF-8 is assumed to not be whitespace. This isn't
// correct, but simple, and usually works.
static bool IsWhiteSpace( char p ) {
return !IsUTF8Continuation(p) && isspace( static_cast<unsigned char>(p) );
}
inline static bool IsNameStartChar( unsigned char ch ) {
if ( ch >= 128 ) {
// This is a heuristic guess in attempt to not implement Unicode-aware isalpha()
return true;
}
if ( isalpha( ch ) ) {
return true;
}
return ch == ':' || ch == '_';
}
inline static bool IsNameChar( unsigned char ch ) {
return IsNameStartChar( ch )
|| isdigit( ch )
|| ch == '.'
|| ch == '-';
}
inline static bool IsPrefixHex( const char* p) {
p = SkipWhiteSpace(p, 0);
return p && *p == '0' && ( *(p + 1) == 'x' || *(p + 1) == 'X');
}
inline static bool StringEqual( const char* p, const char* q, int nChar=INT_MAX ) {
if ( p == q ) {
return true;
}
TIXMLASSERT( p );
TIXMLASSERT( q );
TIXMLASSERT( nChar >= 0 );
return strncmp( p, q, nChar ) == 0;
}
inline static bool IsUTF8Continuation( const char p ) {
return ( p & 0x80 ) != 0;
}
static const char* ReadBOM( const char* p, bool* hasBOM );
// p is the starting location,
// the UTF-8 value of the entity will be placed in value, and length filled in.
static const char* GetCharacterRef( const char* p, char* value, int* length );
static void ConvertUTF32ToUTF8( unsigned long input, char* output, int* length );
// converts primitive types to strings
static void ToStr( int v, char* buffer, int bufferSize );
static void ToStr( unsigned v, char* buffer, int bufferSize );
static void ToStr( bool v, char* buffer, int bufferSize );
static void ToStr( float v, char* buffer, int bufferSize );
static void ToStr( double v, char* buffer, int bufferSize );
static void ToStr(int64_t v, char* buffer, int bufferSize);
static void ToStr(uint64_t v, char* buffer, int bufferSize);
// converts strings to primitive types
static bool ToInt( const char* str, int* value );
static bool ToUnsigned( const char* str, unsigned* value );
static bool ToBool( const char* str, bool* value );
static bool ToFloat( const char* str, float* value );
static bool ToDouble( const char* str, double* value );
static bool ToInt64(const char* str, int64_t* value);
static bool ToUnsigned64(const char* str, uint64_t* value);
// Changes what is serialized for a boolean value.
// Default to "true" and "false". Shouldn't be changed
// unless you have a special testing or compatibility need.
// Be careful: static, global, & not thread safe.
// Be sure to set static const memory as parameters.
static void SetBoolSerialization(const char* writeTrue, const char* writeFalse);
private:
static const char* writeBoolTrue;
static const char* writeBoolFalse;
};
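// Sketch: the To* helpers are the primitive/text conversions the attribute
// and text queries use internally; they can also be called directly.
//
//   char buf[32];
//   XMLUtil::ToStr( 42, buf, sizeof(buf) );   // buf now holds "42"
//   int v = 0;
//   if ( XMLUtil::ToInt( buf, &v ) ) {
//       // v == 42; ToInt() returns false for non-integer text
//   }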
/** XMLNode is a base class for every object that is in the
XML Document Object Model (DOM), except XMLAttributes.
Nodes have siblings, a parent, and children which can
be navigated. A node is always in a XMLDocument.
The type of a XMLNode can be queried, and it can
be cast to its more defined type.
A XMLDocument allocates memory for all its Nodes.
When the XMLDocument gets deleted, all its Nodes
will also be deleted.
@verbatim
A Document can contain: Element (container or leaf)
Comment (leaf)
Unknown (leaf)
Declaration( leaf )
An Element can contain: Element (container or leaf)
Text (leaf)
Attributes (not on tree)
Comment (leaf)
Unknown (leaf)
@endverbatim
*/
class TINYXML2_LIB XMLNode
{
friend class XMLDocument;
friend class XMLElement;
public:
/// Get the XMLDocument that owns this XMLNode.
const XMLDocument* GetDocument() const {
TIXMLASSERT( _document );
return _document;
}
/// Get the XMLDocument that owns this XMLNode.
XMLDocument* GetDocument() {
TIXMLASSERT( _document );
return _document;
}
/// Safely cast to an Element, or null.
virtual XMLElement* ToElement() {
return 0;
}
/// Safely cast to Text, or null.
virtual XMLText* ToText() {
return 0;
}
/// Safely cast to a Comment, or null.
virtual XMLComment* ToComment() {
return 0;
}
/// Safely cast to a Document, or null.
virtual XMLDocument* ToDocument() {
return 0;
}
/// Safely cast to a Declaration, or null.
virtual XMLDeclaration* ToDeclaration() {
return 0;
}
/// Safely cast to an Unknown, or null.
virtual XMLUnknown* ToUnknown() {
return 0;
}
virtual const XMLElement* ToElement() const {
return 0;
}
virtual const XMLText* ToText() const {
return 0;
}
virtual const XMLComment* ToComment() const {
return 0;
}
virtual const XMLDocument* ToDocument() const {
return 0;
}
virtual const XMLDeclaration* ToDeclaration() const {
return 0;
}
virtual const XMLUnknown* ToUnknown() const {
return 0;
}
/** The meaning of 'value' changes for the specific type.
@verbatim
Document: empty (NULL is returned, not an empty string)
Element: name of the element
Comment: the comment text
Unknown: the tag contents
Text: the text string
@endverbatim
*/
const char* Value() const;
/** Set the Value of an XML node.
@sa Value()
*/
void SetValue( const char* val, bool staticMem=false );
/// Gets the line number the node is in, if the document was parsed from a file.
int GetLineNum() const { return _parseLineNum; }
/// Get the parent of this node on the DOM.
const XMLNode* Parent() const {
return _parent;
}
XMLNode* Parent() {
return _parent;
}
/// Returns true if this node has no children.
bool NoChildren() const {
return !_firstChild;
}
/// Get the first child node, or null if none exists.
const XMLNode* FirstChild() const {
return _firstChild;
}
XMLNode* FirstChild() {
return _firstChild;
}
/** Get the first child element, or optionally the first child
element with the specified name.
*/
const XMLElement* FirstChildElement( const char* name = 0 ) const;
XMLElement* FirstChildElement( const char* name = 0 ) {
return const_cast<XMLElement*>(const_cast<const XMLNode*>(this)->FirstChildElement( name ));
}
/// Get the last child node, or null if none exists.
const XMLNode* LastChild() const {
return _lastChild;
}
XMLNode* LastChild() {
return _lastChild;
}
/** Get the last child element or optionally the last child
element with the specified name.
*/
const XMLElement* LastChildElement( const char* name = 0 ) const;
XMLElement* LastChildElement( const char* name = 0 ) {
return const_cast<XMLElement*>(const_cast<const XMLNode*>(this)->LastChildElement(name) );
}
/// Get the previous (left) sibling node of this node.
const XMLNode* PreviousSibling() const {
return _prev;
}
XMLNode* PreviousSibling() {
return _prev;
}
/// Get the previous (left) sibling element of this node, with an optionally supplied name.
const XMLElement* PreviousSiblingElement( const char* name = 0 ) const ;
XMLElement* PreviousSiblingElement( const char* name = 0 ) {
return const_cast<XMLElement*>(const_cast<const XMLNode*>(this)->PreviousSiblingElement( name ) );
}
/// Get the next (right) sibling node of this node.
const XMLNode* NextSibling() const {
return _next;
}
XMLNode* NextSibling() {
return _next;
}
/// Get the next (right) sibling element of this node, with an optionally supplied name.
const XMLElement* NextSiblingElement( const char* name = 0 ) const;
XMLElement* NextSiblingElement( const char* name = 0 ) {
return const_cast<XMLElement*>(const_cast<const XMLNode*>(this)->NextSiblingElement( name ) );
}
/**
Add a child node as the last (right) child.
If the child node is already part of the document,
it is moved from its old location to the new location.
Returns the addThis argument or 0 if the node does not
belong to the same document.
*/
XMLNode* InsertEndChild( XMLNode* addThis );
XMLNode* LinkEndChild( XMLNode* addThis ) {
return InsertEndChild( addThis );
}
/**
Add a child node as the first (left) child.
If the child node is already part of the document,
it is moved from its old location to the new location.
Returns the addThis argument or 0 if the node does not
belong to the same document.
*/
XMLNode* InsertFirstChild( XMLNode* addThis );
/**
Add a node after the specified child node.
If the child node is already part of the document,
it is moved from its old location to the new location.
Returns the addThis argument or 0 if the afterThis node
is not a child of this node, or if the node does not
belong to the same document.
*/
XMLNode* InsertAfterChild( XMLNode* afterThis, XMLNode* addThis );
/**
Delete all the children of this node.
*/
void DeleteChildren();
/**
Delete a child of this node.
*/
void DeleteChild( XMLNode* node );
/**
Make a copy of this node, but not its children.
You may pass in a Document pointer that will be
the owner of the new Node. If the 'document' is
null, then the node returned will be allocated
from the current Document. (this->GetDocument())
Note: if called on a XMLDocument, this will return null.
*/
virtual XMLNode* ShallowClone( XMLDocument* document ) const = 0;
/**
Make a copy of this node and all its children.
If the 'target' is null, then the nodes will
be allocated in the current document. If 'target'
is specified, the memory will be allocated in the
specified XMLDocument.
NOTE: This is probably not the correct tool to
copy a document, since XMLDocuments can have multiple
top level XMLNodes. You probably want to use
XMLDocument::DeepCopy()
*/
XMLNode* DeepClone( XMLDocument* target ) const;
/**
Test if 2 nodes are the same, but don't test children.
The 2 nodes do not need to be in the same Document.
Note: if called on a XMLDocument, this will return false.
*/
virtual bool ShallowEqual( const XMLNode* compare ) const = 0;
/** Accept a hierarchical visit of the nodes in the TinyXML-2 DOM. Every node in the
XML tree will be conditionally visited and the host will be called back
via the XMLVisitor interface.
This is essentially a SAX interface for TinyXML-2. (Note however it doesn't re-parse
the XML for the callbacks, so the performance of TinyXML-2 is unchanged by using this
interface versus any other.)
The interface has been based on ideas from:
- http://www.saxproject.org/
- http://c2.com/cgi/wiki?HierarchicalVisitorPattern
Which are both good references for "visiting".
An example of using Accept():
@verbatim
XMLPrinter printer;
tinyxmlDoc.Accept( &printer );
const char* xmlcstr = printer.CStr();
@endverbatim
*/
virtual bool Accept( XMLVisitor* visitor ) const = 0;
/**
Set user data into the XMLNode. TinyXML-2 in
no way processes or interprets user data.
It is initially 0.
*/
void SetUserData(void* userData) { _userData = userData; }
/**
Get user data set into the XMLNode. TinyXML-2 in
no way processes or interprets user data.
It is initially 0.
*/
void* GetUserData() const { return _userData; }
protected:
explicit XMLNode( XMLDocument* );
virtual ~XMLNode();
virtual char* ParseDeep( char* p, StrPair* parentEndTag, int* curLineNumPtr);
XMLDocument* _document;
XMLNode* _parent;
mutable StrPair _value;
int _parseLineNum;
XMLNode* _firstChild;
XMLNode* _lastChild;
XMLNode* _prev;
XMLNode* _next;
void* _userData;
private:
MemPool* _memPool;
void Unlink( XMLNode* child );
static void DeleteNode( XMLNode* node );
void InsertChildPreamble( XMLNode* insertThis ) const;
const XMLElement* ToElementWithName( const char* name ) const;
XMLNode( const XMLNode& ); // not supported
XMLNode& operator=( const XMLNode& ); // not supported
};
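// Sketch: the navigation accessors above support the usual iteration idiom
// over child elements. 'root' stands for an XMLElement* obtained elsewhere,
// and the element name "item" is illustrative.
//
//   for ( const XMLElement* e = root->FirstChildElement( "item" );
//         e;
//         e = e->NextSiblingElement( "item" ) )
//   {
//       printf( "item: %s\n", e->GetText() ? e->GetText() : "" );
//   }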
/** XML text.
Note that a text node can have child element nodes, for example:
@verbatim
<root>This is <b>bold</b></root>
@endverbatim
A text node has two ways to output its text: "normal" output
and CDATA. It will default to the mode it was parsed from the XML file and
you generally want to leave it alone, but you can change the output mode with
SetCData() and query it with CData().
*/
class TINYXML2_LIB XMLText : public XMLNode
{
friend class XMLDocument;
public:
virtual bool Accept( XMLVisitor* visitor ) const;
virtual XMLText* ToText() {
return this;
}
virtual const XMLText* ToText() const {
return this;
}
/// Declare whether this should be CDATA or standard text.
void SetCData( bool isCData ) {
_isCData = isCData;
}
/// Returns true if this is a CDATA text element.
bool CData() const {
return _isCData;
}
virtual XMLNode* ShallowClone( XMLDocument* document ) const;
virtual bool ShallowEqual( const XMLNode* compare ) const;
protected:
explicit XMLText( XMLDocument* doc ) : XMLNode( doc ), _isCData( false ) {}
virtual ~XMLText() {}
char* ParseDeep( char* p, StrPair* parentEndTag, int* curLineNumPtr );
private:
bool _isCData;
XMLText( const XMLText& ); // not supported
XMLText& operator=( const XMLText& ); // not supported
};
/** An XML Comment. */
class TINYXML2_LIB XMLComment : public XMLNode
{
friend class XMLDocument;
public:
virtual XMLComment* ToComment() {
return this;
}
virtual const XMLComment* ToComment() const {
return this;
}
virtual bool Accept( XMLVisitor* visitor ) const;
virtual XMLNode* ShallowClone( XMLDocument* document ) const;
virtual bool ShallowEqual( const XMLNode* compare ) const;
protected:
explicit XMLComment( XMLDocument* doc );
virtual ~XMLComment();
char* ParseDeep( char* p, StrPair* parentEndTag, int* curLineNumPtr);
private:
XMLComment( const XMLComment& ); // not supported
XMLComment& operator=( const XMLComment& ); // not supported
};
/** In correct XML the declaration is the first entry in the file.
@verbatim
<?xml version="1.0" standalone="yes"?>
@endverbatim
TinyXML-2 will happily read or write files without a declaration,
however.
The text of the declaration isn't interpreted. It is parsed
and written as a string.
*/
class TINYXML2_LIB XMLDeclaration : public XMLNode
{
friend class XMLDocument;
public:
virtual XMLDeclaration* ToDeclaration() {
return this;
}
virtual const XMLDeclaration* ToDeclaration() const {
return this;
}
virtual bool Accept( XMLVisitor* visitor ) const;
virtual XMLNode* ShallowClone( XMLDocument* document ) const;
virtual bool ShallowEqual( const XMLNode* compare ) const;
protected:
explicit XMLDeclaration( XMLDocument* doc );
virtual ~XMLDeclaration();
char* ParseDeep( char* p, StrPair* parentEndTag, int* curLineNumPtr );
private:
XMLDeclaration( const XMLDeclaration& ); // not supported
XMLDeclaration& operator=( const XMLDeclaration& ); // not supported
};
/** Any tag that TinyXML-2 doesn't recognize is saved as an
unknown. It is a tag of text, but should not be modified.
It will be written back to the XML, unchanged, when the file
is saved.
DTD tags get thrown into XMLUnknowns.
*/
class TINYXML2_LIB XMLUnknown : public XMLNode
{
friend class XMLDocument;
public:
virtual XMLUnknown* ToUnknown() {
return this;
}
virtual const XMLUnknown* ToUnknown() const {
return this;
}
virtual bool Accept( XMLVisitor* visitor ) const;
virtual XMLNode* ShallowClone( XMLDocument* document ) const;
virtual bool ShallowEqual( const XMLNode* compare ) const;
protected:
explicit XMLUnknown( XMLDocument* doc );
virtual ~XMLUnknown();
char* ParseDeep( char* p, StrPair* parentEndTag, int* curLineNumPtr );
private:
XMLUnknown( const XMLUnknown& ); // not supported
XMLUnknown& operator=( const XMLUnknown& ); // not supported
};
/** An attribute is a name-value pair. Elements have an arbitrary
number of attributes, each with a unique name.
@note The attributes are not XMLNodes. You may only query the
Next() attribute in a list.
*/
class TINYXML2_LIB XMLAttribute
{
friend class XMLElement;
public:
/// The name of the attribute.
const char* Name() const;
/// The value of the attribute.
const char* Value() const;
/// Gets the line number the attribute is in, if the document was parsed from a file.
int GetLineNum() const { return _parseLineNum; }
/// The next attribute in the list.
const XMLAttribute* Next() const {
return _next;
}
/** IntValue interprets the attribute as an integer, and returns the value.
If the value isn't an integer, 0 will be returned. There is no error checking;
use QueryIntValue() if you need error checking.
*/
int IntValue() const {
int i = 0;
QueryIntValue(&i);
return i;
}
int64_t Int64Value() const {
int64_t i = 0;
QueryInt64Value(&i);
return i;
}
uint64_t Unsigned64Value() const {
uint64_t i = 0;
QueryUnsigned64Value(&i);
return i;
}
/// Query as an unsigned integer. See IntValue()
unsigned UnsignedValue() const {
unsigned i=0;
QueryUnsignedValue( &i );
return i;
}
/// Query as a boolean. See IntValue()
bool BoolValue() const {
bool b=false;
QueryBoolValue( &b );
return b;
}
/// Query as a double. See IntValue()
double DoubleValue() const {
double d=0;
QueryDoubleValue( &d );
return d;
}
/// Query as a float. See IntValue()
float FloatValue() const {
float f=0;
QueryFloatValue( &f );
return f;
}
/** QueryIntValue interprets the attribute as an integer, and returns the value
in the provided parameter. The function will return XML_SUCCESS on success,
and XML_WRONG_ATTRIBUTE_TYPE if the conversion is not successful.
*/
XMLError QueryIntValue( int* value ) const;
/// See QueryIntValue
XMLError QueryUnsignedValue( unsigned int* value ) const;
/// See QueryIntValue
XMLError QueryInt64Value(int64_t* value) const;
/// See QueryIntValue
XMLError QueryUnsigned64Value(uint64_t* value) const;
/// See QueryIntValue
XMLError QueryBoolValue( bool* value ) const;
/// See QueryIntValue
XMLError QueryDoubleValue( double* value ) const;
/// See QueryIntValue
XMLError QueryFloatValue( float* value ) const;
/// Set the attribute to a string value.
void SetAttribute( const char* value );
/// Set the attribute to value.
void SetAttribute( int value );
/// Set the attribute to value.
void SetAttribute( unsigned value );
/// Set the attribute to value.
void SetAttribute(int64_t value);
/// Set the attribute to value.
void SetAttribute(uint64_t value);
/// Set the attribute to value.
void SetAttribute( bool value );
/// Set the attribute to value.
void SetAttribute( double value );
/// Set the attribute to value.
void SetAttribute( float value );
private:
enum { BUF_SIZE = 200 };
XMLAttribute() : _name(), _value(),_parseLineNum( 0 ), _next( 0 ), _memPool( 0 ) {}
virtual ~XMLAttribute() {}
XMLAttribute( const XMLAttribute& ); // not supported
void operator=( const XMLAttribute& ); // not supported
void SetName( const char* name );
char* ParseDeep( char* p, bool processEntities, int* curLineNumPtr );
mutable StrPair _name;
mutable StrPair _value;
int _parseLineNum;
XMLAttribute* _next;
MemPool* _memPool;
};
/** The element is a container class. It has a value, the element name,
and can contain other elements, text, comments, and unknowns.
Elements also contain an arbitrary number of attributes.
*/
class TINYXML2_LIB XMLElement : public XMLNode
{
friend class XMLDocument;
public:
/// Get the name of an element (which is the Value() of the node.)
const char* Name() const {
return Value();
}
/// Set the name of the element.
void SetName( const char* str, bool staticMem=false ) {
SetValue( str, staticMem );
}
virtual XMLElement* ToElement() {
return this;
}
virtual const XMLElement* ToElement() const {
return this;
}
virtual bool Accept( XMLVisitor* visitor ) const;
/** Given an attribute name, Attribute() returns the value
for the attribute of that name, or null if none
exists. For example:
@verbatim
const char* value = ele->Attribute( "foo" );
@endverbatim
The 'value' parameter is normally null. However, if specified,
the attribute will only be returned if the 'name' and 'value'
match. This allows you to write code:
@verbatim
if ( ele->Attribute( "foo", "bar" ) ) callFooIsBar();
@endverbatim
rather than:
@verbatim
if ( ele->Attribute( "foo" ) ) {
if ( strcmp( ele->Attribute( "foo" ), "bar" ) == 0 ) callFooIsBar();
}
@endverbatim
*/
const char* Attribute( const char* name, const char* value=0 ) const;
/** Given an attribute name, IntAttribute() returns the value
of the attribute interpreted as an integer. The default
value will be returned if the attribute isn't present,
or if there is an error. (For a method with error
checking, see QueryIntAttribute()).
*/
int IntAttribute(const char* name, int defaultValue = 0) const;
/// See IntAttribute()
unsigned UnsignedAttribute(const char* name, unsigned defaultValue = 0) const;
/// See IntAttribute()
int64_t Int64Attribute(const char* name, int64_t defaultValue = 0) const;
/// See IntAttribute()
uint64_t Unsigned64Attribute(const char* name, uint64_t defaultValue = 0) const;
/// See IntAttribute()
bool BoolAttribute(const char* name, bool defaultValue = false) const;
/// See IntAttribute()
double DoubleAttribute(const char* name, double defaultValue = 0) const;
/// See IntAttribute()
float FloatAttribute(const char* name, float defaultValue = 0) const;
/** Given an attribute name, QueryIntAttribute() returns
XML_SUCCESS, XML_WRONG_ATTRIBUTE_TYPE if the conversion
can't be performed, or XML_NO_ATTRIBUTE if the attribute
doesn't exist. If successful, the result of the conversion
will be written to 'value'. If not successful, nothing will
be written to 'value'. This allows you to provide default
value:
@verbatim
int value = 10;
QueryIntAttribute( "foo", &value ); // if "foo" isn't found, value will still be 10
@endverbatim
*/
XMLError QueryIntAttribute( const char* name, int* value ) const {
const XMLAttribute* a = FindAttribute( name );
if ( !a ) {
return XML_NO_ATTRIBUTE;
}
return a->QueryIntValue( value );
}
/// See QueryIntAttribute()
XMLError QueryUnsignedAttribute( const char* name, unsigned int* value ) const {
const XMLAttribute* a = FindAttribute( name );
if ( !a ) {
return XML_NO_ATTRIBUTE;
}
return a->QueryUnsignedValue( value );
}
/// See QueryIntAttribute()
XMLError QueryInt64Attribute(const char* name, int64_t* value) const {
const XMLAttribute* a = FindAttribute(name);
if (!a) {
return XML_NO_ATTRIBUTE;
}
return a->QueryInt64Value(value);
}
/// See QueryIntAttribute()
XMLError QueryUnsigned64Attribute(const char* name, uint64_t* value) const {
const XMLAttribute* a = FindAttribute(name);
if(!a) {
return XML_NO_ATTRIBUTE;
}
return a->QueryUnsigned64Value(value);
}
/// See QueryIntAttribute()
XMLError QueryBoolAttribute( const char* name, bool* value ) const {
const XMLAttribute* a = FindAttribute( name );
if ( !a ) {
return XML_NO_ATTRIBUTE;
}
return a->QueryBoolValue( value );
}
/// See QueryIntAttribute()
XMLError QueryDoubleAttribute( const char* name, double* value ) const {
const XMLAttribute* a = FindAttribute( name );
if ( !a ) {
return XML_NO_ATTRIBUTE;
}
return a->QueryDoubleValue( value );
}
/// See QueryIntAttribute()
XMLError QueryFloatAttribute( const char* name, float* value ) const {
const XMLAttribute* a = FindAttribute( name );
if ( !a ) {
return XML_NO_ATTRIBUTE;
}
return a->QueryFloatValue( value );
}
/// See QueryIntAttribute()
XMLError QueryStringAttribute(const char* name, const char** value) const {
const XMLAttribute* a = FindAttribute(name);
if (!a) {
return XML_NO_ATTRIBUTE;
}
*value = a->Value();
return XML_SUCCESS;
}
/** Given an attribute name, QueryAttribute() returns
XML_SUCCESS, XML_WRONG_ATTRIBUTE_TYPE if the conversion
can't be performed, or XML_NO_ATTRIBUTE if the attribute
doesn't exist. It is overloaded for the primitive types,
and is a generally more convenient replacement of
QueryIntAttribute() and related functions.
If successful, the result of the conversion
will be written to 'value'. If not successful, nothing will
be written to 'value'. This allows you to provide default
value:
@verbatim
int value = 10;
QueryAttribute( "foo", &value ); // if "foo" isn't found, value will still be 10
@endverbatim
*/
XMLError QueryAttribute( const char* name, int* value ) const {
return QueryIntAttribute( name, value );
}
XMLError QueryAttribute( const char* name, unsigned int* value ) const {
return QueryUnsignedAttribute( name, value );
}
XMLError QueryAttribute(const char* name, int64_t* value) const {
return QueryInt64Attribute(name, value);
}
XMLError QueryAttribute(const char* name, uint64_t* value) const {
return QueryUnsigned64Attribute(name, value);
}
XMLError QueryAttribute( const char* name, bool* value ) const {
return QueryBoolAttribute( name, value );
}
XMLError QueryAttribute( const char* name, double* value ) const {
return QueryDoubleAttribute( name, value );
}
XMLError QueryAttribute( const char* name, float* value ) const {
return QueryFloatAttribute( name, value );
}
XMLError QueryAttribute(const char* name, const char** value) const {
return QueryStringAttribute(name, value);
}
/// Sets the named attribute to value.
void SetAttribute( const char* name, const char* value ) {
XMLAttribute* a = FindOrCreateAttribute( name );
a->SetAttribute( value );
}
/// Sets the named attribute to value.
void SetAttribute( const char* name, int value ) {
XMLAttribute* a = FindOrCreateAttribute( name );
a->SetAttribute( value );
}
/// Sets the named attribute to value.
void SetAttribute( const char* name, unsigned value ) {
XMLAttribute* a = FindOrCreateAttribute( name );
a->SetAttribute( value );
}
/// Sets the named attribute to value.
void SetAttribute(const char* name, int64_t value) {
XMLAttribute* a = FindOrCreateAttribute(name);
a->SetAttribute(value);
}
/// Sets the named attribute to value.
void SetAttribute(const char* name, uint64_t value) {
XMLAttribute* a = FindOrCreateAttribute(name);
a->SetAttribute(value);
}
/// Sets the named attribute to value.
void SetAttribute( const char* name, bool value ) {
XMLAttribute* a = FindOrCreateAttribute( name );
a->SetAttribute( value );
}
/// Sets the named attribute to value.
void SetAttribute( const char* name, double value ) {
XMLAttribute* a = FindOrCreateAttribute( name );
a->SetAttribute( value );
}
/// Sets the named attribute to value.
void SetAttribute( const char* name, float value ) {
XMLAttribute* a = FindOrCreateAttribute( name );
a->SetAttribute( value );
}
/**
Delete an attribute.
*/
void DeleteAttribute( const char* name );
/// Return the first attribute in the list.
const XMLAttribute* FirstAttribute() const {
return _rootAttribute;
}
/// Query a specific attribute in the list.
const XMLAttribute* FindAttribute( const char* name ) const;
/** Convenience function for easy access to the text inside an element. Although easy
and concise, GetText() is limited compared to getting the XMLText child
and accessing it directly.
If the first child of 'this' is a XMLText, the GetText()
returns the character string of the Text node, else null is returned.
This is a convenient method for getting the text of simple contained text:
@verbatim
<foo>This is text</foo>
const char* str = fooElement->GetText();
@endverbatim
'str' will be a pointer to "This is text".
Note that this function can be misleading. If the element foo was created from
this XML:
@verbatim
<foo><b>This is text</b></foo>
@endverbatim
then the value of str would be null. The first child node isn't a text node, it is
another element. From this XML:
@verbatim
<foo>This is <b>text</b></foo>
@endverbatim
GetText() will return "This is ".
*/
const char* GetText() const;
/** Convenience function for easy access to the text inside an element. Although easy
and concise, SetText() is limited compared to creating an XMLText child
and mutating it directly.
If the first child of 'this' is a XMLText, SetText() sets its value to
the given string, otherwise it will create a first child that is an XMLText.
This is a convenient method for setting the text of simple contained text:
@verbatim
<foo>This is text</foo>
fooElement->SetText( "Hullaballoo!" );
<foo>Hullaballoo!</foo>
@endverbatim
Note that this function can be misleading. If the element foo was created from
this XML:
@verbatim
<foo><b>This is text</b></foo>
@endverbatim
then it will not change "This is text", but rather prefix it with a text element:
@verbatim
<foo>Hullaballoo!<b>This is text</b></foo>
@endverbatim
For this XML:
@verbatim
<foo />
@endverbatim
SetText() will generate
@verbatim
<foo>Hullaballoo!</foo>
@endverbatim
*/
void SetText( const char* inText );
/// Convenience method for setting text inside an element. See SetText() for important limitations.
void SetText( int value );
/// Convenience method for setting text inside an element. See SetText() for important limitations.
void SetText( unsigned value );
/// Convenience method for setting text inside an element. See SetText() for important limitations.
void SetText(int64_t value);
/// Convenience method for setting text inside an element. See SetText() for important limitations.
void SetText(uint64_t value);
/// Convenience method for setting text inside an element. See SetText() for important limitations.
void SetText( bool value );
/// Convenience method for setting text inside an element. See SetText() for important limitations.
void SetText( double value );
/// Convenience method for setting text inside an element. See SetText() for important limitations.
void SetText( float value );
/**
Convenience method to query the value of a child text node. This is probably best
shown by example. Given you have a document in this form:
@verbatim
<point>
<x>1</x>
<y>1.4</y>
</point>
@endverbatim
The QueryIntText() and similar functions provide a safe and easier way to get to the
"value" of x and y.
@verbatim
int x = 0;
float y = 0; // types of x and y are contrived for example
const XMLElement* xElement = pointElement->FirstChildElement( "x" );
const XMLElement* yElement = pointElement->FirstChildElement( "y" );
xElement->QueryIntText( &x );
yElement->QueryFloatText( &y );
@endverbatim
@returns XML_SUCCESS (0) on success, XML_CAN_NOT_CONVERT_TEXT if the text cannot be converted
to the requested type, and XML_NO_TEXT_NODE if there is no child text to query.
*/
XMLError QueryIntText( int* ival ) const;
/// See QueryIntText()
XMLError QueryUnsignedText( unsigned* uval ) const;
/// See QueryIntText()
XMLError QueryInt64Text(int64_t* uval) const;
/// See QueryIntText()
XMLError QueryUnsigned64Text(uint64_t* uval) const;
/// See QueryIntText()
XMLError QueryBoolText( bool* bval ) const;
/// See QueryIntText()
XMLError QueryDoubleText( double* dval ) const;
/// See QueryIntText()
XMLError QueryFloatText( float* fval ) const;
int IntText(int defaultValue = 0) const;
/// See QueryIntText()
unsigned UnsignedText(unsigned defaultValue = 0) const;
/// See QueryIntText()
int64_t Int64Text(int64_t defaultValue = 0) const;
/// See QueryIntText()
uint64_t Unsigned64Text(uint64_t defaultValue = 0) const;
/// See QueryIntText()
bool BoolText(bool defaultValue = false) const;
/// See QueryIntText()
double DoubleText(double defaultValue = 0) const;
/// See QueryIntText()
float FloatText(float defaultValue = 0) const;
/**
Convenience method to create a new XMLElement and add it as last (right)
child of this node. Returns the created and inserted element.
*/
XMLElement* InsertNewChildElement(const char* name);
/// See InsertNewChildElement()
XMLComment* InsertNewComment(const char* comment);
/// See InsertNewChildElement()
XMLText* InsertNewText(const char* text);
/// See InsertNewChildElement()
XMLDeclaration* InsertNewDeclaration(const char* text);
/// See InsertNewChildElement()
XMLUnknown* InsertNewUnknown(const char* text);
// internal:
enum ElementClosingType {
OPEN, // <foo>
CLOSED, // <foo/>
CLOSING // </foo>
};
ElementClosingType ClosingType() const {
return _closingType;
}
virtual XMLNode* ShallowClone( XMLDocument* document ) const;
virtual bool ShallowEqual( const XMLNode* compare ) const;
protected:
char* ParseDeep( char* p, StrPair* parentEndTag, int* curLineNumPtr );
private:
XMLElement( XMLDocument* doc );
virtual ~XMLElement();
XMLElement( const XMLElement& ); // not supported
void operator=( const XMLElement& ); // not supported
XMLAttribute* FindOrCreateAttribute( const char* name );
char* ParseAttributes( char* p, int* curLineNumPtr );
static void DeleteAttribute( XMLAttribute* attribute );
XMLAttribute* CreateAttribute();
enum { BUF_SIZE = 200 };
ElementClosingType _closingType;
// The attribute list is ordered; there is no 'lastAttribute'
// because the list needs to be scanned for dupes before adding
// a new attribute.
XMLAttribute* _rootAttribute;
};
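// Sketch: round-tripping attributes through the API above. Names and values
// are illustrative; 'doc' is an XMLDocument that owns the element.
//
//   XMLElement* point = doc.NewElement( "point" );
//   point->SetAttribute( "x", 1 );
//   point->SetAttribute( "y", 1.4 );
//
//   int x = 0;
//   if ( point->QueryIntAttribute( "x", &x ) == XML_SUCCESS ) {
//       // x == 1; XML_NO_ATTRIBUTE would mean "x" was absent
//   }
//   double y = point->DoubleAttribute( "y", 0.0 );   // 1.4, or 0.0 if absent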
enum Whitespace {
PRESERVE_WHITESPACE,
COLLAPSE_WHITESPACE
};
/** A Document binds together all the functionality.
It can be saved, loaded, and printed to the screen.
All Nodes are connected and allocated to a Document.
If the Document is deleted, all its Nodes are also deleted.
*/
class TINYXML2_LIB XMLDocument : public XMLNode
{
friend class XMLElement;
// Gives access to SetError and Push/PopDepth, but over-grants access to everything else.
// Wishing C++ had "internal" scope.
friend class XMLNode;
friend class XMLText;
friend class XMLComment;
friend class XMLDeclaration;
friend class XMLUnknown;
public:
/// constructor
XMLDocument( bool processEntities = true, Whitespace whitespaceMode = PRESERVE_WHITESPACE );
~XMLDocument();
virtual XMLDocument* ToDocument() {
TIXMLASSERT( this == _document );
return this;
}
virtual const XMLDocument* ToDocument() const {
TIXMLASSERT( this == _document );
return this;
}
/**
Parse an XML file from a character string.
Returns XML_SUCCESS (0) on success, or
an errorID.
You may optionally pass in the 'nBytes', which is
the number of bytes which will be parsed. If not
specified, TinyXML-2 will assume 'xml' points to a
null terminated string.
*/
XMLError Parse( const char* xml, size_t nBytes=static_cast<size_t>(-1) );
/**
Load an XML file from disk.
Returns XML_SUCCESS (0) on success, or
an errorID.
*/
XMLError LoadFile( const char* filename );
/**
Load an XML file from disk. You are responsible
for providing and closing the FILE*.
NOTE: The file should be opened as binary ("rb")
not text in order for TinyXML-2 to correctly
do newline normalization.
Returns XML_SUCCESS (0) on success, or
an errorID.
*/
XMLError LoadFile( FILE* );
/**
Save the XML file to disk.
Returns XML_SUCCESS (0) on success, or
an errorID.
*/
XMLError SaveFile( const char* filename, bool compact = false );
/**
Save the XML file to disk. You are responsible
for providing and closing the FILE*.
Returns XML_SUCCESS (0) on success, or
an errorID.
*/
XMLError SaveFile( FILE* fp, bool compact = false );
bool ProcessEntities() const {
return _processEntities;
}
Whitespace WhitespaceMode() const {
return _whitespaceMode;
}
/**
Returns true if this document has a leading Byte Order Mark of UTF8.
*/
bool HasBOM() const {
return _writeBOM;
}
/** Sets whether to write the BOM when writing the file.
*/
void SetBOM( bool useBOM ) {
_writeBOM = useBOM;
}
/** Return the root element of DOM. Equivalent to FirstChildElement().
To get the first node, use FirstChild().
*/
XMLElement* RootElement() {
return FirstChildElement();
}
const XMLElement* RootElement() const {
return FirstChildElement();
}
/** Print the Document. If the Printer is not provided, it will
print to stdout. If you provide Printer, this can print to a file:
@verbatim
XMLPrinter printer( fp );
doc.Print( &printer );
@endverbatim
Or you can use a printer to print to memory:
@verbatim
XMLPrinter printer;
doc.Print( &printer );
// printer.CStr() has a const char* to the XML
@endverbatim
*/
void Print( XMLPrinter* streamer=0 ) const;
virtual bool Accept( XMLVisitor* visitor ) const;
/**
Create a new Element associated with
this Document. The memory for the Element
is managed by the Document.
*/
XMLElement* NewElement( const char* name );
/**
Create a new Comment associated with
this Document. The memory for the Comment
is managed by the Document.
*/
XMLComment* NewComment( const char* comment );
/**
Create a new Text associated with
this Document. The memory for the Text
is managed by the Document.
*/
XMLText* NewText( const char* text );
/**
Create a new Declaration associated with
this Document. The memory for the object
is managed by the Document.
If the 'text' param is null, the standard
declaration is used:
@verbatim
<?xml version="1.0" encoding="UTF-8"?>
@endverbatim
*/
XMLDeclaration* NewDeclaration( const char* text=0 );
/**
Create a new Unknown associated with
this Document. The memory for the object
is managed by the Document.
*/
XMLUnknown* NewUnknown( const char* text );
/**
Delete a node associated with this document.
It will be unlinked from the DOM.
*/
void DeleteNode( XMLNode* node );
/// Clears the error flags.
void ClearError();
/// Return true if there was an error parsing the document.
bool Error() const {
return _errorID != XML_SUCCESS;
}
/// Return the errorID.
XMLError ErrorID() const {
return _errorID;
}
const char* ErrorName() const;
static const char* ErrorIDToName(XMLError errorID);
/** Returns a "long form" error description. A hopefully helpful
diagnostic with location, line number, and/or additional info.
*/
const char* ErrorStr() const;
/// A (trivial) utility function that prints the ErrorStr() to stdout.
void PrintError() const;
/// Return the line where the error occurred, or zero if unknown.
int ErrorLineNum() const
{
return _errorLineNum;
}
/// Clear the document, resetting it to the initial state.
void Clear();
/**
Copies this document to a target document.
The target will be completely cleared before the copy.
If you want to copy a sub-tree, see XMLNode::DeepClone().
NOTE: that the 'target' must be non-null.
*/
void DeepCopy(XMLDocument* target) const;
// internal
char* Identify( char* p, XMLNode** node );
// internal
void MarkInUse(const XMLNode* const);
virtual XMLNode* ShallowClone( XMLDocument* /*document*/ ) const {
return 0;
}
virtual bool ShallowEqual( const XMLNode* /*compare*/ ) const {
return false;
}
private:
XMLDocument( const XMLDocument& ); // not supported
void operator=( const XMLDocument& ); // not supported
bool _writeBOM;
bool _processEntities;
XMLError _errorID;
Whitespace _whitespaceMode;
mutable StrPair _errorStr;
int _errorLineNum;
char* _charBuffer;
int _parseCurLineNum;
int _parsingDepth;
// Memory tracking does add some overhead.
// However, the code assumes that you don't
// have a bunch of unlinked nodes around.
// Therefore it takes less memory to track
// in the document vs. a linked list in the XMLNode,
// and the performance is the same.
DynArray<XMLNode*, 10> _unlinked;
MemPoolT< sizeof(XMLElement) > _elementPool;
MemPoolT< sizeof(XMLAttribute) > _attributePool;
MemPoolT< sizeof(XMLText) > _textPool;
MemPoolT< sizeof(XMLComment) > _commentPool;
static const char* _errorNames[XML_ERROR_COUNT];
void Parse();
void SetError( XMLError error, int lineNum, const char* format, ... );
// Something of an obvious security hole, once it was discovered.
// Either an ill-formed XML or an excessively deep one can overflow
// the stack. Track stack depth, and error out if needed.
class DepthTracker {
public:
explicit DepthTracker(XMLDocument * document) {
this->_document = document;
document->PushDepth();
}
~DepthTracker() {
_document->PopDepth();
}
private:
XMLDocument * _document;
};
void PushDepth();
void PopDepth();
template<class NodeType, int PoolElementSize>
NodeType* CreateUnlinkedNode( MemPoolT<PoolElementSize>& pool );
};
template<class NodeType, int PoolElementSize>
inline NodeType* XMLDocument::CreateUnlinkedNode( MemPoolT<PoolElementSize>& pool )
{
TIXMLASSERT( sizeof( NodeType ) == PoolElementSize );
TIXMLASSERT( sizeof( NodeType ) == pool.ItemSize() );
NodeType* returnNode = new (pool.Alloc()) NodeType( this );
TIXMLASSERT( returnNode );
returnNode->_memPool = &pool;
_unlinked.Push(returnNode);
return returnNode;
}
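// Sketch: the build/save/load round trip the XMLDocument API supports
// (the file name is illustrative):
//
//   XMLDocument doc;
//   XMLElement* root = doc.NewElement( "root" );   // memory is owned by doc
//   root->SetText( "hello" );
//   doc.InsertEndChild( root );
//   if ( doc.SaveFile( "out.xml" ) != XML_SUCCESS ) {
//       doc.PrintError();
//   }
//
//   XMLDocument loaded;
//   if ( loaded.LoadFile( "out.xml" ) == XML_SUCCESS ) {
//       printf( "%s\n", loaded.RootElement()->GetText() );   // prints "hello"
//   }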
/**
A XMLHandle is a class that wraps a node pointer with null checks; this is
an incredibly useful thing. Note that XMLHandle is not part of the TinyXML-2
DOM structure. It is a separate utility class.
Take an example:
@verbatim
<Document>
<Element attributeA = "valueA">
<Child attributeB = "value1" />
<Child attributeB = "value2" />
</Element>
</Document>
@endverbatim
Assuming you want the value of "attributeB" in the 2nd "Child" element, it's very
easy to write a *lot* of code that looks like:
@verbatim
XMLElement* root = document.FirstChildElement( "Document" );
if ( root )
{
XMLElement* element = root->FirstChildElement( "Element" );
if ( element )
{
XMLElement* child = element->FirstChildElement( "Child" );
if ( child )
{
XMLElement* child2 = child->NextSiblingElement( "Child" );
if ( child2 )
{
// Finally do something useful.
@endverbatim
And that doesn't even cover "else" cases. XMLHandle addresses the verbosity
of such code. A XMLHandle checks for null pointers so it is perfectly safe
and correct to use:
@verbatim
XMLHandle docHandle( &document );
XMLElement* child2 = docHandle.FirstChildElement( "Document" ).FirstChildElement( "Element" ).FirstChildElement().NextSiblingElement();
if ( child2 )
{
// do something useful
@endverbatim
Which is MUCH more concise and useful.
It is also safe to copy handles - internally they are nothing more than node pointers.
@verbatim
XMLHandle handleCopy = handle;
@endverbatim
See also XMLConstHandle, which is the same as XMLHandle, but operates on const objects.
*/
class TINYXML2_LIB XMLHandle
{
public:
/// Create a handle from any node (at any depth of the tree.) This can be a null pointer.
explicit XMLHandle( XMLNode* node ) : _node( node ) {
}
/// Create a handle from a node.
explicit XMLHandle( XMLNode& node ) : _node( &node ) {
}
/// Copy constructor
XMLHandle( const XMLHandle& ref ) : _node( ref._node ) {
}
/// Assignment
XMLHandle& operator=( const XMLHandle& ref ) {
_node = ref._node;
return *this;
}
/// Get the first child of this handle.
XMLHandle FirstChild() {
return XMLHandle( _node ? _node->FirstChild() : 0 );
}
/// Get the first child element of this handle.
XMLHandle FirstChildElement( const char* name = 0 ) {
return XMLHandle( _node ? _node->FirstChildElement( name ) : 0 );
}
/// Get the last child of this handle.
XMLHandle LastChild() {
return XMLHandle( _node ? _node->LastChild() : 0 );
}
/// Get the last child element of this handle.
XMLHandle LastChildElement( const char* name = 0 ) {
return XMLHandle( _node ? _node->LastChildElement( name ) : 0 );
}
/// Get the previous sibling of this handle.
XMLHandle PreviousSibling() {
return XMLHandle( _node ? _node->PreviousSibling() : 0 );
}
/// Get the previous sibling element of this handle.
XMLHandle PreviousSiblingElement( const char* name = 0 ) {
return XMLHandle( _node ? _node->PreviousSiblingElement( name ) : 0 );
}
/// Get the next sibling of this handle.
XMLHandle NextSibling() {
return XMLHandle( _node ? _node->NextSibling() : 0 );
}
/// Get the next sibling element of this handle.
XMLHandle NextSiblingElement( const char* name = 0 ) {
return XMLHandle( _node ? _node->NextSiblingElement( name ) : 0 );
}
/// Safe cast to XMLNode. This can return null.
XMLNode* ToNode() {
return _node;
}
/// Safe cast to XMLElement. This can return null.
XMLElement* ToElement() {
return ( _node ? _node->ToElement() : 0 );
}
/// Safe cast to XMLText. This can return null.
XMLText* ToText() {
return ( _node ? _node->ToText() : 0 );
}
/// Safe cast to XMLUnknown. This can return null.
XMLUnknown* ToUnknown() {
return ( _node ? _node->ToUnknown() : 0 );
}
/// Safe cast to XMLDeclaration. This can return null.
XMLDeclaration* ToDeclaration() {
return ( _node ? _node->ToDeclaration() : 0 );
}
private:
XMLNode* _node;
};
/**
A variant of the XMLHandle class for working with const XMLNodes and Documents. It is the
same in all regards, except for the 'const' qualifiers. See XMLHandle for API.
*/
class TINYXML2_LIB XMLConstHandle
{
public:
explicit XMLConstHandle( const XMLNode* node ) : _node( node ) {
}
explicit XMLConstHandle( const XMLNode& node ) : _node( &node ) {
}
XMLConstHandle( const XMLConstHandle& ref ) : _node( ref._node ) {
}
XMLConstHandle& operator=( const XMLConstHandle& ref ) {
_node = ref._node;
return *this;
}
const XMLConstHandle FirstChild() const {
return XMLConstHandle( _node ? _node->FirstChild() : 0 );
}
const XMLConstHandle FirstChildElement( const char* name = 0 ) const {
return XMLConstHandle( _node ? _node->FirstChildElement( name ) : 0 );
}
const XMLConstHandle LastChild() const {
return XMLConstHandle( _node ? _node->LastChild() : 0 );
}
const XMLConstHandle LastChildElement( const char* name = 0 ) const {
return XMLConstHandle( _node ? _node->LastChildElement( name ) : 0 );
}
const XMLConstHandle PreviousSibling() const {
return XMLConstHandle( _node ? _node->PreviousSibling() : 0 );
}
const XMLConstHandle PreviousSiblingElement( const char* name = 0 ) const {
return XMLConstHandle( _node ? _node->PreviousSiblingElement( name ) : 0 );
}
const XMLConstHandle NextSibling() const {
return XMLConstHandle( _node ? _node->NextSibling() : 0 );
}
const XMLConstHandle NextSiblingElement( const char* name = 0 ) const {
return XMLConstHandle( _node ? _node->NextSiblingElement( name ) : 0 );
}
const XMLNode* ToNode() const {
return _node;
}
const XMLElement* ToElement() const {
return ( _node ? _node->ToElement() : 0 );
}
const XMLText* ToText() const {
return ( _node ? _node->ToText() : 0 );
}
const XMLUnknown* ToUnknown() const {
return ( _node ? _node->ToUnknown() : 0 );
}
const XMLDeclaration* ToDeclaration() const {
return ( _node ? _node->ToDeclaration() : 0 );
}
private:
const XMLNode* _node;
};
/**
Printing functionality. The XMLPrinter gives you more
options than the XMLDocument::Print() method.
It can:
-# Print to memory.
-# Print to a file you provide.
-# Print XML without an XMLDocument.
Print to Memory
@verbatim
XMLPrinter printer;
doc.Print( &printer );
SomeFunction( printer.CStr() );
@endverbatim
Print to a File
You provide the file pointer.
@verbatim
XMLPrinter printer( fp );
doc.Print( &printer );
@endverbatim
Print without an XMLDocument
When loading, an XML parser is very useful. However, sometimes
when saving, it just gets in the way. The code is often set up
for streaming, and constructing the DOM is just overhead.
The Printer supports the streaming case. The following code
prints out a trivially simple XML file without ever creating
an XML document.
@verbatim
XMLPrinter printer( fp );
printer.OpenElement( "foo" );
printer.PushAttribute( "foo", "bar" );
printer.CloseElement();
@endverbatim
*/
class TINYXML2_LIB XMLPrinter : public XMLVisitor
{
public:
/** Construct the printer. If the FILE* is specified,
this will print to the FILE. Else it will print
to memory, and the result is available in CStr().
If 'compact' is set to true, then output is created
with only required whitespace and newlines.
*/
XMLPrinter( FILE* file=0, bool compact = false, int depth = 0 );
virtual ~XMLPrinter() {}
/** If streaming, write the BOM and declaration. */
void PushHeader( bool writeBOM, bool writeDeclaration );
/** If streaming, start writing an element.
The element must be closed with CloseElement()
*/
void OpenElement( const char* name, bool compactMode=false );
/// If streaming, add an attribute to an open element.
void PushAttribute( const char* name, const char* value );
void PushAttribute( const char* name, int value );
void PushAttribute( const char* name, unsigned value );
void PushAttribute( const char* name, int64_t value );
void PushAttribute( const char* name, uint64_t value );
void PushAttribute( const char* name, bool value );
void PushAttribute( const char* name, double value );
/// If streaming, close the Element.
virtual void CloseElement( bool compactMode=false );
/// Add a text node.
void PushText( const char* text, bool cdata=false );
/// Add a text node from an integer.
void PushText( int value );
/// Add a text node from an unsigned.
void PushText( unsigned value );
/// Add a text node from a signed 64bit integer.
void PushText( int64_t value );
/// Add a text node from an unsigned 64bit integer.
void PushText( uint64_t value );
/// Add a text node from a bool.
void PushText( bool value );
/// Add a text node from a float.
void PushText( float value );
/// Add a text node from a double.
void PushText( double value );
/// Add a comment
void PushComment( const char* comment );
void PushDeclaration( const char* value );
void PushUnknown( const char* value );
virtual bool VisitEnter( const XMLDocument& /*doc*/ );
virtual bool VisitExit( const XMLDocument& /*doc*/ ) {
return true;
}
virtual bool VisitEnter( const XMLElement& element, const XMLAttribute* attribute );
virtual bool VisitExit( const XMLElement& element );
virtual bool Visit( const XMLText& text );
virtual bool Visit( const XMLComment& comment );
virtual bool Visit( const XMLDeclaration& declaration );
virtual bool Visit( const XMLUnknown& unknown );
/**
If in print to memory mode, return a pointer to
the XML file in memory.
*/
const char* CStr() const {
return _buffer.Mem();
}
/**
If in print to memory mode, return the size
of the XML file in memory. (Note the size returned
includes the terminating null.)
*/
int CStrSize() const {
return _buffer.Size();
}
/**
If in print to memory mode, reset the buffer to the
beginning.
*/
void ClearBuffer( bool resetToFirstElement = true ) {
_buffer.Clear();
_buffer.Push(0);
_firstElement = resetToFirstElement;
}
protected:
virtual bool CompactMode( const XMLElement& ) { return _compactMode; }
/** Prints out the space before an element. You may override to change
the space and tabs used. A PrintSpace() override should call Print().
*/
virtual void PrintSpace( int depth );
virtual void Print( const char* format, ... );
virtual void Write( const char* data, size_t size );
virtual void Putc( char ch );
inline void Write(const char* data) { Write(data, strlen(data)); }
void SealElementIfJustOpened();
bool _elementJustOpened;
DynArray< const char*, 10 > _stack;
private:
/**
Prepares to write a new node. This includes sealing an element that was
just opened, and writing any whitespace necessary if not in compact mode.
*/
void PrepareForNewNode( bool compactMode );
void PrintString( const char*, bool restrictedEntitySet ); // prints out, after detecting entities.
bool _firstElement;
FILE* _fp;
int _depth;
int _textDepth;
bool _processEntities;
bool _compactMode;
enum {
ENTITY_RANGE = 64,
BUF_SIZE = 200
};
bool _entityFlag[ENTITY_RANGE];
bool _restrictedEntityFlag[ENTITY_RANGE];
DynArray< char, 20 > _buffer;
// Prohibit cloning, intentionally not implemented
XMLPrinter( const XMLPrinter& );
XMLPrinter& operator=( const XMLPrinter& );
};
} // tinyxml2
#if defined(_MSC_VER)
# pragma warning(pop)
#endif
#endif // TINYXML2_INCLUDED | 71,400 | C | 29.00042 | 166 | 0.641106 |
MomentFactory/Omniverse-MVR-GDTF-converter/src/usd-plugins/fileFormat/gdtfFileFormat/gdtfFileFormat.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "gdtfFileFormat.h"
#include <pxr/pxr.h>
#include <pxr/base/tf/diagnostic.h>
#include <pxr/base/tf/stringUtils.h>
#include <pxr/base/tf/token.h>
#include <pxr/usd/usd/prim.h>
#include <pxr/usd/usd/stage.h>
#include <pxr/usd/usd/usdaFileFormat.h>
#include <pxr/usd/usdGeom/mesh.h>
#include <pxr/usd/usdGeom/scope.h>
#include <pxr/usd/usdGeom/camera.h>
#include <pxr/usd/usdGeom/cube.h>
#include <pxr/usd/usdGeom/xformable.h>
#include <pxr/usd/usdGeom/xform.h>
#include <pxr/usd/usdLux/rectLight.h>
#include <pxr/base/gf/matrix3f.h>
#include <pxr/base/gf/vec3f.h>
#include <pxr/base/gf/rotation.h>
#include <pxr/usd/usd/payloads.h>
#include "../mvrFileFormat/gdtfParser/GdtfParser.h"
#include "gdtfUsdConverter.h"
#include <fstream>
#include <cmath>
#include <iostream>
#define _SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING
#include <experimental/filesystem>
PXR_NAMESPACE_OPEN_SCOPE
GdtfFileFormat::GdtfFileFormat() : SdfFileFormat(
GdtfFileFormatTokens->Id,
GdtfFileFormatTokens->Version,
GdtfFileFormatTokens->Target,
GdtfFileFormatTokens->Extension)
{
}
GdtfFileFormat::~GdtfFileFormat()
{
}
bool GdtfFileFormat::CanRead(const std::string& filePath) const
{
return true;
}
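// Note: returning true unconditionally means format selection relies on the
// .gdtf extension registered with this format rather than on content sniffing.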
static std::string CleanNameForUSD(const std::string& name)
{
std::string cleanedName = name;
if(cleanedName.size() == 0)
{
return "Default";
}
if(cleanedName.size() == 1 && !TfIsValidIdentifier(cleanedName))
{
// A single-character name that is not a valid identifier (e.g. a digit) only needs a leading underscore.
return CleanNameForUSD("_" + cleanedName);
}
return TfMakeValidIdentifier(cleanedName);
}
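// Illustrative behaviour (not exhaustive):
//   CleanNameForUSD("")       -> "Default"
//   CleanNameForUSD("7")      -> "_7"     (single invalid character gets a leading underscore)
//   CleanNameForUSD("a name") -> "a_name" (via TfMakeValidIdentifier)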
bool GdtfFileFormat::Read(SdfLayer* layer, const std::string& resolvedPath, bool metadataOnly) const
{
// Parse the GDTF file below and convert it to USD; use TF_CODING_ERROR to report errors.
// Create a new anonymous layer and wrap a stage around it.
PXR_NAMESPACE_USING_DIRECTIVE
if (!TF_VERIFY(layer))
{
return false;
}
SdfLayerRefPtr newLayer = SdfLayer::CreateAnonymous(".usd");
UsdStageRefPtr stage = UsdStage::Open(newLayer);
// Parse GDTF file
auto parser = GDTF::GDTFParser();
GDTF::GDTFSpecification device = parser.ParseGDTFFile(resolvedPath);
// Write to stage
GDTF::ConvertToUsd(device, stage);
// Copy contents into output layer.
layer->TransferContent(newLayer);
return true;
}
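// Usage sketch (illustrative, assuming the plugin is registered with USD): a
// .gdtf asset can then be opened like any other layer, e.g.
//   UsdStageRefPtr stage = UsdStage::Open("/path/to/fixture.gdtf"); // hypothetical path
// and the converted fixture prims appear on the resulting stage.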
bool GdtfFileFormat::WriteToString(const SdfLayer& layer, std::string* str, const std::string& comment) const
{
// Placeholder for a potential two-way (read/write) implementation.
return false;
}
bool GdtfFileFormat::WriteToStream(const SdfSpecHandle& spec, std::ostream& out, size_t indent) const
{
// This proof of concept does not support writing.
return false;
}
bool GdtfFileFormat::_ShouldSkipAnonymousReload() const
{
return false;
}
bool GdtfFileFormat::_ShouldReadAnonymousLayers() const
{
return true;
}
void GdtfFileFormat::ComposeFieldsForFileFormatArguments(const std::string& assetPath, const PcpDynamicFileFormatContext& context, FileFormatArguments* args, VtValue* contextDependencyData) const
{
}
bool GdtfFileFormat::CanFieldChangeAffectFileFormatArguments(const TfToken& field, const VtValue& oldValue, const VtValue& newValue, const VtValue& contextDependencyData) const
{
return true;
}
// These macros emit methods defined in the Pixar namespace
// but not properly scoped, so we have to use the namespace
// locally here.
TF_DEFINE_PUBLIC_TOKENS(
GdtfFileFormatTokens,
((Id, "gdtfFileFormat"))
((Version, "1.0"))
((Target, "usd"))
((Extension, "gdtf"))
);
TF_REGISTRY_FUNCTION(TfType)
{
SDF_DEFINE_FILE_FORMAT(GdtfFileFormat, SdfFileFormat);
}
PXR_NAMESPACE_CLOSE_SCOPE | 4,206 | C++ | 24.969136 | 195 | 0.743462 |
MomentFactory/Omniverse-MVR-GDTF-converter/src/usd-plugins/fileFormat/gdtfFileFormat/tinyxml2.cpp | /*
Original code by Lee Thomason (www.grinninglizard.com)
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any
damages arising from the use of this software.
Permission is granted to anyone to use this software for any
purpose, including commercial applications, and to alter it and
redistribute it freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product documentation
would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and
must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source
distribution.
*/
#include "tinyxml2.h"
#include <new> // yes, this one new style header, is in the Android SDK.
#if defined(ANDROID_NDK) || defined(__BORLANDC__) || defined(__QNXNTO__)
# include <stddef.h>
# include <stdarg.h>
#else
# include <cstddef>
# include <cstdarg>
#endif
#if defined(_MSC_VER) && (_MSC_VER >= 1400 ) && (!defined WINCE)
// Microsoft Visual Studio, version 2005 and higher. Not WinCE.
/*int _snprintf_s(
char *buffer,
size_t sizeOfBuffer,
size_t count,
const char *format [,
argument] ...
);*/
static inline int TIXML_SNPRINTF( char* buffer, size_t size, const char* format, ... )
{
va_list va;
va_start( va, format );
const int result = vsnprintf_s( buffer, size, _TRUNCATE, format, va );
va_end( va );
return result;
}
static inline int TIXML_VSNPRINTF( char* buffer, size_t size, const char* format, va_list va )
{
const int result = vsnprintf_s( buffer, size, _TRUNCATE, format, va );
return result;
}
#define TIXML_VSCPRINTF _vscprintf
#define TIXML_SSCANF sscanf_s
#elif defined _MSC_VER
// Microsoft Visual Studio 2003 and earlier or WinCE
#define TIXML_SNPRINTF _snprintf
#define TIXML_VSNPRINTF _vsnprintf
#define TIXML_SSCANF sscanf
#if (_MSC_VER < 1400 ) && (!defined WINCE)
// Microsoft Visual Studio 2003 and not WinCE.
#define TIXML_VSCPRINTF _vscprintf // VS2003's C runtime has this, but VC6 C runtime or WinCE SDK doesn't have it.
#else
// Microsoft Visual Studio 2003 and earlier or WinCE.
static inline int TIXML_VSCPRINTF( const char* format, va_list va )
{
int len = 512;
for (;;) {
len = len*2;
char* str = new char[len]();
const int required = _vsnprintf(str, len, format, va);
delete[] str;
if ( required != -1 ) {
TIXMLASSERT( required >= 0 );
len = required;
break;
}
}
TIXMLASSERT( len >= 0 );
return len;
}
#endif
#else
// GCC version 3 and higher
//#warning( "Using sn* functions." )
#define TIXML_SNPRINTF snprintf
#define TIXML_VSNPRINTF vsnprintf
static inline int TIXML_VSCPRINTF( const char* format, va_list va )
{
int len = vsnprintf( 0, 0, format, va );
TIXMLASSERT( len >= 0 );
return len;
}
#define TIXML_SSCANF sscanf
#endif
#if defined(_WIN64)
#define TIXML_FSEEK _fseeki64
#define TIXML_FTELL _ftelli64
#elif defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__DragonFly__) || defined(__CYGWIN__)
#define TIXML_FSEEK fseeko
#define TIXML_FTELL ftello
#elif defined(__ANDROID__)
#if __ANDROID_API__ > 24
#define TIXML_FSEEK fseeko64
#define TIXML_FTELL ftello64
#else
#define TIXML_FSEEK fseeko
#define TIXML_FTELL ftello
#endif
#elif defined(__unix__) && defined(__x86_64__)
#define TIXML_FSEEK fseeko64
#define TIXML_FTELL ftello64
#else
#define TIXML_FSEEK fseek
#define TIXML_FTELL ftell
#endif
static const char LINE_FEED = static_cast<char>(0x0a); // all line endings are normalized to LF
static const char LF = LINE_FEED;
static const char CARRIAGE_RETURN = static_cast<char>(0x0d); // CR gets filtered out
static const char CR = CARRIAGE_RETURN;
static const char SINGLE_QUOTE = '\'';
static const char DOUBLE_QUOTE = '\"';
// Bunch of unicode info at:
// http://www.unicode.org/faq/utf_bom.html
// ef bb bf (Microsoft "lead bytes") - designates UTF-8
static const unsigned char TIXML_UTF_LEAD_0 = 0xefU;
static const unsigned char TIXML_UTF_LEAD_1 = 0xbbU;
static const unsigned char TIXML_UTF_LEAD_2 = 0xbfU;
namespace tinyxml2
{
struct Entity {
const char* pattern;
int length;
char value;
};
static const int NUM_ENTITIES = 5;
static const Entity entities[NUM_ENTITIES] = {
{ "quot", 4, DOUBLE_QUOTE },
{ "amp", 3, '&' },
{ "apos", 4, SINGLE_QUOTE },
{ "lt", 2, '<' },
{ "gt", 2, '>' }
};
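// Example (illustrative): on read, "x &lt; y" in the source decodes to
// "x < y"; on write, PrintString() applies the reverse substitution.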
StrPair::~StrPair()
{
Reset();
}
void StrPair::TransferTo( StrPair* other )
{
if ( this == other ) {
return;
}
// This in effect implements the assignment operator by "moving"
// ownership (as in auto_ptr).
TIXMLASSERT( other != 0 );
TIXMLASSERT( other->_flags == 0 );
TIXMLASSERT( other->_start == 0 );
TIXMLASSERT( other->_end == 0 );
other->Reset();
other->_flags = _flags;
other->_start = _start;
other->_end = _end;
_flags = 0;
_start = 0;
_end = 0;
}
void StrPair::Reset()
{
if ( _flags & NEEDS_DELETE ) {
delete [] _start;
}
_flags = 0;
_start = 0;
_end = 0;
}
void StrPair::SetStr( const char* str, int flags )
{
TIXMLASSERT( str );
Reset();
size_t len = strlen( str );
TIXMLASSERT( _start == 0 );
_start = new char[ len+1 ];
memcpy( _start, str, len+1 );
_end = _start + len;
_flags = flags | NEEDS_DELETE;
}
char* StrPair::ParseText( char* p, const char* endTag, int strFlags, int* curLineNumPtr )
{
TIXMLASSERT( p );
TIXMLASSERT( endTag && *endTag );
TIXMLASSERT(curLineNumPtr);
char* start = p;
const char endChar = *endTag;
size_t length = strlen( endTag );
// Inner loop of text parsing.
while ( *p ) {
if ( *p == endChar && strncmp( p, endTag, length ) == 0 ) {
Set( start, p, strFlags );
return p + length;
} else if (*p == '\n') {
++(*curLineNumPtr);
}
++p;
TIXMLASSERT( p );
}
return 0;
}
char* StrPair::ParseName( char* p )
{
if ( !p || !(*p) ) {
return 0;
}
if ( !XMLUtil::IsNameStartChar( (unsigned char) *p ) ) {
return 0;
}
char* const start = p;
++p;
while ( *p && XMLUtil::IsNameChar( (unsigned char) *p ) ) {
++p;
}
Set( start, p, 0 );
return p;
}
void StrPair::CollapseWhitespace()
{
// Adjusting _start would cause undefined behavior on delete[]
TIXMLASSERT( ( _flags & NEEDS_DELETE ) == 0 );
// Trim leading space.
_start = XMLUtil::SkipWhiteSpace( _start, 0 );
if ( *_start ) {
const char* p = _start; // the read pointer
char* q = _start; // the write pointer
while( *p ) {
if ( XMLUtil::IsWhiteSpace( *p )) {
p = XMLUtil::SkipWhiteSpace( p, 0 );
if ( *p == 0 ) {
break; // don't write to q; this trims the trailing space.
}
*q = ' ';
++q;
}
*q = *p;
++q;
++p;
}
*q = 0;
}
}
const char* StrPair::GetStr()
{
TIXMLASSERT( _start );
TIXMLASSERT( _end );
if ( _flags & NEEDS_FLUSH ) {
*_end = 0;
_flags ^= NEEDS_FLUSH;
if ( _flags ) {
const char* p = _start; // the read pointer
char* q = _start; // the write pointer
while( p < _end ) {
if ( (_flags & NEEDS_NEWLINE_NORMALIZATION) && *p == CR ) {
// CR-LF pair becomes LF
// CR alone becomes LF
// LF-CR becomes LF
if ( *(p+1) == LF ) {
p += 2;
}
else {
++p;
}
*q = LF;
++q;
}
else if ( (_flags & NEEDS_NEWLINE_NORMALIZATION) && *p == LF ) {
if ( *(p+1) == CR ) {
p += 2;
}
else {
++p;
}
*q = LF;
++q;
}
else if ( (_flags & NEEDS_ENTITY_PROCESSING) && *p == '&' ) {
// Entities handled by tinyXML2:
// - special entities in the entity table [in/out]
// - numeric character reference [in]
// 中 or 中
if ( *(p+1) == '#' ) {
const int buflen = 10;
char buf[buflen] = { 0 };
int len = 0;
const char* adjusted = const_cast<char*>( XMLUtil::GetCharacterRef( p, buf, &len ) );
if ( adjusted == 0 ) {
*q = *p;
++p;
++q;
}
else {
TIXMLASSERT( 0 <= len && len <= buflen );
TIXMLASSERT( q + len <= adjusted );
p = adjusted;
memcpy( q, buf, len );
q += len;
}
}
else {
bool entityFound = false;
for( int i = 0; i < NUM_ENTITIES; ++i ) {
const Entity& entity = entities[i];
if ( strncmp( p + 1, entity.pattern, entity.length ) == 0
&& *( p + entity.length + 1 ) == ';' ) {
// Found an entity - convert.
*q = entity.value;
++q;
p += entity.length + 2;
entityFound = true;
break;
}
}
if ( !entityFound ) {
// fixme: treat as error?
++p;
++q;
}
}
}
else {
*q = *p;
++p;
++q;
}
}
*q = 0;
}
// The loop below has plenty going on, and this
// is a less useful mode. Break it out.
if ( _flags & NEEDS_WHITESPACE_COLLAPSING ) {
CollapseWhitespace();
}
_flags = (_flags & NEEDS_DELETE);
}
TIXMLASSERT( _start );
return _start;
}
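// Example (illustrative): with entity processing and newline normalization
// flagged, the raw buffer "a&lt;b\r\nc" is rewritten in place to "a<b\nc"
// before the pointer is returned.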
// --------- XMLUtil ----------- //
const char* XMLUtil::writeBoolTrue = "true";
const char* XMLUtil::writeBoolFalse = "false";
void XMLUtil::SetBoolSerialization(const char* writeTrue, const char* writeFalse)
{
static const char* defTrue = "true";
static const char* defFalse = "false";
writeBoolTrue = (writeTrue) ? writeTrue : defTrue;
writeBoolFalse = (writeFalse) ? writeFalse : defFalse;
}
const char* XMLUtil::ReadBOM( const char* p, bool* bom )
{
TIXMLASSERT( p );
TIXMLASSERT( bom );
*bom = false;
const unsigned char* pu = reinterpret_cast<const unsigned char*>(p);
// Check for BOM:
if ( *(pu+0) == TIXML_UTF_LEAD_0
&& *(pu+1) == TIXML_UTF_LEAD_1
&& *(pu+2) == TIXML_UTF_LEAD_2 ) {
*bom = true;
p += 3;
}
TIXMLASSERT( p );
return p;
}
void XMLUtil::ConvertUTF32ToUTF8( unsigned long input, char* output, int* length )
{
const unsigned long BYTE_MASK = 0xBF;
const unsigned long BYTE_MARK = 0x80;
const unsigned long FIRST_BYTE_MARK[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC };
if (input < 0x80) {
*length = 1;
}
else if ( input < 0x800 ) {
*length = 2;
}
else if ( input < 0x10000 ) {
*length = 3;
}
else if ( input < 0x200000 ) {
*length = 4;
}
else {
*length = 0; // This code won't convert this correctly anyway.
return;
}
output += *length;
// Scary scary fall throughs are annotated with carefully designed comments
// to suppress compiler warnings such as -Wimplicit-fallthrough in gcc
switch (*length) {
case 4:
--output;
*output = static_cast<char>((input | BYTE_MARK) & BYTE_MASK);
input >>= 6;
//fall through
case 3:
--output;
*output = static_cast<char>((input | BYTE_MARK) & BYTE_MASK);
input >>= 6;
//fall through
case 2:
--output;
*output = static_cast<char>((input | BYTE_MARK) & BYTE_MASK);
input >>= 6;
//fall through
case 1:
--output;
*output = static_cast<char>(input | FIRST_BYTE_MARK[*length]);
break;
default:
TIXMLASSERT( false );
}
}
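// Worked example (illustrative): U+4E2D is in [0x800, 0x10000), so *length == 3
// and the bytes are written back to front:
//   byte 3: 0x80 | (0x4E2D & 0x3F)        = 0xAD
//   byte 2: 0x80 | ((0x4E2D >> 6) & 0x3F) = 0xB8
//   byte 1: 0xE0 | (0x4E2D >> 12)         = 0xE4
// yielding the UTF-8 sequence E4 B8 AD.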
const char* XMLUtil::GetCharacterRef( const char* p, char* value, int* length )
{
// Presume an entity, and pull it out.
*length = 0;
if ( *(p+1) == '#' && *(p+2) ) {
unsigned long ucs = 0;
TIXMLASSERT( sizeof( ucs ) >= 4 );
ptrdiff_t delta = 0;
unsigned mult = 1;
static const char SEMICOLON = ';';
if ( *(p+2) == 'x' ) {
// Hexadecimal.
const char* q = p+3;
if ( !(*q) ) {
return 0;
}
q = strchr( q, SEMICOLON );
if ( !q ) {
return 0;
}
TIXMLASSERT( *q == SEMICOLON );
delta = q-p;
--q;
while ( *q != 'x' ) {
unsigned int digit = 0;
if ( *q >= '0' && *q <= '9' ) {
digit = *q - '0';
}
else if ( *q >= 'a' && *q <= 'f' ) {
digit = *q - 'a' + 10;
}
else if ( *q >= 'A' && *q <= 'F' ) {
digit = *q - 'A' + 10;
}
else {
return 0;
}
TIXMLASSERT( digit < 16 );
TIXMLASSERT( digit == 0 || mult <= UINT_MAX / digit );
const unsigned int digitScaled = mult * digit;
TIXMLASSERT( ucs <= ULONG_MAX - digitScaled );
ucs += digitScaled;
TIXMLASSERT( mult <= UINT_MAX / 16 );
mult *= 16;
--q;
}
}
else {
// Decimal.
const char* q = p+2;
if ( !(*q) ) {
return 0;
}
q = strchr( q, SEMICOLON );
if ( !q ) {
return 0;
}
TIXMLASSERT( *q == SEMICOLON );
delta = q-p;
--q;
while ( *q != '#' ) {
if ( *q >= '0' && *q <= '9' ) {
const unsigned int digit = *q - '0';
TIXMLASSERT( digit < 10 );
TIXMLASSERT( digit == 0 || mult <= UINT_MAX / digit );
const unsigned int digitScaled = mult * digit;
TIXMLASSERT( ucs <= ULONG_MAX - digitScaled );
ucs += digitScaled;
}
else {
return 0;
}
TIXMLASSERT( mult <= UINT_MAX / 10 );
mult *= 10;
--q;
}
}
// convert the UCS to UTF-8
ConvertUTF32ToUTF8( ucs, value, length );
return p + delta + 1;
}
return p+1;
}
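// Example (illustrative): if p points at the reference "&#x4E2D;", the hex
// branch computes ucs == 0x4E2D, writes its 3-byte UTF-8 encoding into
// 'value' (with *length == 3), and returns the character just past the ';'.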
void XMLUtil::ToStr( int v, char* buffer, int bufferSize )
{
TIXML_SNPRINTF( buffer, bufferSize, "%d", v );
}
void XMLUtil::ToStr( unsigned v, char* buffer, int bufferSize )
{
TIXML_SNPRINTF( buffer, bufferSize, "%u", v );
}
void XMLUtil::ToStr( bool v, char* buffer, int bufferSize )
{
TIXML_SNPRINTF( buffer, bufferSize, "%s", v ? writeBoolTrue : writeBoolFalse);
}
/*
ToStr() of a number is a very tricky topic.
https://github.com/leethomason/tinyxml2/issues/106
*/
void XMLUtil::ToStr( float v, char* buffer, int bufferSize )
{
TIXML_SNPRINTF( buffer, bufferSize, "%.8g", v );
}
void XMLUtil::ToStr( double v, char* buffer, int bufferSize )
{
TIXML_SNPRINTF( buffer, bufferSize, "%.17g", v );
}
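// Note: 17 significant digits (%.17g) are enough to round-trip any IEEE-754
// double through text; the 8 digits used for float are one short of the 9
// needed for a guaranteed float round-trip, presumably trading exactness for
// shorter output.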
void XMLUtil::ToStr( int64_t v, char* buffer, int bufferSize )
{
// horrible syntax trick to make the compiler happy about %lld
TIXML_SNPRINTF(buffer, bufferSize, "%lld", static_cast<long long>(v));
}
void XMLUtil::ToStr( uint64_t v, char* buffer, int bufferSize )
{
// horrible syntax trick to make the compiler happy about %llu
TIXML_SNPRINTF(buffer, bufferSize, "%llu", static_cast<unsigned long long>(v));
}
bool XMLUtil::ToInt(const char* str, int* value)
{
if (IsPrefixHex(str)) {
unsigned v;
if (TIXML_SSCANF(str, "%x", &v) == 1) {
*value = static_cast<int>(v);
return true;
}
}
else {
if (TIXML_SSCANF(str, "%d", value) == 1) {
return true;
}
}
return false;
}
bool XMLUtil::ToUnsigned(const char* str, unsigned* value)
{
if (TIXML_SSCANF(str, IsPrefixHex(str) ? "%x" : "%u", value) == 1) {
return true;
}
return false;
}
bool XMLUtil::ToBool( const char* str, bool* value )
{
int ival = 0;
if ( ToInt( str, &ival )) {
*value = (ival==0) ? false : true;
return true;
}
static const char* TRUE_VALS[] = { "true", "True", "TRUE", 0 };
static const char* FALSE_VALS[] = { "false", "False", "FALSE", 0 };
for (int i = 0; TRUE_VALS[i]; ++i) {
if (StringEqual(str, TRUE_VALS[i])) {
*value = true;
return true;
}
}
for (int i = 0; FALSE_VALS[i]; ++i) {
if (StringEqual(str, FALSE_VALS[i])) {
*value = false;
return true;
}
}
return false;
}
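// Examples (illustrative): ToBool("1", &b) and ToBool("True", &b) both set
// b = true; ToBool("0", &b) and ToBool("FALSE", &b) both set b = false.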
bool XMLUtil::ToFloat( const char* str, float* value )
{
if ( TIXML_SSCANF( str, "%f", value ) == 1 ) {
return true;
}
return false;
}
bool XMLUtil::ToDouble( const char* str, double* value )
{
if ( TIXML_SSCANF( str, "%lf", value ) == 1 ) {
return true;
}
return false;
}
bool XMLUtil::ToInt64(const char* str, int64_t* value)
{
if (IsPrefixHex(str)) {
unsigned long long v = 0; // horrible syntax trick to make the compiler happy about %llx
if (TIXML_SSCANF(str, "%llx", &v) == 1) {
*value = static_cast<int64_t>(v);
return true;
}
}
else {
long long v = 0; // horrible syntax trick to make the compiler happy about %lld
if (TIXML_SSCANF(str, "%lld", &v) == 1) {
*value = static_cast<int64_t>(v);
return true;
}
}
return false;
}
bool XMLUtil::ToUnsigned64(const char* str, uint64_t* value) {
unsigned long long v = 0; // horrible syntax trick to make the compiler happy about %llu
if(TIXML_SSCANF(str, IsPrefixHex(str) ? "%llx" : "%llu", &v) == 1) {
*value = (uint64_t)v;
return true;
}
return false;
}
char* XMLDocument::Identify( char* p, XMLNode** node )
{
TIXMLASSERT( node );
TIXMLASSERT( p );
char* const start = p;
int const startLine = _parseCurLineNum;
p = XMLUtil::SkipWhiteSpace( p, &_parseCurLineNum );
if( !*p ) {
*node = 0;
TIXMLASSERT( p );
return p;
}
// These strings define the matching patterns:
static const char* xmlHeader = { "<?" };
static const char* commentHeader = { "<!--" };
static const char* cdataHeader = { "<![CDATA[" };
static const char* dtdHeader = { "<!" };
static const char* elementHeader = { "<" }; // and a header for everything else; check last.
static const int xmlHeaderLen = 2;
static const int commentHeaderLen = 4;
static const int cdataHeaderLen = 9;
static const int dtdHeaderLen = 2;
static const int elementHeaderLen = 1;
TIXMLASSERT( sizeof( XMLComment ) == sizeof( XMLUnknown ) ); // use same memory pool
TIXMLASSERT( sizeof( XMLComment ) == sizeof( XMLDeclaration ) ); // use same memory pool
XMLNode* returnNode = 0;
if ( XMLUtil::StringEqual( p, xmlHeader, xmlHeaderLen ) ) {
returnNode = CreateUnlinkedNode<XMLDeclaration>( _commentPool );
returnNode->_parseLineNum = _parseCurLineNum;
p += xmlHeaderLen;
}
else if ( XMLUtil::StringEqual( p, commentHeader, commentHeaderLen ) ) {
returnNode = CreateUnlinkedNode<XMLComment>( _commentPool );
returnNode->_parseLineNum = _parseCurLineNum;
p += commentHeaderLen;
}
else if ( XMLUtil::StringEqual( p, cdataHeader, cdataHeaderLen ) ) {
XMLText* text = CreateUnlinkedNode<XMLText>( _textPool );
returnNode = text;
returnNode->_parseLineNum = _parseCurLineNum;
p += cdataHeaderLen;
text->SetCData( true );
}
else if ( XMLUtil::StringEqual( p, dtdHeader, dtdHeaderLen ) ) {
returnNode = CreateUnlinkedNode<XMLUnknown>( _commentPool );
returnNode->_parseLineNum = _parseCurLineNum;
p += dtdHeaderLen;
}
else if ( XMLUtil::StringEqual( p, elementHeader, elementHeaderLen ) ) {
returnNode = CreateUnlinkedNode<XMLElement>( _elementPool );
returnNode->_parseLineNum = _parseCurLineNum;
p += elementHeaderLen;
}
else {
returnNode = CreateUnlinkedNode<XMLText>( _textPool );
returnNode->_parseLineNum = _parseCurLineNum; // Report line of first non-whitespace character
p = start; // Back it up, all the text counts.
_parseCurLineNum = startLine;
}
TIXMLASSERT( returnNode );
TIXMLASSERT( p );
*node = returnNode;
return p;
}
bool XMLDocument::Accept( XMLVisitor* visitor ) const
{
TIXMLASSERT( visitor );
if ( visitor->VisitEnter( *this ) ) {
for ( const XMLNode* node=FirstChild(); node; node=node->NextSibling() ) {
if ( !node->Accept( visitor ) ) {
break;
}
}
}
return visitor->VisitExit( *this );
}
// --------- XMLNode ----------- //
XMLNode::XMLNode( XMLDocument* doc ) :
_document( doc ),
_parent( 0 ),
_value(),
_parseLineNum( 0 ),
_firstChild( 0 ), _lastChild( 0 ),
_prev( 0 ), _next( 0 ),
_userData( 0 ),
_memPool( 0 )
{
}
XMLNode::~XMLNode()
{
DeleteChildren();
if ( _parent ) {
_parent->Unlink( this );
}
}
const char* XMLNode::Value() const
{
// Edge case: XMLDocuments don't have a Value. Return null.
if ( this->ToDocument() )
return 0;
return _value.GetStr();
}
void XMLNode::SetValue( const char* str, bool staticMem )
{
if ( staticMem ) {
_value.SetInternedStr( str );
}
else {
_value.SetStr( str );
}
}
XMLNode* XMLNode::DeepClone(XMLDocument* target) const
{
XMLNode* clone = this->ShallowClone(target);
if (!clone) return 0;
for (const XMLNode* child = this->FirstChild(); child; child = child->NextSibling()) {
XMLNode* childClone = child->DeepClone(target);
TIXMLASSERT(childClone);
clone->InsertEndChild(childClone);
}
return clone;
}
void XMLNode::DeleteChildren()
{
while( _firstChild ) {
TIXMLASSERT( _lastChild );
DeleteChild( _firstChild );
}
_firstChild = _lastChild = 0;
}
void XMLNode::Unlink( XMLNode* child )
{
TIXMLASSERT( child );
TIXMLASSERT( child->_document == _document );
TIXMLASSERT( child->_parent == this );
if ( child == _firstChild ) {
_firstChild = _firstChild->_next;
}
if ( child == _lastChild ) {
_lastChild = _lastChild->_prev;
}
if ( child->_prev ) {
child->_prev->_next = child->_next;
}
if ( child->_next ) {
child->_next->_prev = child->_prev;
}
child->_next = 0;
child->_prev = 0;
child->_parent = 0;
}
void XMLNode::DeleteChild( XMLNode* node )
{
TIXMLASSERT( node );
TIXMLASSERT( node->_document == _document );
TIXMLASSERT( node->_parent == this );
Unlink( node );
TIXMLASSERT(node->_prev == 0);
TIXMLASSERT(node->_next == 0);
TIXMLASSERT(node->_parent == 0);
DeleteNode( node );
}
XMLNode* XMLNode::InsertEndChild( XMLNode* addThis )
{
TIXMLASSERT( addThis );
if ( addThis->_document != _document ) {
TIXMLASSERT( false );
return 0;
}
InsertChildPreamble( addThis );
if ( _lastChild ) {
TIXMLASSERT( _firstChild );
TIXMLASSERT( _lastChild->_next == 0 );
_lastChild->_next = addThis;
addThis->_prev = _lastChild;
_lastChild = addThis;
addThis->_next = 0;
}
else {
TIXMLASSERT( _firstChild == 0 );
_firstChild = _lastChild = addThis;
addThis->_prev = 0;
addThis->_next = 0;
}
addThis->_parent = this;
return addThis;
}
XMLNode* XMLNode::InsertFirstChild( XMLNode* addThis )
{
TIXMLASSERT( addThis );
if ( addThis->_document != _document ) {
TIXMLASSERT( false );
return 0;
}
InsertChildPreamble( addThis );
if ( _firstChild ) {
TIXMLASSERT( _lastChild );
TIXMLASSERT( _firstChild->_prev == 0 );
_firstChild->_prev = addThis;
addThis->_next = _firstChild;
_firstChild = addThis;
addThis->_prev = 0;
}
else {
TIXMLASSERT( _lastChild == 0 );
_firstChild = _lastChild = addThis;
addThis->_prev = 0;
addThis->_next = 0;
}
addThis->_parent = this;
return addThis;
}
XMLNode* XMLNode::InsertAfterChild( XMLNode* afterThis, XMLNode* addThis )
{
TIXMLASSERT( addThis );
if ( addThis->_document != _document ) {
TIXMLASSERT( false );
return 0;
}
TIXMLASSERT( afterThis );
if ( afterThis->_parent != this ) {
TIXMLASSERT( false );
return 0;
}
if ( afterThis == addThis ) {
// Current state: BeforeThis -> AddThis -> OneAfterAddThis
// Now AddThis must disappear from its location and then
// reappear between BeforeThis and OneAfterAddThis.
// So just leave it where it is.
return addThis;
}
if ( afterThis->_next == 0 ) {
// The last node or the only node.
return InsertEndChild( addThis );
}
InsertChildPreamble( addThis );
addThis->_prev = afterThis;
addThis->_next = afterThis->_next;
afterThis->_next->_prev = addThis;
afterThis->_next = addThis;
addThis->_parent = this;
return addThis;
}
const XMLElement* XMLNode::FirstChildElement( const char* name ) const
{
for( const XMLNode* node = _firstChild; node; node = node->_next ) {
const XMLElement* element = node->ToElementWithName( name );
if ( element ) {
return element;
}
}
return 0;
}
const XMLElement* XMLNode::LastChildElement( const char* name ) const
{
for( const XMLNode* node = _lastChild; node; node = node->_prev ) {
const XMLElement* element = node->ToElementWithName( name );
if ( element ) {
return element;
}
}
return 0;
}
const XMLElement* XMLNode::NextSiblingElement( const char* name ) const
{
for( const XMLNode* node = _next; node; node = node->_next ) {
const XMLElement* element = node->ToElementWithName( name );
if ( element ) {
return element;
}
}
return 0;
}
const XMLElement* XMLNode::PreviousSiblingElement( const char* name ) const
{
for( const XMLNode* node = _prev; node; node = node->_prev ) {
const XMLElement* element = node->ToElementWithName( name );
if ( element ) {
return element;
}
}
return 0;
}
char* XMLNode::ParseDeep( char* p, StrPair* parentEndTag, int* curLineNumPtr )
{
// This is a recursive method, but thinking about it "at the current level"
// it is a pretty simple flat list:
// <foo/>
// <!-- comment -->
//
// With a special case:
// <foo>
// </foo>
// <!-- comment -->
//
// Where the closing element (/foo) *must* be the next thing after the opening
// element, and the names must match. BUT the tricky bit is that the closing
// element will be read by the child.
//
// 'endTag' is the end tag for this node, it is returned by a call to a child.
// 'parentEnd' is the end tag for the parent, which is filled in and returned.
XMLDocument::DepthTracker tracker(_document);
if (_document->Error())
return 0;
while( p && *p ) {
XMLNode* node = 0;
p = _document->Identify( p, &node );
TIXMLASSERT( p );
if ( node == 0 ) {
break;
}
const int initialLineNum = node->_parseLineNum;
StrPair endTag;
p = node->ParseDeep( p, &endTag, curLineNumPtr );
if ( !p ) {
_document->DeleteNode( node );
if ( !_document->Error() ) {
_document->SetError( XML_ERROR_PARSING, initialLineNum, 0);
}
break;
}
const XMLDeclaration* const decl = node->ToDeclaration();
if ( decl ) {
// Declarations are only allowed at document level
//
// Multiple declarations are allowed but all declarations
// must occur before anything else.
//
// Optimized due to a security test case. If the first node is
// a declaration, and the last node is a declaration, then only
// declarations have so far been added.
bool wellLocated = false;
if (ToDocument()) {
if (FirstChild()) {
wellLocated =
FirstChild() &&
FirstChild()->ToDeclaration() &&
LastChild() &&
LastChild()->ToDeclaration();
}
else {
wellLocated = true;
}
}
if ( !wellLocated ) {
_document->SetError( XML_ERROR_PARSING_DECLARATION, initialLineNum, "XMLDeclaration value=%s", decl->Value());
_document->DeleteNode( node );
break;
}
}
XMLElement* ele = node->ToElement();
if ( ele ) {
// We read the end tag. Return it to the parent.
if ( ele->ClosingType() == XMLElement::CLOSING ) {
if ( parentEndTag ) {
ele->_value.TransferTo( parentEndTag );
}
node->_memPool->SetTracked(); // created and then immediately deleted.
DeleteNode( node );
return p;
}
// Handle an end tag returned to this level.
// And handle a bunch of annoying errors.
bool mismatch = false;
if ( endTag.Empty() ) {
if ( ele->ClosingType() == XMLElement::OPEN ) {
mismatch = true;
}
}
else {
if ( ele->ClosingType() != XMLElement::OPEN ) {
mismatch = true;
}
else if ( !XMLUtil::StringEqual( endTag.GetStr(), ele->Name() ) ) {
mismatch = true;
}
}
if ( mismatch ) {
_document->SetError( XML_ERROR_MISMATCHED_ELEMENT, initialLineNum, "XMLElement name=%s", ele->Name());
_document->DeleteNode( node );
break;
}
}
InsertEndChild( node );
}
return 0;
}
/*static*/ void XMLNode::DeleteNode( XMLNode* node )
{
if ( node == 0 ) {
return;
}
TIXMLASSERT(node->_document);
if (!node->ToDocument()) {
node->_document->MarkInUse(node);
}
MemPool* pool = node->_memPool;
node->~XMLNode();
pool->Free( node );
}
void XMLNode::InsertChildPreamble( XMLNode* insertThis ) const
{
TIXMLASSERT( insertThis );
TIXMLASSERT( insertThis->_document == _document );
if (insertThis->_parent) {
insertThis->_parent->Unlink( insertThis );
}
else {
insertThis->_document->MarkInUse(insertThis);
insertThis->_memPool->SetTracked();
}
}
const XMLElement* XMLNode::ToElementWithName( const char* name ) const
{
const XMLElement* element = this->ToElement();
if ( element == 0 ) {
return 0;
}
if ( name == 0 ) {
return element;
}
if ( XMLUtil::StringEqual( element->Name(), name ) ) {
return element;
}
return 0;
}
// --------- XMLText ---------- //
char* XMLText::ParseDeep( char* p, StrPair*, int* curLineNumPtr )
{
if ( this->CData() ) {
p = _value.ParseText( p, "]]>", StrPair::NEEDS_NEWLINE_NORMALIZATION, curLineNumPtr );
if ( !p ) {
_document->SetError( XML_ERROR_PARSING_CDATA, _parseLineNum, 0 );
}
return p;
}
else {
int flags = _document->ProcessEntities() ? StrPair::TEXT_ELEMENT : StrPair::TEXT_ELEMENT_LEAVE_ENTITIES;
if ( _document->WhitespaceMode() == COLLAPSE_WHITESPACE ) {
flags |= StrPair::NEEDS_WHITESPACE_COLLAPSING;
}
p = _value.ParseText( p, "<", flags, curLineNumPtr );
if ( p && *p ) {
return p-1;
}
if ( !p ) {
_document->SetError( XML_ERROR_PARSING_TEXT, _parseLineNum, 0 );
}
}
return 0;
}
XMLNode* XMLText::ShallowClone( XMLDocument* doc ) const
{
if ( !doc ) {
doc = _document;
}
XMLText* text = doc->NewText( Value() ); // fixme: this will always allocate memory. Intern?
text->SetCData( this->CData() );
return text;
}
bool XMLText::ShallowEqual( const XMLNode* compare ) const
{
TIXMLASSERT( compare );
const XMLText* text = compare->ToText();
return ( text && XMLUtil::StringEqual( text->Value(), Value() ) );
}
bool XMLText::Accept( XMLVisitor* visitor ) const
{
TIXMLASSERT( visitor );
return visitor->Visit( *this );
}
// --------- XMLComment ---------- //
XMLComment::XMLComment( XMLDocument* doc ) : XMLNode( doc )
{
}
XMLComment::~XMLComment()
{
}
char* XMLComment::ParseDeep( char* p, StrPair*, int* curLineNumPtr )
{
// Comment parses as text.
p = _value.ParseText( p, "-->", StrPair::COMMENT, curLineNumPtr );
if ( p == 0 ) {
_document->SetError( XML_ERROR_PARSING_COMMENT, _parseLineNum, 0 );
}
return p;
}
XMLNode* XMLComment::ShallowClone( XMLDocument* doc ) const
{
if ( !doc ) {
doc = _document;
}
XMLComment* comment = doc->NewComment( Value() ); // fixme: this will always allocate memory. Intern?
return comment;
}
bool XMLComment::ShallowEqual( const XMLNode* compare ) const
{
TIXMLASSERT( compare );
const XMLComment* comment = compare->ToComment();
return ( comment && XMLUtil::StringEqual( comment->Value(), Value() ));
}
bool XMLComment::Accept( XMLVisitor* visitor ) const
{
TIXMLASSERT( visitor );
return visitor->Visit( *this );
}
// --------- XMLDeclaration ---------- //
XMLDeclaration::XMLDeclaration( XMLDocument* doc ) : XMLNode( doc )
{
}
XMLDeclaration::~XMLDeclaration()
{
//printf( "~XMLDeclaration\n" );
}
char* XMLDeclaration::ParseDeep( char* p, StrPair*, int* curLineNumPtr )
{
// Declaration parses as text.
p = _value.ParseText( p, "?>", StrPair::NEEDS_NEWLINE_NORMALIZATION, curLineNumPtr );
if ( p == 0 ) {
_document->SetError( XML_ERROR_PARSING_DECLARATION, _parseLineNum, 0 );
}
return p;
}
XMLNode* XMLDeclaration::ShallowClone( XMLDocument* doc ) const
{
if ( !doc ) {
doc = _document;
}
XMLDeclaration* dec = doc->NewDeclaration( Value() ); // fixme: this will always allocate memory. Intern?
return dec;
}
bool XMLDeclaration::ShallowEqual( const XMLNode* compare ) const
{
TIXMLASSERT( compare );
const XMLDeclaration* declaration = compare->ToDeclaration();
return ( declaration && XMLUtil::StringEqual( declaration->Value(), Value() ));
}
bool XMLDeclaration::Accept( XMLVisitor* visitor ) const
{
TIXMLASSERT( visitor );
return visitor->Visit( *this );
}
// --------- XMLUnknown ---------- //
XMLUnknown::XMLUnknown( XMLDocument* doc ) : XMLNode( doc )
{
}
XMLUnknown::~XMLUnknown()
{
}
char* XMLUnknown::ParseDeep( char* p, StrPair*, int* curLineNumPtr )
{
// Unknown parses as text.
p = _value.ParseText( p, ">", StrPair::NEEDS_NEWLINE_NORMALIZATION, curLineNumPtr );
if ( !p ) {
_document->SetError( XML_ERROR_PARSING_UNKNOWN, _parseLineNum, 0 );
}
return p;
}
XMLNode* XMLUnknown::ShallowClone( XMLDocument* doc ) const
{
if ( !doc ) {
doc = _document;
}
XMLUnknown* text = doc->NewUnknown( Value() ); // fixme: this will always allocate memory. Intern?
return text;
}
bool XMLUnknown::ShallowEqual( const XMLNode* compare ) const
{
TIXMLASSERT( compare );
const XMLUnknown* unknown = compare->ToUnknown();
return ( unknown && XMLUtil::StringEqual( unknown->Value(), Value() ));
}
bool XMLUnknown::Accept( XMLVisitor* visitor ) const
{
TIXMLASSERT( visitor );
return visitor->Visit( *this );
}
// --------- XMLAttribute ---------- //
const char* XMLAttribute::Name() const
{
return _name.GetStr();
}
const char* XMLAttribute::Value() const
{
return _value.GetStr();
}
char* XMLAttribute::ParseDeep( char* p, bool processEntities, int* curLineNumPtr )
{
// Parse using the name rules: bug fix, was using ParseText before
p = _name.ParseName( p );
if ( !p || !*p ) {
return 0;
}
// Skip white space before =
p = XMLUtil::SkipWhiteSpace( p, curLineNumPtr );
if ( *p != '=' ) {
return 0;
}
++p; // move up to opening quote
p = XMLUtil::SkipWhiteSpace( p, curLineNumPtr );
if ( *p != '\"' && *p != '\'' ) {
return 0;
}
const char endTag[2] = { *p, 0 };
++p; // move past opening quote
p = _value.ParseText( p, endTag, processEntities ? StrPair::ATTRIBUTE_VALUE : StrPair::ATTRIBUTE_VALUE_LEAVE_ENTITIES, curLineNumPtr );
return p;
}
void XMLAttribute::SetName( const char* n )
{
_name.SetStr( n );
}
XMLError XMLAttribute::QueryIntValue( int* value ) const
{
if ( XMLUtil::ToInt( Value(), value )) {
return XML_SUCCESS;
}
return XML_WRONG_ATTRIBUTE_TYPE;
}
XMLError XMLAttribute::QueryUnsignedValue( unsigned int* value ) const
{
if ( XMLUtil::ToUnsigned( Value(), value )) {
return XML_SUCCESS;
}
return XML_WRONG_ATTRIBUTE_TYPE;
}
XMLError XMLAttribute::QueryInt64Value(int64_t* value) const
{
if (XMLUtil::ToInt64(Value(), value)) {
return XML_SUCCESS;
}
return XML_WRONG_ATTRIBUTE_TYPE;
}
XMLError XMLAttribute::QueryUnsigned64Value(uint64_t* value) const
{
if(XMLUtil::ToUnsigned64(Value(), value)) {
return XML_SUCCESS;
}
return XML_WRONG_ATTRIBUTE_TYPE;
}
XMLError XMLAttribute::QueryBoolValue( bool* value ) const
{
if ( XMLUtil::ToBool( Value(), value )) {
return XML_SUCCESS;
}
return XML_WRONG_ATTRIBUTE_TYPE;
}
XMLError XMLAttribute::QueryFloatValue( float* value ) const
{
if ( XMLUtil::ToFloat( Value(), value )) {
return XML_SUCCESS;
}
return XML_WRONG_ATTRIBUTE_TYPE;
}
XMLError XMLAttribute::QueryDoubleValue( double* value ) const
{
if ( XMLUtil::ToDouble( Value(), value )) {
return XML_SUCCESS;
}
return XML_WRONG_ATTRIBUTE_TYPE;
}
void XMLAttribute::SetAttribute( const char* v )
{
_value.SetStr( v );
}
void XMLAttribute::SetAttribute( int v )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( v, buf, BUF_SIZE );
_value.SetStr( buf );
}
void XMLAttribute::SetAttribute( unsigned v )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( v, buf, BUF_SIZE );
_value.SetStr( buf );
}
void XMLAttribute::SetAttribute(int64_t v)
{
char buf[BUF_SIZE];
XMLUtil::ToStr(v, buf, BUF_SIZE);
_value.SetStr(buf);
}
void XMLAttribute::SetAttribute(uint64_t v)
{
char buf[BUF_SIZE];
XMLUtil::ToStr(v, buf, BUF_SIZE);
_value.SetStr(buf);
}
void XMLAttribute::SetAttribute( bool v )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( v, buf, BUF_SIZE );
_value.SetStr( buf );
}
void XMLAttribute::SetAttribute( double v )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( v, buf, BUF_SIZE );
_value.SetStr( buf );
}
void XMLAttribute::SetAttribute( float v )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( v, buf, BUF_SIZE );
_value.SetStr( buf );
}
// --------- XMLElement ---------- //
XMLElement::XMLElement( XMLDocument* doc ) : XMLNode( doc ),
_closingType( OPEN ),
_rootAttribute( 0 )
{
}
XMLElement::~XMLElement()
{
while( _rootAttribute ) {
XMLAttribute* next = _rootAttribute->_next;
DeleteAttribute( _rootAttribute );
_rootAttribute = next;
}
}
const XMLAttribute* XMLElement::FindAttribute( const char* name ) const
{
for( XMLAttribute* a = _rootAttribute; a; a = a->_next ) {
if ( XMLUtil::StringEqual( a->Name(), name ) ) {
return a;
}
}
return 0;
}
const char* XMLElement::Attribute( const char* name, const char* value ) const
{
const XMLAttribute* a = FindAttribute( name );
if ( !a ) {
return 0;
}
if ( !value || XMLUtil::StringEqual( a->Value(), value )) {
return a->Value();
}
return 0;
}
int XMLElement::IntAttribute(const char* name, int defaultValue) const
{
int i = defaultValue;
QueryIntAttribute(name, &i);
return i;
}
unsigned XMLElement::UnsignedAttribute(const char* name, unsigned defaultValue) const
{
unsigned i = defaultValue;
QueryUnsignedAttribute(name, &i);
return i;
}
int64_t XMLElement::Int64Attribute(const char* name, int64_t defaultValue) const
{
int64_t i = defaultValue;
QueryInt64Attribute(name, &i);
return i;
}
uint64_t XMLElement::Unsigned64Attribute(const char* name, uint64_t defaultValue) const
{
uint64_t i = defaultValue;
QueryUnsigned64Attribute(name, &i);
return i;
}
bool XMLElement::BoolAttribute(const char* name, bool defaultValue) const
{
bool b = defaultValue;
QueryBoolAttribute(name, &b);
return b;
}
double XMLElement::DoubleAttribute(const char* name, double defaultValue) const
{
double d = defaultValue;
QueryDoubleAttribute(name, &d);
return d;
}
float XMLElement::FloatAttribute(const char* name, float defaultValue) const
{
float f = defaultValue;
QueryFloatAttribute(name, &f);
return f;
}
const char* XMLElement::GetText() const
{
/* skip comment node */
const XMLNode* node = FirstChild();
while (node) {
if (node->ToComment()) {
node = node->NextSibling();
continue;
}
break;
}
if ( node && node->ToText() ) {
return node->Value();
}
return 0;
}
void XMLElement::SetText( const char* inText )
{
if ( FirstChild() && FirstChild()->ToText() )
FirstChild()->SetValue( inText );
else {
XMLText* theText = GetDocument()->NewText( inText );
InsertFirstChild( theText );
}
}
void XMLElement::SetText( int v )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( v, buf, BUF_SIZE );
SetText( buf );
}
void XMLElement::SetText( unsigned v )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( v, buf, BUF_SIZE );
SetText( buf );
}
void XMLElement::SetText(int64_t v)
{
char buf[BUF_SIZE];
XMLUtil::ToStr(v, buf, BUF_SIZE);
SetText(buf);
}
void XMLElement::SetText(uint64_t v) {
char buf[BUF_SIZE];
XMLUtil::ToStr(v, buf, BUF_SIZE);
SetText(buf);
}
void XMLElement::SetText( bool v )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( v, buf, BUF_SIZE );
SetText( buf );
}
void XMLElement::SetText( float v )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( v, buf, BUF_SIZE );
SetText( buf );
}
void XMLElement::SetText( double v )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( v, buf, BUF_SIZE );
SetText( buf );
}
XMLError XMLElement::QueryIntText( int* ival ) const
{
if ( FirstChild() && FirstChild()->ToText() ) {
const char* t = FirstChild()->Value();
if ( XMLUtil::ToInt( t, ival ) ) {
return XML_SUCCESS;
}
return XML_CAN_NOT_CONVERT_TEXT;
}
return XML_NO_TEXT_NODE;
}
XMLError XMLElement::QueryUnsignedText( unsigned* uval ) const
{
if ( FirstChild() && FirstChild()->ToText() ) {
const char* t = FirstChild()->Value();
if ( XMLUtil::ToUnsigned( t, uval ) ) {
return XML_SUCCESS;
}
return XML_CAN_NOT_CONVERT_TEXT;
}
return XML_NO_TEXT_NODE;
}
XMLError XMLElement::QueryInt64Text(int64_t* ival) const
{
if (FirstChild() && FirstChild()->ToText()) {
const char* t = FirstChild()->Value();
if (XMLUtil::ToInt64(t, ival)) {
return XML_SUCCESS;
}
return XML_CAN_NOT_CONVERT_TEXT;
}
return XML_NO_TEXT_NODE;
}
XMLError XMLElement::QueryUnsigned64Text(uint64_t* uval) const
{
if(FirstChild() && FirstChild()->ToText()) {
const char* t = FirstChild()->Value();
if(XMLUtil::ToUnsigned64(t, uval)) {
return XML_SUCCESS;
}
return XML_CAN_NOT_CONVERT_TEXT;
}
return XML_NO_TEXT_NODE;
}
XMLError XMLElement::QueryBoolText( bool* bval ) const
{
if ( FirstChild() && FirstChild()->ToText() ) {
const char* t = FirstChild()->Value();
if ( XMLUtil::ToBool( t, bval ) ) {
return XML_SUCCESS;
}
return XML_CAN_NOT_CONVERT_TEXT;
}
return XML_NO_TEXT_NODE;
}
XMLError XMLElement::QueryDoubleText( double* dval ) const
{
if ( FirstChild() && FirstChild()->ToText() ) {
const char* t = FirstChild()->Value();
if ( XMLUtil::ToDouble( t, dval ) ) {
return XML_SUCCESS;
}
return XML_CAN_NOT_CONVERT_TEXT;
}
return XML_NO_TEXT_NODE;
}
XMLError XMLElement::QueryFloatText( float* fval ) const
{
if ( FirstChild() && FirstChild()->ToText() ) {
const char* t = FirstChild()->Value();
if ( XMLUtil::ToFloat( t, fval ) ) {
return XML_SUCCESS;
}
return XML_CAN_NOT_CONVERT_TEXT;
}
return XML_NO_TEXT_NODE;
}
int XMLElement::IntText(int defaultValue) const
{
int i = defaultValue;
QueryIntText(&i);
return i;
}
unsigned XMLElement::UnsignedText(unsigned defaultValue) const
{
unsigned i = defaultValue;
QueryUnsignedText(&i);
return i;
}
int64_t XMLElement::Int64Text(int64_t defaultValue) const
{
int64_t i = defaultValue;
QueryInt64Text(&i);
return i;
}
uint64_t XMLElement::Unsigned64Text(uint64_t defaultValue) const
{
uint64_t i = defaultValue;
QueryUnsigned64Text(&i);
return i;
}
bool XMLElement::BoolText(bool defaultValue) const
{
bool b = defaultValue;
QueryBoolText(&b);
return b;
}
double XMLElement::DoubleText(double defaultValue) const
{
double d = defaultValue;
QueryDoubleText(&d);
return d;
}
float XMLElement::FloatText(float defaultValue) const
{
float f = defaultValue;
QueryFloatText(&f);
return f;
}
XMLAttribute* XMLElement::FindOrCreateAttribute( const char* name )
{
XMLAttribute* last = 0;
XMLAttribute* attrib = 0;
for( attrib = _rootAttribute;
attrib;
last = attrib, attrib = attrib->_next ) {
if ( XMLUtil::StringEqual( attrib->Name(), name ) ) {
break;
}
}
if ( !attrib ) {
attrib = CreateAttribute();
TIXMLASSERT( attrib );
if ( last ) {
TIXMLASSERT( last->_next == 0 );
last->_next = attrib;
}
else {
TIXMLASSERT( _rootAttribute == 0 );
_rootAttribute = attrib;
}
attrib->SetName( name );
}
return attrib;
}
void XMLElement::DeleteAttribute( const char* name )
{
XMLAttribute* prev = 0;
for( XMLAttribute* a=_rootAttribute; a; a=a->_next ) {
if ( XMLUtil::StringEqual( name, a->Name() ) ) {
if ( prev ) {
prev->_next = a->_next;
}
else {
_rootAttribute = a->_next;
}
DeleteAttribute( a );
break;
}
prev = a;
}
}
char* XMLElement::ParseAttributes( char* p, int* curLineNumPtr )
{
XMLAttribute* prevAttribute = 0;
// Read the attributes.
while( p ) {
p = XMLUtil::SkipWhiteSpace( p, curLineNumPtr );
if ( !(*p) ) {
_document->SetError( XML_ERROR_PARSING_ELEMENT, _parseLineNum, "XMLElement name=%s", Name() );
return 0;
}
// attribute.
if (XMLUtil::IsNameStartChar( (unsigned char) *p ) ) {
XMLAttribute* attrib = CreateAttribute();
TIXMLASSERT( attrib );
attrib->_parseLineNum = _document->_parseCurLineNum;
const int attrLineNum = attrib->_parseLineNum;
p = attrib->ParseDeep( p, _document->ProcessEntities(), curLineNumPtr );
if ( !p || Attribute( attrib->Name() ) ) {
DeleteAttribute( attrib );
_document->SetError( XML_ERROR_PARSING_ATTRIBUTE, attrLineNum, "XMLElement name=%s", Name() );
return 0;
}
// Duplicated attributes in the source document are caught by the
// Attribute() lookup above. Tracking 'prevAttribute' here avoids
// re-scanning the attribute list when appending. Preferring performance
// for now, may reconsider in the future.
if ( prevAttribute ) {
TIXMLASSERT( prevAttribute->_next == 0 );
prevAttribute->_next = attrib;
}
else {
TIXMLASSERT( _rootAttribute == 0 );
_rootAttribute = attrib;
}
prevAttribute = attrib;
}
// end of the tag
else if ( *p == '>' ) {
++p;
break;
}
// end of the tag
else if ( *p == '/' && *(p+1) == '>' ) {
_closingType = CLOSED;
return p+2; // done; sealed element.
}
else {
_document->SetError( XML_ERROR_PARSING_ELEMENT, _parseLineNum, 0 );
return 0;
}
}
return p;
}
void XMLElement::DeleteAttribute( XMLAttribute* attribute )
{
if ( attribute == 0 ) {
return;
}
MemPool* pool = attribute->_memPool;
attribute->~XMLAttribute();
pool->Free( attribute );
}
XMLAttribute* XMLElement::CreateAttribute()
{
TIXMLASSERT( sizeof( XMLAttribute ) == _document->_attributePool.ItemSize() );
XMLAttribute* attrib = new (_document->_attributePool.Alloc() ) XMLAttribute();
TIXMLASSERT( attrib );
attrib->_memPool = &_document->_attributePool;
attrib->_memPool->SetTracked();
return attrib;
}
XMLElement* XMLElement::InsertNewChildElement(const char* name)
{
XMLElement* node = _document->NewElement(name);
return InsertEndChild(node) ? node : 0;
}
XMLComment* XMLElement::InsertNewComment(const char* comment)
{
XMLComment* node = _document->NewComment(comment);
return InsertEndChild(node) ? node : 0;
}
XMLText* XMLElement::InsertNewText(const char* text)
{
XMLText* node = _document->NewText(text);
return InsertEndChild(node) ? node : 0;
}
XMLDeclaration* XMLElement::InsertNewDeclaration(const char* text)
{
XMLDeclaration* node = _document->NewDeclaration(text);
return InsertEndChild(node) ? node : 0;
}
XMLUnknown* XMLElement::InsertNewUnknown(const char* text)
{
XMLUnknown* node = _document->NewUnknown(text);
return InsertEndChild(node) ? node : 0;
}
//
// <ele></ele>
// <ele>foo<b>bar</b></ele>
//
char* XMLElement::ParseDeep( char* p, StrPair* parentEndTag, int* curLineNumPtr )
{
// Read the element name.
p = XMLUtil::SkipWhiteSpace( p, curLineNumPtr );
// The closing element is the </element> form. It is
// parsed just like a regular element then deleted from
// the DOM.
if ( *p == '/' ) {
_closingType = CLOSING;
++p;
}
p = _value.ParseName( p );
if ( _value.Empty() ) {
return 0;
}
p = ParseAttributes( p, curLineNumPtr );
if ( !p || !*p || _closingType != OPEN ) {
return p;
}
p = XMLNode::ParseDeep( p, parentEndTag, curLineNumPtr );
return p;
}
XMLNode* XMLElement::ShallowClone( XMLDocument* doc ) const
{
if ( !doc ) {
doc = _document;
}
XMLElement* element = doc->NewElement( Value() ); // fixme: this will always allocate memory. Intern?
for( const XMLAttribute* a=FirstAttribute(); a; a=a->Next() ) {
element->SetAttribute( a->Name(), a->Value() ); // fixme: this will always allocate memory. Intern?
}
return element;
}
bool XMLElement::ShallowEqual( const XMLNode* compare ) const
{
TIXMLASSERT( compare );
const XMLElement* other = compare->ToElement();
if ( other && XMLUtil::StringEqual( other->Name(), Name() )) {
const XMLAttribute* a=FirstAttribute();
const XMLAttribute* b=other->FirstAttribute();
while ( a && b ) {
if ( !XMLUtil::StringEqual( a->Value(), b->Value() ) ) {
return false;
}
a = a->Next();
b = b->Next();
}
if ( a || b ) {
// different count
return false;
}
return true;
}
return false;
}
bool XMLElement::Accept( XMLVisitor* visitor ) const
{
TIXMLASSERT( visitor );
if ( visitor->VisitEnter( *this, _rootAttribute ) ) {
for ( const XMLNode* node=FirstChild(); node; node=node->NextSibling() ) {
if ( !node->Accept( visitor ) ) {
break;
}
}
}
return visitor->VisitExit( *this );
}
// --------- XMLDocument ----------- //
// Warning: List must match 'enum XMLError'
const char* XMLDocument::_errorNames[XML_ERROR_COUNT] = {
"XML_SUCCESS",
"XML_NO_ATTRIBUTE",
"XML_WRONG_ATTRIBUTE_TYPE",
"XML_ERROR_FILE_NOT_FOUND",
"XML_ERROR_FILE_COULD_NOT_BE_OPENED",
"XML_ERROR_FILE_READ_ERROR",
"XML_ERROR_PARSING_ELEMENT",
"XML_ERROR_PARSING_ATTRIBUTE",
"XML_ERROR_PARSING_TEXT",
"XML_ERROR_PARSING_CDATA",
"XML_ERROR_PARSING_COMMENT",
"XML_ERROR_PARSING_DECLARATION",
"XML_ERROR_PARSING_UNKNOWN",
"XML_ERROR_EMPTY_DOCUMENT",
"XML_ERROR_MISMATCHED_ELEMENT",
"XML_ERROR_PARSING",
"XML_CAN_NOT_CONVERT_TEXT",
"XML_NO_TEXT_NODE",
"XML_ELEMENT_DEPTH_EXCEEDED"
};
XMLDocument::XMLDocument( bool processEntities, Whitespace whitespaceMode ) :
XMLNode( 0 ),
_writeBOM( false ),
_processEntities( processEntities ),
_errorID(XML_SUCCESS),
_whitespaceMode( whitespaceMode ),
_errorStr(),
_errorLineNum( 0 ),
_charBuffer( 0 ),
_parseCurLineNum( 0 ),
_parsingDepth(0),
_unlinked(),
_elementPool(),
_attributePool(),
_textPool(),
_commentPool()
{
// avoid VC++ C4355 warning about 'this' in initializer list (C4355 is off by default in VS2012+)
_document = this;
}
XMLDocument::~XMLDocument()
{
Clear();
}
void XMLDocument::MarkInUse(const XMLNode* const node)
{
TIXMLASSERT(node);
TIXMLASSERT(node->_parent == 0);
for (int i = 0; i < _unlinked.Size(); ++i) {
if (node == _unlinked[i]) {
_unlinked.SwapRemove(i);
break;
}
}
}
void XMLDocument::Clear()
{
DeleteChildren();
while( _unlinked.Size()) {
DeleteNode(_unlinked[0]); // Will remove from _unlinked as part of delete.
}
#ifdef TINYXML2_DEBUG
const bool hadError = Error();
#endif
ClearError();
delete [] _charBuffer;
_charBuffer = 0;
_parsingDepth = 0;
#if 0
_textPool.Trace( "text" );
_elementPool.Trace( "element" );
_commentPool.Trace( "comment" );
_attributePool.Trace( "attribute" );
#endif
#ifdef TINYXML2_DEBUG
if ( !hadError ) {
TIXMLASSERT( _elementPool.CurrentAllocs() == _elementPool.Untracked() );
TIXMLASSERT( _attributePool.CurrentAllocs() == _attributePool.Untracked() );
TIXMLASSERT( _textPool.CurrentAllocs() == _textPool.Untracked() );
TIXMLASSERT( _commentPool.CurrentAllocs() == _commentPool.Untracked() );
}
#endif
}
void XMLDocument::DeepCopy(XMLDocument* target) const
{
TIXMLASSERT(target);
if (target == this) {
return; // technically success - a no-op.
}
target->Clear();
for (const XMLNode* node = this->FirstChild(); node; node = node->NextSibling()) {
target->InsertEndChild(node->DeepClone(target));
}
}
XMLElement* XMLDocument::NewElement( const char* name )
{
XMLElement* ele = CreateUnlinkedNode<XMLElement>( _elementPool );
ele->SetName( name );
return ele;
}
XMLComment* XMLDocument::NewComment( const char* str )
{
XMLComment* comment = CreateUnlinkedNode<XMLComment>( _commentPool );
comment->SetValue( str );
return comment;
}
XMLText* XMLDocument::NewText( const char* str )
{
XMLText* text = CreateUnlinkedNode<XMLText>( _textPool );
text->SetValue( str );
return text;
}
XMLDeclaration* XMLDocument::NewDeclaration( const char* str )
{
XMLDeclaration* dec = CreateUnlinkedNode<XMLDeclaration>( _commentPool );
dec->SetValue( str ? str : "xml version=\"1.0\" encoding=\"UTF-8\"" );
return dec;
}
XMLUnknown* XMLDocument::NewUnknown( const char* str )
{
XMLUnknown* unk = CreateUnlinkedNode<XMLUnknown>( _commentPool );
unk->SetValue( str );
return unk;
}
static FILE* callfopen( const char* filepath, const char* mode )
{
TIXMLASSERT( filepath );
TIXMLASSERT( mode );
#if defined(_MSC_VER) && (_MSC_VER >= 1400 ) && (!defined WINCE)
FILE* fp = 0;
const errno_t err = fopen_s( &fp, filepath, mode );
if ( err ) {
return 0;
}
#else
FILE* fp = fopen( filepath, mode );
#endif
return fp;
}
void XMLDocument::DeleteNode( XMLNode* node ) {
TIXMLASSERT( node );
TIXMLASSERT(node->_document == this );
if (node->_parent) {
node->_parent->DeleteChild( node );
}
else {
// Isn't in the tree.
// Use the parent delete.
// Also, we need to mark it tracked: we 'know'
// it was never used.
node->_memPool->SetTracked();
// Call the static XMLNode version:
XMLNode::DeleteNode(node);
}
}
XMLError XMLDocument::LoadFile( const char* filename )
{
if ( !filename ) {
TIXMLASSERT( false );
SetError( XML_ERROR_FILE_COULD_NOT_BE_OPENED, 0, "filename=<null>" );
return _errorID;
}
Clear();
FILE* fp = callfopen( filename, "rb" );
if ( !fp ) {
SetError( XML_ERROR_FILE_NOT_FOUND, 0, "filename=%s", filename );
return _errorID;
}
LoadFile( fp );
fclose( fp );
return _errorID;
}
XMLError XMLDocument::LoadFile( FILE* fp )
{
Clear();
TIXML_FSEEK( fp, 0, SEEK_SET );
if ( fgetc( fp ) == EOF && ferror( fp ) != 0 ) {
SetError( XML_ERROR_FILE_READ_ERROR, 0, 0 );
return _errorID;
}
TIXML_FSEEK( fp, 0, SEEK_END );
unsigned long long filelength;
{
const long long fileLengthSigned = TIXML_FTELL( fp );
TIXML_FSEEK( fp, 0, SEEK_SET );
if ( fileLengthSigned == -1L ) {
SetError( XML_ERROR_FILE_READ_ERROR, 0, 0 );
return _errorID;
}
TIXMLASSERT( fileLengthSigned >= 0 );
filelength = static_cast<unsigned long long>(fileLengthSigned);
}
const size_t maxSizeT = static_cast<size_t>(-1);
// We'll do the comparison as an unsigned long long, because that's guaranteed to be at
// least 8 bytes, even on a 32-bit platform.
if ( filelength >= static_cast<unsigned long long>(maxSizeT) ) {
// Cannot handle files which won't fit in buffer together with null terminator
SetError( XML_ERROR_FILE_READ_ERROR, 0, 0 );
return _errorID;
}
if ( filelength == 0 ) {
SetError( XML_ERROR_EMPTY_DOCUMENT, 0, 0 );
return _errorID;
}
const size_t size = static_cast<size_t>(filelength);
TIXMLASSERT( _charBuffer == 0 );
_charBuffer = new char[size+1];
const size_t read = fread( _charBuffer, 1, size, fp );
if ( read != size ) {
SetError( XML_ERROR_FILE_READ_ERROR, 0, 0 );
return _errorID;
}
_charBuffer[size] = 0;
Parse();
return _errorID;
}
XMLError XMLDocument::SaveFile( const char* filename, bool compact )
{
if ( !filename ) {
TIXMLASSERT( false );
SetError( XML_ERROR_FILE_COULD_NOT_BE_OPENED, 0, "filename=<null>" );
return _errorID;
}
FILE* fp = callfopen( filename, "w" );
if ( !fp ) {
SetError( XML_ERROR_FILE_COULD_NOT_BE_OPENED, 0, "filename=%s", filename );
return _errorID;
}
SaveFile(fp, compact);
fclose( fp );
return _errorID;
}
XMLError XMLDocument::SaveFile( FILE* fp, bool compact )
{
// Clear any error from the last save, otherwise it will get reported
// for *this* call.
ClearError();
XMLPrinter stream( fp, compact );
Print( &stream );
return _errorID;
}
XMLError XMLDocument::Parse( const char* xml, size_t nBytes )
{
Clear();
if ( nBytes == 0 || !xml || !*xml ) {
SetError( XML_ERROR_EMPTY_DOCUMENT, 0, 0 );
return _errorID;
}
if ( nBytes == static_cast<size_t>(-1) ) {
nBytes = strlen( xml );
}
TIXMLASSERT( _charBuffer == 0 );
_charBuffer = new char[ nBytes+1 ];
memcpy( _charBuffer, xml, nBytes );
_charBuffer[nBytes] = 0;
Parse();
if ( Error() ) {
        // Clean up what is now essentially dangling memory;
        // a failed parse can leave dead, inaccessible objects
        // in the pools.
DeleteChildren();
_elementPool.Clear();
_attributePool.Clear();
_textPool.Clear();
_commentPool.Clear();
}
return _errorID;
}
void XMLDocument::Print( XMLPrinter* streamer ) const
{
if ( streamer ) {
Accept( streamer );
}
else {
XMLPrinter stdoutStreamer( stdout );
Accept( &stdoutStreamer );
}
}
void XMLDocument::ClearError() {
_errorID = XML_SUCCESS;
_errorLineNum = 0;
_errorStr.Reset();
}
void XMLDocument::SetError( XMLError error, int lineNum, const char* format, ... )
{
TIXMLASSERT( error >= 0 && error < XML_ERROR_COUNT );
_errorID = error;
_errorLineNum = lineNum;
_errorStr.Reset();
const size_t BUFFER_SIZE = 1000;
char* buffer = new char[BUFFER_SIZE];
TIXMLASSERT(sizeof(error) <= sizeof(int));
TIXML_SNPRINTF(buffer, BUFFER_SIZE, "Error=%s ErrorID=%d (0x%x) Line number=%d", ErrorIDToName(error), int(error), int(error), lineNum);
if (format) {
size_t len = strlen(buffer);
TIXML_SNPRINTF(buffer + len, BUFFER_SIZE - len, ": ");
len = strlen(buffer);
va_list va;
va_start(va, format);
TIXML_VSNPRINTF(buffer + len, BUFFER_SIZE - len, format, va);
va_end(va);
}
_errorStr.SetStr(buffer);
delete[] buffer;
}
/*static*/ const char* XMLDocument::ErrorIDToName(XMLError errorID)
{
TIXMLASSERT( errorID >= 0 && errorID < XML_ERROR_COUNT );
const char* errorName = _errorNames[errorID];
TIXMLASSERT( errorName && errorName[0] );
return errorName;
}
const char* XMLDocument::ErrorStr() const
{
return _errorStr.Empty() ? "" : _errorStr.GetStr();
}
void XMLDocument::PrintError() const
{
printf("%s\n", ErrorStr());
}
const char* XMLDocument::ErrorName() const
{
return ErrorIDToName(_errorID);
}
void XMLDocument::Parse()
{
TIXMLASSERT( NoChildren() ); // Clear() must have been called previously
TIXMLASSERT( _charBuffer );
_parseCurLineNum = 1;
_parseLineNum = 1;
char* p = _charBuffer;
p = XMLUtil::SkipWhiteSpace( p, &_parseCurLineNum );
p = const_cast<char*>( XMLUtil::ReadBOM( p, &_writeBOM ) );
if ( !*p ) {
SetError( XML_ERROR_EMPTY_DOCUMENT, 0, 0 );
return;
}
ParseDeep(p, 0, &_parseCurLineNum );
}
void XMLDocument::PushDepth()
{
_parsingDepth++;
if (_parsingDepth == TINYXML2_MAX_ELEMENT_DEPTH) {
SetError(XML_ELEMENT_DEPTH_EXCEEDED, _parseCurLineNum, "Element nesting is too deep." );
}
}
void XMLDocument::PopDepth()
{
TIXMLASSERT(_parsingDepth > 0);
--_parsingDepth;
}
XMLPrinter::XMLPrinter( FILE* file, bool compact, int depth ) :
_elementJustOpened( false ),
_stack(),
_firstElement( true ),
_fp( file ),
_depth( depth ),
_textDepth( -1 ),
_processEntities( true ),
_compactMode( compact ),
_buffer()
{
for( int i=0; i<ENTITY_RANGE; ++i ) {
_entityFlag[i] = false;
_restrictedEntityFlag[i] = false;
}
for( int i=0; i<NUM_ENTITIES; ++i ) {
const char entityValue = entities[i].value;
const unsigned char flagIndex = static_cast<unsigned char>(entityValue);
TIXMLASSERT( flagIndex < ENTITY_RANGE );
_entityFlag[flagIndex] = true;
}
_restrictedEntityFlag[static_cast<unsigned char>('&')] = true;
_restrictedEntityFlag[static_cast<unsigned char>('<')] = true;
_restrictedEntityFlag[static_cast<unsigned char>('>')] = true; // not required, but consistency is nice
_buffer.Push( 0 );
}
void XMLPrinter::Print( const char* format, ... )
{
va_list va;
va_start( va, format );
if ( _fp ) {
vfprintf( _fp, format, va );
}
else {
const int len = TIXML_VSCPRINTF( format, va );
// Close out and re-start the va-args
va_end( va );
TIXMLASSERT( len >= 0 );
va_start( va, format );
TIXMLASSERT( _buffer.Size() > 0 && _buffer[_buffer.Size() - 1] == 0 );
char* p = _buffer.PushArr( len ) - 1; // back up over the null terminator.
TIXML_VSNPRINTF( p, len+1, format, va );
}
va_end( va );
}
void XMLPrinter::Write( const char* data, size_t size )
{
if ( _fp ) {
fwrite ( data , sizeof(char), size, _fp);
}
else {
char* p = _buffer.PushArr( static_cast<int>(size) ) - 1; // back up over the null terminator.
memcpy( p, data, size );
p[size] = 0;
}
}
void XMLPrinter::Putc( char ch )
{
if ( _fp ) {
fputc ( ch, _fp);
}
else {
char* p = _buffer.PushArr( sizeof(char) ) - 1; // back up over the null terminator.
p[0] = ch;
p[1] = 0;
}
}
void XMLPrinter::PrintSpace( int depth )
{
for( int i=0; i<depth; ++i ) {
Write( " " );
}
}
void XMLPrinter::PrintString( const char* p, bool restricted )
{
// Look for runs of bytes between entities to print.
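    // (Sketch of the scan: 'p' marks the start of the pending run while 'q'
    // scans ahead; when 'q' lands on an entity character, the run [p, q) is
    // flushed, the entity reference is emitted, and 'p' catches up to 'q'.)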
const char* q = p;
if ( _processEntities ) {
const bool* flag = restricted ? _restrictedEntityFlag : _entityFlag;
while ( *q ) {
TIXMLASSERT( p <= q );
// Remember, char is sometimes signed. (How many times has that bitten me?)
if ( *q > 0 && *q < ENTITY_RANGE ) {
// Check for entities. If one is found, flush
// the stream up until the entity, write the
// entity, and keep looking.
if ( flag[static_cast<unsigned char>(*q)] ) {
while ( p < q ) {
const size_t delta = q - p;
const int toPrint = ( INT_MAX < delta ) ? INT_MAX : static_cast<int>(delta);
Write( p, toPrint );
p += toPrint;
}
bool entityPatternPrinted = false;
for( int i=0; i<NUM_ENTITIES; ++i ) {
if ( entities[i].value == *q ) {
Putc( '&' );
Write( entities[i].pattern, entities[i].length );
Putc( ';' );
entityPatternPrinted = true;
break;
}
}
if ( !entityPatternPrinted ) {
// TIXMLASSERT( entityPatternPrinted ) causes gcc -Wunused-but-set-variable in release
TIXMLASSERT( false );
}
++p;
}
}
++q;
TIXMLASSERT( p <= q );
}
// Flush the remaining string. This will be the entire
// string if an entity wasn't found.
if ( p < q ) {
const size_t delta = q - p;
const int toPrint = ( INT_MAX < delta ) ? INT_MAX : static_cast<int>(delta);
Write( p, toPrint );
}
}
else {
Write( p );
}
}
void XMLPrinter::PushHeader( bool writeBOM, bool writeDec )
{
if ( writeBOM ) {
static const unsigned char bom[] = { TIXML_UTF_LEAD_0, TIXML_UTF_LEAD_1, TIXML_UTF_LEAD_2, 0 };
Write( reinterpret_cast< const char* >( bom ) );
}
if ( writeDec ) {
PushDeclaration( "xml version=\"1.0\"" );
}
}
void XMLPrinter::PrepareForNewNode( bool compactMode )
{
SealElementIfJustOpened();
if ( compactMode ) {
return;
}
if ( _firstElement ) {
PrintSpace (_depth);
} else if ( _textDepth < 0) {
Putc( '\n' );
PrintSpace( _depth );
}
_firstElement = false;
}
void XMLPrinter::OpenElement( const char* name, bool compactMode )
{
PrepareForNewNode( compactMode );
_stack.Push( name );
Write ( "<" );
Write ( name );
_elementJustOpened = true;
++_depth;
}
void XMLPrinter::PushAttribute( const char* name, const char* value )
{
TIXMLASSERT( _elementJustOpened );
Putc ( ' ' );
Write( name );
Write( "=\"" );
PrintString( value, false );
Putc ( '\"' );
}
void XMLPrinter::PushAttribute( const char* name, int v )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( v, buf, BUF_SIZE );
PushAttribute( name, buf );
}
void XMLPrinter::PushAttribute( const char* name, unsigned v )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( v, buf, BUF_SIZE );
PushAttribute( name, buf );
}
void XMLPrinter::PushAttribute(const char* name, int64_t v)
{
char buf[BUF_SIZE];
XMLUtil::ToStr(v, buf, BUF_SIZE);
PushAttribute(name, buf);
}
void XMLPrinter::PushAttribute(const char* name, uint64_t v)
{
char buf[BUF_SIZE];
XMLUtil::ToStr(v, buf, BUF_SIZE);
PushAttribute(name, buf);
}
void XMLPrinter::PushAttribute( const char* name, bool v )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( v, buf, BUF_SIZE );
PushAttribute( name, buf );
}
void XMLPrinter::PushAttribute( const char* name, double v )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( v, buf, BUF_SIZE );
PushAttribute( name, buf );
}
void XMLPrinter::CloseElement( bool compactMode )
{
--_depth;
const char* name = _stack.Pop();
if ( _elementJustOpened ) {
Write( "/>" );
}
else {
if ( _textDepth < 0 && !compactMode) {
Putc( '\n' );
PrintSpace( _depth );
}
Write ( "</" );
Write ( name );
Write ( ">" );
}
if ( _textDepth == _depth ) {
_textDepth = -1;
}
if ( _depth == 0 && !compactMode) {
Putc( '\n' );
}
_elementJustOpened = false;
}
void XMLPrinter::SealElementIfJustOpened()
{
if ( !_elementJustOpened ) {
return;
}
_elementJustOpened = false;
Putc( '>' );
}
void XMLPrinter::PushText( const char* text, bool cdata )
{
_textDepth = _depth-1;
SealElementIfJustOpened();
if ( cdata ) {
Write( "<![CDATA[" );
Write( text );
Write( "]]>" );
}
else {
PrintString( text, true );
}
}
void XMLPrinter::PushText( int64_t value )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( value, buf, BUF_SIZE );
PushText( buf, false );
}
void XMLPrinter::PushText( uint64_t value )
{
char buf[BUF_SIZE];
XMLUtil::ToStr(value, buf, BUF_SIZE);
PushText(buf, false);
}
void XMLPrinter::PushText( int value )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( value, buf, BUF_SIZE );
PushText( buf, false );
}
void XMLPrinter::PushText( unsigned value )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( value, buf, BUF_SIZE );
PushText( buf, false );
}
void XMLPrinter::PushText( bool value )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( value, buf, BUF_SIZE );
PushText( buf, false );
}
void XMLPrinter::PushText( float value )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( value, buf, BUF_SIZE );
PushText( buf, false );
}
void XMLPrinter::PushText( double value )
{
char buf[BUF_SIZE];
XMLUtil::ToStr( value, buf, BUF_SIZE );
PushText( buf, false );
}
void XMLPrinter::PushComment( const char* comment )
{
PrepareForNewNode( _compactMode );
Write( "<!--" );
Write( comment );
Write( "-->" );
}
void XMLPrinter::PushDeclaration( const char* value )
{
PrepareForNewNode( _compactMode );
Write( "<?" );
Write( value );
Write( "?>" );
}
void XMLPrinter::PushUnknown( const char* value )
{
PrepareForNewNode( _compactMode );
Write( "<!" );
Write( value );
Putc( '>' );
}
bool XMLPrinter::VisitEnter( const XMLDocument& doc )
{
_processEntities = doc.ProcessEntities();
if ( doc.HasBOM() ) {
PushHeader( true, false );
}
return true;
}
bool XMLPrinter::VisitEnter( const XMLElement& element, const XMLAttribute* attribute )
{
const XMLElement* parentElem = 0;
if ( element.Parent() ) {
parentElem = element.Parent()->ToElement();
}
const bool compactMode = parentElem ? CompactMode( *parentElem ) : _compactMode;
OpenElement( element.Name(), compactMode );
while ( attribute ) {
PushAttribute( attribute->Name(), attribute->Value() );
attribute = attribute->Next();
}
return true;
}
bool XMLPrinter::VisitExit( const XMLElement& element )
{
CloseElement( CompactMode(element) );
return true;
}
bool XMLPrinter::Visit( const XMLText& text )
{
PushText( text.Value(), text.CData() );
return true;
}
bool XMLPrinter::Visit( const XMLComment& comment )
{
PushComment( comment.Value() );
return true;
}
bool XMLPrinter::Visit( const XMLDeclaration& declaration )
{
PushDeclaration( declaration.Value() );
return true;
}
bool XMLPrinter::Visit( const XMLUnknown& unknown )
{
PushUnknown( unknown.Value() );
return true;
}
} // namespace tinyxml2 | 75,034 | C++ | 24.06179 | 140 | 0.559013 |
MomentFactory/Omniverse-MVR-GDTF-converter/src/usd-plugins/fileFormat/gdtfFileFormat/gdtfUsdConverter.h | #pragma once
#include "../mvrFileFormat/gdtfParser/ModelSpecification.h"
#include <pxr/usd/usd/stage.h>
namespace GDTF
{
void ConvertToUsd(const GDTFSpecification& spec, pxr::UsdStageRefPtr stage, const std::string& targetPrimPath = "");
} | 244 | C | 29.624996 | 120 | 0.762295 |
MomentFactory/Omniverse-MVR-GDTF-converter/src/usd-plugins/fileFormat/gdtfFileFormat/gdtfFileFormat.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_GDTF_GDTFFILEFORMAT_H_
#define OMNI_GDTF_GDTFFILEFORMAT_H_
#define NOMINMAX
#include <pxr/base/tf/staticTokens.h>
#include <pxr/pxr.h>
#include <pxr/base/tf/token.h>
#include <pxr/usd/sdf/fileFormat.h>
#include <pxr/usd/sdf/layer.h>
#include <pxr/usd/pcp/dynamicFileFormatInterface.h>
#include <pxr/usd/pcp/dynamicFileFormatContext.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
/// \class GdtfFileFormat
///
/// Represents a dynamic file format that reads GDTF fixture
/// descriptions and converts them to USD on the fly.
///
class GDTF_API GdtfFileFormat : public SdfFileFormat, public PcpDynamicFileFormatInterface
{
public:
// SdfFileFormat overrides
bool CanRead(const std::string& filePath) const override;
bool Read(SdfLayer* layer, const std::string& resolvedPath, bool metadataOnly) const override;
bool WriteToString(const SdfLayer& layer, std::string* str, const std::string& comment = std::string()) const override;
bool WriteToStream(const SdfSpecHandle& spec, std::ostream& out, size_t indent) const override;
// PcpDynamicFileFormatInterface overrides
void ComposeFieldsForFileFormatArguments(const std::string& assetPath, const PcpDynamicFileFormatContext& context, FileFormatArguments* args, VtValue* contextDependencyData) const override;
bool CanFieldChangeAffectFileFormatArguments(const TfToken& field, const VtValue& oldValue, const VtValue& newValue, const VtValue& contextDependencyData) const override;
protected:
SDF_FILE_FORMAT_FACTORY_ACCESS;
bool _ShouldSkipAnonymousReload() const override;
bool _ShouldReadAnonymousLayers() const override;
virtual ~GdtfFileFormat();
GdtfFileFormat();
};
TF_DECLARE_PUBLIC_TOKENS(
GdtfFileFormatTokens,
((Id, "gdtfFileFormat"))
((Version, "1.0"))
((Target, "usd"))
((Extension, "gdtf"))
);
TF_DECLARE_WEAK_AND_REF_PTRS(GdtfFileFormat);
PXR_NAMESPACE_CLOSE_SCOPE
#endif | 2,543 | C | 32.92 | 190 | 0.769957 |
MomentFactory/Omniverse-MVR-GDTF-converter/src/usd-plugins/fileFormat/gdtfFileFormat/README.md | # usd-gdtf-plugin
An OpenUSD plugin for the GDTF (General Device Type Format) standard
# Requirements
1- A USD installation
2- CMake
# Build Instructions
1- cmake . -DPXR_PATH=PATH_TO_USD_INSTALL
2- Open the generated .sln file and compile | 214 | Markdown | 20.499998 | 44 | 0.766355 |
MomentFactory/Omniverse-MVR-GDTF-converter/src/usd-plugins/fileFormat/gdtfFileFormat/gdtfUsdConverter.cpp | #include "gdtfUsdConverter.h"
#include <pxr/base/tf/diagnostic.h>
#include <pxr/base/tf/stringUtils.h>
#include <pxr/base/tf/token.h>
#include <pxr/usd/usd/prim.h>
#include <pxr/usd/usd/stage.h>
#include <pxr/usd/usd/usdaFileFormat.h>
#include <pxr/usd/usdLux/diskLight.h>
#include <pxr/usd/usdGeom/mesh.h>
#include <pxr/usd/usdGeom/scope.h>
#include <pxr/usd/usdGeom/camera.h>
#include <pxr/usd/usdGeom/cube.h>
#include <pxr/usd/usdGeom/xformable.h>
#include <pxr/usd/usdGeom/xform.h>
#include <pxr/usd/usdGeom/xformOp.h>
#include <pxr/usd/usdLux/rectLight.h>
#include <pxr/base/gf/matrix3f.h>
#include <pxr/base/gf/rotation.h>
#include <pxr/base/gf/vec3f.h>
#include <pxr/usd/usd/payloads.h>
#include <pxr/pxr.h>
#include <iostream>
#define _SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING
#include <experimental/filesystem>
namespace GDTF
{
std::string CleanNameForUSD(const std::string& name)
{
std::string cleanedName = name;
if(cleanedName.size() == 0)
{
return "Default";
}
        if(cleanedName.size() == 1 && !pxr::TfIsValidIdentifier(cleanedName))
        {
            // A single character that is not a valid identifier (e.g. an index
            // digit used as a name) only needs a leading underscore.
return CleanNameForUSD("_" + cleanedName);
}
return pxr::TfMakeValidIdentifier(cleanedName);
}
void ConvertToUsd(const GDTFSpecification& spec, pxr::UsdStageRefPtr stage, const std::string& targetPrimPath)
{
PXR_NAMESPACE_USING_DIRECTIVE
SdfPath xformPath;
const bool from3ds = spec.ConvertedFrom3ds;
const bool isOnlyGDTF = targetPrimPath.empty();
if(isOnlyGDTF)
{
xformPath = SdfPath("/default_prim");
auto defaultPrim = UsdGeomXform::Define(stage, xformPath);
stage->SetDefaultPrim(defaultPrim.GetPrim());
bool resetXform = false;
defaultPrim.ClearXformOpOrder();
defaultPrim.AddTranslateOp();
defaultPrim.AddRotateYXZOp(UsdGeomXformOp::PrecisionDouble);
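            // Scale assumption: a stand-alone GDTF stage is authored in meters,
            // so scale by 100 for a centimeters-per-unit stage; 3ds-sourced
            // assets are assumed to be rescaled at the model level instead.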
defaultPrim.AddScaleOp().Set(GfVec3f(from3ds ? 1.0f : 100.0f));
}
else
{
xformPath = SdfPath(targetPrimPath);
}
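        // Assumption: a -90 degree rotation about X expressed as a 3x3 basis
        // change, remapping GDTF's Z-up coordinates onto USD's Y-up convention.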
const GfMatrix3d rotateMinus90deg = GfMatrix3d(1, 0, 0,
0, 0, 1,
0, -1, 0);
const std::string& parentPath = std::experimental::filesystem::temp_directory_path().string();
const auto& basePath = xformPath.AppendChild(TfToken("Base"));
const auto& baseModelPath = basePath.AppendChild(TfToken("model"));
const auto& baseXform = UsdGeomXform::Define(stage, basePath);
const auto& baseModelXform = UsdGeomXform::Define(stage, baseModelPath);
// Add GDTF custom properties to parent prim
auto fixturePrim = stage->GetPrimAtPath(xformPath);
fixturePrim.GetPrim().CreateAttribute(TfToken("mf:gdtf:LegHeight"), pxr::SdfValueTypeNames->Float).Set(spec.LegHeight);
fixturePrim.GetPrim().CreateAttribute(TfToken("mf:gdtf:OperatingTemperature:High"), pxr::SdfValueTypeNames->Float).Set(spec.HighTemperature);
fixturePrim.GetPrim().CreateAttribute(TfToken("mf:gdtf:OperatingTemperature:Low"), pxr::SdfValueTypeNames->Float).Set(spec.LowTemperature);
fixturePrim.GetPrim().CreateAttribute(TfToken("mf:gdtf:Weight"), pxr::SdfValueTypeNames->Float).Set(spec.Weight);
const float modelScaleFactor = spec.ConvertedFrom3ds ? 0.001f : 1.0f;
const float modelBaseRotateAngle = from3ds ? -90.0f : 0.0f;
if(spec.Name.empty())
{
std::cout << "spec name is empty! " << std::endl;
}
SdfPath geoPath = xformPath;
for(auto& geometry : spec.Geometries)
{
if(geometry.Name.empty())
{
continue;
}
geoPath = geoPath.AppendChild(TfToken(CleanNameForUSD(geometry.Name)));
if(!geometry.isBeam)
{
const auto& xform = UsdGeomXform::Define(stage, geoPath);
GfMatrix4d transform = GfMatrix4d(
geometry.Transform[0][0], geometry.Transform[1][0], geometry.Transform[2][0], 0,
geometry.Transform[0][1], geometry.Transform[1][1], geometry.Transform[2][1], 0,
geometry.Transform[0][2], geometry.Transform[1][2], geometry.Transform[2][2], 0,
geometry.Transform[0][3], geometry.Transform[1][3], geometry.Transform[2][3], 1
);
GfVec3d translation = rotateMinus90deg * transform.ExtractTranslation();
GfRotation rotation = transform.GetTranspose().ExtractRotation();
GfVec3d euler = rotation.Decompose(GfVec3f::XAxis(), GfVec3f::YAxis(), GfVec3f::ZAxis());
GfVec3d rotate = rotateMinus90deg * euler;
// Set transform
xform.ClearXformOpOrder();
xform.AddTranslateOp().Set(translation);
xform.AddRotateYZXOp(UsdGeomXformOp::PrecisionDouble).Set(rotate);
xform.AddScaleOp().Set(GfVec3f(1.0));
const auto& modelPath = geoPath.AppendChild(TfToken("model"));
const auto& modelXform = UsdGeomXform::Define(stage, modelPath);
modelXform.AddTranslateOp().Set(GfVec3d(0));
modelXform.AddRotateYZXOp(UsdGeomXformOp::PrecisionDouble).Set(GfVec3d(modelBaseRotateAngle, 0, 0));
auto scaleOp = modelXform.AddScaleOp();
if(from3ds)
{
scaleOp.Set(GfVec3f(modelScaleFactor));
}
std::string fileName = "";
for(auto m : spec.Models)
{
if(m.Name == geometry.Model)
{
fileName = m.File;
}
}
std::string payloadPath = parentPath + "/" + spec.SpecName + "/" + fileName + ".gltf";
modelXform.GetPrim().GetPayloads().AddPayload(SdfPayload(payloadPath));
}
else
{
SdfPath lightPath = geoPath.AppendChild(TfToken("Beam"));
auto diskLight = UsdLuxDiskLight::Define(stage, lightPath);
auto lightXform = UsdGeomXformable(diskLight);
float heightOffset = 0.0f;
                // We need to find the parent of the beam; we use the depth to do this search.
                // We fall back to the size of the beam if we can't find it.
std::string parentModelName = geometry.Model;
for(auto g : spec.Geometries)
{
if(g.Depth == geometry.Depth - 1)
{
parentModelName = g.Model;
}
}
// Find the corresponding model of the parent
const auto modelSpecIt = std::find_if(spec.Models.begin(), spec.Models.end(),
[parentModelName](const ModelSpecification& model)
{
return model.Name == parentModelName;
}
);
// If we find it, we use the height as the offset
if(modelSpecIt != spec.Models.end())
{
const ModelSpecification& modelSpec = *modelSpecIt;
heightOffset = modelSpec.Height * -0.5f;
}
GfMatrix4d transform = GfMatrix4d(
geometry.Transform[0][0], geometry.Transform[1][0], geometry.Transform[2][0], 0,
geometry.Transform[0][1], geometry.Transform[1][1], geometry.Transform[2][1], 0,
geometry.Transform[0][2], geometry.Transform[1][2], geometry.Transform[2][2], 0,
geometry.Transform[0][3], geometry.Transform[1][3], geometry.Transform[2][3], 1
);
GfRotation rotation = transform.GetTranspose().ExtractRotation();
GfVec3d euler = rotation.Decompose(GfVec3f::XAxis(), GfVec3f::YAxis(), GfVec3f::ZAxis());
GfVec3d rotate = (rotateMinus90deg * euler) + GfVec3d(-90, 0, 0);
lightXform.ClearXformOpOrder();
lightXform.AddTranslateOp().Set(GfVec3d(0, heightOffset, 0));
lightXform.AddRotateYXZOp(UsdGeomXformOp::PrecisionDouble).Set(rotate);
lightXform.AddScaleOp().Set(GfVec3f(spec.BeamRadius * 2.0, spec.BeamRadius * 2.0, 1));
diskLight.GetPrim().CreateAttribute(
TfToken("intensity"),
SdfValueTypeNames->Float
).Set(60000.0f);
diskLight.GetPrim().CreateAttribute(
TfToken("visibleInPrimaryRay"),
SdfValueTypeNames->Bool
).Set(true);
diskLight.GetPrim().CreateAttribute(TfToken("mf:gdtf:BeamAngle"), pxr::SdfValueTypeNames->Float).Set(spec.BeamAngle);
diskLight.GetPrim().CreateAttribute(TfToken("mf:gdtf:BeamType"), pxr::SdfValueTypeNames->String).Set(spec.BeamType);
diskLight.GetPrim().CreateAttribute(TfToken("mf:gdtf:ColorRenderingIndex"), pxr::SdfValueTypeNames->Int).Set(spec.ColorRenderingIndex);
diskLight.GetPrim().CreateAttribute(TfToken("mf:gdtf:ColorTemperature"), pxr::SdfValueTypeNames->Float).Set(spec.ColorTemperature);
diskLight.GetPrim().CreateAttribute(TfToken("mf:gdtf:FieldAngle"), pxr::SdfValueTypeNames->Float).Set(spec.FieldAngle);
diskLight.GetPrim().CreateAttribute(TfToken("mf:gdtf:LampType"), pxr::SdfValueTypeNames->String).Set(spec.LampType);
diskLight.GetPrim().CreateAttribute(TfToken("mf:gdtf:PowerConsumption"), pxr::SdfValueTypeNames->Float).Set(spec.PowerConsumption);
diskLight.GetPrim().CreateAttribute(TfToken("mf:gdtf:LuminousFlux"), pxr::SdfValueTypeNames->Float).Set(spec.LuminousFlux);
}
}
}
} | 10,347 | C++ | 43.603448 | 151 | 0.586257 |
MomentFactory/Omniverse-MVR-GDTF-converter/tools/packman/packmanconf.py | # Use this file to bootstrap packman into your Python environment (3.10.x). Simply
# add the directory containing packmanconf.py to sys.path and then execute:
#
# >>> import packmanconf
# >>> packmanconf.init()
#
# It will use the configured remote(s) and the version of packman in the same folder,
# giving you full access to the packman API via the following module
#
# >> import packmanapi
# >> dir(packmanapi)
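#
# Minimal bootstrap sketch (the relative path is an assumption; point it at the
# folder containing this file in your checkout):
#
# >>> import sys
# >>> sys.path.insert(0, "tools/packman")
# >>> import packmanconf
# >>> packmanconf.init()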
import os
import platform
import sys
def init():
"""Call this function to initialize the packman configuration.
Calls to the packman API will work after successfully calling this function.
Note:
This function only needs to be called once during the execution of your
program. Calling it repeatedly is harmless but wasteful.
Compatibility with your Python interpreter is checked and upon failure
the function will report what is required.
Example:
>>> import packmanconf
>>> packmanconf.init()
>>> import packmanapi
>>> packmanapi.set_verbosity_level(packmanapi.VERBOSITY_HIGH)
"""
major = sys.version_info[0]
minor = sys.version_info[1]
if major != 3 or minor != 10:
raise RuntimeError(
f"This version of packman requires Python 3.10.x, but {major}.{minor} was provided"
)
conf_dir = os.path.dirname(os.path.abspath(__file__))
os.environ["PM_INSTALL_PATH"] = conf_dir
packages_root = get_packages_root(conf_dir)
version = get_version(conf_dir)
module_dir = get_module_dir(conf_dir, packages_root, version)
sys.path.insert(1, module_dir)
def get_packages_root(conf_dir: str) -> str:
root = os.getenv("PM_PACKAGES_ROOT")
if not root:
platform_name = platform.system()
if platform_name == "Windows":
drive, _ = os.path.splitdrive(conf_dir)
root = os.path.join(drive, "packman-repo")
elif platform_name == "Darwin":
# macOS
root = os.path.join(
os.path.expanduser("~"), "/Library/Application Support/packman-cache"
)
elif platform_name == "Linux":
try:
cache_root = os.environ["XDG_HOME_CACHE"]
except KeyError:
cache_root = os.path.join(os.path.expanduser("~"), ".cache")
return os.path.join(cache_root, "packman")
else:
raise RuntimeError(f"Unsupported platform '{platform_name}'")
# make sure the path exists:
os.makedirs(root, exist_ok=True)
return root
def get_module_dir(conf_dir, packages_root: str, version: str) -> str:
module_dir = os.path.join(packages_root, "packman-common", version)
if not os.path.exists(module_dir):
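        # The requested packman-common version is not cached yet: download the
        # matching archive and unpack it with the bundled bootstrap installer
        # (bootstrap/install_package.py).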
import tempfile
tf = tempfile.NamedTemporaryFile(delete=False)
target_name = tf.name
tf.close()
url = f"http://bootstrap.packman.nvidia.com/packman-common@{version}.zip"
print(f"Downloading '{url}' ...")
import urllib.request
urllib.request.urlretrieve(url, target_name)
from importlib.machinery import SourceFileLoader
# import module from path provided
script_path = os.path.join(conf_dir, "bootstrap", "install_package.py")
ip = SourceFileLoader("install_package", script_path).load_module()
print("Unpacking ...")
ip.install_package(target_name, module_dir)
os.unlink(tf.name)
return module_dir
def get_version(conf_dir: str):
path = os.path.join(conf_dir, "packman")
if not os.path.exists(path): # in dev repo fallback
path += ".sh"
with open(path, "rt", encoding="utf8") as launch_file:
for line in launch_file.readlines():
if line.startswith("PM_PACKMAN_VERSION"):
_, value = line.split("=")
return value.strip()
raise RuntimeError(f"Unable to find 'PM_PACKMAN_VERSION' in '{path}'")
| 3,932 | Python | 35.416666 | 95 | 0.632503 |
MomentFactory/Omniverse-MVR-GDTF-converter/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config> | 210 | XML | 41.199992 | 123 | 0.695238 |
MomentFactory/Omniverse-MVR-GDTF-converter/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import os
import stat
import time
from typing import Any, Callable
RENAME_RETRY_COUNT = 100
RENAME_RETRY_DELAY = 0.1
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
def remove_directory_item(path):
if os.path.islink(path) or os.path.isfile(path):
try:
os.remove(path)
except PermissionError:
# make sure we have access and try again:
os.chmod(path, stat.S_IRWXU)
os.remove(path)
else:
# try first to delete the dir because this will work for folder junctions, otherwise we would follow the junctions and cause destruction!
clean_out_folder = False
try:
# make sure we have access preemptively - this is necessary because recursing into a directory without permissions
# will only lead to heart ache
os.chmod(path, stat.S_IRWXU)
os.rmdir(path)
except OSError:
clean_out_folder = True
if clean_out_folder:
# we should make sure the directory is empty
names = os.listdir(path)
for name in names:
fullname = os.path.join(path, name)
remove_directory_item(fullname)
# now try to again get rid of the folder - and not catch if it raises:
os.rmdir(path)
class StagingDirectory:
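    """Context manager that extracts into a unique temp folder under the staging
    path and then promotes the result to its final name via rename. (Sketch of
    intent; the retrying rename lives in rename_folder_with_retry below.)"""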
def __init__(self, staging_path):
self.staging_path = staging_path
self.temp_folder_path = None
os.makedirs(staging_path, exist_ok=True)
def __enter__(self):
self.temp_folder_path = tempfile.mkdtemp(prefix="ver-", dir=self.staging_path)
return self
def get_temp_folder_path(self):
return self.temp_folder_path
# this function renames the temp staging folder to folder_name, it is required that the parent path exists!
def promote_and_rename(self, folder_name):
abs_dst_folder_name = os.path.join(self.staging_path, folder_name)
os.rename(self.temp_folder_path, abs_dst_folder_name)
def __exit__(self, type, value, traceback):
# Remove temp staging folder if it's still there (something went wrong):
path = self.temp_folder_path
if os.path.isdir(path):
remove_directory_item(path)
def rename_folder(staging_dir: StagingDirectory, folder_name: str):
try:
staging_dir.promote_and_rename(folder_name)
except OSError as exc:
# if we failed to rename because the folder now exists we can assume that another packman process
# has managed to update the package before us - in all other cases we re-raise the exception
abs_dst_folder_name = os.path.join(staging_dir.staging_path, folder_name)
if os.path.exists(abs_dst_folder_name):
logger.warning(
f"Directory {abs_dst_folder_name} already present, package installation already completed"
)
else:
raise
def call_with_retry(
op_name: str, func: Callable, retry_count: int = 3, retry_delay: float = 20
) -> Any:
retries_left = retry_count
while True:
try:
return func()
except (OSError, IOError) as exc:
logger.warning(f"Failure while executing {op_name} [{str(exc)}]")
if retries_left:
retry_str = "retry" if retries_left == 1 else "retries"
logger.warning(
f"Retrying after {retry_delay} seconds"
f" ({retries_left} {retry_str} left) ..."
)
time.sleep(retry_delay)
else:
logger.error("Maximum retries exceeded, giving up")
raise
retries_left -= 1
def rename_folder_with_retry(staging_dir: StagingDirectory, folder_name):
dst_path = os.path.join(staging_dir.staging_path, folder_name)
call_with_retry(
f"rename {staging_dir.get_temp_folder_path()} -> {dst_path}",
lambda: rename_folder(staging_dir, folder_name),
RENAME_RETRY_COUNT,
RENAME_RETRY_DELAY,
)
def install_package(package_path, install_path):
staging_path, version = os.path.split(install_path)
with StagingDirectory(staging_path) as staging_dir:
output_folder = staging_dir.get_temp_folder_path()
with zipfile.ZipFile(package_path, allowZip64=True) as zip_file:
zip_file.extractall(output_folder)
# attempt the rename operation
rename_folder_with_retry(staging_dir, version)
print(f"Package successfully installed to {install_path}")
if __name__ == "__main__":
executable_paths = os.getenv("PATH")
paths_list = executable_paths.split(os.path.pathsep) if executable_paths else []
target_path_np = os.path.normpath(sys.argv[2])
target_path_np_nc = os.path.normcase(target_path_np)
for exec_path in paths_list:
if os.path.normcase(os.path.normpath(exec_path)) == target_path_np_nc:
raise RuntimeError(f"packman will not install to executable path '{exec_path}'")
install_package(sys.argv[1], target_path_np)
| 5,776 | Python | 36.270968 | 145 | 0.645083 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/gltfImporter.py | import logging
import omni.client
import os
import subprocess
import tempfile
from typing import List
import xml.etree.ElementTree as ET
from zipfile import ZipFile
from .filepathUtility import Filepath
from .gdtfUtil import Model
class GLTFImporter:
TMP_ARCHIVE_EXTRACT_DIR = f"{tempfile.gettempdir()}/MF.OV.GDTF/"
def convert(root: ET.Element, archive: ZipFile, output_dir: str) -> List[Model]:
models: List[Model] = GLTFImporter._get_model_nodes(root)
models_filtered: List[Model] = GLTFImporter._filter_models(models)
GLTFImporter._extract_gltf_to_tmp(models_filtered, archive)
GLTFImporter._convert_gltf(models_filtered, output_dir)
return models
def _get_model_nodes(root: ET.Element) -> List[Model]:
node_fixture: ET.Element = root.find("FixtureType")
node_models: ET.Element = node_fixture.find("Models")
nodes_model = node_models.findall("Model")
models: List[Model] = []
for node_model in nodes_model:
models.append(Model(node_model))
return models
def _filter_models(models: List[Model]) -> List[Model]:
filters: List[str] = ['pigtail', 'beam']
filtered_models: List[Model] = []
for model in models:
if model.has_file():
filtered_models.append(model)
elif model.get_name().lower() not in filters:
logger = logging.getLogger(__name__)
logger.info(f"File attribute empty for model node {model.get_name()}, skipping.")
return filtered_models
def _extract_gltf_to_tmp(models: List[Model], gdtf_archive: ZipFile):
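        # Lookup order, as implemented below: prefer a packaged .glb, then a
        # .gltf (with its sidecar .bin/texture files), and finally fall back to
        # converting a .3ds file to glTF.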
namelist = gdtf_archive.namelist()
to_remove: List[Model] = []
for model in models:
filename = model.get_file()
filepath_glb = f"models/gltf/{filename}.glb"
filepath_gltf = f"models/gltf/{filename}.gltf"
filepath_3ds = f"models/3ds/{filename}.3ds"
if filepath_glb in namelist:
tmp_export_path = gdtf_archive.extract(filepath_glb, GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR)
model.set_tmpdir_filepath(Filepath(tmp_export_path))
elif filepath_gltf in namelist:
tmp_export_path = gdtf_archive.extract(filepath_gltf, GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR)
for filepath in namelist: # Also import .bin, textures, etc.
if filepath.startswith(f"models/gltf/{filename}") and filepath != filepath_gltf:
gdtf_archive.extract(filepath, GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR)
model.set_tmpdir_filepath(Filepath(tmp_export_path))
elif filepath_3ds in namelist:
tmp_export_path = gdtf_archive.extract(filepath_3ds, GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR)
temp_export_path_gltf = tmp_export_path[:-4] + ".gltf"
GLTFImporter._convert_3ds_to_gltf(tmp_export_path, temp_export_path_gltf)
model.set_tmpdir_filepath(Filepath(temp_export_path_gltf))
model.set_converted_from_3ds()
os.remove(tmp_export_path)
else:
logger = logging.getLogger(__name__)
logger.warn(f"No file found for {filename}, skipping.")
to_remove.append(model)
for model in to_remove:
models.remove(model)
def _convert_3ds_to_gltf(input, output):
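        # The 3ds -> glTF conversion is delegated to an external Python 3.10
        # interpreter ("py -3.10") running 3dsConverterScript.py; the assumption
        # is that pyassimp cannot be imported from Kit's embedded interpreter.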
        script_dir = os.path.dirname(__file__)
        my_env = os.environ.copy()
        my_env["PATH"] = script_dir + os.pathsep + my_env["PATH"]
        scriptPath = os.path.join(script_dir, "3dsConverterScript.py")
try:
result = subprocess.run(["py", "-3.10", scriptPath, input, output], capture_output=True, env=my_env)
if result.returncode != 0:
logger = logging.getLogger(__name__)
logger.error(f"Failed to convert 3ds file to gltf: {input}\nerror (Requires python 3.10): {result.stderr.decode('utf-8')}\nerror message: {result.stdout.decode('utf-8')}")
except Exception as e:
logger = logging.getLogger(__name__)
logger.error(f"Failed to convert 3ds file to gltf: {input}\n{e}")
def _convert_gltf(models: List[Model], gdtf_output_dir):
output_dir = gdtf_output_dir + "gltf/"
_, files_in_output_dir = omni.client.list(output_dir) # Ignoring omni.client.Result
relative_paths_in_output_dir = [x.relative_path for x in files_in_output_dir]
converted_models: List[Model] = []
for model in models:
file: Filepath = model.get_tmpdir_filepath()
if model.get_converted_from_3ds():
bin_file = file.basename[:-5] + ".bin"
bin_path = output_dir + bin_file
if bin_file not in relative_paths_in_output_dir:
input_path = file.fullpath[:-5] + ".bin"
                    result = omni.client.copy(input_path, bin_path, omni.client.CopyBehavior.OVERWRITE)
output_file = file.basename
output_path = output_dir + output_file
if output_file not in relative_paths_in_output_dir:
input_path = file.fullpath
result = omni.client.copy(input_path, output_path, omni.client.CopyBehavior.OVERWRITE)
if result == omni.client.Result.OK:
model.set_converted_filepath(Filepath(output_path))
converted_models.append(model)
else:
logger = logging.getLogger(__name__)
logger.error(f"Failure to convert file {input_path}: {result}")
else:
model.set_converted_filepath(Filepath(output_path))
converted_models.append(model)
return converted_models
| 5,820 | Python | 46.325203 | 187 | 0.601203 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterContext.py | class ConverterContext:
usd_reference_path = ""
| 52 | Python | 16.666661 | 27 | 0.711538 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/gdtfUtil.py | import math
import xml.etree.ElementTree as ET
from pxr import Usd, UsdGeom, UsdLux, Sdf
from .filepathUtility import Filepath
from .USDTools import USDTools
def get_attrib_if_exists(node: ET.Element, attr: str):
return node.attrib[attr] if attr in node.attrib else None
def get_attrib_text_if_exists(node: ET.Element, attr: str):
return get_attrib_if_exists(node, attr)
def get_attrib_int_if_exists(node: ET.Element, attr: str):
str_value = get_attrib_if_exists(node, attr)
if str_value is not None:
return int(str_value)
return None
def get_attrib_float_if_exists(node: ET.Element, attr: str):
str_value = get_attrib_if_exists(node, attr)
if str_value is not None:
return float(str_value)
return None
def set_attribute_text_if_valid(prim: Usd.Prim, name: str, value: str):
if value is not None:
USDTools.set_prim_attribute(prim, name, Sdf.ValueTypeNames.String, value)
def set_attribute_int_if_valid(prim: Usd.Prim, name: str, value: str):
if value is not None:
USDTools.set_prim_attribute(prim, name, Sdf.ValueTypeNames.Int, value)
def set_attribute_float_if_valid(prim: Usd.Prim, name: str, value: str):
if value is not None:
USDTools.set_prim_attribute(prim, name, Sdf.ValueTypeNames.Float, value)
class Model:
def __init__(self, node: ET.Element):
self._name = node.attrib["Name"]
self._name_usd = USDTools.make_name_valid(self._name)
self._file = get_attrib_if_exists(node, "File")
self._primitive_type = node.attrib["PrimitiveType"]
self._height = float(node.attrib["Height"])
self._length = float(node.attrib["Length"])
self._width = float(node.attrib["Width"])
self._converted_from_3ds = False
def get_name(self) -> str:
return self._name
def get_name_usd(self) -> str:
return self._name_usd
def has_file(self) -> bool:
return self._file is not None and self._file != ""
def get_file(self) -> str:
return self._file
def set_tmpdir_filepath(self, path: Filepath):
self._tmpdir_filepath = path
def get_tmpdir_filepath(self) -> Filepath:
return self._tmpdir_filepath
def set_converted_from_3ds(self):
self._converted_from_3ds = True
def get_converted_from_3ds(self):
return self._converted_from_3ds
def set_converted_filepath(self, path: Filepath):
self._converted_filepath = path
def get_converted_filepath(self) -> Filepath:
return self._converted_filepath
def get_height(self) -> float:
return self._height
def get_width(self) -> float:
return self._width
class Geometry:
def __init__(self, node: ET.Element):
self._name: str = node.attrib["Name"]
self._model_id: str = get_attrib_if_exists(node, "Model")
self._position_matrix = node.attrib["Position"]
self._tag = node.tag
def get_tag(self) -> str:
return self._tag
def get_name(self) -> str:
return self._name
def get_model_id(self) -> str:
if self._model_id is not None:
return self._model_id
return self._name
def get_position_matrix(self) -> str:
return self._position_matrix
def set_model(self, model: Model):
self._model = model
def get_model(self) -> Model:
return self._model
def set_stage_path(self, path: str):
self._stage_path = path
def get_stage_path(self) -> str:
return self._stage_path
def set_depth(self, depth: int):
self._depth = depth
def get_depth(self) -> int:
return self._depth
def set_xform_model(self, xform: UsdGeom.Xform):
self._xform_model = xform
def get_xform_model(self) -> UsdGeom.Xform:
return self._xform_model
def set_xform_parent(self, xform: UsdGeom.Xform):
self._xform_parent = xform
def get_xform_parent(self) -> UsdGeom.Xform:
return self._xform_parent
class Beam:
def __init__(self, geometry: Geometry, node: ET.Element):
self._radius = float(node.attrib["BeamRadius"])
self._position_matrix = geometry.get_position_matrix()
self._stage_path = geometry.get_stage_path()
        # The attributes should always exist as per the standard definition
self._beam_angle = get_attrib_float_if_exists(node, "BeamAngle")
self._beam_type = get_attrib_text_if_exists(node, "BeamType")
self._color_rendering_index = get_attrib_int_if_exists(node, "ColorRenderingIndex")
self._color_temperature = get_attrib_float_if_exists(node, "ColorTemperature")
self._field_angle = get_attrib_float_if_exists(node, "FieldAngle")
self._lamp_type = get_attrib_text_if_exists(node, "LampType")
self._luminous_flux = get_attrib_float_if_exists(node, "LuminousFlux")
self._power_consumption = get_attrib_float_if_exists(node, "PowerConsumption")
def get_radius(self) -> float:
return self._radius
def get_position_matrix(self) -> str:
return self._position_matrix
def get_stage_path(self) -> str:
return self._stage_path
def get_intensity(self) -> float:
lumens = self._luminous_flux
radius = self._radius
if lumens is None:
return None
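        # Conversion sketch (assumptions): treat the lamp as an isotropic point
        # source, so candela ~= lumens / (4 * pi) ~= lumens / 12.566, then
        # spread that over a sphere of the beam radius; the * 1000 factor is an
        # empirical scale for USD light intensity units.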
candela: float = lumens / 12.566
numerator = candela * 1000
denominator = 4 * math.pi * radius * radius
result = numerator / denominator
return result
def apply_attributes_to_prim(self, light: UsdLux):
prim: Usd.Prim = light.GetPrim()
set_attribute_float_if_valid(prim, "BeamAngle", self._beam_angle)
set_attribute_text_if_valid(prim, "BeamType", self._beam_type)
set_attribute_int_if_valid(prim, "ColorRenderingIndex", self._color_rendering_index)
set_attribute_float_if_valid(prim, "ColorTemperature", self._color_temperature)
set_attribute_float_if_valid(prim, "FieldAngle", self._field_angle)
set_attribute_text_if_valid(prim, "LampType", self._lamp_type)
set_attribute_float_if_valid(prim, "LuminousFlux", self._luminous_flux)
set_attribute_float_if_valid(prim, "PowerConsumption", self._power_consumption)
USDTools.set_light_attributes(light, self._beam_angle, self.get_intensity(), self._color_temperature)
class FixtureAttributes:
def __init__(self, root: ET.Element):
self._operating_temperature_high = None
self._operating_temperature_low = None
self._weight = None
self._leg_height = None
node_fixture: ET.Element = root.find("FixtureType")
node_physdesc: ET.Element = node_fixture.find("PhysicalDescriptions")
if node_physdesc is not None:
node_properties: ET.Element = node_physdesc.find("Properties")
if node_properties is not None:
node_operatingtemp: ET.Element = node_properties.find("OperatingTemperature")
if node_operatingtemp is not None:
self._operating_temperature_high = get_attrib_float_if_exists(node_operatingtemp, "High")
self._operating_temperature_low = get_attrib_float_if_exists(node_operatingtemp, "Low")
node_weight: ET.Element = node_properties.find("Weight")
if node_weight is not None:
self._weight = get_attrib_float_if_exists(node_weight, "Value")
node_legheight: ET.Element = node_properties.find("LegHeight")
if node_legheight is not None:
self._leg_height = get_attrib_float_if_exists(node_legheight, "Value")
def apply_attributes_to_prim(self, prim: Usd.Prim):
set_attribute_float_if_valid(prim, "OperatingTemperature:High", self._operating_temperature_high)
set_attribute_float_if_valid(prim, "OperatingTemperature:Low", self._operating_temperature_low)
set_attribute_float_if_valid(prim, "Weight", self._weight)
set_attribute_float_if_valid(prim, "LegHeight", self._leg_height)
| 8,096 | Python | 35.147321 | 109 | 0.643157 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/gdtfImporter.py | from io import BytesIO
import logging
from typing import List, Tuple
import xml.etree.ElementTree as ET
from zipfile import ZipFile
from pxr import Gf, Sdf, Usd, UsdGeom
from .filepathUtility import Filepath
from .gdtfUtil import Model, Geometry, Beam, FixtureAttributes
from .gltfImporter import GLTFImporter
from .USDTools import USDTools
class GDTFImporter:
def convert(file: Filepath, output_dir: str, output_ext: str = ".usd") -> str:
try:
with ZipFile(file.fullpath, 'r') as archive:
gdtf_output_dir = output_dir + file.filename + "_gdtf/"
url: str = GDTFImporter._convert(archive, gdtf_output_dir, file.filename, output_ext)
return url
except Exception as e:
logger = logging.getLogger(__name__)
logger.error(f"Failed to parse gdtf file at {file.fullpath}. Make sure it is not corrupt. {e}")
return None
def convert_from_mvr(spec_name: str, output_dir: str, mvr_archive: ZipFile, output_ext: str = ".usd") -> bool:
spec_name_with_ext = spec_name + ".gdtf"
if spec_name_with_ext in mvr_archive.namelist():
gdtf_data = BytesIO(mvr_archive.read(spec_name_with_ext))
gdtf_output_dir = output_dir + spec_name + "_gdtf/"
with ZipFile(gdtf_data) as gdtf_archive:
GDTFImporter._convert(gdtf_archive, gdtf_output_dir, spec_name, output_ext)
return True
else:
return False
def _convert(archive: ZipFile, output_dir: str, name: str, output_ext: str) -> str:
data = archive.read("description.xml")
root = ET.fromstring(data)
converted_models: List[Model] = GLTFImporter.convert(root, archive, output_dir)
url: str = GDTFImporter._convert_gdtf_usd(output_dir, name, output_ext, root, converted_models)
return url
def _convert_gdtf_usd(output_dir: str, filename: str, ext: str, root: ET.Element, models: List[Model]) -> str:
url: str = output_dir + filename + ext
stage: Usd.Stage = GDTFImporter._get_or_create_gdtf_usd(url)
geometries, beams = GDTFImporter._get_stage_hierarchy(root, models, stage)
GDTFImporter._add_gltf_reference(stage, geometries)
GDTFImporter._apply_gdtf_matrix(stage, geometries)
GDTFImporter._add_light_to_hierarchy(stage, beams, geometries)
GDTFImporter._apply_gltf_scale(stage, geometries)
GDTFImporter._set_general_attributes(stage, root)
return url
def _get_or_create_gdtf_usd(url: str) -> Usd.Stage:
return USDTools.get_or_create_stage(url)
    def _get_stage_hierarchy(root: ET.Element, models: List[Model], stage: Usd.Stage) -> Tuple[List[Geometry], List[Beam]]:
node_fixture: ET.Element = root.find("FixtureType")
node_geometries = node_fixture.find("Geometries")
default_prim_path = stage.GetDefaultPrim().GetPath()
geometries: List[Geometry] = []
beams: List[Beam] = []
GDTFImporter._get_stage_hierarchy_recursive(node_geometries, models, geometries, beams, default_prim_path, 0)
return geometries, beams
def _get_stage_hierarchy_recursive(parent_node: ET.Element, models: List[Model], geometries: List[Geometry],
beams: List[Beam], path: str, depth: int):
geometry_filter: List[str] = ['Geometry', 'Axis', 'Beam', 'Inventory']
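        # Depth-first walk of the GDTF geometry tree: nodes outside the filter
        # are passed through, "beam" models become light sources, and modeled
        # nodes are recorded with their USD stage path and depth.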
for child_node in list(parent_node):
if 'Model' in child_node.attrib:
if child_node.tag not in geometry_filter:
# Pass through (might want to add an xform)
GDTFImporter._get_stage_hierarchy_recursive(child_node, models, geometries, beams, path, depth + 1)
else:
geometry: Geometry = Geometry(child_node)
model_id: str = geometry.get_model_id()
model: Model = next((model for model in models if model.get_name() == model_id), None)
if model is not None and model.has_file():
geometry.set_model(model)
stage_path = f"{path}/{model.get_name_usd()}"
geometry.set_stage_path(stage_path)
geometry.set_depth(depth)
geometries.append(geometry)
GDTFImporter._get_stage_hierarchy_recursive(child_node, models, geometries, beams, stage_path, depth + 1)
else:
if model_id.lower() == "pigtail":
pass # Skip pigtail geometry
elif model_id.lower() == "beam":
stage_path = f"{path}/beam"
geometry.set_stage_path(stage_path)
beam: Beam = Beam(geometry, child_node)
beams.append(beam)
elif model is not None and not model.has_file():
logger = logging.getLogger(__name__)
logger.warn(f"No file found for {model_id}, skipping.")
else:
# Probably could just be a transform
pass
else:
# Probably could just be a transform
pass
def _add_gltf_reference(stage: Usd.Stage, geometries: List[Geometry]):
stage_path = Filepath(USDTools.get_stage_directory(stage))
for geometry in geometries:
model: Model = geometry.get_model()
relative_path: str = stage_path.get_relative_from(model.get_converted_filepath())
xform_parent, xform_model = USDTools.add_reference(stage, relative_path, geometry.get_stage_path(), "/model")
xform_model.GetPrim().CreateAttribute("mf:gdtf:converter_from_3ds", Sdf.ValueTypeNames.Bool).Set(model.get_converted_from_3ds())
geometry.set_xform_parent(xform_parent)
geometry.set_xform_model(xform_model)
stage.Save()
def _apply_gltf_scale(stage: Usd.Stage, geometries: List[Geometry]):
world_xform: UsdGeom.Xform = UsdGeom.Xform(stage.GetDefaultPrim())
stage_metersPerUnit = UsdGeom.GetStageMetersPerUnit(stage)
scale = 1 / stage_metersPerUnit
USDTools.apply_scale_xform_op(world_xform, scale)
converted_3ds = False
for geometry in geometries:
model = geometry.get_model()
if model.get_converted_from_3ds():
converted_3ds = True
if converted_3ds:
for geometry in geometries:
if geometry.get_tag() != 'Beam':
xform = geometry.get_xform_model()
USDTools.apply_scale_xform_op(xform, UsdGeom.LinearUnits.millimeters) # force mm
stage.Save()
def _apply_gdtf_matrix(stage: Usd.Stage, geometries: List[Geometry]):
applied_scale = USDTools.compute_applied_scale(stage)
axis_matrix = USDTools.get_axis_rotation_matrix()
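        # Decompose each geometry's GDTF position matrix into translate /
        # rotateZYX / scale ops; USDTools is assumed to handle the axis change
        # and unit scaling via axis_matrix and applied_scale.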
for geometry in geometries:
translation, rotation = USDTools.compute_xform_values(geometry.get_position_matrix(), applied_scale, axis_matrix)
xform: UsdGeom.Xform = geometry.get_xform_parent()
            xform.ClearXformOpOrder()  # Prevent error when overwriting
xform.AddTranslateOp().Set(translation)
xform.AddRotateZYXOp().Set(rotation)
xform.AddScaleOp().Set(Gf.Vec3d(1, 1, 1))
stage.Save()
def _add_light_to_hierarchy(stage: Usd.Stage, beams: List[Beam], geometries: List[Geometry]):
if len(beams) > 0:
GDTFImporter._add_beam_to_hierarchy(stage, beams)
else:
# Some gdtf files only represents brackets and such. They contain only "Inventory" geometry.
# We don't want to add a light source to those.
has_not_inventory_geometry = False
for geometry in geometries:
if geometry.get_tag() != 'Inventory':
has_not_inventory_geometry = True
if has_not_inventory_geometry:
GDTFImporter._add_default_light_to_hierarchy(stage, geometries)
def _add_beam_to_hierarchy(stage: Usd.Stage, beams: List[Beam]):
for beam in beams:
light = USDTools.add_beam(stage, beam.get_stage_path(), beam.get_position_matrix(), beam.get_radius())
beam.apply_attributes_to_prim(light)
stage.Save()
def _add_default_light_to_hierarchy(stage: Usd.Stage, geometries: List[Geometry]):
deepest_geom = geometries[-1]
max_depth = deepest_geom.get_depth()
for geom in reversed(geometries):
depth = geom.get_depth()
            if depth > max_depth:
deepest_geom = geom
max_depth = depth
light_stage_path = deepest_geom.get_stage_path() + "/Beam"
model = deepest_geom.get_model()
USDTools.add_light_default(stage, light_stage_path, model.get_height(), model.get_width())
stage.Save()
def _set_general_attributes(stage: Usd.Stage, root: ET.Element):
fixtureAttr = FixtureAttributes(root)
prim: Usd.Prim = USDTools.get_default_prim(stage)
fixtureAttr.apply_attributes_to_prim(prim)
stage.Save()
| 9,327 | Python | 48.617021 | 140 | 0.599979 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/3dsConverterScript.py | import sys
import os
def main():
os.environ["PATH"] = __file__ + os.pathsep + os.environ["PATH"]
if len(sys.argv) <= 2:
print("Need at least 2 arguments")
exit(1)
from pyassimp import load, export
inputFile = sys.argv[1]
outputFile = sys.argv[2]
print("Input 3ds file:" + inputFile)
print("output file: " + outputFile)
with load(inputFile) as scene:
export(scene, outputFile, "gltf2")
if __name__ == "__main__":
main()
| 487 | Python | 18.519999 | 67 | 0.585216 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/extension.py | import omni.ext
import omni.kit.tool.asset_importer as ai
from .converterDelegate import ConverterDelegate
class MfOvGdtfExtension(omni.ext.IExt):
def on_startup(self, _):
self._delegate_gdtf = ConverterDelegate(
"GDTF Converter",
["(.*\\.gdtf$)"],
["GDTF Files (*.gdtf)"]
)
ai.register_importer(self._delegate_gdtf)
def on_shutdown(self):
ai.remove_importer(self._delegate_gdtf)
self._delegate_gdtf.destroy()
self._delegate_gdtf = None
| 533 | Python | 25.699999 | 49 | 0.617261 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/__init__.py | import os
from pxr import Plug
pluginsRoot = os.path.join(os.path.dirname(__file__), '../../../plugin/resources')
Plug.Registry().RegisterPlugins(pluginsRoot)
from .extension import *
| 192 | Python | 18.299998 | 82 | 0.703125 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterDelegate.py | import os
import omni.kit.tool.asset_importer as ai
from .converterOptionsBuilder import ConverterOptionsBuilder
from .converterHelper import ConverterHelper
class ConverterDelegate(ai.AbstractImporterDelegate):
def __init__(self, name, filters, descriptions):
super().__init__()
self._hoops_options_builder = ConverterOptionsBuilder()
self._hoops_converter = ConverterHelper()
self._name = name
self._filters = filters
self._descriptions = descriptions
def destroy(self):
if self._hoops_converter:
# self._hoops_converter.destroy()
self._hoops_converter = None
if self._hoops_options_builder:
self._hoops_options_builder.destroy()
self._hoops_options_builder = None
@property
def name(self):
return self._name
@property
def filter_regexes(self):
return self._filters
@property
def filter_descriptions(self):
return self._descriptions
def build_options(self, paths):
pass
# TODO enable this after the filepicker bugfix: OM-47383
# self._hoops_options_builder.build_pane(paths)
async def convert_assets(self, paths):
context = self._hoops_options_builder.get_import_options()
hoops_context = context.cad_converter_context
absolute_paths = []
relative_paths = []
for file_path in paths:
if self.is_supported_format(file_path):
absolute_paths.append(file_path)
filename = os.path.basename(file_path)
relative_paths.append(filename)
converted_assets = await self._hoops_converter.create_import_task(
absolute_paths, context.export_folder, hoops_context
)
return converted_assets
| 1,825 | Python | 28.934426 | 74 | 0.637808 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterOptionsBuilder.py | from omni.kit.menu import utils
from omni.kit.tool.asset_importer.file_picker import FilePicker
from omni.kit.tool.asset_importer.filebrowser import FileBrowserMode, FileBrowserSelectionType
import omni.kit.window.content_browser as content
from .converterOptions import ConverterOptions
class ConverterOptionsBuilder:
def __init__(self):
self._file_picker = None
self._export_content = ConverterOptions()
self._folder_button = None
self._refresh_default_folder = False
self._default_folder = None
self._clear()
def destroy(self):
self._clear()
if self._file_picker:
self._file_picker.destroy()
def _clear(self):
self._built = False
self._export_folder_field = None
if self._folder_button:
self._folder_button.set_clicked_fn(None)
self._folder_button = None
def set_default_target_folder(self, folder: str):
self._default_folder = folder
self._refresh_default_folder = True
def _select_picked_folder_callback(self, paths):
if paths:
self._export_folder_field.model.set_value(paths[0])
def _cancel_picked_folder_callback(self):
pass
def _show_file_picker(self):
if not self._file_picker:
mode = FileBrowserMode.OPEN
file_type = FileBrowserSelectionType.DIRECTORY_ONLY
filters = [(".*", "All Files (*.*)")]
self._file_picker = FilePicker("Select Folder", mode=mode, file_type=file_type, filter_options=filters)
self._file_picker.set_file_selected_fn(self._select_picked_folder_callback)
self._file_picker.set_cancel_fn(self._cancel_picked_folder_callback)
folder = self._export_folder_field.model.get_value_as_string()
if utils.is_folder(folder):
self._file_picker.show(folder)
else:
self._file_picker.show(self._get_current_dir_in_content_window())
def _get_current_dir_in_content_window(self):
content_window = content.get_content_window()
return content_window.get_current_directory()
def get_import_options(self):
return ConverterOptions()
| 2,210 | Python | 34.66129 | 115 | 0.646606 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterHelper.py | import logging
import shutil
from urllib.parse import unquote
import omni.kit.window.content_browser
from .filepathUtility import Filepath
from .gdtfImporter import GDTFImporter
from .gltfImporter import GLTFImporter
class ConverterHelper:
def _create_import_task(self, absolute_path, export_folder, _):
absolute_path_unquoted = unquote(absolute_path)
if absolute_path_unquoted.startswith("file:/"):
path = absolute_path_unquoted[6:]
else:
path = absolute_path_unquoted
current_nucleus_dir = omni.kit.window.content_browser.get_content_window().get_current_directory()
file: Filepath = Filepath(path)
        # Fall back to the current content browser directory when no export folder is given
        output_dir = export_folder if export_folder else current_nucleus_dir
# Cannot Unzip directly from Nucleus, must download file beforehand
if file.is_nucleus_path():
tmp_path = GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR + file.basename
result = omni.client.copy(file.fullpath, tmp_path, omni.client.CopyBehavior.OVERWRITE)
if result == omni.client.Result.OK:
file = Filepath(tmp_path)
else:
logger = logging.getLogger(__name__)
logger.error(f"Could not import {file.fullpath} directly from Omniverse, try downloading the file instead")
return
url: str = GDTFImporter.convert(file, output_dir)
return url
async def create_import_task(self, absolute_paths, export_folder, hoops_context):
converted_assets = {}
for i in range(len(absolute_paths)):
converted_assets[absolute_paths[i]] = self._create_import_task(absolute_paths[i], export_folder,
hoops_context)
shutil.rmtree(GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR)
return converted_assets
| 1,987 | Python | 40.416666 | 123 | 0.642174 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/USDTools.py | import numpy as np
from typing import List, Tuple
from unidecode import unidecode
from urllib.parse import unquote
import omni.usd
from pxr import Gf, Tf, Sdf, UsdLux, Usd, UsdGeom
class USDTools:
def make_name_valid(name: str) -> str:
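        # USD identifiers cannot start with a digit and must be ASCII; prefix
        # with '_' and transliterate (via unidecode) before validating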
if name[:1].isdigit():
name = "_" + name
return Tf.MakeValidIdentifier(unidecode(name))
def get_context():
return omni.usd.get_context()
def get_stage() -> Usd.Stage:
context = USDTools.get_context()
return context.get_stage()
def get_stage_directory(stage: Usd.Stage = None) -> str:
if stage is None:
stage = USDTools.get_stage()
root_layer = stage.GetRootLayer()
repository_path = root_layer.realPath
repository_path_unquoted = unquote(repository_path)
dir_index = repository_path_unquoted.rfind("/")
return repository_path_unquoted[:dir_index + 1]
def get_or_create_stage(url: str) -> Usd.Stage:
try: # TODO: Better way to check if stage exists?
return Usd.Stage.Open(url)
        except Exception:
stage = Usd.Stage.CreateNew(url)
UsdGeom.SetStageMetersPerUnit(stage, UsdGeom.LinearUnits.centimeters) # TODO get user defaults
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) # TODO get user defaults
default_prim = stage.DefinePrim("/World", "Xform")
stage.SetDefaultPrim(default_prim)
stage.Save()
return stage
def get_default_prim(stage: Usd.Stage) -> Usd.Prim:
return stage.GetDefaultPrim()
def add_reference(stage: Usd.Stage, ref_path_relative: str, stage_path: str, stage_subpath: str) -> Tuple[
UsdGeom.Xform, UsdGeom.Xform]:
xform_parent: UsdGeom.Xform = UsdGeom.Xform.Define(stage, stage_path)
xform_ref: UsdGeom.Xform = UsdGeom.Xform.Define(stage, stage_path + stage_subpath)
xform_ref_prim: Usd.Prim = xform_ref.GetPrim()
path_unquoted = unquote(ref_path_relative)
references: Usd.References = xform_ref_prim.GetReferences()
references.AddReference(path_unquoted)
return xform_parent, xform_ref
def get_applied_scale(stage: Usd.Stage, scale_factor: float):
stage_scale = UsdGeom.GetStageMetersPerUnit(stage)
return scale_factor / stage_scale
def apply_scale_xform_op(xform: UsdGeom.Xform, scale: float):
scale_value = Gf.Vec3d(scale, scale, scale)
xform_ordered_ops: List[UsdGeom.XformOp] = xform.GetOrderedXformOps()
found_op = False
for xform_op in xform_ordered_ops:
if xform_op.GetOpType() == UsdGeom.XformOp.TypeScale:
xform_op.Set(scale_value)
found_op = True
if not found_op:
xform.AddScaleOp().Set(scale_value)
def np_matrix_from_gdtf(value: str) -> np.matrix:
# GDTF Matrix is: 4x4, row-major, Right-Handed, Z-up (Distance Unit not specified, but mm implied)
# expect form like "{x,y,z,w}{x,y,z,w}{x,y,z,w}{x,y,z,w}" where "x","y","z", "w" is similar to 1.000000
# make source compatible with np.matrix constructor: "x y z; x y z; x y z; x y z"
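        # e.g. "{1,0,0,0}{0,1,0,0}{0,0,1,0}{0,0,0,1}" -> "1 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 1"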
value_alt = value[1:] # Removes "{" prefix
value_alt = value_alt[:-1] # Removes "}" suffix
value_alt = value_alt.replace("}{", "; ")
value_alt = value_alt.replace(",", " ")
np_matrix: np.matrix = np.matrix(value_alt)
return np_matrix
def gf_matrix_from_gdtf(np_matrix: np.matrix, scale: float) -> Gf.Matrix4d:
        # Note: filled column-by-column, so the result is the transpose of the
        # row-major GDTF matrix (callers transpose back where needed)
gf_matrix = Gf.Matrix4d(
np_matrix.item((0, 0)), np_matrix.item((1, 0)), np_matrix.item((2, 0)), np_matrix.item((3, 0)),
np_matrix.item((0, 1)), np_matrix.item((1, 1)), np_matrix.item((2, 1)), np_matrix.item((3, 1)),
np_matrix.item((0, 2)), np_matrix.item((1, 2)), np_matrix.item((2, 2)), np_matrix.item((3, 2)),
np_matrix.item((0, 3)), np_matrix.item((1, 3)), np_matrix.item((2, 3)), np_matrix.item((3, 3))
)
return gf_matrix
    def add_beam(stage: Usd.Stage, path: str, position_matrix: str, radius: float) -> UsdLux.DiskLight:
applied_scale = USDTools.compute_applied_scale(stage)
axis_matrix = USDTools.get_axis_rotation_matrix()
light: UsdLux.DiskLight = UsdLux.DiskLight.Define(stage, path)
translation, rotation = USDTools.compute_xform_values(position_matrix, applied_scale, axis_matrix)
rotation += Gf.Vec3d(-90, 0, 0)
scale = Gf.Vec3d(radius * 2, radius * 2, 1)
USDTools._set_light_xform(light, translation, rotation, scale)
USDTools._additional_default_attributes(light)
return light
def add_light_default(stage: Usd.Stage, path: str, height: float, diameter: float):
light: UsdLux.DiskLight = UsdLux.DiskLight.Define(stage, path)
translation = Gf.Vec3d(0, -height * 0.5, 0)
rotation = Gf.Vec3d(-90, 0, 0)
scale = Gf.Vec3d(diameter, diameter, 1)
USDTools._set_light_xform(light, translation, rotation, scale)
USDTools._additional_default_attributes(light)
    def _additional_default_attributes(light: UsdLux.DiskLight):
prim = light.GetPrim()
prim.CreateAttribute("visibleInPrimaryRay", Sdf.ValueTypeNames.Bool).Set(True)
light.CreateIntensityAttr().Set(60_000)
# if UsdLux.ShapingAPI.CanApply(prim):
UsdLux.ShapingAPI.Apply(prim)
def _set_light_xform(light: UsdLux.DiskLight, translation: Gf.Vec3d, rotation: Gf.Vec3d, scale: Gf.Vec3d):
        light.ClearXformOpOrder() # Prevent error when overwriting
light.AddTranslateOp().Set(translation)
light.AddRotateZYXOp().Set(rotation)
light.AddScaleOp().Set(scale)
def set_light_attributes(light: UsdLux.DiskLight, beamAngle: float, intensity: float, colorTemp: float):
if colorTemp is not None:
light.GetEnableColorTemperatureAttr().Set(True)
light.GetColorTemperatureAttr().Set(colorTemp)
else:
light.GetEnableColorTemperatureAttr().Set(False)
light.GetColorTemperatureAttr().Set(6500) # default value
if intensity is not None:
light.GetIntensityAttr().Set(intensity)
if beamAngle is not None:
prim: Usd.Prim = light.GetPrim()
shapingAPI = UsdLux.ShapingAPI(prim)
shapingAPI.GetShapingConeAngleAttr().Set(beamAngle)
def compute_applied_scale(stage: Usd.Stage) -> float:
gdtf_scale = 1 # GDTF dimensions are in meters
applied_scale = USDTools.get_applied_scale(stage, gdtf_scale)
return applied_scale
def get_axis_rotation_matrix() -> Gf.Matrix3d:
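        # -90 degree rotation about X: re-maps GDTF's Z-up axes onto the stage's Y-up convention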
rotate_minus90deg_xaxis = Gf.Matrix3d(1, 0, 0,
0, 0, 1,
0, -1, 0)
return rotate_minus90deg_xaxis
    def compute_xform_values(position_matrix: str, scale: float, axis_matrix: Gf.Matrix3d) -> Tuple[Gf.Vec3d, Gf.Vec3d]:
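        # Decompose the GDTF transform into a translation and Euler rotation,
        # then re-map both through the Z-up -> Y-up axis conversion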
np_matrix: np.matrix = USDTools.np_matrix_from_gdtf(position_matrix)
gf_matrix: Gf.Matrix4d = USDTools.gf_matrix_from_gdtf(np_matrix, scale)
rotation: Gf.Rotation = gf_matrix.GetTranspose().ExtractRotation()
euler: Gf.Vec3d = rotation.Decompose(Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis())
translation_value = axis_matrix * gf_matrix.ExtractTranslation()
rotation_value = axis_matrix * euler
return translation_value, rotation_value
def set_prim_attribute(prim: Usd.Prim, attribute_name: str, attribute_type: Sdf.ValueTypeNames, attribute_value):
prim.CreateAttribute(f"mf:gdtf:{attribute_name}", attribute_type).Set(attribute_value)
| 7,736 | Python | 45.329341 | 117 | 0.633273 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterOptions.py | from .converterContext import ConverterContext
class ConverterOptions:
def __init__(self):
self.cad_converter_context = ConverterContext()
self.export_folder: str = None
| 192 | Python | 23.124997 | 55 | 0.708333 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/filepathUtility.py | import os
class Filepath:
def __init__(self, filepath: str):
self._is_none = filepath == ""
self.fullpath = filepath
self.directory = os.path.dirname(filepath) + "/"
self.basename = os.path.basename(filepath)
self.filename, self.ext = os.path.splitext(self.basename)
def is_nucleus_path(self) -> bool:
# TODO: Replace with omni utility method
return self.directory[:12] == "omniverse://"
def get_relative_from(self, other) -> str:
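        # e.g. with directory "omniverse://host/assets/" and other
        # "omniverse://host/assets/gdtf/model.gltf", returns "./gdtf/model.gltf"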
if self._is_none:
return other.fullpath
else:
return "./" + other.fullpath[len(self.directory):]
| 641 | Python | 28.181817 | 65 | 0.592824 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/material.py | # Dummy value.
#
# No texture, but the value to be used as 'texture semantic'
# (#aiMaterialProperty::mSemantic) for all material properties
# # not* related to textures.
#
aiTextureType_NONE = 0x0
# The texture is combined with the result of the diffuse
# lighting equation.
#
aiTextureType_DIFFUSE = 0x1
# The texture is combined with the result of the specular
# lighting equation.
#
aiTextureType_SPECULAR = 0x2
# The texture is combined with the result of the ambient
# lighting equation.
#
aiTextureType_AMBIENT = 0x3
# The texture is added to the result of the lighting
# calculation. It isn't influenced by incoming light.
#
aiTextureType_EMISSIVE = 0x4
# The texture is a height map.
#
# By convention, higher gray-scale values stand for
# higher elevations from the base height.
#
aiTextureType_HEIGHT = 0x5
# The texture is a (tangent space) normal-map.
#
# Again, there are several conventions for tangent-space
# normal maps. Assimp does (intentionally) not
# distinguish here.
#
aiTextureType_NORMALS = 0x6
# The texture defines the glossiness of the material.
#
# The glossiness is in fact the exponent of the specular
# (phong) lighting equation. Usually there is a conversion
# function defined to map the linear color values in the
# texture to a suitable exponent. Have fun.
#
aiTextureType_SHININESS = 0x7
# The texture defines per-pixel opacity.
#
# Usually 'white' means opaque and 'black' means
# 'transparency'. Or quite the opposite. Have fun.
#
aiTextureType_OPACITY = 0x8
# Displacement texture
#
# The exact purpose and format is application-dependent.
# Higher color values stand for higher vertex displacements.
#
aiTextureType_DISPLACEMENT = 0x9
# Lightmap texture (aka Ambient Occlusion)
#
# Both 'Lightmaps' and dedicated 'ambient occlusion maps' are
# covered by this material property. The texture contains a
# scaling value for the final color value of a pixel. Its
# intensity is not affected by incoming light.
#
aiTextureType_LIGHTMAP = 0xA
# Reflection texture
#
# Contains the color of a perfect mirror reflection.
# Rarely used, almost never for real-time applications.
#
aiTextureType_REFLECTION = 0xB
# Unknown texture
#
# A texture reference that does not match any of the definitions
# above is considered to be 'unknown'. It is still imported
# but is excluded from any further postprocessing.
#
aiTextureType_UNKNOWN = 0xC
| 2,409 | Python | 25.777777 | 65 | 0.757991 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/__init__.py | from .core import *
| 20 | Python | 9.499995 | 19 | 0.7 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/core.py | """
PyAssimp
This is the main-module of PyAssimp.
"""
import sys
if sys.version_info < (2,6):
raise RuntimeError('pyassimp: need python 2.6 or newer')
# xrange was renamed range in Python 3 and the original range from Python 2 was removed.
# To keep compatibility with both Python 2 and 3, xrange is set to range for version 3.0 and up.
if sys.version_info >= (3,0):
xrange = range
try:
import numpy
except ImportError:
numpy = None
import logging
import ctypes
from contextlib import contextmanager
logger = logging.getLogger("pyassimp")
# attach default null handler to logger so it doesn't complain
# even if you don't attach another handler to logger
logger.addHandler(logging.NullHandler())
from . import structs
from . import helper
from . import postprocess
from .errors import AssimpError
class AssimpLib(object):
"""
Assimp-Singleton
"""
load, load_mem, export, export_blob, release, dll = helper.search_library()
_assimp_lib = AssimpLib()
def make_tuple(ai_obj, type = None):
res = None
#notes:
# ai_obj._fields_ = [ ("attr", c_type), ... ]
# getattr(ai_obj, e[0]).__class__ == float
if isinstance(ai_obj, structs.Matrix4x4):
if numpy:
res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((4,4))
#import pdb;pdb.set_trace()
else:
res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_]
res = [res[i:i+4] for i in xrange(0,16,4)]
elif isinstance(ai_obj, structs.Matrix3x3):
if numpy:
res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((3,3))
else:
res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_]
res = [res[i:i+3] for i in xrange(0,9,3)]
else:
if numpy:
res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_])
else:
res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_]
return res
# Returns unicode object for Python 2, and str object for Python 3.
def _convert_assimp_string(assimp_string):
if sys.version_info >= (3, 0):
return str(assimp_string.data, errors='ignore')
else:
return unicode(assimp_string.data, errors='ignore')
# It is faster and more correct to have an init function for each assimp class
def _init_face(aiFace):
aiFace.indices = [aiFace.mIndices[i] for i in range(aiFace.mNumIndices)]
assimp_struct_inits = { structs.Face : _init_face }
def call_init(obj, caller = None):
if helper.hasattr_silent(obj,'contents'): #pointer
_init(obj.contents, obj, caller)
else:
_init(obj,parent=caller)
def _is_init_type(obj):
if obj and helper.hasattr_silent(obj,'contents'): #pointer
return _is_init_type(obj[0])
# null-pointer case that arises when we reach a mesh attribute
# like mBitangents which use mNumVertices rather than mNumBitangents
# so it breaks the 'is iterable' check.
# Basically:
# FIXME!
elif not bool(obj):
return False
tname = obj.__class__.__name__
return not (tname[:2] == 'c_' or tname == 'Structure' \
or tname == 'POINTER') and not isinstance(obj, (int, str, bytes))
def _init(self, target = None, parent = None):
"""
Custom initialize() for C structs, adds safely accessible member functionality.
    :param target: set the object which receives the added methods. Useful when manipulating
    pointers, to skip the intermediate 'contents' dereferencing.
"""
if not target:
target = self
dirself = dir(self)
for m in dirself:
if m.startswith("_"):
continue
# We should not be accessing `mPrivate` according to structs.Scene.
if m == 'mPrivate':
continue
if m.startswith('mNum'):
if 'm' + m[4:] in dirself:
continue # will be processed later on
else:
name = m[1:].lower()
obj = getattr(self, m)
setattr(target, name, obj)
continue
if m == 'mName':
target.name = str(_convert_assimp_string(self.mName))
target.__class__.__repr__ = lambda x: str(x.__class__) + "(" + getattr(x, 'name','') + ")"
target.__class__.__str__ = lambda x: getattr(x, 'name', '')
continue
name = m[1:].lower()
obj = getattr(self, m)
# Create tuples
if isinstance(obj, structs.assimp_structs_as_tuple):
setattr(target, name, make_tuple(obj))
logger.debug(str(self) + ": Added array " + str(getattr(target, name)) + " as self." + name.lower())
continue
if m.startswith('m') and len(m) > 1 and m[1].upper() == m[1]:
if name == "parent":
setattr(target, name, parent)
logger.debug("Added a parent as self." + name)
continue
if helper.hasattr_silent(self, 'mNum' + m[1:]):
length = getattr(self, 'mNum' + m[1:])
# -> special case: properties are
# stored as a dict.
if m == 'mProperties':
setattr(target, name, _get_properties(obj, length))
continue
if not length: # empty!
setattr(target, name, [])
logger.debug(str(self) + ": " + name + " is an empty list.")
continue
try:
if obj._type_ in structs.assimp_structs_as_tuple:
if numpy:
setattr(target, name, numpy.array([make_tuple(obj[i]) for i in range(length)], dtype=numpy.float32))
logger.debug(str(self) + ": Added an array of numpy arrays (type "+ str(type(obj)) + ") as self." + name)
else:
setattr(target, name, [make_tuple(obj[i]) for i in range(length)])
logger.debug(str(self) + ": Added a list of lists (type "+ str(type(obj)) + ") as self." + name)
else:
setattr(target, name, [obj[i] for i in range(length)]) #TODO: maybe not necessary to recreate an array?
logger.debug(str(self) + ": Added list of " + str(obj) + " " + name + " as self." + name + " (type: " + str(type(obj)) + ")")
# initialize array elements
try:
init = assimp_struct_inits[type(obj[0])]
except KeyError:
if _is_init_type(obj[0]):
for e in getattr(target, name):
call_init(e, target)
else:
for e in getattr(target, name):
init(e)
except IndexError:
logger.error("in " + str(self) +" : mismatch between mNum" + name + " and the actual amount of data in m" + name + ". This may be due to version mismatch between libassimp and pyassimp. Quitting now.")
sys.exit(1)
except ValueError as e:
logger.error("In " + str(self) + "->" + name + ": " + str(e) + ". Quitting now.")
if "setting an array element with a sequence" in str(e):
logger.error("Note that pyassimp does not currently "
"support meshes with mixed triangles "
"and quads. Try to load your mesh with"
" a post-processing to triangulate your"
" faces.")
raise e
else: # starts with 'm' but not iterable
setattr(target, name, obj)
logger.debug("Added " + name + " as self." + name + " (type: " + str(type(obj)) + ")")
if _is_init_type(obj):
call_init(obj, target)
if isinstance(self, structs.Mesh):
_finalize_mesh(self, target)
if isinstance(self, structs.Texture):
_finalize_texture(self, target)
if isinstance(self, structs.Metadata):
_finalize_metadata(self, target)
return self
def pythonize_assimp(type, obj, scene):
""" This method modify the Assimp data structures
to make them easier to work with in Python.
Supported operations:
- MESH: replace a list of mesh IDs by reference to these meshes
- ADDTRANSFORMATION: add a reference to an object's transformation taken from their associated node.
:param type: the type of modification to operate (cf above)
:param obj: the input object to modify
:param scene: a reference to the whole scene
"""
if type == "MESH":
meshes = []
for i in obj:
meshes.append(scene.meshes[i])
return meshes
if type == "ADDTRANSFORMATION":
def getnode(node, name):
if node.name == name: return node
for child in node.children:
n = getnode(child, name)
if n: return n
node = getnode(scene.rootnode, obj.name)
if not node:
raise AssimpError("Object " + str(obj) + " has no associated node!")
setattr(obj, "transformation", node.transformation)
def recur_pythonize(node, scene):
'''
    Recursively call pythonize_assimp on the
    node tree to apply several post-processing steps
    that pythonize the assimp data structures.
'''
node.meshes = pythonize_assimp("MESH", node.meshes, scene)
for mesh in node.meshes:
mesh.material = scene.materials[mesh.materialindex]
for cam in scene.cameras:
pythonize_assimp("ADDTRANSFORMATION", cam, scene)
for c in node.children:
recur_pythonize(c, scene)
def release(scene):
'''
Release resources of a loaded scene.
'''
_assimp_lib.release(ctypes.pointer(scene))
@contextmanager
def load(filename,
file_type = None,
processing = postprocess.aiProcess_Triangulate):
'''
Load a model into a scene. On failure throws AssimpError.
Arguments
---------
filename: Either a filename or a file object to load model from.
If a file object is passed, file_type MUST be specified
Otherwise Assimp has no idea which importer to use.
This is named 'filename' so as to not break legacy code.
processing: assimp postprocessing parameters. Verbose keywords are imported
from postprocessing, and the parameters can be combined bitwise to
generate the final processing value. Note that the default value will
triangulate quad faces. Example of generating other possible values:
processing = (pyassimp.postprocess.aiProcess_Triangulate |
pyassimp.postprocess.aiProcess_OptimizeMeshes)
file_type: string of file extension, such as 'stl'
Returns
---------
Scene object with model data
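
    Example (minimal sketch; 'model.obj' is a placeholder path):
        with load('model.obj') as scene:
            for mesh in scene.meshes:
                print(len(mesh.vertices))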
'''
if hasattr(filename, 'read'):
# This is the case where a file object has been passed to load.
# It is calling the following function:
# const aiScene* aiImportFileFromMemory(const char* pBuffer,
# unsigned int pLength,
# unsigned int pFlags,
# const char* pHint)
if file_type is None:
raise AssimpError('File type must be specified when passing file objects!')
data = filename.read()
model = _assimp_lib.load_mem(data,
len(data),
processing,
file_type)
else:
# a filename string has been passed
model = _assimp_lib.load(filename.encode(sys.getfilesystemencoding()), processing)
if not model:
raise AssimpError('Could not import file!')
scene = _init(model.contents)
recur_pythonize(scene.rootnode, scene)
try:
yield scene
finally:
release(scene)
def export(scene,
filename,
file_type = None,
processing = postprocess.aiProcess_Triangulate):
'''
Export a scene. On failure throws AssimpError.
Arguments
---------
scene: scene to export.
filename: Filename that the scene should be exported to.
file_type: string of file exporter to use. For example "collada".
processing: assimp postprocessing parameters. Verbose keywords are imported
from postprocessing, and the parameters can be combined bitwise to
generate the final processing value. Note that the default value will
triangulate quad faces. Example of generating other possible values:
processing = (pyassimp.postprocess.aiProcess_Triangulate |
pyassimp.postprocess.aiProcess_OptimizeMeshes)
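
    Example (minimal sketch; assumes 'scene' came from load()):
        export(scene, 'out.dae', file_type='collada')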
'''
exportStatus = _assimp_lib.export(ctypes.pointer(scene), file_type.encode("ascii"), filename.encode(sys.getfilesystemencoding()), processing)
if exportStatus != 0:
raise AssimpError('Could not export scene!')
def export_blob(scene,
file_type = None,
processing = postprocess.aiProcess_Triangulate):
'''
Export a scene and return a blob in the correct format. On failure throws AssimpError.
Arguments
---------
scene: scene to export.
file_type: string of file exporter to use. For example "collada".
processing: assimp postprocessing parameters. Verbose keywords are imported
from postprocessing, and the parameters can be combined bitwise to
generate the final processing value. Note that the default value will
triangulate quad faces. Example of generating other possible values:
processing = (pyassimp.postprocess.aiProcess_Triangulate |
pyassimp.postprocess.aiProcess_OptimizeMeshes)
Returns
---------
Pointer to structs.ExportDataBlob
'''
exportBlobPtr = _assimp_lib.export_blob(ctypes.pointer(scene), file_type.encode("ascii"), processing)
if exportBlobPtr == 0:
raise AssimpError('Could not export scene to blob!')
return exportBlobPtr
def _finalize_texture(tex, target):
setattr(target, "achformathint", tex.achFormatHint)
if numpy:
data = numpy.array([make_tuple(getattr(tex, "pcData")[i]) for i in range(tex.mWidth * tex.mHeight)])
else:
data = [make_tuple(getattr(tex, "pcData")[i]) for i in range(tex.mWidth * tex.mHeight)]
setattr(target, "data", data)
def _finalize_mesh(mesh, target):
""" Building of meshes is a bit specific.
    We override here the various datasets that cannot
    be processed as regular fields.
For instance, the length of the normals array is
mNumVertices (no mNumNormals is available)
"""
nb_vertices = getattr(mesh, "mNumVertices")
def fill(name):
mAttr = getattr(mesh, name)
if numpy:
if mAttr:
data = numpy.array([make_tuple(getattr(mesh, name)[i]) for i in range(nb_vertices)], dtype=numpy.float32)
setattr(target, name[1:].lower(), data)
else:
setattr(target, name[1:].lower(), numpy.array([], dtype="float32"))
else:
if mAttr:
data = [make_tuple(getattr(mesh, name)[i]) for i in range(nb_vertices)]
setattr(target, name[1:].lower(), data)
else:
setattr(target, name[1:].lower(), [])
def fillarray(name):
mAttr = getattr(mesh, name)
data = []
for index, mSubAttr in enumerate(mAttr):
if mSubAttr:
data.append([make_tuple(getattr(mesh, name)[index][i]) for i in range(nb_vertices)])
if numpy:
setattr(target, name[1:].lower(), numpy.array(data, dtype=numpy.float32))
else:
setattr(target, name[1:].lower(), data)
fill("mNormals")
fill("mTangents")
fill("mBitangents")
fillarray("mColors")
fillarray("mTextureCoords")
# prepare faces
if numpy:
faces = numpy.array([f.indices for f in target.faces], dtype=numpy.int32)
else:
faces = [f.indices for f in target.faces]
setattr(target, 'faces', faces)
def _init_metadata_entry(entry):
entry.type = entry.mType
if entry.type == structs.MetadataEntry.AI_BOOL:
entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_bool)).contents.value
elif entry.type == structs.MetadataEntry.AI_INT32:
entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_int32)).contents.value
elif entry.type == structs.MetadataEntry.AI_UINT64:
entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_uint64)).contents.value
elif entry.type == structs.MetadataEntry.AI_FLOAT:
entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_float)).contents.value
elif entry.type == structs.MetadataEntry.AI_DOUBLE:
entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_double)).contents.value
elif entry.type == structs.MetadataEntry.AI_AISTRING:
assimp_string = ctypes.cast(entry.mData, ctypes.POINTER(structs.String)).contents
entry.data = _convert_assimp_string(assimp_string)
elif entry.type == structs.MetadataEntry.AI_AIVECTOR3D:
assimp_vector = ctypes.cast(entry.mData, ctypes.POINTER(structs.Vector3D)).contents
entry.data = make_tuple(assimp_vector)
return entry
def _finalize_metadata(metadata, target):
""" Building the metadata object is a bit specific.
Firstly, there are two separate arrays: one with metadata keys and one
with metadata values, and there are no corresponding mNum* attributes,
so the C arrays are not converted to Python arrays using the generic
code in the _init function.
Secondly, a metadata entry value has to be cast according to declared
metadata entry type.
"""
length = metadata.mNumProperties
setattr(target, 'keys', [str(_convert_assimp_string(metadata.mKeys[i])) for i in range(length)])
setattr(target, 'values', [_init_metadata_entry(metadata.mValues[i]) for i in range(length)])
class PropertyGetter(dict):
def __getitem__(self, key):
semantic = 0
if isinstance(key, tuple):
key, semantic = key
return dict.__getitem__(self, (key, semantic))
def keys(self):
for k in dict.keys(self):
yield k[0]
def __iter__(self):
return self.keys()
def items(self):
for k, v in dict.items(self):
yield k[0], v
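# Note: material properties are keyed by (name, texture semantic) tuples;
# PropertyGetter lets a plain string key default to semantic 0, so
# props['diffuse'] is equivalent to props[('diffuse', 0)].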
def _get_properties(properties, length):
"""
    Convenience function to get the material properties as a dict,
    with values converted to Python types.
"""
result = {}
#read all properties
for p in [properties[i] for i in range(length)]:
#the name
p = p.contents
key = str(_convert_assimp_string(p.mKey))
key = (key.split('.')[1], p.mSemantic)
#the data
if p.mType == 1:
arr = ctypes.cast(p.mData,
ctypes.POINTER(ctypes.c_float * int(p.mDataLength/ctypes.sizeof(ctypes.c_float)))
).contents
value = [x for x in arr]
elif p.mType == 3: #string can't be an array
value = _convert_assimp_string(ctypes.cast(p.mData, ctypes.POINTER(structs.MaterialPropertyString)).contents)
elif p.mType == 4:
arr = ctypes.cast(p.mData,
ctypes.POINTER(ctypes.c_int * int(p.mDataLength/ctypes.sizeof(ctypes.c_int)))
).contents
value = [x for x in arr]
else:
value = p.mData[:p.mDataLength]
if len(value) == 1:
[value] = value
result[key] = value
return PropertyGetter(result)
def decompose_matrix(matrix):
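    # Thin wrapper over assimp's aiDecomposeMatrix: splits a 4x4 transform
    # into (scaling, rotation quaternion, position)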
if not isinstance(matrix, structs.Matrix4x4):
raise AssimpError("pyassimp.decompose_matrix failed: Not a Matrix4x4!")
scaling = structs.Vector3D()
rotation = structs.Quaternion()
position = structs.Vector3D()
_assimp_lib.dll.aiDecomposeMatrix(ctypes.pointer(matrix),
ctypes.byref(scaling),
ctypes.byref(rotation),
ctypes.byref(position))
return scaling._init(), rotation._init(), position._init()
| 20,821 | Python | 36.115864 | 221 | 0.58369 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/errors.py | #-*- coding: UTF-8 -*-
"""
All possible errors.
"""
class AssimpError(BaseException):
"""
If an internal error occurs.
"""
pass
| 146 | Python | 11.249999 | 33 | 0.568493 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/structs.py | #-*- coding: utf-8 -*-
from ctypes import POINTER, c_void_p, c_uint, c_char, c_float, Structure, c_double, c_ubyte, c_size_t, c_uint32
class Vector2D(Structure):
"""
See 'vector2.h' for details.
"""
_fields_ = [
("x", c_float),("y", c_float),
]
class Matrix3x3(Structure):
"""
See 'matrix3x3.h' for details.
"""
_fields_ = [
("a1", c_float),("a2", c_float),("a3", c_float),
("b1", c_float),("b2", c_float),("b3", c_float),
("c1", c_float),("c2", c_float),("c3", c_float),
]
class Texel(Structure):
"""
See 'texture.h' for details.
"""
_fields_ = [
("b", c_ubyte),("g", c_ubyte),("r", c_ubyte),("a", c_ubyte),
]
class Color4D(Structure):
"""
See 'color4.h' for details.
"""
_fields_ = [
# Red, green, blue and alpha color values
("r", c_float),("g", c_float),("b", c_float),("a", c_float),
]
class Plane(Structure):
"""
See 'types.h' for details.
"""
_fields_ = [
# Plane equation
("a", c_float),("b", c_float),("c", c_float),("d", c_float),
]
class Color3D(Structure):
"""
See 'types.h' for details.
"""
_fields_ = [
# Red, green and blue color values
("r", c_float),("g", c_float),("b", c_float),
]
class String(Structure):
"""
See 'types.h' for details.
"""
MAXLEN = 1024
_fields_ = [
# Binary length of the string excluding the terminal 0. This is NOT the
# logical length of strings containing UTF-8 multibyte sequences! It's
# the number of bytes from the beginning of the string to its end.
("length", c_uint32),
# String buffer. Size limit is MAXLEN
("data", c_char*MAXLEN),
]
class MaterialPropertyString(Structure):
"""
See 'MaterialSystem.cpp' for details.
The size of length is truncated to 4 bytes on 64-bit platforms when used as a
material property (see MaterialSystem.cpp aiMaterial::AddProperty() for details).
"""
MAXLEN = 1024
_fields_ = [
# Binary length of the string excluding the terminal 0. This is NOT the
# logical length of strings containing UTF-8 multibyte sequences! It's
# the number of bytes from the beginning of the string to its end.
("length", c_uint32),
# String buffer. Size limit is MAXLEN
("data", c_char*MAXLEN),
]
class MemoryInfo(Structure):
"""
See 'types.h' for details.
"""
_fields_ = [
# Storage allocated for texture data
("textures", c_uint),
# Storage allocated for material data
("materials", c_uint),
# Storage allocated for mesh data
("meshes", c_uint),
# Storage allocated for node data
("nodes", c_uint),
# Storage allocated for animation data
("animations", c_uint),
# Storage allocated for camera data
("cameras", c_uint),
# Storage allocated for light data
("lights", c_uint),
# Total storage allocated for the full import.
("total", c_uint),
]
class Quaternion(Structure):
"""
See 'quaternion.h' for details.
"""
_fields_ = [
# w,x,y,z components of the quaternion
("w", c_float),("x", c_float),("y", c_float),("z", c_float),
]
class Face(Structure):
"""
See 'mesh.h' for details.
"""
_fields_ = [
# Number of indices defining this face.
# The maximum value for this member is
#AI_MAX_FACE_INDICES.
("mNumIndices", c_uint),
# Pointer to the indices array. Size of the array is given in numIndices.
("mIndices", POINTER(c_uint)),
]
class VertexWeight(Structure):
"""
See 'mesh.h' for details.
"""
_fields_ = [
# Index of the vertex which is influenced by the bone.
("mVertexId", c_uint),
# The strength of the influence in the range (0...1).
# The influence from all bones at one vertex amounts to 1.
("mWeight", c_float),
]
class Matrix4x4(Structure):
"""
See 'matrix4x4.h' for details.
"""
_fields_ = [
("a1", c_float),("a2", c_float),("a3", c_float),("a4", c_float),
("b1", c_float),("b2", c_float),("b3", c_float),("b4", c_float),
("c1", c_float),("c2", c_float),("c3", c_float),("c4", c_float),
("d1", c_float),("d2", c_float),("d3", c_float),("d4", c_float),
]
class Vector3D(Structure):
"""
See 'vector3.h' for details.
"""
_fields_ = [
("x", c_float),("y", c_float),("z", c_float),
]
class MeshKey(Structure):
"""
See 'anim.h' for details.
"""
_fields_ = [
# The time of this key
("mTime", c_double),
# Index into the aiMesh::mAnimMeshes array of the
# mesh corresponding to the
#aiMeshAnim hosting this
# key frame. The referenced anim mesh is evaluated
# according to the rules defined in the docs for
#aiAnimMesh.
("mValue", c_uint),
]
class MetadataEntry(Structure):
"""
See 'metadata.h' for details
"""
AI_BOOL = 0
AI_INT32 = 1
AI_UINT64 = 2
AI_FLOAT = 3
AI_DOUBLE = 4
AI_AISTRING = 5
AI_AIVECTOR3D = 6
AI_META_MAX = 7
_fields_ = [
# The type field uniquely identifies the underlying type of the data field
("mType", c_uint),
("mData", c_void_p),
]
class Metadata(Structure):
"""
See 'metadata.h' for details
"""
_fields_ = [
# Length of the mKeys and mValues arrays, respectively
("mNumProperties", c_uint),
# Arrays of keys, may not be NULL. Entries in this array may not be NULL
# as well.
("mKeys", POINTER(String)),
# Arrays of values, may not be NULL. Entries in this array may be NULL
# if the corresponding property key has no assigned value.
("mValues", POINTER(MetadataEntry)),
]
class Node(Structure):
"""
See 'scene.h' for details.
"""

Node._fields_ = [
# The name of the node.
# The name might be empty (length of zero) but all nodes which
# need to be accessed afterwards by bones or anims are usually named.
# Multiple nodes may have the same name, but nodes which are accessed
# by bones (see
#aiBone and
#aiMesh::mBones) *must* be unique.
# Cameras and lights are assigned to a specific node name - if there
# are multiple nodes with this name, they're assigned to each of them.
# <br>
# There are no limitations regarding the characters contained in
# this text. You should be able to handle stuff like whitespace, tabs,
# linefeeds, quotation marks, ampersands, ... .
("mName", String),
# The transformation relative to the node's parent.
("mTransformation", Matrix4x4),
# Parent node. NULL if this node is the root node.
("mParent", POINTER(Node)),
# The number of child nodes of this node.
("mNumChildren", c_uint),
# The child nodes of this node. NULL if mNumChildren is 0.
("mChildren", POINTER(POINTER(Node))),
# The number of meshes of this node.
("mNumMeshes", c_uint),
# The meshes of this node. Each entry is an index into the mesh
("mMeshes", POINTER(c_uint)),
# Metadata associated with this node or NULL if there is no metadata.
# Whether any metadata is generated depends on the source file format.
("mMetadata", POINTER(Metadata)),
]
class Light(Structure):
"""
See 'light.h' for details.
"""
_fields_ = [
# The name of the light source.
# There must be a node in the scenegraph with the same name.
# This node specifies the position of the light in the scene
# hierarchy and can be animated.
("mName", String),
# The type of the light source.
# aiLightSource_UNDEFINED is not a valid value for this member.
("mType", c_uint),
# Position of the light source in space. Relative to the
# transformation of the node corresponding to the light.
# The position is undefined for directional lights.
("mPosition", Vector3D),
# Direction of the light source in space. Relative to the
# transformation of the node corresponding to the light.
# The direction is undefined for point lights. The vector
# may be normalized, but it needn't.
("mDirection", Vector3D),
# Up direction of the light source in space. Relative to the
# transformation of the node corresponding to the light.
#
# The direction is undefined for point lights. The vector
# may be normalized, but it needn't.
("mUp", Vector3D),
# Constant light attenuation factor.
# The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
        # Atten = 1 / ( att0 + att1 * d + att2 * d*d )
# @endcode
# This member corresponds to the att0 variable in the equation.
# Naturally undefined for directional lights.
("mAttenuationConstant", c_float),
# Linear light attenuation factor.
# The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
        # Atten = 1 / ( att0 + att1 * d + att2 * d*d )
# @endcode
# This member corresponds to the att1 variable in the equation.
# Naturally undefined for directional lights.
("mAttenuationLinear", c_float),
# Quadratic light attenuation factor.
# The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
        # Atten = 1 / ( att0 + att1 * d + att2 * d*d )
# @endcode
# This member corresponds to the att2 variable in the equation.
# Naturally undefined for directional lights.
("mAttenuationQuadratic", c_float),
# Diffuse color of the light source
# The diffuse light color is multiplied with the diffuse
# material color to obtain the final color that contributes
# to the diffuse shading term.
("mColorDiffuse", Color3D),
# Specular color of the light source
# The specular light color is multiplied with the specular
# material color to obtain the final color that contributes
# to the specular shading term.
("mColorSpecular", Color3D),
# Ambient color of the light source
# The ambient light color is multiplied with the ambient
# material color to obtain the final color that contributes
# to the ambient shading term. Most renderers will ignore
# this value it, is just a remaining of the fixed-function pipeline
# that is still supported by quite many file formats.
("mColorAmbient", Color3D),
# Inner angle of a spot light's light cone.
# The spot light has maximum influence on objects inside this
# angle. The angle is given in radians. It is 2PI for point
# lights and undefined for directional lights.
("mAngleInnerCone", c_float),
# Outer angle of a spot light's light cone.
# The spot light does not affect objects outside this angle.
# The angle is given in radians. It is 2PI for point lights and
# undefined for directional lights. The outer angle must be
# greater than or equal to the inner angle.
# It is assumed that the application uses a smooth
# interpolation between the inner and the outer cone of the
# spot light.
("mAngleOuterCone", c_float),
# Size of area light source.
("mSize", Vector2D),
]
class Texture(Structure):
"""
See 'texture.h' for details.
"""
_fields_ = [
# Width of the texture, in pixels
# If mHeight is zero the texture is compressed in a format
# like JPEG. In this case mWidth specifies the size of the
# memory area pcData is pointing to, in bytes.
("mWidth", c_uint),
# Height of the texture, in pixels
        # If this value is zero, pcData points to a compressed texture
# in any format (e.g. JPEG).
("mHeight", c_uint),
# A hint from the loader to make it easier for applications
# to determine the type of embedded textures.
#
        # If mHeight != 0 this member shows how the data is packed. The hint consists of
        # two parts: channel order and channel bitness (count of the bits for every
        # color channel). For simple parsing by the viewer it's better to not omit
        # absent color channels and just use 0 for bitness. For example:
# 1. Image contain RGBA and 8 bit per channel, achFormatHint == "rgba8888";
# 2. Image contain ARGB and 8 bit per channel, achFormatHint == "argb8888";
# 3. Image contain RGB and 5 bit for R and B channels and 6 bit for G channel,
# achFormatHint == "rgba5650";
# 4. One color image with B channel and 1 bit for it, achFormatHint == "rgba0010";
        # If mHeight == 0 then achFormatHint is set to '\\0\\0\\0\\0' if the loader has no additional
# information about the texture file format used OR the
# file extension of the format without a trailing dot. If there
# are multiple file extensions for a format, the shortest
# extension is chosen (JPEG maps to 'jpg', not to 'jpeg').
# E.g. 'dds\\0', 'pcx\\0', 'jpg\\0'. All characters are lower-case.
# The fourth character will always be '\\0'.
("achFormatHint", c_char*9),
# Data of the texture.
        # Points to an array of mWidth * mHeight aiTexel's.
# The format of the texture data is always ARGB8888 to
# make the implementation for user of the library as easy
# as possible. If mHeight = 0 this is a pointer to a memory
# buffer of size mWidth containing the compressed texture
# data. Good luck, have fun!
("pcData", POINTER(Texel)),
# Texture original filename
# Used to get the texture reference
("mFilename", String),
]
class Ray(Structure):
"""
See 'types.h' for details.
"""
_fields_ = [
# Position and direction of the ray
("pos", Vector3D),("dir", Vector3D),
]
class UVTransform(Structure):
"""
See 'material.h' for details.
"""
_fields_ = [
# Translation on the u and v axes.
# The default value is (0|0).
("mTranslation", Vector2D),
# Scaling on the u and v axes.
# The default value is (1|1).
("mScaling", Vector2D),
# Rotation - in counter-clockwise direction.
# The rotation angle is specified in radians. The
# rotation center is 0.5f|0.5f. The default value
# 0.f.
("mRotation", c_float),
]
class MaterialProperty(Structure):
"""
See 'material.h' for details.
"""
_fields_ = [
# Specifies the name of the property (key)
# Keys are generally case insensitive.
("mKey", String),
# Textures: Specifies their exact usage semantic.
# For non-texture properties, this member is always 0
# (or, better-said,
#aiTextureType_NONE).
("mSemantic", c_uint),
# Textures: Specifies the index of the texture.
# For non-texture properties, this member is always 0.
("mIndex", c_uint),
# Size of the buffer mData is pointing to, in bytes.
# This value may not be 0.
("mDataLength", c_uint),
# Type information for the property.
# Defines the data layout inside the data buffer. This is used
# by the library internally to perform debug checks and to
# utilize proper type conversions.
# (It's probably a hacky solution, but it works.)
("mType", c_uint),
# Binary buffer to hold the property's value.
# The size of the buffer is always mDataLength.
("mData", POINTER(c_char)),
]
class Material(Structure):
"""
See 'material.h' for details.
"""
_fields_ = [
# List of all material properties loaded.
("mProperties", POINTER(POINTER(MaterialProperty))),
# Number of properties in the data base
("mNumProperties", c_uint),
# Storage allocated
("mNumAllocated", c_uint),
]
class Bone(Structure):
"""
See 'mesh.h' for details.
"""
_fields_ = [
# The name of the bone.
("mName", String),
# The number of vertices affected by this bone
# The maximum value for this member is
#AI_MAX_BONE_WEIGHTS.
("mNumWeights", c_uint),
# The vertices affected by this bone
("mWeights", POINTER(VertexWeight)),
# Matrix that transforms from mesh space to bone space in bind pose
("mOffsetMatrix", Matrix4x4),
]
class AnimMesh(Structure):
"""
See 'mesh.h' for details.
"""
AI_MAX_NUMBER_OF_TEXTURECOORDS = 0x8
AI_MAX_NUMBER_OF_COLOR_SETS = 0x8
_fields_ = [
# Anim Mesh name
("mName", String),
# Replacement for aiMesh::mVertices. If this array is non-NULL,
# it *must* contain mNumVertices entries. The corresponding
# array in the host mesh must be non-NULL as well - animation
# meshes may neither add or nor remove vertex components (if
# a replacement array is NULL and the corresponding source
# array is not, the source data is taken instead)
("mVertices", POINTER(Vector3D)),
# Replacement for aiMesh::mNormals.
("mNormals", POINTER(Vector3D)),
# Replacement for aiMesh::mTangents.
("mTangents", POINTER(Vector3D)),
# Replacement for aiMesh::mBitangents.
("mBitangents", POINTER(Vector3D)),
# Replacement for aiMesh::mColors
("mColors", POINTER(Color4D) * AI_MAX_NUMBER_OF_COLOR_SETS),
# Replacement for aiMesh::mTextureCoords
("mTextureCoords", POINTER(Vector3D) * AI_MAX_NUMBER_OF_TEXTURECOORDS),
# The number of vertices in the aiAnimMesh, and thus the length of all
# the member arrays.
#
# This has always the same value as the mNumVertices property in the
# corresponding aiMesh. It is duplicated here merely to make the length
# of the member arrays accessible even if the aiMesh is not known, e.g.
# from language bindings.
("mNumVertices", c_uint),
# Weight of the AnimMesh.
("mWeight", c_float),
]
class Mesh(Structure):
"""
See 'mesh.h' for details.
"""
AI_MAX_FACE_INDICES = 0x7fff
AI_MAX_BONE_WEIGHTS = 0x7fffffff
AI_MAX_VERTICES = 0x7fffffff
AI_MAX_FACES = 0x7fffffff
AI_MAX_NUMBER_OF_COLOR_SETS = 0x8
AI_MAX_NUMBER_OF_TEXTURECOORDS = 0x8
_fields_ = [ # Bitwise combination of the members of the
#aiPrimitiveType enum.
# This specifies which types of primitives are present in the mesh.
# The "SortByPrimitiveType"-Step can be used to make sure the
# output meshes consist of one primitive type each.
("mPrimitiveTypes", c_uint),
# The number of vertices in this mesh.
# This is also the size of all of the per-vertex data arrays.
# The maximum value for this member is
#AI_MAX_VERTICES.
("mNumVertices", c_uint),
# The number of primitives (triangles, polygons, lines) in this mesh.
# This is also the size of the mFaces array.
# The maximum value for this member is
#AI_MAX_FACES.
("mNumFaces", c_uint),
# Vertex positions.
# This array is always present in a mesh. The array is
# mNumVertices in size.
("mVertices", POINTER(Vector3D)),
# Vertex normals.
# The array contains normalized vectors, NULL if not present.
# The array is mNumVertices in size. Normals are undefined for
# point and line primitives. A mesh consisting of points and
# lines only may not have normal vectors. Meshes with mixed
# primitive types (i.e. lines and triangles) may have normals,
# but the normals for vertices that are only referenced by
# point or line primitives are undefined and set to QNaN (WARN:
        # qNaN compares unequal to *everything*, even to qNaN itself.
# Using code like this to check whether a field is qnan is:
# @code
#define IS_QNAN(f) (f != f)
# @endcode
        # still dangerous because even 1.f == 1.f could evaluate to false!
        # (remember the subtleties of IEEE754 arithmetic). Use stuff like
# @c fpclassify instead.
# @note Normal vectors computed by Assimp are always unit-length.
# However, this needn't apply for normals that have been taken
# directly from the model file.
("mNormals", POINTER(Vector3D)),
# Vertex tangents.
# The tangent of a vertex points in the direction of the positive
# X texture axis. The array contains normalized vectors, NULL if
# not present. The array is mNumVertices in size. A mesh consisting
# of points and lines only may not have normal vectors. Meshes with
# mixed primitive types (i.e. lines and triangles) may have
# normals, but the normals for vertices that are only referenced by
# point or line primitives are undefined and set to qNaN. See
# the
#mNormals member for a detailed discussion of qNaNs.
# @note If the mesh contains tangents, it automatically also
# contains bitangents (the bitangent is just the cross product of
# tangent and normal vectors).
("mTangents", POINTER(Vector3D)),
# Vertex bitangents.
# The bitangent of a vertex points in the direction of the positive
# Y texture axis. The array contains normalized vectors, NULL if not
# present. The array is mNumVertices in size.
# @note If the mesh contains tangents, it automatically also contains
# bitangents.
("mBitangents", POINTER(Vector3D)),
# Vertex color sets.
# A mesh may contain 0 to
#AI_MAX_NUMBER_OF_COLOR_SETS vertex
# colors per vertex. NULL if not present. Each array is
# mNumVertices in size if present.
("mColors", POINTER(Color4D)*AI_MAX_NUMBER_OF_COLOR_SETS),
# Vertex texture coords, also known as UV channels.
# A mesh may contain 0 to AI_MAX_NUMBER_OF_TEXTURECOORDS per
# vertex. NULL if not present. The array is mNumVertices in size.
("mTextureCoords", POINTER(Vector3D)*AI_MAX_NUMBER_OF_TEXTURECOORDS),
# Specifies the number of components for a given UV channel.
# Up to three channels are supported (UVW, for accessing volume
# or cube maps). If the value is 2 for a given channel n, the
# component p.z of mTextureCoords[n][p] is set to 0.0f.
# If the value is 1 for a given channel, p.y is set to 0.0f, too.
# @note 4D coords are not supported
("mNumUVComponents", c_uint*AI_MAX_NUMBER_OF_TEXTURECOORDS),
# The faces the mesh is constructed from.
# Each face refers to a number of vertices by their indices.
# This array is always present in a mesh, its size is given
# in mNumFaces. If the
#AI_SCENE_FLAGS_NON_VERBOSE_FORMAT
# is NOT set each face references an unique set of vertices.
("mFaces", POINTER(Face)),
# The number of bones this mesh contains.
# Can be 0, in which case the mBones array is NULL.
("mNumBones", c_uint),
# The bones of this mesh.
# A bone consists of a name by which it can be found in the
# frame hierarchy and a set of vertex weights.
("mBones", POINTER(POINTER(Bone))),
# The material used by this mesh.
# A mesh does use only a single material. If an imported model uses
# multiple materials, the import splits up the mesh. Use this value
# as index into the scene's material list.
("mMaterialIndex", c_uint),
# Name of the mesh. Meshes can be named, but this is not a
# requirement and leaving this field empty is totally fine.
# There are mainly three uses for mesh names:
# - some formats name nodes and meshes independently.
# - importers tend to split meshes up to meet the
# one-material-per-mesh requirement. Assigning
# the same (dummy) name to each of the result meshes
# aids the caller at recovering the original mesh
# partitioning.
# - Vertex animations refer to meshes by their names.
("mName", String),
# The number of attachment meshes.
# Currently known to work with loaders:
# - Collada
# - gltf
("mNumAnimMeshes", c_uint),
# Attachment meshes for this mesh, for vertex-based animation.
# Attachment meshes carry replacement data for some of the
        # mesh's vertex components (usually positions, normals).
# Currently known to work with loaders:
# - Collada
# - gltf
("mAnimMeshes", POINTER(POINTER(AnimMesh))),
# Method of morphing when animeshes are specified.
("mMethod", c_uint),
]
class Camera(Structure):
"""
See 'camera.h' for details.
"""
_fields_ = [
# The name of the camera.
# There must be a node in the scenegraph with the same name.
# This node specifies the position of the camera in the scene
# hierarchy and can be animated.
("mName", String),
# Position of the camera relative to the coordinate space
# defined by the corresponding node.
# The default value is 0|0|0.
("mPosition", Vector3D),
# 'Up' - vector of the camera coordinate system relative to
# the coordinate space defined by the corresponding node.
# The 'right' vector of the camera coordinate system is
# the cross product of the up and lookAt vectors.
# The default value is 0|1|0. The vector
# may be normalized, but it need not be.
("mUp", Vector3D),
# 'LookAt' - vector of the camera coordinate system relative to
# the coordinate space defined by the corresponding node.
# This is the viewing direction of the user.
# The default value is 0|0|1. The vector
# may be normalized, but it need not be.
("mLookAt", Vector3D),
# Half horizontal field of view angle, in radians.
# The field of view angle is the angle between the center
# line of the screen and the left or right border.
# The default value is 1/4PI.
("mHorizontalFOV", c_float),
# Distance of the near clipping plane from the camera.
# The value may not be 0.f (for arithmetic reasons to prevent
# a division through zero). The default value is 0.1f.
("mClipPlaneNear", c_float),
# Distance of the far clipping plane from the camera.
# The far clipping plane must, of course, be further away than the
# near clipping plane. The default value is 1000.f. The ratio
# between the near and the far plane should not be too
# large (between 1000-10000 should be ok) to avoid floating-point
# inaccuracies which could lead to z-fighting.
("mClipPlaneFar", c_float),
# Screen aspect ratio.
# This is the ratio between the width and the height of the
# screen. Typical values are 4/3, 1/2 or 1/1. This value is
# 0 if the aspect ratio is not defined in the source file.
# 0 is also the default value.
("mAspect", c_float),
]
class VectorKey(Structure):
"""
See 'anim.h' for details.
"""
_fields_ = [
# The time of this key
("mTime", c_double),
# The value of this key
("mValue", Vector3D),
]
class QuatKey(Structure):
"""
See 'anim.h' for details.
"""
_fields_ = [
# The time of this key
("mTime", c_double),
# The value of this key
("mValue", Quaternion),
]
class MeshMorphKey(Structure):
"""
See 'anim.h' for details.
"""
_fields_ = [
# The time of this key
("mTime", c_double),
# The values and weights at the time of this key
("mValues", POINTER(c_uint)),
("mWeights", POINTER(c_double)),
# The number of values and weights
("mNumValuesAndWeights", c_uint),
]
class NodeAnim(Structure):
"""
See 'anim.h' for details.
"""
_fields_ = [
# The name of the node affected by this animation. The node
# must exist and it must be unique.
("mNodeName", String),
# The number of position keys
("mNumPositionKeys", c_uint),
# The position keys of this animation channel. Positions are
# specified as 3D vector. The array is mNumPositionKeys in size.
# If there are position keys, there will also be at least one
# scaling and one rotation key.
("mPositionKeys", POINTER(VectorKey)),
# The number of rotation keys
("mNumRotationKeys", c_uint),
# The rotation keys of this animation channel. Rotations are
# given as quaternions, which are 4D vectors. The array is
# mNumRotationKeys in size.
# If there are rotation keys, there will also be at least one
# scaling and one position key.
("mRotationKeys", POINTER(QuatKey)),
# The number of scaling keys
("mNumScalingKeys", c_uint),
# The scaling keys of this animation channel. Scalings are
# specified as 3D vector. The array is mNumScalingKeys in size.
# If there are scaling keys, there will also be at least one
# position and one rotation key.
("mScalingKeys", POINTER(VectorKey)),
# Defines how the animation behaves before the first
# key is encountered.
# The default value is aiAnimBehaviour_DEFAULT (the original
# transformation matrix of the affected node is used).
("mPreState", c_uint),
# Defines how the animation behaves after the last
# key was processed.
# The default value is aiAnimBehaviour_DEFAULT (the original
# transformation matrix of the affected node is taken).
("mPostState", c_uint),
]
class MeshAnim(Structure):
"""
See 'anim.h' for details.
"""
_fields_ = [
# Name of the mesh to be animated. An empty string is not allowed,
# animated meshes need to be named (not necessarily uniquely,
# the name can basically serve as wild-card to select a group
# of meshes with similar animation setup)
("mName", String),
# Size of the #mKeys array. Must be at least 1.
("mNumKeys", c_uint),
# Key frames of the animation. May not be NULL.
("mKeys", POINTER(MeshKey)),
]
class MeshMorphAnim(Structure):
"""
See 'anim.h' for details.
"""
_fields_ = [
# Name of the mesh to be animated. An empty string is not allowed,
# animated meshes need to be named (not necessarily uniquely,
# the name can basically serve as wildcard to select a group
# of meshes with similar animation setup)
("mName", String),
# Size of the #mKeys array. Must be at least 1.
("mNumKeys", c_uint),
# Key frames of the animation. May not be NULL.
("mKeys", POINTER(MeshMorphKey)),
]
class Animation(Structure):
"""
See 'anim.h' for details.
"""
_fields_ = [
# The name of the animation. If the modeling package this data was
# exported from does support only a single animation channel, this
# name is usually empty (length is zero).
("mName", String),
# Duration of the animation in ticks.
("mDuration", c_double),
# Ticks per second. 0 if not specified in the imported file
("mTicksPerSecond", c_double),
# The number of bone animation channels. Each channel affects
# a single node.
("mNumChannels", c_uint),
# The node animation channels. Each channel affects a single node.
# The array is mNumChannels in size.
("mChannels", POINTER(POINTER(NodeAnim))),
# The number of mesh animation channels. Each channel affects
# a single mesh and defines vertex-based animation.
("mNumMeshChannels", c_uint),
# The mesh animation channels. Each channel affects a single mesh.
# The array is mNumMeshChannels in size.
("mMeshChannels", POINTER(POINTER(MeshAnim))),
# The number of mesh animation channels. Each channel affects
# a single mesh and defines morphing animation.
("mNumMorphMeshChannels", c_uint),
# The morph mesh animation channels. Each channel affects a single mesh.
# The array is mNumMorphMeshChannels in size.
("mMorphMeshChannels", POINTER(POINTER(MeshMorphAnim))),
]
class ExportDataBlob(Structure):
"""
See 'cexport.h' for details.
Note that the '_fields_' definition is outside the class to allow the 'next' field to be recursive
"""
pass
ExportDataBlob._fields_ = [
# Size of the data in bytes
("size", c_size_t),
# The data.
("data", c_void_p),
# Name of the blob. An empty string always
# indicates the first (and primary) blob,
# which contains the actual file data.
# Any other blobs are auxiliary files produced
# by exporters (i.e. material files). Existence
# of such files depends on the file format. Most
# formats don't split assets across multiple files.
#
# If used, blob names usually contain the file
# extension that should be used when writing
# the data to disc.
("name", String),
# Pointer to the next blob in the chain or NULL if there is none.
("next", POINTER(ExportDataBlob)),
]
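# Hedged usage sketch (illustrative, not part of the original bindings):
# walk the blob chain produced by aiExportSceneToBlob by following the
# 'next' pointers described in the field comments above.
def _example_iter_blobs(blob_ptr):
    # A NULL ctypes pointer is falsy, so this stops at the end of the chain.
    while blob_ptr:
        blob = blob_ptr.contents
        yield blob
        blob_ptr = blob.next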
class Scene(Structure):
"""
See 'aiScene.h' for details.
"""
AI_SCENE_FLAGS_INCOMPLETE = 0x1
AI_SCENE_FLAGS_VALIDATED = 0x2
AI_SCENE_FLAGS_VALIDATION_WARNING = 0x4
AI_SCENE_FLAGS_NON_VERBOSE_FORMAT = 0x8
AI_SCENE_FLAGS_TERRAIN = 0x10
AI_SCENE_FLAGS_ALLOW_SHARED = 0x20
_fields_ = [
# Any combination of the AI_SCENE_FLAGS_XXX flags. By default
# this value is 0, no flags are set. Most applications will
# want to reject all scenes with the AI_SCENE_FLAGS_INCOMPLETE
# bit set.
("mFlags", c_uint),
# The root node of the hierarchy.
# There will always be at least the root node if the import
# was successful (and no special flags have been set).
# Presence of further nodes depends on the format and content
# of the imported file.
("mRootNode", POINTER(Node)),
# The number of meshes in the scene.
("mNumMeshes", c_uint),
# The array of meshes.
# Use the indices given in the aiNode structure to access
# this array. The array is mNumMeshes in size. If the
# AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always
# be at least ONE material.
("mMeshes", POINTER(POINTER(Mesh))),
# The number of materials in the scene.
("mNumMaterials", c_uint),
# The array of materials.
# Use the index given in each aiMesh structure to access this
# array. The array is mNumMaterials in size. If the
# AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always
# be at least ONE material.
("mMaterials", POINTER(POINTER(Material))),
# The number of animations in the scene.
("mNumAnimations", c_uint),
# The array of animations.
# All animations imported from the given file are listed here.
# The array is mNumAnimations in size.
("mAnimations", POINTER(POINTER(Animation))),
# The number of textures embedded into the file
("mNumTextures", c_uint),
# The array of embedded textures.
# Not many file formats embed their textures into the file.
# An example is Quake's MDL format (which is also used by
# some GameStudio versions)
("mTextures", POINTER(POINTER(Texture))),
# The number of light sources in the scene. Light sources
# are fully optional, in most cases this attribute will be 0
("mNumLights", c_uint),
# The array of light sources.
# All light sources imported from the given file are
# listed here. The array is mNumLights in size.
("mLights", POINTER(POINTER(Light))),
# The number of cameras in the scene. Cameras
# are fully optional, in most cases this attribute will be 0
("mNumCameras", c_uint),
# The array of cameras.
# All cameras imported from the given file are listed here.
# The array is mNumCameras in size. The first camera in the
# array (if existing) is the default camera view into
# the scene.
("mCameras", POINTER(POINTER(Camera))),
# This data contains global metadata which belongs to the scene like
# unit-conversions, versions, vendors or other model-specific data. This
# can be used to store format-specific metadata as well.
("mMetadata", POINTER(Metadata)),
# Internal data, do not touch
("mPrivate", POINTER(c_char)),
]
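# Hedged usage sketch (illustrative, not part of the original bindings):
# given a ctypes POINTER(Scene) returned by the raw C API, a node's meshes
# are reached through index indirection, as the mMeshes field comments
# above describe.
def _example_node_meshes(scene_ptr, node):
    # node.mMeshes holds indices into scene.mMeshes; resolve them here.
    scene = scene_ptr.contents
    return [scene.mMeshes[node.mMeshes[i]].contents
            for i in range(node.mNumMeshes)]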
assimp_structs_as_tuple = (Matrix4x4,
Matrix3x3,
Vector2D,
Vector3D,
Color3D,
Color4D,
Quaternion,
Plane,
Texel)
| 41,444 | Python | 35.3234 | 111 | 0.56136 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/postprocess.py | # <hr>Calculates the tangents and bitangents for the imported meshes.
#
# Does nothing if a mesh does not have normals. You might want this post
# processing step to be executed if you plan to use tangent space calculations
# such as normal mapping applied to the meshes. There's a config setting,
# <tt>#AI_CONFIG_PP_CT_MAX_SMOOTHING_ANGLE<tt>, which allows you to specify
# a maximum smoothing angle for the algorithm. However, usually you'll
# want to leave it at the default value.
#
aiProcess_CalcTangentSpace = 0x1
## <hr>Identifies and joins identical vertex data sets within all
# imported meshes.
#
# After this step is run, each mesh contains unique vertices,
# so a vertex may be used by multiple faces. You usually want
# to use this post processing step. If your application deals with
# indexed geometry, this step is compulsory or you'll just waste rendering
# time. <b>If this flag is not specified<b>, no vertices are referenced by
# more than one face and <b>no index buffer is required<b> for rendering.
#
aiProcess_JoinIdenticalVertices = 0x2
## <hr>Converts all the imported data to a left-handed coordinate space.
#
# By default the data is returned in a right-handed coordinate space (which
# OpenGL prefers). In this space, +X points to the right,
# +Z points towards the viewer, and +Y points upwards. In the DirectX
# coordinate space +X points to the right, +Y points upwards, and +Z points
# away from the viewer.
#
# You'll probably want to consider this flag if you use Direct3D for
# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this
# setting and bundles all conversions typically required for D3D-based
# applications.
#
aiProcess_MakeLeftHanded = 0x4
## <hr>Triangulates all faces of all meshes.
#
# By default the imported mesh data might contain faces with more than 3
# indices. For rendering you'll usually want all faces to be triangles.
# This post processing step splits up faces with more than 3 indices into
# triangles. Line and point primitives are #not# modified! If you want
# 'triangles only' with no other kinds of primitives, try the following
# solution:
# <ul>
# <li>Specify both #aiProcess_Triangulate and #aiProcess_SortByPType <li>
# <li>Ignore all point and line meshes when you process assimp's output<li>
# <ul>
#
aiProcess_Triangulate = 0x8
## <hr>Removes some parts of the data structure (animations, materials,
# light sources, cameras, textures, vertex components).
#
# The components to be removed are specified in a separate
# configuration option, <tt>#AI_CONFIG_PP_RVC_FLAGS<tt>. This is quite useful
# if you don't need all parts of the output structure. Vertex colors
# are rarely used today for example... Calling this step to remove unneeded
# data from the pipeline as early as possible results in increased
# performance and a more optimized output data structure.
# This step is also useful if you want to force Assimp to recompute
# normals or tangents. The corresponding steps don't recompute them if
# they're already there (loaded from the source asset). By using this
# step you can make sure they are NOT there.
#
# This flag is a poor one, mainly because its purpose is usually
# misunderstood. Consider the following case: a 3D model has been exported
# from a CAD app, and it has per-face vertex colors. Vertex positions can't be
# shared, thus the #aiProcess_JoinIdenticalVertices step fails to
# optimize the data because of these nasty little vertex colors.
# Most apps don't even process them, so it's all for nothing. By using
# this step, unneeded components are excluded as early as possible
# thus opening more room for internal optimizations.
#
aiProcess_RemoveComponent = 0x10
## <hr>Generates normals for all faces of all meshes.
#
# This is ignored if normals are already there at the time this flag
# is evaluated. Model importers try to load them from the source file, so
# they're usually already there. Face normals are shared between all points
# of a single face, so a single point can have multiple normals, which
# forces the library to duplicate vertices in some cases.
# #aiProcess_JoinIdenticalVertices is #senseless# then.
#
# This flag may not be specified together with #aiProcess_GenSmoothNormals.
#
aiProcess_GenNormals = 0x20
## <hr>Generates smooth normals for all vertices in the mesh.
#
# This is ignored if normals are already there at the time this flag
# is evaluated. Model importers try to load them from the source file, so
# they're usually already there.
#
# This flag may not be specified together with
# #aiProcess_GenNormals. There's a configuration option,
# <tt>#AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE<tt> which allows you to specify
# an angle maximum for the normal smoothing algorithm. Normals exceeding
# this limit are not smoothed, resulting in a 'hard' seam between two faces.
# Using a decent angle here (e.g. 80 degrees) results in very good visual
# appearance.
#
aiProcess_GenSmoothNormals = 0x40
## <hr>Splits large meshes into smaller sub-meshes.
#
# This is quite useful for real-time rendering, where the number of triangles
# which can be maximally processed in a single draw-call is limited
# by the video driver/hardware. The maximum vertex buffer is usually limited
# too. Both requirements can be met with this step: you may specify both a
# triangle and vertex limit for a single mesh.
#
# The split limits can (and should!) be set through the
# <tt>#AI_CONFIG_PP_SLM_VERTEX_LIMIT<tt> and <tt>#AI_CONFIG_PP_SLM_TRIANGLE_LIMIT<tt>
# settings. The default values are <tt>#AI_SLM_DEFAULT_MAX_VERTICES<tt> and
# <tt>#AI_SLM_DEFAULT_MAX_TRIANGLES<tt>.
#
# Note that splitting is generally a time-consuming task, but only if there's
# something to split. The use of this step is recommended for most users.
#
aiProcess_SplitLargeMeshes = 0x80
## <hr>Removes the node graph and pre-transforms all vertices with
# the local transformation matrices of their nodes.
#
# The output scene still contains nodes, however there is only a
# root node with children, each one referencing only one mesh,
# and each mesh referencing one material. For rendering, you can
# simply render all meshes in order - you don't need to pay
# attention to local transformations and the node hierarchy.
# Animations are removed during this step.
# This step is intended for applications without a scenegraph.
# The step CAN cause some problems: if e.g. a mesh of the asset
# contains normals and another, using the same material index, does not,
# they will be brought together, but the first meshes's part of
# the normal list is zeroed. However, these artifacts are rare.
# @note The <tt>#AI_CONFIG_PP_PTV_NORMALIZE<tt> configuration property
# can be set to normalize the scene's spatial dimension to the -1...1
# range.
#
aiProcess_PreTransformVertices = 0x100
## <hr>Limits the number of bones simultaneously affecting a single vertex
# to a maximum value.
#
# If any vertex is affected by more than the maximum number of bones, the least
# important vertex weights are removed and the remaining vertex weights are
# renormalized so that the weights still sum up to 1.
# The default bone weight limit is 4 (defined as <tt>#AI_LMW_MAX_WEIGHTS<tt> in
# config.h), but you can use the <tt>#AI_CONFIG_PP_LBW_MAX_WEIGHTS<tt> setting to
# supply your own limit to the post processing step.
#
# If you intend to perform the skinning in hardware, this post processing
# step might be of interest to you.
#
aiProcess_LimitBoneWeights = 0x200
## <hr>Validates the imported scene data structure.
# This makes sure that all indices are valid, all animations and
# bones are linked correctly, all material references are correct .. etc.
#
# It is recommended that you capture Assimp's log output if you use this flag,
# so you can easily find out what's wrong if a file fails the
# validation. The validator is quite strict and will find #all#
# inconsistencies in the data structure... It is recommended that plugin
# developers use it to debug their loaders. There are two types of
# validation failures:
# <ul>
# <li>Error: There's something wrong with the imported data. Further
# postprocessing is not possible and the data is not usable at all.
# The import fails. #Importer::GetErrorString() or #aiGetErrorString()
# carry the error message around.<li>
# <li>Warning: There are some minor issues (e.g. 1000000 animation
# keyframes with the same time), but further postprocessing and use
# of the data structure is still safe. Warning details are written
# to the log file, <tt>#AI_SCENE_FLAGS_VALIDATION_WARNING<tt> is set
# in #aiScene::mFlags<li>
# <ul>
#
# This post-processing step is not time-consuming. Its use is not
# compulsory, but recommended.
#
aiProcess_ValidateDataStructure = 0x400
## <hr>Reorders triangles for better vertex cache locality.
#
# The step tries to improve the ACMR (average post-transform vertex cache
# miss ratio) for all meshes. The implementation runs in O(n) and is
# roughly based on the 'tipsify' algorithm (see <a href="
# http://www.cs.princeton.edu/gfx/pubs/Sander_2007_%3ETR/tipsy.pdf">this
# paper</a>).
#
# If you intend to render huge models in hardware, this step might
# be of interest to you. The <tt>#AI_CONFIG_PP_ICL_PTCACHE_SIZE<tt>config
# setting can be used to fine-tune the cache optimization.
#
aiProcess_ImproveCacheLocality = 0x800
## <hr>Searches for redundant/unreferenced materials and removes them.
#
# This is especially useful in combination with the
# #aiProcess_PretransformVertices and #aiProcess_OptimizeMeshes flags.
# Both join small meshes with equal characteristics, but they can't do
# their work if two meshes have different materials. Because several
# material settings are lost during Assimp's import filters,
# (and because many exporters don't check for redundant materials), huge
# models often have materials which are defined several times with
# exactly the same settings.
#
# Several material settings not contributing to the final appearance of
# a surface are ignored in all comparisons (e.g. the material name).
# So, if you're passing additional information through the
# content pipeline (probably using #magic# material names), don't
# specify this flag. Alternatively take a look at the
# <tt>#AI_CONFIG_PP_RRM_EXCLUDE_LIST<tt> setting.
#
aiProcess_RemoveRedundantMaterials = 0x1000
## <hr>This step tries to determine which meshes have normal vectors
# that are facing inwards and inverts them.
#
# The algorithm is simple but effective:
# the bounding box of all vertices + their normals is compared against
# the volume of the bounding box of all vertices without their normals.
# This works well for most objects, problems might occur with planar
# surfaces. However, the step tries to filter such cases.
# The step inverts all in-facing normals. Generally it is recommended
# to enable this step, although the result is not always correct.
#
aiProcess_FixInfacingNormals = 0x2000
## <hr>This step splits meshes with more than one primitive type in
# homogeneous sub-meshes.
#
# The step is executed after the triangulation step. After the step
# returns, just one bit is set in aiMesh::mPrimitiveTypes. This is
# especially useful for real-time rendering where point and line
# primitives are often ignored or rendered separately.
# You can use the <tt>#AI_CONFIG_PP_SBP_REMOVE<tt> option to specify which
# primitive types you need. This can be used to easily exclude
# lines and points, which are rarely used, from the import.
#
aiProcess_SortByPType = 0x8000
## <hr>This step searches all meshes for degenerate primitives and
# converts them to proper lines or points.
#
# A face is 'degenerate' if one or more of its points are identical.
# To have the degenerate stuff not only detected and collapsed but
# removed, try one of the following procedures:
# <br><b>1.<b> (if you support lines and points for rendering but don't
# want the degenerates)<br>
# <ul>
# <li>Specify the #aiProcess_FindDegenerates flag.
# <li>
# <li>Set the <tt>AI_CONFIG_PP_FD_REMOVE<tt> option to 1. This will
# cause the step to remove degenerate triangles from the import
# as soon as they're detected. They won't pass any further
# pipeline steps.
# <li>
# <ul>
# <br><b>2.<b>(if you don't support lines and points at all)<br>
# <ul>
# <li>Specify the #aiProcess_FindDegenerates flag.
# <li>
# <li>Specify the #aiProcess_SortByPType flag. This moves line and
# point primitives to separate meshes.
# <li>
# <li>Set the <tt>AI_CONFIG_PP_SBP_REMOVE<tt> option to
# @code aiPrimitiveType_POINTS | aiPrimitiveType_LINES
# @endcode to cause SortByPType to reject point
# and line meshes from the scene.
# <li>
# <ul>
# @note Degenerate polygons are not necessarily evil and that's why
# they're not removed by default. There are several file formats which
# don't support lines or points, and some exporters bypass the
# format specification and write them as degenerate triangles instead.
#
aiProcess_FindDegenerates = 0x10000
## <hr>This step searches all meshes for invalid data, such as zeroed
# normal vectors or invalid UV coords and removes/fixes them. This is
# intended to get rid of some common exporter errors.
#
# This is especially useful for normals. If they are invalid, and
# the step recognizes this, they will be removed and can later
# be recomputed, i.e. by the #aiProcess_GenSmoothNormals flag.<br>
# The step will also remove meshes that are infinitely small and reduce
# animation tracks consisting of hundreds of redundant keys to a single
# key. The <tt>AI_CONFIG_PP_FID_ANIM_ACCURACY<tt> config property decides
# the accuracy of the check for duplicate animation tracks.
#
aiProcess_FindInvalidData = 0x20000
## <hr>This step converts non-UV mappings (such as spherical or
# cylindrical mapping) to proper texture coordinate channels.
#
# Most applications will support UV mapping only, so you will
# probably want to specify this step in every case. Note that Assimp is not
# always able to match the original mapping implementation of the
# 3D app which produced a model perfectly. It's always better to let the
# modelling app compute the UV channels - 3ds max, Maya, Blender,
# LightWave, and Modo do this for example.
#
# @note If this step is not requested, you'll need to process the
# <tt>#AI_MATKEY_MAPPING<tt> material property in order to display all assets
# properly.
#
aiProcess_GenUVCoords = 0x40000
## <hr>This step applies per-texture UV transformations and bakes
# them into stand-alone texture coordinate channels.
#
# UV transformations are specified per-texture - see the
# <tt>#AI_MATKEY_UVTRANSFORM<tt> material key for more information.
# This step processes all textures with
# transformed input UV coordinates and generates a new (pre-transformed) UV channel
# which replaces the old channel. Most applications won't support UV
# transformations, so you will probably want to specify this step.
#
# @note UV transformations are usually implemented in real-time apps by
# transforming texture coordinates at vertex shader stage with a 3x3
# (homogeneous) transformation matrix.
#
aiProcess_TransformUVCoords = 0x80000
## <hr>This step searches for duplicate meshes and replaces them
# with references to the first mesh.
#
# This step takes a while, so don't use it if speed is a concern.
# Its main purpose is to workaround the fact that many export
# file formats don't support instanced meshes, so exporters need to
# duplicate meshes. This step removes the duplicates again. Please
# note that Assimp does not currently support per-node material
# assignment to meshes, which means that identical meshes with
# different materials are currently #not# joined, although this is
# planned for future versions.
#
aiProcess_FindInstances = 0x100000
## <hr>A postprocessing step to reduce the number of meshes.
#
# This will, in fact, reduce the number of draw calls.
#
# This is a very effective optimization and is recommended to be used
# together with #aiProcess_OptimizeGraph, if possible. The flag is fully
# compatible with both #aiProcess_SplitLargeMeshes and #aiProcess_SortByPType.
#
aiProcess_OptimizeMeshes = 0x200000
## <hr>A postprocessing step to optimize the scene hierarchy.
#
# Nodes without animations, bones, lights or cameras assigned are
# collapsed and joined.
#
# Node names can be lost during this step. If you use special 'tag nodes'
# to pass additional information through your content pipeline, use the
# <tt>#AI_CONFIG_PP_OG_EXCLUDE_LIST<tt> setting to specify a list of node
# names you want to be kept. Nodes matching one of the names in this list won't
# be touched or modified.
#
# Use this flag with caution. Most simple files will be collapsed to a
# single node, so complex hierarchies are usually completely lost. This is not
# useful for editor environments, but probably a very effective
# optimization if you just want to get the model data, convert it to your
# own format, and render it as fast as possible.
#
# This flag is designed to be used with #aiProcess_OptimizeMeshes for best
# results.
#
# @note 'Crappy' scenes with thousands of extremely small meshes packed
# in deeply nested nodes exist for almost all file formats.
# #aiProcess_OptimizeMeshes in combination with #aiProcess_OptimizeGraph
# usually fixes them all and makes them renderable.
#
aiProcess_OptimizeGraph = 0x400000
## <hr>This step flips all UV coordinates along the y-axis and adjusts
# material settings and bitangents accordingly.
#
# <b>Output UV coordinate system:<b>
# @code
# 0y|0y ---------- 1x|0y
# | |
# | |
# | |
# 0x|1y ---------- 1x|1y
# @endcode
#
# You'll probably want to consider this flag if you use Direct3D for
# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this
# setting and bundles all conversions typically required for D3D-based
# applications.
#
aiProcess_FlipUVs = 0x800000
## <hr>This step adjusts the output face winding order to be CW.
#
# The default face winding order is counter clockwise (CCW).
#
# <b>Output face order:<b>
# @code
# x2
#
# x0
# x1
# @endcode
#
aiProcess_FlipWindingOrder = 0x1000000
## <hr>This step splits meshes with many bones into sub-meshes so that each
# sub-mesh has fewer or as many bones as a given limit.
#
aiProcess_SplitByBoneCount = 0x2000000
## <hr>This step removes bones losslessly or according to some threshold.
#
# In some cases (i.e. formats that require it) exporters are forced to
# assign dummy bone weights to otherwise static meshes assigned to
# animated meshes. Full, weight-based skinning is expensive while
# animating nodes is extremely cheap, so this step is offered to clean up
# the data in that regard.
#
# Use <tt>#AI_CONFIG_PP_DB_THRESHOLD<tt> to control this.
# Use <tt>#AI_CONFIG_PP_DB_ALL_OR_NONE<tt> if you want bones removed if and
# only if all bones within the scene qualify for removal.
#
aiProcess_Debone = 0x4000000
aiProcess_GenEntityMeshes = 0x100000
aiProcess_OptimizeAnimations = 0x200000
aiProcess_FixTexturePaths = 0x200000
aiProcess_EmbedTextures = 0x10000000
## @def aiProcess_ConvertToLeftHanded
# @brief Shortcut flag for Direct3D-based applications.
#
# Supersedes the #aiProcess_MakeLeftHanded and #aiProcess_FlipUVs and
# #aiProcess_FlipWindingOrder flags.
# The output data matches Direct3D's conventions: left-handed geometry, upper-left
# origin for UV coordinates and finally clockwise face order, suitable for CCW culling.
#
# @deprecated
#
aiProcess_ConvertToLeftHanded = ( \
aiProcess_MakeLeftHanded | \
aiProcess_FlipUVs | \
aiProcess_FlipWindingOrder | \
0 )
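# A hedged combination example: Direct3D-style imports typically OR this
# shortcut with one of the realtime presets defined below, e.g.
# processing = aiProcess_ConvertToLeftHanded | aiProcessPreset_TargetRealtime_Fast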
## @def aiProcessPreset_TargetRealtimeUse_Fast
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# Applications would want to use this preset to load models on end-user PCs,
# maybe for direct use in game.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be of
# use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_Fast = ( \
aiProcess_CalcTangentSpace | \
aiProcess_GenNormals | \
aiProcess_JoinIdenticalVertices | \
aiProcess_Triangulate | \
aiProcess_GenUVCoords | \
aiProcess_SortByPType | \
0 )
## @def aiProcessPreset_TargetRealtime_Quality
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# Unlike #aiProcessPreset_TargetRealtime_Fast, this configuration
# performs some extra optimizations to improve rendering speed and
# to minimize memory usage. It could be a good choice for a level editor
# environment where import speed is not so important.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be
# of use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_Quality = ( \
aiProcess_CalcTangentSpace | \
aiProcess_GenSmoothNormals | \
aiProcess_JoinIdenticalVertices | \
aiProcess_ImproveCacheLocality | \
aiProcess_LimitBoneWeights | \
aiProcess_RemoveRedundantMaterials | \
aiProcess_SplitLargeMeshes | \
aiProcess_Triangulate | \
aiProcess_GenUVCoords | \
aiProcess_SortByPType | \
aiProcess_FindDegenerates | \
aiProcess_FindInvalidData | \
0 )
## @def aiProcessPreset_TargetRealtime_MaxQuality
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# This preset enables almost every optimization step to achieve perfectly
# optimized data. It's your choice for level editor environments where import speed
# is not important.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application, apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be
# of use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_MaxQuality = ( \
aiProcessPreset_TargetRealtime_Quality | \
aiProcess_FindInstances | \
aiProcess_ValidateDataStructure | \
aiProcess_OptimizeMeshes | \
0 )
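# Minimal usage sketch (assumption: "model.obj" is an illustrative path to a
# readable model; pyassimp.load and pyassimp.release are this package's
# public API):
if __name__ == "__main__":
    import pyassimp
    scene = pyassimp.load("model.obj",
                          processing=aiProcessPreset_TargetRealtime_MaxQuality)
    try:
        print("meshes:", len(scene.meshes))
    finally:
        pyassimp.release(scene)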
| 23,548 | Python | 43.348399 | 90 | 0.741422 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/helper.py | #-*- coding: UTF-8 -*-
"""
Some fancy helper functions.
"""
import os
import ctypes
import operator
from distutils.sysconfig import get_python_lib
import re
import sys
try: import numpy
except ImportError: numpy = None
import logging
logger = logging.getLogger("pyassimp")
from .errors import AssimpError
additional_dirs, ext_whitelist = [],[]
# populate search directories and lists of allowed file extensions
# depending on the platform we're running on.
if os.name=='posix':
additional_dirs.append('./')
additional_dirs.append('/usr/lib/')
additional_dirs.append('/usr/lib/x86_64-linux-gnu/')
additional_dirs.append('/usr/lib/aarch64-linux-gnu/')
additional_dirs.append('/usr/local/lib/')
if 'LD_LIBRARY_PATH' in os.environ:
additional_dirs.extend([item for item in os.environ['LD_LIBRARY_PATH'].split(':') if item])
# check if running from anaconda.
anaconda_keywords = ("conda", "continuum")
if any(k in sys.version.lower() for k in anaconda_keywords):
cur_path = get_python_lib()
pattern = re.compile(r'.*/lib/')
conda_lib = pattern.match(cur_path).group()
logger.info("Adding Anaconda lib path:"+ conda_lib)
additional_dirs.append(conda_lib)
# note - this won't catch libassimp.so.N.n, but
# currently there's always a symlink called
# libassimp.so in /usr/local/lib.
ext_whitelist.append('.so')
# libassimp.dylib in /usr/local/lib
ext_whitelist.append('.dylib')
elif os.name=='nt':
ext_whitelist.append('.dll')
path_dirs = os.environ['PATH'].split(';')
additional_dirs.extend(path_dirs)
def vec2tuple(x):
""" Converts a VECTOR3D to a Tuple """
return (x.x, x.y, x.z)
def transform(vector3, matrix4x4):
""" Apply a transformation matrix on a 3D vector.
:param vector3: array with 3 elements
:param matrix4x4: 4x4 matrix
"""
if numpy:
return numpy.dot(matrix4x4, numpy.append(vector3, 1.))
else:
m0,m1,m2,m3 = matrix4x4; x,y,z = vector3
return [
m0[0]*x + m0[1]*y + m0[2]*z + m0[3],
m1[0]*x + m1[1]*y + m1[2]*z + m1[3],
m2[0]*x + m2[1]*y + m2[2]*z + m2[3],
m3[0]*x + m3[1]*y + m3[2]*z + m3[3]
]
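def _transform_example():
    # Hedged self-check (illustrative, not part of the original helper API):
    # transforming by the identity matrix must return the input point,
    # whichever of the two code paths in transform() is taken.
    identity = [[1., 0., 0., 0.],
                [0., 1., 0., 0.],
                [0., 0., 1., 0.],
                [0., 0., 0., 1.]]
    out = transform([1.0, 2.0, 3.0], identity)
    assert [round(float(v), 6) for v in out[:3]] == [1.0, 2.0, 3.0]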
def _inv(matrix4x4):
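    # Pure-Python fallback used when numpy is unavailable: closed-form 4x4
    # inverse via cofactor expansion (det below is the full determinant).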
m0,m1,m2,m3 = matrix4x4
det = m0[3]*m1[2]*m2[1]*m3[0] - m0[2]*m1[3]*m2[1]*m3[0] - \
m0[3]*m1[1]*m2[2]*m3[0] + m0[1]*m1[3]*m2[2]*m3[0] + \
m0[2]*m1[1]*m2[3]*m3[0] - m0[1]*m1[2]*m2[3]*m3[0] - \
m0[3]*m1[2]*m2[0]*m3[1] + m0[2]*m1[3]*m2[0]*m3[1] + \
m0[3]*m1[0]*m2[2]*m3[1] - m0[0]*m1[3]*m2[2]*m3[1] - \
m0[2]*m1[0]*m2[3]*m3[1] + m0[0]*m1[2]*m2[3]*m3[1] + \
m0[3]*m1[1]*m2[0]*m3[2] - m0[1]*m1[3]*m2[0]*m3[2] - \
m0[3]*m1[0]*m2[1]*m3[2] + m0[0]*m1[3]*m2[1]*m3[2] + \
m0[1]*m1[0]*m2[3]*m3[2] - m0[0]*m1[1]*m2[3]*m3[2] - \
m0[2]*m1[1]*m2[0]*m3[3] + m0[1]*m1[2]*m2[0]*m3[3] + \
m0[2]*m1[0]*m2[1]*m3[3] - m0[0]*m1[2]*m2[1]*m3[3] - \
m0[1]*m1[0]*m2[2]*m3[3] + m0[0]*m1[1]*m2[2]*m3[3]
return[[( m1[2]*m2[3]*m3[1] - m1[3]*m2[2]*m3[1] + m1[3]*m2[1]*m3[2] - m1[1]*m2[3]*m3[2] - m1[2]*m2[1]*m3[3] + m1[1]*m2[2]*m3[3]) /det,
( m0[3]*m2[2]*m3[1] - m0[2]*m2[3]*m3[1] - m0[3]*m2[1]*m3[2] + m0[1]*m2[3]*m3[2] + m0[2]*m2[1]*m3[3] - m0[1]*m2[2]*m3[3]) /det,
( m0[2]*m1[3]*m3[1] - m0[3]*m1[2]*m3[1] + m0[3]*m1[1]*m3[2] - m0[1]*m1[3]*m3[2] - m0[2]*m1[1]*m3[3] + m0[1]*m1[2]*m3[3]) /det,
( m0[3]*m1[2]*m2[1] - m0[2]*m1[3]*m2[1] - m0[3]*m1[1]*m2[2] + m0[1]*m1[3]*m2[2] + m0[2]*m1[1]*m2[3] - m0[1]*m1[2]*m2[3]) /det],
[( m1[3]*m2[2]*m3[0] - m1[2]*m2[3]*m3[0] - m1[3]*m2[0]*m3[2] + m1[0]*m2[3]*m3[2] + m1[2]*m2[0]*m3[3] - m1[0]*m2[2]*m3[3]) /det,
( m0[2]*m2[3]*m3[0] - m0[3]*m2[2]*m3[0] + m0[3]*m2[0]*m3[2] - m0[0]*m2[3]*m3[2] - m0[2]*m2[0]*m3[3] + m0[0]*m2[2]*m3[3]) /det,
( m0[3]*m1[2]*m3[0] - m0[2]*m1[3]*m3[0] - m0[3]*m1[0]*m3[2] + m0[0]*m1[3]*m3[2] + m0[2]*m1[0]*m3[3] - m0[0]*m1[2]*m3[3]) /det,
( m0[2]*m1[3]*m2[0] - m0[3]*m1[2]*m2[0] + m0[3]*m1[0]*m2[2] - m0[0]*m1[3]*m2[2] - m0[2]*m1[0]*m2[3] + m0[0]*m1[2]*m2[3]) /det],
[( m1[1]*m2[3]*m3[0] - m1[3]*m2[1]*m3[0] + m1[3]*m2[0]*m3[1] - m1[0]*m2[3]*m3[1] - m1[1]*m2[0]*m3[3] + m1[0]*m2[1]*m3[3]) /det,
( m0[3]*m2[1]*m3[0] - m0[1]*m2[3]*m3[0] - m0[3]*m2[0]*m3[1] + m0[0]*m2[3]*m3[1] + m0[1]*m2[0]*m3[3] - m0[0]*m2[1]*m3[3]) /det,
( m0[1]*m1[3]*m3[0] - m0[3]*m1[1]*m3[0] + m0[3]*m1[0]*m3[1] - m0[0]*m1[3]*m3[1] - m0[1]*m1[0]*m3[3] + m0[0]*m1[1]*m3[3]) /det,
( m0[3]*m1[1]*m2[0] - m0[1]*m1[3]*m2[0] - m0[3]*m1[0]*m2[1] + m0[0]*m1[3]*m2[1] + m0[1]*m1[0]*m2[3] - m0[0]*m1[1]*m2[3]) /det],
[( m1[2]*m2[1]*m3[0] - m1[1]*m2[2]*m3[0] - m1[2]*m2[0]*m3[1] + m1[0]*m2[2]*m3[1] + m1[1]*m2[0]*m3[2] - m1[0]*m2[1]*m3[2]) /det,
( m0[1]*m2[2]*m3[0] - m0[2]*m2[1]*m3[0] + m0[2]*m2[0]*m3[1] - m0[0]*m2[2]*m3[1] - m0[1]*m2[0]*m3[2] + m0[0]*m2[1]*m3[2]) /det,
( m0[2]*m1[1]*m3[0] - m0[1]*m1[2]*m3[0] - m0[2]*m1[0]*m3[1] + m0[0]*m1[2]*m3[1] + m0[1]*m1[0]*m3[2] - m0[0]*m1[1]*m3[2]) /det,
( m0[1]*m1[2]*m2[0] - m0[2]*m1[1]*m2[0] + m0[2]*m1[0]*m2[1] - m0[0]*m1[2]*m2[1] - m0[1]*m1[0]*m2[2] + m0[0]*m1[1]*m2[2]) /det]]
def get_bounding_box(scene):
bb_min = [1e10, 1e10, 1e10] # x,y,z
bb_max = [-1e10, -1e10, -1e10] # x,y,z
inv = numpy.linalg.inv if numpy else _inv
return get_bounding_box_for_node(scene.rootnode, bb_min, bb_max, inv(scene.rootnode.transformation))
def get_bounding_box_for_node(node, bb_min, bb_max, transformation):
if numpy:
transformation = numpy.dot(transformation, node.transformation)
else:
t0,t1,t2,t3 = transformation
T0,T1,T2,T3 = node.transformation
transformation = [ [
t0[0]*T0[0] + t0[1]*T1[0] + t0[2]*T2[0] + t0[3]*T3[0],
t0[0]*T0[1] + t0[1]*T1[1] + t0[2]*T2[1] + t0[3]*T3[1],
t0[0]*T0[2] + t0[1]*T1[2] + t0[2]*T2[2] + t0[3]*T3[2],
t0[0]*T0[3] + t0[1]*T1[3] + t0[2]*T2[3] + t0[3]*T3[3]
],[
t1[0]*T0[0] + t1[1]*T1[0] + t1[2]*T2[0] + t1[3]*T3[0],
t1[0]*T0[1] + t1[1]*T1[1] + t1[2]*T2[1] + t1[3]*T3[1],
t1[0]*T0[2] + t1[1]*T1[2] + t1[2]*T2[2] + t1[3]*T3[2],
t1[0]*T0[3] + t1[1]*T1[3] + t1[2]*T2[3] + t1[3]*T3[3]
],[
t2[0]*T0[0] + t2[1]*T1[0] + t2[2]*T2[0] + t2[3]*T3[0],
t2[0]*T0[1] + t2[1]*T1[1] + t2[2]*T2[1] + t2[3]*T3[1],
t2[0]*T0[2] + t2[1]*T1[2] + t2[2]*T2[2] + t2[3]*T3[2],
t2[0]*T0[3] + t2[1]*T1[3] + t2[2]*T2[3] + t2[3]*T3[3]
],[
t3[0]*T0[0] + t3[1]*T1[0] + t3[2]*T2[0] + t3[3]*T3[0],
t3[0]*T0[1] + t3[1]*T1[1] + t3[2]*T2[1] + t3[3]*T3[1],
t3[0]*T0[2] + t3[1]*T1[2] + t3[2]*T2[2] + t3[3]*T3[2],
t3[0]*T0[3] + t3[1]*T1[3] + t3[2]*T2[3] + t3[3]*T3[3]
] ]
for mesh in node.meshes:
for v in mesh.vertices:
v = transform(v, transformation)
bb_min[0] = min(bb_min[0], v[0])
bb_min[1] = min(bb_min[1], v[1])
bb_min[2] = min(bb_min[2], v[2])
bb_max[0] = max(bb_max[0], v[0])
bb_max[1] = max(bb_max[1], v[1])
bb_max[2] = max(bb_max[2], v[2])
for child in node.children:
bb_min, bb_max = get_bounding_box_for_node(child, bb_min, bb_max, transformation)
return bb_min, bb_max
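def _bounding_box_example(path="model.obj"):
    # Hedged usage sketch (the path is illustrative, not part of this module):
    # import a scene with pyassimp and return its axis-aligned bounds.
    from pyassimp import load, release
    scene = load(path)
    try:
        return get_bounding_box(scene)
    finally:
        release(scene)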
def try_load_functions(library_path, dll):
'''
Try to bind to aiImportFile and aiReleaseImport
Arguments
---------
library_path: path to current lib
dll: ctypes handle to library
Returns
---------
If unsuccessful:
None
If successful:
Tuple containing (library_path,
load from filename function,
load from memory function,
export to filename function,
export to blob function,
release function,
ctypes handle to assimp library)
'''
try:
load = dll.aiImportFile
release = dll.aiReleaseImport
load_mem = dll.aiImportFileFromMemory
export = dll.aiExportScene
export2blob = dll.aiExportSceneToBlob
except AttributeError:
#OK, this is a library, but it doesn't have the functions we need
return None
# library found!
from .structs import Scene, ExportDataBlob
load.restype = ctypes.POINTER(Scene)
load_mem.restype = ctypes.POINTER(Scene)
export2blob.restype = ctypes.POINTER(ExportDataBlob)
return (library_path, load, load_mem, export, export2blob, release, dll)
def search_library():
'''
Loads the assimp library.
Throws exception AssimpError if no library_path is found
Returns: tuple, (load from filename function,
load from memory function,
export to filename function,
export to blob function,
release function,
dll)
'''
#this path
folder = os.path.dirname(__file__)
# silence 'DLL not found' message boxes on win
try:
ctypes.windll.kernel32.SetErrorMode(0x8007)
except AttributeError:
pass
candidates = []
# test every file
for curfolder in [folder]+additional_dirs:
if os.path.isdir(curfolder):
for filename in os.listdir(curfolder):
# our minimum requirement for candidates is that
# they should contain 'assimp' somewhere in
# their name
if filename.lower().find('assimp')==-1 :
continue
is_out=1
for et in ext_whitelist:
if et in filename.lower():
is_out=0
break
if is_out:
continue
library_path = os.path.join(curfolder, filename)
logger.debug('Try ' + library_path)
try:
dll = ctypes.cdll.LoadLibrary(library_path)
except Exception as e:
logger.warning(str(e))
# OK, this except is evil. But different OSs will throw different
# errors. So just ignore any errors.
continue
# see if the functions we need are in the dll
loaded = try_load_functions(library_path, dll)
if loaded: candidates.append(loaded)
if not candidates:
# no library found
raise AssimpError("assimp library not found")
else:
# get the newest library_path
candidates = map(lambda x: (os.lstat(x[0])[-2], x), candidates)
res = max(candidates, key=operator.itemgetter(0))[1]
logger.debug('Using assimp library located at ' + res[0])
# XXX: if there are 1000 dll/so files containing 'assimp'
# in their name, do we have all of them in our address
# space now until gc kicks in?
# XXX: take version postfix of the .so on linux?
return res[1:]
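# Hedged usage sketch (underscore names are illustrative); unpacking mirrors
# the docstring above:
# _load, _load_mem, _export, _export2blob, _release, _dll = search_library()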
def hasattr_silent(object, name):
"""
Calls hasattr() with the given parameters and preserves the legacy (pre-Python 3.2)
functionality of silently catching exceptions.
Returns the result of hasattr() or False if an exception was raised.
"""
try:
if not object:
return False
return hasattr(object, name)
except AttributeError:
return False
| 11,799 | Python | 40.549296 | 139 | 0.507755 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/config/extension.toml | [package]
version = "1.0.0"
title = "MF GDTF converter"
description = "Support of GDTF (General Device Type Format) files in USD."
authors = ["Moment Factory", "Frederic Lestage", "Antoine Pilote"]
readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"
repository = "https://github.com/MomentFactory/Omniverse-MVR-GDTF-converter"
category = "Rendering"
keywords = ["MVR", "GDTF","Audiovisual","Lighting","Fixture"]
preview_image = "data/preview.png"
icon = "data/icon.png"
toggleable = false
[core]
reloadable = false
# Load at the start, load all schemas with order -100 (with order -1000 the USD libs are loaded)
order = -100
[dependencies]
"omni.kit.uiapp" = {}
"omni.kit.tool.asset_importer" = {}
[[python.module]]
name = "mf.ov.gdtf"
[python.pipapi]
requirements = [
"unidecode"
]
use_online_index = true
[package.target]
kit = ["105.1"]
[package.writeTarget]
kit = true
python = false | 909 | TOML | 20.16279 | 96 | 0.70187 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/docs/CHANGELOG.md | # Changelog
## [1.0.0] - 2024-01-24
- Added native OpenUSD file format plugin for payload support.
- Fixed orientation and scale issues
- Some light parameters are now applied to USD light (cone, color temp, intensity)
- Deprecated kit 104 and 105.0
- Added Sample files for USDView
## [0.4.0] - 2023-10-02
### Added
- Sample file
### Fixed
- Enabled importing from Omniverse
- Importing within the same repository as the source file fixed (for filesystem and Omniverse)
### Changed
- The name of the folder (the one created during importation that contains the files converted to usd) won't include the file extension ("myGDTFFile.gdtf/" will now be "myGDTFFile_gdtf/")
- Properly remove the temporary directory created for archive extraction at the end of importation
## [0.3.0] - 2023-09-01
### Added
- Support for node type "Inventory"
- Use "Beam" node when present for light xform
### Fixed
- Global scale and rotation rework
- Fix relative links issue with path and character escaping
## [0.2.0] - 2023-08-17
### Fixed
- Better support for 3ds files
### Changed
- When making name valid for usd, add underscore if starts with number
## [0.1.0] - 2023-07-21
### Added
- Initial version of the extension
- Support import of GDTF files
| 1,243 | Markdown | 26.043478 | 187 | 0.738536 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/docs/README.md | # GDTF extension for Omniverse [mf.ov.gdtf]
Copyright 2023 Moment Factory Studios Inc.
An Omniverse extension for [GDTF (General Device Type Format)](https://github.com/mvrdevelopment/spec/blob/main/gdtf-spec.md) files. Supports GDTF to OpenUSD conversion as well as references to GDTF files through a native OpenUSD FileFormat plugin.
| 336 | Markdown | 66.399987 | 247 | 0.803571 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/converterContext.py | class ConverterContext:
usd_reference_path = ""
| 52 | Python | 16.666661 | 27 | 0.711538 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/mvrImporter.py | import logging
import numpy as np
from typing import List, Tuple
import xml.etree.ElementTree as ET
from zipfile import ZipFile
from pxr import Gf, Usd, UsdGeom
from mf.ov.gdtf import gdtfImporter as gdtf
from .filepathUtility import Filepath
from .mvrUtil import Layer, Fixture
from .USDTools import USDTools
class MVRImporter:
def convert(file: Filepath, mvr_output_dir: str, output_ext: str = ".usd") -> str:
# TODO: change output_ext to bool use_usda
try:
with ZipFile(file.fullpath, 'r') as archive:
output_dir = mvr_output_dir + file.filename + "_mvr/"
data = archive.read("GeneralSceneDescription.xml")
root = ET.fromstring(data)
MVRImporter._warn_for_version(root)
url: str = MVRImporter.convert_mvr_usd(output_dir, file.filename, output_ext, root, archive)
return url
except Exception as e:
logger = logging.getLogger(__name__)
logger.error(f"Failed to parse mvr file at {file.fullpath}. Make sure it is not corrupt. {e}")
return None
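    # Hedged usage sketch (paths are illustrative, not part of this module):
    #   url = MVRImporter.convert(Filepath("C:/shows/rig.mvr"), "C:/shows/out/")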
def _warn_for_version(root):
v_major = root.attrib["verMajor"]
v_minor = root.attrib["verMinor"]
if v_major != "1" or v_minor != "5":
logger = logging.getLogger(__name__)
logger.warning(f"This extension is tested with MVR v1.5; this file's version is {v_major}.{v_minor}")
def convert_mvr_usd(output_dir: str, filename: str, ext: str, root: ET.Element, archive: ZipFile) -> str:
scene: ET.Element = root.find("Scene")
layers: List[Layer] = MVRImporter._get_layers(scene)
for layer in layers:
layer.find_fixtures()
stage, url = MVRImporter._make_mvr_stage(output_dir, filename, ext, layers)
MVRImporter._convert_gdtf(stage, layers, output_dir, archive, ext)
stage.Save()
return url
def _get_layers(scene: ET.Element) -> List[Layer]:
layersNode: ET.Element = scene.find("Layers")
layerNodes: ET.Element = layersNode.findall("Layer")
layers: List[Layer] = []
for layerNode in layerNodes:
layer: Layer = Layer(layerNode)
layers.append(layer)
return layers
def _make_mvr_stage(output_dir: str, filename: str, ext: str, layers: List[Layer]) -> Tuple[Usd.Stage, str]:
url: str = output_dir + filename + ext
stage: Usd.Stage = USDTools.get_or_create_stage(url)
MVRImporter._add_fixture_xform(stage, layers)
return stage, url
def _add_fixture_xform(stage: Usd.Stage, layers: List[Layer]):
rotate_minus90deg_xaxis = Gf.Matrix3d(1, 0, 0, 0, 0, 1, 0, -1, 0)
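        # -90 degrees about X maps MVR's Z-up coordinates to Y-up: (x, y, z) -> (x, z, -y)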
mvr_scale = UsdGeom.LinearUnits.millimeters # MVR dimensions are in millimeters
applied_scale: float = USDTools.get_applied_scale(stage, mvr_scale)
for layer in layers:
if layer.fixtures_len() > 0:
scope: UsdGeom.Scope = USDTools.add_scope(stage, layer.get_name_usd())
for fixture in layer.get_fixtures():
xform: UsdGeom.Xform = USDTools.add_fixture_xform(stage, scope, fixture.get_unique_name_usd())
fixture.set_stage_path(xform.GetPrim().GetPath())
np_matrix: np.matrix = USDTools.np_matrix_from_mvr(fixture.get_matrix())
gf_matrix: Gf.Matrix4d = USDTools.gf_matrix_from_mvr(np_matrix, applied_scale)
rotation: Gf.Rotation = gf_matrix.ExtractRotation()
euler: Gf.Vec3d = rotation.Decompose(Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis())
# Z-up to Y-up
# TODO: Validate with stage up axis
translation = rotate_minus90deg_xaxis * gf_matrix.ExtractTranslation()
rotate = rotate_minus90deg_xaxis * euler
xform.ClearXformOpOrder() # Prevent error when overwriting
xform.AddTranslateOp().Set(translation)
xform.AddRotateZYXOp().Set(rotate)
# Scale Op is added in _add_gdtf_reference
fixture.apply_attributes_to_prim(xform.GetPrim())
stage.Save()
def _convert_gdtf(stage: Usd.Stage, layers: List[Layer], mvr_output_dir: str, archive: ZipFile, ext: str):
gdtf_spec_uniq: List[str] = MVRImporter._get_gdtf_to_import(layers)
gdtf_output_dir = mvr_output_dir
for gdtf_spec in gdtf_spec_uniq:
gdtf.GDTFImporter.convert_from_mvr(gdtf_spec, gdtf_output_dir, archive)
MVRImporter._add_gdtf_reference(layers, stage, ext)
def _get_gdtf_to_import(layers: List[Layer]) -> List[str]:
result: List[str] = []
for layer in layers:
if layer.fixtures_len() > 0:
current_fixture_names = [x.get_spec_name() for x in layer.get_fixtures()]
current_fixture_names_set = set(current_fixture_names)
current_fixture_names_uniq = list(current_fixture_names_set)
for current_fixture_name_uniq in current_fixture_names_uniq:
result.append(current_fixture_name_uniq)
return result
def _add_gdtf_reference(layers: List[Layer], stage: Usd.Stage, ext: str):
for layer in layers:
if layer.fixtures_len() > 0:
for fixture in layer.get_fixtures():
spec = fixture.get_spec_name()
relative_path = f"./{spec}_gdtf/{spec}{ext}"
stage_path = fixture.get_stage_path()
USDTools.add_reference(stage, relative_path, stage_path)
USDTools.copy_gdtf_scale(stage, stage_path, relative_path)
| 5,751 | Python | 46.147541 | 114 | 0.603026 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/extension.py | import omni.ext
import omni.kit.tool.asset_importer as ai
from .converterDelegate import ConverterDelegate
class MfOvMvrExtension(omni.ext.IExt):
def on_startup(self, _):
self._delegate_mvr = ConverterDelegate(
"MVR Converter",
["(.*\\.mvr$)"],
["MVR Files (*.mvr)"]
)
ai.register_importer(self._delegate_mvr)
def on_shutdown(self):
ai.remove_importer(self._delegate_mvr)
self._delegate_mvr.destroy()
self._delegate_mvr = None
| 522 | Python | 26.526314 | 48 | 0.611111 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/__init__.py | import os
from pxr import Plug
pluginsRoot = os.path.join(os.path.dirname(__file__), '../../../plugin/resources')
Plug.Registry().RegisterPlugins(pluginsRoot)
from .extension import *
| 192 | Python | 18.299998 | 82 | 0.703125 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/converterDelegate.py | import os
import omni.kit.tool.asset_importer as ai
from .converterOptionsBuilder import ConverterOptionsBuilder
from .converterHelper import ConverterHelper
class ConverterDelegate(ai.AbstractImporterDelegate):
def __init__(self, name, filters, descriptions):
super().__init__()
self._hoops_options_builder = ConverterOptionsBuilder()
self._hoops_converter = ConverterHelper()
self._name = name
self._filters = filters
self._descriptions = descriptions
def destroy(self):
if self._hoops_converter:
# self._hoops_converter.destroy()
self._hoops_converter = None
if self._hoops_options_builder:
self._hoops_options_builder.destroy()
self._hoops_options_builder = None
@property
def name(self):
return self._name
@property
def filter_regexes(self):
return self._filters
@property
def filter_descriptions(self):
return self._descriptions
def build_options(self, paths):
pass
# TODO enable this after the filepicker bugfix: OM-47383
# self._hoops_options_builder.build_pane(paths)
async def convert_assets(self, paths):
context = self._hoops_options_builder.get_import_options()
hoops_context = context.cad_converter_context
absolute_paths = []
relative_paths = []
for file_path in paths:
if self.is_supported_format(file_path):
absolute_paths.append(file_path)
filename = os.path.basename(file_path)
relative_paths.append(filename)
converted_assets = await self._hoops_converter.create_import_task(
absolute_paths, context.export_folder, hoops_context
)
return converted_assets
| 1,825 | Python | 28.934426 | 74 | 0.637808 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/converterOptionsBuilder.py | from omni.kit.menu import utils
from omni.kit.tool.asset_importer.file_picker import FilePicker
from omni.kit.tool.asset_importer.filebrowser import FileBrowserMode, FileBrowserSelectionType
import omni.kit.window.content_browser as content
from .converterOptions import ConverterOptions
class ConverterOptionsBuilder:
def __init__(self):
self._file_picker = None
self._export_content = ConverterOptions()
self._folder_button = None
self._refresh_default_folder = False
self._default_folder = None
self._clear()
def destroy(self):
self._clear()
if self._file_picker:
self._file_picker.destroy()
def _clear(self):
self._built = False
self._export_folder_field = None
if self._folder_button:
self._folder_button.set_clicked_fn(None)
self._folder_button = None
def set_default_target_folder(self, folder: str):
self._default_folder = folder
self._refresh_default_folder = True
def _select_picked_folder_callback(self, paths):
if paths:
self._export_folder_field.model.set_value(paths[0])
def _cancel_picked_folder_callback(self):
pass
def _show_file_picker(self):
if not self._file_picker:
mode = FileBrowserMode.OPEN
file_type = FileBrowserSelectionType.DIRECTORY_ONLY
filters = [(".*", "All Files (*.*)")]
self._file_picker = FilePicker("Select Folder", mode=mode, file_type=file_type, filter_options=filters)
self._file_picker.set_file_selected_fn(self._select_picked_folder_callback)
self._file_picker.set_cancel_fn(self._cancel_picked_folder_callback)
folder = self._export_folder_field.model.get_value_as_string()
if utils.is_folder(folder):
self._file_picker.show(folder)
else:
self._file_picker.show(self._get_current_dir_in_content_window())
def _get_current_dir_in_content_window(self):
content_window = content.get_content_window()
return content_window.get_current_directory()
def get_import_options(self):
return ConverterOptions()
| 2,209 | Python | 35.229508 | 115 | 0.646899 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/converterHelper.py | import logging
import shutil
import tempfile
from urllib.parse import unquote
import omni.kit.window.content_browser
from .filepathUtility import Filepath
from .mvrImporter import MVRImporter
class ConverterHelper:
TMP_ARCHIVE_EXTRACT_DIR = f"{tempfile.gettempdir()}/MF.OV.GDTF/"
def _create_import_task(self, absolute_path, export_folder, _):
absolute_path_unquoted = unquote(absolute_path)
if absolute_path_unquoted.startswith("file:/"):
path = absolute_path_unquoted[6:]
else:
path = absolute_path_unquoted
current_nucleus_dir = omni.kit.window.content_browser.get_content_window().get_current_directory()
file: Filepath = Filepath(path)
# Fall back to the current Nucleus directory when no export folder is given
output_dir = export_folder if export_folder else current_nucleus_dir
# Cannot Unzip directly from Nucleus, must download file beforehand
if file.is_nucleus_path():
tmp_path = ConverterHelper.TMP_ARCHIVE_EXTRACT_DIR + file.basename
result = omni.client.copy(file.fullpath, tmp_path, omni.client.CopyBehavior.OVERWRITE)
if result == omni.client.Result.OK:
file = Filepath(tmp_path)
else:
logger = logging.getLogger(__name__)
logger.error(f"Could not import {file.fullpath} directly from Omniverse, try downloading the file instead")
return
url: str = MVRImporter.convert(file, output_dir)
return url
async def create_import_task(self, absolute_paths, export_folder, hoops_context):
converted_assets = {}
for i in range(len(absolute_paths)):
converted_assets[absolute_paths[i]] = self._create_import_task(absolute_paths[i], export_folder,
hoops_context)
shutil.rmtree(ConverterHelper.TMP_ARCHIVE_EXTRACT_DIR, ignore_errors=True)
return converted_assets
| 2,037 | Python | 39.759999 | 123 | 0.641139 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/USDTools.py | import numpy as np
from typing import List
from unidecode import unidecode
from urllib.parse import unquote
from pxr import Gf, Tf, Sdf, Usd, UsdGeom
class USDTools:
def make_name_valid(name: str) -> str:
if name[:1].isdigit():
name = "_" + name
return Tf.MakeValidIdentifier(unidecode(name))
def get_or_create_stage(url: str) -> Usd.Stage:
        try:  # TODO: Better way to check if stage exists?
            return Usd.Stage.Open(url)
        except Exception:
stage = Usd.Stage.CreateNew(url)
UsdGeom.SetStageMetersPerUnit(stage, UsdGeom.LinearUnits.centimeters) # TODO get user defaults
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) # TODO get user defaults
default_prim = stage.DefinePrim("/World", "Xform")
stage.SetDefaultPrim(default_prim)
stage.Save()
return stage
def add_scope(stage: Usd.Stage, name: str) -> UsdGeom.Scope:
default_prim_path: Sdf.Path = stage.GetDefaultPrim().GetPrimPath()
scope_path: Sdf.Path = default_prim_path.AppendPath(name)
scope: UsdGeom.Scope = UsdGeom.Scope.Define(stage, scope_path)
return scope
def add_fixture_xform(stage: Usd.Stage, scope: UsdGeom.Scope, name: str) -> UsdGeom.Xform:
path = scope.GetPath().AppendPath(name)
xform: UsdGeom.Xform = UsdGeom.Xform.Define(stage, path)
return xform
def get_applied_scale(stage: Usd.Stage, scale_factor: float) -> float:
stage_scale = UsdGeom.GetStageMetersPerUnit(stage)
return scale_factor / stage_scale
def np_matrix_from_mvr(value: str) -> np.matrix:
# MVR Matrix is: 4x3, Right-handed, Z-up, 1 Distance Unit equals 1mm
# expect form like "<Matrix>{x,y,z}{x,y,z}{x,y,z}{x,y,z}</Matrix>" where "x","y","z" is similar to 1.000000
# make source compatible with np.matrix constructor: "x y z; x y z; x y z; x y z"
value_alt = value[1:] # Removes "{" prefix
value_alt = value_alt[:-1] # Removes "}" suffix
value_alt = value_alt.replace("}{", "; ")
value_alt = value_alt.replace(",", " ")
np_matrix: np.matrix = np.matrix(value_alt)
return np_matrix
def gf_matrix_from_mvr(np_matrix: np.matrix, scale: float) -> Gf.Matrix4d:
# Column major matrix
gf_matrix = Gf.Matrix4d(
np_matrix.item((0, 0)), np_matrix.item((0, 1)), np_matrix.item((0, 2)), 0,
np_matrix.item((1, 0)), np_matrix.item((1, 1)), np_matrix.item((1, 2)), 0,
np_matrix.item((2, 0)), np_matrix.item((2, 1)), np_matrix.item((2, 2)), 0,
np_matrix.item((3, 0)) * scale, np_matrix.item((3, 1)) * scale, np_matrix.item((3, 2)) * scale, 1
)
return gf_matrix
def set_fixture_attribute(prim: Usd.Prim, attribute_name: str, attribute_type: Sdf.ValueTypeNames, attribute_value):
prim.CreateAttribute(f"mf:mvr:{attribute_name}", attribute_type).Set(attribute_value)
def add_reference(stage: Usd.Stage, ref_path_relative: str, stage_path: str):
xform_ref: UsdGeom.Xform = stage.GetPrimAtPath(stage_path)
path_unquoted = unquote(ref_path_relative)
references: Usd.References = xform_ref.GetReferences()
references.AddReference(path_unquoted)
stage.Save()
def copy_gdtf_scale(mvr_stage: Usd.Stage, stage_prim_path: str, relative_path: str):
# Copy a reference default prim scale op value to a referencing xform in an other stage
curr_root_layer = mvr_stage.GetRootLayer()
curr_stage_url: str = curr_root_layer.realPath
curr_stage_url_formatted: str = curr_stage_url.replace('\\', '/')
        curr_stage_dir_index: int = curr_stage_url_formatted.rindex("/")
curr_stage_dir = curr_stage_url_formatted[:curr_stage_dir_index]
mvr_xform_target = UsdGeom.Xform(mvr_stage.GetPrimAtPath(stage_prim_path))
gdtf_stage_filename: str = relative_path[1:]
gdtf_stage_path: str = curr_stage_dir + gdtf_stage_filename
gdtf_stage: Usd.Stage = Usd.Stage.Open(gdtf_stage_path)
gdtf_default_prim = UsdGeom.Xform(gdtf_stage.GetDefaultPrim())
stage_scale = UsdGeom.GetStageMetersPerUnit(mvr_stage)
scale_factor = 1 / stage_scale
scale_value = Gf.Vec3d(scale_factor, scale_factor, scale_factor)
xform_ordered_ops: List[UsdGeom.XformOp] = gdtf_default_prim.GetOrderedXformOps()
for xform_op in xform_ordered_ops:
if xform_op.GetOpType() == UsdGeom.XformOp.TypeScale:
scale_value = xform_op.Get()
mvr_xform_target.AddScaleOp().Set(scale_value)
mvr_stage.Save()
| 4,672 | Python | 46.683673 | 120 | 0.634632 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/converterOptions.py | from .converterContext import ConverterContext
class ConverterOptions:
def __init__(self):
self.cad_converter_context = ConverterContext()
self.export_folder: str = None
| 192 | Python | 23.124997 | 55 | 0.708333 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/mvrUtil.py | from typing import List
import xml.etree.ElementTree as ET
from pxr import Usd, Sdf
from .USDTools import USDTools
class Fixture:
def __init__(self, node: ET.Element):
self._root = node
self._name = node.attrib["name"]
self._uuid = node.attrib["uuid"]
self._matrix = self._get_value_text_if_exists("Matrix")
self._gdtf_spec = self._get_value_text_if_exists("GDTFSpec")
self._gdtf_mode = self._get_value_text_if_exists("GDTFMode")
self._custom_commands = self._get_custom_commands_values()
self._classing = self._get_value_text_if_exists("Classing")
self._addresses = self._get_addresses_values()
self._fixture_id = self._get_value_int_if_exists("fixtureID")
self._unit_number = self._get_value_int_if_exists("UnitNumber")
self._fixture_type_id = self._get_value_int_if_exists("FixtureTypeId")
self._custom_id = self._get_value_int_if_exists("CustomId")
self._cie_color = self._get_color_values()
self._cast_shadow = self._get_value_bool_if_exists("CastShadow")
def get_unique_name_usd(self) -> str:
return USDTools.make_name_valid(self._name + "_" + self._uuid)
def get_matrix(self) -> str:
return self._matrix
def set_stage_path(self, path: str):
self._stage_path = path
def get_stage_path(self) -> str:
return self._stage_path
def get_spec_name(self) -> str:
spec_name = self._gdtf_spec
if self._gdtf_spec[-5:] == ".gdtf":
spec_name = self._gdtf_spec[:-5]
return spec_name
def _get_value_text_if_exists(self, name: str) -> str:
        node = self._get_child_node(name)
        if node is not None:
            return node.text
        return None
def _get_value_int_if_exists(self, name: str) -> int:
txt = self._get_value_text_if_exists(name)
if txt is None:
return None
return int(txt)
def _get_value_bool_if_exists(self, name: str) -> bool:
txt = self._get_value_text_if_exists(name)
if txt is None:
return None
        # bool("false") would evaluate to True; parse the XML text explicitly instead
        return txt.strip().lower() in ("true", "1")
def _get_child_node(self, node: str):
return self._root.find(node)
def _get_custom_commands_values(self) -> List[str]:
values: List[str] = []
node = self._get_child_node("CustomCommands")
if node is not None:
subnodes = node.findall("CustomCommand")
if subnodes is not None and len(subnodes) > 0:
values = [x.text for x in subnodes]
return values
    def _get_addresses_values(self) -> List[int]:
        values: List[int] = []
        node = self._get_child_node("Addresses")
        if node is not None:
            subnodes = node.findall("Address")
            if subnodes is not None and len(subnodes) > 0:
                values = [int(x.text) for x in subnodes]
        return values
def _get_color_values(self) -> List[float]:
colors: List[float] = []
node = self._get_child_node("Color")
if node is not None:
colors = [float(x) for x in node.text.split(",")]
return colors
def apply_attributes_to_prim(self, prim: Usd.Prim):
self._set_attribute_text_if_valid(prim, "name", self._name)
self._set_attribute_text_if_valid(prim, "uuid", self._uuid)
self._set_attribute_text_if_valid(prim, "GDTFSpec", self._gdtf_spec)
self._set_attribute_text_if_valid(prim, "GDTFMode", self._gdtf_mode)
self._set_attribute_textarray_if_valid(prim, "CustomCommands", self._custom_commands)
self._set_attribute_text_if_valid(prim, "Classing", self._classing)
self._set_attribute_intarray_if_valid(prim, "Addresses", self._addresses)
self._set_attribute_int_if_valid(prim, "FixtureID", self._fixture_id)
self._set_attribute_int_if_valid(prim, "UnitNumber", self._unit_number)
self._set_attribute_int_if_valid(prim, "FixtureTypeId", self._fixture_type_id)
self._set_attribute_int_if_valid(prim, "CustomId", self._custom_id)
self._set_attribute_floatarray_if_valid(prim, "CIEColor", self._cie_color)
self._set_attribute_bool_if_value(prim, "CastShadow", self._cast_shadow)
def _set_attribute_text_if_valid(self, prim: Usd.Prim, name: str, value: str):
if value is not None:
USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.String, value)
def _set_attribute_int_if_valid(self, prim: Usd.Prim, name: str, value: int):
if value is not None:
USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.Int, value)
def _set_attribute_bool_if_value(self, prim: Usd.Prim, name: str, value: bool):
if value is not None:
USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.Bool, value)
def _set_attribute_textarray_if_valid(self, prim: Usd.Prim, name: str, value: List[str]):
if value is not None and len(value) > 0:
USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.StringArray, value)
def _set_attribute_intarray_if_valid(self, prim: Usd.Prim, name: str, value: List[int]):
if value is not None and len(value) > 0:
USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.IntArray, value)
def _set_attribute_floatarray_if_valid(self, prim: Usd.Prim, name: str, value: List[float]):
if value is not None and len(value) > 0:
USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.FloatArray, value)
class Layer:
def __init__(self, node: ET.Element):
self._name = node.attrib["name"]
self._uuid = node.attrib["uuid"]
self._node = node
self._fixtures = []
def get_name_usd(self) -> str:
return USDTools.make_name_valid(self._name)
def find_fixtures(self):
childlist = self._node.find("ChildList")
fixtures = childlist.findall("Fixture")
self._fixtures = [Fixture(x) for x in fixtures]
def fixtures_len(self) -> int:
return len(self._fixtures)
def get_fixtures(self) -> List[Fixture]:
return self._fixtures
| 6,238 | Python | 39.777778 | 96 | 0.617987 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/filepathUtility.py | import os
class Filepath:
def __init__(self, filepath: str):
self._is_none = filepath == ""
self.fullpath = filepath
self.directory = os.path.dirname(filepath) + "/"
self.basename = os.path.basename(filepath)
self.filename, self.ext = os.path.splitext(self.basename)
def is_nucleus_path(self) -> bool:
# TODO: Replace with omni utility method
return self.directory[:12] == "omniverse://"
def get_relative_from(self, other) -> str:
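        # When this filepath is empty, return the other file's full path;
        # otherwise strip this file's directory prefix to build a relative path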
if self._is_none:
return other.fullpath
else:
return "./" + other.fullpath[len(self.directory):]
| 641 | Python | 28.181817 | 65 | 0.592824 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/config/extension.toml | [package]
version = "1.0.0"
title = "MF MVR converter"
description = "Support of MVR (My Virtual Rig) files in USD."
authors = ["Moment Factory","Frederic Lestage", "Antoine Pilote"]
readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"
repository = "https://github.com/MomentFactory/Omniverse-MVR-GDTF-converter"
category = "Rendering"
keywords = ["MVR", "GDTF","Audiovisual","Lighting","Fixture"]
preview_image = "data/preview.png"
icon = "data/icon.png"
toggleable = false
[core]
reloadable = false
# Load at the start, load all schemas with order -100 (with order -1000 the USD libs are loaded)
order = -100
[dependencies]
"mf.ov.gdtf" = {}
"omni.kit.uiapp" = {}
"omni.kit.tool.asset_importer" = {}
[[python.module]]
name = "mf.ov.mvr"
[python.pipapi]
requirements = [
"unidecode"
]
use_online_index = true
[package.target]
kit = ["105.1"]
[package.writeTarget]
kit = true
python = false | 913 | TOML | 18.869565 | 96 | 0.692223 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/docs/CHANGELOG.md | # Changelog
# [1.0.0] - 2024-01-24
- Added native OpenUSD file format plugin for payload support.
- Fixed orientation and scale issues
- Some light parameters are now applied to USD light (cone, color temp, intensity)
- Deprecated kit 104 and 105.0
- Added Sample files for USDView
# [0.4.0] - 2023-10-02
# Added
- Sample file
# Fixed
- Enabled importing from Omniverse
- Importing within the same repository as the source file fixed (for filesystem and Omniverse)
# Changed
- The name of the folder (the one created during importation that contains the files converted to usd) won't include the file extension ("myMVRFile.mvr/" will now be "myMVRFile_mvr/")
- GDTF attributes populated by MVR now better reflect naming convention of the specs ("fixture_id" becomes "FixtureID")
- Properly remove the temporary directory created for archive extraction at the end of importation
# [0.3.0] - 2023-09-01
## Fixed
- Global scale rework
- Fix relative link issue with character escaping
# [0.2.0] - 2023-08-17
### Added
- Support for multiple layers
- Layers reflected as Scope in usd
### Changed
- When making name valid for usd, add underscore if starts with number
# [0.1.0] - 2023-07-21
### Added
- Initial version of the extension
- Support import of MVR files
| 1,273 | Markdown | 27.954545 | 183 | 0.745483 |
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/docs/README.md | # MVR extension for Omniverse [mf.ov.mvr]
Copyright 2023 Moment Factory Studios Inc.
An Omniverse extension for [MVR (My Virtual Rig)](https://github.com/mvrdevelopment/spec/blob/main/mvr-spec.md) files. Supports MVR to OpenUSD conversion as well as references to MVR files through a native USD FileFormat plugin.
Requires the mf.ov.gdtf extension to fully work.
MVR (My Virtual Rig) is a scene format that can describe a complete rig of lights, using GDTF assets at its core while adding capabilities to define groups, layers, DMX addresses and more, allowing lighting designers to build virtual replicas of their lighting rigs and enforce a single file format from show design to previz to operation.
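Since the file format plugin exposes MVR files as USD layers, a converted rig can be composed like any other asset. Below is a minimal sketch of referencing an MVR file from Python with the plugin enabled; the file name `my_rig.mvr` is a placeholder:

```python
from pxr import Usd

# Open a working stage (in memory for this sketch)
stage = Usd.Stage.CreateInMemory()

# Reference the MVR file on an Xform prim; the file format plugin
# converts the .mvr contents to USD prims at composition time
rig = stage.DefinePrim("/World/Rig", "Xform")
rig.GetReferences().AddReference("./my_rig.mvr")  # placeholder path

print(stage.ExportToString())
```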
| 705 | Markdown | 87.249989 | 336 | 0.8 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/VERSION.md | 105.1 | 5 | Markdown | 4.999995 | 5 | 0.8 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/repo.toml | ########################################################################################################################
# Repo tool base settings
########################################################################################################################
[repo]
# Use the Kit Template repo configuration as a base. Only override things specific to the repo.
import_configs = ["${root}/_repo/deps/repo_kit_tools/kit-template/repo.toml"]
# Repository Name
name = "omniverse-lidar-live-synthetic-data"
[repo_build]
msbuild.vs_version = "vs2019"
post_build.commands = []
[repo_docs]
name = "MF Lidar live synthetic data"
project = "omniverse-lidar-live-synthetic-data"
api_output_directory = "api"
use_fast_doxygen_conversion=false
sphinx_version = "4.5.0.2-py3.10-${platform}"
sphinx_exclude_patterns = [
"_build",
"tools",
"VERSION.md",
"source/extensions/*/docs/Overview.md",
"source/extensions/*/docs/CHANGELOG.md",
]
[repo_docs.kit]
extensions = [
"mf.ov.lidar_live_synth"
]
[repo_package.packages."platform:windows-x86_64".docs]
windows_max_path_length = 0
| 1,103 | TOML | 28.052631 | 120 | 0.553944 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/README.md | # MF Lidar live synthetic data [mf.ov.lidar_live_synth]
Adds an Action Graph Node ("Generic/Beam to Ouster UDP") to send Isaac beam data via the Ouster(tm) UDP protocol.
This allows any third-party software implementing Ouster(tm) lidars to be connected to simulated sensors instead of physical sensors.
Developed for Kit 105.1 and currently working only in Isaac Sim.
This extension provides pre-built binaries for Windows and Linux x86_64.
You may want to compile from the [source code](https://github.com/MomentFactory/Omniverse-Lidar-Live-Synthetic-Data)
## Requirements
- kit 105 based
- Isaac Sim > 2023.1.0
- Linux or Windows platforms
### Supported Lidars
Currently, only Ouster™ sensors are supported.
The Lidar must have 16, 32, 64 or 128 rows to be supported by the protocol.
Lidar FOVs and resolutions are not transmitted in the protocol and therefore should match those of an actual Ouster(tm) model (22.5, 45 or 90 degrees FOV) for an accurate reconstruction by the receiving software.
JSON config files that describe the angles of the beams for an external application are included in the 'data' folder (example : [OusterJsonConfigOmniverse-OS0-16.json](source/extensions/mf.ov.lidar_live_synth/data/OusterJsonConfigOmniverse-OS0-16.json)). These files can be used in Cirrus as the Ouster(tm) JSON config file to properly reconstruct the data with the correct beam angles. OS0 are 90 degrees FOV, OS1 are 45 and OS2 are 22.5.
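As an illustration of how the beam angles in those files relate to the FOV, the sketch below derives evenly spaced altitude angles for an OS0-like sensor; the even-spacing rule is an assumption for illustration, not the exact contents of the shipped files:

```python
# Hypothetical helper: evenly spaced beam altitude angles (degrees)
# for a sensor with the given vertical FOV and row count.
def beam_altitude_angles(vertical_fov_deg: float, num_rows: int) -> list:
    half_fov = vertical_fov_deg / 2.0
    step = vertical_fov_deg / (num_rows - 1)
    return [half_fov - i * step for i in range(num_rows)]

# OS0-like: 90 degrees FOV, 16 rows -> angles from +45 down to -45
print(beam_altitude_angles(90.0, 16))
```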
## Build
### Windows
- Run `./build.bat`
### Linux
- Install Docker
- Run `./build.sh`
## Using the extension
Requires Isaac Sim as well as third-party software that can receive and parse Ouster(tm) Lidar sensor frames.
You can use the [isaac_lidar_sample_moving_cube.usd](source/extensions/mf.ov.lidar_live_synth/samples/isaac_lidar_sample_moving_cube.usd), or [isaac_lidar_ouster_sample.usd](source/extensions/mf.ov.lidar_live_synth/samples/isaac_lidar_ouster_sample.usd), or create your own following the instructions below.
### Enable the extension
In Isaac Sim :
- Windows > Extensions.
- Switch to THIRD PARY tab.
- Install and enable the extension.
### In Isaac Sim:
1. Open or create a scene
   - Meshes require a Rigid Body to intercept Lidar raycasts
- Right-click a mesh, then select `Add / Physics / Rigid Body`
2. Add a Lidar to the scene if not present
- `Create / Isaac / Sensors / Lidar / Generic`
- Unfold Raw USD Properties
- Check `drawPoints` and/or `drawLines` if you want to see the point cloud
- Check the `enabled` property
   - Use `horizontalFov`, `horizontalResolution`, `maxRange`, `minRange`, `verticalFov`, and `verticalResolution` to define the Lidar raycast zone
- set `rotationRate` to `0` if you want continuous raycast
3. Create an action graph
- Right-click the Stage, then select `Create / Visual Scripting / Action Graph`
- Right-click the Action Graph then select "Open Graph"
- Add a `Event / On Playback Tick` node
   - Add an `Isaac Range Sensor / Isaac Read Lidar Beam Node`
- Connect the "Tick" output to the "Exec In" input
- Add a `Generic / Beam to Ouster UDP` node
- Connect the "Exec Out" output to the "Exec In" input
- Connect the outputs of `Isaac Read Lidar Beam Node` to the matching `Beam to Ouster UDP` inputs
- `Azimuth Range`
- `Horizontal Resolution`
- `Linear Depth Data`
- `Num Cols`
- `Num Rows`
4. Press the play icon (SPACE) to begin the simulation
#### Beam to Ouster UDP fields
- `IP Address` (string): The IP address to send the data to.
- `Port` (int): The port to send the data to.
- `Broadcast` (bool): Check to indicate the IP Address is a broadcast address.
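For scripted setups, the same graph can be wired up from Python with `omni.graph.core`. This is a sketch only: the graph path, the tick and Isaac reader node type names, and the reader's output attribute names are assumptions inferred from the UI labels above and may differ between Isaac Sim versions:

```python
import omni.graph.core as og

keys = og.Controller.Keys
og.Controller.edit(
    "/World/ActionGraph",  # assumed graph path
    {
        keys.CREATE_NODES: [
            ("tick", "omni.graph.action.OnPlaybackTick"),
            ("lidar", "omni.isaac.range_sensor.IsaacReadLidarBeams"),  # assumed type name
            ("beam", "mf.ov.lidar_live_synth.BeamToOusterUDP"),  # assumed type name
        ],
        keys.SET_VALUES: [
            ("beam.inputs:ip_address", "127.0.0.1"),
            ("beam.inputs:port", 7502),  # example port
            ("beam.inputs:broadcast", False),
        ],
        keys.CONNECT: [
            ("tick.outputs:tick", "lidar.inputs:execIn"),
            ("lidar.outputs:execOut", "beam.inputs:execIn"),
            ("lidar.outputs:azimuthRange", "beam.inputs:azimuthRange"),
            ("lidar.outputs:horizontalResolution", "beam.inputs:horizontalResolution"),
            ("lidar.outputs:linearDepthData", "beam.inputs:linearDepthData"),
            ("lidar.outputs:numCols", "beam.inputs:numCols"),
            ("lidar.outputs:numRows", "beam.inputs:numRows"),
        ],
    },
)
```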
## Developer notes
As the extension is written in C++ for performance reasons, developers need to build it before using it. Most of it works in the same way as the official [Omniverse C++ examples](https://github.com/NVIDIA-Omniverse/kit-extension-template-cpp).
The first step is to run the `build.bat` file at the root of the repo. It will generate the actual extension files usable by Omniverse, as well as the Visual Studio files. It is recommended to work in Visual Studio (2019 and above) for C++, although VSCode should also work. The `build.bat` script generates the VS2019 `.sln` files in `_compiler\vs2019\kit-extension-template-cpp.sln` . It should work as-is. Do not upgrade the compiler and Windows SDK versions if asked to do so, and install the correct Windows SDK for the VS Installer if it is missing on your machine.
Unlike the samples, we do not recommend running the project by launching it via Visual Studio, since the extension is made specifically for Isaac Sim, and Visual Studio doesn't launch it within an Isaac Sim environment. It is recommended to run Isaac and attach the VS debugger to it by going to Debug -> Attach to Process and selecting the kit.exe corresponding to Isaac. Make sure to attach to Native Code. If you have the "Python - Profiling" extension, it might want to try to attach to Python code instead. One thing to note is that the symbols for the extension will only be loaded IF the extension is enabled after attaching. If the extension is already enabled, disabling then enabling it will also work. Also, to update the extension in Isaac after doing some changes and building, it needs to be disabled and enabled again (the extension will probably fail to build if it is in use, as the DLL cannot be overwritten anyway).
To add the extension to Isaac, simply add the built plugin folder (`c:/git/omniverse/omniverse-lidar-synthetic-data/_build/windows-x86_64/release/exts` or `c:/git/omniverse/omniverse-lidar-synthetic-data/_build/windows-x86_64/debug/exts` for a debug build) to the extension manager paths.
## Resources
- Inspired by : [NVIDIA's kit-extension-template-cpp](https://github.com/NVIDIA-Omniverse/kit-extension-template-cpp) | 5,882 | Markdown | 62.258064 | 933 | 0.753655 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/index.rst | MF Lidar live synthetic data
############################
.. mdinclude:: README.md
Example Extensions
##################
* `mf.ov.lidar_live_synth <../../mf.ov.lidar_live_synth/1.0.0/index.html>`_
| 198 | reStructuredText | 18.899998 | 75 | 0.535354 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/deps/repo-deps.packman.xml | <project toolsVersion="5.0">
<dependency name="repo_build" linkPath="../_repo/deps/repo_build">
<package name="repo_build" version="0.44.6" checksum="11858f3d45b15d83f0279fa96e2813232bfd65755d0cf45861f5fdd28a5a39b6" />
</dependency>
<dependency name="repo_changelog" linkPath="../_repo/deps/repo_changelog">
<package name="repo_changelog" version="0.3.2" checksum="fbe4bc4257d5aec1c964f2616257043095a9dfac8a10e027ac96aa89340f1423" />
</dependency>
<dependency name="repo_docs" linkPath="../_repo/deps/repo_docs">
<package name="repo_docs" version="0.37.3" checksum="78bd6488c1cd7295ab6728d9cd0b79fac3684598bcaebefad710fc79e3a7b8ea" />
</dependency>
<dependency name="repo_kit_tools" linkPath="../_repo/deps/repo_kit_tools">
<package name="repo_kit_tools" version="0.11.8" checksum="8d6e1ade8b75b40f880505ba62308958d87a88e52db6a3b932be3da387a8a571" />
</dependency>
<dependency name="repo_licensing" linkPath="../_repo/deps/repo_licensing">
<package name="repo_licensing" version="1.12.0" checksum="2fa002302a776f1104896f39c8822a8c9516ef6c0ce251548b2b915979666b9d" />
</dependency>
<dependency name="repo_man" linkPath="../_repo/deps/repo_man">
<package name="repo_man" version="1.36.1" checksum="aba22f72ec46b7d2761c5fe2eee397bcb6958dda9b4a8aaca947eb69b97f6089" />
</dependency>
<dependency name="repo_package" linkPath="../_repo/deps/repo_package">
<package name="repo_package" version="5.8.8" checksum="b8279d841f7201b44d9b232b934960d9a302367be59ee64e976345854b741fec" />
</dependency>
<dependency name="repo_format" linkPath="../_repo/deps/repo_format">
<package name="repo_format" version="2.7.0" checksum="8083eb423043de585dfdfd3cf7637d7e50ba2a297abb8bebcaef4307b80503bb" />
</dependency>
<dependency name="repo_source" linkPath="../_repo/deps/repo_source">
<package name="repo_source" version="0.4.2" checksum="05776a984978d84611cb8becd5ed9c26137434e0abff6e3076f36ab354313423" />
</dependency>
<dependency name="repo_test" linkPath="../_repo/deps/repo_test">
<package name="repo_test" version="2.9.3" checksum="1903a2a1c998ca4adc87bc20520e91a9af21bf18a6a48a8e05467fe29d674931" />
</dependency>
</project>
| 2,191 | XML | 65.42424 | 130 | 0.760383 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/deps/kit-sdk.packman.xml | <project toolsVersion="5.0">
<!-- We always depend on the release kit-sdk package, regardless of config -->
<dependency name="kit_sdk_${config}" linkPath="../_build/${platform}/${config}/kit" tags="${config} non-redist">
<package name="kit-sdk" version="105.1+release.127680.dd92291b.tc.windows-x86_64.release" platforms="windows-x86_64" checksum="78b6054c730a44b97e6551eae9e17f45384621f244d4babde5264a1d6df3038f" />
<package name="kit-sdk" version="105.1+release.127680.dd92291b.tc.linux-x86_64.release" platforms="linux-x86_64" checksum="2f8357eda2de9232c0b4cb345eb6c4d3c3aa8c4c9685ed45d4bfe749af57b0b8" />
</dependency>
</project>
| 648 | XML | 80.12499 | 199 | 0.759259 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/deps/host-deps.packman.xml | <project toolsVersion="5.0">
<dependency name="premake" linkPath="../_build/host-deps/premake">
<package name="premake" version="5.0.0-alpha15.dev+pipeline3388156.1f299ea4-windows-x86_64" checksum="b1e5dcef9acf47b0c86a4630afa4fadc9485b878e25e4321ac5afbb826bbdf93" platforms="windows-x86_64" />
<package name="premake" version="5.0.0-alpha15.dev+pipeline3388156.1f299ea4-linux-x86_64" checksum="ae15e63cf6d53571fa3bdfa33ddcec8a3be90675cdd155590a26bcd75d04d73f" platforms="linux-x86_64" />
</dependency>
<dependency name="msvc" linkPath="../_build/host-deps/msvc">
<package name="msvc" version="2019-16.7.6-license" platforms="windows-x86_64" checksum="0e37c0f29899fe10dcbef6756bcd69c2c4422a3ca1101206df272dc3d295b92d" />
</dependency>
<dependency name="winsdk" linkPath="../_build/host-deps/winsdk">
<package name="winsdk" version="10.0.18362.0-license" platforms="windows-x86_64" checksum="2db7aeb2278b79c6c9fbca8f5d72b16090b3554f52b1f3e5f1c8739c5132a3d6" />
</dependency>
</project>
| 1,012 | XML | 76.923071 | 201 | 0.778656 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/deps/kit-sdk-deps.packman.xml | <project toolsVersion="5.0">
<!-- Import dependencies from Kit SDK to ensure we're using the same versions. -->
<import path="../_build/${platform}/${config}/kit/dev/all-deps.packman.xml">
<filter include="carb_sdk_plugins"/>
<filter include="cuda"/>
<filter include="doctest"/>
<filter include="pybind11"/>
<filter include="python"/>
</import>
<!-- Override the link paths to point to the correct locations. -->
<dependency name="carb_sdk_plugins" linkPath="../_build/target-deps/carb_sdk_plugins"/>
<dependency name="cuda" linkPath="../_build/target-deps/cuda"/>
<dependency name="doctest" linkPath="../_build/target-deps/doctest"/>
<dependency name="pybind11" linkPath="../_build/target-deps/pybind11"/>
<dependency name="python" linkPath="../_build/target-deps/python"/>
</project>
| 826 | XML | 42.526314 | 89 | 0.679177 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/deps/ext-deps.packman.xml | <project toolsVersion="5.0">
<!-- Import dependencies from Kit SDK to ensure we're using the same versions. -->
<import path="../_build/${platform}/${config}/kit/dev/all-deps.packman.xml">
<filter include="boost_preprocessor"/>
<filter include="imgui"/>
<filter include="nv_usd_py310_release"/>
</import>
<!-- Override the link paths to point to the correct locations. -->
<dependency name="boost_preprocessor" linkPath="../_build/target-deps/boost-preprocessor"/>
<dependency name="imgui" linkPath="../_build/target-deps/imgui"/>
<dependency name="nv_usd_py310_release" linkPath="../_build/target-deps/nv_usd/release"/>
<!-- Because we always use the release kit-sdk we have to explicitly refer to the debug usd package. -->
<dependency name="nv_usd_py310_debug" linkPath="../_build/target-deps/nv_usd/debug">
<package name="nv-usd" version="22.11.nv.0.2.1058.7d2f59ad-win64_py310_debug-dev_omniverse" platforms="windows-x86_64" checksum="02f7c3477830eb17699cc91774438edd8651f3ec0031582c67093ae3276f360b" />
<package name="nv-usd" version="22.11.nv.0.2.1058.7d2f59ad-linux64_py310-centos_debug-dev_omniverse" platforms="linux-x86_64" checksum="2ac18e0470d05b251a2f36691a1dc1b28da340da92b19175d890addb762adb0f"/>
<package name="nv-usd" version="22.11.nv.0.2.1058.7d2f59ad-linux-aarch64_py310_debug-dev_omniverse" platforms="linux-aarch64" checksum="904ede636008fb011b5f3d66c1a7c2969dfba291dcf1a227fa7503a714f1f18d" />
</dependency>
</project>
| 1,497 | XML | 67.090906 | 210 | 0.739479 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/tools/repoman/repoman.py | import os
import sys
import io
import contextlib
import packmanapi
REPO_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../..")
REPO_DEPS_FILE = os.path.join(REPO_ROOT, "deps/repo-deps.packman.xml")
def bootstrap():
"""
Bootstrap all omni.repo modules.
Pull with packman from repo.packman.xml and add them all to python sys.path to enable importing.
"""
#with contextlib.redirect_stdout(io.StringIO()):
deps = packmanapi.pull(REPO_DEPS_FILE)
for dep_path in deps.values():
if dep_path not in sys.path:
sys.path.append(dep_path)
if __name__ == "__main__":
bootstrap()
import omni.repo.man
omni.repo.man.main(REPO_ROOT)
| 703 | Python | 23.275861 | 100 | 0.661451 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/tools/packman/packmanconf.py | # Use this file to bootstrap packman into your Python environment (3.7.x). Simply
# add the path by doing sys.insert to where packmanconf.py is located and then execute:
#
# >>> import packmanconf
# >>> packmanconf.init()
#
# It will use the configured remote(s) and the version of packman in the same folder,
# giving you full access to the packman API via the following module
#
# >> import packmanapi
# >> dir(packmanapi)
import os
import platform
import sys
def init():
"""Call this function to initialize the packman configuration.
Calls to the packman API will work after successfully calling this function.
Note:
This function only needs to be called once during the execution of your
program. Calling it repeatedly is harmless but wasteful.
Compatibility with your Python interpreter is checked and upon failure
the function will report what is required.
Example:
>>> import packmanconf
>>> packmanconf.init()
>>> import packmanapi
>>> packmanapi.set_verbosity_level(packmanapi.VERBOSITY_HIGH)
"""
major = sys.version_info[0]
minor = sys.version_info[1]
if major != 3 or minor != 10:
raise RuntimeError(
f"This version of packman requires Python 3.10.x, but {major}.{minor} was provided"
)
conf_dir = os.path.dirname(os.path.abspath(__file__))
os.environ["PM_INSTALL_PATH"] = conf_dir
packages_root = get_packages_root(conf_dir)
version = get_version(conf_dir)
module_dir = get_module_dir(conf_dir, packages_root, version)
sys.path.insert(1, module_dir)
def get_packages_root(conf_dir: str) -> str:
root = os.getenv("PM_PACKAGES_ROOT")
if not root:
platform_name = platform.system()
if platform_name == "Windows":
drive, _ = os.path.splitdrive(conf_dir)
root = os.path.join(drive, "packman-repo")
elif platform_name == "Darwin":
# macOS
            root = os.path.join(
                os.path.expanduser("~"), "Library/Application Support/packman-cache"
            )
elif platform_name == "Linux":
try:
cache_root = os.environ["XDG_HOME_CACHE"]
except KeyError:
cache_root = os.path.join(os.path.expanduser("~"), ".cache")
            root = os.path.join(cache_root, "packman")
else:
raise RuntimeError(f"Unsupported platform '{platform_name}'")
# make sure the path exists:
os.makedirs(root, exist_ok=True)
return root
def get_module_dir(conf_dir, packages_root: str, version: str) -> str:
module_dir = os.path.join(packages_root, "packman-common", version)
if not os.path.exists(module_dir):
import tempfile
tf = tempfile.NamedTemporaryFile(delete=False)
target_name = tf.name
tf.close()
url = f"https://bootstrap.packman.nvidia.com/packman-common@{version}.zip"
print(f"Downloading '{url}' ...")
import urllib.request
urllib.request.urlretrieve(url, target_name)
from importlib.machinery import SourceFileLoader
# import module from path provided
script_path = os.path.join(conf_dir, "bootstrap", "install_package.py")
ip = SourceFileLoader("install_package", script_path).load_module()
print("Unpacking ...")
ip.install_package(target_name, module_dir)
os.unlink(tf.name)
return module_dir
def get_version(conf_dir: str):
path = os.path.join(conf_dir, "packman")
if not os.path.exists(path): # in dev repo fallback
path += ".sh"
with open(path, "rt", encoding="utf8") as launch_file:
for line in launch_file.readlines():
if line.startswith("PM_PACKMAN_VERSION"):
_, value = line.split("=")
return value.strip()
raise RuntimeError(f"Unable to find 'PM_PACKMAN_VERSION' in '{path}'")
| 3,933 | Python | 35.425926 | 95 | 0.632596 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 | XML | 34.333328 | 123 | 0.691943 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import os
import stat
import time
from typing import Any, Callable
RENAME_RETRY_COUNT = 100
RENAME_RETRY_DELAY = 0.1
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
def remove_directory_item(path):
if os.path.islink(path) or os.path.isfile(path):
try:
os.remove(path)
except PermissionError:
# make sure we have access and try again:
os.chmod(path, stat.S_IRWXU)
os.remove(path)
else:
# try first to delete the dir because this will work for folder junctions, otherwise we would follow the junctions and cause destruction!
clean_out_folder = False
try:
# make sure we have access preemptively - this is necessary because recursing into a directory without permissions
# will only lead to heart ache
os.chmod(path, stat.S_IRWXU)
os.rmdir(path)
except OSError:
clean_out_folder = True
if clean_out_folder:
# we should make sure the directory is empty
names = os.listdir(path)
for name in names:
fullname = os.path.join(path, name)
remove_directory_item(fullname)
# now try to again get rid of the folder - and not catch if it raises:
os.rmdir(path)
class StagingDirectory:
def __init__(self, staging_path):
self.staging_path = staging_path
self.temp_folder_path = None
os.makedirs(staging_path, exist_ok=True)
def __enter__(self):
self.temp_folder_path = tempfile.mkdtemp(prefix="ver-", dir=self.staging_path)
return self
def get_temp_folder_path(self):
return self.temp_folder_path
# this function renames the temp staging folder to folder_name, it is required that the parent path exists!
def promote_and_rename(self, folder_name):
abs_dst_folder_name = os.path.join(self.staging_path, folder_name)
os.rename(self.temp_folder_path, abs_dst_folder_name)
def __exit__(self, type, value, traceback):
# Remove temp staging folder if it's still there (something went wrong):
path = self.temp_folder_path
if os.path.isdir(path):
remove_directory_item(path)
def rename_folder(staging_dir: StagingDirectory, folder_name: str):
try:
staging_dir.promote_and_rename(folder_name)
except OSError as exc:
# if we failed to rename because the folder now exists we can assume that another packman process
# has managed to update the package before us - in all other cases we re-raise the exception
abs_dst_folder_name = os.path.join(staging_dir.staging_path, folder_name)
if os.path.exists(abs_dst_folder_name):
logger.warning(
f"Directory {abs_dst_folder_name} already present, package installation already completed"
)
else:
raise
def call_with_retry(
op_name: str, func: Callable, retry_count: int = 3, retry_delay: float = 20
) -> Any:
retries_left = retry_count
while True:
try:
return func()
except (OSError, IOError) as exc:
logger.warning(f"Failure while executing {op_name} [{str(exc)}]")
if retries_left:
retry_str = "retry" if retries_left == 1 else "retries"
logger.warning(
f"Retrying after {retry_delay} seconds"
f" ({retries_left} {retry_str} left) ..."
)
time.sleep(retry_delay)
else:
logger.error("Maximum retries exceeded, giving up")
raise
retries_left -= 1
def rename_folder_with_retry(staging_dir: StagingDirectory, folder_name):
dst_path = os.path.join(staging_dir.staging_path, folder_name)
call_with_retry(
f"rename {staging_dir.get_temp_folder_path()} -> {dst_path}",
lambda: rename_folder(staging_dir, folder_name),
RENAME_RETRY_COUNT,
RENAME_RETRY_DELAY,
)
def install_package(package_path, install_path):
staging_path, version = os.path.split(install_path)
with StagingDirectory(staging_path) as staging_dir:
output_folder = staging_dir.get_temp_folder_path()
with zipfile.ZipFile(package_path, allowZip64=True) as zip_file:
zip_file.extractall(output_folder)
# attempt the rename operation
rename_folder_with_retry(staging_dir, version)
print(f"Package successfully installed to {install_path}")
if __name__ == "__main__":
executable_paths = os.getenv("PATH")
paths_list = executable_paths.split(os.path.pathsep) if executable_paths else []
target_path_np = os.path.normpath(sys.argv[2])
target_path_np_nc = os.path.normcase(target_path_np)
for exec_path in paths_list:
if os.path.normcase(os.path.normpath(exec_path)) == target_path_np_nc:
raise RuntimeError(f"packman will not install to executable path '{exec_path}'")
install_package(sys.argv[1], target_path_np)
| 5,777 | Python | 36.277419 | 145 | 0.645145 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/source/extensions/mf.ov.lidar_live_synth/mf/ov/lidar_live_synth/__init__.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
# This file is needed so tests don't fail.
| 480 | Python | 42.727269 | 77 | 0.785417 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/source/extensions/mf.ov.lidar_live_synth/config/extension.toml | [package]
version = "0.2.0"
title = "MF Lidar live synthetic data"
description = "Send real-time Lidar synthetic point cloud data from Omniverse to third party software."
category = "Graph"
keywords = ["lidar", "UDP", "omnigraph", "Graph", "Node", "OmniGraph", "synthetic", "realtime"]
preview_image = "data/preview.png"
icon = "data/icon.png"
changelog = "docs/CHANGELOG.md"
readme = "docs/README.md"
authors = ["Moment Factory","Frederic Lestage","Steven Beliveau"]
repository = "https://github.com/MomentFactory/Omniverse-Lidar-extension"
[dependencies]
"omni.graph" = {}
[[python.module]]
name = "mf.ov.lidar_live_synth"
[[native.plugin]]
path = "bin/*.plugin"
[documentation]
pages = [
"docs/README.md",
"docs/CHANGELOG.md",
]
[package.target]
kit = ["105.1"]
[package.writeTarget]
kit = true
python = false
| 829 | TOML | 22.055555 | 103 | 0.694813 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/source/extensions/mf.ov.lidar_live_synth/plugins/mf.ov.lidar_live_synth/LidarLiveSyntheticDataExtension.cpp | #define CARB_EXPORTS
#include <carb/PluginUtils.h>
#include <omni/ext/IExt.h>
#include <omni/graph/core/IGraphRegistry.h>
#include <omni/graph/core/ogn/Database.h>
#include <omni/graph/core/ogn/Registration.h>
// Standard plugin definitions required by Carbonite.
const struct carb::PluginImplDesc pluginImplDesc = { "mf.ov.lidar_live_synth.plugin",
"MF Lidar live synthetic data.", "MF",
carb::PluginHotReload::eEnabled, "dev" };
// These interface dependencies are required by all OmniGraph node types
CARB_PLUGIN_IMPL_DEPS(omni::graph::core::IGraphRegistry,
omni::fabric::IPath,
omni::fabric::IToken)
// This macro sets up the information required to register your node type definitions with OmniGraph
DECLARE_OGN_NODES()
namespace mf
{
namespace ov
{
namespace lidar_live_synth
{
class LidarLiveSyntheticDataExtension : public omni::ext::IExt
{
public:
void onStartup(const char* extId) override
{
// This macro walks the list of pending node type definitions and registers them with OmniGraph
INITIALIZE_OGN_NODES()
}
void onShutdown() override
{
// This macro walks the list of registered node type definitions and deregisters all of them. This is required
// for hot reload to work.
RELEASE_OGN_NODES()
}
private:
};
}
}
}
CARB_PLUGIN_IMPL(pluginImplDesc, mf::ov::lidar_live_synth::LidarLiveSyntheticDataExtension)
void fillInterface(mf::ov::lidar_live_synth::LidarLiveSyntheticDataExtension& iface)
{
}
| 1,622 | C++ | 26.982758 | 118 | 0.676326 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/source/extensions/mf.ov.lidar_live_synth/plugins/nodes/OgnBeamToOusterUDPNode.cpp | #include <OgnBeamToOusterUDPNodeDatabase.h>
#include <chrono>
#define WIN32_LEAN_AND_MEAN
#define _WINSOCK_DEPRECATED_NO_WARNINGS
#ifdef _WIN32
#include <Winsock2.h>
#else
#include <arpa/inet.h>
#include <netdb.h>
#include <netinet/in.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/types.h>
#define SOCKET int
#define INVALID_SOCKET (SOCKET)(~0)
#define SOCKET_ERROR (-1)
#define closesocket close
#define SOCKADDR sockaddr
#endif
namespace mf {
namespace ov {
namespace lidar_live_synth {
static const int kColumnsPerPacket = 16;
static const float kPi = 3.14159265359f;
static const float kTwoPi = kPi * 2.0f;
static const float kDegToRad = kTwoPi / 360.0f;
static const int kOusterNumRotAngles = 90112;
static const float kOusterNumRotAnglesOverTwoPi = kOusterNumRotAngles / kTwoPi;
class OgnBeamToOusterUDPNode
{
int m_frameId{ 0 };
#pragma pack(push,4) // Force packing in 4-byte packs (Words)
struct OusterChannelDataBlock
{
unsigned int rangemm;
unsigned short reflectivity;
unsigned short signal_photons;
unsigned short noise_photons;
unsigned short unused;
OusterChannelDataBlock()
: rangemm(0)
, reflectivity(0)
, signal_photons(0)
, noise_photons(0)
, unused(0)
{}
};
template <int NUMROWS>
struct OusterAzimuthBlock
{
unsigned long long timeStamp; // Word 0,1
unsigned short measurementId; // Word 2[0:15]
unsigned short frameId; // Word 2[16:31]
unsigned int encoderCount; // Word 3
OusterChannelDataBlock channelDataBlock[NUMROWS]; // Word [4:195] in groups of 3
unsigned int azimuthDataBlockStatus; // word 196
OusterAzimuthBlock()
: timeStamp(0)
, measurementId(0)
, frameId(0)
, encoderCount(0)
, channelDataBlock{}
, azimuthDataBlockStatus(0)
{}
};
template <int NUMROWS>
struct OusterDataPacket
{
OusterAzimuthBlock<NUMROWS> block[16]; // Each packet consists of 16 azimuth blocks
OusterDataPacket()
:block{}
{}
};
#pragma pack(pop)
class OgnBeamToOusterUDPNodeSocket
{
public:
OgnBeamToOusterUDPNodeSocket()
: SendSocket(INVALID_SOCKET)
, isBroadcastSocket(false)
{}
virtual ~OgnBeamToOusterUDPNodeSocket()
{
if (SendSocket != INVALID_SOCKET)
{
closesocket(SendSocket);
}
}
bool prepare(OgnBeamToOusterUDPNodeDatabase& db)
{
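            // Recreate the socket when the broadcast option changed since the last call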
if (isBroadcastSocket != db.inputs.broadcast())
{
closesocket(SendSocket);
SendSocket = INVALID_SOCKET;
}
if (SendSocket == INVALID_SOCKET)
{
SendSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
if (SendSocket == INVALID_SOCKET)
{
db.logError("Error in OgnBeamToOusterUDPNode opening socket : %d", SendSocket);
return false;
}
if (db.inputs.broadcast())
{
char broadcast = 1;
int iResult = setsockopt(SendSocket, SOL_SOCKET, SO_BROADCAST, &broadcast, sizeof(broadcast));
                    if (iResult != 0) // setsockopt returns 0 on success, SOCKET_ERROR on failure
{
closesocket(SendSocket);
SendSocket = INVALID_SOCKET;
db.logError("Error in OgnBeamToOusterUDPNode setting socket options : %d", iResult);
return false;
}
}
isBroadcastSocket = db.inputs.broadcast();
}
RecvAddr.sin_family = AF_INET;
RecvAddr.sin_port = htons(db.inputs.port());
std::string ipAddress = db.inputs.ip_address();
RecvAddr.sin_addr.s_addr = inet_addr(ipAddress.data());
return true;
}
template <int NUMROWS>
bool send(const OusterDataPacket<NUMROWS>& packet, OgnBeamToOusterUDPNodeDatabase& db)
{
int iResult = sendto(SendSocket, reinterpret_cast<const char*>(&packet), sizeof(packet), 0, (SOCKADDR*)&RecvAddr, sizeof(RecvAddr));
if (iResult == SOCKET_ERROR)
{
db.logError("Error in OgnBeamToOusterUDPNode sending data on socket : %d", iResult);
return false;
}
return true;
}
private:
SOCKET SendSocket;
sockaddr_in RecvAddr;
bool isBroadcastSocket;
};
OgnBeamToOusterUDPNodeSocket m_ognBeamToOusterUDPNodeSocket;
template<int NUMROWS>
static bool computeForSize(OgnBeamToOusterUDPNodeDatabase& db)
{
auto& state = db.internalState<OgnBeamToOusterUDPNode>();
const auto& linearDepthData = db.inputs.linearDepthData();
const int& numCols = db.inputs.numCols();
const float& azimuthStart = db.inputs.azimuthRange()[0] + kTwoPi + kTwoPi;
const float& horizontalStepInRads = -1.0f * db.inputs.horizontalResolution() * kDegToRad;
const int& frameId = state.m_frameId % 65536;
try
{
if (!state.m_ognBeamToOusterUDPNodeSocket.prepare(db))
{
return false;
}
int measurementId = 0;
OusterDataPacket<NUMROWS> packet;
int currentChunkColumn = 0;
// We need to send data in ascending angle (encoder_count) order
// Data is in right-to-left order, we need to iterate left-to-right
// We also need to start at the middle (center) of the data which is encoderCount 0
int colEndIndex = (numCols - 1) / 2;
int colStartIndex = colEndIndex + numCols;
for (int tempColIndex = colStartIndex; tempColIndex > colEndIndex; tempColIndex--)
{
int colIndex = tempColIndex % numCols;
// This assumes consistent input data across azimuthRange, horizontalResolution, numCols, numRows and linearDepthData size
int currentEncoderCount = int((azimuthStart + horizontalStepInRads * tempColIndex) * kOusterNumRotAnglesOverTwoPi);
if (currentEncoderCount < 0 || currentEncoderCount >= kOusterNumRotAngles)
{
db.logError("currentEncoderCount must be between 0 and %d, not %d", kOusterNumRotAngles, currentEncoderCount);
return false;
}
// If previous chunk is complete, start new one
if (currentChunkColumn == kColumnsPerPacket)
{
state.m_ognBeamToOusterUDPNodeSocket.send<NUMROWS>(packet, db);
packet = OusterDataPacket<NUMROWS>();
currentChunkColumn = 0;
}
packet.block[currentChunkColumn].timeStamp =
std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
packet.block[currentChunkColumn].measurementId = measurementId;
packet.block[currentChunkColumn].frameId = frameId;
packet.block[currentChunkColumn].encoderCount = currentEncoderCount;
measurementId = (measurementId + 1) % 65536;
int colIndexStart = colIndex * NUMROWS;
for (int rowIndex = 0; rowIndex < NUMROWS; rowIndex++)
{
packet.block[currentChunkColumn].channelDataBlock[rowIndex].rangemm = (int)(linearDepthData[colIndexStart + rowIndex] * 1000.0f);
packet.block[currentChunkColumn].channelDataBlock[rowIndex].signal_photons = 0xFFFF; //0xFFFF means valid
}
packet.block[currentChunkColumn].azimuthDataBlockStatus = 0xFFFFFFFF; //0xFFFFFFFF means valid
currentChunkColumn++;
}
if (currentChunkColumn != 0)
{
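                // Pad the final partial packet with placeholder azimuth blocks
                // (status stays 0 = invalid) so a full 16-column packet is always sent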
for (int extraColumnIndex = currentChunkColumn; extraColumnIndex < kColumnsPerPacket; extraColumnIndex++)
{
packet.block[extraColumnIndex].timeStamp =
std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
packet.block[extraColumnIndex].measurementId = measurementId;
packet.block[extraColumnIndex].frameId = frameId;
packet.block[extraColumnIndex].encoderCount = kOusterNumRotAngles;
}
state.m_ognBeamToOusterUDPNodeSocket.send<NUMROWS>(packet, db);
}
}
catch (...)
{
db.logError("Error in OgnBeamToOusterUDPNode::compute");
return false;
}
state.m_frameId++;
// Always enable the output execution
db.outputs.execOut() = omni::graph::core::ExecutionAttributeState::kExecutionAttributeStateEnabled;
// Even if inputs were edge cases like empty arrays, correct outputs mean success
return true;
}
public:
static bool compute(OgnBeamToOusterUDPNodeDatabase& db)
{
// TODO: why is state declared here
// auto& state = db.internalState<OgnBeamToOusterUDPNode>();
const int& numRows = db.inputs.numRows();
switch (numRows)
{
case 16:
return computeForSize<16>(db);
break;
case 32:
return computeForSize<32>(db);
break;
case 64:
return computeForSize<64>(db);
break;
case 128:
return computeForSize<128>(db);
break;
}
db.logError("Row count must be either 16, 32, 64 or 128, not %d", numRows);
return false;
}
};
// This macro provides the information necessary to OmniGraph that lets it automatically register and deregister
// your node type definition.
REGISTER_OGN_NODE()
}
}
}
| 10,237 | C++ | 32.348534 | 149 | 0.58142 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/source/extensions/mf.ov.lidar_live_synth/docs/CHANGELOG.md | # Changelog
## [0.2.0] - 2023-12-20
### Modified
- Adapted for compatibility with kit 105
- Enhanced documentation
### Added
- Linux support thanks to [@Samahu](https://github.com/Samahu)'s PR on Github
## [0.1.3] - 2023-08-30
### Changed
- Version bump for registry publishing
## [0.1.2] - 2023-08-30
### Added
- New example with more Lidars
### Modified
- Now comes as C++ for maximum performance.
## [0.1.1] - 2023-05-09
### Added
- Documentation
### Modified
- Name of the Node
- Icon
## [0.1.0] - 2023-05-09
### Added
- Action Graph Node that sends Isaac Lidar Point Cloud data in UDP
| 603 | Markdown | 14.894736 | 77 | 0.658375 |
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/source/extensions/mf.ov.lidar_live_synth/docs/README.md | # MF Lidar live synthetic data [mf.ov.lidar_live_synth]
Adds an Action Graph Node ("Generic/Beam to Ouster UDP") to send Isaac beam data via the Ouster(tm) UDP protocol.
This allows any third-party software implementing Ouster(tm) lidars to be connected to simulated sensors instead of physical sensors.
Developed for Kit 105.1 and currently working only in Isaac Sim.
This extension provides pre-built binaries for Windows and Linux x86_64.
You may want to compile from the [source code](https://github.com/MomentFactory/Omniverse-Lidar-Live-Synthetic-Data) | 562 | Markdown | 69.374991 | 133 | 0.798932 |
openhackathons-org/End-to-End-AI-for-Science/CONTRIBUTING.md | Contributing
------------
Please use the following guidelines when contributing to this project.
Before contributing significant changes, please begin a discussion of the desired changes via a GitHub Issue to prevent doing unnecessary or overlapping work.
## License
The preferred license for source code contributed to this project is the Apache License 2.0 (https://www.apache.org/licenses/LICENSE-2.0) and for documentation, including Jupyter notebooks and text documentation, is the Creative Commons Attribution 4.0 International (CC BY 4.0) (https://creativecommons.org/licenses/by/4.0/). Contributions under other, compatible licenses will be considered on a case-by-case basis.
## Styling
Please use the following style guidelines when making contributions.
### Source Code
* Tab indentation, no spaces
* To the extent possible, variable names should be descriptive
* Code should be documented with details such as what a function does and returns, making the code readable. The code should also have the proper license at the beginning of the file.
* The following file extensions should be used appropriately:
* Python = .py
### Jupyter Notebooks & Markdown
* When they appear inline with the text, directive names, clauses, function or subroutine names, variable names, file names, commands and command-line arguments should appear between two backticks.
* Code blocks should begin with three backticks and either 'python' or 'yaml' to enable appropriate source formatting and end with three backticks (see the example below).
* Leave an empty line before and after the code block.
* Emphasis, including quotes made for emphasis and introduction of new terms, should be highlighted between a single pair of asterisks.
* A level 1 heading should appear at the top of the notebook as the title of the notebook.
* A horizontal rule should appear between sections that begin with a level 2 heading.
Please refer to the following template for Jupyter notebook styling in the GitHub repository: misc/jupyter_lab_template
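For example, a Python code block following the rules above would be written as:

```python
def add(a, b):
    """Return the sum of two numbers."""
    return a + b
```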
## Contributing Labs/Modules
### Directory structure for GitHub
Before starting to work on a new lab, it is important to follow the recommended git structure as shown below to avoid reformatting.
Each lab will have following files/directories consisting of training material for the lab.
* jupyter_notebook folder: Consists of jupyter notebooks and its corresponding images.
* source_code folder: Source code is stored in a separate directory because some clusters may not support Jupyter notebooks. During such bootcamps, we should be able to use the source code directly from this directory. The source code folder may optionally contain a Makefile, especially for HPC labs.
* presentations: Consists of presentations for the labs (PDF format is preferred)
* Dockerfile and Singularity: Each lab should have both Docker and Singularity recipes.
The lab may optionally add a custom license in case of any deviation from the top-level directory license (Apache 2.0). The base of the module contains individual subdirectories containing versions of the module for each programming language (C/C++/Fortran…). Each of these directories should contain a directory for each language translation provided (English, for instance). Each lab translation and programming language combination should have a solutions directory containing correct solutions.
Additionally, there are two folders, "experimental" and "archived", for labs covering features that are in the early-access phase (not stable) or deprecated, respectively.
### Git Branching
Adding a new feature/lab will follow a forking workflow, which means feature branch development will happen on a forked repo that later gets merged into our original project (OpenHackathons.org) repository.

The 5 main steps depicted in the image above are as follows:
1. Fork: To create a new lab/feature, the repository must be forked. A fork creates a snapshot of the repository at the time it was forked. Any new feature/lab that will be developed should be based on the develop branch of the repository.
2. Clone: The developer can then clone this new repository to a local machine.
3. Create Feature Branch: Create a new branch with a feature name in which your changes will be done. The recommended naming convention for the feature branch is: <feature_name>. The new changes that the developer makes can be added, committed and pushed.
4. Push: After the changes are committed, the developer pushes the changes to the remote branch. The push command sends the local changes to the GitHub repository.
5. Pull: Submit a pull request. Upon receiving the pull request, a Hackathon team reviewer/owner will review the changes and, upon accepting, it can be merged into the develop branch of OpenHackathons.org.
Git Branch details are as follows:
* main branch: The stable branch.
  * origin/main is the main branch where the source code of HEAD always reflects a production-ready state
  * Merge request is possible through: develop branch
* develop branch: branched from the main branch
  * Must branch from: main branch
  * Must merge back into: main branch
  * It is the main development branch where the source code of HEAD always reflects a state with the latest delivered development changes for the next release.
  * When the source code in the develop branch reaches a stable point and is ready to be released, all of the changes should be merged back into the main branch and then tagged with a release number
  * All feature development should happen by forking and branching from the develop branch only.
| 5,650 | Markdown | 76.410958 | 502 | 0.80354 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/dataset.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import gdown
import os
## FCN Dataset
url = 'https://drive.google.com/uc?id=1mSN6eLqPYEo9d9pBjSGzQ-ocLd8itP0P&export=download'
output = str(os.path.realpath(os.path.dirname(__file__)))+ '/fourcastnet/dataset.zip'
gdown.cached_download(url, output, quiet=False,proxy=None,postprocess=gdown.extractall)
os.remove(output)
## FCN Pre-trained
url = 'https://drive.google.com/uc?id=1oSkK69LGP3DfU2tlH5iaejOh94VNsMDu&export=download'
output = str(os.path.realpath(os.path.dirname(__file__))) + '/../jupyter_notebook/FourCastNet/pre_trained.zip'
gdown.cached_download(url, output, quiet=False, proxy=None, postprocess=gdown.extractall)
os.remove(output)
## NS Data
url = 'https://drive.google.com/uc?id=1IXEGbM3NOO6Dig1sxG1stHubwb09-D2N&export=download'
output = str(os.path.realpath(os.path.dirname(__file__))) + '/navier_stokes/dataset.zip'
gdown.cached_download(url, output, quiet=False, proxy=None, postprocess=gdown.extractall)
os.remove(output)
## FCN for Omniverse-P1
url = 'https://drive.google.com/uc?id=16YqSnstqoSJdgBzerbzYIkYagwS12lK3&export=download'
output = str(os.path.realpath(os.path.dirname(__file__))) + '/FCN.zip'
gdown.cached_download(url, output, quiet=False, proxy=None, postprocess=gdown.extractall)
os.remove(output)
## FCN for Omniverse-P2
url = 'https://drive.google.com/uc?id=1lSSx8eKfqCcHAbDvXTeUMoZGHfVQe-HG&export=download'
output = str(os.path.realpath(os.path.dirname(__file__))) + '/FCN/dataset.zip'
gdown.cached_download(url, output, quiet=False, proxy=None, postprocess=gdown.extractall)
os.remove(output)
## Download and Install Omniverse
url = 'https://drive.google.com/uc?id=1DugS2IbHhBPyCE-EuZczLHBZnlnFViIm&export=download'
output = str(os.path.realpath(os.path.dirname(__file__))) + '/ov.zip'
gdown.cached_download(url, output, quiet=False, proxy=None, postprocess=gdown.extractall)
os.remove(output)
| 2,958 | Python | 46.725806 | 110 | 0.772481 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/spring_mass/spring_mass_solver.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
from sympy import Symbol, Eq
import modulus
from modulus.sym.hydra import ModulusConfig, instantiate_arch
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_1d import Point1D
from modulus.sym.geometry import Parameterization
from modulus.sym.domain.constraint import PointwiseBoundaryConstraint
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.key import Key
from modulus.sym.node import Node
from spring_mass_ode import SpringMass
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
sm = SpringMass(k=(2, 1, 1, 2), m=(1, 1, 1))
sm_net = instantiate_arch(
input_keys=[Key("t")],
output_keys=[Key("x1"), Key("x2"), Key("x3")],
cfg=cfg.arch.fully_connected,
)
nodes = sm.make_nodes() + [
sm_net.make_node(name="spring_mass_network", jit=cfg.jit)
]
# add constraints to solver
# make geometry
geo = Point1D(0)
t_max = 10.0
t_symbol = Symbol("t")
x = Symbol("x")
time_range = {t_symbol: (0, t_max)}
# make domain
domain = Domain()
# initial conditions
IC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"x1": 1.0, "x2": 0, "x3": 0, "x1__t": 0, "x2__t": 0, "x3__t": 0},
batch_size=cfg.batch_size.IC,
lambda_weighting={
"x1": 1.0,
"x2": 1.0,
"x3": 1.0,
"x1__t": 1.0,
"x2__t": 1.0,
"x3__t": 1.0,
},
parameterization=Parameterization({t_symbol: 0}),
)
domain.add_constraint(IC, name="IC")
# solve over given time period
interior = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"ode_x1": 0.0, "ode_x2": 0.0, "ode_x3": 0.0},
batch_size=cfg.batch_size.interior,
parameterization=Parameterization(time_range),
)
domain.add_constraint(interior, "interior")
# add validation data
deltaT = 0.001
t = np.arange(0, t_max, deltaT)
t = np.expand_dims(t, axis=-1)
invar_numpy = {"t": t}
outvar_numpy = {
"x1": (1 / 6) * np.cos(t)
+ (1 / 2) * np.cos(np.sqrt(3) * t)
+ (1 / 3) * np.cos(2 * t),
"x2": (2 / 6) * np.cos(t)
+ (0 / 2) * np.cos(np.sqrt(3) * t)
- (1 / 3) * np.cos(2 * t),
"x3": (1 / 6) * np.cos(t)
- (1 / 2) * np.cos(np.sqrt(3) * t)
+ (1 / 3) * np.cos(2 * t),
}
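    # Illustrative sanity check (added; not part of the original script): the
    # closed-form solution above must satisfy the initial conditions imposed by
    # the IC constraint, x1(0) = 1 and x2(0) = x3(0) = 0.
    assert np.isclose(outvar_numpy["x1"][0], 1.0)
    assert np.isclose(outvar_numpy["x2"][0], 0.0)
    assert np.isclose(outvar_numpy["x3"][0], 0.0)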
validator = PointwiseValidator(
nodes=nodes, invar=invar_numpy, true_outvar=outvar_numpy, batch_size=1024
)
domain.add_validator(validator)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| 4,033 | Python | 31.532258 | 81 | 0.631044 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/spring_mass/plot_results_spring.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import matplotlib.pyplot as plt
base_dir = "outputs/spring_mass_solver/validators/"
# plot in 1d
data = np.load(base_dir + "validator.npz", allow_pickle=True)
data = np.atleast_1d(data.f.arr_0)[0]
plt.plot(data["t"], data["true_x1"], label="True x1")
plt.plot(data["t"], data["true_x2"], label="True x2")
plt.plot(data["t"], data["true_x3"], label="True x3")
plt.plot(data["t"], data["pred_x1"], label="Pred x1")
plt.plot(data["t"], data["pred_x2"], label="Pred x2")
plt.plot(data["t"], data["pred_x3"], label="Pred x3")
plt.legend()
plt.savefig("comparison.png")
| 1,712 | Python | 42.923076 | 72 | 0.737734 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/spring_mass/spring_mass_ode.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from sympy import Symbol, Function, Number
from modulus.sym.eq.pde import PDE
class SpringMass(PDE):
name = "SpringMass"
def __init__(self, k=(2, 1, 1, 2), m=(1, 1, 1)):
self.k = k
self.m = m
k1 = k[0]
k2 = k[1]
k3 = k[2]
k4 = k[3]
m1 = m[0]
m2 = m[1]
m3 = m[2]
t = Symbol("t")
input_variables = {"t": t}
x1 = Function("x1")(*input_variables)
x2 = Function("x2")(*input_variables)
x3 = Function("x3")(*input_variables)
        # Promote parameters to SymPy expressions: strings become functions of
        # the input variables, numeric values become constants.
        def _to_expr(v):
            if type(v) is str:
                return Function(v)(*input_variables)
            elif type(v) in [float, int]:
                return Number(v)
            return v

        k1, k2, k3, k4 = (_to_expr(v) for v in (k1, k2, k3, k4))
        m1, m2, m3 = (_to_expr(v) for v in (m1, m2, m3))
self.equations = {}
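        # The residuals below follow from Newton's second law for each mass:
        #   m1*x1'' = -k1*x1 + k2*(x2 - x1)
        #   m2*x2'' = -k2*(x2 - x1) + k3*(x3 - x2)
        #   m3*x3'' = -k3*(x3 - x2) - k4*x3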
self.equations["ode_x1"] = m1 * (x1.diff(t)).diff(t) + k1 * x1 - k2 * (x2 - x1)
self.equations["ode_x2"] = (
m2 * (x2.diff(t)).diff(t) + k2 * (x2 - x1) - k3 * (x3 - x2)
)
self.equations["ode_x3"] = m3 * (x3.diff(t)).diff(t) + k3 * (x3 - x2) + k4 * x3
| 2,999 | Python | 34.294117 | 87 | 0.585195 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/spring_mass/spring_mass_inverse.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import torch
import numpy as np
from sympy import Symbol, Eq
import modulus
from modulus.sym.hydra import ModulusConfig, instantiate_arch
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_1d import Point1D
from modulus.sym.geometry import Parameterization
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from spring_mass_ode import SpringMass
@modulus.sym.main(config_path="conf", config_name="config_inverse")
def run(cfg: ModulusConfig) -> None:
# prepare data
t_max = 10.0
deltaT = 0.01
t = np.arange(0, t_max, deltaT)
t = np.expand_dims(t, axis=-1)
invar_numpy = {"t": t}
outvar_numpy = {
"x1": (1 / 6) * np.cos(t)
+ (1 / 2) * np.cos(np.sqrt(3) * t)
+ (1 / 3) * np.cos(2 * t),
"x2": (2 / 6) * np.cos(t)
+ (0 / 2) * np.cos(np.sqrt(3) * t)
- (1 / 3) * np.cos(2 * t),
"x3": (1 / 6) * np.cos(t)
- (1 / 2) * np.cos(np.sqrt(3) * t)
+ (1 / 3) * np.cos(2 * t),
}
outvar_numpy.update({"ode_x1": np.full_like(invar_numpy["t"], 0)})
outvar_numpy.update({"ode_x2": np.full_like(invar_numpy["t"], 0)})
outvar_numpy.update({"ode_x3": np.full_like(invar_numpy["t"], 0)})
# make list of nodes to unroll graph on
sm = SpringMass(k=(2, 1, 1, "k4"), m=("m1", 1, 1))
sm_net = instantiate_arch(
input_keys=[Key("t")],
output_keys=[Key("x1"), Key("x2"), Key("x3")],
cfg=cfg.arch.fully_connected,
)
invert_net = instantiate_arch(
input_keys=[Key("t")],
output_keys=[Key("m1"), Key("k4")],
cfg=cfg.arch.fully_connected,
)
nodes = (
sm.make_nodes(
detach_names=[
"x1",
"x1__t",
"x1__t__t",
"x2",
"x2__t",
"x2__t__t",
"x3",
"x3__t",
"x3__t__t",
]
)
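        # Detaching the state outputs means the ODE residual losses
        # backpropagate only into the parameter network (m1, k4), while the
        # state network is trained by the IC and data losses.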
+ [sm_net.make_node(name="spring_mass_network", jit=cfg.jit)]
+ [invert_net.make_node(name="invert_network", jit=cfg.jit)]
)
# add constraints to solver
# make geometry
geo = Point1D(0)
t_symbol = Symbol("t")
x = Symbol("x")
time_range = {t_symbol: (0, t_max)}
# make domain
domain = Domain()
# initial conditions
IC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"x1": 1.0, "x2": 0, "x3": 0, "x1__t": 0, "x2__t": 0, "x3__t": 0},
batch_size=cfg.batch_size.IC,
lambda_weighting={
"x1": 1.0,
"x2": 1.0,
"x3": 1.0,
"x1__t": 1.0,
"x2__t": 1.0,
"x3__t": 1.0,
},
parameterization=Parameterization({t_symbol: 0}),
)
domain.add_constraint(IC, name="IC")
# data and pdes
data = PointwiseConstraint.from_numpy(
nodes=nodes,
invar=invar_numpy,
outvar=outvar_numpy,
batch_size=cfg.batch_size.data,
)
domain.add_constraint(data, name="Data")
# add monitors
monitor = PointwiseMonitor(
invar_numpy,
output_names=["m1"],
metrics={"mean_m1": lambda var: torch.mean(var["m1"])},
nodes=nodes,
)
domain.add_monitor(monitor)
monitor = PointwiseMonitor(
invar_numpy,
output_names=["k4"],
metrics={"mean_k4": lambda var: torch.mean(var["k4"])},
nodes=nodes,
)
domain.add_monitor(monitor)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| 4,988 | Python | 29.796296 | 81 | 0.591419 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/spring_mass/conf/config_inverse.yaml | defaults :
- modulus_default
- arch:
- fully_connected
- scheduler: tf_exponential_lr
- optimizer: adam
- loss: sum
- _self_
arch:
fully_connected:
layer_size: 256
save_filetypes : "vtk,npz"
scheduler:
decay_rate: 0.95
decay_steps: 100
training:
rec_results_freq: 1000
max_steps : 10000
batch_size:
IC: 10
data: 1000
| 364 | YAML | 12.518518 | 32 | 0.634615 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/spring_mass/conf/config.yaml | defaults :
- modulus_default
- arch:
- fully_connected
- scheduler: tf_exponential_lr
- optimizer: adam
- loss: sum
- _self_
save_filetypes : "vtk,npz"
scheduler:
decay_rate: 0.95
decay_steps: 100
training:
rec_results_freq: 1000
max_steps : 10000
batch_size:
IC: 10
interior: 500
| 315 | YAML | 12.73913 | 32 | 0.647619 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/projectile/projectile.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
from sympy import Symbol, sin, cos, pi, Eq
import torch
import modulus
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_1d import Line1D,Point1D
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.key import Key
from modulus.sym.node import Node
from projectile_eqn import ProjectileEquation
from modulus.sym.utils.io import (
csv_to_dict,
ValidatorPlotter,
InferencerPlotter,
)
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
#Creating Nodes and Domain
pe = ProjectileEquation()
projectile_net = instantiate_arch(
input_keys=[Key("t")],
output_keys=[Key("x"),Key("y")],
cfg=cfg.arch.fully_connected,
)
nodes = pe.make_nodes() + [projectile_net.make_node(name="projectile_network")]
x, y, t = Symbol("x"), Symbol("y"), Symbol("t")
#Creating Geometry and adding constraint
geo = Point1D(0)
#make domain
projectile_domain = Domain()
#add constraint to solver
v_o = 40.0
theta = np.pi/3
time_range = {t :(0.0,5.0)}
    # initial condition: constrain the point geometry at t = 0
    IC = PointwiseBoundaryConstraint(
        nodes=nodes,
        geometry=geo,
        outvar={"x": 0.0, "y": 0.0, "x__t": v_o * cos(theta), "y__t": v_o * sin(theta)},
        batch_size=cfg.batch_size.initial_x,
        parameterization={t: 0.0},
    )
projectile_domain.add_constraint(IC,"IC")
#interior
    interior = PointwiseBoundaryConstraint(
        nodes=nodes,
        geometry=geo,
        outvar={"ode_x": 0.0, "ode_y": -9.81},
        batch_size=cfg.batch_size.interior,
        parameterization=time_range,
    )
projectile_domain.add_constraint(interior,"interior")
# Setup validator
delta_T = 0.01
    t_val = np.arange(0.0, 5.0, delta_T)
    T_val = np.expand_dims(t_val.flatten(), axis=-1)
    X_val = v_o * np.cos(theta) * T_val
    Y_val = v_o * np.sin(theta) * T_val - 0.5 * 9.81 * (T_val**2)
    invar_numpy = {"t": T_val}
    outvar_numpy = {"x": X_val, "y": Y_val}
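    # (Added illustration) With v_o = 40 m/s and theta = pi/3, the analytic
    # time of flight is 2*v_o*sin(theta)/g ≈ 7.06 s, presumably why the
    # inferencer below samples t in [0, 8) to cover the full trajectory.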
validator = PointwiseValidator(
nodes=nodes,
invar=invar_numpy,
true_outvar=outvar_numpy,
batch_size=128,
        plotter=ValidatorPlotter(),
)
projectile_domain.add_validator(validator)
# Setup Inferencer
    t_infe = np.arange(0, 8, 0.001)
    T_infe = np.expand_dims(t_infe.flatten(), axis=-1)
    invar_infe = {"t": T_infe}
grid_inference = PointwiseInferencer(
nodes=nodes,
invar=invar_infe,
output_names=["x","y"],
batch_size=128,
plotter=InferencerPlotter(),
)
projectile_domain.add_inferencer(grid_inference, "inferencer_data")
#make solver
slv = Solver(cfg, projectile_domain)
#start solve
slv.solve()
if __name__ == "__main__":
run()
| 4,482 | Python | 25.370588 | 86 | 0.657073 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/projectile/projectile_eqn.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from sympy import Symbol, Function, Number, sin, pi, exp
from modulus.sym.eq.pde import PDE
class ProjectileEquation(PDE):
name = "ProjectileEquation"
def __init__(self):
#time
t = Symbol("t")
#make input variables
input_variables = {"t": t}
#make y function
x = Function("x")(*input_variables)
y = Function("y")(*input_variables)
#set equation
self.equations = {}
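        # ode_x and ode_y are the second time derivatives of the coordinates;
        # the training script imposes ode_x = 0 and ode_y = -9.81, i.e. free
        # flight under gravity.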
self.equations["ode_x"] = x.diff(t,2)
self.equations["ode_y"] = y.diff(t,2)
| 1,679 | Python | 35.521738 | 72 | 0.704586 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/projectile/conf/config.yaml | defaults :
- modulus_default
- arch:
- fully_connected
- scheduler: tf_exponential_lr
- optimizer: adam
- loss: sum
- _self_
save_filetypes : "vtk,npz"
scheduler:
decay_rate: 0.95
decay_steps: 100
training:
rec_validation_freq: 1000
rec_inference_freq: 2000
rec_monitor_freq: 1000
rec_constraint_freq: 2000
max_steps : 5000
batch_size:
initial_x: 100
interior: 1000
graph:
func_arch: true
cuda_graphs: True
cuda_graph_warmup: 20 | 479 | YAML | 14 | 32 | 0.670146 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/fourcastnet.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Script to train Fourcastnet on ERA5
# Ref: https://arxiv.org/abs/2202.11214
import modulus
from modulus.sym.hydra.config import ModulusConfig
from modulus.sym.key import Key
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.solver import Solver
from modulus.sym.utils.io import GridValidatorPlotter
from src.dataset import ERA5HDF5GridDataset
from src.fourcastnet import FourcastNetArch
from src.loss import LpLoss
@modulus.sym.main(config_path="conf", config_name="config_FCN")
def run(cfg: ModulusConfig) -> None:
# load training/ test data
channels = list(range(cfg.custom.n_channels))
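    # The first n_channels ERA5 variables are used; their order matches
    # var_key_dict in inferencer.py (u10, v10, t2m, sp, msl, ...).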
train_dataset = ERA5HDF5GridDataset(
cfg.custom.training_data_path,
chans=channels,
tstep=cfg.custom.tstep,
n_tsteps=cfg.custom.n_tsteps,
patch_size=cfg.arch.afno.patch_size,
)
test_dataset = ERA5HDF5GridDataset(
cfg.custom.test_data_path,
chans=channels,
tstep=cfg.custom.tstep,
n_tsteps=cfg.custom.n_tsteps,
patch_size=cfg.arch.afno.patch_size,
n_samples_per_year=20,
)
# define input/output keys
input_keys = [Key(k, size=train_dataset.nchans) for k in train_dataset.invar_keys]
output_keys = [Key(k, size=train_dataset.nchans) for k in train_dataset.outvar_keys]
# make list of nodes to unroll graph on
model = FourcastNetArch(
input_keys=input_keys,
output_keys=output_keys,
img_shape=test_dataset.img_shape,
patch_size=cfg.arch.afno.patch_size,
embed_dim=cfg.arch.afno.embed_dim,
depth=cfg.arch.afno.depth,
num_blocks=cfg.arch.afno.num_blocks,
)
nodes = [model.make_node(name="FCN")]
# make domain
domain = Domain()
# add constraints to domain
supervised = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset,
batch_size=cfg.batch_size.grid,
loss=LpLoss(),
num_workers=cfg.custom.num_workers.grid,
)
domain.add_constraint(supervised, "supervised")
# add validator
val = GridValidator(
nodes,
dataset=test_dataset,
batch_size=cfg.batch_size.validation,
plotter=GridValidatorPlotter(n_examples=5),
num_workers=cfg.custom.num_workers.validation,
)
domain.add_validator(val, "test")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| 3,688 | Python | 33.157407 | 88 | 0.706345 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/inferencer.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#"Script to carry out Fourcastnet inference"
import omegaconf
import torch
import logging
import numpy as np
from torch.utils.data import DataLoader, Sampler
from modulus.sym.hydra import to_absolute_path
from modulus.sym.key import Key
from modulus.sym.distributed.manager import DistributedManager
from src.dataset import ERA5HDF5GridDataset
from src.fourcastnet import FourcastNetArch
from src.metrics import Metrics
logging.basicConfig(format="[%(levelname)s] - %(message)s", level=logging.INFO)
var_key_dict = {
0: "u10",
1: "v10",
2: "t2m",
3: "sp",
4: "msl",
5: "t850",
6: "u1000",
7: "v1000",
8: "z1000",
9: "u850",
10: "v850",
11: "z850",
12: "u500",
13: "v500",
14: "z500",
15: "t500",
16: "z50",
17: "r500",
18: "r850",
19: "tcwv",
}
def to_device(tensor_dict):
return {
key: torch.as_tensor(value, dtype=torch.float32, device=device)
for key, value in tensor_dict.items()
}
class SubsetSequentialBatchSampler(Sampler):
"""Custom subset sequential batch sampler for inferencer"""
def __init__(self, subset):
self.subset = subset
def __iter__(self):
for i in self.subset:
yield [i] # batch size of 1
def __len__(self):
return len(self.subset)
# load configuration
cfg = omegaconf.OmegaConf.load("conf/config_FCN.yaml")
model_path = to_absolute_path("fcn_era5.pth")
# get device
device = DistributedManager().device
# load test data
test_dataset = ERA5HDF5GridDataset(
cfg.custom.test_data_path, # Test data location e.g. /era5/20var/test
chans=list(range(cfg.custom.n_channels)),
tstep=cfg.custom.tstep,
n_tsteps=1, # set to one for inference
patch_size=cfg.arch.afno.patch_size,
)
m = Metrics(
test_dataset.img_shape,
clim_mean_path="/data/stats/time_means.npy", # Path to climate mean
device=device
)
# define input/output keys
input_keys = [Key(k, size=test_dataset.nchans) for k in test_dataset.invar_keys]
output_keys = [Key(k, size=test_dataset.nchans) for k in test_dataset.outvar_keys]
# create model
model = FourcastNetArch(
input_keys=input_keys,
output_keys=output_keys,
img_shape=test_dataset.img_shape,
patch_size=cfg.arch.afno.patch_size,
embed_dim=cfg.arch.afno.embed_dim,
depth=cfg.arch.afno.depth,
num_blocks=cfg.arch.afno.num_blocks,
)
# load parameters
model.load_state_dict(torch.load(model_path))
model.to(device)
logging.info(f"Loaded model {model_path}")
# define subsets of dataset to run inference
nics = 180  # number of 2 day decorrelation-time samples
nsteps = 25
last = len(test_dataset) - 1 - nsteps * cfg.custom.tstep
# Variable dictionary
acc_recursive = {key: [] for key in var_key_dict.values()}
rmse_recursive = {key: [] for key in var_key_dict.values()}
# Normalization stats
mu = torch.tensor(test_dataset.mu[0]).to(device) # shape [C, 1, 1]
sd = torch.tensor(test_dataset.sd[0]).to(device) # shape [C, 1, 1]
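# (Comment added for clarity) mu/sd are the per-channel normalisation stats of
# the test dataset; below, network outputs are rescaled by sd before metrics.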
# run inference
with torch.no_grad():
for ic in range(0, min([8 * nics + 1, last])):
subset = cfg.custom.tstep * np.arange(nsteps) + ic
if (ic + 1) % 8 == 0 or (ic + 1) % 36 == 0 or ic == 0:
logging.info(f"Running IC at step {ic}")
# get dataloader
dataloader = DataLoader(
dataset=test_dataset,
batch_sampler=SubsetSequentialBatchSampler(subset),
pin_memory=True,
num_workers=1,
worker_init_fn=test_dataset.worker_init_fn,
)
acc_error = torch.zeros(nsteps, test_dataset.nchans)
rmse_error = torch.zeros(nsteps, test_dataset.nchans)
for tstep, (invar, true_outvar, _) in enumerate(dataloader):
if tstep % 10 == 0:
logging.info(f"ic: {ic} tstep: {tstep}/{nsteps}")
# place tensors on device
invar = to_device(invar)
true_outvar = to_device(true_outvar)
# 1. single step inference
pred_outvar_single = model(invar)
pred_single = sd * pred_outvar_single["x_t1"][0]
# 2. recursive inference
if tstep == 0:
pred_outvar_recursive = model(invar)
else:
pred_outvar_recursive = model(
{"x_t0": pred_outvar_recursive["x_t1"]}
)
# get unormalised target / prediction
true = sd * true_outvar["x_t1"][0]
pred_recursive = sd * pred_outvar_recursive["x_t1"][0]
# Calc metrics
rmse_error[tstep] = m.weighted_rmse(pred_recursive, true).detach().cpu()
acc_error[tstep] = m.weighted_acc(pred_recursive, true).detach().cpu()
# Save fields into dictionary
if (ic + 1) % 8 == 0 or (ic + 1) % 36 == 0 or ic == 0:
for i, fld in var_key_dict.items():
# Fields with 9 day (36) dc time
if fld == "z500" or fld == "t2m" or fld == "t850":
if (ic + 1) % 36 == 0 or ic == 0:
acc_recursive[fld].append(acc_error[:, i].numpy())
rmse_recursive[fld].append(rmse_error[:, i].numpy())
# Rest have regular 2 day (8) dc time
else:
if (ic + 1) % 8 == 0 or ic == 0:
acc_recursive[fld].append(acc_error[:, i].numpy())
rmse_recursive[fld].append(rmse_error[:, i].numpy())
# Field stacking
for var_dict in [acc_recursive, rmse_recursive]:
for key, value in var_dict.items():
print(f"{len(value)} samples for field {key}")
var_dict[key] = np.stack(value, axis=0)
np.save("rmse_recursive", rmse_recursive)
np.save("acc_recursive", acc_recursive)
| 7,069 | Python | 33.827586 | 88 | 0.610553 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/src/fourcastnet.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Defines the FCN architecture"""
import logging
import torch
from torch import Tensor
from typing import List, Tuple, Dict
from modulus.sym.models.afno.afno import AFNONet
from modulus.sym.models.arch import Arch
from modulus.sym.key import Key
class FourcastNetArch(Arch):
"Defines the FourcastNet architecture"
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
img_shape: Tuple[int, int],
detach_keys: List[Key] = [],
patch_size: int = 16,
embed_dim: int = 256,
depth: int = 4,
num_blocks: int = 4,
) -> None:
"""Fourcastnet model. This is a simple wrapper for Modulus' AFNO model.
The only difference is that FourcastNet needs multi-step training. This class
allows the model to auto-regressively predict multiple timesteps
Parameters (Same as AFNO)
----------
input_keys : List[Key]
Input key list. The key dimension size should equal the variables channel dim.
output_keys : List[Key]
Output key list. The key dimension size should equal the variables channel dim.
img_shape : Tuple[int, int]
Input image dimensions (height, width)
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
patch_size : int, optional
            Size of image patches, by default 16
embed_dim : int, optional
Embedded channel size, by default 256
depth : int, optional
Number of AFNO layers, by default 4
num_blocks : int, optional
Number of blocks in the frequency weight matrices, by default 4
"""
super().__init__(
input_keys=input_keys,
output_keys=output_keys,
detach_keys=detach_keys,
)
# get number of timesteps steps to unroll
assert (
len(self.input_keys) == 1
), "Error, FourcastNet only accepts one input variable (x_t0)"
self.n_tsteps = len(self.output_keys)
logging.info(f"Unrolling FourcastNet over {self.n_tsteps} timesteps")
# get number of input/output channels
in_channels = self.input_keys[0].size
out_channels = self.output_keys[0].size
        # initialise AFNO kernel
self._impl = AFNONet(
in_channels=in_channels,
out_channels=out_channels,
patch_size=(patch_size, patch_size),
img_size=img_shape,
embed_dim=embed_dim,
depth=depth,
num_blocks=num_blocks,
)
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
# prepare input tensor
x = self.prepare_input(
input_variables=in_vars,
mask=self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=1,
input_scales=self.input_scales,
)
# unroll model over multiple timesteps
ys = []
for t in range(self.n_tsteps):
x = self._impl(x)
ys.append(x)
y = torch.cat(ys, dim=1)
# prepare output dict
return self.prepare_output(
output_tensor=y,
output_var=self.output_key_dict,
dim=1,
output_scales=self.output_scales,
)
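
# --- Minimal usage sketch (added illustration; the key names, channel count,
# and image shape below are assumptions, not values fixed by this module) ---
#
#   model = FourcastNetArch(
#       input_keys=[Key("x_t0", size=20)],
#       output_keys=[Key("x_t1", size=20)],
#       img_shape=(720, 1440),
#       patch_size=8,
#   )
#   out = model({"x_t0": torch.randn(1, 20, 720, 1440)})  # -> {"x_t1": tensor}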
| 4,496 | Python | 35.560975 | 91 | 0.630338 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/src/metrics.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import torch
import numpy as np
from typing import Tuple
class Metrics:
"""Class used for computing performance related metrics. Expects predictions /
targets to be of shape [C, H, W] where H is latitude dimension and W is longitude
dimension. Metrics are computed for each channel separately.
Parameters
----------
img_shape : Tuple[int]
Shape of input image (resolution for fourcastnet)
clim_mean_path : str, optional
Path to total climate mean data, needed for ACC. By default "/era5/stats/time_means.npy"
device : torch.device, optional
Pytorch device model is on, by default 'cpu'
"""
def __init__(
self,
img_shape: Tuple[int],
clim_mean_path: str = "/era5/stats/time_means.npy",
device: torch.device = "cpu",
):
self.img_shape = tuple(img_shape)
self.device = device
# Load climate mean value
self.clim_mean = torch.as_tensor(np.load(clim_mean_path))
# compute latitude weighting
nlat = img_shape[0]
lat = torch.linspace(90, -90, nlat)
lat_weight = torch.cos(torch.pi * (lat / 180))
lat_weight = nlat * lat_weight / lat_weight.sum()
self.lat_weight = lat_weight.view(1, nlat, 1)
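        # The weights are cos(latitude), rescaled so they average to 1 over the
        # nlat grid rows; this compensates for the shrinking area of
        # equiangular grid cells toward the poles.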
# place on device
if self.device is not None:
self.lat_weight = self.lat_weight.to(self.device)
self.clim_mean = self.clim_mean.to(self.device)
def _check_shape(self, *args):
# checks for shape [C, H, W]
for x in args:
assert x.ndim == 3
assert tuple(x.shape[1:]) == self.img_shape
def weighted_acc(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""Computes the anomaly correlation coefficient (ACC). The ACC calculation is
weighted based on the latitude.
Parameters
----------
pred : torch.Tensor
[C, H, W] Predicted tensor
target : torch.Tensor
[C, H, W] Target tensor
Returns
-------
torch.Tensor
[C] ACC values for each channel
"""
self._check_shape(pred, target)
# subtract climate means
(n_chans, img_x, img_y) = pred.shape
clim_mean = self.clim_mean[0, 0:n_chans, 0:img_x]
pred_hat = pred - clim_mean
target_hat = target - clim_mean
# Weighted mean
pred_bar = torch.sum(
self.lat_weight * pred_hat, dim=(1, 2), keepdim=True
) / torch.sum(
self.lat_weight * torch.ones_like(pred_hat), dim=(1, 2), keepdim=True
)
target_bar = torch.sum(
self.lat_weight * target_hat, dim=(1, 2), keepdim=True
) / torch.sum(
self.lat_weight * torch.ones_like(target_hat), dim=(1, 2), keepdim=True
)
pred_diff = pred_hat - pred_bar
target_diff = target_hat - target_bar
# compute weighted acc
# Ref: https://www.atmos.albany.edu/daes/atmclasses/atm401/spring_2016/ppts_pdfs/ECMWF_ACC_definition.pdf
p1 = torch.sum(self.lat_weight * pred_diff * target_diff, dim=(1, 2))
p2 = torch.sum(self.lat_weight * pred_diff * pred_diff, dim=(1, 2))
p3 = torch.sum(self.lat_weight * target_diff * target_diff, dim=(1, 2))
m = p1 / torch.sqrt(p2 * p3)
return m
def weighted_rmse(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""Computes RMSE weighted based on latitude
Parameters
----------
pred : torch.Tensor
[C, H, W] Predicted tensor
target : torch.Tensor
[C, H, W] Target tensor
Returns
-------
torch.Tensor
            [C] Weighted RMSE values for each channel
"""
self._check_shape(pred, target)
# compute weighted rmse
m = torch.sqrt(torch.mean(self.lat_weight * (pred - target) ** 2, dim=(1, 2)))
return m
| 5,098 | Python | 34.657342 | 113 | 0.616712 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/src/dataset.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import h5py
import logging
import numpy as np
from typing import List
from pathlib import Path
from modulus.sym.hydra import to_absolute_path
from modulus.sym.dataset import Dataset
class ERA5HDF5GridDataset(Dataset):
"""Lazy-loading ERA5 dataset.
Parameters
----------
data_dir : str
Directory where ERA5 data is stored
chans : List[int]
Defines which ERA5 variables to load
tstep : int
Defines the size of the timestep between the input and output variables
n_tsteps : int, optional
Defines how many timesteps are included in the output variables
Default is 1
patch_size : int, optional
If specified, crops input and output variables so image dimensions are
divisible by patch_size
Default is None
n_samples_per_year : int, optional
If specified, randomly selects n_samples_per_year samples from each year
rather than all of the samples per year
Default is None
stats_dir : str, optional
Directory to test data statistic numpy files that have the global mean and variance
"""
def __init__(
self,
data_dir: str,
chans: List[int],
tstep: int = 1,
n_tsteps: int = 1,
patch_size: int = None,
n_samples_per_year: int = None,
stats_dir: str = None,
):
self.data_dir = Path(to_absolute_path(data_dir))
        logging.info(f"Data directory: {self.data_dir}")
self.chans = chans
self.nchans = len(self.chans)
self.tstep = tstep
self.n_tsteps = n_tsteps
self.patch_size = patch_size
self.n_samples_per_year = n_samples_per_year
        if stats_dir is None:
            self.stats_dir = self.data_dir.parent / "stats"
        else:
            self.stats_dir = Path(to_absolute_path(stats_dir))
# check root directory exists
assert (
self.data_dir.is_dir()
), f"Error, data directory {self.data_dir} does not exist"
assert (
self.stats_dir.is_dir()
), f"Error, stats directory {self.stats_dir} does not exist"
# get all input data files
self.data_paths = sorted(self.data_dir.glob("??????.h5"))
for data_path in self.data_paths:
logging.info(f"ERA5 file found: {data_path}")
self.n_years = len(self.data_paths)
logging.info(f"Number of months: {self.n_years}")
# get total number of examples and image shape from the first file,
# assuming other files have exactly the same format.
logging.info(f"Getting file stats from {self.data_paths[0]}")
with h5py.File(self.data_paths[0], "r") as f:
self.n_samples_per_year_all = f["fields"].shape[0]
self.img_shape = f["fields"].shape[2:]
logging.info(f"Number of channels available: {f['fields'].shape[1]}")
# get example indices to use
if self.n_samples_per_year is None:
self.n_samples_per_year = self.n_samples_per_year_all
self.samples = [
np.arange(self.n_samples_per_year) for _ in range(self.n_years)
]
else:
if self.n_samples_per_year > self.n_samples_per_year_all:
raise ValueError(
f"n_samples_per_year ({self.n_samples_per_year}) > number of samples available ({self.n_samples_per_year_all})!"
)
self.samples = [
np.random.choice(
np.arange(self.n_samples_per_year_all),
self.n_samples_per_year,
replace=False,
)
for _ in range(self.n_years)
]
logging.info(f"Number of samples/month: {self.n_samples_per_year}")
# get total length
self.length = self.n_years * self.n_samples_per_year
# adjust image shape if patch_size defined
if self.patch_size is not None:
self.img_shape = [s - s % self.patch_size for s in self.img_shape]
logging.info(f"Input image shape: {self.img_shape}")
# load normalisation values
# has shape [1, C, 1, 1]
self.mu = np.load(self.stats_dir / "global_means.npy")[:, self.chans]
# has shape [1, C, 1, 1]
self.sd = np.load(self.stats_dir / "global_stds.npy")[:, self.chans]
assert (
self.mu.shape == self.sd.shape == (1, self.nchans, 1, 1)
), "Error, normalisation arrays have wrong shape"
def worker_init_fn(self, iworker):
super().worker_init_fn(iworker)
# open all year files at once on worker thread
self.data_files = [h5py.File(path, "r") for path in self.data_paths]
@property
def invar_keys(self):
return ["x_t0"]
@property
def outvar_keys(self):
return [f"x_t{(i+1)*self.tstep}" for i in range(self.n_tsteps)]
def __getitem__(self, idx):
# get local indices from global index
year_idx = int(idx / self.n_samples_per_year)
local_idx = int(idx % self.n_samples_per_year)
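        # e.g. with n_samples_per_year = 100, idx = 250 maps to year_idx = 2
        # and local_idx = 50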
in_idx = self.samples[year_idx][local_idx]
# get output indices
out_idxs = []
for i in range(self.n_tsteps):
out_idx = in_idx + (i + 1) * self.tstep
# if at end of dataset, just learn identity instead
if out_idx > (self.n_samples_per_year_all - 1):
out_idx = in_idx
out_idxs.append(out_idx)
# get data
xs = []
for idx in [in_idx] + out_idxs:
# get array
# has shape [C, H, W]
x = self.data_files[year_idx]["fields"][idx, self.chans]
assert x.ndim == 3, f"Expected 3 dimensions, but got {x.shape}"
# apply input / output normalisation (broadcasted operation)
x = (x - self.mu[0]) / self.sd[0]
# crop data if needed
if self.patch_size is not None:
x = x[..., : self.img_shape[0], : self.img_shape[1]]
xs.append(x)
# convert to tensor dicts
invar = {"x_t0": xs[0]}
outvar = {f"x_t{(i+1)*self.tstep}": x for i, x in enumerate(xs[1:])}
invar = Dataset._to_tensor_dict(invar)
outvar = Dataset._to_tensor_dict(outvar)
        # TODO: get rid of lambda weighting
lambda_weighting = Dataset._to_tensor_dict(
{k: np.ones_like(v) for k, v in outvar.items()}
)
# lambda_weighting = Dataset._to_tensor_dict(
# {k: np.array([1]) for k, v in outvar.items()}
# )
return invar, outvar, lambda_weighting
def __len__(self):
return self.length
| 7,719 | Python | 36.294686 | 132 | 0.598523 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/src/loss.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import torch
from typing import Dict
Tensor = torch.Tensor
class LpLoss(torch.nn.Module):
def __init__(
self,
d: float = 2.0,
p: float = 2.0,
):
"""Relative Lp loss normalized seperately in the batch dimension.
Expects inputs of the shape [B, C, ...]
Parameters
----------
p : float, optional
Norm power, by default 2.0
"""
super(LpLoss, self).__init__()
        # Dimension and Lp-norm type are positive
assert p > 0.0
self.p = p
def _rel(self, x: torch.Tensor, y: torch.Tensor) -> float:
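        # Computes the batch mean of ||x_b - y_b||_p / ||y_b||_p, i.e. the
        # batch-averaged relative p-norm error.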
num_examples = x.size()[0]
xv = x.reshape(num_examples, -1)
yv = y.reshape(num_examples, -1)
diff_norms = torch.linalg.norm(xv - yv, ord=self.p, dim=1)
y_norms = torch.linalg.norm(yv, ord=self.p, dim=1)
return torch.mean(diff_norms / y_norms)
def forward(
self,
invar: Dict[str, Tensor],
pred_outvar: Dict[str, Tensor],
true_outvar: Dict[str, Tensor],
lambda_weighting: Dict[str, Tensor],
step: int,
) -> Dict[str, float]:
losses = {}
        for key in pred_outvar:
losses[key] = self._rel(pred_outvar[key], true_outvar[key])
return losses
| 2,433 | Python | 33.28169 | 73 | 0.648993 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/conf/config_FCN.yaml | defaults :
- modulus_default
- arch:
- afno
- scheduler: cosine_annealing
- optimizer: adam
- loss: sum
- _self_
arch:
afno:
patch_size: 8
embed_dim: 512
depth: 10
num_blocks: 8
optimizer:
lr: 0.0005
scheduler:
T_max: 80000
custom:
n_channels: 20
tstep: 1
n_tsteps: 1
training_data_path: "/workspace/python/source_code/fourcastnet/data/train" # Training dataset path here
test_data_path: "/workspace/python/source_code/fourcastnet/data/test" # Test dataset path here
num_workers:
grid: 4
validation: 4
tag:
batch_size:
grid: 1
validation: 1
training:
amp: true
rec_constraint_freq: 10000
rec_results_freq : 1000
save_network_freq: 1000
print_stats_freq: 100
summary_freq: 1000
max_steps : 71000
| 787 | YAML | 15.765957 | 105 | 0.662008 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/diffusion_1d/plot_results.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import matplotlib.pyplot as plt
network_dir = "./outputs/diffusion_bar/validators/"
data_1 = np.load(network_dir + "Val1.npz", allow_pickle=True)
data_2 = np.load(network_dir + "Val2.npz", allow_pickle=True)
data_1 = np.atleast_1d(data_1.f.arr_0)[0]
data_2 = np.atleast_1d(data_2.f.arr_0)[0]
plt.plot(data_1["x"][:, 0], data_1["pred_u_1"][:, 0], "--", label="u_1_pred")
plt.plot(data_2["x"][:, 0], data_2["pred_u_2"][:, 0], "--", label="u_2_pred")
plt.plot(data_1["x"][:, 0], data_1["true_u_1"][:, 0], label="u_1_true")
plt.plot(data_2["x"][:, 0], data_2["true_u_2"][:, 0], label="u_2_true")
plt.legend()
plt.savefig("image_diffusion_problem_bootcamp")
| 1,801 | Python | 46.421051 | 77 | 0.716824 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/diffusion_1d/diffusion_bar.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import torch
import numpy as np
from sympy import Symbol, Eq, Function, Number
import modulus
from modulus.sym.hydra import instantiate_arch , ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_1d import Line1D
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pde import PDE
# params for domain
L1 = Line1D(0, 1)
L2 = Line1D(1, 2)
D1 = 1e1
D2 = 1e-1
Tc = 100
Ta = 0
Tb = (Tc + (D1 / D2) * Ta) / (1 + (D1 / D2))
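# Tb is the analytic interface temperature: flux continuity at x = 1 for the
# two unit-length bars gives D1*(Tb - Ta) = D2*(Tc - Tb), which solves to the
# expression above.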
print(f"Ta = {Ta}, Tb = {Tb}, Tc = {Tc}")
class Diffusion(PDE):
name = "Diffusion"
def __init__(self, T="T", D="D", Q=0, dim=3, time=True):
# set params
self.T = T
self.dim = dim
self.time = time
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
# Temperature
assert type(T) == str, "T needs to be string"
T = Function(T)(*input_variables)
# Diffusivity
if type(D) is str:
D = Function(D)(*input_variables)
elif type(D) in [float, int]:
D = Number(D)
# Source
if type(Q) is str:
Q = Function(Q)(*input_variables)
elif type(Q) in [float, int]:
Q = Number(Q)
# set equations
self.equations = {}
self.equations["diffusion_" + self.T] = (
T.diff(t)
- (D * T.diff(x)).diff(x)
- (D * T.diff(y)).diff(y)
- (D * T.diff(z)).diff(z)
- Q
)
class DiffusionInterface(PDE):
name = "DiffusionInterface"
def __init__(self, T_1, T_2, D_1, D_2, dim=3, time=True):
# set params
self.T_1 = T_1
self.T_2 = T_2
self.dim = dim
self.time = time
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
normal_x, normal_y, normal_z = (
Symbol("normal_x"),
Symbol("normal_y"),
Symbol("normal_z"),
)
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
# Diffusivity
if type(D_1) is str:
D_1 = Function(D_1)(*input_variables)
elif type(D_1) in [float, int]:
D_1 = Number(D_1)
if type(D_2) is str:
D_2 = Function(D_2)(*input_variables)
elif type(D_2) in [float, int]:
D_2 = Number(D_2)
# variables to match the boundary conditions (example Temperature)
T_1 = Function(T_1)(*input_variables)
T_2 = Function(T_2)(*input_variables)
# set equations
self.equations = {}
self.equations["diffusion_interface_dirichlet_" + self.T_1 + "_" + self.T_2] = (
T_1 - T_2
)
flux_1 = D_1 * (
normal_x * T_1.diff(x) + normal_y * T_1.diff(y) + normal_z * T_1.diff(z)
)
flux_2 = D_2 * (
normal_x * T_2.diff(x) + normal_y * T_2.diff(y) + normal_z * T_2.diff(z)
)
self.equations["diffusion_interface_neumann_" + self.T_1 + "_" + self.T_2] = (
flux_1 - flux_2
)
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
diff_u1 = Diffusion(T="u_1", D=D1, dim=1, time=False)
diff_u2 = Diffusion(T="u_2", D=D2, dim=1, time=False)
diff_in = DiffusionInterface("u_1", "u_2", D1, D2, dim=1, time=False)
diff_net_u_1 = instantiate_arch(
input_keys=[Key("x")],
output_keys=[Key("u_1")],
cfg=cfg.arch.fully_connected,
)
diff_net_u_2 = instantiate_arch(
input_keys=[Key("x")],
output_keys=[Key("u_2")],
cfg=cfg.arch.fully_connected,
)
nodes = (
diff_u1.make_nodes()
+ diff_u2.make_nodes()
+ diff_in.make_nodes()
+ [diff_net_u_1.make_node(name="u1_network", jit=cfg.jit)]
+ [diff_net_u_2.make_node(name="u2_network", jit=cfg.jit)]
)
    # make domain and add constraints to the solver
domain = Domain()
# sympy variables
x = Symbol("x")
# right hand side (x = 2) Pt c
rhs = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=L2,
outvar={"u_2": Tc},
batch_size=cfg.batch_size.rhs,
criteria=Eq(x, 2),
)
domain.add_constraint(rhs, "right_hand_side")
# left hand side (x = 0) Pt a
lhs = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=L1,
outvar={"u_1": Ta},
batch_size=cfg.batch_size.lhs,
criteria=Eq(x, 0),
)
domain.add_constraint(lhs, "left_hand_side")
# interface 1-2
interface = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=L1,
outvar={
"diffusion_interface_dirichlet_u_1_u_2": 0,
"diffusion_interface_neumann_u_1_u_2": 0,
},
batch_size=cfg.batch_size.interface,
criteria=Eq(x, 1),
)
domain.add_constraint(interface, "interface")
# interior 1
interior_u1 = PointwiseInteriorConstraint(
nodes=nodes,
geometry=L1,
outvar={"diffusion_u_1": 0},
bounds={x: (0, 1)},
batch_size=cfg.batch_size.interior_u1,
)
domain.add_constraint(interior_u1, "interior_u1")
# interior 2
interior_u2 = PointwiseInteriorConstraint(
nodes=nodes,
geometry=L2,
outvar={"diffusion_u_2": 0},
bounds={x: (1, 2)},
batch_size=cfg.batch_size.interior_u2,
)
domain.add_constraint(interior_u2, "interior_u2")
# validation data
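    # the steady two-bar problem has an exact piecewise-linear solution:
    # u_1(x) = Ta + (Tb - Ta) * x on [0, 1] and
    # u_2(x) = Tb + (Tc - Tb) * (x - 1) on [1, 2];
    # the expressions below evaluate it on a sampling grid for the validators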
x = np.expand_dims(np.linspace(0, 1, 100), axis=-1)
u_1 = x * Tb + (1 - x) * Ta
invar_numpy = {"x": x}
outvar_numpy = {"u_1": u_1}
    val = PointwiseValidator(nodes=nodes, invar=invar_numpy, true_outvar=outvar_numpy)
domain.add_validator(val, name="Val1")
# make validation data line 2
x = np.expand_dims(np.linspace(1, 2, 100), axis=-1)
u_2 = (x - 1) * Tc + (2 - x) * Tb
invar_numpy = {"x": x}
outvar_numpy = {"u_2": u_2}
val = PointwiseValidator(nodes=nodes, invar=invar_numpy, true_outvar=outvar_numpy)
domain.add_validator(val, name="Val2")
# make monitors
invar_numpy = {"x": [[1.0]]}
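    # "u_1__x" / "u_2__x" are the autograd derivatives du/dx exposed by the
    # graph; averaging them at x = 1 tracks the temperature gradient (and,
    # up to the factor D, the diffusive flux) on each side of the interface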
monitor = PointwiseMonitor(
invar_numpy,
output_names=["u_1__x"],
metrics={"flux_u1": lambda var: torch.mean(var["u_1__x"])},
nodes=nodes,
requires_grad=True,
)
domain.add_monitor(monitor)
monitor = PointwiseMonitor(
invar_numpy,
output_names=["u_2__x"],
metrics={"flux_u2": lambda var: torch.mean(var["u_2__x"])},
nodes=nodes,
requires_grad=True,
)
domain.add_monitor(monitor)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| 8,835 | Python | 28.065789 | 88 | 0.572835 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/diffusion_1d/plot_results_parameterized.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import matplotlib.pyplot as plt
network_dir = "./outputs/diffusion_bar_parameterized/validators/"
data_1 = np.load(network_dir + "Val1.npz", allow_pickle=True)
data_2 = np.load(network_dir + "Val2.npz", allow_pickle=True)
data_1 = np.atleast_1d(data_1.f.arr_0)[0]
data_2 = np.atleast_1d(data_2.f.arr_0)[0]
plt.plot(data_1["x"][:, 0], data_1["pred_u_1"][:, 0], "--", label="u_1_pred")
plt.plot(data_2["x"][:, 0], data_2["pred_u_2"][:, 0], "--", label="u_2_pred")
plt.plot(data_1["x"][:, 0], data_1["true_u_1"][:, 0], label="u_1_true")
plt.plot(data_2["x"][:, 0], data_2["true_u_2"][:, 0], label="u_2_true")
plt.legend()
plt.savefig("image_diffusion_problem_bootcamp_parameterized")
| 1,829 | Python | 47.157893 | 77 | 0.720066 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/diffusion_1d/diffusion_bar_parameterized.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import torch
import numpy as np
from sympy import Symbol, Eq, Function, Number
import modulus
from modulus.sym.hydra import instantiate_arch , ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry import Parameterization
from modulus.sym.geometry.primitives_1d import Line1D
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pde import PDE
# params for domain
L1 = Line1D(0, 1)
L2 = Line1D(1, 2)
D1 = Symbol("D1")
D1_range = {D1: (5, 25)}
D1_validation = 1e1
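# D1 is kept symbolic: the networks below take it as an extra input and are
# trained over the whole range D1_range, while D1_validation fixes a single
# value for comparison against the analytic solution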
D2 = 1e-1
Tc = 100
Ta = 0
Tb = (Tc + (D1 / D2) * Ta) / (1 + (D1 / D2))
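# Tb is the interface temperature implied by steady flux continuity at x = 1:
# D1 * (Tb - Ta) = D2 * (Tc - Tb)  =>  Tb = (D2 * Tc + D1 * Ta) / (D1 + D2),
# i.e. the expression above after dividing through by D2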
Tb_validation = float(Tb.evalf(subs={D1: D1_validation}))
print(Ta)
print(Tb)
print(Tc)
class Diffusion(PDE):
name = "Diffusion"
def __init__(self, T="T", D="D", Q=0, dim=3, time=True):
# set params
self.T = T
self.dim = dim
self.time = time
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
# Temperature
        assert type(T) == str, "T needs to be a string"
T = Function(T)(*input_variables)
# Diffusivity
if type(D) is str:
D = Function(D)(*input_variables)
elif type(D) in [float, int]:
D = Number(D)
# Source
if type(Q) is str:
Q = Function(Q)(*input_variables)
elif type(Q) in [float, int]:
Q = Number(Q)
# set equations
self.equations = {}
self.equations["diffusion_" + self.T] = (
T.diff(t)
- (D * T.diff(x)).diff(x)
- (D * T.diff(y)).diff(y)
- (D * T.diff(z)).diff(z)
- Q
)
class DiffusionInterface(PDE):
name = "DiffusionInterface"
def __init__(self, T_1, T_2, D_1, D_2, dim=3, time=True):
# set params
self.T_1 = T_1
self.T_2 = T_2
self.dim = dim
self.time = time
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
normal_x, normal_y, normal_z = (
Symbol("normal_x"),
Symbol("normal_y"),
Symbol("normal_z"),
)
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
# Diffusivity
if type(D_1) is str:
D_1 = Function(D_1)(*input_variables)
elif type(D_1) in [float, int]:
D_1 = Number(D_1)
if type(D_2) is str:
D_2 = Function(D_2)(*input_variables)
elif type(D_2) in [float, int]:
D_2 = Number(D_2)
        # field variables whose interface conditions are matched (e.g., temperature)
T_1 = Function(T_1)(*input_variables)
T_2 = Function(T_2)(*input_variables)
# set equations
self.equations = {}
self.equations["diffusion_interface_dirichlet_" + self.T_1 + "_" + self.T_2] = (
T_1 - T_2
)
flux_1 = D_1 * (
normal_x * T_1.diff(x) + normal_y * T_1.diff(y) + normal_z * T_1.diff(z)
)
flux_2 = D_2 * (
normal_x * T_2.diff(x) + normal_y * T_2.diff(y) + normal_z * T_2.diff(z)
)
self.equations["diffusion_interface_neumann_" + self.T_1 + "_" + self.T_2] = (
flux_1 - flux_2
)
@modulus.sym.main(config_path="conf", config_name="config_param")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
diff_u1 = Diffusion(T="u_1", D="D1", dim=1, time=False)
diff_u2 = Diffusion(T="u_2", D=D2, dim=1, time=False)
diff_in = DiffusionInterface("u_1", "u_2", "D1", D2, dim=1, time=False)
diff_net_u_1 = instantiate_arch(
input_keys=[Key("x"), Key("D1")],
output_keys=[Key("u_1")],
cfg=cfg.arch.fully_connected,
)
diff_net_u_2 = instantiate_arch(
input_keys=[Key("x"), Key("D1")],
output_keys=[Key("u_2")],
cfg=cfg.arch.fully_connected,
)
nodes = (
diff_u1.make_nodes()
+ diff_u2.make_nodes()
+ diff_in.make_nodes()
+ [diff_net_u_1.make_node(name="u1_network", jit=cfg.jit)]
+ [diff_net_u_2.make_node(name="u2_network", jit=cfg.jit)]
)
    # make domain and add constraints to the solver
domain = Domain()
# sympy variables
x = Symbol("x")
    # right-hand side (x = 2), point C
rhs = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=L2,
outvar={"u_2": Tc},
batch_size=cfg.batch_size.rhs,
criteria=Eq(x, 2),
parameterization=Parameterization(D1_range),
)
domain.add_constraint(rhs, "right_hand_side")
    # left-hand side (x = 0), point A
lhs = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=L1,
outvar={"u_1": Ta},
batch_size=cfg.batch_size.lhs,
criteria=Eq(x, 0),
parameterization=Parameterization(D1_range),
)
domain.add_constraint(lhs, "left_hand_side")
# interface 1-2
interface = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=L1,
outvar={
"diffusion_interface_dirichlet_u_1_u_2": 0,
"diffusion_interface_neumann_u_1_u_2": 0,
},
batch_size=cfg.batch_size.interface,
criteria=Eq(x, 1),
parameterization=Parameterization(D1_range),
)
domain.add_constraint(interface, "interface")
# interior 1
interior_u1 = PointwiseInteriorConstraint(
nodes=nodes,
geometry=L1,
outvar={"diffusion_u_1": 0},
bounds={x: (0, 1)},
batch_size=cfg.batch_size.interior_u1,
parameterization=Parameterization(D1_range),
)
domain.add_constraint(interior_u1, "interior_u1")
# interior 2
interior_u2 = PointwiseInteriorConstraint(
nodes=nodes,
geometry=L2,
outvar={"diffusion_u_2": 0},
bounds={x: (1, 2)},
batch_size=cfg.batch_size.interior_u2,
parameterization=Parameterization(D1_range),
)
domain.add_constraint(interior_u2, "interior_u2")
# validation data
x = np.expand_dims(np.linspace(0, 1, 100), axis=-1)
u_1 = x * Tb_validation + (1 - x) * Ta
invar_numpy = {"x": x}
invar_numpy.update({"D1": np.full_like(invar_numpy["x"], D1_validation)})
outvar_numpy = {"u_1": u_1}
    val = PointwiseValidator(nodes=nodes, invar=invar_numpy, true_outvar=outvar_numpy)
domain.add_validator(val, name="Val1")
# make validation data line 2
x = np.expand_dims(np.linspace(1, 2, 100), axis=-1)
u_2 = (x - 1) * Tc + (2 - x) * Tb_validation
invar_numpy = {"x": x}
invar_numpy.update({"D1": np.full_like(invar_numpy["x"], D1_validation)})
outvar_numpy = {"u_2": u_2}
val = PointwiseValidator(nodes=nodes, invar=invar_numpy, true_outvar=outvar_numpy)
domain.add_validator(val, name="Val2")
# make monitors
invar_numpy = {"x": [[1.0]], "D1": [[D1_validation]]}
monitor = PointwiseMonitor(
invar_numpy,
output_names=["u_1__x"],
metrics={"flux_u1": lambda var: torch.mean(var["u_1__x"])},
nodes=nodes,
requires_grad=True,
)
domain.add_monitor(monitor)
monitor = PointwiseMonitor(
invar_numpy,
output_names=["u_2__x"],
metrics={"flux_u2": lambda var: torch.mean(var["u_2__x"])},
nodes=nodes,
requires_grad=True,
)
domain.add_monitor(monitor)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| 9,499 | Python | 29.15873 | 88 | 0.580693 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/diffusion_1d/conf/config.yaml | defaults :
- modulus_default
- arch:
- fully_connected
- scheduler: tf_exponential_lr
- optimizer: adam
- loss: sum
- _self_
arch:
fully_connected:
layer_size: 256
save_filetypes : "vtk,npz"
scheduler:
decay_rate: 0.95
decay_steps: 100
optimizer:
lr : 1e-4
training:
rec_results_freq: 1000
max_steps : 5000
batch_size:
rhs: 2
lhs: 2
interface: 2
interior_u1: 200
interior_u2: 200
| 437 | YAML | 12.272727 | 32 | 0.631579 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/diffusion_1d/conf/config_param.yaml | defaults :
- modulus_default
- arch:
- fully_connected
- scheduler: tf_exponential_lr
- optimizer: adam
- loss: sum
- _self_
arch:
fully_connected:
layer_size: 256
save_filetypes : "vtk,npz"
scheduler:
decay_rate: 0.95
decay_steps: 200
optimizer:
lr : 1e-4
training:
rec_results_freq: 1000
max_steps : 10000
batch_size:
rhs: 10
lhs: 10
interface: 10
interior_u1: 400
interior_u2: 400
| 441 | YAML | 12.393939 | 32 | 0.634921 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/darcy_FNO_lazy.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import modulus
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.key import Key
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import HDF5GridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from utilities import download_FNO_dataset
@modulus.sym.main(config_path="conf", config_name="config_FNO")
def run(cfg: ModulusConfig) -> None:
    # load training/test data
input_keys = [Key("coeff", scale=(7.48360e00, 4.49996e00))]
output_keys = [Key("sol", scale=(5.74634e-03, 3.88433e-03))]
download_FNO_dataset("Darcy_241", outdir="datasets/")
train_path = to_absolute_path(
"datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5"
)
test_path = to_absolute_path(
"datasets/Darcy_241/piececonst_r241_N1024_smooth2.hdf5"
)
# make datasets
train_dataset = HDF5GridDataset(
train_path, invar_keys=["coeff"], outvar_keys=["sol"], n_examples=1000
)
test_dataset = HDF5GridDataset(
test_path, invar_keys=["coeff"], outvar_keys=["sol"], n_examples=100
)
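    # unlike DictGridDataset, HDF5GridDataset keeps the arrays on disk and
    # reads batches lazily from the HDF5 files, so the full dataset never has
    # to fit in host memory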
# make list of nodes to unroll graph on
decoder_net = instantiate_arch(
cfg=cfg.arch.decoder,
output_keys=output_keys,
)
fno = instantiate_arch(
cfg=cfg.arch.fno,
input_keys=input_keys,
decoder_net=decoder_net,
)
    nodes = [fno.make_node("fno")]
# make domain
domain = Domain()
# add constraints to domain
supervised = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset,
batch_size=cfg.batch_size.grid,
num_workers=4, # number of parallel data loaders
)
domain.add_constraint(supervised, "supervised")
# add validator
val = GridValidator(
nodes,
dataset=test_dataset,
batch_size=cfg.batch_size.validation,
plotter=GridValidatorPlotter(n_examples=5),
)
domain.add_validator(val, "test")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| 3,392 | Python | 32.264706 | 79 | 0.704009 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/utilities.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import zipfile
try:
import gdown
except:
gdown = None
import scipy.io
import numpy as np
import h5py
from modulus.sym.hydra import to_absolute_path
# list of FNO dataset URL ids on Google Drive: https://drive.google.com/drive/folders/1UnbQh2WWc6knEHbLn-ZaXrKUZhp7pjt-
_FNO_dataset_ids = {
"Darcy_241": "1ViDqN7nc_VCnMackiXv_d7CHZANAFKzV",
"Darcy_421": "1Z1uxG9R8AdAGJprG5STcphysjm56_0Jf",
}
_FNO_dataset_names = {
"Darcy_241": (
"piececonst_r241_N1024_smooth1.hdf5",
"piececonst_r241_N1024_smooth2.hdf5",
),
"Darcy_421": (
"piececonst_r421_N1024_smooth1.hdf5",
"piececonst_r421_N1024_smooth2.hdf5",
),
}
def load_FNO_dataset(path, input_keys, output_keys, n_examples=None):
"Loads a FNO dataset"
if not path.endswith(".hdf5"):
raise Exception(
".hdf5 file required: please use utilities.preprocess_FNO_mat to convert .mat file"
)
# load data
path = to_absolute_path(path)
data = h5py.File(path, "r")
_ks = [k for k in data.keys() if not k.startswith("__")]
print(f"loaded: {path}\navaliable keys: {_ks}")
# parse data
invar, outvar = dict(), dict()
for d, keys in [(invar, input_keys), (outvar, output_keys)]:
for k in keys:
# get data
x = data[k] # N, C, H, W
# cut examples out
if n_examples is not None:
x = x[:n_examples]
# print out normalisation values
print(f"selected key: {k}, mean: {x.mean():.5e}, std: {x.std():.5e}")
d[k] = x
del data
return (invar, outvar)
def download_FNO_dataset(name, outdir="datasets/"):
"Tries to download FNO dataset from drive"
    if name not in _FNO_dataset_ids:
        raise Exception(
            f"Error: FNO dataset {name} not recognised, select one from {list(_FNO_dataset_ids.keys())}"
        )
    file_id = _FNO_dataset_ids[name]
outdir = to_absolute_path(outdir) + "/"
namedir = f"{outdir}{name}/"
# skip if already exists
exists = True
for file_name in _FNO_dataset_names[name]:
if not os.path.isfile(namedir + file_name):
exists = False
break
if exists:
return
print(f"FNO dataset {name} not detected, downloading dataset")
# Make sure we have gdown installed
if gdown is None:
raise ModuleNotFoundError("gdown package is required to download the dataset!")
# get output directory
os.makedirs(namedir, exist_ok=True)
# download dataset
zippath = f"{outdir}{name}.zip"
    _download_file_from_google_drive(file_id, zippath)
# unzip
with zipfile.ZipFile(zippath, "r") as f:
f.extractall(namedir)
os.remove(zippath)
# preprocess files
for file in os.listdir(namedir):
if file.endswith(".mat"):
matpath = f"{namedir}{file}"
preprocess_FNO_mat(matpath)
os.remove(matpath)
def _download_file_from_google_drive(file_id, path):
    "Downloads a file from Google Drive"
# use gdown library to download file
    gdown.download(id=file_id, output=path)
def preprocess_FNO_mat(path):
"Convert a FNO .mat file to a hdf5 file, adding extra dimension to data arrays"
assert path.endswith(".mat")
data = scipy.io.loadmat(path)
ks = [k for k in data.keys() if not k.startswith("__")]
with h5py.File(path[:-4] + ".hdf5", "w") as f:
for k in ks:
x = np.expand_dims(data[k], axis=1) # N, C, H, W
f.create_dataset(
k, data=x, dtype="float32"
            )  # note: the .hdf5 files are larger than the .mat files because no compression is used
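

# A minimal usage sketch (paths follow the Darcy_241 layout assumed above;
# gdown must be installed for the download step):
#
#   download_FNO_dataset("Darcy_241", outdir="datasets/")
#   invar, outvar = load_FNO_dataset(
#       "datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5",
#       input_keys=["coeff"], output_keys=["sol"], n_examples=100,
#   )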
| 4,794 | Python | 30.339869 | 112 | 0.646016 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/ops.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import torch
import torch.nn.functional as F
def dx(inpt, dx, channel, dim, order=1, padding="zeros"):
"Compute first order numerical derivatives of input tensor"
var = inpt[:, channel : channel + 1, :, :]
# get filter
if order == 1:
ddx1D = torch.Tensor(
[
-0.5,
0.0,
0.5,
]
).to(inpt.device)
elif order == 3:
ddx1D = torch.Tensor(
[
-1.0 / 60.0,
3.0 / 20.0,
-3.0 / 4.0,
0.0,
3.0 / 4.0,
-3.0 / 20.0,
1.0 / 60.0,
]
).to(inpt.device)
ddx3D = torch.reshape(ddx1D, shape=[1, 1] + dim * [1] + [-1] + (1 - dim) * [1])
# apply convolution
if padding == "zeros":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "constant", 0)
elif padding == "replication":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "replicate")
output = F.conv2d(var, ddx3D, padding="valid")
output = (1.0 / dx) * output
if dim == 0:
output = output[:, :, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2]
elif dim == 1:
output = output[:, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2, :]
return output
def ddx(inpt, dx, channel, dim, order=1, padding="zeros"):
"Compute second order numerical derivatives of input tensor"
var = inpt[:, channel : channel + 1, :, :]
# get filter
if order == 1:
ddx1D = torch.Tensor(
[
1.0,
-2.0,
1.0,
]
).to(inpt.device)
elif order == 3:
ddx1D = torch.Tensor(
[
1.0 / 90.0,
-3.0 / 20.0,
3.0 / 2.0,
-49.0 / 18.0,
3.0 / 2.0,
-3.0 / 20.0,
1.0 / 90.0,
]
).to(inpt.device)
ddx3D = torch.reshape(ddx1D, shape=[1, 1] + dim * [1] + [-1] + (1 - dim) * [1])
# apply convolution
if padding == "zeros":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "constant", 0)
elif padding == "replication":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "replicate")
output = F.conv2d(var, ddx3D, padding="valid")
output = (1.0 / dx ** 2) * output
if dim == 0:
output = output[:, :, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2]
elif dim == 1:
output = output[:, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2, :]
return output
| 3,754 | Python | 33.136363 | 88 | 0.531167 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/darcy_AFNO.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import modulus
from modulus.sym.hydra import instantiate_arch
from modulus.sym.hydra.config import ModulusConfig
from modulus.sym.key import Key
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.solver import Solver
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from utilities import download_FNO_dataset, load_FNO_dataset
@modulus.sym.main(config_path="conf", config_name="config_AFNO")
def run(cfg: ModulusConfig) -> None:
    # load training/test data
input_keys = [Key("coeff", scale=(7.48360e00, 4.49996e00))]
output_keys = [Key("sol", scale=(5.74634e-03, 3.88433e-03))]
download_FNO_dataset("Darcy_241", outdir="datasets/")
invar_train, outvar_train = load_FNO_dataset(
"datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5",
[k.name for k in input_keys],
[k.name for k in output_keys],
n_examples=1000,
)
invar_test, outvar_test = load_FNO_dataset(
"datasets/Darcy_241/piececonst_r241_N1024_smooth2.hdf5",
[k.name for k in input_keys],
[k.name for k in output_keys],
n_examples=100,
)
# get training image shape
img_shape = [
next(iter(invar_train.values())).shape[-2],
next(iter(invar_train.values())).shape[-1],
]
# crop out some pixels so that img_shape is divisible by patch_size of AFNO
img_shape = [s - s % cfg.arch.afno.patch_size for s in img_shape]
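    # e.g. with the configured patch_size of 16, the 241 x 241 Darcy grid is
    # cropped to 240 x 240 (since 241 % 16 == 1)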
print(f"cropped img_shape: {img_shape}")
for d in (invar_train, outvar_train, invar_test, outvar_test):
for k in d:
d[k] = d[k][:, :, : img_shape[0], : img_shape[1]]
print(f"{k}: {d[k].shape}")
# make datasets
train_dataset = DictGridDataset(invar_train, outvar_train)
test_dataset = DictGridDataset(invar_test, outvar_test)
# make list of nodes to unroll graph on
model = instantiate_arch(
input_keys=input_keys,
output_keys=output_keys,
cfg=cfg.arch.afno,
img_shape=img_shape,
)
nodes = [model.make_node(name="AFNO", jit=cfg.jit)]
# make domain
domain = Domain()
# add constraints to domain
supervised = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised, "supervised")
# add validator
val = GridValidator(
nodes,
dataset=test_dataset,
batch_size=cfg.batch_size.validation,
plotter=GridValidatorPlotter(n_examples=5),
)
domain.add_validator(val, "test")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| 3,980 | Python | 33.617391 | 79 | 0.688191 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/darcy_FNO.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import modulus
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.key import Key
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from utilities import download_FNO_dataset, load_FNO_dataset
@modulus.sym.main(config_path="conf", config_name="config_FNO")
def run(cfg: ModulusConfig) -> None:
    # load training/test data
input_keys = [Key("coeff", scale=(7.48360e00, 4.49996e00))]
output_keys = [Key("sol", scale=(5.74634e-03, 3.88433e-03))]
download_FNO_dataset("Darcy_241", outdir="datasets/")
invar_train, outvar_train = load_FNO_dataset(
"datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5",
[k.name for k in input_keys],
[k.name for k in output_keys],
n_examples=1000,
)
invar_test, outvar_test = load_FNO_dataset(
"datasets/Darcy_241/piececonst_r241_N1024_smooth2.hdf5",
[k.name for k in input_keys],
[k.name for k in output_keys],
n_examples=100,
)
# make datasets
train_dataset = DictGridDataset(invar_train, outvar_train)
test_dataset = DictGridDataset(invar_test, outvar_test)
    # print out training/test data shapes
for d in (invar_train, outvar_train, invar_test, outvar_test):
for k in d:
print(f"{k}: {d[k].shape}")
# make list of nodes to unroll graph on
model = instantiate_arch(
input_keys=input_keys,
output_keys=output_keys,
cfg=cfg.arch.fno,
)
nodes = model.make_nodes(name="FNO", jit=cfg.jit)
# make domain
domain = Domain()
# add constraints to domain
supervised = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised, "supervised")
# add validator
val = GridValidator(
nodes,
dataset=test_dataset,
batch_size=cfg.batch_size.validation,
plotter=GridValidatorPlotter(n_examples=5),
)
domain.add_validator(val, "test")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| 3,537 | Python | 33.01923 | 72 | 0.700311 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/conf/config_FNO-Backup.yaml | defaults :
- modulus_default
- arch:
- fno
- scheduler: tf_exponential_lr
- optimizer: adam
- loss: sum
- _self_
jit: false
arch:
fno:
dimension: 2
nr_fno_layers: 4
fno_layer_size: 32
fno_modes: 12
padding: 9
output_fc_layer_sizes:
- 128
scheduler:
decay_rate: 0.95
decay_steps: 1000
training:
rec_results_freq : 1000
max_steps : 10000
batch_size:
grid: 32
validation: 32 | 443 | YAML | 12.875 | 32 | 0.604966 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/conf/config_AFNO.yaml | defaults :
- modulus_default
- arch:
- afno
- scheduler: tf_exponential_lr
- optimizer: adam
- loss: sum
- _self_
arch:
afno:
patch_size: 16
embed_dim: 256
depth: 4
num_blocks: 8
scheduler:
decay_rate: 0.95
decay_steps: 1000
training:
rec_results_freq : 1000
max_steps : 10000
batch_size:
grid: 32
validation: 32
| 369 | YAML | 12.214285 | 32 | 0.609756 |
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/conf/config_FNO.yaml | defaults :
- modulus_default
- /arch/[email protected]
- /arch/[email protected]
- scheduler: tf_exponential_lr
- optimizer: adam
- loss: sum
- _self_
arch:
decoder:
input_keys: [z, 32]
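    # [z, 32] is shorthand for a latent key "z" of size 32; it is the hidden
    # feature vector passed from the FNO body into this decoder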
output_keys: sol
nr_layers: 1
layer_size: 32
fno:
input_keys: coeff
dimension: 2
nr_fno_layers: 4
fno_modes: 12
padding: 9
scheduler:
decay_rate: 0.95
decay_steps: 1000
training:
rec_results_freq : 1000
max_steps : 10000
batch_size:
grid: 32
validation: 32 | 532 | YAML | 14.67647 | 47 | 0.633459 |