file_path
stringlengths 20
207
| content
stringlengths 5
3.85M
| size
int64 5
3.85M
| lang
stringclasses 9
values | avg_line_length
float64 1.33
100
| max_line_length
int64 4
993
| alphanum_fraction
float64 0.26
0.93
|
---|---|---|---|---|---|---|
daniel-kun/omni/interface/omni/core/input/abstract_syntax_element.hpp
|
#ifndef OMNI_CORE_ABSTRACT_SYNTAX_ELEMENT_HPP
#define OMNI_CORE_ABSTRACT_SYNTAX_ELEMENT_HPP
#include <omni/core/core.hpp>
#include <omni/core/input/syntax_element.hpp>
#include <omni/core/input/syntax_suggestion.hpp>
#include <vector>
#include <memory>
namespace omni {
namespace core {
namespace input {
/**
A syntax_element that does not stand for concrete text itself, but can be
substituted by any one of a set of other syntax_elements (abstract or
concrete) — its "possible substitutions".
**/
class OMNI_CORE_API abstract_syntax_element : public syntax_element {
public:
    // Creates an element with an empty substitution set; fill it later via
    // setPossibleSubstitutions ().
    abstract_syntax_element ();
    abstract_syntax_element (std::vector <std::shared_ptr <syntax_element>> possibleSubstitutions);
    void setPossibleSubstitutions (std::vector <std::shared_ptr <syntax_element>> possibleSubstitutions);
    std::vector <std::shared_ptr <syntax_element>> getPossibleSubstitutions () const;
    // Parameter name typo fixed (was "alreadyVisistedElements"). The set
    // presumably guards against revisiting elements during a suggestion run —
    // confirm against the implementation.
    std::vector <syntax_suggestion> suggestImpl (std::string input, std::size_t templatePosition, std::set <syntax_element *> alreadyVisitedElements) override;
private:
    std::vector <std::shared_ptr <syntax_element>> _possibleSubstitutions;
};
} // namespace input
} // namespace core
} // namespace omni
#endif // include guard
| 1,057 |
C++
| 28.388888 | 160 | 0.74456 |
daniel-kun/omni/interface/omni/core/input/syntax_tree_parser_xml.hpp
|
#ifndef OMNI_CORE_SYNTAX_TREE_PARSER_XML_HPP
#define OMNI_CORE_SYNTAX_TREE_PARSER_XML_HPP
#include <omni/core/core.hpp>
#include <istream>
#include <memory>
namespace omni {
namespace core {
namespace input {
class syntax_element;
/**
Parses an XML file and emits a root syntax_element (usually for the 'statement' model entity).
The XML syntax is as follows:
<syntax> <!-- Obligatory root node -->
<abstract name="some_syntax_element"> <!-- read as an abstract_syntax_element, contains other abstract_syntax_elements or concrete_syntax_elements. -->
<concrete name="some_other_name"> <!-- read as a concrete_syntax_element, contains template_elements. -->
<fixed text="asdf"/> <!-- a fixed_template_element with the text "asdf" -->
<repeater prefix=","> <!-- a repeater_template_element that repeats its context and prefixes "," in front of every other repeated element. -->
<syntax name="foo" type="some_syntax_element"/> <!-- A placeholder for a syntax_element (abstract or concrete) of the given type. -->
</repeater>
</concrete>
</abstract>
</syntax>
**/
class OMNI_CORE_API syntax_tree_parser_xml {
public:
    /**
    Parses the XML document read from `stream' (format described above) and
    returns the root syntax_element of the resulting tree.
    **/
    static std::shared_ptr <syntax_element> parse (std::istream & stream);
};
}
}
}
#endif // include guard
| 1,306 |
C++
| 31.674999 | 155 | 0.67611 |
daniel-kun/omni/interface/omni/core/input/variable_template_element.hpp
|
#ifndef OMNI_CORE_VARIABLE_TEMPLATE_ELEMENT_HPP
#define OMNI_CORE_VARIABLE_TEMPLATE_ELEMENT_HPP
#include <omni/core/core.hpp>
#include <omni/core/input/template_element.hpp>
#include <omni/core/input/template_variables.hpp>
namespace omni {
namespace core {
namespace input {
class variable_template_provider;
/**
A template_element whose suggestions are supplied by an external
variable_template_provider (presumably the variable names currently in
scope — confirm against variable_template_provider).
**/
class OMNI_CORE_API variable_template_element : public template_element {
public:
    // Only a reference to `provider' is stored, so the provider must outlive
    // this element.
    variable_template_element (
        syntax_element & parent,
        std::size_t templateIndex,
        variable_template_provider & provider);
    std::vector <std::string> suggest (std::string input) override;
    void visit (template_visitor & visitor) override;
private:
    variable_template_provider & _provider; // non-owning; see constructor note
};
} // namespace input
} // namespace core
} // namespace omni
#endif // include guard
| 819 |
C++
| 22.428571 | 73 | 0.722833 |
daniel-kun/omni/interface/omni/core/input/fixed_template_element.hpp
|
#ifndef OMNI_CORE_FIXED_TEMPLATE_ELEMENT_HPP
#define OMNI_CORE_FIXED_TEMPLATE_ELEMENT_HPP
#include <omni/core/core.hpp>
#include <omni/core/input/template_element.hpp>
#include <string>
namespace omni {
namespace core {
namespace input {
/**
A template_element that carries a fixed piece of text (e.g. a keyword or
punctuation) inside a syntax template.
**/
class OMNI_CORE_API fixed_template_element : public template_element {
public:
    // `text' is the fixed text this element represents.
    fixed_template_element (syntax_element & parent, std::size_t templateIndex, std::string text);
    // Returns the fixed text passed at construction.
    std::string getText () const;
    std::vector <std::string> suggest (std::string input) override;
    void visit (template_visitor & visitor) override;
private:
    std::string _text;
};
} // namespace input
} // namespace core
} // namespace omni
#endif // include guard
| 714 |
C++
| 20.029411 | 98 | 0.710084 |
daniel-kun/omni/interface/omni/core/input/syntax_suggestion.hpp
|
#ifndef OMNI_CORE_SYNTAX_SUGGESTION_HPP
#define OMNI_CORE_SYNTAX_SUGGESTION_HPP
#include <omni/core/core.hpp>
#include <memory>
#include <string>
namespace omni {
namespace core {
namespace input {
class syntax_element;
/**
A single completion suggestion: the syntax_element it originates from, an
index into that element's template, and the suggested text itself. Plain
value class with public members.
**/
class OMNI_CORE_API syntax_suggestion {
public:
    syntax_suggestion (syntax_element & syntaxElement, std::size_t templateIndex, std::string text);
    syntax_element * syntaxElement; // non-owning; the element this suggestion refers to
    std::size_t templateIndex;      // index into syntaxElement's template (cf. syntax_element::suggest)
    std::string text;               // the text being suggested
};
} // namespace input
} // namespace core
} // namespace omni
#endif // include guard
| 565 |
C++
| 17.866666 | 100 | 0.713274 |
daniel-kun/omni/interface/omni/core/input/concrete_syntax_element.hpp
|
#ifndef OMNI_CORE_CONCRETE_SYNTAX_ELEMENT_HPP
#define OMNI_CORE_CONCRETE_SYNTAX_ELEMENT_HPP
#include <omni/core/core.hpp>
#include <omni/core/input/syntax_element.hpp>
#include <vector>
#include <memory>
namespace omni {
namespace core {
namespace input {
class template_element;
/**
A syntax_element made up of a concrete sequence of template_elements
(fixed text, placeholders, repeaters, ...).
**/
class OMNI_CORE_API concrete_syntax_element : public syntax_element {
public:
    // `indentationModifier' is stored verbatim; its exact semantics live in
    // the implementation (presumably an indentation delta for rendering).
    concrete_syntax_element (int indentationModifier = 0);
    std::shared_ptr <template_element> templateElementAt (std::size_t templatePosition) override;
    std::size_t templateElementCount () const override;
    // Replaces the element's template sequence.
    void setTemplates (std::vector <std::shared_ptr <template_element>> templates);
    // Parameter name typo fixed (was "alreadyVisistedElements").
    std::vector <syntax_suggestion> suggestImpl (std::string input, std::size_t templatePosition, std::set <syntax_element *> alreadyVisitedElements) override;
private:
    std::vector <std::shared_ptr <template_element>> _templates;
    int _indentationModifier;
};
} // namespace input
} // namespace core
} // namespace omni
#endif // include guard
| 1,026 |
C++
| 26.026315 | 160 | 0.739766 |
daniel-kun/omni/interface/omni/core/input/syntax_element.hpp
|
#ifndef OMNI_CORE_SYNTAX_ELEMENT_HPP
#define OMNI_CORE_SYNTAX_ELEMENT_HPP
#include <omni/core/core.hpp>
#include <omni/core/input/syntax_suggestion.hpp>
#include <vector>
#include <memory>
#include <set>
namespace omni {
namespace core {
namespace input {
class template_element;
/**
Abstract base class of all syntax elements (see abstract_syntax_element and
concrete_syntax_element). A syntax_element has a name and produces
syntax_suggestions for a given user input via suggest ().
**/
class OMNI_CORE_API syntax_element {
public:
    // Virtual destructor added: this class has virtual member functions and
    // is handled polymorphically via shared_ptr <syntax_element>.
    virtual ~ syntax_element () = default;
    void setName (std::string const & name);
    std::string getName () const;
    virtual std::shared_ptr <template_element> templateElementAt (std::size_t templatePosition);
    virtual std::size_t templateElementCount () const;
    // Public entry point for suggestions, starting at `templateIndex'.
    std::vector <syntax_suggestion> suggest (std::string input, std::size_t templateIndex = 0u);
    // Parameter name typo fixed (was "alreadyVisistedElements"). The set
    // presumably tracks elements already seen in this run — confirm in the
    // implementations.
    virtual std::vector <syntax_suggestion> suggestImpl (std::string input, std::size_t templatePosition, std::set <syntax_element *> alreadyVisitedElements) = 0;
private:
    std::string _name;
};
} // namespace input
} // namespace core
} // namespace omni
#endif // include guard
| 955 |
C++
| 23.51282 | 163 | 0.719372 |
daniel-kun/omni/interface/omni/core/input/syntax_template_element.hpp
|
#ifndef OMNI_CORE_SYNTAX_TEMPLATE_ELEMENT_HPP
#define OMNI_CORE_SYNTAX_TEMPLATE_ELEMENT_HPP
#include <omni/core/core.hpp>
#include <omni/core/input/template_element.hpp>
#include <memory>
namespace omni {
namespace core {
namespace input {
class syntax_element;
/**
A template_element that acts as a placeholder for another syntax_element
(abstract or concrete) inside a template.
**/
class OMNI_CORE_API syntax_template_element : public template_element {
public:
    // `target' is the syntax_element this placeholder stands for.
    syntax_template_element (syntax_element & parent, std::size_t templateIndex, std::shared_ptr <syntax_element> target);
    std::vector <std::string> suggest (std::string input) override;
    // `override' added: this overrides template_element::dive (). Returns the
    // embedded syntax_element.
    std::shared_ptr <syntax_element> dive () override;
    std::shared_ptr <syntax_element> getSyntaxElement () const;
    void visit (template_visitor & visitor) override;
private:
    std::shared_ptr <syntax_element> _target;
};
} // namespace input
} // namespace core
} // namespace omni
#endif // include guard
| 859 |
C++
| 22.243243 | 122 | 0.720605 |
daniel-kun/omni/interface/omni/core/input/repeater_template_element.hpp
|
#ifndef OMNI_CORE_REPEATER_TEMPLATE_ELEMENT_HPP
#define OMNI_CORE_REPEATER_TEMPLATE_ELEMENT_HPP
#include <omni/core/core.hpp>
#include <omni/core/input/template_element.hpp>
#include <memory>
namespace omni {
namespace core {
namespace input {
/**
A template_element that repeats a single other template_element.
NOTE(review): the constructor receives the element to repeat, yet no member
for it is visible in this header, and no (parent, templateIndex) arguments
are forwarded to the template_element base — confirm against the
implementation.
**/
class OMNI_CORE_API repeater_template_element : public template_element {
public:
    repeater_template_element (std::shared_ptr <template_element> elementToBeRepeated);
    void visit (template_visitor & visitor) override;
};
} // namespace input
} // namespace core
} // namespace omni
#endif // include guard
| 571 |
C++
| 20.185184 | 87 | 0.742557 |
daniel-kun/omni/interface/omni/core/input/template_element.hpp
|
#ifndef OMNI_CORE_TEMPLATE_ELEMENT_HPP
#define OMNI_CORE_TEMPLATE_ELEMENT_HPP
#include <omni/core/core.hpp>
#include <omni/core/input/syntax_suggestion.hpp>
#include <string>
#include <vector>
namespace omni {
namespace core {
namespace input {
class template_visitor;
/**
**/
class OMNI_CORE_API template_element {
public:
template_element (
syntax_element & parent,
std::size_t templateIndex);
virtual std::vector <std::string> suggest (std::string input) = 0;
virtual std::shared_ptr <syntax_element> dive ();
virtual void visit (template_visitor & visitor) = 0;
protected:
syntax_element & _parent;
std::size_t _templateIndex;
};
} // namespace input
} // namespace core
} // namespace omni
#endif // include guard
| 769 |
C++
| 18.743589 | 70 | 0.690507 |
daniel-kun/omni/interface/omni/core/input/template_visitor.hpp
|
#ifndef OMNI_CORE_INPUT_TEMPLATE_VISITOR_HPP
#define OMNI_CORE_INPUT_TEMPLATE_VISITOR_HPP
#include <omni/core/core.hpp>
namespace omni {
namespace core {
namespace input {
class syntax_template_element;
class variable_template_element;
class regex_template_element;
class fixed_template_element;
class repeater_template_element;
/**
**/
class OMNI_CORE_API template_visitor {
public:
virtual void visitSyntaxTemplateElement (syntax_template_element & element) = 0;
virtual void visitVariableTemplateElement (variable_template_element & element) = 0;
virtual void visitRegexTemplateElement (regex_template_element & element) = 0;
virtual void visitFixedTemplateElement (fixed_template_element & element) = 0;
virtual void visitRepeaterTemplateElement (repeater_template_element & element) = 0;
};
} // namespace input
} // namespace core
} // namespace omni
#endif // include guard
| 924 |
C++
| 28.838709 | 90 | 0.753247 |
daniel-kun/omni/interface/omni/core/input/regex_template_element.hpp
|
#ifndef OMNI_CORE_REGEX_TEMPLATE_ELEMENT_HPP
#define OMNI_CORE_REGEX_TEMPLATE_ELEMENT_HPP
#include <omni/core/core.hpp>
#include <omni/core/input/template_element.hpp>
#include <string>
namespace omni {
namespace core {
namespace input {
/**
A template_element intended to match input against a regular expression.
NOTE(review): the constructor only receives the regex string — no member for
it is visible here, no (parent, templateIndex) pair reaches the
template_element base, and no suggest () override is declared, which would
leave the class abstract — confirm against the implementation.
**/
class OMNI_CORE_API regex_template_element : public template_element {
public:
    regex_template_element (std::string regex);
    void visit (template_visitor & visitor) override;
};
} // namespace input
} // namespace core
} // namespace omni
#endif // include guard
| 522 |
C++
| 18.37037 | 70 | 0.727969 |
daniel-kun/omni/interface/omni/tests/test_utils.hpp
|
#ifndef OMNI_TESTS_TEST_UTILS_HPP
#define OMNI_TESTS_TEST_UTILS_HPP
#include <omni/tests/test_file_manager.hpp>
#include <omni/core/model/function_call_expression.hpp>
#include <omni/core/model/return_statement.hpp>
#include <omni/core/not_implemented_error.hpp>
#include <boost/format.hpp>
#include <memory>
#include <string>
#include <vector>
#ifdef WIN32
#include <Windows.h>
#else
#include <dlfcn.h>
#endif
namespace omni {
namespace core {
namespace model {
class function;
}
}
}
namespace omni {
namespace tests {
// Emits `func' into a shared library on disk and returns the library's path.
// `functionName' receives the name under which the emitted function can be
// looked up in the library (used with GetProcAddress/dlsym below).
boost::filesystem::path emitSharedLibraryWithFunction (std::shared_ptr <omni::core::model::function> func,
    omni::tests::test_file_manager & testFileManager,
    std::string const & fileBaseName,
    std::string & functionName);
// Emits `func' into a shared library, loads it, runs the emitted function and
// returns its result. Defined below in this header.
template <typename Return>
Return runFunction (std::shared_ptr <omni::core::model::function> func,
    omni::tests::test_file_manager & testFileManager,
    std::string const & fileBaseName);
// Presumably checks that metaInfo's children equal the given set — inferred
// from the name; confirm against the implementation.
bool checkMetaInfoChildren (const omni::core::model::meta_info & metaInfo, std::set <const omni::core::model::meta_info *> children);
} // namespace tests
} // namespace omni
/**
Emits `func' into a temporary shared library, loads that library, runs the
emitted function and returns its result.
Build by-products are registered with `testFileManager' so they are removed
after the test finishes.
Throws omni::core::logic_error when the library can not be loaded, when the
function can not be found in it, or (POSIX only) when dlclose fails.
**/
template <typename Return>
Return omni::tests::runFunction (std::shared_ptr <omni::core::model::function> func,
    omni::tests::test_file_manager & testFileManager,
    std::string const & fileBaseName)
{
    std::string functionName;
    boost::filesystem::path sharedLibraryPath = emitSharedLibraryWithFunction (func, testFileManager, fileBaseName, functionName);
    // Register the .exp and .lib by-products for deletion after the test
    // finishes. (replace_extension mutates expPath in place, so both calls
    // yield the library's base name with the respective extension.)
    boost::filesystem::path expPath = sharedLibraryPath;
    testFileManager.getTestFileName (expPath.replace_extension (".exp").filename ().string ());
    testFileManager.getTestFileName (expPath.replace_extension (".lib").filename ().string ());
    typedef Return (* testFunc) ();
#ifdef WIN32
    HMODULE lib = ::LoadLibraryA (sharedLibraryPath.string ().c_str ());
    if (lib != nullptr) {
#pragma warning(push)
#pragma warning(disable:4191) // unsafe conversion from FARPROC to testFunc
        testFunc f = reinterpret_cast <testFunc> (::GetProcAddress(lib, functionName.c_str ()));
#pragma warning(pop)
        if (f != nullptr) {
            Return result = (*f)();
            ::FreeLibrary (lib);
            return result;
        } else {
            ::FreeLibrary (lib);
            throw omni::core::logic_error (__FILE__, __FUNCTION__, __LINE__,
                "Test function could not be found in temporarily created shared object file \"" + sharedLibraryPath.string () + "\".");
        }
    }
    throw omni::core::logic_error (__FILE__, __FUNCTION__, __LINE__,
        "Test shared object could not be loaded: \"" + sharedLibraryPath.string () + "\".");
#else
    void * lib = dlopen (sharedLibraryPath.string ().c_str (), RTLD_NOW);
    if (lib != nullptr) {
        testFunc f = reinterpret_cast <testFunc> (dlsym (lib, functionName.c_str ()));
        if (f != nullptr) {
            Return result = (*f)();
            int error = dlclose (lib);
            if (error != 0) {
                // Error message fixed: the failing call is dlclose, not dlsym.
                throw omni::core::logic_error (__FILE__, __FUNCTION__, __LINE__,
                    (boost::format ("dlclose returned %1%.") % error).str ());
            }
            return result;
        } else {
            int error = dlclose (lib);
            if (error != 0) {
                // Error message fixed: the failing call is dlclose, not dlsym.
                throw omni::core::logic_error (__FILE__, __FUNCTION__, __LINE__,
                    (boost::format ("dlclose returned %1%.") % error).str ());
            }
            throw omni::core::logic_error (__FILE__, __FUNCTION__, __LINE__,
                "Test function could not be found in temporarily created shared object file \"" + sharedLibraryPath.string () + "\".");
        }
    }
    throw omni::core::logic_error (__FILE__, __FUNCTION__, __LINE__,
        "Test shared object could not be loaded: \"" + sharedLibraryPath.string () + "\".");
#endif
}
#endif // include guard
| 4,809 |
C++
| 40.465517 | 163 | 0.580994 |
daniel-kun/omni/interface/omni/tests/test_file_manager.hpp
|
#ifndef OMNI_TESTS_TEST_FILE_MANAGER_HPP
#define OMNI_TESTS_TEST_FILE_MANAGER_HPP
#include <boost/filesystem.hpp>
#include <string>
#include <vector>
namespace omni {
namespace tests {
/**
The test_file_manager keeps track of files created during test sessions and removes them after
the test has finished.
**/
class test_file_manager {
public:
    // Removes the files that were registered for automatic deletion.
    ~ test_file_manager ();
    // Returns a path for `fileName' to be used by the test; when `autoDelete'
    // is true, the file is registered for removal when this manager is
    // destroyed.
    boost::filesystem::path getTestFileName (std::string const & fileName, bool autoDelete = true);
private:
    std::vector <boost::filesystem::path> _files; // paths scheduled for deletion
};
} // namespace tests
} // namespace omni
#endif
| 638 |
C++
| 21.034482 | 103 | 0.670846 |
daniel-kun/omni/interface/rapidxml/rapidxml_utils.hpp
|
#ifndef RAPIDXML_UTILS_HPP_INCLUDED
#define RAPIDXML_UTILS_HPP_INCLUDED
// Copyright (C) 2006, 2009 Marcin Kalicinski
// Version 1.13
// Revision $DateTime: 2009/05/13 01:46:17 $
//! \file rapidxml_utils.hpp This file contains high-level rapidxml utilities that can be useful
//! in certain simple scenarios. They should probably not be used if maximizing performance is the main objective.
#include "rapidxml.hpp"
#include <vector>
#include <string>
#include <fstream>
#include <stdexcept>
namespace rapidxml
{
//! Represents data loaded from a file
template<class Ch = char>
class file
{
public:
    //! Loads file into the memory. Data will be automatically destroyed by the destructor.
    //! \param filename Filename to load.
    file(const char *filename)
    {
        using namespace std;
        // Open stream
        basic_ifstream<Ch> stream(filename, ios::binary);
        if (!stream)
            throw runtime_error(string("cannot open file ") + filename);
        stream.unsetf(ios::skipws);
        // Determine stream size
        stream.seekg(0, ios::end);
        size_t size = stream.tellg();
        stream.seekg(0);
        // Load data and add terminating 0
        // (the terminator makes data() directly usable as zero-terminated text)
        m_data.resize(size + 1);
        stream.read(&m_data.front(), static_cast<streamsize>(size));
        m_data[size] = 0;
    }
    //! Loads file into the memory. Data will be automatically destroyed by the destructor
    //! \param stream Stream to load from
    file(std::basic_istream<Ch> &stream)
    {
        using namespace std;
        // Load data and add terminating 0
        stream.unsetf(ios::skipws);
        m_data.assign(istreambuf_iterator<Ch>(stream), istreambuf_iterator<Ch>());
        if (stream.fail() || stream.bad())
            throw runtime_error("error reading stream");
        m_data.push_back(0);
    }
    //! Gets file data.
    //! \return Pointer to data of file.
    Ch *data()
    {
        return &m_data.front();
    }
    //! Gets file data.
    //! \return Pointer to data of file.
    const Ch *data() const
    {
        return &m_data.front();
    }
    //! Gets file data size.
    //! NOTE: includes the terminating 0 appended by the constructors.
    //! \return Size of file data, in characters.
    std::size_t size() const
    {
        return m_data.size();
    }
private:
    std::vector<Ch> m_data;   // File data
};
//! Counts children of node. Time complexity is O(n).
//! \param node Node whose children are counted.
//! \return Number of children of node
template<class Ch>
inline std::size_t count_children(xml_node<Ch> *node)
{
    std::size_t result = 0;
    for (xml_node<Ch> *child = node->first_node(); child; child = child->next_sibling())
        ++result;
    return result;
}
//! Counts attributes of node. Time complexity is O(n).
//! \param node Node whose attributes are counted.
//! \return Number of attributes of node
template<class Ch>
inline std::size_t count_attributes(xml_node<Ch> *node)
{
    std::size_t result = 0;
    for (xml_attribute<Ch> *attr = node->first_attribute(); attr; attr = attr->next_attribute())
        ++result;
    return result;
}
}
#endif
| 3,417 |
C++
| 26.788618 | 114 | 0.546678 |
daniel-kun/omni/interface/rapidxml/rapidxml.hpp
|
#ifndef RAPIDXML_HPP_INCLUDED
#define RAPIDXML_HPP_INCLUDED
// Copyright (C) 2006, 2009 Marcin Kalicinski
// Version 1.13
// Revision $DateTime: 2009/05/13 01:46:17 $
//! \file rapidxml.hpp This file contains rapidxml parser and DOM implementation
// If standard library is disabled, user must provide implementations of required functions and typedefs
#if !defined(RAPIDXML_NO_STDLIB)
#include <cstdlib> // For std::size_t
#include <cassert> // For assert
#include <new> // For placement new
#endif
// On MSVC, disable "conditional expression is constant" warning (level 4).
// This warning is almost impossible to avoid with certain types of templated code
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4127) // Conditional expression is constant
#endif
///////////////////////////////////////////////////////////////////////////
// RAPIDXML_PARSE_ERROR
#if defined(RAPIDXML_NO_EXCEPTIONS)
#define RAPIDXML_PARSE_ERROR(what, where) { parse_error_handler(what, where); assert(0); }
namespace rapidxml
{
//! When exceptions are disabled by defining RAPIDXML_NO_EXCEPTIONS,
//! this function is called to notify user about the error.
//! It must be defined by the user.
//! <br><br>
//! This function cannot return. If it does, the results are undefined.
//! <br><br>
//! A very simple definition might look like that:
//! <pre>
//! void %rapidxml::%parse_error_handler(const char *what, void *where)
//! {
//! std::cout << "Parse error: " << what << "\n";
//! std::abort();
//! }
//! </pre>
//! \param what Human readable description of the error.
//! \param where Pointer to character data where error was detected.
void parse_error_handler(const char *what, void *where);
}
#else
#include <exception> // For std::exception
#define RAPIDXML_PARSE_ERROR(what, where) throw parse_error(what, where)
namespace rapidxml
{
//! Exception thrown by the parser whenever an error occurs.
//! what() yields a human-readable description of the error; where<Ch>()
//! yields a pointer to the position within the source text at which the
//! error was detected.
//! <br><br>
//! If throwing exceptions by the parser is undesirable, it can be disabled
//! by defining RAPIDXML_NO_EXCEPTIONS macro before rapidxml.hpp is included;
//! the parser then calls rapidxml::parse_error_handler() instead, which must
//! be defined by the user.
//! <br><br>
//! This class derives from <code>std::exception</code> class.
class parse_error: public std::exception
{
public:
    //! Constructs parse error from a description and an error location.
    parse_error(const char *what, void *where)
        : m_description(what)
        , m_location(where)
    {
    }
    //! Gets human readable description of error.
    //! \return Pointer to null terminated description of the error.
    virtual const char *what() const throw()
    {
        return m_description;
    }
    //! Gets pointer to character data where error happened.
    //! Ch should be the same as char type of xml_document that produced the error.
    //! \return Pointer to location within the parsed string where error occured.
    template<class Ch>
    Ch *where() const
    {
        return reinterpret_cast<Ch *>(m_location);
    }
private:
    const char *m_description;  // static description string (not owned)
    void *m_location;           // position within the source text (not owned)
};
}
#endif
///////////////////////////////////////////////////////////////////////////
// Pool sizes
#ifndef RAPIDXML_STATIC_POOL_SIZE
// Size of static memory block of memory_pool.
// Define RAPIDXML_STATIC_POOL_SIZE before including rapidxml.hpp if you want to override the default value.
// No dynamic memory allocations are performed by memory_pool until static memory is exhausted.
#define RAPIDXML_STATIC_POOL_SIZE (64 * 1024)
#endif
#ifndef RAPIDXML_DYNAMIC_POOL_SIZE
// Size of dynamic memory block of memory_pool.
// Define RAPIDXML_DYNAMIC_POOL_SIZE before including rapidxml.hpp if you want to override the default value.
// After the static block is exhausted, dynamic blocks with approximately this size are allocated by memory_pool.
#define RAPIDXML_DYNAMIC_POOL_SIZE (64 * 1024)
#endif
#ifndef RAPIDXML_ALIGNMENT
// Memory allocation alignment.
// Define RAPIDXML_ALIGNMENT before including rapidxml.hpp if you want to override the default value, which is the size of pointer.
// All memory allocations for nodes, attributes and strings will be aligned to this value.
// This must be a power of 2 and at least 1, otherwise memory_pool will not work.
#define RAPIDXML_ALIGNMENT sizeof(void *)
#endif
namespace rapidxml
{
// Forward declarations
template<class Ch> class xml_node;
template<class Ch> class xml_attribute;
template<class Ch> class xml_document;
//! Enumeration listing all node types produced by the parser.
//! Use xml_node::type() function to query node type.
enum node_type
{
node_document, //!< A document node. Name and value are empty.
node_element, //!< An element node. Name contains element name. Value contains text of first data node.
node_data, //!< A data node. Name is empty. Value contains data text.
node_cdata, //!< A CDATA node. Name is empty. Value contains data text.
node_comment, //!< A comment node. Name is empty. Value contains comment text.
node_declaration, //!< A declaration node. Name and value are empty. Declaration parameters (version, encoding and standalone) are in node attributes.
node_doctype, //!< A DOCTYPE node. Name is empty. Value contains DOCTYPE text.
node_pi //!< A PI node. Name contains target. Value contains instructions.
};
///////////////////////////////////////////////////////////////////////
// Parsing flags
//! Parse flag instructing the parser to not create data nodes.
//! Text of first data node will still be placed in value of parent element, unless rapidxml::parse_no_element_values flag is also specified.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_no_data_nodes = 0x1;
//! Parse flag instructing the parser to not use text of first data node as a value of parent element.
//! Can be combined with other flags by use of | operator.
//! Note that child data nodes of element node take precendence over its value when printing.
//! That is, if element has one or more child data nodes <em>and</em> a value, the value will be ignored.
//! Use rapidxml::parse_no_data_nodes flag to prevent creation of data nodes if you want to manipulate data using values of elements.
//! <br><br>
//! See xml_document::parse() function.
const int parse_no_element_values = 0x2;
//! Parse flag instructing the parser to not place zero terminators after strings in the source text.
//! By default zero terminators are placed, modifying source text.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_no_string_terminators = 0x4;
//! Parse flag instructing the parser to not translate entities in the source text.
//! By default entities are translated, modifying source text.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_no_entity_translation = 0x8;
//! Parse flag instructing the parser to disable UTF-8 handling and assume plain 8 bit characters.
//! By default, UTF-8 handling is enabled.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_no_utf8 = 0x10;
//! Parse flag instructing the parser to create XML declaration node.
//! By default, declaration node is not created.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_declaration_node = 0x20;
//! Parse flag instructing the parser to create comments nodes.
//! By default, comment nodes are not created.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_comment_nodes = 0x40;
//! Parse flag instructing the parser to create DOCTYPE node.
//! By default, doctype node is not created.
//! Although W3C specification allows at most one DOCTYPE node, RapidXml will silently accept documents with more than one.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_doctype_node = 0x80;
//! Parse flag instructing the parser to create PI nodes.
//! By default, PI nodes are not created.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_pi_nodes = 0x100;
//! Parse flag instructing the parser to validate closing tag names.
//! If not set, name inside closing tag is irrelevant to the parser.
//! By default, closing tags are not validated.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_validate_closing_tags = 0x200;
//! Parse flag instructing the parser to trim all leading and trailing whitespace of data nodes.
//! By default, whitespace is not trimmed.
//! This flag does not cause the parser to modify source text.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_trim_whitespace = 0x400;
//! Parse flag instructing the parser to condense all whitespace runs of data nodes to a single space character.
//! Trimming of leading and trailing whitespace of data is controlled by rapidxml::parse_trim_whitespace flag.
//! By default, whitespace is not normalized.
//! If this flag is specified, source text will be modified.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_normalize_whitespace = 0x800;
// Compound flags
//! Parse flags which represent default behaviour of the parser.
//! This is always equal to 0, so that all other flags can be simply ored together.
//! Normally there is no need to inconveniently disable flags by anding with their negated (~) values.
//! This also means that meaning of each flag is a <i>negation</i> of the default setting.
//! For example, if flag name is rapidxml::parse_no_utf8, it means that utf-8 is <i>enabled</i> by default,
//! and using the flag will disable it.
//! <br><br>
//! See xml_document::parse() function.
const int parse_default = 0;
//! A combination of parse flags that forbids any modifications of the source text.
//! This also results in faster parsing. However, note that the following will occur:
//! <ul>
//! <li>names and values of nodes will not be zero terminated, you have to use xml_base::name_size() and xml_base::value_size() functions to determine where name and value ends</li>
//! <li>entities will not be translated</li>
//! <li>whitespace will not be normalized</li>
//! </ul>
//! See xml_document::parse() function.
const int parse_non_destructive = parse_no_string_terminators | parse_no_entity_translation;
//! A combination of parse flags resulting in fastest possible parsing, without sacrificing important data.
//! <br><br>
//! See xml_document::parse() function.
const int parse_fastest = parse_non_destructive | parse_no_data_nodes;
//! A combination of parse flags resulting in largest amount of data being extracted.
//! This usually results in slowest parsing.
//! <br><br>
//! See xml_document::parse() function.
const int parse_full = parse_declaration_node | parse_comment_nodes | parse_doctype_node | parse_pi_nodes | parse_validate_closing_tags;
///////////////////////////////////////////////////////////////////////
// Internals
//! \cond internal
namespace internal
{
// Struct that contains lookup tables for the parser
// It must be a template to allow correct linking (because it has static data members, which are defined in a header file).
// Each table is indexed by an unsigned char; lookup_upcase maps a character to
// its ASCII upper-case equivalent, the others presumably classify characters
// for the parser — see their definitions later in this header.
template<int Dummy>
struct lookup_tables
{
    static const unsigned char lookup_whitespace[256];              // Whitespace table
    static const unsigned char lookup_node_name[256];               // Node name table
    static const unsigned char lookup_text[256];                    // Text table
    static const unsigned char lookup_text_pure_no_ws[256];         // Text table
    static const unsigned char lookup_text_pure_with_ws[256];       // Text table
    static const unsigned char lookup_attribute_name[256];          // Attribute name table
    static const unsigned char lookup_attribute_data_1[256];        // Attribute data table with single quote
    static const unsigned char lookup_attribute_data_1_pure[256];   // Attribute data table with single quote
    static const unsigned char lookup_attribute_data_2[256];        // Attribute data table with double quotes
    static const unsigned char lookup_attribute_data_2_pure[256];   // Attribute data table with double quotes
    static const unsigned char lookup_digits[256];                  // Digits
    static const unsigned char lookup_upcase[256];                  // To uppercase conversion table for ASCII characters
};
// Returns the length of a zero-terminated string
template<class Ch>
inline std::size_t measure(const Ch *p)
{
    std::size_t length = 0;
    while (p[length])
        ++length;
    return length;
}
// Compares two strings for equality; when case_sensitive is false, both
// characters are mapped through the ASCII upcase lookup table first.
template<class Ch>
inline bool compare(const Ch *p1, std::size_t size1, const Ch *p2, std::size_t size2, bool case_sensitive)
{
    if (size1 != size2)
        return false;
    if (case_sensitive)
    {
        for (std::size_t i = 0; i < size1; ++i)
            if (p1[i] != p2[i])
                return false;
        return true;
    }
    for (std::size_t i = 0; i < size1; ++i)
    {
        const unsigned char c1 = static_cast<unsigned char>(p1[i]);
        const unsigned char c2 = static_cast<unsigned char>(p2[i]);
        if (lookup_tables<0>::lookup_upcase[c1] != lookup_tables<0>::lookup_upcase[c2])
            return false;
    }
    return true;
}
}
//! \endcond
///////////////////////////////////////////////////////////////////////
// Memory pool
//! This class is used by the parser to create new nodes and attributes, without overheads of dynamic memory allocation.
//! In most cases, you will not need to use this class directly.
//! However, if you need to create nodes manually or modify names/values of nodes,
//! you are encouraged to use memory_pool of relevant xml_document to allocate the memory.
//! Not only is this faster than allocating them by using <code>new</code> operator,
//! but also their lifetime will be tied to the lifetime of document,
    //! possibly simplifying memory management.
//! <br><br>
//! Call allocate_node() or allocate_attribute() functions to obtain new nodes or attributes from the pool.
//! You can also call allocate_string() function to allocate strings.
//! Such strings can then be used as names or values of nodes without worrying about their lifetime.
//! Note that there is no <code>free()</code> function -- all allocations are freed at once when clear() function is called,
//! or when the pool is destroyed.
//! <br><br>
//! It is also possible to create a standalone memory_pool, and use it
//! to allocate nodes, whose lifetime will not be tied to any document.
//! <br><br>
//! Pool maintains <code>RAPIDXML_STATIC_POOL_SIZE</code> bytes of statically allocated memory.
//! Until static memory is exhausted, no dynamic memory allocations are done.
//! When static memory is exhausted, pool allocates additional blocks of memory of size <code>RAPIDXML_DYNAMIC_POOL_SIZE</code> each,
//! by using global <code>new[]</code> and <code>delete[]</code> operators.
//! This behaviour can be changed by setting custom allocation routines.
//! Use set_allocator() function to set them.
//! <br><br>
//! Allocations for nodes, attributes and strings are aligned at <code>RAPIDXML_ALIGNMENT</code> bytes.
//! This value defaults to the size of pointer on target architecture.
//! <br><br>
//! To obtain absolutely top performance from the parser,
//! it is important that all nodes are allocated from a single, contiguous block of memory.
//! Otherwise, cache misses when jumping between two (or more) disjoint blocks of memory can slow down parsing quite considerably.
//! If required, you can tweak <code>RAPIDXML_STATIC_POOL_SIZE</code>, <code>RAPIDXML_DYNAMIC_POOL_SIZE</code> and <code>RAPIDXML_ALIGNMENT</code>
//! to obtain best wasted memory to performance compromise.
//! To do it, define their values before rapidxml.hpp file is included.
//! \param Ch Character type of created nodes.
    template<class Ch = char>
    class memory_pool
    {
    public:
        //! \cond internal
        typedef void *(alloc_func)(std::size_t);       // Type of user-defined function used to allocate memory
        typedef void (free_func)(void *);              // Type of user-defined function used to free memory
        //! \endcond
        //! Constructs empty pool with default allocator functions.
        memory_pool()
            : m_alloc_func(0)
            , m_free_func(0)
        {
            init();
        }
        //! Destroys pool and frees all the memory.
        //! This causes memory occupied by nodes allocated by the pool to be freed.
        //! Nodes allocated from the pool are no longer valid.
        ~memory_pool()
        {
            clear();
        }
        //! Allocates a new node from the pool, and optionally assigns name and value to it.
        //! If the allocation request cannot be accommodated, this function will throw <code>std::bad_alloc</code>.
        //! If exceptions are disabled by defining RAPIDXML_NO_EXCEPTIONS, this function
        //! will call rapidxml::parse_error_handler() function.
        //! \param type Type of node to create.
        //! \param name Name to assign to the node, or 0 to assign no name.
        //! \param value Value to assign to the node, or 0 to assign no value.
        //! \param name_size Size of name to assign, or 0 to automatically calculate size from name string.
        //! \param value_size Size of value to assign, or 0 to automatically calculate size from value string.
        //! \return Pointer to allocated node. This pointer will never be NULL.
        xml_node<Ch> *allocate_node(node_type type,
                                    const Ch *name = 0, const Ch *value = 0,
                                    std::size_t name_size = 0, std::size_t value_size = 0)
        {
            // Placement-new the node into pool-owned, suitably aligned memory.
            void *memory = allocate_aligned(sizeof(xml_node<Ch>));
            xml_node<Ch> *node = new(memory) xml_node<Ch>(type);
            if (name)
            {
                if (name_size > 0)
                    node->name(name, name_size);
                else
                    node->name(name);   // size measured from zero-terminated string
            }
            if (value)
            {
                if (value_size > 0)
                    node->value(value, value_size);
                else
                    node->value(value);
            }
            return node;
        }
        //! Allocates a new attribute from the pool, and optionally assigns name and value to it.
        //! If the allocation request cannot be accommodated, this function will throw <code>std::bad_alloc</code>.
        //! If exceptions are disabled by defining RAPIDXML_NO_EXCEPTIONS, this function
        //! will call rapidxml::parse_error_handler() function.
        //! \param name Name to assign to the attribute, or 0 to assign no name.
        //! \param value Value to assign to the attribute, or 0 to assign no value.
        //! \param name_size Size of name to assign, or 0 to automatically calculate size from name string.
        //! \param value_size Size of value to assign, or 0 to automatically calculate size from value string.
        //! \return Pointer to allocated attribute. This pointer will never be NULL.
        xml_attribute<Ch> *allocate_attribute(const Ch *name = 0, const Ch *value = 0,
                                              std::size_t name_size = 0, std::size_t value_size = 0)
        {
            // Placement-new the attribute into pool-owned, suitably aligned memory.
            void *memory = allocate_aligned(sizeof(xml_attribute<Ch>));
            xml_attribute<Ch> *attribute = new(memory) xml_attribute<Ch>;
            if (name)
            {
                if (name_size > 0)
                    attribute->name(name, name_size);
                else
                    attribute->name(name);
            }
            if (value)
            {
                if (value_size > 0)
                    attribute->value(value, value_size);
                else
                    attribute->value(value);
            }
            return attribute;
        }
        //! Allocates a char array of given size from the pool, and optionally copies a given string to it.
        //! If the allocation request cannot be accommodated, this function will throw <code>std::bad_alloc</code>.
        //! If exceptions are disabled by defining RAPIDXML_NO_EXCEPTIONS, this function
        //! will call rapidxml::parse_error_handler() function.
        //! \param source String to initialize the allocated memory with, or 0 to not initialize it.
        //! \param size Number of characters to allocate, or zero to calculate it automatically from source string length; if size is 0, source string must be specified and null terminated.
        //! \return Pointer to allocated char array. This pointer will never be NULL.
        Ch *allocate_string(const Ch *source = 0, std::size_t size = 0)
        {
            assert(source || size);         // Either source or size (or both) must be specified
            if (size == 0)
                size = internal::measure(source) + 1;   // +1 to include the zero terminator
            Ch *result = static_cast<Ch *>(allocate_aligned(size * sizeof(Ch)));
            if (source)
                for (std::size_t i = 0; i < size; ++i)
                    result[i] = source[i];
            return result;
        }
        //! Clones an xml_node and its hierarchy of child nodes and attributes.
        //! Nodes and attributes are allocated from this memory pool.
        //! Names and values are not cloned, they are shared between the clone and the source.
        //! Result node can be optionally specified as a second parameter,
        //! in which case its contents will be replaced with cloned source node.
        //! This is useful when you want to clone entire document.
        //! \param source Node to clone.
        //! \param result Node to put results in, or 0 to automatically allocate result node
        //! \return Pointer to cloned node. This pointer will never be NULL.
        xml_node<Ch> *clone_node(const xml_node<Ch> *source, xml_node<Ch> *result = 0)
        {
            // Prepare result node: reuse the caller-supplied node, or allocate a fresh one.
            if (result)
            {
                result->remove_all_attributes();
                result->remove_all_nodes();
                result->type(source->type());
            }
            else
                result = allocate_node(source->type());
            // Clone name and value (pointers are shared with the source, not copied).
            result->name(source->name(), source->name_size());
            result->value(source->value(), source->value_size());
            // Clone child nodes (recursively) and attributes.
            for (xml_node<Ch> *child = source->first_node(); child; child = child->next_sibling())
                result->append_node(clone_node(child));
            for (xml_attribute<Ch> *attr = source->first_attribute(); attr; attr = attr->next_attribute())
                result->append_attribute(allocate_attribute(attr->name(), attr->value(), attr->name_size(), attr->value_size()));
            return result;
        }
        //! Clears the pool.
        //! This causes memory occupied by nodes allocated by the pool to be freed.
        //! Any nodes or strings allocated from the pool will no longer be valid.
        void clear()
        {
            // Walk the chain of dynamically allocated pool blocks; each block's
            // header (placed aligned at the block start) stores the previous
            // value of m_begin. Stop once only the static buffer remains.
            while (m_begin != m_static_memory)
            {
                char *previous_begin = reinterpret_cast<header *>(align(m_begin))->previous_begin;
                if (m_free_func)
                    m_free_func(m_begin);
                else
                    delete[] m_begin;
                m_begin = previous_begin;
            }
            init();
        }
        //! Sets or resets the user-defined memory allocation functions for the pool.
        //! This can only be called when no memory is allocated from the pool yet, otherwise results are undefined.
        //! Allocation function must not return invalid pointer on failure. It should either throw,
        //! stop the program, or use <code>longjmp()</code> function to pass control to other place of program.
        //! If it returns invalid pointer, results are undefined.
        //! <br><br>
        //! User defined allocation functions must have the following forms:
        //! <br><code>
        //! <br>void *allocate(std::size_t size);
        //! <br>void free(void *pointer);
        //! </code><br>
        //! \param af Allocation function, or 0 to restore default function
        //! \param ff Free function, or 0 to restore default function
        void set_allocator(alloc_func *af, free_func *ff)
        {
            assert(m_begin == m_static_memory && m_ptr == align(m_begin));    // Verify that no memory is allocated yet
            m_alloc_func = af;
            m_free_func = ff;
        }
    private:
        // Bookkeeping record placed (aligned) at the start of every dynamically
        // allocated pool block; links the blocks into a singly linked list so
        // that clear() can free them all.
        struct header
        {
            char *previous_begin;
        };
        // Points the pool at its static buffer; after this, no dynamic memory is held.
        void init()
        {
            m_begin = m_static_memory;
            m_ptr = align(m_begin);
            m_end = m_static_memory + sizeof(m_static_memory);
        }
        // Rounds ptr up to the next RAPIDXML_ALIGNMENT boundary.
        // The mask arithmetic requires RAPIDXML_ALIGNMENT to be a power of 2.
        char *align(char *ptr)
        {
            std::size_t alignment = ((RAPIDXML_ALIGNMENT - (std::size_t(ptr) & (RAPIDXML_ALIGNMENT - 1))) & (RAPIDXML_ALIGNMENT - 1));
            return ptr + alignment;
        }
        // Obtains size bytes of raw (unaligned) memory from the configured allocator.
        char *allocate_raw(std::size_t size)
        {
            // Allocate
            void *memory;
            if (m_alloc_func)   // Allocate memory using either user-specified allocation function or global operator new[]
            {
                memory = m_alloc_func(size);
                assert(memory); // Allocator is not allowed to return 0, on failure it must either throw, stop the program or use longjmp
            }
            else
            {
                memory = new char[size];
#ifdef RAPIDXML_NO_EXCEPTIONS
                if (!memory)    // If exceptions are disabled, verify memory allocation, because new will not be able to throw bad_alloc
                    RAPIDXML_PARSE_ERROR("out of memory", 0);
#endif
            }
            return static_cast<char *>(memory);
        }
        // Carves an aligned chunk of size bytes out of the current pool block,
        // chaining in a new dynamically allocated block when the current one is full.
        void *allocate_aligned(std::size_t size)
        {
            // Calculate aligned pointer
            char *result = align(m_ptr);
            // If not enough memory left in current pool, allocate a new pool
            if (result + size > m_end)
            {
                // Calculate required pool size (may be bigger than RAPIDXML_DYNAMIC_POOL_SIZE)
                std::size_t pool_size = RAPIDXML_DYNAMIC_POOL_SIZE;
                if (pool_size < size)
                    pool_size = size;
                // Allocate
                std::size_t alloc_size = sizeof(header) + (2 * RAPIDXML_ALIGNMENT - 2) + pool_size;     // 2 alignments required in worst case: one for header, one for actual allocation
                char *raw_memory = allocate_raw(alloc_size);
                // Setup new pool in allocated memory; the header remembers the
                // previous block so clear() can walk and free the whole chain.
                char *pool = align(raw_memory);
                header *new_header = reinterpret_cast<header *>(pool);
                new_header->previous_begin = m_begin;
                m_begin = raw_memory;
                m_ptr = pool + sizeof(header);
                m_end = raw_memory + alloc_size;
                // Calculate aligned pointer again using new pool
                result = align(m_ptr);
            }
            // Update pool and return aligned pointer
            m_ptr = result + size;
            return result;
        }
        char *m_begin;                                      // Start of raw memory making up current pool
        char *m_ptr;                                        // First free byte in current pool
        char *m_end;                                        // One past last available byte in current pool
        char m_static_memory[RAPIDXML_STATIC_POOL_SIZE];    // Static raw memory
        alloc_func *m_alloc_func;                           // Allocator function, or 0 if default is to be used
        free_func *m_free_func;                             // Free function, or 0 if default is to be used
    };
///////////////////////////////////////////////////////////////////////////
// XML base
//! Base class for xml_node and xml_attribute implementing common functions:
//! name(), name_size(), value(), value_size() and parent().
//! \param Ch Character type to use
template<class Ch = char>
class xml_base
{
public:
///////////////////////////////////////////////////////////////////////////
// Construction & destruction
// Construct a base with empty name, value and parent
xml_base()
: m_name(0)
, m_value(0)
, m_parent(0)
{
}
///////////////////////////////////////////////////////////////////////////
// Node data access
//! Gets name of the node.
//! Interpretation of name depends on type of node.
//! Note that name will not be zero-terminated if rapidxml::parse_no_string_terminators option was selected during parse.
//! <br><br>
//! Use name_size() function to determine length of the name.
//! \return Name of node, or empty string if node has no name.
Ch *name() const
{
return m_name ? m_name : nullstr();
}
//! Gets size of node name, not including terminator character.
//! This function works correctly irrespective of whether name is or is not zero terminated.
//! \return Size of node name, in characters.
std::size_t name_size() const
{
return m_name ? m_name_size : 0;
}
//! Gets value of node.
//! Interpretation of value depends on type of node.
//! Note that value will not be zero-terminated if rapidxml::parse_no_string_terminators option was selected during parse.
//! <br><br>
//! Use value_size() function to determine length of the value.
//! \return Value of node, or empty string if node has no value.
Ch *value() const
{
return m_value ? m_value : nullstr();
}
//! Gets size of node value, not including terminator character.
//! This function works correctly irrespective of whether value is or is not zero terminated.
//! \return Size of node value, in characters.
std::size_t value_size() const
{
return m_value ? m_value_size : 0;
}
///////////////////////////////////////////////////////////////////////////
// Node modification
//! Sets name of node to a non zero-terminated string.
//! See \ref ownership_of_strings.
//! <br><br>
//! Note that node does not own its name or value, it only stores a pointer to it.
//! It will not delete or otherwise free the pointer on destruction.
//! It is reponsibility of the user to properly manage lifetime of the string.
//! The easiest way to achieve it is to use memory_pool of the document to allocate the string -
//! on destruction of the document the string will be automatically freed.
//! <br><br>
//! Size of name must be specified separately, because name does not have to be zero terminated.
//! Use name(const Ch *) function to have the length automatically calculated (string must be zero terminated).
//! \param name Name of node to set. Does not have to be zero terminated.
//! \param size Size of name, in characters. This does not include zero terminator, if one is present.
void name(const Ch *name, std::size_t size)
{
m_name = const_cast<Ch *>(name);
m_name_size = size;
}
//! Sets name of node to a zero-terminated string.
//! See also \ref ownership_of_strings and xml_node::name(const Ch *, std::size_t).
//! \param name Name of node to set. Must be zero terminated.
void name(const Ch *name)
{
this->name(name, internal::measure(name));
}
//! Sets value of node to a non zero-terminated string.
//! See \ref ownership_of_strings.
//! <br><br>
//! Note that node does not own its name or value, it only stores a pointer to it.
//! It will not delete or otherwise free the pointer on destruction.
//! It is reponsibility of the user to properly manage lifetime of the string.
//! The easiest way to achieve it is to use memory_pool of the document to allocate the string -
//! on destruction of the document the string will be automatically freed.
//! <br><br>
//! Size of value must be specified separately, because it does not have to be zero terminated.
//! Use value(const Ch *) function to have the length automatically calculated (string must be zero terminated).
//! <br><br>
//! If an element has a child node of type node_data, it will take precedence over element value when printing.
//! If you want to manipulate data of elements using values, use parser flag rapidxml::parse_no_data_nodes to prevent creation of data nodes by the parser.
//! \param value value of node to set. Does not have to be zero terminated.
//! \param size Size of value, in characters. This does not include zero terminator, if one is present.
void value(const Ch *value, std::size_t size)
{
m_value = const_cast<Ch *>(value);
m_value_size = size;
}
//! Sets value of node to a zero-terminated string.
//! See also \ref ownership_of_strings and xml_node::value(const Ch *, std::size_t).
//! \param value Vame of node to set. Must be zero terminated.
void value(const Ch *value)
{
this->value(value, internal::measure(value));
}
///////////////////////////////////////////////////////////////////////////
// Related nodes access
//! Gets node parent.
//! \return Pointer to parent node, or 0 if there is no parent.
xml_node<Ch> *parent() const
{
return m_parent;
}
protected:
// Return empty string
static Ch *nullstr()
{
static Ch zero = Ch('\0');
return &zero;
}
Ch *m_name; // Name of node, or 0 if no name
Ch *m_value; // Value of node, or 0 if no value
std::size_t m_name_size; // Length of node name, or undefined of no name
std::size_t m_value_size; // Length of node value, or undefined if no value
xml_node<Ch> *m_parent; // Pointer to parent node, or 0 if none
};
//! Class representing attribute node of XML document.
//! Each attribute has name and value strings, which are available through name() and value() functions (inherited from xml_base).
//! Note that after parse, both name and value of attribute will point to interior of source text used for parsing.
//! Thus, this text must persist in memory for the lifetime of attribute.
//! \param Ch Character type to use.
template<class Ch = char>
class xml_attribute: public xml_base<Ch>
{
friend class xml_node<Ch>;
public:
///////////////////////////////////////////////////////////////////////////
// Construction & destruction
//! Constructs an empty attribute with the specified type.
//! Consider using memory_pool of appropriate xml_document if allocating attributes manually.
xml_attribute()
{
}
///////////////////////////////////////////////////////////////////////////
// Related nodes access
//! Gets document of which attribute is a child.
//! \return Pointer to document that contains this attribute, or 0 if there is no parent document.
xml_document<Ch> *document() const
{
if (xml_node<Ch> *node = this->parent())
{
while (node->parent())
node = node->parent();
return node->type() == node_document ? static_cast<xml_document<Ch> *>(node) : 0;
}
else
return 0;
}
//! Gets previous attribute, optionally matching attribute name.
//! \param name Name of attribute to find, or 0 to return previous attribute regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found attribute, or 0 if not found.
xml_attribute<Ch> *previous_attribute(const Ch *name = 0, std::size_t name_size = 0, bool case_sensitive = true) const
{
if (name)
{
if (name_size == 0)
name_size = internal::measure(name);
for (xml_attribute<Ch> *attribute = m_prev_attribute; attribute; attribute = attribute->m_prev_attribute)
if (internal::compare(attribute->name(), attribute->name_size(), name, name_size, case_sensitive))
return attribute;
return 0;
}
else
return this->m_parent ? m_prev_attribute : 0;
}
//! Gets next attribute, optionally matching attribute name.
//! \param name Name of attribute to find, or 0 to return next attribute regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found attribute, or 0 if not found.
xml_attribute<Ch> *next_attribute(const Ch *name = 0, std::size_t name_size = 0, bool case_sensitive = true) const
{
if (name)
{
if (name_size == 0)
name_size = internal::measure(name);
for (xml_attribute<Ch> *attribute = m_next_attribute; attribute; attribute = attribute->m_next_attribute)
if (internal::compare(attribute->name(), attribute->name_size(), name, name_size, case_sensitive))
return attribute;
return 0;
}
else
return this->m_parent ? m_next_attribute : 0;
}
private:
xml_attribute<Ch> *m_prev_attribute; // Pointer to previous sibling of attribute, or 0 if none; only valid if parent is non-zero
xml_attribute<Ch> *m_next_attribute; // Pointer to next sibling of attribute, or 0 if none; only valid if parent is non-zero
};
///////////////////////////////////////////////////////////////////////////
// XML node
//! Class representing a node of XML document.
//! Each node may have associated name and value strings, which are available through name() and value() functions.
//! Interpretation of name and value depends on type of the node.
//! Type of node can be determined by using type() function.
//! <br><br>
//! Note that after parse, both name and value of node, if any, will point interior of source text used for parsing.
//! Thus, this text must persist in the memory for the lifetime of node.
//! \param Ch Character type to use.
template<class Ch = char>
class xml_node: public xml_base<Ch>
{
public:
///////////////////////////////////////////////////////////////////////////
// Construction & destruction
        //! Constructs an empty node with the specified type.
        //! Consider using memory_pool of appropriate document to allocate nodes manually.
        //! \param type Type of node to construct.
        xml_node(node_type type)
            : m_type(type)
            , m_first_node(0)
            , m_first_attribute(0)
        {
            // m_last_node, m_last_attribute and the sibling pointers are deliberately
            // left uninitialized; they are only read while m_first_node /
            // m_first_attribute (or m_parent, for the sibling pointers) are non-zero.
        }
///////////////////////////////////////////////////////////////////////////
// Node data access
        //! Gets type of node (e.g. element, data, comment - see node_type).
        //! \return Type of node.
        node_type type() const
        {
            return m_type;
        }
///////////////////////////////////////////////////////////////////////////
// Related nodes access
//! Gets document of which node is a child.
//! \return Pointer to document that contains this node, or 0 if there is no parent document.
xml_document<Ch> *document() const
{
xml_node<Ch> *node = const_cast<xml_node<Ch> *>(this);
while (node->parent())
node = node->parent();
return node->type() == node_document ? static_cast<xml_document<Ch> *>(node) : 0;
}
//! Gets first child node, optionally matching node name.
//! \param name Name of child to find, or 0 to return first child regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found child, or 0 if not found.
xml_node<Ch> *first_node(const Ch *name = 0, std::size_t name_size = 0, bool case_sensitive = true) const
{
if (name)
{
if (name_size == 0)
name_size = internal::measure(name);
for (xml_node<Ch> *child = m_first_node; child; child = child->next_sibling())
if (internal::compare(child->name(), child->name_size(), name, name_size, case_sensitive))
return child;
return 0;
}
else
return m_first_node;
}
//! Gets last child node, optionally matching node name.
//! Behaviour is undefined if node has no children.
//! Use first_node() to test if node has children.
//! \param name Name of child to find, or 0 to return last child regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found child, or 0 if not found.
xml_node<Ch> *last_node(const Ch *name = 0, std::size_t name_size = 0, bool case_sensitive = true) const
{
assert(m_first_node); // Cannot query for last child if node has no children
if (name)
{
if (name_size == 0)
name_size = internal::measure(name);
for (xml_node<Ch> *child = m_last_node; child; child = child->previous_sibling())
if (internal::compare(child->name(), child->name_size(), name, name_size, case_sensitive))
return child;
return 0;
}
else
return m_last_node;
}
//! Gets previous sibling node, optionally matching node name.
//! Behaviour is undefined if node has no parent.
//! Use parent() to test if node has a parent.
//! \param name Name of sibling to find, or 0 to return previous sibling regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found sibling, or 0 if not found.
xml_node<Ch> *previous_sibling(const Ch *name = 0, std::size_t name_size = 0, bool case_sensitive = true) const
{
assert(this->m_parent); // Cannot query for siblings if node has no parent
if (name)
{
if (name_size == 0)
name_size = internal::measure(name);
for (xml_node<Ch> *sibling = m_prev_sibling; sibling; sibling = sibling->m_prev_sibling)
if (internal::compare(sibling->name(), sibling->name_size(), name, name_size, case_sensitive))
return sibling;
return 0;
}
else
return m_prev_sibling;
}
//! Gets next sibling node, optionally matching node name.
//! Behaviour is undefined if node has no parent.
//! Use parent() to test if node has a parent.
//! \param name Name of sibling to find, or 0 to return next sibling regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found sibling, or 0 if not found.
xml_node<Ch> *next_sibling(const Ch *name = 0, std::size_t name_size = 0, bool case_sensitive = true) const
{
assert(this->m_parent); // Cannot query for siblings if node has no parent
if (name)
{
if (name_size == 0)
name_size = internal::measure(name);
for (xml_node<Ch> *sibling = m_next_sibling; sibling; sibling = sibling->m_next_sibling)
if (internal::compare(sibling->name(), sibling->name_size(), name, name_size, case_sensitive))
return sibling;
return 0;
}
else
return m_next_sibling;
}
//! Gets first attribute of node, optionally matching attribute name.
//! \param name Name of attribute to find, or 0 to return first attribute regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found attribute, or 0 if not found.
xml_attribute<Ch> *first_attribute(const Ch *name = 0, std::size_t name_size = 0, bool case_sensitive = true) const
{
if (name)
{
if (name_size == 0)
name_size = internal::measure(name);
for (xml_attribute<Ch> *attribute = m_first_attribute; attribute; attribute = attribute->m_next_attribute)
if (internal::compare(attribute->name(), attribute->name_size(), name, name_size, case_sensitive))
return attribute;
return 0;
}
else
return m_first_attribute;
}
//! Gets last attribute of node, optionally matching attribute name.
//! \param name Name of attribute to find, or 0 to return last attribute regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found attribute, or 0 if not found.
xml_attribute<Ch> *last_attribute(const Ch *name = 0, std::size_t name_size = 0, bool case_sensitive = true) const
{
if (name)
{
if (name_size == 0)
name_size = internal::measure(name);
for (xml_attribute<Ch> *attribute = m_last_attribute; attribute; attribute = attribute->m_prev_attribute)
if (internal::compare(attribute->name(), attribute->name_size(), name, name_size, case_sensitive))
return attribute;
return 0;
}
else
return m_first_attribute ? m_last_attribute : 0;
}
///////////////////////////////////////////////////////////////////////////
// Node modification
        //! Sets type of node.
        //! Only the type tag is changed; name, value, children and attributes are untouched.
        //! \param type Type of node to set.
        void type(node_type type)
        {
            m_type = type;
        }
///////////////////////////////////////////////////////////////////////////
// Node manipulation
        //! Prepends a new child node.
        //! The prepended child becomes the first child, and all existing children are moved one position back.
        //! \param child Node to prepend.
        void prepend_node(xml_node<Ch> *child)
        {
            // Child must be an orphan, and document nodes can never become children.
            assert(child && !child->parent() && child->type() != node_document);
            if (first_node())
            {
                // Link the new child in front of the current first child.
                child->m_next_sibling = m_first_node;
                m_first_node->m_prev_sibling = child;
            }
            else
            {
                // Child list was empty: the new child is also the last child.
                child->m_next_sibling = 0;
                m_last_node = child;
            }
            m_first_node = child;
            child->m_parent = this;
            child->m_prev_sibling = 0;
        }
        //! Appends a new child node.
        //! The appended child becomes the last child.
        //! \param child Node to append.
        void append_node(xml_node<Ch> *child)
        {
            // Child must be an orphan, and document nodes can never become children.
            assert(child && !child->parent() && child->type() != node_document);
            if (first_node())
            {
                // Link the new child behind the current last child.
                child->m_prev_sibling = m_last_node;
                m_last_node->m_next_sibling = child;
            }
            else
            {
                // Child list was empty: the new child is also the first child.
                child->m_prev_sibling = 0;
                m_first_node = child;
            }
            m_last_node = child;
            child->m_parent = this;
            child->m_next_sibling = 0;
        }
        //! Inserts a new child node at specified place inside the node.
        //! All children after and including the specified node are moved one position back.
        //! \param where Place where to insert the child, or 0 to insert at the back.
        //! \param child Node to insert.
        void insert_node(xml_node<Ch> *where, xml_node<Ch> *child)
        {
            assert(!where || where->parent() == this);
            assert(child && !child->parent() && child->type() != node_document);
            if (where == m_first_node)
                // Also covers inserting into an empty list: there both where and
                // m_first_node are 0, and prepend_node handles the empty case.
                prepend_node(child);
            else if (where == 0)
                append_node(child);
            else
            {
                // Splice child between where->m_prev_sibling and where.
                child->m_prev_sibling = where->m_prev_sibling;
                child->m_next_sibling = where;
                where->m_prev_sibling->m_next_sibling = child;
                where->m_prev_sibling = child;
                child->m_parent = this;
            }
        }
        //! Removes first child node.
        //! If node has no children, behaviour is undefined.
        //! Use first_node() to test if node has children.
        void remove_first_node()
        {
            assert(first_node());
            xml_node<Ch> *child = m_first_node;
            m_first_node = child->m_next_sibling;
            if (child->m_next_sibling)
                // New first child has no predecessor.
                child->m_next_sibling->m_prev_sibling = 0;
            else
                // That was the only child; list is now empty.
                m_last_node = 0;
            // Detach the removed child (its sibling pointers become irrelevant once parentless).
            child->m_parent = 0;
        }
        //! Removes last child of the node.
        //! If node has no children, behaviour is undefined.
        //! Use first_node() to test if node has children.
        void remove_last_node()
        {
            assert(first_node());
            xml_node<Ch> *child = m_last_node;
            if (child->m_prev_sibling)
            {
                // New last child has no successor.
                m_last_node = child->m_prev_sibling;
                child->m_prev_sibling->m_next_sibling = 0;
            }
            else
                // That was the only child; list is now empty.
                m_first_node = 0;
            // Detach the removed child (its sibling pointers become irrelevant once parentless).
            child->m_parent = 0;
        }
        //! Removes specified child from the node.
        //! Behaviour is undefined if where is not a child of this node.
        //! \param where Pointer to child to be removed.
        void remove_node(xml_node<Ch> *where)
        {
            assert(where && where->parent() == this);
            assert(first_node());
            // Delegate the boundary cases so first/last bookkeeping stays in one place.
            if (where == m_first_node)
                remove_first_node();
            else if (where == m_last_node)
                remove_last_node();
            else
            {
                // Unlink a middle child: both neighbours are guaranteed to exist here.
                where->m_prev_sibling->m_next_sibling = where->m_next_sibling;
                where->m_next_sibling->m_prev_sibling = where->m_prev_sibling;
                where->m_parent = 0;
            }
        }
//! Removes all child nodes (but not attributes).
void remove_all_nodes()
{
for (xml_node<Ch> *node = first_node(); node; node = node->m_next_sibling)
node->m_parent = 0;
m_first_node = 0;
}
        //! Prepends a new attribute to the node.
        //! \param attribute Attribute to prepend.
        void prepend_attribute(xml_attribute<Ch> *attribute)
        {
            assert(attribute && !attribute->parent());
            if (first_attribute())
            {
                attribute->m_next_attribute = m_first_attribute;
                m_first_attribute->m_prev_attribute = attribute;
            }
            else
            {
                // List was empty, so the new attribute is also the last one
                attribute->m_next_attribute = 0;
                m_last_attribute = attribute;
            }
            m_first_attribute = attribute;
            attribute->m_parent = this;
            attribute->m_prev_attribute = 0;
        }
        //! Appends a new attribute to the node.
        //! \param attribute Attribute to append.
        void append_attribute(xml_attribute<Ch> *attribute)
        {
            assert(attribute && !attribute->parent());
            if (first_attribute())
            {
                attribute->m_prev_attribute = m_last_attribute;
                m_last_attribute->m_next_attribute = attribute;
            }
            else
            {
                // List was empty, so the new attribute is also the first one
                attribute->m_prev_attribute = 0;
                m_first_attribute = attribute;
            }
            m_last_attribute = attribute;
            attribute->m_parent = this;
            attribute->m_next_attribute = 0;
        }
        //! Inserts a new attribute at specified place inside the node.
        //! All attributes after and including the specified attribute are moved one position back.
        //! \param where Place where to insert the attribute, or 0 to insert at the back.
        //! \param attribute Attribute to insert.
        void insert_attribute(xml_attribute<Ch> *where, xml_attribute<Ch> *attribute)
        {
            assert(!where || where->parent() == this);
            assert(attribute && !attribute->parent());
            if (where == m_first_attribute)
                prepend_attribute(attribute);
            else if (where == 0)
                append_attribute(attribute);
            else
            {
                // Splice attribute into the doubly-linked attribute list, immediately before 'where'
                attribute->m_prev_attribute = where->m_prev_attribute;
                attribute->m_next_attribute = where;
                where->m_prev_attribute->m_next_attribute = attribute;
                where->m_prev_attribute = attribute;
                attribute->m_parent = this;
            }
        }
        //! Removes first attribute of the node.
        //! If node has no attributes, behaviour is undefined.
        //! Use first_attribute() to test if node has attributes.
        void remove_first_attribute()
        {
            assert(first_attribute());
            xml_attribute<Ch> *attribute = m_first_attribute;
            if (attribute->m_next_attribute)
            {
                attribute->m_next_attribute->m_prev_attribute = 0;
            }
            else
                m_last_attribute = 0;   // list became empty; m_last_attribute must not dangle
            attribute->m_parent = 0;
            m_first_attribute = attribute->m_next_attribute;
        }
        //! Removes last attribute of the node.
        //! If node has no attributes, behaviour is undefined.
        //! Use first_attribute() to test if node has attributes (non-emptiness check).
        void remove_last_attribute()
        {
            assert(first_attribute());
            xml_attribute<Ch> *attribute = m_last_attribute;
            if (attribute->m_prev_attribute)
            {
                attribute->m_prev_attribute->m_next_attribute = 0;
                m_last_attribute = attribute->m_prev_attribute;
            }
            else
                m_first_attribute = 0;  // attribute was the only one in the list
            attribute->m_parent = 0;
        }
//! Removes specified attribute from node.
//! \param where Pointer to attribute to be removed.
void remove_attribute(xml_attribute<Ch> *where)
{
assert(first_attribute() && where->parent() == this);
if (where == m_first_attribute)
remove_first_attribute();
else if (where == m_last_attribute)
remove_last_attribute();
else
{
where->m_prev_attribute->m_next_attribute = where->m_next_attribute;
where->m_next_attribute->m_prev_attribute = where->m_prev_attribute;
where->m_parent = 0;
}
}
//! Removes all attributes of node.
void remove_all_attributes()
{
for (xml_attribute<Ch> *attribute = first_attribute(); attribute; attribute = attribute->m_next_attribute)
attribute->m_parent = 0;
m_first_attribute = 0;
}
private:
///////////////////////////////////////////////////////////////////////////
// Restrictions
// No copying
xml_node(const xml_node &);
void operator =(const xml_node &);
///////////////////////////////////////////////////////////////////////////
// Data members
// Note that some of the pointers below have UNDEFINED values if certain other pointers are 0.
// This is required for maximum performance, as it allows the parser to omit initialization of
// unneded/redundant values.
//
// The rules are as follows:
// 1. first_node and first_attribute contain valid pointers, or 0 if node has no children/attributes respectively
// 2. last_node and last_attribute are valid only if node has at least one child/attribute respectively, otherwise they contain garbage
// 3. prev_sibling and next_sibling are valid only if node has a parent, otherwise they contain garbage
node_type m_type; // Type of node; always valid
xml_node<Ch> *m_first_node; // Pointer to first child node, or 0 if none; always valid
xml_node<Ch> *m_last_node; // Pointer to last child node, or 0 if none; this value is only valid if m_first_node is non-zero
xml_attribute<Ch> *m_first_attribute; // Pointer to first attribute of node, or 0 if none; always valid
xml_attribute<Ch> *m_last_attribute; // Pointer to last attribute of node, or 0 if none; this value is only valid if m_first_attribute is non-zero
xml_node<Ch> *m_prev_sibling; // Pointer to previous sibling of node, or 0 if none; this value is only valid if m_parent is non-zero
xml_node<Ch> *m_next_sibling; // Pointer to next sibling of node, or 0 if none; this value is only valid if m_parent is non-zero
};
///////////////////////////////////////////////////////////////////////////
// XML document
//! This class represents root of the DOM hierarchy.
//! It is also an xml_node and a memory_pool through public inheritance.
//! Use parse() function to build a DOM tree from a zero-terminated XML text string.
//! parse() function allocates memory for nodes and attributes by using functions of xml_document,
//! which are inherited from memory_pool.
//! To access root node of the document, use the document itself, as if it was an xml_node.
//! \param Ch Character type to use.
template<class Ch = char>
class xml_document: public xml_node<Ch>, public memory_pool<Ch>
{
public:
        //! Constructs empty XML document
        xml_document()
            : xml_node<Ch>(node_document)   // the document doubles as the root node of the DOM tree
        {
        }
        //! Parses zero-terminated XML string according to given flags.
        //! Passed string will be modified by the parser, unless rapidxml::parse_non_destructive flag is used.
        //! The string must persist for the lifetime of the document.
        //! In case of error, rapidxml::parse_error exception will be thrown.
        //! <br><br>
        //! If you want to parse contents of a file, you must first load the file into the memory, and pass pointer to its beginning.
        //! Make sure that data is zero-terminated.
        //! <br><br>
        //! A document can be parsed multiple times.
        //! Each new call to parse removes previous nodes and attributes (if any), but does not clear memory pool.
        //! \param text XML data to parse; pointer is non-const to denote fact that this data may be modified by the parser.
        template<int Flags>
        void parse(Ch *text)
        {
            assert(text);
            // Remove current contents
            this->remove_all_nodes();
            this->remove_all_attributes();
            // Parse BOM, if any
            parse_bom<Flags>(text);
            // Parse children
            while (1)
            {
                // Skip whitespace before node
                skip<whitespace_pred, Flags>(text);
                if (*text == 0)
                    break;  // reached terminating zero - whole document consumed
                // Parse and append new child
                if (*text == Ch('<'))
                {
                    ++text; // Skip '<'
                    if (xml_node<Ch> *node = parse_node<Flags>(text))
                        this->append_node(node);
                }
                else
                    RAPIDXML_PARSE_ERROR("expected <", text);
            }
        }
        //! Clears the document by deleting all nodes and clearing the memory pool.
        //! All nodes owned by document pool are destroyed.
        void clear()
        {
            this->remove_all_nodes();
            this->remove_all_attributes();
            memory_pool<Ch>::clear();   // releases all node/attribute storage owned by the pool
        }
private:
///////////////////////////////////////////////////////////////////////
// Internal character utility functions
        // Detect whitespace character
        // (table-driven classification; lookup_whitespace marks space \n \r \t)
        struct whitespace_pred
        {
            static unsigned char test(Ch ch)
            {
                return internal::lookup_tables<0>::lookup_whitespace[static_cast<unsigned char>(ch)];
            }
        };
        // Detect node name character
        // (lookup_node_name accepts anything but space \n \r \t / > ? \0)
        struct node_name_pred
        {
            static unsigned char test(Ch ch)
            {
                return internal::lookup_tables<0>::lookup_node_name[static_cast<unsigned char>(ch)];
            }
        };
        // Detect attribute name character
        // (lookup_attribute_name accepts anything but space \n \r \t / < > = ? ! \0)
        struct attribute_name_pred
        {
            static unsigned char test(Ch ch)
            {
                return internal::lookup_tables<0>::lookup_attribute_name[static_cast<unsigned char>(ch)];
            }
        };
        // Detect text character (PCDATA)
        // (lookup_text accepts anything but < \0)
        struct text_pred
        {
            static unsigned char test(Ch ch)
            {
                return internal::lookup_tables<0>::lookup_text[static_cast<unsigned char>(ch)];
            }
        };
        // Detect text character (PCDATA) that does not require processing
        // when whitespace normalization is disabled (anything but < \0 &)
        struct text_pure_no_ws_pred
        {
            static unsigned char test(Ch ch)
            {
                return internal::lookup_tables<0>::lookup_text_pure_no_ws[static_cast<unsigned char>(ch)];
            }
        };
        // Detect text character (PCDATA) that does not require processing
        // when whitespace normalization is enabled (anything but < \0 & space \n \r \t)
        struct text_pure_with_ws_pred
        {
            static unsigned char test(Ch ch)
            {
                return internal::lookup_tables<0>::lookup_text_pure_with_ws[static_cast<unsigned char>(ch)];
            }
        };
        // Detect attribute value character; the Quote template parameter selects
        // the table for single-quoted or double-quoted values
        template<Ch Quote>
        struct attribute_value_pred
        {
            static unsigned char test(Ch ch)
            {
                if (Quote == Ch('\''))
                    return internal::lookup_tables<0>::lookup_attribute_data_1[static_cast<unsigned char>(ch)];
                if (Quote == Ch('\"'))
                    return internal::lookup_tables<0>::lookup_attribute_data_2[static_cast<unsigned char>(ch)];
                return 0;       // Should never be executed, to avoid warnings on Comeau
            }
        };
        // Detect attribute value character that does not require processing;
        // the Quote template parameter selects the table for ' or " quoted values
        template<Ch Quote>
        struct attribute_value_pure_pred
        {
            static unsigned char test(Ch ch)
            {
                if (Quote == Ch('\''))
                    return internal::lookup_tables<0>::lookup_attribute_data_1_pure[static_cast<unsigned char>(ch)];
                if (Quote == Ch('\"'))
                    return internal::lookup_tables<0>::lookup_attribute_data_2_pure[static_cast<unsigned char>(ch)];
                return 0;       // Should never be executed, to avoid warnings on Comeau
            }
        };
        // Insert coded character, using UTF8 or 8-bit ASCII
        // Writes the encoded bytes at *text and advances text past them.
        template<int Flags>
        static void insert_coded_character(Ch *&text, unsigned long code)
        {
            if (Flags & parse_no_utf8)
            {
                // Insert 8-bit ASCII character
                // Todo: possibly verify that code is less than 256 and use replacement char otherwise?
                text[0] = static_cast<unsigned char>(code);
                text += 1;
            }
            else
            {
                // Insert UTF8 sequence
                // Continuation bytes: (code | 0x80) & 0xBF forces the top two bits to binary 10
                if (code < 0x80)    // 1 byte sequence
                {
                    text[0] = static_cast<unsigned char>(code);
                    text += 1;
                }
                else if (code < 0x800)  // 2 byte sequence
                {
                    text[1] = static_cast<unsigned char>((code | 0x80) & 0xBF); code >>= 6;
                    text[0] = static_cast<unsigned char>(code | 0xC0);
                    text += 2;
                }
                else if (code < 0x10000)    // 3 byte sequence
                {
                    text[2] = static_cast<unsigned char>((code | 0x80) & 0xBF); code >>= 6;
                    text[1] = static_cast<unsigned char>((code | 0x80) & 0xBF); code >>= 6;
                    text[0] = static_cast<unsigned char>(code | 0xE0);
                    text += 3;
                }
                else if (code < 0x110000)   // 4 byte sequence
                {
                    text[3] = static_cast<unsigned char>((code | 0x80) & 0xBF); code >>= 6;
                    text[2] = static_cast<unsigned char>((code | 0x80) & 0xBF); code >>= 6;
                    text[1] = static_cast<unsigned char>((code | 0x80) & 0xBF); code >>= 6;
                    text[0] = static_cast<unsigned char>(code | 0xF0);
                    text += 4;
                }
                else    // Invalid, only codes up to 0x10FFFF are allowed in Unicode
                {
                    RAPIDXML_PARSE_ERROR("invalid numeric character entity", text);
                }
            }
        }
        // Skip characters while predicate evaluates to true
        // (i.e. advance text until the predicate evaluates to false)
        template<class StopPred, int Flags>
        static void skip(Ch *&text)
        {
            Ch *tmp = text;
            while (StopPred::test(*tmp))
                ++tmp;
            text = tmp;
        }
        // Skip characters while predicate evaluates to true, while doing the following:
        // - replacing XML character entity references with proper characters (&apos; &amp; &quot; &lt; &gt; &#...;)
        // - condensing whitespace sequences to single space character
        // Returns pointer one past the last written character (the new logical end);
        // text is left pointing at the character that stopped the scan.
        template<class StopPred, class StopPredPure, int Flags>
        static Ch *skip_and_expand_character_refs(Ch *&text)
        {
            // If entity translation, whitespace condense and whitespace trimming is disabled, use plain skip
            if (Flags & parse_no_entity_translation &&
                !(Flags & parse_normalize_whitespace) &&
                !(Flags & parse_trim_whitespace))
            {
                skip<StopPred, Flags>(text);
                return text;
            }
            // Use simple skip until first modification is detected
            skip<StopPredPure, Flags>(text);
            // Use translation skip
            // Note: dest trails src within the same buffer, rewriting the text in place
            Ch *src = text;
            Ch *dest = src;
            while (StopPred::test(*src))
            {
                // If entity translation is enabled
                if (!(Flags & parse_no_entity_translation))
                {
                    // Test if replacement is needed
                    if (src[0] == Ch('&'))
                    {
                        switch (src[1])
                        {
                        // &amp; &apos;
                        case Ch('a'):
                            if (src[2] == Ch('m') && src[3] == Ch('p') && src[4] == Ch(';'))
                            {
                                *dest = Ch('&');
                                ++dest;
                                src += 5;
                                continue;
                            }
                            if (src[2] == Ch('p') && src[3] == Ch('o') && src[4] == Ch('s') && src[5] == Ch(';'))
                            {
                                *dest = Ch('\'');
                                ++dest;
                                src += 6;
                                continue;
                            }
                            break;
                        // &quot;
                        case Ch('q'):
                            if (src[2] == Ch('u') && src[3] == Ch('o') && src[4] == Ch('t') && src[5] == Ch(';'))
                            {
                                *dest = Ch('"');
                                ++dest;
                                src += 6;
                                continue;
                            }
                            break;
                        // &gt;
                        case Ch('g'):
                            if (src[2] == Ch('t') && src[3] == Ch(';'))
                            {
                                *dest = Ch('>');
                                ++dest;
                                src += 4;
                                continue;
                            }
                            break;
                        // &lt;
                        case Ch('l'):
                            if (src[2] == Ch('t') && src[3] == Ch(';'))
                            {
                                *dest = Ch('<');
                                ++dest;
                                src += 4;
                                continue;
                            }
                            break;
                        // &#...; - assumes ASCII
                        case Ch('#'):
                            if (src[2] == Ch('x'))
                            {
                                // Hexadecimal character reference (&#x...;)
                                unsigned long code = 0;
                                src += 3;   // Skip &#x
                                while (1)
                                {
                                    unsigned char digit = internal::lookup_tables<0>::lookup_digits[static_cast<unsigned char>(*src)];
                                    if (digit == 0xFF)
                                        break;
                                    code = code * 16 + digit;
                                    ++src;
                                }
                                insert_coded_character<Flags>(dest, code);    // Put character in output
                            }
                            else
                            {
                                // Decimal character reference (&#...;)
                                unsigned long code = 0;
                                src += 2;   // Skip &#
                                while (1)
                                {
                                    unsigned char digit = internal::lookup_tables<0>::lookup_digits[static_cast<unsigned char>(*src)];
                                    if (digit == 0xFF)
                                        break;
                                    code = code * 10 + digit;
                                    ++src;
                                }
                                insert_coded_character<Flags>(dest, code);    // Put character in output
                            }
                            if (*src == Ch(';'))
                                ++src;
                            else
                                RAPIDXML_PARSE_ERROR("expected ;", src);
                            continue;
                        // Something else
                        default:
                            // Ignore, just copy '&' verbatim
                            break;
                        }
                    }
                }
                // If whitespace condensing is enabled
                if (Flags & parse_normalize_whitespace)
                {
                    // Test if condensing is needed
                    if (whitespace_pred::test(*src))
                    {
                        *dest = Ch(' '); ++dest;    // Put single space in dest
                        ++src;                      // Skip first whitespace char
                        // Skip remaining whitespace chars
                        while (whitespace_pred::test(*src))
                            ++src;
                        continue;
                    }
                }
                // No replacement, only copy character
                *dest++ = *src++;
            }
            // Return new end
            text = src;
            return dest;
        }
///////////////////////////////////////////////////////////////////////
// Internal parsing functions
// Parse BOM, if any
template<int Flags>
void parse_bom(Ch *&text)
{
// UTF-8?
if (static_cast<unsigned char>(text[0]) == 0xEF &&
static_cast<unsigned char>(text[1]) == 0xBB &&
static_cast<unsigned char>(text[2]) == 0xBF)
{
text += 3; // Skup utf-8 bom
}
}
        // Parse XML declaration (<?xml...)
        // Returns the declaration node, or 0 when parse_declaration_node is not set.
        template<int Flags>
        xml_node<Ch> *parse_xml_declaration(Ch *&text)
        {
            // If parsing of declaration is disabled
            if (!(Flags & parse_declaration_node))
            {
                // Skip until end of declaration
                while (text[0] != Ch('?') || text[1] != Ch('>'))
                {
                    if (!text[0])
                        RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                    ++text;
                }
                text += 2;    // Skip '?>'
                return 0;     // Declaration is not materialized as a node
            }
            // Create declaration
            xml_node<Ch> *declaration = this->allocate_node(node_declaration);
            // Skip whitespace before attributes or ?>
            skip<whitespace_pred, Flags>(text);
            // Parse declaration attributes (e.g. version, encoding, standalone)
            parse_node_attributes<Flags>(text, declaration);
            // Skip ?>
            if (text[0] != Ch('?') || text[1] != Ch('>'))
                RAPIDXML_PARSE_ERROR("expected ?>", text);
            text += 2;
            return declaration;
        }
        // Parse XML comment (<!--...)
        // Returns the comment node, or 0 when parse_comment_nodes is not set.
        template<int Flags>
        xml_node<Ch> *parse_comment(Ch *&text)
        {
            // If parsing of comments is disabled
            if (!(Flags & parse_comment_nodes))
            {
                // Skip until end of comment
                while (text[0] != Ch('-') || text[1] != Ch('-') || text[2] != Ch('>'))
                {
                    if (!text[0])
                        RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                    ++text;
                }
                text += 3;     // Skip '-->'
                return 0;      // Do not produce comment node
            }
            // Remember value start (node value points into the source buffer)
            Ch *value = text;
            // Skip until end of comment
            while (text[0] != Ch('-') || text[1] != Ch('-') || text[2] != Ch('>'))
            {
                if (!text[0])
                    RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                ++text;
            }
            // Create comment node
            xml_node<Ch> *comment = this->allocate_node(node_comment);
            comment->value(value, text - value);
            // Place zero terminator after comment value
            if (!(Flags & parse_no_string_terminators))
                *text = Ch('\0');
            text += 3;     // Skip '-->'
            return comment;
        }
        // Parse DOCTYPE
        // Returns the doctype node, or 0 when parse_doctype_node is not set.
        template<int Flags>
        xml_node<Ch> *parse_doctype(Ch *&text)
        {
            // Remember value start
            Ch *value = text;
            // Skip to >
            while (*text != Ch('>'))
            {
                // Determine character type
                switch (*text)
                {
                // If '[' encountered, scan for matching ending ']' using naive algorithm with depth
                // This works for all W3C test files except for 2 most wicked
                case Ch('['):
                {
                    ++text;     // Skip '['
                    int depth = 1;
                    while (depth > 0)
                    {
                        switch (*text)
                        {
                            case Ch('['): ++depth; break;
                            case Ch(']'): --depth; break;
                            case 0: RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                        }
                        ++text;
                    }
                    break;
                }
                // Error on end of text
                // (RAPIDXML_PARSE_ERROR is expected not to return, so no fall-through occurs)
                case Ch('\0'):
                    RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                // Other character, skip it
                default:
                    ++text;
                }
            }
            // If DOCTYPE nodes enabled
            if (Flags & parse_doctype_node)
            {
                // Create a new doctype node
                xml_node<Ch> *doctype = this->allocate_node(node_doctype);
                doctype->value(value, text - value);
                // Place zero terminator after value
                if (!(Flags & parse_no_string_terminators))
                    *text = Ch('\0');
                text += 1;      // skip '>'
                return doctype;
            }
            else
            {
                text += 1;      // skip '>'
                return 0;
            }
        }
        // Parse PI (processing instruction, <?target value?>)
        // Returns the pi node, or 0 when parse_pi_nodes is not set.
        template<int Flags>
        xml_node<Ch> *parse_pi(Ch *&text)
        {
            // If creation of PI nodes is enabled
            if (Flags & parse_pi_nodes)
            {
                // Create pi node
                xml_node<Ch> *pi = this->allocate_node(node_pi);
                // Extract PI target name
                Ch *name = text;
                skip<node_name_pred, Flags>(text);
                if (text == name)
                    RAPIDXML_PARSE_ERROR("expected PI target", text);
                pi->name(name, text - name);
                // Skip whitespace between pi target and pi
                skip<whitespace_pred, Flags>(text);
                // Remember start of pi
                Ch *value = text;
                // Skip to '?>'
                while (text[0] != Ch('?') || text[1] != Ch('>'))
                {
                    if (*text == Ch('\0'))
                        RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                    ++text;
                }
                // Set pi value (verbatim, no entity expansion or whitespace normalization)
                pi->value(value, text - value);
                // Place zero terminator after name and value
                if (!(Flags & parse_no_string_terminators))
                {
                    pi->name()[pi->name_size()] = Ch('\0');
                    pi->value()[pi->value_size()] = Ch('\0');
                }
                text += 2;    // Skip '?>'
                return pi;
            }
            else
            {
                // Skip to '?>'
                while (text[0] != Ch('?') || text[1] != Ch('>'))
                {
                    if (*text == Ch('\0'))
                        RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                    ++text;
                }
                text += 2;    // Skip '?>'
                return 0;
            }
        }
        // Parse and append data
        // Return character that ends data.
        // This is necessary because this character might have been overwritten by a terminating 0
        template<int Flags>
        Ch parse_and_append_data(xml_node<Ch> *node, Ch *&text, Ch *contents_start)
        {
            // Backup to contents start if whitespace trimming is disabled
            if (!(Flags & parse_trim_whitespace))
                text = contents_start;
            // Skip until end of data
            Ch *value = text, *end;
            if (Flags & parse_normalize_whitespace)
                end = skip_and_expand_character_refs<text_pred, text_pure_with_ws_pred, Flags>(text);
            else
                end = skip_and_expand_character_refs<text_pred, text_pure_no_ws_pred, Flags>(text);
            // Trim trailing whitespace if flag is set; leading was already trimmed by whitespace skip after >
            if (Flags & parse_trim_whitespace)
            {
                if (Flags & parse_normalize_whitespace)
                {
                    // Whitespace is already condensed to single space characters by skipping function, so just trim 1 char off the end
                    if (*(end - 1) == Ch(' '))
                        --end;
                }
                else
                {
                    // Backup until non-whitespace character is found
                    while (whitespace_pred::test(*(end - 1)))
                        --end;
                }
            }
            // If characters are still left between end and value (this test is only necessary if normalization is enabled)
            // Create new data node
            if (!(Flags & parse_no_data_nodes))
            {
                xml_node<Ch> *data = this->allocate_node(node_data);
                data->value(value, end - value);
                node->append_node(data);
            }
            // Add data to parent node if no data exists yet
            if (!(Flags & parse_no_element_values))
                if (*node->value() == Ch('\0'))
                    node->value(value, end - value);
            // Place zero terminator after value
            if (!(Flags & parse_no_string_terminators))
            {
                Ch ch = *text;
                *end = Ch('\0');
                return ch;      // Return character that ends data; this is required because zero terminator overwritten it
            }
            // Return character that ends data
            return *text;
        }
        // Parse CDATA (<![CDATA[ ... ]]>)
        // CDATA content is stored verbatim - no entity expansion or normalization.
        // Returns the cdata node, or 0 when parse_no_data_nodes is set.
        template<int Flags>
        xml_node<Ch> *parse_cdata(Ch *&text)
        {
            // If CDATA is disabled
            if (Flags & parse_no_data_nodes)
            {
                // Skip until end of cdata
                while (text[0] != Ch(']') || text[1] != Ch(']') || text[2] != Ch('>'))
                {
                    if (!text[0])
                        RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                    ++text;
                }
                text += 3;      // Skip ]]>
                return 0;       // Do not produce CDATA node
            }
            // Skip until end of cdata
            Ch *value = text;
            while (text[0] != Ch(']') || text[1] != Ch(']') || text[2] != Ch('>'))
            {
                if (!text[0])
                    RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                ++text;
            }
            // Create new cdata node
            xml_node<Ch> *cdata = this->allocate_node(node_cdata);
            cdata->value(value, text - value);
            // Place zero terminator after value
            if (!(Flags & parse_no_string_terminators))
                *text = Ch('\0');
            text += 3;      // Skip ]]>
            return cdata;
        }
        // Parse element node
        // text points just past '<'; on return it points past the element's closing tag.
        template<int Flags>
        xml_node<Ch> *parse_element(Ch *&text)
        {
            // Create element node
            xml_node<Ch> *element = this->allocate_node(node_element);
            // Extract element name
            Ch *name = text;
            skip<node_name_pred, Flags>(text);
            if (text == name)
                RAPIDXML_PARSE_ERROR("expected element name", text);
            element->name(name, text - name);
            // Skip whitespace between element name and attributes or >
            skip<whitespace_pred, Flags>(text);
            // Parse attributes, if any
            parse_node_attributes<Flags>(text, element);
            // Determine ending type
            if (*text == Ch('>'))
            {
                // Open element: parse children and data until the matching closing tag
                ++text;
                parse_node_contents<Flags>(text, element);
            }
            else if (*text == Ch('/'))
            {
                // Empty element ("<name/>"): no contents to parse
                ++text;
                if (*text != Ch('>'))
                    RAPIDXML_PARSE_ERROR("expected >", text);
                ++text;
            }
            else
                RAPIDXML_PARSE_ERROR("expected >", text);
            // Place zero terminator after name
            if (!(Flags & parse_no_string_terminators))
                element->name()[element->name_size()] = Ch('\0');
            // Return parsed element
            return element;
        }
        // Determine node type, and parse it
        // text points just past '<'; dispatches on the first character(s) after it.
        // Returns 0 for skipped/unrecognized nodes.
        template<int Flags>
        xml_node<Ch> *parse_node(Ch *&text)
        {
            // Parse proper node type
            switch (text[0])
            {
            // <...
            default:
                // Parse and append element node
                return parse_element<Flags>(text);
            // <?...
            case Ch('?'):
                ++text;     // Skip ?
                if ((text[0] == Ch('x') || text[0] == Ch('X')) &&
                    (text[1] == Ch('m') || text[1] == Ch('M')) &&
                    (text[2] == Ch('l') || text[2] == Ch('L')) &&
                    whitespace_pred::test(text[3]))
                {
                    // '<?xml ' - xml declaration
                    text += 4;      // Skip 'xml '
                    return parse_xml_declaration<Flags>(text);
                }
                else
                {
                    // Parse PI
                    return parse_pi<Flags>(text);
                }
            // <!...
            case Ch('!'):
                // Parse proper subset of <! node
                switch (text[1])
                {
                // <!-
                case Ch('-'):
                    if (text[2] == Ch('-'))
                    {
                        // '<!--' - xml comment
                        text += 3;     // Skip '!--'
                        return parse_comment<Flags>(text);
                    }
                    break;
                // <![
                case Ch('['):
                    if (text[2] == Ch('C') && text[3] == Ch('D') && text[4] == Ch('A') &&
                        text[5] == Ch('T') && text[6] == Ch('A') && text[7] == Ch('['))
                    {
                        // '<![CDATA[' - cdata
                        text += 8;     // Skip '![CDATA['
                        return parse_cdata<Flags>(text);
                    }
                    break;
                // <!D
                case Ch('D'):
                    if (text[2] == Ch('O') && text[3] == Ch('C') && text[4] == Ch('T') &&
                        text[5] == Ch('Y') && text[6] == Ch('P') && text[7] == Ch('E') &&
                        whitespace_pred::test(text[8]))
                    {
                        // '<!DOCTYPE ' - doctype
                        text += 9;      // skip '!DOCTYPE '
                        return parse_doctype<Flags>(text);
                    }
                }   // switch
                // Attempt to skip other, unrecognized node types starting with <!
                ++text;     // Skip !
                while (*text != Ch('>'))
                {
                    if (*text == 0)
                        RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                    ++text;
                }
                ++text;     // Skip '>'
                return 0;   // No node recognized
            }
        }
        // Parse contents of the node - children, data etc.
        // Consumes everything up to and including the node's closing tag.
        template<int Flags>
        void parse_node_contents(Ch *&text, xml_node<Ch> *node)
        {
            // For all children and text
            while (1)
            {
                // Skip whitespace between > and node contents
                Ch *contents_start = text;      // Store start of node contents before whitespace is skipped
                skip<whitespace_pred, Flags>(text);
                Ch next_char = *text;
                // After data nodes, instead of continuing the loop, control jumps here.
                // This is because zero termination inside parse_and_append_data() function
                // would wreak havoc with the above code.
                // Also, skipping whitespace after data nodes is unnecessary.
            after_data_node:
                // Determine what comes next: node closing, child node, data node, or 0?
                switch (next_char)
                {
                // Node closing or child node
                case Ch('<'):
                    if (text[1] == Ch('/'))
                    {
                        // Node closing
                        text += 2;      // Skip '</'
                        if (Flags & parse_validate_closing_tags)
                        {
                            // Skip and validate closing tag name
                            Ch *closing_name = text;
                            skip<node_name_pred, Flags>(text);
                            if (!internal::compare(node->name(), node->name_size(), closing_name, text - closing_name, true))
                                RAPIDXML_PARSE_ERROR("invalid closing tag name", text);
                        }
                        else
                        {
                            // No validation, just skip name
                            skip<node_name_pred, Flags>(text);
                        }
                        // Skip remaining whitespace after node name
                        skip<whitespace_pred, Flags>(text);
                        if (*text != Ch('>'))
                            RAPIDXML_PARSE_ERROR("expected >", text);
                        ++text;     // Skip '>'
                        return;     // Node closed, finished parsing contents
                    }
                    else
                    {
                        // Child node
                        ++text;     // Skip '<'
                        if (xml_node<Ch> *child = parse_node<Flags>(text))
                            node->append_node(child);
                    }
                    break;
                // End of data - error
                case Ch('\0'):
                    RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                // Data node
                default:
                    next_char = parse_and_append_data<Flags>(node, text, contents_start);
                    goto after_data_node;   // Bypass regular processing after data nodes
                }
            }
        }
        // Parse XML attributes of the node
        // Repeats name="value" / name='value' pairs until a non-name character is hit.
        template<int Flags>
        void parse_node_attributes(Ch *&text, xml_node<Ch> *node)
        {
            // For all attributes
            while (attribute_name_pred::test(*text))
            {
                // Extract attribute name
                Ch *name = text;
                ++text;     // Skip first character of attribute name
                skip<attribute_name_pred, Flags>(text);
                if (text == name)
                    RAPIDXML_PARSE_ERROR("expected attribute name", name);
                // Create new attribute
                xml_attribute<Ch> *attribute = this->allocate_attribute();
                attribute->name(name, text - name);
                node->append_attribute(attribute);
                // Skip whitespace after attribute name
                skip<whitespace_pred, Flags>(text);
                // Skip =
                if (*text != Ch('='))
                    RAPIDXML_PARSE_ERROR("expected =", text);
                ++text;
                // Add terminating zero after name
                if (!(Flags & parse_no_string_terminators))
                    attribute->name()[attribute->name_size()] = 0;
                // Skip whitespace after =
                skip<whitespace_pred, Flags>(text);
                // Skip quote and remember if it was ' or "
                Ch quote = *text;
                if (quote != Ch('\'') && quote != Ch('"'))
                    RAPIDXML_PARSE_ERROR("expected ' or \"", text);
                ++text;
                // Extract attribute value and expand char refs in it
                Ch *value = text, *end;
                const int AttFlags = Flags & ~parse_normalize_whitespace;   // No whitespace normalization in attributes
                if (quote == Ch('\''))
                    end = skip_and_expand_character_refs<attribute_value_pred<Ch('\'')>, attribute_value_pure_pred<Ch('\'')>, AttFlags>(text);
                else
                    end = skip_and_expand_character_refs<attribute_value_pred<Ch('"')>, attribute_value_pure_pred<Ch('"')>, AttFlags>(text);
                // Set attribute value
                attribute->value(value, end - value);
                // Make sure that end quote is present
                if (*text != quote)
                    RAPIDXML_PARSE_ERROR("expected ' or \"", text);
                ++text;     // Skip quote
                // Add terminating zero after value
                if (!(Flags & parse_no_string_terminators))
                    attribute->value()[attribute->value_size()] = 0;
                // Skip whitespace after attribute value
                skip<whitespace_pred, Flags>(text);
            }
        }
};
//! \cond internal
namespace internal
{
        // Whitespace (space \n \r \t)
        // 1 at \t (09), \n (0A), \r (0D) and space (20); 0 everywhere else
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_whitespace[256] =
        {
            // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
            0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, // 0
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1
            1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 3
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 4
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 5
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 6
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 7
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 8
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 9
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // A
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // B
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // C
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // D
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // E
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0  // F
        };
        // Node name (anything but space \n \r \t / > ? \0)
        // 0 at \0, \t, \n, \r, space, '/' (2F), '>' (3E), '?' (3F); 1 everywhere else
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_node_name[256] =
        {
            // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
            0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, // 0
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 1
            0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, // 2
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, // 3
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 5
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 7
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 8
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 9
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // A
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // B
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // C
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // D
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // E
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1  // F
        };
        // Text (i.e. PCDATA) (anything but < \0)
        // 0 at \0 (00) and '<' (3C); 1 everywhere else
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_text[256] =
        {
            // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
            0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 1
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 2
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, // 3
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 5
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 7
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 8
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 9
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // A
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // B
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // C
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // D
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // E
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1  // F
        };
        // Text (i.e. PCDATA) that does not require processing when ws normalization is disabled
        // (anything but < \0 &)
        // 0 at \0 (00), '&' (26) and '<' (3C); 1 everywhere else
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_text_pure_no_ws[256] =
        {
            // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
            0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 1
            1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 2
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, // 3
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 5
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 7
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 8
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 9
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // A
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // B
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // C
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // D
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // E
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1  // F
        };
        // Text (i.e. PCDATA) that does not require processing when ws normalization is enabled
        // (anything but < \0 & space \n \r \t)
        // 0 at \0, \t, \n, \r, space (20), '&' (26) and '<' (3C); 1 everywhere else
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_text_pure_with_ws[256] =
        {
            // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
            0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, // 0
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 1
            0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 2
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, // 3
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 5
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 7
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 8
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 9
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // A
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // B
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // C
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // D
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // E
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1  // F
        };
// Attribute name (anything but space \n \r \t / < > = ? ! \0)
template<int Dummy>
const unsigned char lookup_tables<Dummy>::lookup_attribute_name[256] =
{
// 0 1 2 3 4 5 6 7 8 9 A B C D E F
0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, // 0
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 1
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, // 2
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, // 3
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 5
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 7
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 8
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 9
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // A
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // B
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // C
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // D
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // E
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // F
};
// Attribute data with single quote (anything but ' \0)
template<int Dummy>
const unsigned char lookup_tables<Dummy>::lookup_attribute_data_1[256] =
{
// 0 1 2 3 4 5 6 7 8 9 A B C D E F
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 1
1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, // 2
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 3
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 5
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 7
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 8
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 9
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // A
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // B
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // C
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // D
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // E
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // F
};
// Attribute data with single quote that does not require processing (anything but ' \0 &)
template<int Dummy>
const unsigned char lookup_tables<Dummy>::lookup_attribute_data_1_pure[256] =
{
// 0 1 2 3 4 5 6 7 8 9 A B C D E F
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 1
1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, // 2
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 3
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 5
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 7
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 8
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 9
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // A
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // B
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // C
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // D
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // E
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // F
};
// Attribute data with double quote (anything but " \0)
template<int Dummy>
const unsigned char lookup_tables<Dummy>::lookup_attribute_data_2[256] =
{
// 0 1 2 3 4 5 6 7 8 9 A B C D E F
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 1
1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 2
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 3
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 5
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 7
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 8
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 9
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // A
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // B
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // C
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // D
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // E
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // F
};
// Attribute data with double quote that does not require processing (anything but " \0 &)
template<int Dummy>
const unsigned char lookup_tables<Dummy>::lookup_attribute_data_2_pure[256] =
{
// 0 1 2 3 4 5 6 7 8 9 A B C D E F
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 1
1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 2
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 3
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 5
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 7
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 8
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 9
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // A
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // B
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // C
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // D
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // E
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // F
};
// Digits (dec and hex, 255 denotes end of numeric character reference)
template<int Dummy>
const unsigned char lookup_tables<Dummy>::lookup_digits[256] =
{
// 0 1 2 3 4 5 6 7 8 9 A B C D E F
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, // 0
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, // 1
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, // 2
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,255,255,255,255,255,255, // 3
255, 10, 11, 12, 13, 14, 15,255,255,255,255,255,255,255,255,255, // 4
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, // 5
255, 10, 11, 12, 13, 14, 15,255,255,255,255,255,255,255,255,255, // 6
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, // 7
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, // 8
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, // 9
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, // A
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, // B
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, // C
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, // D
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, // E
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255 // F
};
// Upper case conversion
template<int Dummy>
const unsigned char lookup_tables<Dummy>::lookup_upcase[256] =
{
// 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, A B C D E F
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, // 0
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, // 1
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, // 2
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, // 3
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, // 4
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, // 5
96, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, // 6
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 123,124,125,126,127, // 7
128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, // 8
144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, // 9
160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, // A
176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, // B
192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, // C
208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223, // D
224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239, // E
240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255 // F
};
}
//! \endcond
}
// Undefine internal macros
#undef RAPIDXML_PARSE_ERROR
// On MSVC, restore warnings state
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif
| 118,341 |
C++
| 44.568733 | 189 | 0.475397 |
daniel-kun/omni/interface/rapidxml/rapidxml_print.hpp
|
#ifndef RAPIDXML_PRINT_HPP_INCLUDED
#define RAPIDXML_PRINT_HPP_INCLUDED
// Copyright (C) 2006, 2009 Marcin Kalicinski
// Version 1.13
// Revision $DateTime: 2009/05/13 01:46:17 $
//! \file rapidxml_print.hpp This file contains rapidxml printer implementation
#include "rapidxml.hpp"
// Only include streams if not disabled
#ifndef RAPIDXML_NO_STREAMS
#include <ostream>
#include <iterator>
#endif
namespace rapidxml
{
///////////////////////////////////////////////////////////////////////
// Printing flags
const int print_no_indenting = 0x1; //!< Printer flag instructing the printer to suppress indenting of XML. See print() function.
///////////////////////////////////////////////////////////////////////
// Internal
//! \cond internal
namespace internal
{
///////////////////////////////////////////////////////////////////////////
// Internal character operations
// Copy characters from given range to given output iterator
template<class OutIt, class Ch>
inline OutIt copy_chars(const Ch *begin, const Ch *end, OutIt out)
{
while (begin != end)
*out++ = *begin++;
return out;
}
// Copy characters from given range to given output iterator and expand
// characters into references (< > ' " &)
template<class OutIt, class Ch>
inline OutIt copy_and_expand_chars(const Ch *begin, const Ch *end, Ch noexpand, OutIt out)
{
while (begin != end)
{
if (*begin == noexpand)
{
*out++ = *begin; // No expansion, copy character
}
else
{
switch (*begin)
{
case Ch('<'):
*out++ = Ch('&'); *out++ = Ch('l'); *out++ = Ch('t'); *out++ = Ch(';');
break;
case Ch('>'):
*out++ = Ch('&'); *out++ = Ch('g'); *out++ = Ch('t'); *out++ = Ch(';');
break;
case Ch('\''):
*out++ = Ch('&'); *out++ = Ch('a'); *out++ = Ch('p'); *out++ = Ch('o'); *out++ = Ch('s'); *out++ = Ch(';');
break;
case Ch('"'):
*out++ = Ch('&'); *out++ = Ch('q'); *out++ = Ch('u'); *out++ = Ch('o'); *out++ = Ch('t'); *out++ = Ch(';');
break;
case Ch('&'):
*out++ = Ch('&'); *out++ = Ch('a'); *out++ = Ch('m'); *out++ = Ch('p'); *out++ = Ch(';');
break;
default:
*out++ = *begin; // No expansion, copy character
}
}
++begin; // Step to next character
}
return out;
}
// Fill given output iterator with repetitions of the same character
template<class OutIt, class Ch>
inline OutIt fill_chars(OutIt out, int n, Ch ch)
{
for (int i = 0; i < n; ++i)
*out++ = ch;
return out;
}
// Find character
template<class Ch, Ch ch>
inline bool find_char(const Ch *begin, const Ch *end)
{
while (begin != end)
if (*begin++ == ch)
return true;
return false;
}
///////////////////////////////////////////////////////////////////////////
// Internal printing operations
// Print node
template<class OutIt, class Ch>
inline OutIt print_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
{
// Print proper node type
switch (node->type())
{
// Document
case node_document:
out = print_children(out, node, flags, indent);
break;
// Element
case node_element:
out = print_element_node(out, node, flags, indent);
break;
// Data
case node_data:
out = print_data_node(out, node, flags, indent);
break;
// CDATA
case node_cdata:
out = print_cdata_node(out, node, flags, indent);
break;
// Declaration
case node_declaration:
out = print_declaration_node(out, node, flags, indent);
break;
// Comment
case node_comment:
out = print_comment_node(out, node, flags, indent);
break;
// Doctype
case node_doctype:
out = print_doctype_node(out, node, flags, indent);
break;
// Pi
case node_pi:
out = print_pi_node(out, node, flags, indent);
break;
// Unknown
default:
assert(0);
break;
}
// If indenting not disabled, add line break after node
if (!(flags & print_no_indenting))
*out = Ch('\n'), ++out;
// Return modified iterator
return out;
}
        // Print children of the node
        // Recursively prints every child of *node* in document order at the
        // given indent level; returns the advanced output iterator.
        template<class OutIt, class Ch>
        inline OutIt print_children(OutIt out, const xml_node<Ch> *node, int flags, int indent)
        {
            for (xml_node<Ch> *child = node->first_node(); child; child = child->next_sibling())
                out = print_node(out, child, flags, indent);
            return out;
        }
        // Print attributes of the node
        // Emits ' name="value"' for every attribute that has both a name and a
        // value. The quote character is chosen so it never clashes with the
        // value: values containing '"' are wrapped in single quotes, everything
        // else in double quotes; the quote kept inside the value is passed to
        // copy_and_expand_chars as the no-expand character.
        // (flags is currently unused here.)
        template<class OutIt, class Ch>
        inline OutIt print_attributes(OutIt out, const xml_node<Ch> *node, int flags)
        {
            for (xml_attribute<Ch> *attribute = node->first_attribute(); attribute; attribute = attribute->next_attribute())
            {
                if (attribute->name() && attribute->value())
                {
                    // Print attribute name
                    *out = Ch(' '), ++out;
                    out = copy_chars(attribute->name(), attribute->name() + attribute->name_size(), out);
                    *out = Ch('='), ++out;
                    // Print attribute value using appropriate quote type
                    if (find_char<Ch, Ch('"')>(attribute->value(), attribute->value() + attribute->value_size()))
                    {
                        *out = Ch('\''), ++out;
                        out = copy_and_expand_chars(attribute->value(), attribute->value() + attribute->value_size(), Ch('"'), out);
                        *out = Ch('\''), ++out;
                    }
                    else
                    {
                        *out = Ch('"'), ++out;
                        out = copy_and_expand_chars(attribute->value(), attribute->value() + attribute->value_size(), Ch('\''), out);
                        *out = Ch('"'), ++out;
                    }
                }
            }
            return out;
        }
        // Print data node
        // Emits the node's value as PCDATA, expanding all special characters
        // (< > ' " &) into entity references; indents first unless disabled.
        template<class OutIt, class Ch>
        inline OutIt print_data_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
        {
            assert(node->type() == node_data);
            if (!(flags & print_no_indenting))
                out = fill_chars(out, indent, Ch('\t'));
            out = copy_and_expand_chars(node->value(), node->value() + node->value_size(), Ch(0), out);
            return out;
        }
        // Print CDATA node (original comment said "data node" — copy/paste slip)
        // Emits the node's value wrapped in <![CDATA[ ... ]]> without any
        // entity expansion, since CDATA content is taken literally.
        template<class OutIt, class Ch>
        inline OutIt print_cdata_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
        {
            assert(node->type() == node_cdata);
            if (!(flags & print_no_indenting))
                out = fill_chars(out, indent, Ch('\t'));
            *out = Ch('<'); ++out;
            *out = Ch('!'); ++out;
            *out = Ch('['); ++out;
            *out = Ch('C'); ++out;
            *out = Ch('D'); ++out;
            *out = Ch('A'); ++out;
            *out = Ch('T'); ++out;
            *out = Ch('A'); ++out;
            *out = Ch('['); ++out;
            out = copy_chars(node->value(), node->value() + node->value_size(), out);
            *out = Ch(']'); ++out;
            *out = Ch(']'); ++out;
            *out = Ch('>'); ++out;
            return out;
        }
        // Print element node
        // Emits <name attrs.../> for childless nodes; otherwise <name attrs...>,
        // the content (inline value, sole data child inline, or all children
        // indented one level deeper), then </name>.
        template<class OutIt, class Ch>
        inline OutIt print_element_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
        {
            assert(node->type() == node_element);
            // Print element name and attributes, if any
            if (!(flags & print_no_indenting))
                out = fill_chars(out, indent, Ch('\t'));
            *out = Ch('<'), ++out;
            out = copy_chars(node->name(), node->name() + node->name_size(), out);
            out = print_attributes(out, node, flags);
            // If node is childless
            if (node->value_size() == 0 && !node->first_node())
            {
                // Print childless node tag ending
                *out = Ch('/'), ++out;
                *out = Ch('>'), ++out;
            }
            else
            {
                // Print normal node tag ending
                *out = Ch('>'), ++out;
                // Test if node contains a single data node only (and no other nodes)
                xml_node<Ch> *child = node->first_node();
                if (!child)
                {
                    // If node has no children, only print its value without indenting
                    out = copy_and_expand_chars(node->value(), node->value() + node->value_size(), Ch(0), out);
                }
                else if (child->next_sibling() == 0 && child->type() == node_data)
                {
                    // If node has a sole data child, only print its value without indenting
                    out = copy_and_expand_chars(child->value(), child->value() + child->value_size(), Ch(0), out);
                }
                else
                {
                    // Print all children with full indenting
                    if (!(flags & print_no_indenting))
                        *out = Ch('\n'), ++out;
                    out = print_children(out, node, flags, indent + 1);
                    if (!(flags & print_no_indenting))
                        out = fill_chars(out, indent, Ch('\t'));
                }
                // Print node end
                *out = Ch('<'), ++out;
                *out = Ch('/'), ++out;
                out = copy_chars(node->name(), node->name() + node->name_size(), out);
                *out = Ch('>'), ++out;
            }
            return out;
        }
        // Print declaration node
        // Emits <?xml attrs...?>. NOTE(review): unlike the other printers this
        // one does not assert node->type() == node_declaration — presumably an
        // omission in the original; behavior kept as-is.
        template<class OutIt, class Ch>
        inline OutIt print_declaration_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
        {
            // Print declaration start
            if (!(flags & print_no_indenting))
                out = fill_chars(out, indent, Ch('\t'));
            *out = Ch('<'), ++out;
            *out = Ch('?'), ++out;
            *out = Ch('x'), ++out;
            *out = Ch('m'), ++out;
            *out = Ch('l'), ++out;
            // Print attributes
            out = print_attributes(out, node, flags);
            // Print declaration end
            *out = Ch('?'), ++out;
            *out = Ch('>'), ++out;
            return out;
        }
        // Print comment node
        // Emits <!--value-->; the value is copied verbatim (no entity expansion).
        template<class OutIt, class Ch>
        inline OutIt print_comment_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
        {
            assert(node->type() == node_comment);
            if (!(flags & print_no_indenting))
                out = fill_chars(out, indent, Ch('\t'));
            *out = Ch('<'), ++out;
            *out = Ch('!'), ++out;
            *out = Ch('-'), ++out;
            *out = Ch('-'), ++out;
            out = copy_chars(node->value(), node->value() + node->value_size(), out);
            *out = Ch('-'), ++out;
            *out = Ch('-'), ++out;
            *out = Ch('>'), ++out;
            return out;
        }
        // Print doctype node
        // Emits <!DOCTYPE value>; the value is copied verbatim.
        template<class OutIt, class Ch>
        inline OutIt print_doctype_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
        {
            assert(node->type() == node_doctype);
            if (!(flags & print_no_indenting))
                out = fill_chars(out, indent, Ch('\t'));
            *out = Ch('<'), ++out;
            *out = Ch('!'), ++out;
            *out = Ch('D'), ++out;
            *out = Ch('O'), ++out;
            *out = Ch('C'), ++out;
            *out = Ch('T'), ++out;
            *out = Ch('Y'), ++out;
            *out = Ch('P'), ++out;
            *out = Ch('E'), ++out;
            *out = Ch(' '), ++out;
            out = copy_chars(node->value(), node->value() + node->value_size(), out);
            *out = Ch('>'), ++out;
            return out;
        }
        // Print pi node
        // Emits a processing instruction: <?name value?>.
        template<class OutIt, class Ch>
        inline OutIt print_pi_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
        {
            assert(node->type() == node_pi);
            if (!(flags & print_no_indenting))
                out = fill_chars(out, indent, Ch('\t'));
            *out = Ch('<'), ++out;
            *out = Ch('?'), ++out;
            out = copy_chars(node->name(), node->name() + node->name_size(), out);
            *out = Ch(' '), ++out;
            out = copy_chars(node->value(), node->value() + node->value_size(), out);
            *out = Ch('?'), ++out;
            *out = Ch('>'), ++out;
            return out;
        }
}
//! \endcond
///////////////////////////////////////////////////////////////////////////
// Printing
    //! Prints XML to given output iterator.
    //! \param out Output iterator to print to.
    //! \param node Node to be printed. Pass xml_document to print entire document.
    //! \param flags Flags controlling how XML is printed; pass print_no_indenting to suppress tabs and line breaks.
    //! \return Output iterator pointing to position immediately after last character of printed text.
    template<class OutIt, class Ch>
    inline OutIt print(OutIt out, const xml_node<Ch> &node, int flags = 0)
    {
        return internal::print_node(out, &node, flags, 0);
    }
#ifndef RAPIDXML_NO_STREAMS
    //! Prints XML to given output stream.
    //! Convenience wrapper around the iterator overload using std::ostream_iterator.
    //! \param out Output stream to print to.
    //! \param node Node to be printed. Pass xml_document to print entire document.
    //! \param flags Flags controlling how XML is printed.
    //! \return Output stream.
    template<class Ch>
    inline std::basic_ostream<Ch> &print(std::basic_ostream<Ch> &out, const xml_node<Ch> &node, int flags = 0)
    {
        print(std::ostream_iterator<Ch>(out), node, flags);
        return out;
    }
    //! Prints formatted XML to given output stream. Uses default printing flags. Use print() function to customize printing process.
    //! \param out Output stream to print to.
    //! \param node Node to be printed.
    //! \return Output stream.
    template<class Ch>
    inline std::basic_ostream<Ch> &operator <<(std::basic_ostream<Ch> &out, const xml_node<Ch> &node)
    {
        return print(out, node);
    }
#endif
}
#endif
| 15,671 |
C++
| 36.137441 | 135 | 0.438389 |
daniel-kun/omni/interface/rapidxml/rapidxml_iterators.hpp
|
#ifndef RAPIDXML_ITERATORS_HPP_INCLUDED
#define RAPIDXML_ITERATORS_HPP_INCLUDED
// Copyright (C) 2006, 2009 Marcin Kalicinski
// Version 1.13
// Revision $DateTime: 2009/05/13 01:46:17 $
//! \file rapidxml_iterators.hpp This file contains rapidxml iterators
#include "rapidxml.hpp"
namespace rapidxml
{
//! Iterator of child nodes of xml_node
template<class Ch>
class node_iterator
{
public:
typedef typename xml_node<Ch> value_type;
typedef typename xml_node<Ch> &reference;
typedef typename xml_node<Ch> *pointer;
typedef std::ptrdiff_t difference_type;
typedef std::bidirectional_iterator_tag iterator_category;
node_iterator()
: m_node(0)
{
}
node_iterator(xml_node<Ch> *node)
: m_node(node->first_node())
{
}
reference operator *() const
{
assert(m_node);
return *m_node;
}
pointer operator->() const
{
assert(m_node);
return m_node;
}
node_iterator& operator++()
{
assert(m_node);
m_node = m_node->next_sibling();
return *this;
}
node_iterator operator++(int)
{
node_iterator tmp = *this;
++this;
return tmp;
}
node_iterator& operator--()
{
assert(m_node && m_node->previous_sibling());
m_node = m_node->previous_sibling();
return *this;
}
node_iterator operator--(int)
{
node_iterator tmp = *this;
++this;
return tmp;
}
bool operator ==(const node_iterator<Ch> &rhs)
{
return m_node == rhs.m_node;
}
bool operator !=(const node_iterator<Ch> &rhs)
{
return m_node != rhs.m_node;
}
private:
xml_node<Ch> *m_node;
};
//! Iterator of child attributes of xml_node
template<class Ch>
class attribute_iterator
{
public:
typedef typename xml_attribute<Ch> value_type;
typedef typename xml_attribute<Ch> &reference;
typedef typename xml_attribute<Ch> *pointer;
typedef std::ptrdiff_t difference_type;
typedef std::bidirectional_iterator_tag iterator_category;
attribute_iterator()
: m_attribute(0)
{
}
attribute_iterator(xml_node<Ch> *node)
: m_attribute(node->first_attribute())
{
}
reference operator *() const
{
assert(m_attribute);
return *m_attribute;
}
pointer operator->() const
{
assert(m_attribute);
return m_attribute;
}
attribute_iterator& operator++()
{
assert(m_attribute);
m_attribute = m_attribute->next_attribute();
return *this;
}
attribute_iterator operator++(int)
{
attribute_iterator tmp = *this;
++this;
return tmp;
}
attribute_iterator& operator--()
{
assert(m_attribute && m_attribute->previous_attribute());
m_attribute = m_attribute->previous_attribute();
return *this;
}
attribute_iterator operator--(int)
{
attribute_iterator tmp = *this;
++this;
return tmp;
}
bool operator ==(const attribute_iterator<Ch> &rhs)
{
return m_attribute == rhs.m_attribute;
}
bool operator !=(const attribute_iterator<Ch> &rhs)
{
return m_attribute != rhs.m_attribute;
}
private:
xml_attribute<Ch> *m_attribute;
};
}
#endif
| 3,918 |
C++
| 21.394286 | 70 | 0.504339 |
daniel-kun/omni/concepts/prototype/web/backend/README.md
|
[<img src="https://img.shields.io/travis/playframework/play-scala-starter-example.svg"/>](https://travis-ci.org/playframework/play-scala-starter-example)
# Play Scala Starter
This is a starter application that shows how Play works. Please see the documentation at https://www.playframework.com/documentation/latest/Home for more details.
## Running
Run this using [sbt](http://www.scala-sbt.org/). If you downloaded this project from http://www.playframework.com/download then you'll find a prepackaged version of sbt in the project directory:
```
sbt run
```
And then go to http://localhost:9000 to see the running web application.
There are several demonstration files available in this template.
## Controllers
- HomeController.scala:
Shows how to handle simple HTTP requests.
- AsyncController.scala:
Shows how to do asynchronous programming when handling a request.
- CountController.scala:
Shows how to inject a component into a controller and use the component when
handling requests.
## Components
- Module.scala:
Shows how to use Guice to bind all the components needed by your application.
- Counter.scala:
An example of a component that contains state, in this case a simple counter.
- ApplicationTimer.scala:
An example of a component that starts when the application starts and stops
when the application stops.
## Filters
- Filters.scala:
Creates the list of HTTP filters used by your application.
- ExampleFilter.scala
A simple filter that adds a header to every response.
| 1,536 |
Markdown
| 25.5 | 195 | 0.767578 |
daniel-kun/omni/concepts/prototype/web/backend/conf/logback.xml
|
<!-- https://www.playframework.com/documentation/latest/SettingsLogger -->
<configuration>
<conversionRule conversionWord="coloredLevel" converterClass="play.api.libs.logback.ColoredLevel" />
<appender name="FILE" class="ch.qos.logback.core.FileAppender">
<file>${application.home:-.}/logs/application.log</file>
<encoder>
<pattern>%date [%level] from %logger in %thread - %message%n%xException</pattern>
</encoder>
</appender>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%coloredLevel %logger{15} - %message%n%xException{10}</pattern>
</encoder>
</appender>
<appender name="ASYNCFILE" class="ch.qos.logback.classic.AsyncAppender">
<appender-ref ref="FILE" />
</appender>
<appender name="ASYNCSTDOUT" class="ch.qos.logback.classic.AsyncAppender">
<appender-ref ref="STDOUT" />
</appender>
<logger name="play" level="INFO" />
<logger name="application" level="DEBUG" />
<!-- Off these ones as they are annoying, and anyway we manage configuration ourselves -->
<logger name="com.avaje.ebean.config.PropertyMapLoader" level="OFF" />
<logger name="com.avaje.ebeaninternal.server.core.XmlConfigLoader" level="OFF" />
<logger name="com.avaje.ebeaninternal.server.lib.BackgroundThread" level="OFF" />
<logger name="com.gargoylesoftware.htmlunit.javascript" level="OFF" />
<root level="WARN">
<appender-ref ref="ASYNCFILE" />
<appender-ref ref="ASYNCSTDOUT" />
</root>
</configuration>
| 1,516 |
XML
| 35.119047 | 102 | 0.701847 |
Strevia/omni_warehouse/PACKAGE-LICENSES/omni.warehouse_creator-LICENSE.md
|
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
| 412 |
Markdown
| 57.999992 | 74 | 0.839806 |
Strevia/omni_warehouse/config/extension.toml
|
[package]
# Semantic Versionning is used: https://semver.org/
version = "0.2.2"
# The title and description fields are primarily for displaying extension info in UI
title = "Warehouse Creator"
description="A simple extension to randomly generate warehouse environments."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"
preview_image = "data/preview.png"
# URL of the extension source repository.
repository = ""
icon = "data/icon.png"
# One of categories for UI.
category = "Warehouse Creator"
# Keywords for the extension
keywords = ["kit", "digitaltwin", "warehouse", "generate"]
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
"omni.usd" = {}
# Main python module this extension provides, it will be publicly available as "import omni.warehouse_creator".
[[python.module]]
name = "omni.warehouse_creator"
| 917 |
TOML
| 25.999999 | 105 | 0.736096 |
Strevia/omni_warehouse/config/extension.gen.toml
|
[package]
archivePath = "./archives/omni.warehouse_creator-0.2.2.zip"
repository = "https://gitlab-master.nvidia.com/omniverse/kit-extensions/warehouse-creator-extension"
[package.publish]
date = 1676320297
kitVersion = "104.2+release.96.cda0f258.tc"
buildNumber = "0.2.2+master.91.b2c8d76c.tc"
repoName = "warehouse_creator"
| 372 |
TOML
| 32.909088 | 104 | 0.677419 |
Strevia/omni_warehouse/omni/warehouse_creator/__init__.py
|
from .python.scripts.__init__ import *
| 39 |
Python
| 18.999991 | 38 | 0.692308 |
Strevia/omni_warehouse/omni/warehouse_creator/python/scripts/recipes.py
|
import random, math
NUCLEUS_SERVER = "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/ArchVis/Industrial/"
# This file contains dictionaries for different genration recipes - you can create your own here and expand the database!
# You can also edit existing to see how your rules generate scenes!
# Rule based placers (XYZ Locations based on Procedural or User-selected layout)
# X = columns; distance between racks (675 is width of rack) // Y = height // Z = rows
# R A C K S
divisor = 2
def racks_procedural():
    """Rack slots for the procedural layout.

    Builds 13 columns x 5 rows of rack positions, columns spaced 675 units
    apart along X.  Unlike the U/L/I layout helpers, this recipe keeps every
    slot in the "empty rack" pool and fills none of them here.

    Returns:
        tuple[list, list]: ``(filledRackPos, emptyRackPos)`` where
        ``filledRackPos`` is always empty and ``emptyRackPos`` holds all
        65 ``(x, y, z)`` tuples.
    """
    positions = []
    ROWS = 13
    for i in range(ROWS):
        positions.extend(
            [
                (2800 - (i * 675), 0, 1888),
                (2800 - (i * 675), 0, 962),
                (2800 - (i * 675), 0, 513),
                (2800 - (i * 675), 0, -1034),
                (2800 - (i * 675), 0, -1820),
            ]
        )
    # The original computed a random sample size here (totalSample/minSample/
    # sampleSize) but never used it; the dead code — and its hidden
    # random.randint() draw that perturbed the RNG stream — has been removed.
    filledRackPos = []
    emptyRackPos = positions
    return filledRackPos, emptyRackPos
def empty_racks_Umode():
    """Rack slots for the U-shaped layout, randomly split into filled and
    empty pools (same positions and same RNG call sequence as before)."""
    slots = []
    for col in range(9):
        x = 2800 - (col * 675)
        if col < 5:
            slots.extend([(x, 0, 1888), (x, 0, -1820), (x, 0, -2225)])
        else:
            slots.extend([(x, 0, 1888), (x, 0, 962), (x, 0, 513), (x, 0, -1034), (x, 0, -1820)])
    total = len(slots)
    lower = math.floor(total / divisor)
    picked = random.randint(lower, total)
    # Pull `picked` random slots out of the pool; whatever remains stays empty.
    filledRackPos = [slots.pop(random.randrange(len(slots))) for _ in range(picked)]
    return filledRackPos, slots
def empty_racks_Lmode():
    """Rack slots for the L-shaped layout, randomly split into filled and
    empty pools (same positions and same RNG call sequence as before)."""
    slots = []
    for col in range(9):
        x = 2800 - (col * 675)
        if col < 5:
            slots.extend([(x, 0, 1888), (x, 0, 962), (x, 0, 513)])
        else:
            slots.extend([(x, 0, 1888), (x, 0, 962), (x, 0, 513), (x, 0, -1034), (x, 0, -1820)])
    total = len(slots)
    lower = math.floor(total / divisor)
    picked = random.randint(lower, total)
    # Pull `picked` random slots out of the pool; whatever remains stays empty.
    filledRackPos = [slots.pop(random.randrange(len(slots))) for _ in range(picked)]
    return filledRackPos, slots
def empty_racks_Imode():
    """Rack slots for the I-shaped layout (three rows per column), randomly
    split into filled and empty pools (RNG call sequence unchanged)."""
    slots = []
    for col in range(9):
        x = 2800 - (col * 675)
        slots.extend([(x, 0, 962), (x, 0, 513), (x, 0, -1034)])
    total = len(slots)
    lower = math.floor(total / divisor)
    picked = random.randint(lower, total)
    # Pull `picked` random slots out of the pool; whatever remains stays empty.
    filledRackPos = [slots.pop(random.randrange(len(slots))) for _ in range(picked)]
    return filledRackPos, slots
## P I L E S
def piles_placer_procedural():
    """Pile drop points for the procedural layout: three piles per column,
    nine columns spaced 675 units apart on X."""
    spots = []
    for col in range(9):
        spots.append((2744.5 - (col * 675), 0, 1384))
        spots.append((2947 - (col * 675), 0, 35))
        spots.append((2947 - (col * 675), 0, -1440))
    print("pile positions (procedural): ", spots)
    return spots
def piles_placer_Umode():
    """Pile drop points for the U-shaped layout: the first five columns skip
    the middle (z=35) pile row."""
    spots = []
    for col in range(9):
        lead = 2744.5 - (col * 675)
        tail = 2947 - (col * 675)
        if col < 5:
            spots += [(lead, 0, 1384), (tail, 0, -1440)]
        else:
            spots += [(lead, 0, 1384), (tail, 0, 35), (tail, 0, -1440)]
    return spots
def piles_placer_Imode():
    """Pile drop points for the I-shaped layout: three piles per column."""
    return [
        pos
        for col in range(9)
        for pos in (
            (2744.5 - (col * 675), 0, 1384),
            (2947 - (col * 675), 0, 35),
            (2947 - (col * 675), 0, -1440),
        )
    ]
def piles_placer_Lmode():
    """Pile drop points for the L-shaped layout: the first five columns skip
    the far (z=-1440) pile row."""
    spots = []
    for col in range(9):
        lead = 2744.5 - (col * 675)
        tail = 2947 - (col * 675)
        if col < 5:
            spots += [(lead, 0, 1384), (tail, 0, 35)]
        else:
            spots += [(lead, 0, 1384), (tail, 0, 35), (tail, 0, -1440)]
    return spots
## R A I L I N G S
def railings_placer_procedural():
    """Railing positions for the procedural layout: 17 segments along the
    central aisle, spaced 337.5 units apart on X."""
    return [(3017 - (k * 337.5), 0, -119) for k in range(17)]
def railings_placer_Lmode():
    """Railing positions for the L-shaped layout (same full run as procedural)."""
    return [(3017 - (seg * 337.5), 0, -119) for seg in range(17)]
def railings_placer_Umode():
    """Railing positions for the U-shaped layout: only segments 9..16."""
    return [(3017 - (seg * 337.5), 0, -119) for seg in range(9, 17)]
def railings_placer_Imode():
    """Railing positions for the I-shaped layout (full 17-segment run)."""
    return [(3017 - (seg * 337.5), 0, -119) for seg in range(17)]
# R O B O T / F O R K L I F T
def robo_fork_placer_procedural(posXYZ):
    """Vehicle positions: 17 spots marching along -X from the given origin.

    posXYZ: (x, y, z) tuple; x is offset by 337.5 per spot, y/z are kept.
    """
    base_x, y, z = posXYZ[0], posXYZ[1], posXYZ[2]
    return [(base_x - (spot * 337.5), y, z) for spot in range(17)]
def robo_fork_placer_Lmode(posXYZ):
    """Vehicle positions for the L-shaped layout: spots 10..16 only."""
    base_x, y, z = posXYZ[0], posXYZ[1], posXYZ[2]
    return [(base_x - (spot * 337.5), y, z) for spot in range(10, 17)]
def robo_fork_placer_Umode(posXYZ):
    """Vehicle positions for the U-shaped layout: spots 9..16 only."""
    base_x, y, z = posXYZ[0], posXYZ[1], posXYZ[2]
    return [(base_x - (spot * 337.5), y, z) for spot in range(9, 17)]
def robo_fork_placer_Imode(posXYZ):
    """Vehicle positions for the I-shaped layout (full 17-spot run)."""
    base_x, y, z = posXYZ[0], posXYZ[1], posXYZ[2]
    return [(base_x - (spot * 337.5), y, z) for spot in range(17)]
# Store rack position data
# Each racks_* helper returns a pair (filled positions, empty positions);
# the split between filled and empty is randomized once at import time, so
# these module-level tuples are fixed for the lifetime of the session.
filledProcMode, emptyProcMode = racks_procedural()
filledUMode, emptyUMode = empty_racks_Umode()
filledLMode, emptyLMode = empty_racks_Lmode()
filledIMode, emptyIMode = empty_racks_Imode()
warehouse_recipe = {
    # First step is to identify what mode the generation is, we have 4 modes. By default, we will keep it at procedural.
    # If it is a customized generation, we can update this value to the layout type chosen from the UI
    # P.S : "procedural" mode is basically I-Shaped (Imode) with all objects selected
    "mode": "procedural",
    # Then, we point to asset paths, to pick one at random and spawn at specific positions
    "empty_racks": f"{NUCLEUS_SERVER}Shelves/",
    "filled_racks": f"{NUCLEUS_SERVER}Racks/",
    "piles": f"{NUCLEUS_SERVER}Piles/",
    "railings": f"{NUCLEUS_SERVER}Railing/",
    "forklift": f"http://omniverse-content-staging.s3-us-west-2.amazonaws.com/Assets/Isaac/2022.1/Isaac/Props/Forklift/",
    "robot": f"http://omniverse-content-staging.s3-us-west-2.amazonaws.com/Assets/Isaac/2022.1/Isaac/Robots/Transporter/",
    # we can also have stand-alone assets, that are directly spawned in specific positions
    "forklift_asset": ["forklift.usd"],
    "robot_asset": ["transporter.usd"],
    # We are also adding other assets from the paths above to choose from
    "empty_racks_asset": ["RackLargeEmpty_A1.usd", "RackLargeEmpty_A2.usd"],
    "filled_racks_asset": [
        "RackLarge_A1.usd",
        "RackLarge_A2.usd",
        "RackLarge_A3.usd",
        "RackLarge_A4.usd",
        "RackLarge_A5.usd",
        "RackLarge_A6.usd",
        "RackLarge_A7.usd",
        "RackLarge_A8.usd",
        "RackLarge_A9.usd",
    ],
    "piles_asset": [
        "WarehousePile_A1.usd",
        "WarehousePile_A2.usd",
        "WarehousePile_A3.usd",
        "WarehousePile_A4.usd",
        "WarehousePile_A5.usd",
        "WarehousePile_A6.usd",
        "WarehousePile_A7.usd",
    ],
    "railings_asset": ["MetalFencing_A1.usd", "MetalFencing_A2.usd", "MetalFencing_A3.usd"],
    # Now, we have a sample space of positions within the default warehouse shell these objects can go to. We can randomly
    # spawn prims into randomly selected positions from this sample space. These are either generated by placer functions,
    # or hardcoded for standalone assets
    # Empty and Filled racks both have similar dimensions, so we reuse the positions for racks
    "filled_racks_procedural": filledProcMode,
    "empty_racks_procedural": emptyProcMode,
    "filled_racks_Umode": filledUMode,
    "empty_racks_Umode": emptyUMode,
    "filled_racks_Lmode": filledLMode,
    "empty_racks_Lmode": emptyLMode,
    "filled_racks_Imode": filledIMode,
    "empty_racks_Imode": emptyIMode,
    # Piles (Rules don't change based on layout mode here. Feel free to update rules)
    "piles_procedural": piles_placer_procedural(),
    "piles_Umode": piles_placer_Umode(),
    "piles_Lmode": piles_placer_Lmode(),
    "piles_Imode": piles_placer_Imode(),
    # Railings (Similar to piles)
    "railings_procedural": railings_placer_procedural(),
    "railings_Umode": railings_placer_Umode(),
    "railings_Lmode": railings_placer_Lmode(),
    "railings_Imode": railings_placer_Imode(),
    # Forklift and Robot (positions keyed off fixed origins; robot rides at y=8.2)
    "forklift_procedural": robo_fork_placer_procedural((2500, 0, -333)),
    "forklift_Umode": robo_fork_placer_Umode((2500, 0, -333)),
    "forklift_Lmode": robo_fork_placer_Lmode((2500, 0, -333)),
    "forklift_Imode": robo_fork_placer_Imode((2500, 0, -333)),
    "robot_procedural": robo_fork_placer_procedural((3017, 8.2, -698)),
    "robot_Umode": robo_fork_placer_Umode((3017, 8.2, -698)),
    "robot_Lmode": robo_fork_placer_Lmode((3017, 8.2, -698)),
    "robot_Imode": robo_fork_placer_Imode((3017, 8.2, -698)),
}
| 9,688 |
Python
| 36.554263 | 122 | 0.594034 |
Strevia/omni_warehouse/omni/warehouse_creator/python/scripts/extension.py
|
# Import necessary libraries
import omni.ext
from .warehouse_window_base import *
# Main Class
# Main Class
class WarehouseCreator(omni.ext.IExt):
    """Extension entry point: owns the Warehouse Creator window lifecycle."""

    def on_startup(self, ext_id):
        """Create the UI window, keep a handle to it, and build its contents."""
        window = WarehouseCreatorWindow()
        self.wh_creator = window
        window._build_content()
        print("[omni.warehouse] WarehouseCreator started")

    def on_shutdown(self):
        """Tear the window down (restores the render settings it changed)."""
        self.wh_creator.destroy()
        print("[omni.warehouse] WarehouseCreator shutdown")
| 442 |
Python
| 28.533331 | 59 | 0.696833 |
Strevia/omni_warehouse/omni/warehouse_creator/python/scripts/__init__.py
|
from .extension import *
| 24 |
Python
| 23.999976 | 24 | 0.791667 |
Strevia/omni_warehouse/omni/warehouse_creator/python/scripts/warehouse_window_base.py
|
import os, platform
import omni.ext
import omni.ui as ui
from omni.ui.workspace_utils import RIGHT
from .warehouse_helpers import *
import carb
WINDOW_TITLE = "Warehouse Creator"
class WarehouseCreatorWindow():
    def __init__(self):
        """Set up render settings, resolve the bundled UI image folder, and
        create (but not populate) the main extension window."""
        # Render Setting; Enable ambient lighting
        self._system = platform.system()
        carb.settings.get_settings().set("/rtx/sceneDb/ambientLightIntensity", 1.5)
        if self._system == "Linux":
            # Linux renderer needs a stronger indirect-diffuse boost (see CHANGELOG 0.2.2)
            carb.settings.get_settings().set("/rtx/indirectDiffuse/scalingFactor", 10.0)
        from os.path import dirname, abspath
        # Walk five levels up from this file to the extension root, then into data/UI/
        path = dirname(dirname(dirname(dirname(dirname(__file__)))))
        path = path + "/data/UI/"
        self.UI_IMAGE_SERVER = path
        self._usd_context = omni.usd.get_context()
        self._wh_helper = wh_helpers()
        # Main extension window (Width is 0 to fit the window to UI elements)
        self._window = ui.Window(WINDOW_TITLE, width=0, height=800)
        self._window.deferred_dock_in("Stage", ui.DockPolicy.CURRENT_WINDOW_IS_ACTIVE)
    def destroy(self):
        """Restore the render settings changed in __init__ and drop the window."""
        # Render Setting; Disable ambient lighting (default)
        carb.settings.get_settings().set("/rtx/sceneDb/ambientLightIntensity", 0.0)
        carb.settings.get_settings().set("/rtx/indirectDiffuse/scalingFactor", 1.0)
        self._window = None
    # U S E R   I N T E R F A C E
    def _build_content(self, runTestScript = False):
        """Build the full extension UI inside self._window.

        Lays out three sections: Quick Generation, Customized Generation,
        and Smart Import.  Nested helper functions (generateShell,
        genProcedural, genCustomWarehouse, sel_layout, sel_object) are
        closures over self and over the buttons created here; they run only
        on button clicks.  If runTestScript is True, a procedural generation
        is kicked off immediately after the UI is built (used by the test
        suite).
        """
        # Elements within the window frame
        with self._window.frame:
            # General UI Styling
            style1 = {
                "Button:hovered": {"background_color": 0xFF00B976, "border_color": 0xFFFD761D},
                "Button": {},
                "Button.Label": {"color": 0xFF00B976},
                "Button.Label:hovered": {"color": 0xFFFFFFFF},
            }
            # UI elements in a main VStack
            with ui.VStack(style=style1):
                # Main title label
                titleLabel = ui.Label(
                    "Warehouse Creator",
                    style={"color": 0xFF00B976, "font_size": 35},
                    alignment=ui.Alignment.CENTER,
                    height=0,
                )
                titleDescriptionLabel = ui.Label(
                    "\nWelcome to the Warehouse Creator Extension! Quickly get started with building your\nwarehouse scenes with a click of a button. For more detailed guide on quickly getting\nstarted with building scenes, checkout our official documentation here: \n\n",
                    width=500,
                    alignment=ui.Alignment.CENTER,
                )
                # Quick Generation label
                quickgenLabel = ui.Label(
                    "Option 1. Quick Generation",
                    alignment=ui.Alignment.CENTER,
                    width=500,
                    style={"color": 0xFF00B976, "font_size": 25},
                )
                quickgenDescriptionLabel1 = ui.Label(
                    "\nQuick Generation module allows you to quickly generate your warehouse scene w/o\nany parameters. You can choose to begin with a standalone warehouse shell, to bring\n in your own assets to populate your scene!\n\n",
                    width=500,
                    alignment=ui.Alignment.CENTER,
                )
                # G E N E R A T E   W A R E H O U S E   S H E L L
                with ui.VStack():
                    shellImage = ui.Image(
                        f"{self.UI_IMAGE_SERVER}shell.JPG",
                        width=500,
                        height=150,
                    )
                    shellImage.fill_policy = shellImage.fill_policy.PRESERVE_ASPECT_CROP
                    # Spawning warehouse shell when the button is clicked
                    # (also un-checks every layout/object toggle so stale
                    # selections don't leak into a later customized run)
                    def generateShell():
                        self.layoutButton1.checked = False
                        self.layoutButton2.checked = False
                        self.layoutButton3.checked = False
                        self.objectButton1.checked = False
                        self.objectButton2.checked = False
                        self.objectButton3.checked = False
                        self.objectButton4.checked = False
                        self.objectButton5.checked = False
                        self.objectButton6.checked = False
                        self._wh_helper.genShell()
                    shellButton = ui.Button(
                        "Generate Warehouse Shell",
                        clicked_fn=lambda: generateShell(),
                        width=500,
                        height=40,
                        tooltip="Generates an empty warehouse shell",
                    )
                quickgenDescriptionLabel2 = ui.Label(
                    "\nYou can also quickly create a full, procedurally generated warehouse scene! Just click\non the button below to generate your scene now!\n\n",
                    width=500,
                    alignment=ui.Alignment.CENTER,
                )
                # P R O C E D U R A L   W A R E H O U S E   G E N E R A T I O N
                with ui.VStack():
                    proceduralImage = ui.Image(
                        f"{self.UI_IMAGE_SERVER}warehouse.JPG",
                        width=500,
                        height=150,
                    )
                    proceduralImage.fill_policy = proceduralImage.fill_policy.PRESERVE_ASPECT_CROP
                    # Clears the stage, resets all toggles, then generates a
                    # full scene in "procedural" mode (all objects selected).
                    def genProcedural():
                        self._wh_helper.clear_stage()
                        self.layoutButton1.checked = False
                        self.layoutButton2.checked = False
                        self.layoutButton3.checked = False
                        self.objectButton1.checked = False
                        self.objectButton2.checked = False
                        self.objectButton3.checked = False
                        self.objectButton4.checked = False
                        self.objectButton5.checked = False
                        self.objectButton6.checked = False
                        # Pass asset buttons
                        self.objectButtons = [
                            self.objectButton1,
                            self.objectButton2,
                            self.objectButton3,
                            self.objectButton4,
                            self.objectButton5,
                            self.objectButton6,
                        ]
                        self.mode = "procedural"
                        self._wh_helper.gen_custom(True, self.mode, self.objectButtons)
                    proceduralButton = ui.Button(
                        "Procedurally Generate Warehouse",
                        clicked_fn=lambda: genProcedural(),
                        width=500,
                        height=40,
                        tooltip="Procedurally Generates a warehouse scene",
                    )
                clearDescription = ui.Label(
                    "\nYou can clear the current scene and start a fresh stage by clicking the button below!\n\n",
                    width=500,
                    alignment=ui.Alignment.CENTER,
                )
                clearButton1 = ui.Button(
                    "Clear Stage",
                    clicked_fn=lambda: self._wh_helper.clear_stage(),
                    width=500,
                    height=40,
                    tooltip="Removes all assets on the stage",
                )
                # C U S T O M   W A R E H O U S E   G E N E R A T I O N
                customgenLabel = ui.Label(
                    "\nOption 2. Customized Generation",
                    alignment=ui.Alignment.CENTER,
                    width=500,
                    style={"color": 0xFF00B976, "font_size": 25},
                )
                customgenDescriptionLabel1 = ui.Label(
                    "\nCustomized Generation module allows you to set custom parameters to generate your\nwarehouse scene. You can choose what objects the generated scene contains, and the\nlayout you want the scene to be generated with!\n\n",
                    width=500,
                    alignment=ui.Alignment.CENTER,
                )
                # Customized Warehouse Generation - layouts
                layoutLabel = ui.Label(
                    "2.1 Select Preferred Layout",
                    alignment=ui.Alignment.CENTER,
                    width=500,
                    style={"color": 0xFF00B976, "font_size": 20},
                )
                customgenDescriptionLabel2 = ui.Label(
                    "\nTo begin, select the preferred layout from the standard layout options given below.\n\n",
                    width=500,
                    alignment=ui.Alignment.CENTER,
                )
                # Layout selection
                with ui.VStack(width=500):
                    # Layout labels
                    with ui.HStack(width=500):
                        layoutLabel1 = ui.Label(
                            "U-Shaped Layout", alignment=ui.Alignment.CENTER, style={"color": 0xFF00B976}
                        )
                        layoutLabel2 = ui.Label(
                            "I-Shaped Layout", alignment=ui.Alignment.CENTER, style={"color": 0xFF00B976}
                        )
                        layoutLabel3 = ui.Label(
                            "L-Shaped Layout", alignment=ui.Alignment.CENTER, style={"color": 0xFF00B976}
                        )
                    # Layout buttons (mutually exclusive; see sel_layout below)
                    with ui.HStack(width=500):
                        self.layoutButton1 = ui.Button(
                            width=166,
                            height=150,
                            tooltip="Generates a U-Shaped Layout",
                            style={
                                "Button.Image": {
                                    "image_url": f"{self.UI_IMAGE_SERVER}U-Shaped_Warehouse.png",
                                    "alignment": ui.Alignment.CENTER,
                                }
                            },
                            clicked_fn=lambda: sel_layout(1),
                        )
                        self.layoutButton2 = ui.Button(
                            width=166,
                            height=150,
                            tooltip="Generates an I-Shaped Layout",
                            style={
                                "Button.Image": {
                                    "image_url": f"{self.UI_IMAGE_SERVER}I-Shaped_Warehouse.png",
                                    "alignment": ui.Alignment.CENTER,
                                }
                            },
                            clicked_fn=lambda: sel_layout(2),
                        )
                        self.layoutButton3 = ui.Button(
                            width=166,
                            height=150,
                            tooltip="Generates an L-Shaped Layout",
                            style={
                                "Button.Image": {
                                    "image_url": f"{self.UI_IMAGE_SERVER}L-Shaped_Warehouse.png",
                                    "alignment": ui.Alignment.CENTER,
                                }
                            },
                            clicked_fn=lambda: sel_layout(3),
                        )
                # Customized Warehouse Generation - objects
                objectsLabel = ui.Label(
                    "\n2.2 Select Preferred Objects",
                    alignment=ui.Alignment.CENTER,
                    width=500,
                    style={"color": 0xFF00B976, "font_size": 20},
                )
                customgenDescriptionLabel3 = ui.Label(
                    "\nNow, select the preferred objects you want in your scene from the options given below.\n\n",
                    width=500,
                    alignment=ui.Alignment.CENTER,
                )
                # Objects selection (independent toggles; see sel_object below)
                with ui.VStack(width=500):
                    # Object labels row 1
                    with ui.HStack(width=500):
                        objectsLabel1 = ui.Label(
                            "Empty Racks", alignment=ui.Alignment.CENTER, style={"color": 0xFF00B976}
                        )
                        objectsLabel2 = ui.Label(
                            "Filled Racks", alignment=ui.Alignment.CENTER, style={"color": 0xFF00B976}
                        )
                        objectsLabel3 = ui.Label("Piles", alignment=ui.Alignment.CENTER, style={"color": 0xFF00B976})
                    # Object buttons row 1
                    with ui.HStack(width=500):
                        self.objectButton1 = ui.Button(
                            width=166,
                            height=150,
                            tooltip="Generates empty racks",
                            style={
                                "Button.Image": {
                                    "image_url": f"{self.UI_IMAGE_SERVER}objects-01.png",
                                    "alignment": ui.Alignment.CENTER,
                                }
                            },
                            clicked_fn=lambda: sel_object(self.objectButton1),
                        )
                        self.objectButton2 = ui.Button(
                            width=166,
                            height=150,
                            tooltip="Generates filled racks",
                            style={
                                "Button.Image": {
                                    "image_url": f"{self.UI_IMAGE_SERVER}objects-02.png",
                                    "alignment": ui.Alignment.CENTER,
                                }
                            },
                            clicked_fn=lambda: sel_object(self.objectButton2),
                        )
                        self.objectButton3 = ui.Button(
                            width=166,
                            height=150,
                            tooltip="Generates random piles of items",
                            style={
                                "Button.Image": {
                                    "image_url": f"{self.UI_IMAGE_SERVER}objects-03.png",
                                    "alignment": ui.Alignment.CENTER,
                                }
                            },
                            clicked_fn=lambda: sel_object(self.objectButton3),
                        )
                    # Object labels row 2
                    with ui.HStack(width=500):
                        objectsLabel4 = ui.Label(
                            "\nRailings", alignment=ui.Alignment.CENTER, style={"color": 0xFF00B976}
                        )
                        objectsLabel5 = ui.Label(
                            "\nForklift", alignment=ui.Alignment.CENTER, style={"color": 0xFF00B976}
                        )
                        objectsLabel6 = ui.Label("\nRobot", alignment=ui.Alignment.CENTER, style={"color": 0xFF00B976})
                    # Object buttons row 2
                    with ui.HStack(width=500):
                        self.objectButton4 = ui.Button(
                            width=166,
                            height=150,
                            tooltip="Generates safety railings",
                            style={
                                "Button.Image": {
                                    "image_url": f"{self.UI_IMAGE_SERVER}objects-04.png",
                                    "alignment": ui.Alignment.CENTER,
                                }
                            },
                            clicked_fn=lambda: sel_object(self.objectButton4),
                        )
                        self.objectButton5 = ui.Button(
                            width=166,
                            height=150,
                            tooltip="Generates forklifts",
                            style={
                                "Button.Image": {
                                    "image_url": f"{self.UI_IMAGE_SERVER}objects-05.png",
                                    "alignment": ui.Alignment.CENTER,
                                }
                            },
                            clicked_fn=lambda: sel_object(self.objectButton5),
                        )
                        self.objectButton6 = ui.Button(
                            width=166,
                            height=150,
                            tooltip="Generates transporter robots",
                            style={
                                "Button.Image": {
                                    "image_url": f"{self.UI_IMAGE_SERVER}objects-06.png",
                                    "alignment": ui.Alignment.CENTER,
                                }
                            },
                            clicked_fn=lambda: sel_object(self.objectButton6),
                        )
                customgenDescriptionLabel4 = ui.Label(
                    "\nNow, click on the button below to generate your own, customized warehouse scene!\n\n",
                    width=500,
                    alignment=ui.Alignment.CENTER,
                )
                # Generate using the layout picked via sel_layout and whatever
                # object toggles are currently checked.
                # NOTE(review): relies on self.mode having been set by a prior
                # sel_layout click — clicking generate before picking a layout
                # presumably uses the last/default mode; confirm intended.
                def genCustomWarehouse(mode):
                    self.mode = mode
                    self._wh_helper.clear_stage()
                    # Pass asset buttons
                    self.objectButtons = [
                        self.objectButton1,
                        self.objectButton2,
                        self.objectButton3,
                        self.objectButton4,
                        self.objectButton5,
                        self.objectButton6,
                    ]
                    self._wh_helper.gen_custom(False, self.mode, self.objectButtons)
                customizedButton = ui.Button(
                    "Generate Customized Warehouse",
                    clicked_fn=lambda: genCustomWarehouse(self.mode),
                    width=500,
                    height=40,
                    tooltip="Generates a warehouse scene based on custom parameters",
                )
                clearButton2 = ui.Button(
                    "Clear Stage",
                    clicked_fn=lambda: self._wh_helper.clear_stage(),
                    width=500,
                    height=40,
                    tooltip="Removes all assets on the stage",
                )
                # S M A R T   I M P O R T
                with ui.VStack(height=ui.Fraction(1)):
                    smartImportLabel = ui.Label(
                        "\nOption 3. Smart Import",
                        alignment=ui.Alignment.CENTER,
                        width=500,
                        style={"color": 0xFF00B976, "font_size": 25},
                    )
                    smartImportDescriptionLabel1 = ui.Label(
                        "\nSmart Import module allows you to instantly import your own assets - the smart way!\nSimply, select the asset type you are importing from the drop-down, copy and paste\nthe URL of the asset from the content navigator into the box below. Your asset is\nmagically imported in-place!\n\n",
                        width=500,
                        alignment=ui.Alignment.CENTER,
                    )
                    importImage = ui.Image(
                        f"{self.UI_IMAGE_SERVER}objects.JPG",
                        width=500,
                        height=150,
                        fill_policy=ui.FillPolicy.PRESERVE_ASPECT_CROP,
                        alignment=ui.Alignment.CENTER_TOP,
                    )
                    spacinglabel = ui.Label("")
                    # Category order must match wh_helpers.objDictList indexing
                    importCategoryDropdown = ui.ComboBox(
                        1, "Empty Rack", "Filled Rack", "Pile", "Railing", "Forklift", "Robot"
                    )
                    spacinglabel = ui.Label("")
                    with ui.HStack():
                        importPathString = ui.StringField(width=250, height=40, tooltip= "Paste URL asset path").model
                        importButton = ui.Button(
                            "Import", clicked_fn=lambda: self._wh_helper.smart_import(importPathString, importCategoryDropdown), width=250, height=40
                        )
                    ui.Spacer(height = 65)
            ########################## UI Helper Functions ##############################################
            # Radio-button behavior for the three layout toggles; also records
            # the chosen mode string consumed by genCustomWarehouse.
            def sel_layout(n):
                if n == 1:
                    self.layoutButton1.checked = True
                    self.layoutButton2.checked = False
                    self.layoutButton3.checked = False
                    self.mode = "Umode"
                if n == 2:
                    self.layoutButton1.checked = False
                    self.layoutButton2.checked = True
                    self.layoutButton3.checked = False
                    self.mode = "Imode"
                if n == 3:
                    self.layoutButton1.checked = False
                    self.layoutButton2.checked = False
                    self.layoutButton3.checked = True
                    self.mode = "Lmode"
            # Simple toggle for the six independent object buttons.
            def sel_object(button):
                if button.checked == True:
                    button.checked = False
                else:
                    button.checked = True
        # Generate Procedural Scene if test script flag == True
        if runTestScript == True:
            print("<WarehouseCreatorWindow::runTestScript>: Starting Procedural Generation...")
            genProcedural()
            print("<WarehouseCreatorWindow::runTestScript>: Finished Procedural Generation.")
| 22,022 |
Python
| 46.980392 | 313 | 0.435337 |
Strevia/omni_warehouse/omni/warehouse_creator/python/scripts/warehouse_helpers.py
|
import os, platform
import random
import omni.ext
import omni.ui as ui
from omni.ui.workspace_utils import RIGHT
from pxr import Usd, UsdGeom, UsdLux, Gf
from .recipes import warehouse_recipe as wh_rec
class wh_helpers():
    def __init__(self):
        """Initialize generation state: default mode, asset categories, the
        recipe-backed position table, and the hard-coded warehouse shell info.

        Note: initAssetPositions() reads self.mode, so self.mode must be set
        before that call.
        """
        NUCLEUS_SERVER = "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/ArchVis/Industrial/"
        self._system = platform.system()
        self.mode = "procedural"
        # Asset categories, in the same order as the Smart Import drop-down.
        self.objects = ["empty_racks", "filled_racks", "piles", "railings", "forklift", "robot"]
        # Category -> list of available spawn positions for the current mode.
        self.objectDict = self.initAssetPositions()
        self.objDictList = list(self.objectDict)
        # Isaac Sim assets are authored in meters; OV stage units are cm.
        self.isaacSimScale = Gf.Vec3f(100, 100, 100)
        # Shell info is hard-coded for now
        self.shell_path = f"{NUCLEUS_SERVER}Buildings/Warehouse/Warehouse01.usd"
        self.shell_translate = (0, 0, 55.60183)
        self.shell_rotate = (-90.0, 0.0, 0.0)
        self.shell_name = "WarehouseShell"
    def config_environment(self):
        """Deactivate every prim under /Environment/ and add our own distant light.

        Requires self._stage to be set (call get_root() first).
        """
        for prim in self._stage.Traverse():
            # String-match on the prim path; deactivates all default environment prims
            if '/Environment/' in str(prim):
                prim.SetActive(False)
        self.create_distant_light()
    def create_distant_light(self):
        """Create /Environment/distantLight: a 6500K distant light angled 315° in X.

        Intensity is platform-dependent (Linux renderer needs a brighter value,
        matching the ambient-light tuning in the window class).
        """
        environmentPath = '/Environment'
        lightPath = environmentPath + '/distantLight'
        prim = self._stage.DefinePrim(environmentPath, 'Xform')
        distLight = UsdLux.DistantLight.Define(self._stage, lightPath)
        distLight.AddRotateXYZOp().Set(Gf.Vec3f(315,0,0))
        distLight.CreateColorTemperatureAttr(6500.0)
        if self._system == "Linux":
            distLight.CreateIntensityAttr(6500.0)
        else:
            distLight.CreateIntensityAttr(3000.0)
        distLight.CreateAngleAttr(1.0)
        lightApi = UsdLux.ShapingAPI.Apply(distLight.GetPrim())
        lightApi.CreateShapingConeAngleAttr(180)
    # Spawning warehouse shell when the button is clicked
    def genShell(self):
        """Clear the stage, reset mode to procedural, and spawn the bare shell."""
        self.clear_stage()
        self.mode = "procedural"
        self.spawn_prim(self.shell_path, self.shell_translate, self.shell_rotate, self.shell_name)
    # Generate Warehouse w/ user-selected assets
    def gen_custom(self, isProcedural, mode, objectButtons):
        """Spawn a warehouse scene for `mode` ("procedural"/"Umode"/"Imode"/"Lmode").

        isProcedural=True spawns every asset category; otherwise only the
        categories whose toggle button in `objectButtons` is checked.  For
        each selected category this spawns assets at recipe positions via
        reservePositions(), and records the held-back positions in
        self.objectDict for later Smart Imports.
        """
        self.mode = mode
        # Create shell
        self.spawn_prim(self.shell_path, self.shell_translate, self.shell_rotate, self.shell_name)
        selected_obj = []
        # Spawn all objects (procedural) or user-selected objects
        if isProcedural:
            selected_obj = self.objects
        else:
            for i in range(len(objectButtons)):
                if objectButtons[i].checked == True:
                    # Button order must match this index -> category mapping
                    identifier = {
                        0: "empty_racks",
                        1: "filled_racks",
                        2: "piles",
                        3: "railings",
                        4: "forklift",
                        5: "robot",
                    }
                    selected_obj.append(identifier.get(i))
        # Category -> list of full USD paths to pick from when spawning
        objectUsdPathDict = {"empty_racks": [],
                             "filled_racks":[],
                             "piles":[],
                             "railings":[],
                             "forklift":[],
                             "robot":[]
                             }
        # Reserved spots are dependent on self.mode (i.e. layout)
        numForkLifts = len(wh_rec[self.objects[4] + "_" + self.mode])
        spots2RsrvFL = numForkLifts - 1
        numRobots = len(wh_rec[self.objects[5] + "_" + self.mode])
        spots2RsrvRob = numRobots - 1
        # Category -> (rotation, scale, spots to reserve).  Vehicles reserve
        # all but one spot so only a single forklift/robot is spawned.
        self.objParams = {"empty_racks": [(-90,-180,0), (1,1,1), 5],
                          "filled_racks":[(-90,-180,0), (1,1,1), 5],
                          "piles":[(-90,-90,0), (1,1,1), 5],
                          "railings":[(-90,0,0), (1,1,1), 5],
                          "forklift":[(-90, random.uniform(0, 90), 0), self.isaacSimScale, spots2RsrvFL],
                          "robot":[(-90, random.uniform(0, 90), 0), self.isaacSimScale, spots2RsrvRob]
                          }
        # Reserve spots for Smart Import feature
        for h in range(0,len(selected_obj)):
            for i in range(0, len(self.objDictList)):
                if selected_obj[h] == self.objDictList[i]:
                    # Expand asset file names into full Nucleus URLs
                    for j in wh_rec[selected_obj[h] + "_asset"]:
                        objectUsdPathDict[self.objDictList[i]].append(wh_rec[selected_obj[h]] + j)
                    rotation = self.objParams[self.objDictList[i]][0]
                    scale = self.objParams[self.objDictList[i]][1]
                    spots2Rsrv = self.objParams[self.objDictList[i]][2]
                    # Spawn assets now; keep back the reserved positions
                    self.objectDict[self.objDictList[i]] = self.reservePositions(objectUsdPathDict[self.objDictList[i]], selected_obj[h], rotation, scale, spots2Rsrv)
# Function to reserve spots/positions for Smart Import feature (after initial generation of assets)
def reservePositions(self, assets, asset_prefix, rotation = (0,0,0), scale = (1,1,1), spots2reserve = 5):
if len(assets) > 0:
rotate = rotation
scale = scale
all_translates = wh_rec[asset_prefix + "_" + self.mode]
#Select all but 5 positions from available positions (reserved for Smart Import functionality)
if spots2reserve >= len(all_translates) and len(all_translates) > 0:
spots2reserve = len(all_translates) - 1
elif len(all_translates) == 0:
spots2reserve = 0
reserved_positions = random.sample(all_translates, spots2reserve)
translates = [t for t in all_translates if t not in reserved_positions]
positions = reserved_positions
for i in range(len(translates)):
name = asset_prefix + str(i)
path = random.choice(assets)
translate = translates[i]
self.spawn_prim(path, translate, rotate, name, scale)
return positions
    # Smart Import
    def smart_import(self, pathString, importCategory):
        """Spawn the asset at the user-supplied URL into a reserved position.

        pathString: ui.StringField model holding the asset URL.
        importCategory: ui.ComboBox whose index maps onto self.objDictList.
        Pops one reserved position for the chosen category; if none remain,
        shows a notification window and returns without spawning.
        """
        self.get_root()
        # Get chosen index from drop-down
        import_index = importCategory.model.get_item_value_model().get_value_as_int()
        selected = self.objDictList[import_index] #ToDo: should idx dropdown list
        path = pathString.get_value_as_string()
        scale = Gf.Vec3f(1.0, 1.0, 1.0)
        # Scale if contains Isaac Sim Path (these assets are typically in meters vs centimeters)
        if "isaac" in path.lower():
            scale = self.isaacSimScale
        # If positions available, spawn selected asset
        notificationMsg = "No more available positions to spawn new asset"
        for idx in range(0,len(self.objectDict)):
            if selected in self.objDictList[idx]:
                if len(self.objectDict[self.objDictList[idx]]) != 0:
                    # Consume one reserved position for this category
                    translate = self.objectDict[self.objDictList[idx]].pop()
                    break
                else:
                    self.windowNum = self.notification_window("window", "Smart Import", notificationMsg)
                    return
        # NOTE(review): if the loop above never matches, `translate` would be
        # unbound here — in practice `selected` always comes from objDictList.
        pathName = os.path.basename(path).split(".")[0]
        if pathName == "transporter":
            pathName = "robot"
        existing_assets = []
        world = self._stage.GetDefaultPrim()
        for i in world.GetChildren():
            existing_assets.append(i.GetPath())
        # If prim exists, add count suffix and spawn
        # NOTE(review): the first loop iteration recomputes the same name
        # before incrementing — redundant but terminating; confirm intent.
        counter = 0
        assetName = pathName + str(counter)
        while f"/World/{assetName}" in existing_assets:
            assetName = f"{pathName}" + str(counter)
            counter += 1
        pathName = assetName
        rotate = (-90.0, 0.0, 0.0)
        self.spawn_prim(path, translate, rotate, pathName, scale)
        # Clear the text field after a successful import
        pathString.set_value("")
    # spawn_prim function takes in a path, XYZ position, orientation, a name and spawns the USD asset in path with
    # the input name in the given position and orientation inside the world prim as an XForm
    def spawn_prim(self, path, translate, rotate, name, scale=Gf.Vec3f(1.0, 1.0, 1.0)):
        """Reference the USD at `path` as an Xform child of the default prim.

        translate/rotate: XYZ tuples; scale defaults to identity.
        Returns the created UsdGeom.Xform.
        """
        world = self._stage.GetDefaultPrim()
        # Creating an XForm as a child to the world prim
        asset = UsdGeom.Xform.Define(self._stage, f"{str(world.GetPath())}/{name}")
        # Checking if asset already has a reference and clearing it
        asset.GetPrim().GetReferences().ClearReferences()
        # Adding USD in the path as reference to this XForm
        asset.GetPrim().GetReferences().AddReference(f"{path}")
        # Setting the Translate and Rotate
        UsdGeom.XformCommonAPI(asset).SetTranslate(translate)
        UsdGeom.XformCommonAPI(asset).SetRotate(rotate)
        UsdGeom.XformCommonAPI(asset).SetScale(scale)
        # Returning the Xform if needed
        return asset
def initAssetPositions(self):
dictAssetPositions = {
"empty_racks": wh_rec["empty_racks" + "_" + self.mode],
"filled_racks": wh_rec["filled_racks" + "_" + self.mode],
"piles": wh_rec["piles" + "_" + self.mode],
"railings": wh_rec["railings" + "_" + self.mode],
"forklift": wh_rec["forklift" + "_" + self.mode],
"robot": wh_rec["robot" + "_" + self.mode]
}
return dictAssetPositions
    # Clear stage function
    def clear_stage(self):
        """Remove every child of the default prim except the distant light,
        creating the light (and disabling the stock environment) if absent."""
        #Removing all children of world except distant light
        self.get_root()
        world = self._stage.GetDefaultPrim()
        doesLightExist = self._stage.GetPrimAtPath('/Environment/distantLight').IsValid()
        # config environment
        if doesLightExist == False:
            self.config_environment()
        # clear scene
        for i in world.GetChildren():
            if i.GetPath() == '/Environment/distantLight' or i.GetPath() == '/World':
                continue
            else:
                self._stage.RemovePrim(i.GetPath())
# Notification window
def notification_window(self,windowNum, textWindow, textField):
windowNum = ui.Window(textWindow, width=400, height=100)
with windowNum.frame:
with ui.VStack():
ui.Label(textField)
return windowNum
    # gets stage
    def get_root(self):
        """Cache the current USD stage on self._stage (call before stage ops)."""
        self._stage = omni.usd.get_context().get_stage()
| 10,256 |
Python
| 45.411765 | 166 | 0.581318 |
Strevia/omni_warehouse/omni/warehouse_creator/python/tests/tests_warehouse_creator.py
|
# from warnings import catch_warnings
import omni.kit.test
import omni.kit.app
import carb
import carb.events
import carb.dictionary
from omni.warehouse_creator import *
from ..scripts.warehouse_window_base import *
from ..scripts.warehouse_helpers import *
class TestWarehouseCreator(omni.kit.test.AsyncTestCaseFailOnLogError):
    """Smoke test: builds the window with runTestScript=True, which also
    triggers a full procedural generation (see _build_content)."""
    # Runs once when the class body is executed at import time
    print("<tests_warehouse_creator.py>: Starting test script...")
    async def setUp(self):
        # Build the window and immediately run the procedural-generation path
        print("<TestWarehouseCreator::setUp>: WarehouseCreator starting...")
        self.wh_creator = WarehouseCreatorWindow()
        print("UI Window Created... Building Content...")
        self.wh_creator._build_content(True)
        print("<TestWarehouseCreator::setUp>: WarehouseCreator started.")
    async def tearDown(self):
        # Restores render settings and releases the window
        print("<TestWarehouseCreator::tearDown>: WarehouseCreator stopping...")
        self.wh_creator.destroy()
        print("<TestWarehouseCreator::tearDown>: WarehouseCreator shutdown.")
| 953 |
Python
| 37.159999 | 79 | 0.729276 |
Strevia/omni_warehouse/omni/warehouse_creator/python/tests/__init__.py
|
from .tests_warehouse_creator import *
| 39 |
Python
| 18.999991 | 38 | 0.794872 |
Strevia/omni_warehouse/docs/CHANGELOG.md
|
# Changelog
## [0.2.2] - 2023-02-13
- Create 2022.3.3 Release v0.2.2: Update lighting for Linux
## [0.2.1] - 2023-01-31
- Create 2022.3.3 Release v0.2.1: Update to scene configuration
## [0.2.0] - 2023-01-31
- Create 2022.3.3 Release v0.2.0: Configure Linux renderer setting; updated README
## [0.1.9] - 2023-01-30
- Create 2022.3.3 Release v0.1.9: Configure environment
## [0.1.8] - 2023-01-23
- Create 2022.3.3 Release v0.1.8: Update Kit-SDK to 104.2
## [0.1.7] - 2022-11-21
- Create 2022.3.1 Release v0.1.7: Clean up
## [0.1.6] - 2022-11-20
- Create 2022.3.1 Release v0.1.6: Support for Linux compatibility and updated rack placement
## [0.1.5] - 2022-10-27
- Create 2022.3.0 Release v0.1.5: Render Settings: Enabled Ambient Light
## [0.1.4] - 2022-10-26
- Create 2022.3.0 Release v0.1.4: Code Clean-up
## [0.1.3] - 2022-09-23
- Create 2022.3.0 Release v0.1.3
## [0.1.2] - 2022-08-31
- Beta Release v0.1.2
## [0.1.1] - 2022-08-19
- Beta Release
## [0.1.0] - 2022-08-18
- Initial build.
| 1,002 |
Markdown
| 23.463414 | 92 | 0.648703 |
Strevia/omni_warehouse/docs/README.md
|
# Warehouse Creator Extension
The Warehouse Creator is an Extension for Omniverse Create that allows you to quickly get started with building Warehouse Digital Twins in an automated way. It supports procedurally generating full warehouse scenes, as well as customized generation based on layouts and assets that you want in the scene.
The extension works by spawning USD (Universal Scene Description) assets from Nucleus based on predefined asset-based rules.
The pre-defined asset placement rules control the sample-space of the various positions, rotations, and scaling of the asset to be spawned. Based on user-defined parameters, assets are randomly chosen from Nucleus and placed in random positions from the sample space of positions defined by the placement rules. Occupied positions are recorded, and new assets are spawned in the unoccupied positions of the sample space.
You can modify the assets and their paths to several types and locations based on preference – this allows you to use the extension to generate scenes with your own custom assets from their defined locations.
# Getting Started
## Enable Extension via Extensions Manager
The Extension can be enabled by:
1. Navigating to Window > Extensions Manager from the top-level menu
2. Searching for Warehouse Creator in the text field of the Extension Manager window, to reveal the omni.warehouse_creator result.
3. Clicking the toggle to enable the Extension
# Navigating the User Interface
## Warehouse Creator - User Interface
The extension UI (User Interface) contains a vertically scrollable window that hosts several quick recipes to get started with building your warehouse scenes. The different recipes and descriptions are elucidated below:
### Option 1: Quick Generation
Quick Generation module allows you to quickly generate your warehouse scene without any parameters. It contains two different options:
### Option 2: Customized Generation
The Customized Generation option allows you to control different parameters for scene generation to generate scenes attuned to your need.
### Option 3: Smart Import
Smart Import module allows you to instantly import your own assets - the smart way! Simply select the asset type you are importing from the drop-down, copy and paste the URL of the asset from the content navigator into the box below. Your asset will be imported in-place. Import assets that fall under the following asset categories:
1. Filled Racks
2. Empty Racks
3. Piles
4. Railings
5. Forklift
6. Robot
Note:
1. URL paths containing the keyword "isaac" will be scaled by (100,100,100) as Isaac Sim assets are in meters versus the default OV units of centimeters.
2. Empty spaces are reserved for the Smart Import feature during initial generation of warehouse. When used, Smart Import will randomly choose a location from a list of reserved spaces to place an imported asset.
### Clear Stage
Quickly clear the scene. Users can select this button to delete all existing assets from their scene. This function is also called each time you generate a scene with Option 1 or 2.
# Customize
Modify the current extension with your own customizations!
## Develop with VS Code
### Pre-requisites
1. Microsoft VS Code (free)
2. Omniverse Create (Omniverse Code also works)
3. Enabled Warehouse Creator Extension
4. Enabled Kit VS Code Debug Extension
### Configuring Omniverse
1. In Create, navigate to Extensions
2. Search for Warehouse Creator
3. Select Warehouse Creator Extension from list of available extensions
4. Select Open with VSCode
Similarly, you will need to follow steps 1-3 to enable the "Kit Debug VSCode" Extension
### Configuring VS Code
1. In VS Code, you will need to create a session to connect and link the debugger to your Create's VS Code Debugger Extension
2. Navigate to Run -> Add Configuration
3. Select Python
4. Click on "Remote Attach". Once you've selected, enter the IP address and Port Number specified in the VS Code Debugger extension
5. Once configured, hit the "F5" on your keyboard or navigate to Run -> Start debugging to start a debugging session
6. Once connected, the Kit VS Code window in Create should show the debugger has attached
7. You should now be able to navigate to the source code, modify it, hit Ctrl + S to save, and see the changes take effect in the extension!
8. Source code files can be found in the following directory: "omni\warehouse_creator\python\scripts\"
Note that if the code is not executable due to a syntax error, the extension will disappear from Omniverse until the code has been fixed and saved once again.
| 4,590 |
Markdown
| 57.858974 | 420 | 0.799564 |
Strevia/omni_warehouse/docs/index.rst
|
example.python_ext
###########################
Example of Python only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule:: example.python_ext
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 334 |
reStructuredText
| 14.95238 | 43 | 0.622754 |
perfectproducts/floorplan_generator_lite/README.md
|
# floorplan generator lite
see http://www.synctwin.ai for more information on SyncTwin GmbH.
| 95 |
Markdown
| 22.999994 | 66 | 0.778947 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/floorplan_semantics.py
|
from .floorplan_model import FloorPlanImagePoint, FloorPlanModel
from pxr import Usd, Kind, UsdGeom, Sdf, Gf, Tf
class FloorPlanImagePointSemantics():
    """Reads and writes a FloorPlanImagePoint as a USD Xform prim carrying
    `mfgstd:` schema attributes; the point position is stored as a translate op."""

    SCHEMA = "PointOfInterest"
    SCHEMA_VERSION = "1.0.0"
    DEFAULT_ROOT_PATH = "PointsOfInterest"

    def write(self, stage, poi: FloorPlanImagePoint, prim_path):
        """Define (or update) the prim for `poi` and return its path.
        An empty `prim_path` derives the path from the point name."""
        if prim_path != "":
            target = prim_path
        else:
            target = f"{FloorPlanImagePointSemantics.DEFAULT_ROOT_PATH}/{Tf.MakeValidIdentifier(poi.name)}"
        prim = stage.DefinePrim(target, 'Xform')
        # Stamp the schema identity plus the point's own properties.
        prim.CreateAttribute("mfgstd:schema", Sdf.ValueTypeNames.String).Set(
            f"{FloorPlanImagePointSemantics.SCHEMA}#{FloorPlanImagePointSemantics.SCHEMA_VERSION}")
        prim.CreateAttribute("mfgstd:properties:name", Sdf.ValueTypeNames.String).Set(poi.name)
        prim.CreateAttribute("mfgstd:properties:point_type", Sdf.ValueTypeNames.String).Set(poi.point_type)
        # Pixel coordinates become a translate op; z is always 0 for image points.
        UsdGeom.Xformable(prim).AddTranslateOp().Set(Gf.Vec3f(poi.x, poi.y, 0))
        return target

    def read(self, stage: Usd.Stage, poi_path: str) -> FloorPlanImagePoint:
        """Rebuild a FloorPlanImagePoint from the prim at `poi_path`;
        returns None when the schema attribute does not match."""
        prim = stage.GetPrimAtPath(poi_path)
        stored_schema = str(prim.GetAttribute("mfgstd:schema").Get())
        if not stored_schema.startswith(FloorPlanImagePointSemantics.SCHEMA):
            print("error reading schema")
            return None
        # Position comes back out of the local transform's translation.
        transform = Gf.Transform(UsdGeom.Xformable(prim).GetLocalTransformation())
        pos = transform.GetTranslation()
        return FloorPlanImagePoint(
            name=str(prim.GetAttribute("mfgstd:properties:name").Get()),
            point_type=str(prim.GetAttribute("mfgstd:properties:point_type").Get()),
            x=pos[0],
            y=pos[1])
class FloorPlanSemantics:
    """Serializes a FloorPlanModel to a USD prim hierarchy (and back):
    one root Xform with `mfgstd:` attributes, a transform encoding origin
    and scale, and a Scope containing the points of interest."""

    SCHEMA = "Floorplan"
    SCHEMA_VERSION = "1.0.0"
    DEFAULT_ROOT_PATH = "/World/FloorPlan"

    def write(self, stage: Usd.Stage, model: FloorPlanModel, prim_path: str = ""):
        """Write `model` below `prim_path` (default root when empty); returns the root path."""
        target_root = prim_path if prim_path != "" else FloorPlanSemantics.DEFAULT_ROOT_PATH
        root = stage.DefinePrim(target_root, "Xform")
        Usd.ModelAPI(root).SetKind(Kind.Tokens.component)
        # semantics prim might go to /semantics, for now root prim
        semantics = root
        semantics.CreateAttribute("mfgstd:schema", Sdf.ValueTypeNames.String).Set(
            f"{FloorPlanSemantics.SCHEMA}#{FloorPlanSemantics.SCHEMA_VERSION}")
        semantics.CreateAttribute("mfgstd:properties:resolution_x", Sdf.ValueTypeNames.Int).Set(model.resolution_x)
        semantics.CreateAttribute("mfgstd:properties:resolution_y", Sdf.ValueTypeNames.Int).Set(model.resolution_y)
        semantics.CreateAttribute("mfgstd:properties:image_url", Sdf.ValueTypeNames.String).Set(model.image_url)
        # Transform: shift so the model's origin point lands at world zero, then scale.
        xform = UsdGeom.Xformable(root)
        xform.ClearXformOpOrder()
        origin = model.reference_origin()
        xform.AddTranslateOp().Set(Gf.Vec3f(-origin.x * model.scale_x, -origin.y * model.scale_y, 0))
        xform.AddScaleOp().Set(Gf.Vec3f(model.scale_x, model.scale_y, min(model.scale_x, model.scale_y)))
        # Points of interest live in a Scope below the root.
        poi_root = f"{target_root}/{FloorPlanImagePointSemantics.DEFAULT_ROOT_PATH}"
        stage.DefinePrim(poi_root, "Scope")
        for key, poi in model.points_of_interest.items():
            FloorPlanImagePointSemantics().write(stage, poi, f"{poi_root}/{Tf.MakeValidIdentifier(key)}")
        return target_root

    def read(self, stage: Usd.Stage, prim_path: str = "") -> FloorPlanModel:
        """Rebuild a FloorPlanModel from the stage; returns None when the stage,
        the root prim, or the schema attribute is missing/mismatched."""
        if not stage:
            return None
        target_root = FloorPlanSemantics.DEFAULT_ROOT_PATH if prim_path == "" else prim_path
        root = stage.GetPrimAtPath(target_root)
        if not root:
            return None
        stored_schema = str(root.GetAttribute("mfgstd:schema").Get())
        if not stored_schema.startswith(FloorPlanSemantics.SCHEMA):
            print("error reading schema")
            return None
        model = FloorPlanModel()
        model.resolution_x = int(root.GetAttribute("mfgstd:properties:resolution_x").Get())
        model.resolution_y = int(root.GetAttribute("mfgstd:properties:resolution_y").Get())
        model.image_url = str(root.GetAttribute("mfgstd:properties:image_url").Get())
        # Invert the transform written above: scale components first, then the negated origin.
        transform = Gf.Transform(UsdGeom.Xformable(root).GetLocalTransformation())
        translation = transform.GetTranslation()
        scale = transform.GetScale()
        model.set_scale(scale[0], scale[1])
        origin = model.reference_origin()
        origin.set(-translation[0], -translation[1])
        poi_root = f"{target_root}/{FloorPlanImagePointSemantics.DEFAULT_ROOT_PATH}"
        for child in stage.GetPrimAtPath(poi_root).GetChildren():
            point = FloorPlanImagePointSemantics().read(stage, child.GetPath())
            if point:
                model.add_poi(point)
        return model
| 4,968 |
Python
| 51.305263 | 168 | 0.654388 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/extension.py
|
import omni.ext
import omni.ui as ui
import os
from omni.kit.window.filepicker import FilePickerDialog
from omni.kit.widget.filebrowser import FileBrowserItem
from pxr import Gf, Kind, Sdf, Usd, UsdGeom, UsdShade, Tf
from .floorplan_semantics import FloorPlanSemantics, FloorPlanImagePointSemantics
from .floorplan_model import FloorPlanModel, FloorPlanImagePoint
from .floorplan_simpleviz import FloorPlanSimpleViz
from .utils.geo_utils import GeoUtils
from omni.kit.pipapi import install
import tempfile
import gc
from pdf2image import convert_from_path
import tempfile
# Python code to read image
# Kit extension entry point: hosts the "SyncTwin Floor Plan Generator Lite"
# window and keeps it in sync with the currently open USD stage.
class FloorPlanGeneratorLite(omni.ext.IExt):
# Extension start-up: build window, model and stage-event subscription,
# then render the initial (empty) UI via create_ui()/clear().
def on_startup(self, ext_id):
# get script path
self._window = ui.Window("SyncTwin Floor Plan Generator Lite", width=400, height=700)
self._usd_context = omni.usd.get_context()
self._model = FloorPlanModel()
# Session state: current 2-point reference selection, whether the user edited
# the reference distance, and a guard flag used while create() rebuilds the stage.
self._selected_points = []
self._ref_edited = False
self._in_create = False
self._stage_listener = None
# local script path
ext_manager = omni.kit.app.get_app().get_extension_manager()
self._local_extension_path = f"{ext_manager.get_extension_path(ext_id)}/ai/synctwin/floorplan_generator_lite"
# subscribe to selection change
self._selection = self._usd_context.get_selection()
self._sub_stage_event = self._usd_context.get_stage_event_stream().create_subscription_to_pop(
self._on_stage_event
)
self.create_ui()
self.clear()
# Build the static widget tree; widgets that refresh() later updates are kept
# on self (_img, _create_button, _image_label, the value fields, the checkbox).
def create_ui(self):
with self._window.frame:
with ui.VStack():
# Preview image; placeholder bitmap until a plan image is chosen.
self._img = ui.Image(f"{self._local_extension_path}/bitmaps/select_image.png")
with ui.VStack():
self._create_button = ui.Button("create...", clicked_fn=lambda :self.create(), enabled=False)
with ui.HStack():
ui.Label("Source", height=30, width=75)
self._image_label = ui.Label("[select an image]", height=30)
ui.Button(
"...",
height=30,
width=30,
tooltip="select plan image...",
clicked_fn=lambda: self.show_image_selection_dialog()
)
# Pixel resolution of the source image (read-only field).
with ui.HStack(spacing=5):
ui.Label("Resolution", width=70, height=30)
args = [0,0]
self._res_field = ui.MultiIntField(*args, h_spacing=5, enabled=False, height=25,min=1)
# World-units-per-pixel scale, editable per axis.
with ui.HStack(spacing=5, height=30):
ui.Label("Scale", width=70)
args = [1.0,1.0]
self._scale_field = ui.MultiFloatDragField(*args, h_spacing=5, height=25,min=0.0001)
self.add_multifield_edit_cb(self._scale_field,
lambda m:self.set_scale_x(m.get_value_as_float()),
lambda m:self.set_scale_y(m.get_value_as_float()))
# Total world-space size, editable per axis.
with ui.HStack(spacing=5, height=30):
ui.Label("Size", width=70)
args = [1.0,1.0]
self._size_field = ui.MultiFloatDragField(*args, h_spacing=5, height=25,min=0.0001)
self.add_multifield_edit_cb(self._size_field,
lambda m:self.set_size_x(m.get_value_as_float()),
lambda m:self.set_size_y(m.get_value_as_float()))
with ui.HStack(height=30):
ui.Spacer()
with ui.VStack(style={"margin_width": 0}, height=30, width=30):
ui.Spacer()
# Checked by default: size/scale edits preserve aspect ratio.
self._keep_ratio_check = ui.CheckBox( height=0 )
self._keep_ratio_check.model.set_value(True)
ui.Spacer()
ui.Label("keep aspect ratio", height=30)
with ui.CollapsableFrame("Selected References"):
with ui.VStack():
self._selection_info_label = ui.Label("[nothing selected]", height=25)
with ui.HStack(spacing=5, height=30):
ui.Label("Distance", width=70)
self._ref_dist_field = ui.FloatField(height=25)
self._ref_dist_field.model.add_end_edit_fn(lambda m:self.set_ref_dist(m.get_value_as_float()))
ui.Spacer()
# Point the model at a new image file and redraw the UI.
def set_image_url(self, url):
self._model.set_image_url(url)
self.refresh()
def on_image_file_selected(self, dialog, dirname: str, filename: str):
    """File-picker apply handler.

    PDFs are converted to PNG first (remote Nucleus files are copied to a
    local temp dir because pdf2image needs a local path); the resulting image
    is loaded into the model and create() is invoked.

    BUG FIX: the f-strings previously contained the literal text "(unknown)"
    where the selected file name belongs, producing paths like
    "<dirname>/(unknown)". Restored the `{filename}` interpolation.
    """
    print(f"selected {filename}")
    filepath = f"{dirname}/{filename}"
    if filename.endswith(".pdf"):
        # Remote (Nucleus) files must be copied locally before conversion.
        is_remote = dirname.startswith("omniverse://")
        if is_remote:
            temp_dir = "c:/temp"  # NOTE(review): Windows-only temp location — confirm intended
            temp_name = f"{temp_dir}/{filename}"
            r = omni.client.copy(filepath, temp_name, behavior=omni.client.CopyBehavior.OVERWRITE)
            print(f"copy tmp {temp_name} {r}")
            if r != omni.client.Result.OK:
                print("## could not copy file")
                return
            filepath = temp_name
        print("convert pdf...")
        # pdf2image needs the bundled poppler binaries on PATH.
        path = f"{self._local_extension_path}/poppler-0.68.0/bin"
        if not path in os.environ["PATH"]:
            os.environ["PATH"] += os.pathsep + path
        basename = os.path.splitext(filename)[0].replace(".", "_")
        if is_remote:
            output_folder = temp_dir
        else:
            output_folder = dirname
        outfile = f"{output_folder}/{basename}.png"
        images_from_path = convert_from_path(filepath, output_folder=output_folder)
        # Only the first page of the PDF is used as the floor plan image.
        images_from_path[0].save(outfile)
        print(f"written to {outfile}")
        if is_remote:
            # Push the converted PNG back next to the source PDF on Nucleus.
            upload_file = f"{dirname}/{basename}.png"
            r = omni.client.copy(outfile, upload_file, behavior=omni.client.CopyBehavior.OVERWRITE)
            print(f"upload {r}")
            filepath = upload_file
        else:
            filepath = outfile
    self._image_file = filename
    self.set_image_url(filepath)
    dialog.hide()
    # we'd like to create the map immediately after image selection
    self.create()
# Open the file picker; the apply handler forwards to on_image_file_selected.
def show_image_selection_dialog(self):
heading = "Select File..."
dialog = FilePickerDialog(
heading,
allow_multi_selection=False,
apply_button_label="select file",
click_apply_handler=lambda filename, dirname: self.on_image_file_selected(dialog, dirname, filename),
file_extension_options = [("*.png", "Images"), ("*.jpg", "Images"), ("*.pdf", "PDF documents")]
)
dialog.show()
# True when the "keep aspect ratio" checkbox is ticked.
def is_keep_ar_checked(self):
return self._keep_ratio_check.model.get_value_as_bool()
# UI callback: set x scale; mirrors to y when aspect ratio is locked.
def set_scale_x(self, v):
if v <= 0:
return
self._model.set_scale(v, v if self.is_keep_ar_checked() else self._model.scale_y)
self.refresh()
# UI callback: set y scale; mirrors to x when aspect ratio is locked.
def set_scale_y(self, v):
if v <= 0:
return
self._model.set_scale(v if self.is_keep_ar_checked() else self._model.scale_x, v)
self.refresh()
def set_size_x(self, v):
    """UI callback: set world-space width; derives height when aspect ratio is locked.

    BUG FIX: when "keep aspect ratio" is off, the fallback previously passed
    self._model.size_x (the width) as the new *height*; it must keep the
    current height, matching the set_scale_x/set_scale_y pattern above.
    """
    if v <= 0:
        return
    self._model.set_size(v, v / self._model.aspect_ratio() if self.is_keep_ar_checked() else self._model.size_y)
    self.refresh()

def set_size_y(self, v):
    """UI callback: set world-space height; derives width when aspect ratio is locked.

    BUG FIX (symmetric to set_size_x): the non-keep-ratio fallback for the
    width argument is the current size_x, not size_y.
    """
    if v <= 0:
        return
    self._model.set_size(v * self._model.aspect_ratio() if self.is_keep_ar_checked() else self._model.size_x, v)
    self.refresh()
# The user typed a real-world distance for the two selected reference points.
def set_ref_dist(self, v):
if v <= 0:
return
self._ref_edited = True
self.update_scale_from_selected_references()
# Remember the current 2-point selection; any other count clears it.
def set_selected_points(self, refs):
if len(refs) != 2:
self._selected_points = []
else:
self._selected_points = refs
self._ref_edited = False
self.refresh()
# Derive a uniform scale from the entered real-world distance and the pixel
# distance between the two selected reference points.
def update_scale_from_selected_references(self):
dx = self._selected_points[0].distance_to(self._selected_points[1]) # these are image space points (-> pixels)
x = self._ref_dist_field.model.get_value_as_float()
sx = x/dx
if sx != self._model.scale_x or sx != self._model.scale_y:
self._model.set_scale(sx,sx)
self.refresh()
# Stage event pump: tracks selection changes, stage close and stage open.
def _on_stage_event(self, event):
#print(f'STAGE EVENT : stage event type int: {event.type}')
if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
selection = self._selection.get_selected_prim_paths()
stage = self._usd_context.get_stage()
#print(f'== selection changed with {len(selection)} items')
if selection and stage:
# Keep only prims that decode as points of interest.
sel_refs = []
for selected_path in selection:
#print(f'  item {selected_path}:')
point = FloorPlanImagePointSemantics().read(stage, selected_path)
if point:
sel_refs.append(point)
self.set_selected_points(sel_refs)
elif event.type == int(omni.usd.StageEventType.CLOSED):
# Ignore close events we triggered ourselves from create().
if not self._in_create:
self.clear()
elif event.type == int(omni.usd.StageEventType.OPENED):
if not self._in_create:
context = omni.usd.get_context()
# check
stage = context.get_stage()
# If the opened stage carries floor-plan semantics, adopt it as the model.
model = FloorPlanSemantics().read(stage)
if model:
self._model = model
self._stage_listener = Tf.Notice.Register(
Usd.Notice.ObjectsChanged, self._notice_changed, stage)
self.refresh()
self.clear_dirty()
# USD change notice: any edit below the points-of-interest scope marks the
# generated stage as out of date.
def _notice_changed(self, notice, stage):
for p in notice.GetChangedInfoOnlyPaths():
if FloorPlanImagePointSemantics.DEFAULT_ROOT_PATH in str(p.GetPrimPath()):
self.set_dirty()
break
# Read both components of a Multi*Field as floats.
def get_multifield_floats(self, field):
m =field.model
v1 = m.get_item_value_model(m.get_item_children()[0]).get_value_as_float()
v2 = m.get_item_value_model(m.get_item_children()[1]).get_value_as_float()
return (v1,v2)
# Write both components of a Multi*Field; returns True when a value changed.
def set_multifield(self, field, a, b)->bool:
changed = False
m =field.model
v1 = m.get_item_value_model(m.get_item_children()[0])
if v1.get_value_as_float() != float(a):
v1.set_value(a)
changed = True
v2 = m.get_item_value_model(m.get_item_children()[1])
if v2.get_value_as_float() != float(b):
v2.set_value(b)
changed = True
return changed
# Attach end-edit callbacks to the two components of a Multi*Field.
def add_multifield_edit_cb(self, field, cb_a, cb_b):
m = field.model
m.get_item_value_model(m.get_item_children()[0]).add_end_edit_fn(cb_a)
m.get_item_value_model(m.get_item_children()[1]).add_end_edit_fn(cb_b)
# Reset all session state to a fresh, empty model and redraw.
def clear(self):
self._model = FloorPlanModel()
self._selected_points = []
self._ref_edited = False
self._in_create = False
self._stage_listener = None
self.clear_dirty()
self.refresh()
# Push model state into the widgets; marks the output dirty when an image is
# loaded and any displayed value actually changed.
def refresh(self):
if self._model.image_url:
self._img.source_url = self._model.image_url
self._has_image = True
else:
self._img.source_url = f"{self._local_extension_path}/bitmaps/select_image.png"
self._has_image = False
self._image_label.text = os.path.basename(self._model.image_url)
self._image_label.tooltip = self._model.image_url
changed = False
changed |= self.set_multifield(self._res_field, self._model.resolution_x, self._model.resolution_y)
changed |= self.set_multifield(self._scale_field, self._model.scale_x, self._model.scale_y)
changed |= self.set_multifield(self._size_field, self._model.size_x, self._model.size_y)
if changed and self._has_image:
self.set_dirty()
# The reference-distance field follows the selection unless the user edited it.
if not self._ref_edited:
if len(self._selected_points) == 2:
dx = self._selected_points[0].distance_to(self._selected_points[1])
self._ref_dist_field.model.set_value( dx*self._model.scale_x)
self._selection_info_label.text = f"{self._selected_points[0].name}, {self._selected_points[1].name}"
else:
self._selection_info_label.text = "[select two reference points]"
self._ref_dist_field.model.set_value(0)
# Editing only makes sense once an image is loaded.
self._res_field.enabled = self._has_image
self._scale_field.enabled = self._has_image
self._size_field.enabled = self._has_image
self._ref_dist_field.enabled = self._has_image
# Enable the create button: the generated stage no longer matches the model.
def set_dirty(self):
self._create_button.text = "create..."
self._create_button.enabled = True
# Disable the create button: the generated stage is current.
def clear_dirty(self):
self._create_button.text = "up to date"
self._create_button.enabled = False
# Frame the current selection in the legacy viewport, if available.
def focus_selection(self):
# omni.kit.viewport_legacy is optional dependency
try:
import omni.kit.viewport_legacy
viewport = omni.kit.viewport_legacy.get_viewport_interface()
if viewport:
window = viewport.get_viewport_window()
window.focus_on_selected()
except:
pass
# Pull the latest points of interest from the open stage back into the model,
# so user edits to point prims survive a rebuild.
def update_poi_from_usd(self):
context = omni.usd.get_context()
stage = context.get_stage()
model = FloorPlanSemantics().read(stage)
if model:
self._model.points_of_interest = model.points_of_interest
# Rebuild the output: fresh stage, lighting, semantics prims and visualization.
# _in_create suppresses the CLOSED/OPENED handling in _on_stage_event meanwhile.
def create(self):
self._in_create = True
self.update_poi_from_usd()
context = omni.usd.get_context()
context.new_stage()
stage = context.get_stage()
gb = GeoUtils(stage)
gb.create_lighting()
self._stage_listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self._notice_changed, stage)
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
# get scale
root_prim = FloorPlanSemantics().write(stage, self._model)
floor_path = FloorPlanSimpleViz().write(stage, self._model, root_prim)
stage.SetDefaultPrim(stage.GetPrimAtPath("/World"))
omni.kit.commands.execute(
"SelectPrimsCommand", old_selected_paths=[], new_selected_paths=[floor_path], expand_in_stage=True
)
self.focus_selection()
self._in_create = False
self.clear_dirty()
# Extension shutdown: drop the window and force-collect leftover UI objects.
def on_shutdown(self):
self._window = None
gc.collect()
| 15,905 |
Python
| 40.421875 | 131 | 0.520151 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/__init__.py
|
from .extension import *
| 25 |
Python
| 11.999994 | 24 | 0.76 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/floorplan_simpleviz.py
|
from pxr import Gf, Kind, Sdf, Usd, UsdGeom, UsdShade,Tf
from .floorplan_model import FloorPlanModel
from .utils.geo_utils import GeoUtils
from .floorplan_semantics import FloorPlanImagePointSemantics
# simple viz assumes known structure from semantics
class FloorPlanSimpleViz:
    """Minimal visualization of a floor plan: a textured rectangle for the image
    plus one sphere per point of interest. Assumes the prim layout produced by
    FloorPlanSemantics (simple viz assumes known structure from semantics)."""

    def __init__(self) -> None:
        # Sphere radius used for the point-of-interest markers.
        self.point_radius = 2

    def write(self, stage: Usd.Stage, model: FloorPlanModel, root_path: str):
        """Create the visualization prims under `root_path` and return it.

        Cleanup: removed two unused locals from the original (`map_root`,
        which was fetched and never used, and the `xf` prim lookup inside the
        loop that was likewise never read).
        """
        self._model = model
        sx = self._model.resolution_x
        sy = self._model.resolution_y
        gb = GeoUtils(stage=stage)
        # Shared blue material for all point markers.
        point_mat = gb.create_material(root_path, "point_material", (0, 0, 1))
        gb.create_textured_rect_mesh(root_path, sx, sy, model.image_url)
        # --- create reference points
        poi_path = f"{root_path}/{FloorPlanImagePointSemantics.DEFAULT_ROOT_PATH}"
        for key, poi in model.points_of_interest.items():
            prim_path = f"{poi_path}/{Tf.MakeValidIdentifier(key)}"
            mesh_prim = stage.DefinePrim(f"{prim_path}/mesh", "Sphere")
            mesh_prim.GetAttribute("radius").Set(self.point_radius)
            UsdShade.MaterialBindingAPI(mesh_prim).Bind(point_mat)
        return root_path
| 1,360 |
Python
| 41.531249 | 82 | 0.630882 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/floorplan_model.py
|
from __future__ import annotations
from weakref import ref
from pydantic import BaseModel, Field
from typing import List, Dict , Optional
from pxr import Usd, Kind,Gf
from PIL import Image
import carb
import omni.client
import io
class FloorPlanImagePoint(BaseModel):
    """A named point on the floor-plan image, in pixel coordinates."""
    # Pixel coordinates within the image.
    x: int = 0
    y: int = 0
    name: str = ""
    point_type: str = ""

    def set(self, x, y):
        """Update both pixel coordinates at once."""
        self.x = x
        self.y = y

    def distance_to(self, point: FloorPlanImagePoint):
        """Euclidean distance (in pixels) to another point."""
        delta = Gf.Vec2f(point.x, point.y) - Gf.Vec2f(self.x, self.y)
        return delta.GetLength()

    def component_distance_to(self, point: FloorPlanImagePoint):
        """Per-axis absolute distances to another point, as an (dx, dy) tuple."""
        return (abs(point.x - self.x), abs(point.y - self.y))
class FloorPlanModel(BaseModel):
    """Data model of a floor plan: image URL and pixel resolution, world-space
    size and per-axis scale, plus named points of interest (an origin and two
    reference points are created on demand)."""
    size_y: float = 0
    size_x: float = 0
    resolution_x: int = 0
    resolution_y: int = 0
    image_url: str = ""
    scale_x: float = 1.0
    scale_y: float = 1.0
    points_of_interest: Dict[str, FloorPlanImagePoint] = dict()

    def poi(self, name, point_type) -> FloorPlanImagePoint:
        """Return the named point of interest, creating it on first access.
        Returns None for an empty name."""
        if not name:
            return None
        if not name in self.points_of_interest:
            self.points_of_interest[name] = FloorPlanImagePoint(name=name, point_type=point_type)
        return self.points_of_interest.get(name)

    def add_poi(self, point: FloorPlanImagePoint):
        """Register an existing point under its own name; unnamed points are rejected."""
        if not point.name:
            print("no name")
            return
        self.points_of_interest[point.name] = point

    def reference_diff_x(self):
        """Absolute x distance (pixels) between the two reference points."""
        return abs(self.reference_b().x - self.reference_a().x)

    def reference_diff_y(self):
        """Absolute y distance (pixels) between the two reference points."""
        return abs(self.reference_b().y - self.reference_a().y)

    def reference_a(self):
        return self.poi("Reference_A", "Reference")

    def reference_origin(self):
        return self.poi("Origin", "Reference")

    def reference_b(self):
        return self.poi("Reference_B", "Reference")

    def set_image_url(self, url):
        """Load the image at `url` to read its resolution; resets scale to 1 and
        places default reference points (A top-left, B bottom-right, origin centered)."""
        result, _, content = omni.client.read_file(url)
        if result != omni.client.Result.OK:
            carb.log_error(f"Can't read image file {url}, error code: {result}")
            return
        img = Image.open(io.BytesIO(memoryview(content).tobytes()))
        sx, sy = img.size
        if sx == 0 or sy == 0:
            print("# invalid image")
            return
        self.image_url = url
        self.resolution_x = sx
        self.resolution_y = sy
        self.scale_x = 1.0
        self.scale_y = 1.0
        self.reference_a().set(0, 0)
        self.reference_b().set(self.resolution_x, self.resolution_y)
        self.reference_origin().set(self.resolution_x / 2, self.resolution_y / 2)
        self._update_size()

    def set_scale(self, x, y):
        """Set world-units-per-pixel scale; size is derived from the resolution."""
        self.scale_x = x
        self.scale_y = y
        self._update_size()

    def set_size(self, x, y):
        """Set world-space size; scale is derived from the resolution.

        BUG FIX: the original additionally assigned `self.aspect_ratio = ...`,
        which shadowed the aspect_ratio() method below (and assigning an
        undeclared attribute is rejected on a pydantic BaseModel). Callers
        such as the UI's set_size_x invoke aspect_ratio() as a method, and the
        ratio is fully derived from size_x/size_y, so nothing is stored.
        """
        self.size_x = x
        self.size_y = y
        self.scale_x = self.size_x / self.resolution_x
        self.scale_y = self.size_y / self.resolution_y

    def _update_size(self):
        # Keep size consistent with resolution * scale.
        self.size_x = self.resolution_x * self.scale_x
        self.size_y = self.resolution_y * self.scale_y

    def aspect_ratio(self) -> float:
        """Width/height ratio of the current world-space size."""
        return self.size_x / self.size_y
| 3,244 |
Python
| 28.770642 | 99 | 0.59402 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/poppler-0.68.0/include/poppler/cpp/poppler-global.h
|
/*
* Copyright (C) 2009-2010, Pino Toscano <[email protected]>
* Copyright (C) 2010, Patrick Spendrin <[email protected]>
* Copyright (C) 2014, Hans-Peter Deifel <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef POPPLER_GLOBAL_H
#define POPPLER_GLOBAL_H
#if defined(_WIN32)
# define LIB_EXPORT __declspec(dllexport)
# define LIB_IMPORT __declspec(dllimport)
#else
# define LIB_EXPORT
# define LIB_IMPORT
#endif
#if defined(poppler_cpp_EXPORTS)
# define POPPLER_CPP_EXPORT LIB_EXPORT
#else
# define POPPLER_CPP_EXPORT LIB_IMPORT
#endif
#include <iosfwd>
#include <string>
#include <vector>
namespace poppler
{
/// \cond DOXYGEN_SKIP_THIS
namespace detail
{
// Pre-C++11 "noncopyable" mixin: copy constructor and copy assignment are
// declared private and left unimplemented, so derived classes cannot be
// copied. (Vendored poppler header — comments only.)
class POPPLER_CPP_EXPORT noncopyable
{
protected:
noncopyable();
~noncopyable();
private:
noncopyable(const noncopyable &);
const noncopyable& operator=(const noncopyable &);
};
}
typedef detail::noncopyable noncopyable;
/// \endcond
enum rotation_enum { rotate_0, rotate_90, rotate_180, rotate_270 };
enum page_box_enum { media_box, crop_box, bleed_box, trim_box, art_box };
enum permission_enum { perm_print, perm_change, perm_copy, perm_add_notes,
perm_fill_forms, perm_accessibility, perm_assemble,
perm_print_high_resolution };
enum case_sensitivity_enum { case_sensitive, case_insensitive };
typedef std::vector<char> byte_array;
typedef unsigned int /* time_t */ time_type;
// to disable warning only for this occurrence
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4251) /* class 'A' needs to have dll interface for to be used by clients of class 'B'. */
#endif
// String type used throughout the poppler-cpp API, backed by
// std::basic_string<unsigned short>. Implicit conversions from/to
// std::string are deliberately private to force callers through the
// explicit encoding-aware conversions below.
class POPPLER_CPP_EXPORT ustring : public std::basic_string<unsigned short>
{
public:
ustring();
ustring(size_type len, value_type ch);
~ustring();
// Encode to UTF-8 bytes.
byte_array to_utf8() const;
// Encode to Latin-1.
std::string to_latin1() const;
// Decode from UTF-8; len = -1 presumably means NUL-terminated — confirm in poppler docs.
static ustring from_utf8(const char *str, int len = -1);
static ustring from_latin1(const std::string &str);
private:
// forbid implicit std::string conversions
ustring(const std::string &);
operator std::string() const;
ustring& operator=(const std::string &);
};
#ifdef _MSC_VER
#pragma warning(pop)
#endif
POPPLER_CPP_EXPORT time_type convert_date(const std::string &date);
POPPLER_CPP_EXPORT std::ostream& operator<<(std::ostream& stream, const byte_array &array);
typedef void(*debug_func)(const std::string &, void *);
POPPLER_CPP_EXPORT void set_debug_error_function(debug_func debug_function, void *closure);
}
#endif
| 3,194 |
C
| 26.307692 | 114 | 0.711334 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/poppler-0.68.0/include/poppler/cpp/poppler-rectangle.h
|
/*
* Copyright (C) 2009-2010, Pino Toscano <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef POPPLER_RECTANGLE_H
#define POPPLER_RECTANGLE_H
#include "poppler-global.h"
namespace poppler
{
// Axis-aligned rectangle parameterized on coordinate type; stored as two
// corners (x1,y1)/(x2,y2) but constructed from origin + width/height.
// (Vendored poppler header — comments only.)
template <typename T> class rectangle
{
public:
rectangle()
: x1(), y1(), x2(), y2()
{}
rectangle(T _x, T _y, T w, T h)
: x1(_x), y1(_y), x2(x1 + w), y2(y1 + h)
{}
~rectangle()
{}
// NOTE(review): "empty" requires BOTH extents to be zero — a zero-width,
// non-zero-height rectangle is not considered empty by this definition.
bool is_empty() const
{ return (x1 == x2) && (y1 == y2); }
T x() const
{ return x1; }
T y() const
{ return y1; }
T width() const
{ return x2 - x1; }
T height() const
{ return y2 - y1; }
T left() const
{ return x1; }
T top() const
{ return y1; }
T right() const
{ return x2; }
T bottom() const
{ return y2; }
// Edge setters move one side only; width/height change accordingly.
void set_left(T value)
{ x1 = value; }
void set_top(T value)
{ y1 = value; }
void set_right(T value)
{ x2 = value; }
void set_bottom(T value)
{ y2 = value; }
private:
T x1, y1, x2, y2;
};
typedef rectangle<int> rect;
typedef rectangle<double> rectf;
POPPLER_CPP_EXPORT std::ostream& operator<<(std::ostream& stream, const rect &r);
POPPLER_CPP_EXPORT std::ostream& operator<<(std::ostream& stream, const rectf &r);
}
#endif
| 1,950 |
C
| 21.952941 | 82 | 0.62359 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/poppler-0.68.0/include/poppler/cpp/poppler-font.h
|
/*
* Copyright (C) 2009, Pino Toscano <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef POPPLER_FONT_H
#define POPPLER_FONT_H
#include "poppler-global.h"
#include <vector>
namespace poppler
{
class document;
class document_private;
class font_info_private;
class font_iterator;
class font_iterator_private;
// Descriptive information about one font used by a PDF document.
// Copyable value type; instances are produced by font_iterator.
class POPPLER_CPP_EXPORT font_info
{
public:
// Font technology of the underlying font program.
enum type_enum {
unknown,
type1,
type1c,
type1c_ot,
type3,
truetype,
truetype_ot,
cid_type0,
cid_type0c,
cid_type0c_ot,
cid_truetype,
cid_truetype_ot
};
font_info();
font_info(const font_info &fi);
~font_info();
std::string name() const;
std::string file() const;
bool is_embedded() const;
bool is_subset() const;
type_enum type() const;
font_info& operator=(const font_info &fi);
private:
// pimpl; private ctor used by the iterator.
font_info(font_info_private &dd);
font_info_private *d;
friend class font_iterator;
};
// Page-by-page iterator over a document's fonts; non-copyable and created
// via document::create_font_iterator.
class POPPLER_CPP_EXPORT font_iterator : public poppler::noncopyable
{
public:
~font_iterator();
// Fonts of the current page; presumably advances to the next page — confirm in poppler docs.
std::vector<font_info> next();
bool has_next() const;
int current_page() const;
private:
font_iterator(int, document_private *dd);
font_iterator_private *d;   // pimpl
friend class document;
};
}
#endif
| 2,001 |
C
| 20.760869 | 82 | 0.668666 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/poppler-0.68.0/include/poppler/cpp/poppler-toc.h
|
/*
* Copyright (C) 2009, Pino Toscano <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef POPPLER_TOC_H
#define POPPLER_TOC_H
#include "poppler-global.h"
#include <vector>
namespace poppler
{
class toc_private;
class toc_item;
class toc_item_private;
// Table of contents (outline) of a document; owns the item tree reachable
// from root(). Non-copyable; created via document::create_toc.
class POPPLER_CPP_EXPORT toc : public poppler::noncopyable
{
public:
~toc();
toc_item* root() const;
private:
toc();
toc_private *d;   // pimpl
friend class toc_private;
};
// One node of the TOC tree: a title plus child items.
class POPPLER_CPP_EXPORT toc_item : public poppler::noncopyable
{
public:
typedef std::vector<toc_item *>::const_iterator iterator;
~toc_item();
ustring title() const;
// Presumably the outline entry's default-expanded flag — confirm in poppler docs.
bool is_open() const;
std::vector<toc_item *> children() const;
iterator children_begin() const;
iterator children_end() const;
private:
toc_item();
toc_item_private *d;   // pimpl
friend class toc;
friend class toc_private;
friend class toc_item_private;
};
}
#endif
| 1,608 |
C
| 20.453333 | 82 | 0.699627 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/poppler-0.68.0/include/poppler/cpp/poppler-version.h
|
/*
* Copyright (C) 2009, Pino Toscano <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef POPPLER_VERSION_H
#define POPPLER_VERSION_H
#include "poppler-global.h"
#define POPPLER_VERSION "0.68.0"
#define POPPLER_VERSION_MAJOR 0
#define POPPLER_VERSION_MINOR 68
#define POPPLER_VERSION_MICRO 0
namespace poppler
{
POPPLER_CPP_EXPORT std::string version_string();
POPPLER_CPP_EXPORT unsigned int version_major();
POPPLER_CPP_EXPORT unsigned int version_minor();
POPPLER_CPP_EXPORT unsigned int version_micro();
}
#endif
| 1,207 |
C
| 29.199999 | 82 | 0.752278 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/poppler-0.68.0/include/poppler/cpp/poppler-document.h
|
/*
* Copyright (C) 2009-2010, Pino Toscano <[email protected]>
* Copyright (C) 2016 Jakub Alba <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef POPPLER_DOCUMENT_H
#define POPPLER_DOCUMENT_H
#include "poppler-global.h"
#include "poppler-font.h"
namespace poppler
{
class document_private;
class embedded_file;
class page;
class toc;
// A PDF document. Non-copyable; obtain instances through the static
// load_from_* factories below. Gives access to metadata, pages, fonts,
// embedded files, and saving.
class POPPLER_CPP_EXPORT document : public poppler::noncopyable
{
public:
    // Viewer presentation mode requested by the document.
    enum page_mode_enum {
        use_none,
        use_outlines,
        use_thumbs,
        fullscreen,
        use_oc,
        use_attach
    };
    // Page layout requested by the document.
    enum page_layout_enum {
        no_layout,
        single_page,
        one_column,
        two_column_left,
        two_column_right,
        two_page_left,
        two_page_right
    };
    ~document();
    // A locked (password-protected) document must be unlock()ed before use.
    bool is_locked() const;
    bool unlock(const std::string &owner_password, const std::string &user_password);
    page_mode_enum page_mode() const;
    page_layout_enum page_layout() const;
    // Writes the PDF specification version into *major / *minor.
    void get_pdf_version(int *major, int *minor) const;
    // Generic access to the document information dictionary.
    std::vector<std::string> info_keys() const;
    ustring info_key(const std::string &key) const;
    bool set_info_key(const std::string &key, const ustring &val);
    time_type info_date(const std::string &key) const;
    bool set_info_date(const std::string &key, time_type val);
    // Typed accessors for common info entries; setters return success.
    ustring get_title() const;
    bool set_title(const ustring &title);
    ustring get_author() const;
    bool set_author(const ustring &author);
    ustring get_subject() const;
    bool set_subject(const ustring &subject);
    ustring get_keywords() const;
    bool set_keywords(const ustring &keywords);
    ustring get_creator() const;
    bool set_creator(const ustring &creator);
    ustring get_producer() const;
    bool set_producer(const ustring &producer);
    time_type get_creation_date() const;
    bool set_creation_date(time_type creation_date);
    time_type get_modification_date() const;
    bool set_modification_date(time_type mod_date);
    // Drops the whole information dictionary.
    bool remove_info();
    bool is_encrypted() const;
    bool is_linearized() const;
    bool has_permission(permission_enum which) const;
    // XMP metadata stream, if any.
    ustring metadata() const;
    bool get_pdf_id(std::string *permanent_id, std::string *update_id) const;
    // Page access. NOTE(review): the raw-pointer factory style suggests the
    // caller owns the returned page/toc/iterator objects -- confirm against
    // the poppler-cpp API documentation.
    int pages() const;
    page* create_page(const ustring &label) const;
    page* create_page(int index) const;
    std::vector<font_info> fonts() const;
    font_iterator* create_font_iterator(int start_page = 0) const;
    toc* create_toc() const;
    bool has_embedded_files() const;
    std::vector<embedded_file *> embedded_files() const;
    bool save(const std::string &filename) const;
    bool save_a_copy(const std::string &filename) const;
    // Factories: load from a file, an in-memory buffer, or a raw byte range.
    // Passwords are only needed for protected documents.
    static document* load_from_file(const std::string &file_name,
                                    const std::string &owner_password = std::string(),
                                    const std::string &user_password = std::string());
    static document* load_from_data(byte_array *file_data,
                                    const std::string &owner_password = std::string(),
                                    const std::string &user_password = std::string());
    static document* load_from_raw_data(const char *file_data,
                                        int file_data_length,
                                        const std::string &owner_password = std::string(),
                                        const std::string &user_password = std::string());
private:
    document(document_private &dd);
    document_private *d;   // pimpl
    friend class document_private;
};
}
#endif
| 4,272 |
C
| 31.127819 | 90 | 0.64279 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/poppler-0.68.0/include/poppler/cpp/poppler-page-transition.h
|
/*
* Copyright (C) 2009, Pino Toscano <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef POPPLER_PAGE_TRANSITION_H
#define POPPLER_PAGE_TRANSITION_H
#include "poppler-global.h"
class Object;
namespace poppler
{
class page;
class page_transition_private;
// Parameters of the visual transition effect applied when a page is shown
// during a presentation. Copyable; created by page::transition().
class POPPLER_CPP_EXPORT page_transition
{
public:
    // Transition style.
    enum type_enum {
        replace = 0,
        split,
        blinds,
        box,
        wipe,
        dissolve,
        glitter,
        fly,
        push,
        cover,
        uncover,
        fade
    };
    enum alignment_enum {
        horizontal = 0,
        vertical
    };
    enum direction_enum {
        inward = 0,
        outward
    };
    page_transition(const page_transition &pt);
    ~page_transition();
    type_enum type() const;
    // Duration of the effect. NOTE(review): units are not visible in this
    // header -- confirm (PDF stores seconds).
    int duration() const;
    alignment_enum alignment() const;
    direction_enum direction() const;
    int angle() const;
    double scale() const;
    bool is_rectangular() const;
    page_transition& operator=(const page_transition &pt);
private:
    // Constructed by page from the raw transition dictionary object.
    page_transition(Object *params);
    page_transition_private *d;   // pimpl
    friend class page;
};
}
#endif
| 1,817 |
C
| 20.903614 | 82 | 0.657127 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/poppler-0.68.0/include/poppler/cpp/poppler-embedded-file.h
|
/*
* Copyright (C) 2009-2010, Pino Toscano <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef POPPLER_EMBEDDED_FILE_H
#define POPPLER_EMBEDDED_FILE_H
#include "poppler-global.h"
#include <vector>
namespace poppler
{
class embedded_file_private;
// A file embedded inside a PDF document (see document::embedded_files()).
// Non-copyable; instances are created internally by embedded_file_private.
class POPPLER_CPP_EXPORT embedded_file : public poppler::noncopyable
{
public:
    ~embedded_file();
    bool is_valid() const;
    std::string name() const;
    ustring description() const;
    int size() const;
    time_type modification_date() const;
    time_type creation_date() const;
    byte_array checksum() const;
    std::string mime_type() const;
    // Raw content of the embedded file.
    byte_array data() const;
private:
    embedded_file(embedded_file_private &dd);
    embedded_file_private *d;   // pimpl
    friend class embedded_file_private;
};
}
#endif
| 1,465 |
C
| 25.178571 | 82 | 0.717406 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/poppler-0.68.0/include/poppler/cpp/poppler-image.h
|
/*
* Copyright (C) 2010, Pino Toscano <[email protected]>
* Copyright (C) 2018, Zsombor Hollay-Horvath <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef POPPLER_IMAGE_H
#define POPPLER_IMAGE_H
#include "poppler-global.h"
#include "poppler-rectangle.h"
namespace poppler
{
class image_private;
// An in-memory raster image; used as the result type of
// page_renderer::render_page().
class POPPLER_CPP_EXPORT image
{
public:
    // Pixel formats of the image data.
    enum format_enum {
        format_invalid,
        format_mono,
        format_rgb24,
        format_argb32,
        format_gray8,
        format_bgr24
    };
    image();
    image(int iwidth, int iheight, format_enum iformat);
    // Wraps caller-provided pixel data. NOTE(review): ownership of idata is
    // not visible from this header -- check the implementation before use.
    image(char *idata, int iwidth, int iheight, format_enum iformat);
    image(const image &img);
    ~image();
    bool is_valid() const;
    format_enum format() const;
    int width() const;
    int height() const;
    char *data();
    const char *const_data() const;
    int bytes_per_row() const;
    // Returns a copy, optionally restricted to rectangle r.
    image copy(const rect &r = rect()) const;
    bool save(const std::string &file_name, const std::string &out_format, int dpi = -1) const;
    static std::vector<std::string> supported_image_formats();
    image& operator=(const image &img);
private:
    // NOTE(review): presumably un-shares the pixel buffer before mutation
    // (copy-on-write) -- confirm in the implementation.
    void detach();
    image_private *d;   // pimpl
    friend class image_private;
};
}
#endif
| 1,918 |
C
| 24.586666 | 95 | 0.680918 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/poppler-0.68.0/include/poppler/cpp/poppler-page.h
|
/*
* Copyright (C) 2009-2010, Pino Toscano <[email protected]>
* Copyright (C) 2018, Suzuki Toshiya <[email protected]>
* Copyright (C) 2018, Albert Astals Cid <[email protected]>
* Copyright (C) 2018, Zsombor Hollay-Horvath <[email protected]>
* Copyright (C) 2018, Aleksey Nikolaev <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef POPPLER_PAGE_H
#define POPPLER_PAGE_H
#include "poppler-global.h"
#include "poppler-rectangle.h"
#include <memory>
namespace poppler
{
struct text_box_data;
// One word-sized piece of page text with its bounding box; produced by
// page::text_list(). Move-only.
class POPPLER_CPP_EXPORT text_box
{
    friend class page;
public:
    text_box(text_box&&);
    text_box& operator=(text_box&&);
    ~text_box();
    ustring text() const;
    rectf bbox() const;
    /**
     \since 0.68
     */
    int rotation() const;
    /**
     Get a bbox for the i-th glyph
     This method returns a rectf of the bounding box for
     the i-th glyph in the text_box.
     \note The text_box object owns the rectf objects,
     the caller does not need to free them.
     \warning For too large glyph index, rectf(0,0,0,0)
     is returned. The number of the glyphs and ustring
     codepoints might be different in some complex scripts.
     */
    rectf char_bbox(size_t i) const;
    bool has_space_after() const;
private:
    text_box(text_box_data *data);
    std::unique_ptr<text_box_data> m_data;   // pimpl
};
class document;
class document_private;
class page_private;
class page_transition;
// A single page of a document. Non-copyable; created via
// document::create_page().
class POPPLER_CPP_EXPORT page : public poppler::noncopyable
{
public:
    enum orientation_enum {
        landscape,
        portrait,
        seascape,
        upside_down
    };
    // Where a text search starts from, relative to the rect passed in.
    enum search_direction_enum {
        search_from_top,
        search_next_result,
        search_previous_result
    };
    enum text_layout_enum {
        physical_layout,
        raw_order_layout
    };
    ~page();
    orientation_enum orientation() const;
    // Presentation display duration. NOTE(review): units not visible here.
    double duration() const;
    rectf page_rect(page_box_enum box = crop_box) const;
    ustring label() const;
    // Transition effect of this page. NOTE(review): ownership of the
    // returned pointer is not visible in this header -- confirm.
    page_transition* transition() const;
    // Searches for `text`; r is both the start hint and the result rect.
    bool search(const ustring &text, rectf &r, search_direction_enum direction,
                case_sensitivity_enum case_sensitivity, rotation_enum rotation = rotate_0) const;
    // Text of the page, optionally restricted to rect and/or a layout mode.
    ustring text(const rectf &rect = rectf()) const;
    ustring text(const rectf &rect, text_layout_enum layout_mode) const;
    /**
     Returns a list of text of the page
     This method returns a std::vector of text_box that contain all
     the text of the page, with roughly one text word of text
     per text_box item.
     For text written in western languages (left-to-right and
     up-to-down), the std::vector contains the text in the proper
     order.
     \since 0.63
     \note The page object owns the text_box objects as unique_ptr,
     the caller does not need to free them.
     \warning This method is not tested with Asian scripts
     */
    std::vector<text_box> text_list() const;
private:
    page(document_private *doc, int index);
    page_private *d;   // pimpl
    friend class page_private;
    friend class document;
};
}
#endif
| 3,828 |
C
| 25.964789 | 97 | 0.665622 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/poppler-0.68.0/include/poppler/cpp/poppler-page-renderer.h
|
/*
* Copyright (C) 2010, Pino Toscano <[email protected]>
* Copyright (C) 2018, Zsombor Hollay-Horvath <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef POPPLER_PAGE_RENDERER_H
#define POPPLER_PAGE_RENDERER_H
#include "poppler-global.h"
#include "poppler-image.h"
namespace poppler
{
typedef unsigned int argb;
class page;
class page_renderer_private;
// Renders pages of a document into image objects. Non-copyable.
class POPPLER_CPP_EXPORT page_renderer : public poppler::noncopyable
{
public:
    // Bit flags controlling render quality; combine via set_render_hints().
    enum render_hint {
        antialiasing = 0x00000001,
        text_antialiasing = 0x00000002,
        text_hinting = 0x00000004
    };
    enum line_mode_enum {
        line_default,
        line_solid,
        line_shape
    };
    page_renderer();
    ~page_renderer();
    // Background color of the rendered page; `argb` is an unsigned int with
    // packed ARGB channels (typedef above).
    argb paper_color() const;
    void set_paper_color(argb c);
    unsigned int render_hints() const;
    void set_render_hint(render_hint hint, bool on = true);
    void set_render_hints(unsigned int hints);
    image::format_enum image_format() const;
    void set_image_format(image::format_enum format);
    line_mode_enum line_mode() const;
    void set_line_mode(line_mode_enum mode);
    // Renders page p at the given x/y resolution. NOTE(review): the -1
    // defaults for x/y/w/h presumably select the whole page -- confirm
    // against the poppler-cpp docs.
    image render_page(const page *p,
                      double xres = 72.0, double yres = 72.0,
                      int x = -1, int y = -1, int w = -1, int h = -1,
                      rotation_enum rotate = rotate_0) const;
    // Whether this build of poppler-cpp is able to render at all.
    static bool can_render();
private:
    page_renderer_private *d;   // pimpl
    friend class page_renderer_private;
};
}
#endif
| 2,161 |
C
| 26.025 | 82 | 0.671448 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/utils/common.py
|
from pydantic import BaseModel
class Location3d(BaseModel):
    """A 3D position; all coordinates default to the origin."""
    x : float = 0.0
    y : float = 0.0
    z : float = 0.0
class RGBColor(BaseModel):
    """An RGB color; all three channel values are required.

    NOTE(review): the expected channel range (0-1 vs 0-255) is not visible
    here -- confirm against the callers.
    """
    red_value : float
    green_value : float
    blue_value : float
| 223 |
Python
| 14.999999 | 30 | 0.623318 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/ai/synctwin/floorplan_generator_lite/utils/geo_utils.py
|
from pxr import Gf, UsdGeom, Usd, Sdf, UsdShade
import omni.usd as ou
import unicodedata
from .common import Location3d
import re
import omni.kit.actions.core
class GeoUtils:
    """Helpers for building USD stages: layers, lighting, materials and
    textured rectangle meshes."""
    def __init__(self, stage = None) -> None:
        # Stage to operate on; may also be set later via open_or_create_stage().
        self._stage = stage
    def open_or_create_stage(self, path, clear_exist=True) -> Usd.Stage:
        """Open the layer at `path` (creating it if missing) and wrap it in a
        stage. With clear_exist=True an existing layer is emptied first.
        Returns the stage, or None if the layer could not be opened/created."""
        layer = Sdf.Layer.FindOrOpen(path)
        if not layer:
            layer = Sdf.Layer.CreateNew(path)
        elif clear_exist:
            layer.Clear()
        if layer:
            self._stage = Usd.Stage.Open(layer)
            return self._stage
        else:
            return None
    def create_lighting(self):
        """Switch viewport lighting to a rig via the Kit action registry."""
        ar = omni.kit.actions.core.get_action_registry()
        set_lighting_mode_rig = ar.get_action("omni.kit.viewport.menubar.lighting", "set_lighting_mode_rig")
        # NOTE(review): the meaning of index 2 is defined by the viewport
        # menubar extension -- confirm which rig it selects.
        set_lighting_mode_rig.execute(2)
    def create_material(self, material_path, name, diffuse_color) -> UsdShade.Material:
        """Create a UsdPreviewSurface material with the given diffuse color
        at the next free path derived from `material_path`.

        NOTE(review): the `name` parameter is currently unused.
        """
        material_path = ou.get_stage_next_free_path(self._stage, material_path, False)
        material = UsdShade.Material.Define(self._stage, material_path)
        shader_path = material_path + "/Shader"
        shader = UsdShade.Shader.Define(self._stage, shader_path)
        shader.CreateIdAttr("UsdPreviewSurface")
        shader.CreateInput("diffuseColor", Sdf.ValueTypeNames.Color3f).Set(diffuse_color)
        material.CreateSurfaceOutput().ConnectToSource(shader.ConnectableAPI(), "surface")
        return material
    def create_textured_rect_mesh(self, root_path, sx,sy, image_url):
        """Create an sx-by-sy rectangle mesh under `root_path`, textured with
        the image at `image_url` through a UsdPreviewSurface material."""
        stage = self._stage
        billboard = UsdGeom.Mesh.Define(stage, f"{root_path}/mesh")
        left = 0
        right = sx
        top = 0
        bottom = sy
        # Single quad spanning (0,0)..(sx,sy) in the XY plane.
        billboard.CreatePointsAttr([(left, top, 0), (right, top, 0), (right, bottom, 0), (left, bottom, 0)])
        billboard.CreateFaceVertexCountsAttr([4])
        billboard.CreateFaceVertexIndicesAttr([0,1,2,3])
        billboard.CreateExtentAttr([(left, top, 0), (right, bottom, 0)])
        # UVs mapping the full texture onto the quad.
        primvars_api = UsdGeom.PrimvarsAPI(billboard)
        texCoords = primvars_api.CreatePrimvar("primvars:st",
                                        Sdf.ValueTypeNames.TexCoord2fArray,
                                        UsdGeom.Tokens.faceVarying)
        texCoords.Set([(0, 0), (1, 0), (1,1), (0, 1)])
        # Preview-surface material.
        material_path = f"{root_path}/map_material"
        material = UsdShade.Material.Define(stage, material_path)
        pbrShader = UsdShade.Shader.Define(stage, f'{material_path}/PBRShader')
        pbrShader.CreateIdAttr("UsdPreviewSurface")
        pbrShader.CreateInput("roughness", Sdf.ValueTypeNames.Float).Set(0.4)
        pbrShader.CreateInput("metallic", Sdf.ValueTypeNames.Float).Set(0.0)
        material.CreateSurfaceOutput().ConnectToSource(pbrShader.ConnectableAPI(), "surface")
        # Texture sampler fed by the mesh's "st" primvar.
        stReader = UsdShade.Shader.Define(stage, f'{material_path}/stReader')
        stReader.CreateIdAttr('UsdPrimvarReader_float2')
        diffuseTextureSampler = UsdShade.Shader.Define(stage,f'{material_path}/diffuseTexture')
        diffuseTextureSampler.CreateIdAttr('UsdUVTexture')
        diffuseTextureSampler.CreateInput('file', Sdf.ValueTypeNames.Asset).Set(image_url)
        diffuseTextureSampler.CreateInput("st", Sdf.ValueTypeNames.Float2).ConnectToSource(stReader.ConnectableAPI(), 'result')
        diffuseTextureSampler.CreateOutput('rgb', Sdf.ValueTypeNames.Float3)
        pbrShader.CreateInput("diffuseColor",
                              Sdf.ValueTypeNames.Color3f).ConnectToSource(diffuseTextureSampler.ConnectableAPI()
                                                                        , 'rgb')
        # Route the primvar name ("st") through a material input.
        stInput = material.CreateInput('frame:stPrimvarName', Sdf.ValueTypeNames.Token)
        stInput.Set('st')
        stReader.CreateInput('varname',Sdf.ValueTypeNames.Token).ConnectToSource(stInput)
        UsdShade.MaterialBindingAPI(billboard).Bind(material)
def location_to_vec3f(pos3d:Location3d)->Gf.Vec3f:
    """Convert a Location3d model into a USD Gf.Vec3f."""
    return Gf.Vec3f(pos3d.x, pos3d.y, pos3d.z)
| 4,097 |
Python
| 43.064516 | 127 | 0.642421 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/config/extension.toml
|
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.3.0"
# The title and description fields are primarily for displaying extension info in UI
title = "SyncTwin floorplan generator lite"
description="a generator for floorplans"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "manufacturing"
# Keywords for the extension
keywords = ["kit", "manufacturing", "SyncTwin"]
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/logo.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
"omni.kit.actions.core" = {}
# Main python module this extension provides
[[python.module]]
name = "ai.synctwin.floorplan_generator_lite"
[python.pipapi]
requirements = ['pdf2image']
use_online_index = true
| 1,091 |
TOML
| 27.736841 | 118 | 0.743355 |
perfectproducts/floorplan_generator_lite/exts/ai.synctwin.floorplan_generator_lite/docs/README.md
|
# floorplan generator lite
easily create floorplans from bitmaps
visit http://www.synctwin.ai
## about synctwin GmbH:
synctwin GmbH is an industrial metaverse enabler for small and medium-sized enterprises.
We are a spinoff of ipolog GmbH and a member of the actano group.
| 283 |
Markdown
| 17.933332 | 87 | 0.777385 |
perfectproducts/mqtt_sample/README.md
|
# SyncTwin MQTT Example
An example to control a digital twin asset with an MQTT subscription.

see a tutorial on the code here : https://medium.com/@mtw75/how-to-control-a-digital-twin-asset-with-mqtt-in-nvidia-omniverse-92382e92e4dc
for more information visit https://www.synctwin.ai
| 351 |
Markdown
| 34.199997 | 138 | 0.783476 |
perfectproducts/mqtt_sample/tools/scripts/link_app.py
|
import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
    """Query the local Omniverse Launcher for installed Kit apps.

    Returns a dict mapping app slug -> (display name, install root) for every
    component that reports an installed "latest" version. Exits the process
    if the Launcher endpoint cannot be reached.
    """
    pool = urllib3.PoolManager()
    try:
        response = pool.request("GET", "http://127.0.0.1:33480/components")
    except Exception as e:
        print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
        sys.exit(1)
    discovered = {}
    for component in json.loads(response.data.decode("utf-8")):
        latest = component.get("installedVersions", {}).get("latest", "")
        if not latest:
            continue
        # Pick the settings entry matching the installed "latest" version.
        for settings_entry in component.get("settings", []):
            if settings_entry.get("version", "") == latest:
                root = settings_entry.get("launch", {}).get("root", "")
                discovered[component["slug"]] = (component["name"], root)
                break
    return discovered
def create_link(src, dst):
    """Create a folder link from src to dst via the packman API."""
    print(f"Creating a link '{src}' -> '{dst}'")
    packmanapi.link(src, dst)
# Preferred apps when auto-selecting (first match wins).
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
    parser.add_argument(
        "--path",
        help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
        required=False,
    )
    parser.add_argument(
        "--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
    )
    args = parser.parse_args()
    path = args.path
    # Without an explicit --path, discover installed apps via the Launcher.
    if not path:
        print("Path is not specified, looking for Omniverse Apps...")
        apps = find_omniverse_apps()
        if len(apps) == 0:
            print(
                "Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
            )
            sys.exit(0)
        print("\nFound following Omniverse Apps:")
        for i, slug in enumerate(apps):
            name, root = apps[slug]
            print(f"{i}: {name} ({slug}) at: '{root}'")
        # --app picks by slug; otherwise fall back to APP_PRIORITIES, then
        # to whichever app was found first.
        if args.app:
            selected_app = args.app.lower()
            if selected_app not in apps:
                choices = ", ".join(apps.keys())
                print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
                sys.exit(0)
        else:
            selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
            if not selected_app:
                selected_app = next(iter(apps))
        print(f"\nSelected app: {selected_app}")
        _, path = apps[selected_app]
    if not os.path.exists(path):
        print(f"Provided path doesn't exist: {path}")
    else:
        # Link <repo>/app -> the selected Kit install.
        SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
        create_link(f"{SCRIPT_ROOT}/../../app", path)
        print("Success!")
| 2,814 |
Python
| 32.117647 | 133 | 0.562189 |
perfectproducts/mqtt_sample/tools/packman/config.packman.xml
|
<config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 |
XML
| 34.333328 | 123 | 0.691943 |
perfectproducts/mqtt_sample/tools/packman/bootstrap/install_package.py
|
# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
import tempfile
import zipfile
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
    """Context manager that creates a scratch directory on entry and removes
    it (with all contents) on exit."""

    def __init__(self):
        self.path = None

    def __enter__(self):
        # The directory is created lazily, only when the context is entered.
        self.path = tempfile.mkdtemp()
        return self.path

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Recursively delete everything created under the scratch directory.
        shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
    """Extract the zip at package_src_path and copy its contents into
    package_dst_path; aborts (with a warning) if the target already exists."""
    with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file:
        with TemporaryDirectory() as temp_dir:
            zip_file.extractall(temp_dir)
            try:
                # copytree also creates any missing package-name/version
                # parent folders in the target directory.
                shutil.copytree(temp_dir, package_dst_path)
            except OSError:
                logger.warning("Directory %s already present, packaged installation aborted" % package_dst_path)
            else:
                logger.info("Package successfully installed to %s" % package_dst_path)
# CLI entry point: install_package.py <source zip> <destination folder>
install_package(sys.argv[1], sys.argv[2])
| 1,844 |
Python
| 33.166666 | 108 | 0.703362 |
perfectproducts/mqtt_sample/exts/ai.synctwin.mqtt_sample/ai/synctwin/mqtt_sample/extension.py
|
import omni.ext
import omni.ui as ui
from paho.mqtt import client as mqtt_client
import random
from pxr import Usd, Kind, UsdGeom, Sdf, Gf, Tf
class SyncTwinMqttSampleExtension(omni.ext.IExt):
    """Omniverse Kit extension that subscribes to an MQTT topic and applies
    the received float value as the Z translation of a target prim (the
    forklift fork in the demo stage).

    Fixes over the original: the MQTT client is stored on ``self.client`` so
    ``on_shutdown`` can actually stop its network thread (previously the
    client leaked and kept running after shutdown), ``find_xf_prim`` no
    longer crashes when no stage is open, and the stage-event subscription
    is released on shutdown.
    """

    def load_usd_model(self):
        """Open the demo forklift stage from the public NVIDIA asset server."""
        print("loading model...")
        self._usd_context.open_stage("http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Equipment/Forklifts/Forklift_A/Forklift_A01_PR_V_NVD_01.usd")

    def on_startup(self, ext_id):
        """Build the UI, initialize state and register stage/update callbacks."""
        print("[ai.synctwin.mqtt_sample] ai synctwin mqtt_sample startup")
        # init data
        self.mqtt_topic_model = ui.SimpleStringModel("synctwin/mqtt_demo/forklift/fork_level")
        self.mqtt_broker_host_model = ui.SimpleStringModel("test.mosquitto.org")
        self.mqtt_broker_port_model = ui.SimpleStringModel("1883")
        self.mqtt_value_model = ui.SimpleFloatModel(0)
        self.mqtt_value_model.add_value_changed_fn(self.on_mqtt_value_changed)
        self.mqtt_connected_model = ui.SimpleBoolModel(False)
        self.target_prim_model = ui.SimpleStringModel("/World/Geometry/SM_Forklift_Fork_A01_01")
        self.current_fork_level = 0
        # initialize handles up front so the shutdown/update callbacks are
        # always safe to run, even before a connection or stage exists
        self.client = None
        self.xf = None
        # init ui
        self._usd_context = omni.usd.get_context()
        self._window = ui.Window("SyncTwin MQTT Sample", width=300, height=350)
        with self._window.frame:
            with ui.VStack():
                ui.Button("load model", clicked_fn=self.load_usd_model)
                ui.Label("MQTT Broker")
                with ui.HStack():
                    ui.StringField(self.mqtt_broker_host_model)
                    ui.StringField(self.mqtt_broker_port_model, width=ui.Percent(20))
                ui.Label("Topic")
                ui.StringField(self.mqtt_topic_model)
                ui.Label("Target Prim")
                ui.StringField(self.target_prim_model)
                ui.Label("Value")
                ui.StringField(self.mqtt_value_model)
                self.status_label = ui.Label("- not connected -")
                ui.Button("connect MQTT", clicked_fn=self.connect_mqtt)
        # we want to know when the model changes
        self._sub_stage_event = self._usd_context.get_stage_event_stream().create_subscription_to_pop(
            self._on_stage_event
        )
        # find our xf prim if a model is already present
        self.find_xf_prim()
        # and we need a callback on each frame to update our xf prim
        self._app_update_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(
            self._on_app_update_event, name="synctwin.mqtt_sample._on_app_update_event"
        )

    # called on every frame, be careful what to put there
    def _on_app_update_event(self, evt):
        """Per-frame callback: apply the latest fork level to the prim."""
        # if we have found the transform, update the translation
        if self.xf:
            self.xf.ClearXformOpOrder()
            self.xf.AddTranslateOp().Set(Gf.Vec3f(0, 0, self.current_fork_level))

    # called on load
    def _on_stage_event(self, event):
        """Stage event callback: re-resolve the target prim on stage open."""
        if event.type == int(omni.usd.StageEventType.OPENED):
            print("opened new model")
            self.find_xf_prim()

    # our model callback
    def on_mqtt_value_changed(self, model):
        """UI-model callback: cache the value for the next frame update."""
        self.current_fork_level = model.get_value_as_float()

    # find the prim to be transformed
    def find_xf_prim(self):
        """Resolve the target prim path into an Xformable (self.xf)."""
        stage = self._usd_context.get_stage()
        if stage is None:
            # no stage open yet; a later StageEventType.OPENED will retry
            self.xf = None
            return
        prim = stage.GetPrimAtPath(self.target_prim_model.get_value_as_string())
        self.xf = UsdGeom.Xformable(prim)
        if self.xf:
            msg = "found xf."
        else:
            msg = "## xf not found."
        self.status_label.text = msg
        print(msg)

    # connect to mqtt broker
    def connect_mqtt(self):
        """Connect to the configured broker, subscribe to the topic and start
        the network loop in a background thread. Returns the client."""

        # this is called when a message arrives
        def on_message(client, userdata, msg):
            msg_content = msg.payload.decode()
            print(f"Received `{msg_content}` from `{msg.topic}` topic")
            # userdata is self
            userdata.mqtt_value_model.set_value(float(msg_content))

        # called when connection to mqtt broker has been established
        def on_connect(client, userdata, flags, rc):
            print(f">> connected {client} {rc}")
            if rc == 0:
                self.status_label.text = "Connected to MQTT Broker!"
                # connect to our topic
                topic = userdata.mqtt_topic_model.get_value_as_string()
                print(f"subscribing topic {topic}")
                client.subscribe(topic)
            else:
                self.status_label.text = f"Failed to connect, return code {rc}"

        # let us know when we've subscribed
        def on_subscribe(client, userdata, mid, granted_qos):
            print(f"subscribed {mid} {granted_qos}")

        # tear down any previous client so repeated clicks don't leak
        # background network threads
        if self.client is not None:
            self.client.loop_stop()
            self.client.disconnect()
            self.client = None

        # now connect broker
        broker = self.mqtt_broker_host_model.get_value_as_string()
        port = self.mqtt_broker_port_model.get_value_as_int()
        client_id = f'python-mqtt-{random.randint(0, 1000)}'
        # Set Connecting Client ID
        client = mqtt_client.Client(client_id)
        client.user_data_set(self)
        client.on_connect = on_connect
        client.on_message = on_message
        client.on_subscribe = on_subscribe
        client.connect(broker, port)
        client.loop_start()
        # keep the client so on_shutdown can stop its network thread
        self.client = client
        return client

    def on_shutdown(self):
        """Stop the MQTT network loop and release all subscriptions."""
        print("[ai.synctwin.mqtt_sample] shutdown")
        client = getattr(self, "client", None)
        if client is not None:
            # without an explicit stop the paho network thread keeps running
            client.loop_stop()
            client.disconnect()
        self.client = None
        self._sub_stage_event = None
        self._app_update_sub = None
| 5,796 |
Python
| 36.888889 | 206 | 0.579365 |
perfectproducts/mqtt_sample/exts/ai.synctwin.mqtt_sample/ai/synctwin/mqtt_sample/__init__.py
|
from .extension import *
| 25 |
Python
| 11.999994 | 24 | 0.76 |
perfectproducts/mqtt_sample/exts/ai.synctwin.mqtt_sample/ai/synctwin/mqtt_sample/tests/__init__.py
|
from .test_hello_world import *
| 31 |
Python
| 30.999969 | 31 | 0.774194 |
perfectproducts/mqtt_sample/exts/ai.synctwin.mqtt_sample/ai/synctwin/mqtt_sample/tests/test_hello_world.py
|
# NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import ai.synctwin.mqtt_sample
# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
    """Template test case.

    NOTE(review): these tests still target the extension template. This
    package defines no `some_public_function`, and extension.py creates a
    window titled "SyncTwin MQTT Sample", not "My Window", so both tests
    will fail as written and should be rewritten for the MQTT sample.
    """
    # Before running each test
    async def setUp(self):
        pass
    # After running each test
    async def tearDown(self):
        pass
    # Actual test, notice it is "async" function, so "await" can be used if needed
    async def test_hello_public_function(self):
        # NOTE(review): template leftover -- function does not exist here.
        result = ai.synctwin.mqtt_sample.some_public_function(4)
        self.assertEqual(result, 256)
    async def test_window_button(self):
        # Find a label in our window
        # NOTE(review): template leftover -- window title does not match.
        label = ui_test.find("My Window//Frame/**/Label[*]")
        # Find buttons in our window
        add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
        reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
        # Click reset button
        await reset_button.click()
        self.assertEqual(label.widget.text, "empty")
        await add_button.click()
        self.assertEqual(label.widget.text, "count: 1")
        await add_button.click()
        self.assertEqual(label.widget.text, "count: 2")
| 1,682 |
Python
| 34.80851 | 142 | 0.682521 |
perfectproducts/mqtt_sample/exts/ai.synctwin.mqtt_sample/config/extension.toml
|
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["SyncTwin"]
# The title and description fields are primarily for displaying extension info in UI
title = "SyncTwin MQTT Sample"
description="a simple example to update USD elements with MQTT"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Example"
# Keywords for the extension
keywords = ["kit", "example", "synctwin", "MQTT"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import ai.synctwin.mqtt_sample".
[[python.module]]
name = "ai.synctwin.mqtt_sample"
[python.pipapi]
requirements = ['paho-mqtt']
use_online_index = true
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 1,656 |
TOML
| 30.865384 | 118 | 0.739734 |
perfectproducts/mqtt_sample/exts/ai.synctwin.mqtt_sample/docs/CHANGELOG.md
|
# Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2021-04-26
- Initial version of extension UI template with a window
| 178 |
Markdown
| 18.888887 | 80 | 0.702247 |
perfectproducts/mqtt_sample/exts/ai.synctwin.mqtt_sample/docs/README.md
|
# SyncTwin MQTT Example [ai.synctwin.mqtt_sample]
This is an example to control a USD asset with an MQTT live connection
for more info go to https://www.synctwin.ai
| 170 |
Markdown
| 23.428568 | 71 | 0.758824 |
perfectproducts/mqtt_sample/exts/ai.synctwin.mqtt_sample/docs/index.rst
|
ai.synctwin.mqtt_sample
#############################
Example of a Python-only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule::"ai.synctwin.mqtt_sample"
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 347 |
reStructuredText
| 15.571428 | 43 | 0.622478 |
gitLSW/robot-cloud/training/pack_task.py
|
import os
import math
import torch
from gymnasium import spaces
from omni.isaac.core.utils.extensions import enable_extension
enable_extension("omni.isaac.universal_robots")
# enable_extension("omni.isaac.sensor")
from omni.isaac.core.utils.nucleus import get_assets_root_path
# from omni.isaac.core.utils.prims import create_prim, get_prim_at_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.physx.scripts.utils import setRigidBody, setStaticCollider #, setColliderSubtree, setCollider, addCollisionGroup, setPhysics, removePhysics, removeRigidBody
from omni.isaac.universal_robots.ur10 import UR10
from omni.isaac.core.prims import XFormPrim, XFormPrimView, RigidPrim, RigidPrimView
from omni.isaac.core.robots.robot_view import RobotView
# from omni.isaac.core.materials.physics_material import PhysicsMaterial
from omniisaacgymenvs.rl_task import RLTask
from scipy.spatial.transform import Rotation as R
from pyquaternion import Quaternion
FALLEN_PART_THRESHOLD = 0.2
ROBOT_POS = torch.tensor([0.0, 0.0, FALLEN_PART_THRESHOLD])
DEST_BOX_POS = torch.tensor([0, -0.65, FALLEN_PART_THRESHOLD])
START_TABLE_POS = torch.tensor([0, 0.8, FALLEN_PART_THRESHOLD])
START_TABLE_HEIGHT = 0.6
START_TABLE_CENTER = START_TABLE_POS + torch.tensor([0, 0, START_TABLE_HEIGHT / 2])
IDEAL_PACKAGING = [([-0.06, -0.19984, 0.0803], [0.072, 0.99, 0, 0]),
([-0.06, -0.14044, 0.0803], [0.072, 0.99, 0, 0]),
([-0.06, -0.07827, 0.0803], [0.072, 0.99, 0, 0]),
([-0.06, -0.01597, 0.0803], [0.072, 0.99, 0, 0]),
([-0.06, 0.04664, 0.0803], [0.072, 0.99, 0, 0]),
([-0.06, 0.10918, 0.0803], [0.072, 0.99, 0, 0])]
NUMBER_PARTS = len(IDEAL_PACKAGING)
local_assets = os.getcwd() + '/assets'
TASK_CFG = {
"test": False,
"device_id": 0,
"headless": False,
"multi_gpu": False,
"sim_device": "gpu",
"enable_livestream": False,
"task": {
"name": 'Pack_Task',
# "physics_engine": "physx",
"env": {
"numEnvs": 625,
"envSpacing": 4,
"episodeLength": 300,
# "enableDebugVis": False,
# "controlFrequencyInv": 4
},
"sim": {
"dt": 1.0 / 60.0,
"gravity": [0.0, 0.0, -9.81],
"substeps": 1,
"use_gpu_pipeline": False, # Must be off for gripper to work
"add_ground_plane": True,
"add_distant_light": True,
"use_fabric": True,
"enable_scene_query_support": True, # Must be on for gripper to work
"enable_cameras": False,
"disable_contact_processing": False, # Must be off for gripper to work
"use_flatcache": True,
"default_physics_material": {
"static_friction": 1.0,
"dynamic_friction": 1.0,
"restitution": 0.0
},
"physx": {
### Per-scene settings
"use_gpu": True,
"worker_thread_count": 4,
"solver_type": 1, # 0: PGS, 1:TGS
"bounce_threshold_velocity": 0.2,
"friction_offset_threshold": 0.04, # A threshold of contact separation distance used to decide if a contact
# point will experience friction forces.
"friction_correlation_distance": 0.025, # Contact points can be merged into a single friction anchor if the
# distance between the contacts is smaller than correlation distance.
# disabling these can be useful for debugging
"enable_sleeping": True,
"enable_stabilization": True,
# GPU buffers
"gpu_max_rigid_contact_count": 512 * 1024,
"gpu_max_rigid_patch_count": 80 * 1024,
"gpu_found_lost_pairs_capacity": 1024,
"gpu_found_lost_aggregate_pairs_capacity": 1024,
"gpu_total_aggregate_pairs_capacity": 1024,
"gpu_max_soft_body_contacts": 1024 * 1024,
"gpu_max_particle_contacts": 1024 * 1024,
"gpu_heap_capacity": 64 * 1024 * 1024,
"gpu_temp_buffer_capacity": 16 * 1024 * 1024,
"gpu_max_num_partitions": 8,
"gpu_collision_stack_size": 64 * 1024 * 1024,
### Per-actor settings ( can override in actor_options )
"solver_position_iteration_count": 4,
"solver_velocity_iteration_count": 1,
"sleep_threshold": 0.0, # Mass-normalized kinetic energy threshold below which an actor may go to sleep.
# Allowed range [0, max_float).
"stabilization_threshold": 0.0, # Mass-normalized kinetic energy threshold below which an actor may
# participate in stabilization. Allowed range [0, max_float).
### Per-body settings ( can override in actor_options )
"enable_gyroscopic_forces": False,
"density": 1000.0, # density to be used for bodies that do not specify mass or density
"max_depenetration_velocity": 100.0,
### Per-shape settings ( can override in actor_options )
"contact_offset": 0.02,
"rest_offset": 0.001,
}
}
}
}
class PackTask(RLTask):
control_frequency_inv = 1
# kinematics_solver = None
"""
This class sets up a scene and calls a RL Policy, then evaluates the behaivior with rewards
Args:
offset (Optional[np.ndarray], optional): offset applied to all assets of the task.
sim_s_step_freq (int): The amount of simulation steps within a SIMULATED second.
"""
def __init__(self, name, sim_config, env, offset=None) -> None:
# self.observation_space = spaces.Dict({
# 'robot_state': spaces.Box(low=-2 * torch.pi, high=2 * torch.pi, shape=(6,)),
# 'gripper_closed': spaces.Discrete(2),
# # 'forces': spaces.Box(low=-1, high=1, shape=(8, 6)), # Forces on the Joints
# 'box_state': spaces.Box(low=-3, high=3, shape=(NUMBER_PARTS, 2)), # Pos and Rot Distance of each part currently placed in Box compared to currently gripped part
# 'part_pos_diff': spaces.Box(low=-3, high=3, shape=(3,)),
# # 'part_rot_diff': spaces.Box(low=-1, high=1, shape=(3,))
# })
self._num_observations = 10 + 2 * NUMBER_PARTS
self.observation_space = spaces.Dict({
'obs': spaces.Box(low=-math.pi, high=math.pi, shape=(self._num_observations,), dtype=float)
})
# End Effector Pose
# self.action_space = spaces.Box(low=-1, high=1, shape=(7,), dtype=float) # Delta Gripper Pose & gripper open / close
self._num_actions = 7
self.action_space = spaces.Box(low=-1, high=1, shape=(self._num_actions,), dtype=float)
self.update_config(sim_config)
# trigger __init__ of parent class
super().__init__(name, env, offset)
def cleanup(self):
super().cleanup()
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self.dt = self._task_cfg["sim"]["dt"]
self._device = self._cfg["sim_device"]
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
# Robot turning ange of max speed is 191deg/s
self._max_joint_rot_speed = torch.scalar_tensor((191.0 * torch.pi / 180) * self.dt).to(self._device)
super().update_config(sim_config)
def set_up_scene(self, scene) -> None:
print('SETUP TASK', self.name)
self.create_env0()
super().set_up_scene(scene=scene, replicate_physics=True, filter_collisions=True, copy_from_source=False) # Clones env0
self._boxes_view = XFormPrimView(prim_paths_expr=f'{self.default_base_env_path}/.*/box',
name='box_view',
reset_xform_properties=False)
scene.add(self._boxes_view)
self._parts_views = []
for i in range(NUMBER_PARTS):
parts_view = RigidPrimView(prim_paths_expr=f'{self.default_base_env_path}/.*/part_{i}',
name=f'part_{i}_view',
reset_xform_properties=False)
scene.add(parts_view)
self._parts_views.append(parts_view)
self._robots_view = RobotView(prim_paths_expr=f'{self.default_base_env_path}/.*/robot', name='ur10_view')
scene.add(self._robots_view)
self._table_view = XFormPrimView(prim_paths_expr=f'{self.default_base_env_path}/.*/table',
name='table_view',
reset_xform_properties=False)
scene.add(self._table_view)
self._robots = [UR10(prim_path=robot_path, attach_gripper=True) for robot_path in self._robots_view.prim_paths]
def create_env0(self):
# This is the URL from which the Assets are downloaded
# Make sure you started and connected to your localhost Nucleus Server via Omniverse !!!
# assets_root_path = get_assets_root_path()
env0_box_path = self.default_zero_env_path + '/box'
# box_usd_path = assets_root_path + '/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxA_02.usd'
box_usd_path = local_assets + '/SM_CardBoxA_02.usd'
add_reference_to_stage(box_usd_path, env0_box_path)
box = XFormPrim(prim_path=env0_box_path,
position=DEST_BOX_POS,
scale=[1, 1, 0.4])
setStaticCollider(box.prim, approximationShape='convexDecomposition')
for i in range(NUMBER_PARTS):
env0_part_path = f'{self.default_zero_env_path}/part_{i}'
part_usd_path = local_assets + '/draexlmaier_part.usd'
add_reference_to_stage(part_usd_path, env0_part_path)
part = RigidPrim(prim_path=env0_part_path,
position=START_TABLE_CENTER + torch.tensor([0, 0.1 * i - 0.23, 0.04]),
orientation=[0, 1, 0, 0], # [-0.70711, 0.70711, 0, 0]
mass=0.4)
setRigidBody(part.prim, approximationShape='convexDecomposition', kinematic=False) # Kinematic True means immovable
# The UR10e has 6 joints, each with a maximum:
# turning angle of -360 deg to +360 deg
# turning ange of max speed is 191deg/s
# gripper_usd = assets_root_path + "/Isaac/Robots/UR10/Props/long_gripper.usd"
env0_robot_path = self.default_zero_env_path + '/robot'
robot = UR10(prim_path=env0_robot_path,
name='UR10',
# usd_path=f'{local_assets}/ur10.usd',
# gripper_usd=gripper_usd,
position=ROBOT_POS,
attach_gripper=True)
# robot.set_enabled_self_collisions(True)
env0_table_path = f'{self.default_zero_env_path}/table'
# table_path = assets_root_path + "/Isaac/Environments/Simple_Room/Props/table_low.usd"
table_path = local_assets + '/table_low.usd'
add_reference_to_stage(table_path, env0_table_path)
table = XFormPrim(prim_path=env0_table_path,
position=START_TABLE_POS,
scale=[0.5, START_TABLE_HEIGHT, 0.4])
setStaticCollider(table.prim, approximationShape='convexDecomposition')
def reset(self):
super().reset()
super().cleanup()
for robot in self._robots:
# if not robot.handles_initialized:
robot.initialize()
indices = torch.arange(self._num_envs, dtype=torch.int64).to(self._device)
self.reset_envs(indices)
def reset_envs(self, env_indices):
self.progress_buf[env_indices] = 0
self.reset_buf[env_indices] = False
self.reset_robots(env_indices)
self.reset_parts(env_indices)
def reset_robots(self, env_indices):
default_pose = torch.tensor([-math.pi / 2, -math.pi / 2, -math.pi / 2, -math.pi / 2, math.pi / 2, 0])
self._robots_view.set_joint_positions(default_pose, indices=env_indices)
def reset_parts(self, env_indices):
table_pos = self._table_view.get_world_poses(indices=env_indices)[0].to(self._device)
default_rots = torch.tensor([0, 1, 0, 0]).repeat(len(env_indices), 1)
for i in range(NUMBER_PARTS):
parts_offsets = torch.tensor([0, 0.1 * i - 0.23, START_TABLE_HEIGHT / 2 + 0.04]).repeat(len(env_indices), 1).to(self._device)
self._parts_views[i].set_world_poses(positions=table_pos + parts_offsets,
orientations=default_rots,
indices=env_indices)
# _placed_parts # [[part]] where each entry in the outer array is the placed parts for env at index
# Returns: A 2D Array where each entry is the poses of the parts in the box
def get_observations(self):
def _shortest_rot_dist(quat_1, quat_2):
part_quat = Quaternion(list(quat_1))
ideal_quat = Quaternion(list(quat_2))
return Quaternion.absolute_distance(part_quat, ideal_quat)
boxes_pos = self._boxes_view.get_world_poses()[0] # Returns: [Array of all pos, Array of all rots]
robots_states = self._robots_view.get_joint_positions()
self.obs_buf[:, 1:7] = robots_states
parts_pos = []
parts_rots = []
for parts_view in self._parts_views:
curr_parts_pos, curr_parts_rots = parts_view.get_world_poses()
curr_parts_pos -= boxes_pos
parts_pos.append(curr_parts_pos)
parts_rots.append(curr_parts_rots)
parts_pos = torch.stack(parts_pos).transpose(0, 1) # Stacks and transposes the array
parts_rots = torch.stack(parts_rots).transpose(0, 1)
for env_index in range(self._num_envs):
gripper = self._robots[env_index].gripper
gripper_closed = gripper.is_closed()
self.obs_buf[env_index, 0] = gripper_closed
ideal_selection = IDEAL_PACKAGING.copy()
gripper_pos = self._robots[env_index].gripper.get_world_pose()[0]
gripper_pos -= boxes_pos[env_index]
gripper_to_closest_part_dist = 10000000
gripper_to_closest_part_dir = None
gripper_to_ideal_part_dist = 10000000
gripper_to_ideal_part_dir = None
for part_index in range(NUMBER_PARTS):
part_pos = parts_pos[env_index][part_index]
part_rot = parts_rots[env_index][part_index]
part_to_box_dist = torch.linalg.norm(part_pos)
if 0.3 < part_to_box_dist: # Only parts that are not packed can be potential gripped parts
gripper_to_part = part_pos - gripper_pos
gripper_part_dist = torch.linalg.norm(gripper_to_part)
if gripper_part_dist < gripper_to_closest_part_dist:
gripper_to_closest_part_dist = gripper_part_dist
gripper_to_closest_part_dir = gripper_to_part
ideal_part = None
min_dist = 10000000
# Find closest ideal part
for ideal_part in ideal_selection:
ideal_part_pos = torch.tensor(ideal_part[0]).to(self._device)
dist = torch.linalg.norm(ideal_part_pos - part_pos)
if dist < min_dist:
ideal_part = ideal_part
min_dist = dist
if part_index == 0: # Only one check is needed
gripper_to_ideal_part = ideal_part_pos - gripper_pos
gripper_ideal_dist = torch.linalg.norm(gripper_to_ideal_part)
if gripper_ideal_dist < gripper_to_ideal_part_dist:
gripper_to_ideal_part_dist = gripper_ideal_dist
gripper_to_ideal_part_dir = gripper_to_ideal_part
rot_dist = _shortest_rot_dist(part_rot, ideal_part[1])
# Clip obs
min_dist = min(min_dist, 3)
rot_dist = min(rot_dist, torch.pi)
# Record obs
self.obs_buf[env_index, (10 + part_index)] = min_dist
self.obs_buf[env_index, (10 + NUMBER_PARTS + part_index)] = rot_dist
# Point to target
self.obs_buf[env_index, 7:10] = gripper_to_ideal_part_dir if gripper_to_closest_part_dist < 0.05 and gripper_closed else gripper_to_closest_part_dir
def pre_physics_step(self, actions) -> None:
reset_env_indices = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if 0 < len(reset_env_indices):
self.reset_envs(reset_env_indices)
# Rotate Joints
joint_rots = self._robots_view.get_joint_positions()
joint_rots += torch.tensor(actions[:, 0:6]).to(self._device) * self._max_joint_rot_speed
self._robots_view.set_joint_positions(positions=joint_rots)
# Open or close Gripper
for env_index in range(self._num_envs):
gripper = self._robots[env_index].gripper
is_closed = gripper.is_closed()
gripper_action = actions[env_index, 6]
if 0.9 < gripper_action and is_closed:
gripper.open()
elif gripper_action < -0.3 and not is_closed:
gripper.close()
# Calculate Rewards
def calculate_metrics(self) -> None:
targets_dirs = self.obs_buf[:, 7:10]
targets_dists = torch.linalg.norm(targets_dirs, dim=1)
# part_rot_diffs = self.obs_buf[:, 10:13]
ideal_pos_dists = self.obs_buf[:, 10:(10 + NUMBER_PARTS)]
ideal_rot_dists = self.obs_buf[:, (10 + NUMBER_PARTS):(10 + 2 * NUMBER_PARTS)]
box_error_sum = ideal_pos_dists.square().sum(dim=1) + ideal_rot_dists.abs().sum(dim=1)
self.rew_buf = -targets_dists.square() - box_error_sum
def is_done(self):
# any_flipped = False
self.reset_buf.fill_(0)
for parts_view in self._parts_views:
parts_pos = parts_view.get_world_poses()[0]
# Check if part has fallen
self.reset_buf += (parts_pos[:, 2] < FALLEN_PART_THRESHOLD)
# if _is_flipped(part_rot):
# any_flipped = True
# break
self.reset_buf += (self._max_episode_length - 1 <= self.progress_buf)
self.reset_buf = self.reset_buf >= 1 # Cast to bool
# def _is_flipped(q1):
# """
# Bestimmt, ob die Rotation von q0 zu q1 ein "Umfallen" darstellt,
# basierend auf einem Winkel größer als 60 Grad zwischen der ursprünglichen
# z-Achse und ihrer Rotation.
# :param q0: Ursprüngliches Quaternion.
# :param q1: Neues Quaternion.
# :return: True, wenn der Winkel größer als 60 Grad ist, sonst False.
# """
# q0 = torch.tensor([0, 1, 0, 0])
# # Initialer Vektor, parallel zur z-Achse
# v0 = torch.tensor([0, 0, 1])
# # Konvertiere Quaternions in Rotation-Objekte
# rotation0 = R.from_quat(q0)
# rotation1 = R.from_quat(q1)
# # Berechne die relative Rotation von q0 zu q1
# q_rel = rotation1 * rotation0.inv()
# # Berechne den rotierten Vektor v1
# v1 = q_rel.apply(v0)
# # Berechne den Winkel zwischen v0 und v1
# cos_theta = np.dot(v0, v1) / (np.linalg.norm(v0) * np.linalg.norm(v1))
# angle = np.arccos(np.clip(cos_theta, -1.0, 1.0)) * 180 / np.pi
# # Prüfe, ob der Winkel größer als 60 Grad ist
# return angle > 60
| 20,134 |
Python
| 44.657596 | 174 | 0.572862 |
gitLSW/robot-cloud/training/train_ppo.py
|
import threading
import time
import torch
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.utils import set_seed
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
from skrl.memories.torch import RandomMemory
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import ParallelTrainer, SequentialTrainer
from skrl.envs.wrappers.torch.omniverse_isaacgym_envs import OmniverseIsaacGymWrapper
from omniisaacgymenvs.isaac_gym_env_utils import get_env_instance
# Seed for reproducibility
seed = set_seed() # e.g. `set_seed(42)` for fixed seed
# Define the models (stochastic and deterministic models) for the agent using helper mixin.
# - Policy: takes as input the environment's observation/state and returns an action
# - Value: takes the state as input and provides a value to guide the policy
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU(),
nn.Linear(64, self.num_actions))
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), self.log_std_parameter, {}
class Value(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU(),
nn.Linear(64, 1))
def compute(self, inputs, role):
return self.net(inputs["states"]), {}
headless = True # set headless to False for rendering
multi_threaded = headless
env = get_env_instance(headless=headless, multi_threaded=multi_threaded) # Multithreaded doesn't work with UI open
from omniisaacgymenvs.sim_config import SimConfig, merge
from franka_reach_task import ReachingFrankaTask, TASK_CFG
TASK_CFG["seed"] = seed
TASK_CFG["headless"] = headless
TASK_CFG["task"]["env"]["numEnvs"] = 50_000 if headless else 25
TASK_CFG["task"]["env"]["controlSpace"] = "joint" # "joint" or "cartesian"
sim_config = SimConfig(TASK_CFG)
task = ReachingFrankaTask(name="ReachingFranka", sim_config=sim_config, env=env)
env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=True, rendering_dt=TASK_CFG['task']['sim']['dt'])
# task.reset()
if multi_threaded:
env.initialize(action_queue=env.action_queue, data_queue=env.data_queue, timeout=5)
# wrap the environment
env = OmniverseIsaacGymWrapper(env)
device = env.device
# Instantiate a RandomMemory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)
# Instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models
models_ppo = {}
models_ppo["policy"] = Policy(env.observation_space, env.action_space, device)
models_ppo["value"] = Value(env.observation_space, env.action_space, device)
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["rollouts"] = 16
cfg_ppo["learning_epochs"] = 8
cfg_ppo["mini_batches"] = 8
cfg_ppo["discount_factor"] = 0.99
cfg_ppo["lambda"] = 0.95
cfg_ppo["learning_rate"] = 5e-4
cfg_ppo["learning_rate_scheduler"] = KLAdaptiveRL
cfg_ppo["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}
cfg_ppo["random_timesteps"] = 0
cfg_ppo["learning_starts"] = 0
cfg_ppo["grad_norm_clip"] = 1.0
cfg_ppo["ratio_clip"] = 0.2
cfg_ppo["value_clip"] = 0.2
cfg_ppo["clip_predicted_values"] = True
cfg_ppo["entropy_loss_scale"] = 0.0
cfg_ppo["value_loss_scale"] = 2.0
cfg_ppo["kl_threshold"] = 0
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg_ppo["value_preprocessor"] = RunningStandardScaler
cfg_ppo["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints each 32 and 250 timesteps respectively
cfg_ppo["experiment"]["write_interval"] = 32
cfg_ppo["experiment"]["checkpoint_interval"] = 250
agent = PPO(models=models_ppo,
memory=memory,
cfg=cfg_ppo,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# agent.load("./runs/24-02-23_10-38-35-900063_PPO/checkpoints/best_agent.pt")
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 50_000_000 // TASK_CFG["task"]["env"]["numEnvs"], "headless": headless}
trainer = ParallelTrainer(cfg=cfg_trainer, env=env, agents=agent)
if multi_threaded:
# start training in a separate thread
threading.Thread(target=trainer.train).start()
env.run(trainer=None) # The TraimerMT can be None, cause it is only used to stop the Sim
else:
trainer.train()
| 6,165 |
Python
| 42.422535 | 143 | 0.68159 |
gitLSW/robot-cloud/training/train_ddpg.py
|
import os
import threading
import torch
import torch.nn as nn
import wandb
# import the skrl components to build the RL system
from skrl.utils import set_seed
from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, Model
from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import ParallelTrainer
from skrl.envs.wrappers.torch import OmniverseIsaacGymWrapper
from omniisaacgymenvs.isaac_gym_env_utils import get_env_instance
name = 'DDPG_Pack'
# seed for reproducibility
seed = set_seed()
# define models (deterministic models) using mixins
class DeterministicActor(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations, 512),
nn.ReLU(),
nn.Linear(512, 1024),
nn.ReLU6(),
nn.Linear(1024, 512),
nn.Tanh(),
nn.Linear(512, 256),
nn.Tanh(),
nn.Linear(256, self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), {}
class Critic(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 256),
nn.ReLU(),
nn.Linear(256, 512),
nn.ReLU6(),
nn.Linear(512, 512),
nn.Tanh(),
nn.Linear(512, 256),
nn.Tanh(),
nn.Linear(256, 1))
def compute(self, inputs, role):
return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {}
# Load the Isaac Gym environment
headless = True # set headless to False for rendering
multi_threaded = headless
env = get_env_instance(headless=headless,
multi_threaded=multi_threaded, # Multithreaded doesn't work with UI open
experience=f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit')
from omniisaacgymenvs.sim_config import SimConfig, merge
from pack_task_part_in_gripper import PackTask as Task, TASK_CFG
TASK_CFG['name'] = name
TASK_CFG["seed"] = seed
TASK_CFG["headless"] = headless
if not headless:
TASK_CFG["task"]["env"]["numEnvs"] = 25
sim_config = SimConfig(TASK_CFG)
task = Task(name=name, sim_config=sim_config, env=env)
env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=True, rendering_dt=TASK_CFG['task']['sim']['dt'])
if multi_threaded:
env.initialize(action_queue=env.action_queue, data_queue=env.data_queue, timeout=30)
# wrap the environment
env = OmniverseIsaacGymWrapper(env)
device = env.device
# instantiate the agent's models (function approximators).
# DDPG requires 4 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models
models = {
'policy': DeterministicActor(env.observation_space, env.action_space, device),
'target_policy': DeterministicActor(env.observation_space, env.action_space, device),
'critic': Critic(env.observation_space, env.action_space, device),
'target_critic': Critic(env.observation_space, env.action_space, device),
}
num_envs = TASK_CFG["task"]["env"]["numEnvs"]
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters
ddpg_cfg = DDPG_DEFAULT_CONFIG.copy()
ddpg_cfg = merge({
"exploration": {
"noise": OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=0.5, device=device)
},
"gradient_steps": 1,
"batch_size": 4096,
"discount_factor": 0.99,
"polyak": 0.005,
"actor_learning_rate": 5e-4,
"critic_learning_rate": 5e-4,
"random_timesteps": 80,
"learning_starts": 80,
"state_preprocessor": RunningStandardScaler,
"state_preprocessor_kwargs": {
"size": env.observation_space,
"device": device
},
"experiment": {
"directory": "progress", # experiment's parent directory
"experiment_name": name, # experiment name
"write_interval": 200, # TensorBoard writing interval (iterations)
"checkpoint_interval": 1000, # interval for checkpoints (iterations)
"store_separately": False, # whether to store checkpoints separately
"wandb": True, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}, ddpg_cfg)
run = wandb.init(
project=name,
config=ddpg_cfg,
sync_tensorboard=True, # auto-upload sb3's tensorboard metrics
# monitor_gym=True, # auto-upload the videos of agents playing the game
# save_code=True, # optional
)
# instantiate a memory as experience replay
memory = RandomMemory(memory_size=100_000, num_envs=num_envs, device=device)
agent = DDPG(models=models,
memory=memory,
cfg=ddpg_cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
try:
agent.load("./progress/DDPG_Pack/checkpoints/best_agent.pt")
except FileNotFoundError:
print('Cloud not load agent. Created new Agent !')
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 50_000_000 // num_envs, "headless": headless}
trainer = ParallelTrainer(cfg=cfg_trainer, env=env, agents=agent)
if multi_threaded:
# start training in a separate thread
threading.Thread(target=trainer.train).start()
env.run(trainer=None) # The TraimerMT can be None, cause it is only used to stop the Sim
else:
trainer.train()
| 6,451 |
Python
| 38.341463 | 143 | 0.647652 |
gitLSW/robot-cloud/training/pack_task_part_in_gripper.py
|
import os
import math
import torch
from gymnasium import spaces
from omni.isaac.core.utils.extensions import enable_extension
enable_extension("omni.isaac.universal_robots")
# enable_extension("omni.isaac.sensor")
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import delete_prim, get_prim_object_type #, create_prim, get_prim_at_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.physx.scripts.utils import setRigidBody, setStaticCollider #, setColliderSubtree, setCollider, addCollisionGroup, setPhysics, removePhysics, removeRigidBody
from omni.isaac.universal_robots.ur10 import UR10
from omni.isaac.core.prims import XFormPrim, XFormPrimView, RigidPrim, RigidPrimView
from omni.isaac.core.robots.robot_view import RobotView
# from omni.isaac.core.materials.physics_material import PhysicsMaterial
from omniisaacgymenvs.rl_task import RLTask
from scipy.spatial.transform import Rotation as R
from pyquaternion import Quaternion
FALLEN_PART_THRESHOLD = 0.2
ROBOT_POS = torch.tensor([0.0, 0.0, FALLEN_PART_THRESHOLD])
LIGHT_OFFSET = torch.tensor([0, 0, 2])
DEST_BOX_POS = torch.tensor([0, -0.65, FALLEN_PART_THRESHOLD])
IDEAL_PACKAGING = [([-0.06, -0.19984, 0.0803], [0.072, 0.99, 0, 0]),
([-0.06, -0.14044, 0.0803], [0.072, 0.99, 0, 0]),
([-0.06, -0.07827, 0.0803], [0.072, 0.99, 0, 0]),
([-0.06, -0.01597, 0.0803], [0.072, 0.99, 0, 0]),
([-0.06, 0.04664, 0.0803], [0.072, 0.99, 0, 0]),
([-0.06, 0.10918, 0.0803], [0.072, 0.99, 0, 0])]
NUMBER_PARTS = len(IDEAL_PACKAGING)
local_assets = os.getcwd() + '/assets'
TASK_CFG = {
"test": False,
"device_id": 0,
"headless": False,
"multi_gpu": False,
"sim_device": "gpu",
"enable_livestream": False,
"task": {
"name": 'Pack_Task',
# "physics_engine": "physx",
"env": {
"numEnvs": 100,
"envSpacing": 4,
"episodeLength": 400, # The episode length is the max time for one part to be packed, not the whole box
# "enableDebugVis": False,
# "controlFrequencyInv": 4
},
"sim": {
"dt": 1.0 / 60.0,
"gravity": [0.0, 0.0, -9.81],
"substeps": 1,
"use_gpu_pipeline": False, # Must be off for gripper to work
"add_ground_plane": True,
"add_distant_light": True,
"use_fabric": True,
"enable_scene_query_support": True, # Must be on for gripper to work
"enable_cameras": False,
"disable_contact_processing": False, # Must be off for gripper to work
"use_flatcache": True,
"default_physics_material": {
"static_friction": 1.0,
"dynamic_friction": 1.0,
"restitution": 0.0
},
"physx": {
### Per-scene settings
"use_gpu": True,
"worker_thread_count": 4,
"solver_type": 1, # 0: PGS, 1:TGS
"bounce_threshold_velocity": 0.2,
"friction_offset_threshold": 0.04, # A threshold of contact separation distance used to decide if a contact
# point will experience friction forces.
"friction_correlation_distance": 0.025, # Contact points can be merged into a single friction anchor if the
# distance between the contacts is smaller than correlation distance.
# disabling these can be useful for debugging
"enable_sleeping": True,
"enable_stabilization": True,
# GPU buffers
"gpu_max_rigid_contact_count": 512 * 1024,
"gpu_max_rigid_patch_count": 80 * 1024,
"gpu_found_lost_pairs_capacity": 1024,
"gpu_found_lost_aggregate_pairs_capacity": 1024,
"gpu_total_aggregate_pairs_capacity": 1024,
"gpu_max_soft_body_contacts": 1024 * 1024,
"gpu_max_particle_contacts": 1024 * 1024,
"gpu_heap_capacity": 64 * 1024 * 1024,
"gpu_temp_buffer_capacity": 16 * 1024 * 1024,
"gpu_max_num_partitions": 8,
"gpu_collision_stack_size": 64 * 1024 * 1024,
### Per-actor settings ( can override in actor_options )
"solver_position_iteration_count": 4,
"solver_velocity_iteration_count": 1,
"sleep_threshold": 0.0, # Mass-normalized kinetic energy threshold below which an actor may go to sleep.
# Allowed range [0, max_float).
"stabilization_threshold": 0.0, # Mass-normalized kinetic energy threshold below which an actor may
# participate in stabilization. Allowed range [0, max_float).
### Per-body settings ( can override in actor_options )
"enable_gyroscopic_forces": False,
"density": 1000.0, # density to be used for bodies that do not specify mass or density
"max_depenetration_velocity": 100.0,
### Per-shape settings ( can override in actor_options )
"contact_offset": 0.02,
"rest_offset": 0.001,
}
}
}
}
class PackTask(RLTask):
    # Inverse control frequency: the policy acts on every simulation step.
    control_frequency_inv = 1
    # kinematics_solver = None
    """
    This class sets up a scene and calls a RL Policy, then evaluates the behavior with rewards
    Args:
        offset (Optional[np.ndarray], optional): offset applied to all assets of the task.
        sim_s_step_freq (int): The amount of simulation steps within a SIMULATED second.
    """
    def __init__(self, name, sim_config, env, offset=None) -> None:
        # Observation layout per env (filled in get_observations):
        #   [0]                                   gripper closed flag
        #   [1:7]                                 6 joint positions
        #   [7:10]                                current part's position delta to its ideal pose
        #   [10:10+NUMBER_PARTS]                  per-slot position distances
        #   [10+NUMBER_PARTS:10+2*NUMBER_PARTS]   per-slot rotation distances
        self._num_observations = 10 + 2 * NUMBER_PARTS
        # self.observation_space = spaces.Dict({ 'obs': spaces.Box(low=-math.pi, high=math.pi, shape=(self._num_observations,), dtype=float) })
        self._num_actions = 7 # gripper open / close & Delta 6 joint rots
        # self.action_space = spaces.Box(low=-1, high=1, shape=(self._num_actions,), dtype=float)
        self.update_config(sim_config)
        super().__init__(name, env, offset)
    def update_config(self, sim_config):
        """Cache frequently used sim/task config entries on the instance."""
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config
        self.dt = self._task_cfg["sim"]["dt"]
        self._device = self._cfg["sim_device"]
        self._num_envs = self._task_cfg["env"]["numEnvs"]
        self._env_spacing = self._task_cfg["env"]["envSpacing"]
        self._max_episode_length = self._task_cfg["env"]["episodeLength"]
        # Robot turning ange of max speed is 191deg/s
        # Maximum joint rotation per control step, in radians.
        self._max_joint_rot_speed = torch.scalar_tensor((191.0 * torch.pi / 180) * self.dt).to(self._device)
        super().update_config(sim_config)
    def set_up_scene(self, scene) -> None:
        """Build the template env, clone it, and register views for boxes, robots and grippers."""
        print('SETUP TASK', self.name)
        self.create_env0()
        super().set_up_scene(scene) # Clones env0
        self._boxes_view = XFormPrimView(prim_paths_expr=f'{self.default_base_env_path}/.*/box',
                                         name='box_view',
                                         reset_xform_properties=False)
        scene.add(self._boxes_view)
        self._robots_view = RobotView(prim_paths_expr=f'{self.default_base_env_path}/.*/robot', name='ur10_view')
        scene.add(self._robots_view)
        self._grippers = RigidPrimView(prim_paths_expr=f'{self.default_base_env_path}/.*/robot/ee_link', name="gripper_view")
        scene.add(self._grippers)
        # Wrap each cloned robot prim in a UR10 helper so joints and gripper can be driven per env.
        self._robots = [UR10(prim_path=robot_path, attach_gripper=True) for robot_path in self._robots_view.prim_paths]
    def create_env0(self):
        """Populate the template env (env0) with the destination box and the UR10 robot."""
        # This is the URL from which the Assets are downloaded
        # Make sure you started and connected to your localhost Nucleus Server via Omniverse !!!
        assets_root_path = get_assets_root_path()
        env0_box_path = self.default_zero_env_path + '/box'
        box_usd_path = assets_root_path + '/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxA_02.usd'
        # NOTE(review): the Nucleus path above is immediately overridden by the local asset below.
        box_usd_path = local_assets + '/SM_CardBoxA_02.usd'
        add_reference_to_stage(box_usd_path, env0_box_path)
        box = XFormPrim(prim_path=env0_box_path,
                        position=DEST_BOX_POS,
                        scale=[1, 1, 0.4])
        setStaticCollider(box.prim, approximationShape='convexDecomposition')
        # The UR10e has 6 joints, each with a maximum:
        # turning angle of -360 deg to +360 deg
        # turning ange of max speed is 191deg/s
        env0_robot_path = self.default_zero_env_path + '/robot'
        robot = UR10(prim_path=env0_robot_path, name='UR10', position=ROBOT_POS, attach_gripper=True)
        robot.set_enabled_self_collisions(True)
    def cleanup(self):
        """Reset per-env part bookkeeping (current part and already placed parts)."""
        self._curr_parts = [None for _ in range(self._num_envs)]
        self._placed_parts = [[] for _ in range(self._num_envs)]
        super().cleanup()
    def reset(self):
        """Full reset: clear bookkeeping, initialize robot handles once, then reset every env."""
        super().reset()
        self.cleanup()
        for env_index in range(self._num_envs):
            robot = self._robots[env_index]
            if not robot.handles_initialized:
                robot.initialize()
            self.reset_env(env_index)
    def reset_env(self, env_index):
        """Reset one env: progress counters, robot pose, delete old parts, spawn a fresh part."""
        self.progress_buf[env_index] = 0
        self.reset_buf[env_index] = False
        self.reset_robot(env_index)
        curr_part = self._curr_parts[env_index]
        if curr_part:
            delete_prim(curr_part.prim_path)
            self._curr_parts[env_index] = None
        for part in self._placed_parts[env_index]:
            delete_prim(part.prim_path)
        self._placed_parts[env_index] = []
        self._curr_parts[env_index] = self.add_part(env_index)
    def reset_robot(self, env_index):
        """Move the robot to its default joint pose and open the gripper."""
        robot = self._robots[env_index]
        default_pose = torch.tensor([math.pi / 2, -math.pi / 2, -math.pi / 2, -math.pi / 2, math.pi / 2, 0])
        robot.set_joint_positions(positions=default_pose)
        robot.gripper.open()
    def add_part(self, env_index) -> None:
        """Spawn the next part above the env's box and return it as a RigidPrim."""
        # gripper = self._robots[env_index].gripper
        # part_pos = torch.tensor(gripper.get_world_pose()[0]) - torch.tensor([0, 0, 0.18], device=self._device)
        box_pos = self._boxes_view.get_world_poses()[0][env_index]
        part_pos = box_pos + torch.tensor([-0.12, -0.05, 0.48]).to(self._device)
        part_index = len(self._placed_parts[env_index])
        part_path = f'{self.default_base_env_path}/env_{env_index}/parts/part_{part_index}'
        part_usd_path = local_assets + '/draexlmaier_part.usd'
        add_reference_to_stage(part_usd_path, part_path)
        part = RigidPrim(prim_path=part_path,
                         name=f'env_{env_index}_part_{part_index}',
                         position=part_pos,
                         orientation=[0, 1, 0, 0],
                         mass=0.4) # [-0.70711, 0.70711, 0, 0]
        if get_prim_object_type(part_path) != 'rigid_body':
            setRigidBody(part.prim, approximationShape='convexDecomposition', kinematic=False) # Kinematic True means immovable
        return part
    # _placed_parts # [[part]] where each entry in the outer array is the placed parts for env at index
    # Returns: A 2D Array where each entry is the poses of the parts in the box
    def get_observations(self):
        """Fill self.obs_buf for every env: gripper state, joints, and part pose errors."""
        def _shortest_rot_dist(quat_1, quat_2):
            # Geodesic distance (radians) between the two orientations.
            part_quat = Quaternion(list(quat_1))
            ideal_quat = Quaternion(list(quat_2))
            return Quaternion.absolute_distance(part_quat, ideal_quat)
        boxes_pos = self._boxes_view.get_world_poses()[0] # Returns: [Array of all pos, Array of all rots]
        # obs_dicts = []
        for env_index in range(self._num_envs):
            # env_obs = { 'box_state': [] }
            robot = self._robots[env_index]
            gripper_closed = robot.gripper.is_closed()
            self.obs_buf[env_index, 0] = gripper_closed
            # env_obs['gripper_closed'] = gripper_closed
            robot_state = robot.get_joint_positions()
            self.obs_buf[env_index, 1:7] = robot_state
            ideal_selection = IDEAL_PACKAGING.copy()
            box_pos = boxes_pos[env_index]
            curr_part = self._curr_parts[env_index]
            # Already placed parts plus the part currently being handled (always last).
            eval_parts = self._placed_parts[env_index] + [curr_part]
            # box_state = []
            # ideal_pose_for_curr_part = None
            for part_index in range(NUMBER_PARTS):
                if len(eval_parts) <= part_index:
                    # The worst possible distance is 3m and 180deg
                    self.obs_buf[env_index, (10 + part_index)] = torch.scalar_tensor(3)
                    self.obs_buf[env_index, (10 + NUMBER_PARTS + part_index)] = torch.pi
                    # env_obs['box_state'].append([3, torch.pi])
                    continue
                part_pos, part_rot = eval_parts[part_index].get_world_pose()
                part_pos -= box_pos # part position relative to the box
                ideal_part = None
                min_dist = 10000000
                # Find closest ideal part
                for pot_part in ideal_selection:
                    dist = torch.linalg.norm(torch.tensor(pot_part[0], device=self._device) - part_pos)
                    if dist < min_dist:
                        ideal_part = pot_part
                        min_dist = dist
                rot_dist = _shortest_rot_dist(part_rot, ideal_part[1])
                # Clip obs
                min_dist = min(min_dist, 3)
                rot_dist = min(rot_dist, torch.pi)
                # Record obs
                self.obs_buf[env_index, (10 + part_index)] = min_dist
                self.obs_buf[env_index, (10 + NUMBER_PARTS + part_index)] = rot_dist
                # env_obs['box_state'].append([min_dist, rot_dist])
                if part_index == len(eval_parts) - 1:
                    # Last entry of eval_parts is the currently handled part: expose its delta.
                    part_pos_diff = part_pos - torch.tensor(ideal_part[0]).to(self._device)
                    # part_rot_euler = R.from_quat(part_rot.cpu()).as_euler('xyz', degrees=False)
                    # ideal_rot_euler = R.from_quat(ideal_part[1]).as_euler('xyz', degrees=False)
                    # part_rot_diff = torch.tensor(ideal_rot_euler - part_rot_euler)
                    self.obs_buf[env_index, 7:10] = part_pos_diff
                    # self.obs_buf[env_index, 10:13] = part_rot_diff
                    # # env_obs['part_pos_diff'] = part_pos_diff
                    # # env_obs['part_rot_diff'] = part_rot_diff
            # obs_dicts.append(env_obs)
        # The return is irrelevant for Multi Threading:
        # The VecEnvMT Loop calls RLTask.post_physics_step to get all the data from one step.
        # RLTask.post_physics_step is simply returning self.obs_buf, self.rew_buf,...
        # post_physics_step calls
        # - get_observations()
        # - get_states()
        # - calculate_metrics()
        # - is_done()
        # - get_extras()
        # return obs_dicts
        # NOTE(review): this returns only the LAST env's row (env_index leaks from the loop);
        # callers are expected to read self.obs_buf directly per the comment above — confirm.
        return self.obs_buf[env_index]
    def pre_physics_step(self, actions) -> None:
        """Apply policy actions: joint deltas (actions[:, 0:6]) and gripper open/close (actions[:, 6])."""
        for env_index in range(self._num_envs):
            if self.reset_buf[env_index]:
                self.reset_env(env_index)
                continue
            # Rotate Joints
            robot = self._robots[env_index]
            gripper = robot.gripper
            env_step = self.progress_buf[env_index]
            if env_step <= 1:
                # During the first steps only keep the gripper closed; skip joint commands.
                gripper.close()
                continue
            joint_rots = robot.get_joint_positions()
            joint_rots += actions[env_index, 0:6].to(self._device) * self._max_joint_rot_speed
            robot.set_joint_positions(positions=joint_rots)
            # Open or close Gripper
            is_closed = gripper.is_closed()
            gripper_action = actions[env_index, 6]
            if 0.9 < gripper_action and is_closed:
                gripper.open()
            elif gripper_action < -0.3 and not is_closed:
                gripper.close()
    # Calculate Rewards
    def calculate_metrics(self) -> None:
        """Compute self.rew_buf; envs whose current part reached its ideal position get the next part."""
        pos_rew = 0
        parts_to_ideal_pos = self.obs_buf[:, 7:10]
        parts_to_ideal_pos_dists = torch.linalg.norm(parts_to_ideal_pos, dim=1)
        # Move Parts
        # if len(self._placed_parts) + (1 if self._curr_parts[env_index] else 0) < NUMBER_PARTS:
        # TODO: ADD ROT DIST AS WELL AS A CONDITION
        next_part_env_indices = (parts_to_ideal_pos_dists < 0.003).nonzero(as_tuple=False).squeeze(-1)
        for env_index in next_part_env_indices:
            self._placed_parts[env_index].append(self._curr_parts[env_index])
            self._curr_parts[env_index] = self.add_part(env_index) # A new part gets placed with each reset
            self.progress_buf[env_index] = 0
            # NOTE(review): pos_rew is a scalar added to the whole batch below, so every
            # env is rewarded whenever any env places a part — confirm this is intended.
            pos_rew += 200
        # part_rot_diffs = self.obs_buf[:, 10:13]
        ideal_pos_dists = self.obs_buf[:, 10:(10 + NUMBER_PARTS)]
        ideal_rot_dists = self.obs_buf[:, (10 + NUMBER_PARTS):(10 + 2 * NUMBER_PARTS)]
        box_error_sum = ideal_pos_dists.square().sum(dim=1) + ideal_rot_dists.abs().sum(dim=1)
        self.rew_buf = -parts_to_ideal_pos_dists.square() - box_error_sum + pos_rew
    def is_done(self):
        """Set self.reset_buf: an env is done when its part fell below the threshold or the episode timed out."""
        # any_flipped = False
        self.reset_buf.fill_(0)
        for env_index in range(self._num_envs):
            part = self._curr_parts[env_index]
            if not part:
                continue
            part_pos = part.get_world_pose()[0]
            # Check if part has fallen
            self.reset_buf[env_index] += (part_pos[2] < FALLEN_PART_THRESHOLD - 0.05)
            # TODO: FIX THIS SHIT
            # if _is_flipped(part_rot):
            #     any_flipped = True
            #     break
        self.reset_buf += (self._max_episode_length - 1 <= self.progress_buf)
        self.reset_buf = self.reset_buf >= 1 # Cast to bool
    # def _is_flipped(q1):
    #     """
    #     Determines whether the rotation from q0 to q1 amounts to "falling over",
    #     based on an angle greater than 60 degrees between the original
    #     z-axis and its rotated image.
    #     :param q0: Original quaternion.
    #     :param q1: New quaternion.
    #     :return: True if the angle is greater than 60 degrees, otherwise False.
    #     """
    #     q0 = torch.tensor([0, 1, 0, 0])
    #     # Initial vector, parallel to the z-axis
    #     v0 = torch.tensor([0, 0, 1])
    #     # Convert the quaternions into Rotation objects
    #     rotation0 = R.from_quat(q0)
    #     rotation1 = R.from_quat(q1)
    #     # Compute the relative rotation from q0 to q1
    #     q_rel = rotation1 * rotation0.inv()
    #     # Compute the rotated vector v1
    #     v1 = q_rel.apply(v0)
    #     # Compute the angle between v0 and v1
    #     cos_theta = np.dot(v0, v1) / (np.linalg.norm(v0) * np.linalg.norm(v1))
    #     angle = np.arccos(np.clip(cos_theta, -1.0, 1.0)) * 180 / np.pi
    #     # Check whether the angle is greater than 60 degrees
    #     return angle > 60
| 19,174 |
Python
| 42.284424 | 166 | 0.569782 |
gitLSW/robot-cloud/training/omniisaacgymenvs/randomize.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import numpy as np
import torch
import omni
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.extensions import enable_extension
class Randomizer:
def __init__(self, main_config, task_config):
self._cfg = task_config
self._config = main_config
self.randomize = False
dr_config = self._cfg.get("domain_randomization", None)
self.distributions = dict()
self.active_domain_randomizations = dict()
self._observations_dr_params = None
self._actions_dr_params = None
if dr_config is not None:
randomize = dr_config.get("randomize", False)
randomization_params = dr_config.get("randomization_params", None)
if randomize and randomization_params is not None:
self.randomize = True
self.min_frequency = dr_config.get("min_frequency", 1)
# import DR extensions
enable_extension("omni.replicator.isaac")
import omni.replicator.core as rep
import omni.replicator.isaac as dr
self.rep = rep
self.dr = dr
    def apply_on_startup_domain_randomization(self, task):
        """Apply one-shot (``on_startup``) randomizations before simulation begins.

        Walks ``randomization_params`` and, for every rigid prim view
        (scale/mass/density) or articulation view (scale) that declares an
        ``on_startup`` section, validates the section and dispatches to the
        matching ``randomize_*_on_startup`` helper on the registered scene view.

        Raises:
            ValueError: if an ``on_startup`` section is missing one of
                operation / distribution / distribution_parameters, or if
                randomization is disabled and no DR section exists in the config.
        """
        if self.randomize:
            # Seed torch so the startup draws are reproducible across runs.
            torch.manual_seed(self._config["seed"])
            randomization_params = self._cfg["domain_randomization"]["randomization_params"]
            for opt in randomization_params.keys():
                if opt == "rigid_prim_views":
                    if randomization_params["rigid_prim_views"] is not None:
                        for view_name in randomization_params["rigid_prim_views"].keys():
                            if randomization_params["rigid_prim_views"][view_name] is not None:
                                for attribute, params in randomization_params["rigid_prim_views"][view_name].items():
                                    params = randomization_params["rigid_prim_views"][view_name][attribute]
                                    # Only scale/mass/density support on_startup randomization.
                                    if attribute in ["scale", "mass", "density"] and params is not None:
                                        if "on_startup" in params.keys():
                                            if not set(
                                                ("operation", "distribution", "distribution_parameters")
                                            ).issubset(params["on_startup"]):
                                                raise ValueError(
                                                    f"Please ensure the following randomization parameters for {view_name} {attribute} "
                                                    + "on_startup are provided: operation, distribution, distribution_parameters."
                                                )
                                            view = task.world.scene._scene_registry.rigid_prim_views[view_name]
                                            if attribute == "scale":
                                                self.randomize_scale_on_startup(
                                                    view=view,
                                                    distribution=params["on_startup"]["distribution"],
                                                    distribution_parameters=params["on_startup"][
                                                        "distribution_parameters"
                                                    ],
                                                    operation=params["on_startup"]["operation"],
                                                    sync_dim_noise=True,
                                                )
                                            elif attribute == "mass":
                                                self.randomize_mass_on_startup(
                                                    view=view,
                                                    distribution=params["on_startup"]["distribution"],
                                                    distribution_parameters=params["on_startup"][
                                                        "distribution_parameters"
                                                    ],
                                                    operation=params["on_startup"]["operation"],
                                                )
                                            elif attribute == "density":
                                                self.randomize_density_on_startup(
                                                    view=view,
                                                    distribution=params["on_startup"]["distribution"],
                                                    distribution_parameters=params["on_startup"][
                                                        "distribution_parameters"
                                                    ],
                                                    operation=params["on_startup"]["operation"],
                                                )
                if opt == "articulation_views":
                    if randomization_params["articulation_views"] is not None:
                        for view_name in randomization_params["articulation_views"].keys():
                            if randomization_params["articulation_views"][view_name] is not None:
                                for attribute, params in randomization_params["articulation_views"][view_name].items():
                                    params = randomization_params["articulation_views"][view_name][attribute]
                                    # Articulation views only support scale on startup.
                                    if attribute in ["scale"] and params is not None:
                                        if "on_startup" in params.keys():
                                            if not set(
                                                ("operation", "distribution", "distribution_parameters")
                                            ).issubset(params["on_startup"]):
                                                raise ValueError(
                                                    f"Please ensure the following randomization parameters for {view_name} {attribute} "
                                                    + "on_startup are provided: operation, distribution, distribution_parameters."
                                                )
                                            view = task.world.scene._scene_registry.articulated_views[view_name]
                                            if attribute == "scale":
                                                self.randomize_scale_on_startup(
                                                    view=view,
                                                    distribution=params["on_startup"]["distribution"],
                                                    distribution_parameters=params["on_startup"][
                                                        "distribution_parameters"
                                                    ],
                                                    operation=params["on_startup"]["operation"],
                                                    sync_dim_noise=True,
                                                )
        else:
            dr_config = self._cfg.get("domain_randomization", None)
            if dr_config is None:
                raise ValueError("No domain randomization parameters are specified in the task yaml config file")
            randomize = dr_config.get("randomize", False)
            randomization_params = dr_config.get("randomization_params", None)
            if randomize == False or randomization_params is None:
                print("On Startup Domain randomization will not be applied.")
    def set_up_domain_randomization(self, task):
        """Register per-step randomizations (observations, actions, simulation,
        rigid prim views, articulation views) with the replicator DR graph.

        All ``on_reset`` / ``on_interval`` randomizations are declared inside the
        ``on_rl_frame`` trigger context, then the replicator orchestrator is started.
        Scale/density attributes are skipped here because they are handled on startup.

        Raises:
            ValueError: if randomization is disabled and no DR section exists.
        """
        if self.randomize:
            randomization_params = self._cfg["domain_randomization"]["randomization_params"]
            self.rep.set_global_seed(self._config["seed"])
            with self.dr.trigger.on_rl_frame(num_envs=self._cfg["env"]["numEnvs"]):
                for opt in randomization_params.keys():
                    if opt == "observations":
                        self._set_up_observations_randomization(task)
                    elif opt == "actions":
                        self._set_up_actions_randomization(task)
                    elif opt == "simulation":
                        if randomization_params["simulation"] is not None:
                            self.distributions["simulation"] = dict()
                            self.dr.physics_view.register_simulation_context(task.world)
                            for attribute, params in randomization_params["simulation"].items():
                                self._set_up_simulation_randomization(attribute, params)
                    elif opt == "rigid_prim_views":
                        if randomization_params["rigid_prim_views"] is not None:
                            self.distributions["rigid_prim_views"] = dict()
                            for view_name in randomization_params["rigid_prim_views"].keys():
                                if randomization_params["rigid_prim_views"][view_name] is not None:
                                    self.distributions["rigid_prim_views"][view_name] = dict()
                                    self.dr.physics_view.register_rigid_prim_view(
                                        rigid_prim_view=task.world.scene._scene_registry.rigid_prim_views[
                                            view_name
                                        ],
                                    )
                                    for attribute, params in randomization_params["rigid_prim_views"][
                                        view_name
                                    ].items():
                                        # scale/density are startup-only attributes.
                                        if attribute not in ["scale", "density"]:
                                            self._set_up_rigid_prim_view_randomization(view_name, attribute, params)
                    elif opt == "articulation_views":
                        if randomization_params["articulation_views"] is not None:
                            self.distributions["articulation_views"] = dict()
                            for view_name in randomization_params["articulation_views"].keys():
                                if randomization_params["articulation_views"][view_name] is not None:
                                    self.distributions["articulation_views"][view_name] = dict()
                                    self.dr.physics_view.register_articulation_view(
                                        articulation_view=task.world.scene._scene_registry.articulated_views[
                                            view_name
                                        ],
                                    )
                                    for attribute, params in randomization_params["articulation_views"][
                                        view_name
                                    ].items():
                                        # scale is a startup-only attribute.
                                        if attribute not in ["scale"]:
                                            self._set_up_articulation_view_randomization(view_name, attribute, params)
            self.rep.orchestrator.run()
            if self._config.get("enable_recording", False):
                # we need to deal with initializing render product here because it has to be initialized after orchestrator.run.
                # otherwise, replicator will stop the simulation
                task._env.create_viewport_render_product(resolution=(task.viewport_camera_width, task.viewport_camera_height))
                if not task.is_extension:
                    task.world.render()
        else:
            dr_config = self._cfg.get("domain_randomization", None)
            if dr_config is None:
                raise ValueError("No domain randomization parameters are specified in the task yaml config file")
            randomize = dr_config.get("randomize", False)
            randomization_params = dr_config.get("randomization_params", None)
            if randomize == False or randomization_params is None:
                print("Domain randomization will not be applied.")
    def _set_up_observations_randomization(self, task):
        """Validate the observation-noise config and allocate its per-env buffers.

        Marks the task so it routes observations through
        ``apply_observations_randomization``, records the active distribution
        parameters, and creates the interval counter plus the episode-correlated
        noise buffer.

        Raises:
            ValueError: if the observations section or a required key is missing.
        """
        task.randomize_observations = True
        self._observations_dr_params = self._cfg["domain_randomization"]["randomization_params"]["observations"]
        if self._observations_dr_params is None:
            raise ValueError(f"Observations randomization parameters are not provided.")
        if "on_reset" in self._observations_dr_params.keys():
            if not set(("operation", "distribution", "distribution_parameters")).issubset(
                self._observations_dr_params["on_reset"].keys()
            ):
                raise ValueError(
                    f"Please ensure the following observations on_reset randomization parameters are provided: "
                    + "operation, distribution, distribution_parameters."
                )
            self.active_domain_randomizations[("observations", "on_reset")] = np.array(
                self._observations_dr_params["on_reset"]["distribution_parameters"]
            )
        if "on_interval" in self._observations_dr_params.keys():
            if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
                self._observations_dr_params["on_interval"].keys()
            ):
                raise ValueError(
                    f"Please ensure the following observations on_interval randomization parameters are provided: "
                    + "frequency_interval, operation, distribution, distribution_parameters."
                )
            self.active_domain_randomizations[("observations", "on_interval")] = np.array(
                self._observations_dr_params["on_interval"]["distribution_parameters"]
            )
        # Per-env step counter that drives the on_interval schedule.
        self._observations_counter_buffer = torch.zeros(
            (self._cfg["env"]["numEnvs"]), dtype=torch.int, device=self._config["rl_device"]
        )
        # Episode-persistent noise, redrawn only for envs that reset.
        self._observations_correlated_noise = torch.zeros(
            (self._cfg["env"]["numEnvs"], task.num_observations), device=self._config["rl_device"]
        )
    def _set_up_actions_randomization(self, task):
        """Validate the action-noise config and allocate its per-env buffers.

        Mirror of ``_set_up_observations_randomization`` for the action stream:
        marks the task, records active distribution parameters, and creates the
        interval counter plus the episode-correlated noise buffer.

        Raises:
            ValueError: if the actions section or a required key is missing.
        """
        task.randomize_actions = True
        self._actions_dr_params = self._cfg["domain_randomization"]["randomization_params"]["actions"]
        if self._actions_dr_params is None:
            raise ValueError(f"Actions randomization parameters are not provided.")
        if "on_reset" in self._actions_dr_params.keys():
            if not set(("operation", "distribution", "distribution_parameters")).issubset(
                self._actions_dr_params["on_reset"].keys()
            ):
                raise ValueError(
                    f"Please ensure the following actions on_reset randomization parameters are provided: "
                    + "operation, distribution, distribution_parameters."
                )
            self.active_domain_randomizations[("actions", "on_reset")] = np.array(
                self._actions_dr_params["on_reset"]["distribution_parameters"]
            )
        if "on_interval" in self._actions_dr_params.keys():
            if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
                self._actions_dr_params["on_interval"].keys()
            ):
                raise ValueError(
                    f"Please ensure the following actions on_interval randomization parameters are provided: "
                    + "frequency_interval, operation, distribution, distribution_parameters."
                )
            self.active_domain_randomizations[("actions", "on_interval")] = np.array(
                self._actions_dr_params["on_interval"]["distribution_parameters"]
            )
        # Per-env step counter that drives the on_interval schedule.
        self._actions_counter_buffer = torch.zeros(
            (self._cfg["env"]["numEnvs"]), dtype=torch.int, device=self._config["rl_device"]
        )
        # Episode-persistent noise, redrawn only for envs that reset.
        self._actions_correlated_noise = torch.zeros(
            (self._cfg["env"]["numEnvs"], task.num_actions), device=self._config["rl_device"]
        )
def apply_observations_randomization(self, observations, reset_buf):
env_ids = reset_buf.nonzero(as_tuple=False).squeeze(-1)
self._observations_counter_buffer[env_ids] = 0
self._observations_counter_buffer += 1
if "on_reset" in self._observations_dr_params.keys():
observations[:] = self._apply_correlated_noise(
buffer_type="observations",
buffer=observations,
reset_ids=env_ids,
operation=self._observations_dr_params["on_reset"]["operation"],
distribution=self._observations_dr_params["on_reset"]["distribution"],
distribution_parameters=self._observations_dr_params["on_reset"]["distribution_parameters"],
)
if "on_interval" in self._observations_dr_params.keys():
randomize_ids = (
(self._observations_counter_buffer >= self._observations_dr_params["on_interval"]["frequency_interval"])
.nonzero(as_tuple=False)
.squeeze(-1)
)
self._observations_counter_buffer[randomize_ids] = 0
observations[:] = self._apply_uncorrelated_noise(
buffer=observations,
randomize_ids=randomize_ids,
operation=self._observations_dr_params["on_interval"]["operation"],
distribution=self._observations_dr_params["on_interval"]["distribution"],
distribution_parameters=self._observations_dr_params["on_interval"]["distribution_parameters"],
)
return observations
def apply_actions_randomization(self, actions, reset_buf):
env_ids = reset_buf.nonzero(as_tuple=False).squeeze(-1)
self._actions_counter_buffer[env_ids] = 0
self._actions_counter_buffer += 1
if "on_reset" in self._actions_dr_params.keys():
actions[:] = self._apply_correlated_noise(
buffer_type="actions",
buffer=actions,
reset_ids=env_ids,
operation=self._actions_dr_params["on_reset"]["operation"],
distribution=self._actions_dr_params["on_reset"]["distribution"],
distribution_parameters=self._actions_dr_params["on_reset"]["distribution_parameters"],
)
if "on_interval" in self._actions_dr_params.keys():
randomize_ids = (
(self._actions_counter_buffer >= self._actions_dr_params["on_interval"]["frequency_interval"])
.nonzero(as_tuple=False)
.squeeze(-1)
)
self._actions_counter_buffer[randomize_ids] = 0
actions[:] = self._apply_uncorrelated_noise(
buffer=actions,
randomize_ids=randomize_ids,
operation=self._actions_dr_params["on_interval"]["operation"],
distribution=self._actions_dr_params["on_interval"]["distribution"],
distribution_parameters=self._actions_dr_params["on_interval"]["distribution_parameters"],
)
return actions
def _apply_uncorrelated_noise(self, buffer, randomize_ids, operation, distribution, distribution_parameters):
if distribution == "gaussian" or distribution == "normal":
noise = torch.normal(
mean=distribution_parameters[0],
std=distribution_parameters[1],
size=(len(randomize_ids), buffer.shape[1]),
device=self._config["rl_device"],
)
elif distribution == "uniform":
noise = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand(
(len(randomize_ids), buffer.shape[1]), device=self._config["rl_device"]
) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
noise = torch.exp(
(np.log(distribution_parameters[1]) - np.log(distribution_parameters[0]))
* torch.rand((len(randomize_ids), buffer.shape[1]), device=self._config["rl_device"])
+ np.log(distribution_parameters[0])
)
else:
print(f"The specified {distribution} distribution is not supported.")
if operation == "additive":
buffer[randomize_ids] += noise
elif operation == "scaling":
buffer[randomize_ids] *= noise
else:
print(f"The specified {operation} operation type is not supported.")
return buffer
def _apply_correlated_noise(self, buffer_type, buffer, reset_ids, operation, distribution, distribution_parameters):
if buffer_type == "observations":
correlated_noise_buffer = self._observations_correlated_noise
elif buffer_type == "actions":
correlated_noise_buffer = self._actions_correlated_noise
if len(reset_ids) > 0:
if distribution == "gaussian" or distribution == "normal":
correlated_noise_buffer[reset_ids] = torch.normal(
mean=distribution_parameters[0],
std=distribution_parameters[1],
size=(len(reset_ids), buffer.shape[1]),
device=self._config["rl_device"],
)
elif distribution == "uniform":
correlated_noise_buffer[reset_ids] = (
distribution_parameters[1] - distribution_parameters[0]
) * torch.rand(
(len(reset_ids), buffer.shape[1]), device=self._config["rl_device"]
) + distribution_parameters[
0
]
elif distribution == "loguniform" or distribution == "log_uniform":
correlated_noise_buffer[reset_ids] = torch.exp(
(np.log(distribution_parameters[1]) - np.log(distribution_parameters[0]))
* torch.rand((len(reset_ids), buffer.shape[1]), device=self._config["rl_device"])
+ np.log(distribution_parameters[0])
)
else:
print(f"The specified {distribution} distribution is not supported.")
if operation == "additive":
buffer += correlated_noise_buffer
elif operation == "scaling":
buffer *= correlated_noise_buffer
else:
print(f"The specified {operation} operation type is not supported.")
return buffer
    def _set_up_simulation_randomization(self, attribute, params):
        """Register on_reset / on_interval randomization of one simulation-context attribute.

        Validates the parameter sections, records the active distribution
        parameters, builds the replicator distribution, and schedules
        ``randomize_simulation_context`` behind the appropriate DR gate.
        Attributes not listed in ``SIMULATION_CONTEXT_ATTRIBUTES`` are silently skipped.

        Raises:
            ValueError: if ``params`` is None or a required key is missing.
        """
        if params is None:
            raise ValueError(f"Randomization parameters for simulation {attribute} is not provided.")
        if attribute in self.dr.SIMULATION_CONTEXT_ATTRIBUTES:
            self.distributions["simulation"][attribute] = dict()
            if "on_reset" in params.keys():
                if not set(("operation", "distribution", "distribution_parameters")).issubset(params["on_reset"]):
                    raise ValueError(
                        f"Please ensure the following randomization parameters for simulation {attribute} on_reset are provided: "
                        + "operation, distribution, distribution_parameters."
                    )
                self.active_domain_randomizations[("simulation", attribute, "on_reset")] = np.array(
                    params["on_reset"]["distribution_parameters"]
                )
                kwargs = {"operation": params["on_reset"]["operation"]}
                self.distributions["simulation"][attribute]["on_reset"] = self._generate_distribution(
                    dimension=self.dr.physics_view._simulation_context_initial_values[attribute].shape[0],
                    view_name="simulation",
                    attribute=attribute,
                    params=params["on_reset"],
                )
                kwargs[attribute] = self.distributions["simulation"][attribute]["on_reset"]
                with self.dr.gate.on_env_reset():
                    self.dr.physics_view.randomize_simulation_context(**kwargs)
            if "on_interval" in params.keys():
                if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
                    params["on_interval"]
                ):
                    raise ValueError(
                        f"Please ensure the following randomization parameters for simulation {attribute} on_interval are provided: "
                        + "frequency_interval, operation, distribution, distribution_parameters."
                    )
                self.active_domain_randomizations[("simulation", attribute, "on_interval")] = np.array(
                    params["on_interval"]["distribution_parameters"]
                )
                kwargs = {"operation": params["on_interval"]["operation"]}
                self.distributions["simulation"][attribute]["on_interval"] = self._generate_distribution(
                    dimension=self.dr.physics_view._simulation_context_initial_values[attribute].shape[0],
                    view_name="simulation",
                    attribute=attribute,
                    params=params["on_interval"],
                )
                kwargs[attribute] = self.distributions["simulation"][attribute]["on_interval"]
                with self.dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
                    self.dr.physics_view.randomize_simulation_context(**kwargs)
    def _set_up_rigid_prim_view_randomization(self, view_name, attribute, params):
        """Register on_reset / on_interval randomization of one rigid-prim-view attribute.

        Validates the parameter sections, records the active distribution
        parameters, builds the replicator distribution, and schedules
        ``randomize_rigid_prim_view`` behind the appropriate DR gate.
        ``material_properties`` may additionally pass ``num_buckets`` through.

        Raises:
            ValueError: if ``params`` is None, a required key is missing, or the
                attribute is not a valid rigid-prim randomization attribute.
        """
        if params is None:
            raise ValueError(f"Randomization parameters for rigid prim view {view_name} {attribute} is not provided.")
        if attribute in self.dr.RIGID_PRIM_ATTRIBUTES:
            self.distributions["rigid_prim_views"][view_name][attribute] = dict()
            if "on_reset" in params.keys():
                if not set(("operation", "distribution", "distribution_parameters")).issubset(params["on_reset"]):
                    raise ValueError(
                        f"Please ensure the following randomization parameters for {view_name} {attribute} on_reset are provided: "
                        + "operation, distribution, distribution_parameters."
                    )
                self.active_domain_randomizations[("rigid_prim_views", view_name, attribute, "on_reset")] = np.array(
                    params["on_reset"]["distribution_parameters"]
                )
                kwargs = {"view_name": view_name, "operation": params["on_reset"]["operation"]}
                if attribute == "material_properties" and "num_buckets" in params["on_reset"].keys():
                    kwargs["num_buckets"] = params["on_reset"]["num_buckets"]
                self.distributions["rigid_prim_views"][view_name][attribute]["on_reset"] = self._generate_distribution(
                    dimension=self.dr.physics_view._rigid_prim_views_initial_values[view_name][attribute].shape[1],
                    view_name=view_name,
                    attribute=attribute,
                    params=params["on_reset"],
                )
                kwargs[attribute] = self.distributions["rigid_prim_views"][view_name][attribute]["on_reset"]
                with self.dr.gate.on_env_reset():
                    self.dr.physics_view.randomize_rigid_prim_view(**kwargs)
            if "on_interval" in params.keys():
                if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
                    params["on_interval"]
                ):
                    raise ValueError(
                        f"Please ensure the following randomization parameters for {view_name} {attribute} on_interval are provided: "
                        + "frequency_interval, operation, distribution, distribution_parameters."
                    )
                self.active_domain_randomizations[("rigid_prim_views", view_name, attribute, "on_interval")] = np.array(
                    params["on_interval"]["distribution_parameters"]
                )
                kwargs = {"view_name": view_name, "operation": params["on_interval"]["operation"]}
                if attribute == "material_properties" and "num_buckets" in params["on_interval"].keys():
                    kwargs["num_buckets"] = params["on_interval"]["num_buckets"]
                self.distributions["rigid_prim_views"][view_name][attribute][
                    "on_interval"
                ] = self._generate_distribution(
                    dimension=self.dr.physics_view._rigid_prim_views_initial_values[view_name][attribute].shape[1],
                    view_name=view_name,
                    attribute=attribute,
                    params=params["on_interval"],
                )
                kwargs[attribute] = self.distributions["rigid_prim_views"][view_name][attribute]["on_interval"]
                with self.dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
                    self.dr.physics_view.randomize_rigid_prim_view(**kwargs)
        else:
            raise ValueError(f"The attribute {attribute} for {view_name} is invalid for domain randomization.")
    def _set_up_articulation_view_randomization(self, view_name, attribute, params):
        """Register domain randomization for one attribute of an articulation view.

        Supports two triggers: "on_reset" (applied on env reset) and "on_interval"
        (applied every `frequency_interval` steps). For each trigger this records the
        initial parameters in `self.active_domain_randomizations`, builds a replicator
        distribution node, and registers the randomization with the physics view
        inside the matching replicator gate.

        Args:
            view_name: name of the registered articulation view.
            attribute: attribute to randomize; must be in `self.dr.ARTICULATION_ATTRIBUTES`.
            params: dict with optional "on_reset" / "on_interval" sub-dicts.

        Raises:
            ValueError: if `params` is None, required keys are missing, or the
                attribute is not a valid articulation attribute.
        """
        if params is None:
            raise ValueError(f"Randomization parameters for articulation view {view_name} {attribute} is not provided.")
        if attribute in self.dr.ARTICULATION_ATTRIBUTES:
            self.distributions["articulation_views"][view_name][attribute] = dict()
            if "on_reset" in params.keys():
                # reset-triggered randomization requires these three keys
                if not set(("operation", "distribution", "distribution_parameters")).issubset(params["on_reset"]):
                    raise ValueError(
                        f"Please ensure the following randomization parameters for {view_name} {attribute} on_reset are provided: "
                        + "operation, distribution, distribution_parameters."
                    )
                # remember the user-specified parameters so they can be queried/restored later
                self.active_domain_randomizations[("articulation_views", view_name, attribute, "on_reset")] = np.array(
                    params["on_reset"]["distribution_parameters"]
                )
                kwargs = {"view_name": view_name, "operation": params["on_reset"]["operation"]}
                if attribute == "material_properties" and "num_buckets" in params["on_reset"].keys():
                    kwargs["num_buckets"] = params["on_reset"]["num_buckets"]
                # dimension comes from the initial-values tensor captured by the physics view
                self.distributions["articulation_views"][view_name][attribute][
                    "on_reset"
                ] = self._generate_distribution(
                    dimension=self.dr.physics_view._articulation_views_initial_values[view_name][attribute].shape[1],
                    view_name=view_name,
                    attribute=attribute,
                    params=params["on_reset"],
                )
                kwargs[attribute] = self.distributions["articulation_views"][view_name][attribute]["on_reset"]
                # gate ensures the randomization only fires on env reset
                with self.dr.gate.on_env_reset():
                    self.dr.physics_view.randomize_articulation_view(**kwargs)
            if "on_interval" in params.keys():
                # interval-triggered randomization additionally requires a frequency
                if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
                    params["on_interval"]
                ):
                    raise ValueError(
                        f"Please ensure the following randomization parameters for {view_name} {attribute} on_interval are provided: "
                        + "frequency_interval, operation, distribution, distribution_parameters."
                    )
                self.active_domain_randomizations[
                    ("articulation_views", view_name, attribute, "on_interval")
                ] = np.array(params["on_interval"]["distribution_parameters"])
                kwargs = {"view_name": view_name, "operation": params["on_interval"]["operation"]}
                if attribute == "material_properties" and "num_buckets" in params["on_interval"].keys():
                    kwargs["num_buckets"] = params["on_interval"]["num_buckets"]
                self.distributions["articulation_views"][view_name][attribute][
                    "on_interval"
                ] = self._generate_distribution(
                    dimension=self.dr.physics_view._articulation_views_initial_values[view_name][attribute].shape[1],
                    view_name=view_name,
                    attribute=attribute,
                    params=params["on_interval"],
                )
                kwargs[attribute] = self.distributions["articulation_views"][view_name][attribute]["on_interval"]
                # gate fires every `frequency_interval` simulation steps
                with self.dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
                    self.dr.physics_view.randomize_articulation_view(**kwargs)
        else:
            raise ValueError(f"The attribute {attribute} for {view_name} is invalid for domain randomization.")
def _generate_distribution(self, view_name, attribute, dimension, params):
dist_params = self._sanitize_distribution_parameters(attribute, dimension, params["distribution_parameters"])
if params["distribution"] == "uniform":
return self.rep.distribution.uniform(tuple(dist_params[0]), tuple(dist_params[1]))
elif params["distribution"] == "gaussian" or params["distribution"] == "normal":
return self.rep.distribution.normal(tuple(dist_params[0]), tuple(dist_params[1]))
elif params["distribution"] == "loguniform" or params["distribution"] == "log_uniform":
return self.rep.distribution.log_uniform(tuple(dist_params[0]), tuple(dist_params[1]))
else:
raise ValueError(
f"The provided distribution for {view_name} {attribute} is not supported. "
+ "Options: uniform, gaussian/normal, loguniform/log_uniform"
)
def _sanitize_distribution_parameters(self, attribute, dimension, params):
distribution_parameters = np.array(params)
if distribution_parameters.shape == (2,):
# if the user does not provide a set of parameters for each dimension
dist_params = [[distribution_parameters[0]] * dimension, [distribution_parameters[1]] * dimension]
elif distribution_parameters.shape == (2, dimension):
# if the user provides a set of parameters for each dimension in the format [[...], [...]]
dist_params = distribution_parameters.tolist()
elif attribute in ["material_properties", "body_inertias"] and distribution_parameters.shape == (2, 3):
# if the user only provides the parameters for one body in the articulation, assume the same parameters for all other links
dist_params = [
[distribution_parameters[0]] * (dimension // 3),
[distribution_parameters[1]] * (dimension // 3),
]
else:
raise ValueError(
f"The provided distribution_parameters for {view_name} {attribute} is invalid due to incorrect dimensions."
)
return dist_params
    def set_dr_distribution_parameters(self, distribution_parameters, *distribution_path):
        """Update the parameters of an already-registered DR distribution at runtime.

        Args:
            distribution_parameters: new [param_1, param_2] pair (per-dimension lists
                are also accepted for view-based randomizations).
            *distribution_path: key tuple identifying the randomization, e.g.
                ("observations",), ("actions",), or
                ("rigid_prim_views"/"articulation_views", view_name, attribute, trigger).

        Raises:
            ValueError: if the path is not an active randomization or the
                parameters have the wrong form.
        """
        if distribution_path not in self.active_domain_randomizations.keys():
            raise ValueError(
                f"Cannot find a valid domain randomization distribution using the path {distribution_path}."
            )
        if distribution_path[0] == "observations":
            if len(distribution_parameters) == 2:
                self._observations_dr_params[distribution_path[1]]["distribution_parameters"] = distribution_parameters
            else:
                raise ValueError(
                    f"Please provide distribution_parameters for observations {distribution_path[1]} "
                    + "in the form of [dist_param_1, dist_param_2]"
                )
        elif distribution_path[0] == "actions":
            if len(distribution_parameters) == 2:
                self._actions_dr_params[distribution_path[1]]["distribution_parameters"] = distribution_parameters
            else:
                raise ValueError(
                    f"Please provide distribution_parameters for actions {distribution_path[1]} "
                    + "in the form of [dist_param_1, dist_param_2]"
                )
        else:
            # view-based randomization: locate the replicator distribution node
            replicator_distribution = self.distributions[distribution_path[0]][distribution_path[1]][
                distribution_path[2]
            ]
            if distribution_path[0] == "rigid_prim_views" or distribution_path[0] == "articulation_views":
                replicator_distribution = replicator_distribution[distribution_path[3]]
            # dispatch on the OmniGraph node type to pick the right parameter names
            if (
                replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleUniform"
                or replicator_distribution.node.get_node_type().get_node_type()
                == "omni.replicator.core.OgnSampleLogUniform"
            ):
                # infer dimensionality from the node's current "lower" parameter
                dimension = len(self.dr.utils.get_distribution_params(replicator_distribution, ["lower"])[0])
                dist_params = self._sanitize_distribution_parameters(
                    distribution_path[-2], dimension, distribution_parameters
                )
                self.dr.utils.set_distribution_params(
                    replicator_distribution, {"lower": dist_params[0], "upper": dist_params[1]}
                )
            elif replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleNormal":
                dimension = len(self.dr.utils.get_distribution_params(replicator_distribution, ["mean"])[0])
                dist_params = self._sanitize_distribution_parameters(
                    distribution_path[-2], dimension, distribution_parameters
                )
                self.dr.utils.set_distribution_params(
                    replicator_distribution, {"mean": dist_params[0], "std": dist_params[1]}
                )
def get_dr_distribution_parameters(self, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(
f"Cannot find a valid domain randomization distribution using the path {distribution_path}."
)
if distribution_path[0] == "observations":
return self._observations_dr_params[distribution_path[1]]["distribution_parameters"]
elif distribution_path[0] == "actions":
return self._actions_dr_params[distribution_path[1]]["distribution_parameters"]
else:
replicator_distribution = self.distributions[distribution_path[0]][distribution_path[1]][
distribution_path[2]
]
if distribution_path[0] == "rigid_prim_views" or distribution_path[0] == "articulation_views":
replicator_distribution = replicator_distribution[distribution_path[3]]
if (
replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleUniform"
or replicator_distribution.node.get_node_type().get_node_type()
== "omni.replicator.core.OgnSampleLogUniform"
):
return self.dr.utils.get_distribution_params(replicator_distribution, ["lower", "upper"])
elif replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleNormal":
return self.dr.utils.get_distribution_params(replicator_distribution, ["mean", "std"])
def get_initial_dr_distribution_parameters(self, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(
f"Cannot find a valid domain randomization distribution using the path {distribution_path}."
)
return self.active_domain_randomizations[distribution_path].copy()
def _generate_noise(self, distribution, distribution_parameters, size, device):
if distribution == "gaussian" or distribution == "normal":
noise = torch.normal(
mean=distribution_parameters[0], std=distribution_parameters[1], size=size, device=device
)
elif distribution == "uniform":
noise = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand(
size, device=device
) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
noise = torch.exp(
(np.log(distribution_parameters[1]) - np.log(distribution_parameters[0]))
* torch.rand(size, device=device)
+ np.log(distribution_parameters[0])
)
else:
print(f"The specified {distribution} distribution is not supported.")
return noise
def randomize_scale_on_startup(self, view, distribution, distribution_parameters, operation, sync_dim_noise=True):
scales = view.get_local_scales()
if sync_dim_noise:
dist_params = np.asarray(
self._sanitize_distribution_parameters(attribute="scale", dimension=1, params=distribution_parameters)
)
noise = (
self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device).repeat(3, 1).T
)
else:
dist_params = np.asarray(
self._sanitize_distribution_parameters(attribute="scale", dimension=3, params=distribution_parameters)
)
noise = torch.zeros((view.count, 3), device=view._device)
for i in range(3):
noise[:, i] = self._generate_noise(distribution, dist_params[:, i], (view.count,), view._device)
if operation == "additive":
scales += noise
elif operation == "scaling":
scales *= noise
elif operation == "direct":
scales = noise
else:
print(f"The specified {operation} operation type is not supported.")
view.set_local_scales(scales=scales)
def randomize_mass_on_startup(self, view, distribution, distribution_parameters, operation):
if isinstance(view, omni.isaac.core.prims.RigidPrimView) or isinstance(view, RigidPrimView):
masses = view.get_masses()
dist_params = np.asarray(
self._sanitize_distribution_parameters(
attribute=f"{view.name} mass", dimension=1, params=distribution_parameters
)
)
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device)
set_masses = view.set_masses
if operation == "additive":
masses += noise
elif operation == "scaling":
masses *= noise
elif operation == "direct":
masses = noise
else:
print(f"The specified {operation} operation type is not supported.")
set_masses(masses)
def randomize_density_on_startup(self, view, distribution, distribution_parameters, operation):
if isinstance(view, omni.isaac.core.prims.RigidPrimView) or isinstance(view, RigidPrimView):
densities = view.get_densities()
dist_params = np.asarray(
self._sanitize_distribution_parameters(
attribute=f"{view.name} density", dimension=1, params=distribution_parameters
)
)
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device)
set_densities = view.set_densities
if operation == "additive":
densities += noise
elif operation == "scaling":
densities *= noise
elif operation == "direct":
densities = noise
else:
print(f"The specified {operation} operation type is not supported.")
set_densities(densities)
| 46,048 |
Python
| 58.726329 | 136 | 0.555942 |
gitLSW/robot-cloud/training/omniisaacgymenvs/rl_task.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import asyncio
from abc import abstractmethod
import numpy as np
import omni.kit
import omni.usd
import torch
from gym import spaces
from omni.isaac.cloner import GridCloner
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.utils.prims import define_prim
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.gym.tasks.rl_task import RLTaskInterface
from omniisaacgymenvs.randomize import Randomizer
from pxr import Gf, UsdGeom, UsdLux
class RLTask(RLTaskInterface):
    """This class provides a PyTorch RL-specific interface for setting up RL tasks.
    It includes utilities for setting up RL task related parameters,
    cloning environments, and data collection for RL algorithms.
    """
    def __init__(self, name, env, offset=None) -> None:
        """Initializes RL parameters, cloner object, and buffers.
        Args:
            name (str): name of the task.
            env (VecEnvBase): an instance of the environment wrapper class to register task.
            offset (Optional[np.ndarray], optional): offset applied to all assets of the task. Defaults to None.
        """
        BaseTask.__init__(self, name=name, offset=offset)
        self._rand_seed = self._cfg["seed"]
        # optimization flags for pytorch JIT
        torch._C._jit_set_nvfuser_enabled(False)
        self.test = self._cfg["test"]
        self._device = self._cfg["sim_device"]
        # set up randomizer for DR
        self._dr_randomizer = Randomizer(self._cfg, self._task_cfg)
        if self._dr_randomizer.randomize:
            # deferred import: the replicator extension is only loaded when DR is enabled
            import omni.replicator.isaac as dr
            self.dr = dr
        # set up replicator for camera data collection
        self.enable_cameras = self._task_cfg["sim"].get("enable_cameras", False)
        if self.enable_cameras:
            from omni.replicator.isaac.scripts.writers.pytorch_writer import PytorchWriter
            from omni.replicator.isaac.scripts.writers.pytorch_listener import PytorchListener
            import omni.replicator.core as rep
            self.rep = rep
            self.PytorchWriter = PytorchWriter
            self.PytorchListener = PytorchListener
        print("Task Device:", self._device)
        self.randomize_actions = False
        self.randomize_observations = False
        # clipping bounds and control rates come from the task config, with permissive defaults
        self.clip_obs = self._task_cfg["env"].get("clipObservations", np.Inf)
        self.clip_actions = self._task_cfg["env"].get("clipActions", np.Inf)
        self.rl_device = self._cfg.get("rl_device", "cuda:0")
        self.control_frequency_inv = self._task_cfg["env"].get("controlFrequencyInv", 1)
        self.rendering_interval = self._task_cfg.get("renderingInterval", 1)
        # parse default viewport camera position and lookat target and resolution (width, height)
        self.camera_position = [10, 10, 3]
        self.camera_target = [0, 0, 0]
        self.viewport_camera_width = 1280
        self.viewport_camera_height = 720
        if "viewport" in self._task_cfg:
            self.camera_position = self._task_cfg["viewport"].get("camera_position", self.camera_position)
            self.camera_target = self._task_cfg["viewport"].get("camera_target", self.camera_target)
            self.viewport_camera_width = self._task_cfg["viewport"].get("viewport_camera_width", self.viewport_camera_width)
            self.viewport_camera_height = self._task_cfg["viewport"].get("viewport_camera_height", self.viewport_camera_height)
        print("RL device: ", self.rl_device)
        self._env = env
        self.is_extension = False
        # subclasses may set these before calling super().__init__
        if not hasattr(self, "_num_agents"):
            self._num_agents = 1  # used for multi-agent environments
        if not hasattr(self, "_num_states"):
            self._num_states = 0
        # initialize data spaces (defaults to gym.Box)
        if not hasattr(self, "action_space"):
            self.action_space = spaces.Box(
                np.ones(self.num_actions, dtype=np.float32) * -1.0, np.ones(self.num_actions, dtype=np.float32) * 1.0
            )
        if not hasattr(self, "observation_space"):
            self.observation_space = spaces.Box(
                np.ones(self.num_observations, dtype=np.float32) * -np.Inf,
                np.ones(self.num_observations, dtype=np.float32) * np.Inf,
            )
        if not hasattr(self, "state_space"):
            self.state_space = spaces.Box(
                np.ones(self.num_states, dtype=np.float32) * -np.Inf,
                np.ones(self.num_states, dtype=np.float32) * np.Inf,
            )
        self.cleanup()
    def cleanup(self) -> None:
        """Prepares torch buffers for RL data collection."""
        # prepare tensors
        self.obs_buf = torch.zeros((self._num_envs, self.num_observations), device=self._device, dtype=torch.float)
        self.states_buf = torch.zeros((self._num_envs, self.num_states), device=self._device, dtype=torch.float)
        self.rew_buf = torch.zeros(self._num_envs, device=self._device, dtype=torch.float)
        # reset_buf starts at ones so every env resets on the first step
        self.reset_buf = torch.ones(self._num_envs, device=self._device, dtype=torch.long)
        self.progress_buf = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
        self.extras = {}
    def set_up_scene(
        self, scene, replicate_physics=True, collision_filter_global_paths=[], filter_collisions=True, copy_from_source=False
    ) -> None:
        """Clones environments based on value provided in task config and applies collision filters to mask
        collisions across environments.
        Args:
            scene (Scene): Scene to add objects to.
            replicate_physics (bool): Clone physics using PhysX API for better performance.
            collision_filter_global_paths (list): Prim paths of global objects that should not have collision masked.
            filter_collisions (bool): Mask off collision between environments.
            copy_from_source (bool): Copy from source prim when cloning instead of inheriting.
        """
        super().set_up_scene(scene)
        self._cloner = GridCloner(spacing=self._env_spacing)
        self._cloner.define_base_env(self.default_base_env_path)
        stage = omni.usd.get_context().get_stage()
        UsdGeom.Xform.Define(stage, self.default_zero_env_path)
        if self._task_cfg["sim"].get("add_ground_plane", True):
            self._ground_plane_path = "/World/defaultGroundPlane"
            collision_filter_global_paths.append(self._ground_plane_path)
            scene.add_default_ground_plane(prim_path=self._ground_plane_path)
        # clone env_0 into env_1..env_{N-1}
        prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs)
        self._env_pos = self._cloner.clone(
            source_prim_path="/World/envs/env_0", prim_paths=prim_paths, replicate_physics=replicate_physics, copy_from_source=copy_from_source
        )
        self._env_pos = torch.tensor(np.array(self._env_pos), device=self._device, dtype=torch.float)
        if filter_collisions:
            self._cloner.filter_collisions(
                self._env.world.get_physics_context().prim_path,
                "/World/collisions",
                prim_paths,
                collision_filter_global_paths,
            )
        if self._env.render_enabled:
            self.set_initial_camera_params(camera_position=self.camera_position, camera_target=self.camera_target)
            if self._task_cfg["sim"].get("add_distant_light", True):
                self._create_distant_light()
            # initialize capturer for viewport recording
            # this has to be called after initializing replicator for DR
            if self._cfg.get("enable_recording", False) and not self._dr_randomizer.randomize:
                self._env.create_viewport_render_product(resolution=(self.viewport_camera_width, self.viewport_camera_height))
    def set_initial_camera_params(self, camera_position, camera_target):
        # Moves the default perspective camera to the configured position/target.
        from omni.kit.viewport.utility import get_viewport_from_window_name
        from omni.kit.viewport.utility.camera_state import ViewportCameraState
        viewport_api_2 = get_viewport_from_window_name("Viewport")
        viewport_api_2.set_active_camera("/OmniverseKit_Persp")
        camera_state = ViewportCameraState("/OmniverseKit_Persp", viewport_api_2)
        camera_state.set_position_world(Gf.Vec3d(camera_position[0], camera_position[1], camera_position[2]), True)
        camera_state.set_target_world(Gf.Vec3d(camera_target[0], camera_target[1], camera_target[2]), True)
    def _create_distant_light(self, prim_path="/World/defaultDistantLight", intensity=5000):
        # Adds a distant (sun-like) light so cloned scenes are not unlit.
        stage = get_current_stage()
        light = UsdLux.DistantLight.Define(stage, prim_path)
        light.CreateIntensityAttr().Set(intensity)
    def initialize_views(self, scene):
        """Optionally implemented by individual task classes to initialize views used in the task.
        This API is required for the extension workflow, where tasks are expected to train on a pre-defined stage.
        Args:
            scene (Scene): Scene to remove existing views and initialize/add new views.
        """
        self._cloner = GridCloner(spacing=self._env_spacing)
        pos, _ = self._cloner.get_clone_transforms(self._num_envs)
        self._env_pos = torch.tensor(np.array(pos), device=self._device, dtype=torch.float)
        if self._env.render_enabled:
            # initialize capturer for viewport recording
            if self._cfg.get("enable_recording", False) and not self._dr_randomizer.randomize:
                self._env.create_viewport_render_product(resolution=(self.viewport_camera_width, self.viewport_camera_height))
    @property
    def default_base_env_path(self):
        """Retrieves default path to the parent of all env prims.
        Returns:
            default_base_env_path(str): Defaults to "/World/envs".
        """
        return "/World/envs"
    @property
    def default_zero_env_path(self):
        """Retrieves default path to the first env prim (index 0).
        Returns:
            default_zero_env_path(str): Defaults to "/World/envs/env_0".
        """
        return f"{self.default_base_env_path}/env_0"
    def reset(self):
        """Flags all environments for reset."""
        self.reset_buf = torch.ones_like(self.reset_buf)
    def post_physics_step(self):
        """Processes RL required computations for observations, states, rewards, resets, and extras.
        Also maintains progress buffer for tracking step count per environment.
        Returns:
            obs_buf(torch.Tensor): Tensor of observation data.
            rew_buf(torch.Tensor): Tensor of rewards data.
            reset_buf(torch.Tensor): Tensor of resets/dones data.
            extras(dict): Dictionary of extras data.
        """
        self.progress_buf[:] += 1
        # only run task computations while the simulation is playing
        if self._env.world.is_playing():
            self.get_observations()
            self.get_states()
            self.calculate_metrics()
            self.is_done()
            self.get_extras()
        return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
    @property
    def world(self):
        """Retrieves the World object for simulation.
        Returns:
            world(World): Simulation World.
        """
        return self._env.world
    @property
    def cfg(self):
        """Retrieves the main config.
        Returns:
            cfg(dict): Main config dictionary.
        """
        return self._cfg
    def set_is_extension(self, is_extension):
        # Marks whether this task runs inside the extension (pre-built stage) workflow.
        self.is_extension = is_extension
| 13,131 |
Python
| 43.666667 | 143 | 0.659889 |
gitLSW/robot-cloud/training/omniisaacgymenvs/sim_config.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import carb
import numpy as np
import omni.usd
import torch
from omni.isaac.core.utils.extensions import enable_extension
def merge(source, destination):
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = destination.setdefault(key, {})
merge(value, node)
else:
destination[key] = value
return destination
# Default parameter tables. SimConfig._parse_config deep-copies these and then
# overlays any values supplied in the task config, so edits here change the
# behavior of every task that does not override the corresponding key.
default_physx_params = {
    ### Per-scene settings
    "use_gpu": False,
    "worker_thread_count": 4,
    "solver_type": 1,  # 0: PGS, 1:TGS
    "bounce_threshold_velocity": 0.2,
    "friction_offset_threshold": 0.04,  # A threshold of contact separation distance used to decide if a contact
    # point will experience friction forces.
    "friction_correlation_distance": 0.025,  # Contact points can be merged into a single friction anchor if the
    # distance between the contacts is smaller than correlation distance.
    # disabling these can be useful for debugging
    "enable_sleeping": True,
    "enable_stabilization": True,
    # GPU buffers
    "gpu_max_rigid_contact_count": 512 * 1024,
    "gpu_max_rigid_patch_count": 80 * 1024,
    "gpu_found_lost_pairs_capacity": 1024,
    "gpu_found_lost_aggregate_pairs_capacity": 1024,
    "gpu_total_aggregate_pairs_capacity": 1024,
    "gpu_max_soft_body_contacts": 1024 * 1024,
    "gpu_max_particle_contacts": 1024 * 1024,
    "gpu_heap_capacity": 64 * 1024 * 1024,
    "gpu_temp_buffer_capacity": 16 * 1024 * 1024,
    "gpu_max_num_partitions": 8,
    "gpu_collision_stack_size": 64 * 1024 * 1024,
    ### Per-actor settings ( can override in actor_options )
    "solver_position_iteration_count": 4,
    "solver_velocity_iteration_count": 1,
    "sleep_threshold": 0.0,  # Mass-normalized kinetic energy threshold below which an actor may go to sleep.
    # Allowed range [0, max_float).
    "stabilization_threshold": 0.0,  # Mass-normalized kinetic energy threshold below which an actor may
    # participate in stabilization. Allowed range [0, max_float).
    ### Per-body settings ( can override in actor_options )
    "enable_gyroscopic_forces": False,
    "density": 1000.0,  # density to be used for bodies that do not specify mass or density
    "max_depenetration_velocity": 100.0,
    ### Per-shape settings ( can override in actor_options )
    "contact_offset": 0.02,
    "rest_offset": 0.001,
}
# Default physics material; merged per-key with any "default_physics_material" task config entry.
default_physics_material = {"static_friction": 1.0, "dynamic_friction": 1.0, "restitution": 0.0}
# Top-level simulation defaults (gravity, timesteps, pipeline and feature toggles).
default_sim_params = {
    "gravity": [0.0, 0.0, -9.81],
    "dt": 1.0 / 60.0,
    "rendering_dt": -1.0,  # we don't want to override this if it's set from cfg
    "substeps": 1,
    "use_gpu_pipeline": True,
    "add_ground_plane": True,
    "add_distant_light": True,
    "use_fabric": True,
    "enable_scene_query_support": False,
    "enable_cameras": False,
    "disable_contact_processing": False,
    "default_physics_material": default_physics_material,
}
default_actor_options = {
    # -1 means use authored value from USD or default values from default_sim_params if not explicitly authored in USD.
    # If an attribute value is not explicitly authored in USD, add one with the value given here,
    # which overrides the USD default.
    "override_usd_defaults": False,
    "make_kinematic": -1,
    "enable_self_collisions": -1,
    "enable_gyroscopic_forces": -1,
    "solver_position_iteration_count": -1,
    "solver_velocity_iteration_count": -1,
    "sleep_threshold": -1,
    "stabilization_threshold": -1,
    "max_depenetration_velocity": -1,
    "density": -1,
    "mass": -1,
    "contact_offset": -1,
    "rest_offset": -1,
}
class SimConfig:
    def __init__(self, config: dict = None):
        """Parse sim/physx/actor parameters from ``config`` and apply global Kit settings.

        Args:
            config (dict, optional): full experiment config; the "task" sub-dict
                supplies sim parameter overrides. Defaults to an empty dict.
        """
        if config is None:
            config = dict()
        self._config = config
        self._cfg = config.get("task", dict())
        self._parse_config()
        # test mode needs scene queries (e.g. for inspection/debug raycasts)
        if self._config["test"] == True:
            self._sim_params["enable_scene_query_support"] = True
        # headless without cameras/livestream/recording: disable all viewport work
        if (
            self._config["headless"] == True
            and not self._sim_params["enable_cameras"]
            and not self._config["enable_livestream"]
            and not self._config.get("enable_recording", False)
        ):
            self._sim_params["use_fabric"] = False
            self._sim_params["enable_viewport"] = False
        else:
            self._sim_params["enable_viewport"] = True
            enable_extension("omni.kit.viewport.bundle")
            if self._sim_params["enable_cameras"] or self._config.get("enable_recording", False):
                enable_extension("omni.replicator.isaac")
        # self._sim_params["warp"] = self._config["warp"]
        self._sim_params["sim_device"] = self._config["sim_device"]
        self._adjust_dt()
        if self._sim_params["disable_contact_processing"]:
            carb.settings.get_settings().set_bool("/physics/disableContactProcessing", True)
        carb.settings.get_settings().set_bool("/physics/physxDispatcher", True)
        # Force the background grid off all the time for RL tasks, to avoid the grid showing up in any RL camera task
        carb.settings.get_settings().set("/app/viewport/grid/enabled", False)
        # Disable framerate limiting which might cause rendering slowdowns
        carb.settings.get_settings().set("/app/runLoops/main/rateLimitEnabled", False)
        import omni.ui
        # Dock floating UIs this might not be needed anymore as extensions dock themselves
        # Method for docking a particular window to a location
        def dock_window(space, name, location, ratio=0.5):
            window = omni.ui.Workspace.get_window(name)
            if window and space:
                window.dock_in(space, location, ratio=ratio)
            return window
        # Acquire the main docking station
        main_dockspace = omni.ui.Workspace.get_window("DockSpace")
        dock_window(main_dockspace, "Content", omni.ui.DockPosition.BOTTOM, 0.3)
        # hide windows that are not useful during RL training
        window = omni.ui.Workspace.get_window("Content")
        if window:
            window.visible = False
        window = omni.ui.Workspace.get_window("Simulation Settings")
        if window:
            window.visible = False
def _parse_config(self):
# general sim parameter
self._sim_params = copy.deepcopy(default_sim_params)
self._default_physics_material = copy.deepcopy(default_physics_material)
sim_cfg = self._cfg.get("sim", None)
if sim_cfg is not None:
for opt in sim_cfg.keys():
if opt in self._sim_params:
if opt == "default_physics_material":
for material_opt in sim_cfg[opt]:
self._default_physics_material[material_opt] = sim_cfg[opt][material_opt]
else:
self._sim_params[opt] = sim_cfg[opt]
else:
print("Sim params does not have attribute: ", opt)
self._sim_params["default_physics_material"] = self._default_physics_material
# physx parameters
self._physx_params = copy.deepcopy(default_physx_params)
if sim_cfg is not None and "physx" in sim_cfg:
for opt in sim_cfg["physx"].keys():
if opt in self._physx_params:
self._physx_params[opt] = sim_cfg["physx"][opt]
else:
print("Physx sim params does not have attribute: ", opt)
self._sanitize_device()
def _sanitize_device(self):
if self._sim_params["use_gpu_pipeline"]:
self._physx_params["use_gpu"] = True
# device should be in sync with pipeline
if self._sim_params["use_gpu_pipeline"]:
self._config["sim_device"] = f"cuda:{self._config['device_id']}"
else:
self._config["sim_device"] = "cpu"
# also write to physics params for setting sim device
self._physx_params["sim_device"] = self._config["sim_device"]
print("Pipeline: ", "GPU" if self._sim_params["use_gpu_pipeline"] else "CPU")
print("Pipeline Device: ", self._config["sim_device"])
print("Sim Device: ", "GPU" if self._physx_params["use_gpu"] else "CPU")
def parse_actor_config(self, actor_name):
actor_params = copy.deepcopy(default_actor_options)
if "sim" in self._cfg and actor_name in self._cfg["sim"]:
actor_cfg = self._cfg["sim"][actor_name]
for opt in actor_cfg.keys():
if actor_cfg[opt] != -1 and opt in actor_params:
actor_params[opt] = actor_cfg[opt]
elif opt not in actor_params:
print("Actor params does not have attribute: ", opt)
return actor_params
def _get_actor_config_value(self, actor_name, attribute_name, attribute=None):
actor_params = self.parse_actor_config(actor_name)
if attribute is not None:
if attribute_name not in actor_params:
return attribute.Get()
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
elif actor_params["override_usd_defaults"] and not attribute.IsAuthored():
return self._physx_params[attribute_name]
else:
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
def _adjust_dt(self):
# re-evaluate rendering dt to simulate physics substeps
physics_dt = self.sim_params["dt"]
rendering_dt = self.sim_params["rendering_dt"]
# by default, rendering dt = physics dt
if rendering_dt <= 0:
rendering_dt = physics_dt
self.task_config["renderingInterval"] = max(round((1/physics_dt) / (1/rendering_dt)), 1)
# we always set rendering dt to be the same as physics dt, stepping is taken care of in VecEnvRLGames
self.sim_params["rendering_dt"] = physics_dt
@property
def sim_params(self):
return self._sim_params
@property
def config(self):
return self._config
@property
def task_config(self):
return self._cfg
@property
def physx_params(self):
return self._physx_params
def get_physics_params(self):
return {**self.sim_params, **self.physx_params}
def _get_physx_collision_api(self, prim):
from pxr import PhysxSchema, UsdPhysics
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
return physx_collision_api
def _get_physx_rigid_body_api(self, prim):
from pxr import PhysxSchema, UsdPhysics
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI(prim)
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
return physx_rb_api
def _get_physx_articulation_api(self, prim):
from pxr import PhysxSchema, UsdPhysics
arti_api = PhysxSchema.PhysxArticulationAPI(prim)
if not arti_api:
arti_api = PhysxSchema.PhysxArticulationAPI.Apply(prim)
return arti_api
def set_contact_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
contact_offset = physx_collision_api.GetContactOffsetAttr()
# if not contact_offset:
# contact_offset = physx_collision_api.CreateContactOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "contact_offset", contact_offset)
if value != -1:
contact_offset.Set(value)
def set_rest_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
rest_offset = physx_collision_api.GetRestOffsetAttr()
# if not rest_offset:
# rest_offset = physx_collision_api.CreateRestOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "rest_offset", rest_offset)
if value != -1:
rest_offset.Set(value)
def set_position_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_position_iteration_count = physx_rb_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(
name, "solver_position_iteration_count", solver_position_iteration_count
)
if value != -1:
solver_position_iteration_count.Set(value)
def set_velocity_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_velocity_iteration_count = physx_rb_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(
name, "solver_velocity_iteration_count", solver_velocity_iteration_count
)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_max_depenetration_velocity(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
max_depenetration_velocity = physx_rb_api.GetMaxDepenetrationVelocityAttr()
if value is None:
value = self._get_actor_config_value(name, "max_depenetration_velocity", max_depenetration_velocity)
if value != -1:
max_depenetration_velocity.Set(value)
def set_sleep_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
sleep_threshold = physx_rb_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_stabilization_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
stabilization_threshold = physx_rb_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def set_gyroscopic_forces(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
enable_gyroscopic_forces = physx_rb_api.GetEnableGyroscopicForcesAttr()
if value is None:
value = self._get_actor_config_value(name, "enable_gyroscopic_forces", enable_gyroscopic_forces)
if value != -1:
enable_gyroscopic_forces.Set(value)
def set_density(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
density = physx_rb_api.GetDensityAttr()
if value is None:
value = self._get_actor_config_value(name, "density", density)
if value != -1:
density.Set(value)
# auto-compute mass
self.set_mass(prim, 0.0)
def set_mass(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
mass = physx_rb_api.GetMassAttr()
if value is None:
value = self._get_actor_config_value(name, "mass", mass)
if value != -1:
mass.Set(value)
def retain_acceleration(self, prim):
# retain accelerations if running with more than one substep
physx_rb_api = self._get_physx_rigid_body_api(prim)
if self._sim_params["substeps"] > 1:
physx_rb_api.GetRetainAccelerationsAttr().Set(True)
    def make_kinematic(self, name, prim, cfg, value=None):
        """Make prim and its whole subtree kinematic (immovable) rigid bodies.

        Walks the prim hierarchy breadth-first and sets KinematicEnabled on every
        descendant that carries a RigidBodyAPI. `cfg` is accepted but unused here.
        """
        # make rigid body kinematic (fixed base and no collision)
        from pxr import PhysxSchema, UsdPhysics
        stage = omni.usd.get_context().get_stage()
        if value is None:
            value = self._get_actor_config_value(name, "make_kinematic")
        if value == True:
            # parse through all children prims (BFS with a FIFO list)
            prims = [prim]
            while len(prims) > 0:
                cur_prim = prims.pop(0)
                rb = UsdPhysics.RigidBodyAPI.Get(stage, cur_prim.GetPath())
                if rb:
                    rb.CreateKinematicEnabledAttr().Set(True)
                children_prims = cur_prim.GetPrim().GetChildren()
                prims = prims + children_prims
def set_articulation_position_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_position_iteration_count = arti_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(
name, "solver_position_iteration_count", solver_position_iteration_count
)
if value != -1:
solver_position_iteration_count.Set(value)
def set_articulation_velocity_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_velocity_iteration_count = arti_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(
name, "solver_velocity_iteration_count", solver_velocity_iteration_count
)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_articulation_sleep_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
sleep_threshold = arti_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_articulation_stabilization_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
stabilization_threshold = arti_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def apply_rigid_body_settings(self, name, prim, cfg, is_articulation):
from pxr import PhysxSchema, UsdPhysics
stage = omni.usd.get_context().get_stage()
rb_api = UsdPhysics.RigidBodyAPI.Get(stage, prim.GetPath())
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Get(stage, prim.GetPath())
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
# if it's a body in an articulation, it's handled at articulation root
if not is_articulation:
self.make_kinematic(name, prim, cfg, cfg["make_kinematic"])
self.set_position_iteration(name, prim, cfg["solver_position_iteration_count"])
self.set_velocity_iteration(name, prim, cfg["solver_velocity_iteration_count"])
self.set_max_depenetration_velocity(name, prim, cfg["max_depenetration_velocity"])
self.set_sleep_threshold(name, prim, cfg["sleep_threshold"])
self.set_stabilization_threshold(name, prim, cfg["stabilization_threshold"])
self.set_gyroscopic_forces(name, prim, cfg["enable_gyroscopic_forces"])
# density and mass
mass_api = UsdPhysics.MassAPI.Get(stage, prim.GetPath())
if mass_api is None:
mass_api = UsdPhysics.MassAPI.Apply(prim)
mass_attr = mass_api.GetMassAttr()
density_attr = mass_api.GetDensityAttr()
if not mass_attr:
mass_attr = mass_api.CreateMassAttr()
if not density_attr:
density_attr = mass_api.CreateDensityAttr()
if cfg["density"] != -1:
density_attr.Set(cfg["density"])
mass_attr.Set(0.0) # mass is to be computed
elif cfg["override_usd_defaults"] and not density_attr.IsAuthored() and not mass_attr.IsAuthored():
density_attr.Set(self._physx_params["density"])
self.retain_acceleration(prim)
def apply_rigid_shape_settings(self, name, prim, cfg):
from pxr import PhysxSchema, UsdPhysics
stage = omni.usd.get_context().get_stage()
# collision APIs
collision_api = UsdPhysics.CollisionAPI(prim)
if not collision_api:
collision_api = UsdPhysics.CollisionAPI.Apply(prim)
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
self.set_contact_offset(name, prim, cfg["contact_offset"])
self.set_rest_offset(name, prim, cfg["rest_offset"])
    def apply_articulation_settings(self, name, prim, cfg):
        """Apply rigid-body, collision and articulation settings across prim's subtree.

        First detects (via a BFS over the subtree) whether any prim carries an
        articulation root API, then walks the subtree again applying the
        relevant settings to each rigid body, collider, and articulation root.
        """
        from pxr import PhysxSchema, UsdPhysics
        stage = omni.usd.get_context().get_stage()
        is_articulation = False
        # check if is articulation (any prim in the subtree with a root API)
        prims = [prim]
        while len(prims) > 0:
            prim_tmp = prims.pop(0)
            articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, prim_tmp.GetPath())
            physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, prim_tmp.GetPath())
            if articulation_api or physx_articulation_api:
                is_articulation = True
            children_prims = prim_tmp.GetPrim().GetChildren()
            prims = prims + children_prims
        # parse through all children prims, applying settings per schema found
        prims = [prim]
        while len(prims) > 0:
            cur_prim = prims.pop(0)
            rb = UsdPhysics.RigidBodyAPI.Get(stage, cur_prim.GetPath())
            collision_body = UsdPhysics.CollisionAPI.Get(stage, cur_prim.GetPath())
            articulation = UsdPhysics.ArticulationRootAPI.Get(stage, cur_prim.GetPath())
            if rb:
                self.apply_rigid_body_settings(name, cur_prim, cfg, is_articulation)
            if collision_body:
                self.apply_rigid_shape_settings(name, cur_prim, cfg)
            if articulation:
                articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, cur_prim.GetPath())
                physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, cur_prim.GetPath())
                # enable self collisions
                enable_self_collisions = physx_articulation_api.GetEnabledSelfCollisionsAttr()
                if cfg["enable_self_collisions"] != -1:
                    enable_self_collisions.Set(cfg["enable_self_collisions"])
                self.set_articulation_position_iteration(name, cur_prim, cfg["solver_position_iteration_count"])
                self.set_articulation_velocity_iteration(name, cur_prim, cfg["solver_velocity_iteration_count"])
                self.set_articulation_sleep_threshold(name, cur_prim, cfg["sleep_threshold"])
                self.set_articulation_stabilization_threshold(name, cur_prim, cfg["stabilization_threshold"])
            children_prims = cur_prim.GetPrim().GetChildren()
            prims = prims + children_prims
| 24,620 |
Python
| 42.42328 | 119 | 0.635297 |
gitLSW/robot-cloud/training/omniisaacgymenvs/isaac_gym_env_utils.py
|
import asyncio
import queue
import torch
from skrl import logger
def get_env_instance(headless: bool = True,
                     enable_livestream: bool = False,
                     enable_viewport: bool = False,
                     multi_threaded: bool = False,
                     experience = None) -> "omni.isaac.gym.vec_env.VecEnvBase":
    """
    Instantiate a VecEnvBase-based object compatible with OmniIsaacGymEnvs
    :param headless: Disable UI when running (default: ``True``)
    :type headless: bool, optional
    :param enable_livestream: Whether to enable live streaming (default: ``False``)
    :type enable_livestream: bool, optional
    :param enable_viewport: Whether to enable viewport (default: ``False``)
    :type enable_viewport: bool, optional
    :param multi_threaded: Whether to return a multi-threaded environment instance (default: ``False``)
    :type multi_threaded: bool, optional
    :return: Environment instance
    :rtype: omni.isaac.gym.vec_env.VecEnvBase
    Example::
        from skrl.envs.wrappers.torch import wrap_env
        from skrl.utils.omniverse_isaacgym_utils import get_env_instance
        # get environment instance
        env = get_env_instance(headless=True)
        # parse sim configuration
        from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
        sim_config = SimConfig({"test": False,
                                "device_id": 0,
                                "headless": True,
                                "multi_gpu": False,
                                "sim_device": "gpu",
                                "enable_livestream": False,
                                "task": {"name": "CustomTask",
                                         "physics_engine": "physx",
                                         "env": {"numEnvs": 512,
                                                 "envSpacing": 1.5,
                                                 "enableDebugVis": False,
                                                 "clipObservations": 1000.0,
                                                 "clipActions": 1.0,
                                                 "controlFrequencyInv": 4},
                                         "sim": {"dt": 0.0083,  # 1 / 120
                                                 "use_gpu_pipeline": True,
                                                 "gravity": [0.0, 0.0, -9.81],
                                                 "add_ground_plane": True,
                                                 "use_flatcache": True,
                                                 "enable_scene_query_support": False,
                                                 "enable_cameras": False,
                                                 "default_physics_material": {"static_friction": 1.0,
                                                                              "dynamic_friction": 1.0,
                                                                              "restitution": 0.0},
                                                 "physx": {"worker_thread_count": 4,
                                                           "solver_type": 1,
                                                           "use_gpu": True,
                                                           "solver_position_iteration_count": 4,
                                                           "solver_velocity_iteration_count": 1,
                                                           "contact_offset": 0.005,
                                                           "rest_offset": 0.0,
                                                           "bounce_threshold_velocity": 0.2,
                                                           "friction_offset_threshold": 0.04,
                                                           "friction_correlation_distance": 0.025,
                                                           "enable_sleeping": True,
                                                           "enable_stabilization": True,
                                                           "max_depenetration_velocity": 1000.0,
                                                           "gpu_max_rigid_contact_count": 524288,
                                                           "gpu_max_rigid_patch_count": 33554432,
                                                           "gpu_found_lost_pairs_capacity": 524288,
                                                           "gpu_found_lost_aggregate_pairs_capacity": 262144,
                                                           "gpu_total_aggregate_pairs_capacity": 1048576,
                                                           "gpu_max_soft_body_contacts": 1048576,
                                                           "gpu_max_particle_contacts": 1048576,
                                                           "gpu_heap_capacity": 33554432,
                                                           "gpu_temp_buffer_capacity": 16777216,
                                                           "gpu_max_num_partitions": 8}}}})
        # import and setup custom task
        from custom_task import CustomTask
        task = CustomTask(name="CustomTask", sim_config=sim_config, env=env)
        env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=True)
        # wrap the environment
        env = wrap_env(env, "omniverse-isaacgym")
    """
    # deferred imports: omni.isaac.gym is only importable inside an Isaac Sim process
    from omni.isaac.gym.vec_env import TaskStopException, VecEnvBase, VecEnvMT
    from omni.isaac.gym.vec_env.vec_env_mt import TrainerMT
    # single-threaded variant: steps the world synchronously in step()
    class _OmniIsaacGymVecEnv(VecEnvBase):
        def step(self, actions):
            # clamp actions to the task's action range before applying them
            actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device).clone()
            self._task.pre_physics_step(actions)
            # step physics control_frequency_inv times per policy step
            for _ in range(self._task.control_frequency_inv):
                self._world.step(render=self._render)
                self.sim_frame_count += 1
            observations, rewards, dones, info = self._task.post_physics_step()
            return {"obs": torch.clamp(observations, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()}, \
                rewards.to(self._task.rl_device).clone(), dones.to(self._task.rl_device).clone(), info.copy()
        def reset(self):
            self._task.reset()
            # a zero-action step produces the first observation
            actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device)
            return self.step(actions)[0]
    # no-op trainer: the MT env requires a TrainerMT instance for its run loop
    class _OmniIsaacGymTrainerMT(TrainerMT):
        def run(self):
            pass
        def stop(self):
            pass
    # multi-threaded variant: exchanges actions/data with the sim thread via queues
    class _OmniIsaacGymVecEnvMT(VecEnvMT):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.set_render_mode(-1 if kwargs['headless'] else 2)
            # queues of size 1: step() blocks until the sim thread consumes/produces
            self.action_queue = queue.Queue(1)
            self.data_queue = queue.Queue(1)
        def run(self, trainer=None):
            asyncio.run(super().run(_OmniIsaacGymTrainerMT() if trainer is None else trainer))
        def _parse_data(self, data):
            # clamp and move incoming sim data onto the RL device
            self._observations = torch.clamp(data["obs"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()
            self._rewards = data["rew"].to(self._task.rl_device).clone()
            self._dones = data["reset"].to(self._task.rl_device).clone()
            self._info = data["extras"].copy()
        def step(self, actions):
            if self._stop:
                raise TaskStopException()
            actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).clone()
            self.send_actions(actions)  # send actions to the sim thread
            data = self.get_data()  # this waits until data queue has content and then calls _parse_data
            return {"obs": self._observations}, self._rewards, self._dones, self._info
        def reset(self):
            self._task.reset()
            actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device)
            return self.step(actions)[0]
        def close(self):
            # send stop signal to main thread
            self.send_actions(None)
            # NOTE(review): this assigns an attribute named `stop` (shadowing any
            # inherited stop()) while step() checks `self._stop` — presumably
            # `self._stop = True` was intended; confirm against VecEnvMT.
            self.stop = True
    if multi_threaded:
        try:
            return _OmniIsaacGymVecEnvMT(headless=headless, enable_livestream=enable_livestream, enable_viewport=enable_viewport, experience=experience)
        except TypeError:
            # older API does not accept the livestream/viewport kwargs
            logger.warning("Using an older version of Isaac Sim (2022.2.0 or earlier)")
            return _OmniIsaacGymVecEnvMT(headless=headless, experience=experience)
    else:
        try:
            return _OmniIsaacGymVecEnv(headless=headless, enable_livestream=enable_livestream, enable_viewport=enable_viewport, experience=experience)
        except TypeError:
            logger.warning("Using an older version of Isaac Sim (2022.2.0 or earlier)")
            return _OmniIsaacGymVecEnv(headless=headless, experience=experience)  # Isaac Sim 2022.2.0 and earlier
| 9,114 |
Python
| 52.304093 | 152 | 0.475752 |
gitLSW/robot-cloud/remnants/SKRL_easy_pack_task.py
|
import os
import math
import random
import torch
from pxr import Gf, UsdLux, Sdf
from gymnasium import spaces
import omni.kit.commands
from omni.isaac.core.utils.extensions import enable_extension
# enable_extension("omni.importer.urdf")
enable_extension("omni.isaac.universal_robots")
enable_extension("omni.isaac.sensor")
# from omni.importer.urdf import _urdf
from omni.isaac.sensor import Camera
from omni.isaac.universal_robots.ur10 import UR10
from omni.isaac.universal_robots import KinematicsSolver
# from omni.isaac.universal_robots.controllers.pick_place_controller import PickPlaceController
import omni.isaac.core.utils.prims as prims_utils
from omni.isaac.core.prims import XFormPrim, XFormPrimView, RigidPrim, RigidPrimView
from omni.isaac.core.materials.physics_material import PhysicsMaterial
from omni.isaac.core.utils.prims import create_prim, get_prim_at_path
from omni.isaac.core.tasks.base_task import BaseTask
from omni.isaac.gym.tasks.rl_task import RLTaskInterface
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.viewports import set_camera_view
from omni.kit.viewport.utility import get_active_viewport
import omni.isaac.core.objects as objs
import omni.isaac.core.utils.numpy.rotations as rot_utils
from omni.isaac.core.utils.rotations import lookat_to_quatf, gf_quat_to_np_array
from omni.physx.scripts.utils import setRigidBody, setStaticCollider, setColliderSubtree, setCollider, addCollisionGroup, setPhysics, removePhysics, removeRigidBody
from scipy.spatial.transform import Rotation as R
from pyquaternion import Quaternion
from omniisaacgymenvs.rl_task import RLTask
from omni.isaac.core.robots.robot_view import RobotView
from omni.isaac.cloner import GridCloner
# --- Task tuning constants and scene layout ---
LEARNING_STARTS = 10
# parts below this height (m) are considered fallen/dropped
FALLEN_PART_THRESHOLD = 0.2
ROBOT_PATH = 'World/UR10e'
ROBOT_POS = torch.tensor([0.0, 0.0, FALLEN_PART_THRESHOLD])
LIGHT_PATH = 'World/Light'
LIGHT_OFFSET = torch.tensor([0, 0, 2])
DEST_BOX_PATH = "World/DestinationBox"
DEST_BOX_POS = torch.tensor([0, -0.65, FALLEN_PART_THRESHOLD])
PART_PATH = 'World/Part'
PART_OFFSET = torch.tensor([0, 0, 0.4])
# NUM_PARTS = 5
PART_PILLAR_PATH = "World/Pillar"
MAX_STEP_PUNISHMENT = 300
START_TABLE_POS = torch.tensor([0.36, 0.8, 0])
START_TABLE_HEIGHT = 0.6
START_TABLE_CENTER = START_TABLE_POS + torch.tensor([0, 0, START_TABLE_HEIGHT])
# target (position, quaternion) pairs for each part slot inside the destination box
IDEAL_PACKAGING = [([-0.06, -0.19984, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, -0.14044, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, -0.07827, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, -0.01597, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, 0.04664, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, 0.10918, 0.0803], [0.072, 0.99, 0, 0])]
NUMBER_PARTS = len(IDEAL_PACKAGING)
# local asset directory (fallback for Nucleus-hosted USD files)
local_assets = os.getcwd() + '/assets'
# Default simulation/task configuration consumed by SimConfig (see sim_config.py).
TASK_CFG = {
    "test": False,
    "device_id": 0,
    "headless": False,
    "multi_gpu": False,
    "sim_device": "cpu",
    "enable_livestream": False,
    "task": {
        "name": 'Pack_Task',
        "physics_engine": "physx",
        "env": {
            "numEnvs": 100,
            "envSpacing": 1.5,
            "episodeLength": 100,
            # "enableDebugVis": False,
            # "controlFrequencyInv": 4
        },
        "sim": {
            "dt": 1 / 60,
            "use_gpu_pipeline": True,
            "gravity": [0.0, 0.0, -9.81],
            "add_ground_plane": True,
            "use_flatcache": True,
            "enable_scene_query_support": False,
            "enable_cameras": False,
            "default_physics_material": {
                "static_friction": 1.0,
                "dynamic_friction": 1.0,
                "restitution": 0.0
            },
            # PhysX solver and GPU buffer sizing parameters
            "physx": {
                "worker_thread_count": 4,
                "solver_type": 1,
                "use_gpu": True,
                "solver_position_iteration_count": 4,
                "solver_velocity_iteration_count": 1,
                "contact_offset": 0.005,
                "rest_offset": 0.0,
                "bounce_threshold_velocity": 0.2,
                "friction_offset_threshold": 0.04,
                "friction_correlation_distance": 0.025,
                "enable_sleeping": True,
                "enable_stabilization": True,
                "max_depenetration_velocity": 1000.0,
                "gpu_max_rigid_contact_count": 524288,
                "gpu_max_rigid_patch_count": 33554432,
                "gpu_found_lost_pairs_capacity": 524288,
                "gpu_found_lost_aggregate_pairs_capacity": 262144,
                "gpu_total_aggregate_pairs_capacity": 1048576,
                "gpu_max_soft_body_contacts": 1048576,
                "gpu_max_particle_contacts": 1048576,
                "gpu_heap_capacity": 33554432,
                "gpu_temp_buffer_capacity": 16777216,
                "gpu_max_num_partitions": 8
            }
        }
    }
}
class PackTask(RLTask):
control_frequency_inv = 1
# kinematics_solver = None
"""
This class sets up a scene and calls a RL Policy, then evaluates the behaivior with rewards
Args:
offset (Optional[np.ndarray], optional): offset applied to all assets of the task.
sim_s_step_freq (int): The amount of simulation steps within a SIMULATED second.
"""
    def __init__(self, name, sim_config, env, offset=None) -> None:
        """Set observation/action sizes, parse the config, then init RLTask.

        Observations: 13 base values (gripper flag, 6 joints, 3 pos diff,
        3 rot diff) plus (distance, rotation-distance) per part slot.
        Actions: 7 values (delta gripper pose + gripper open/close).
        """
        # self.observation_space = spaces.Dict({
        #     'robot_state': spaces.Box(low=-2 * torch.pi, high=2 * torch.pi, shape=(6,)),
        #     'gripper_closed': spaces.Discrete(2),
        #     # 'forces': spaces.Box(low=-1, high=1, shape=(8, 6)), # Forces on the Joints
        #     'box_state': spaces.Box(low=-3, high=3, shape=(NUMBER_PARTS, 2)), # Pos and Rot Distance of each part currently placed in Box compared to currently gripped part
        #     'part_pos_diff': spaces.Box(low=-3, high=3, shape=(3,)),
        #     'part_rot_diff': spaces.Box(low=-1, high=1, shape=(3,))
        # })
        self._num_observations = 13 + 2 * NUMBER_PARTS
        # End Effector Pose
        # self.action_space = spaces.Box(low=-1, high=1, shape=(7,), dtype=float) # Delta Gripper Pose & gripper open / close
        self._num_actions = 7
        self.update_config(sim_config)
        # trigger __init__ of parent class
        super().__init__(name, env, offset)
def cleanup(self):
super().cleanup()
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self.dt = self._task_cfg["sim"]["dt"]
self._device = self._cfg["sim_device"]
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
# Robot turning ange of max speed is 191deg/s
self._max_joint_rot_speed = (191.0 * math.pi / 180) * self.dt
super().update_config(sim_config)
    def set_up_scene(self, scene) -> None:
        """Build env 0, let RLTask clone it, then register prim views for all envs."""
        print('SETUP TASK', self.name)
        self.create_env0()
        super().set_up_scene(scene) # Clones env0
        # one view over all destination boxes across the cloned envs
        self._boxes_view = XFormPrimView(prim_paths_expr=f'{self.default_base_env_path}/.*/box',
                                         name='box_view',
                                         reset_xform_properties=False)
        scene.add(self._boxes_view)
        # self._parts_views = []
        # for i in range(NUMBER_PARTS):
        # NOTE(review): only one part view (index 0) is created — the per-part
        # loop is commented out, so multi-part support is incomplete here.
        i=0
        self._parts_view = RigidPrimView(prim_paths_expr=f'{self.default_base_env_path}/.*/part_{i}',
                                         name=f'part_{i}_view',
                                         reset_xform_properties=False)
        scene.add(self._parts_view)
        # self._parts_views.append(parts_view)
        self._robots_view = RobotView(prim_paths_expr=f'{self.default_base_env_path}/.*/robot', name='ur10_view')
        scene.add(self._robots_view)
        # end-effector links, used for gripper pose queries
        self._grippers = RigidPrimView(prim_paths_expr=f'{self.default_base_env_path}/.*/robot/ee_link', name="gripper_view")
        scene.add(self._grippers)
        # per-env convenience wrappers over the view prim paths
        self._curr_parts = [XFormPrim(prim_path=path) for path in self._parts_view.prim_paths]
        self._robots = [UR10(prim_path=robot_path, attach_gripper=True) for robot_path in self._robots_view.prim_paths]
        # # self.table = RigidPrim(rim_path=self._start_table_path, name='TABLE')
        # self._task_objects[self._start_table_path] = self.table
def create_env0(self):
# This is the URL from which the Assets are downloaded
# Make sure you started and connected to your localhost Nucleus Server via Omniverse !!!
assets_root_path = get_assets_root_path()
env0_box_path = self.default_zero_env_path + '/box'
box_usd_path = assets_root_path + '/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxA_02.usd'
box_usd_path = local_assets + '/SM_CardBoxA_02.usd'
add_reference_to_stage(box_usd_path, env0_box_path)
box = XFormPrim(prim_path=env0_box_path,
position=DEST_BOX_POS,
scale=[1, 1, 0.4])
setStaticCollider(box.prim, approximationShape='convexDecomposition')
# for i in range(NUMBER_PARTS):
env0_part_path = f'{self.default_zero_env_path}/part_{i}'
part_usd_path = local_assets + '/draexlmaier_part.usd'
add_reference_to_stage(part_usd_path, env0_part_path)
part = RigidPrim(prim_path=env0_part_path,
position=START_TABLE_CENTER + torch.tensor([0, 0.06 * i - 0.2, 0.1]),
orientation=[0, 1, 0, 0]) # [-0.70711, 0.70711, 0, 0]
setRigidBody(part.prim, approximationShape='convexDecomposition', kinematic=False) # Kinematic True means immovable
# The UR10e has 6 joints, each with a maximum:
# turning angle of -360 deg to +360 deg
# turning ange of max speed is 191deg/s
env0_robot_path = self.default_zero_env_path + '/robot'
robot = UR10(prim_path=env0_robot_path, name='UR10', position=ROBOT_POS, attach_gripper=True)
robot.set_enabled_self_collisions(True)
# env0_table_path = f'{self.default_zero_env_path}/table'
# table_path = assets_root_path + "/Isaac/Environments/Simple_Room/Props/table_low.usd"
# # table_path = local_assets + '/table_low.usd'
# add_reference_to_stage(table_path, env0_table_path)
# table = XFormPrim(prim_path=env0_table_path, position=START_TABLE_POS, scale=[0.5, START_TABLE_HEIGHT, 0.4])
# setRigidBody(table.prim, approximationShape='convexHull', kinematic=True) # Kinematic True means immovable
    def reset(self):
        """Reset all environments: clear placed parts, re-home robots, reset part poses."""
        super().reset()
        super().cleanup()
        # forget every previously placed part, one empty list per env
        self._placed_parts = [[] for _ in range(self._num_envs)]
        for env_index in range(self._num_envs):
            robot = self._robots[env_index]
            # physics handles are only valid after the sim has started
            if not robot.handles_initialized:
                robot.initialize()
            self.reset_env(env_index)
        # park all parts at a fixed staging pose, one row per env
        pos = torch.tensor([1, 0, 0]).to(torch.float32).repeat(self._num_envs, 1)
        rots = torch.tensor([0, 1, 0, 0]).to(torch.float32).repeat(self._num_envs, 1)
        self._parts_view.set_world_poses(pos, rots)
def reset_env(self, env_index):
self.progress_buf[env_index] = 0
self.reset_buf[env_index] = False
self.reset_robot(env_index)
def reset_robot(self, env_index):
robot = self._robots[env_index]
default_pose = torch.tensor([math.pi / 2, -math.pi / 2, -math.pi / 2, -math.pi / 2, math.pi / 2, 0])
robot.set_joint_positions(positions=default_pose)
robot.gripper.open()
    def reset_part(self, env_index):
        """Teleport env `env_index`'s part into the gripper, ready to be grasped.

        The offset [-0.00147, 0.0154, 0.193] positions the part relative to the
        end-effector pose.
        """
        gripper = self._robots[env_index].gripper
        # NOTE(review): the outer list wrap adds a batch dimension of 1 so the
        # view call below receives shape (1, 3) — presumably intentional, but
        # wrapping a tensor in torch.tensor([...]) is unusual; confirm dtype/shape.
        part_pos = torch.tensor([gripper.get_world_pose()[0] - torch.tensor([-0.00147, 0.0154, 0.193], device=self._device)])
        # part = self._curr_parts[env_index]
        # part.set_world_pose(part_pos, [-0.70711, 0.70711, 0, -0.06])
        self._parts_view.set_world_poses(part_pos, torch.tensor([[-0.70711, 0.70711, 0, -0.06]]), [env_index])
        # removePhysics(part.prim)
        # setRigidBody(part.prim, approximationShape='convexDecomposition', kinematic=False)
# def next_part(self, env_index) -> None:
# gripper = self._robots[env_index].gripper
# gripper_pos = torch.tensor(gripper.get_world_pose()[0]) - torch.tensor([0, 0, 0.05], device=self._device)
# env0_part_path = self.default_zero_env_path + '/part_0'
# part_usd_path = local_assets + '/draexlmaier_part.usd'
# add_reference_to_stage(part_usd_path, env0_part_path)
# part = RigidPrim(prim_path=env0_part_path,
# position=gripper_pos,
# orientation=[0, 1, 0, 0],
# mass=0.5)
# self.world.scene.add(part)
# self._placed_parts[env_index].append(self._curr_parts[env_index])
# self._curr_parts[env_index] = part
# _placed_parts # [[part]] where each entry in the outer array is the placed parts for env at index
# Returns: A 2D Array where each entry is the poses of the parts in the box
    def get_observations(self):
        """Fill self.obs_buf for every env.

        Per-env layout: [0] gripper closed flag, [1:7] joint positions,
        [7:10] current part position error, [10:13] current part rotation error
        (euler), [13:13+2*NUMBER_PARTS] per-slot (distance, rotation-distance)
        pairs against the closest ideal packaging pose. Unfilled slots get the
        worst-case values (3 m, pi rad).
        """
        def _shortest_rot_dist(quat_1, quat_2):
            # absolute quaternion distance = shortest rotation between the two poses
            part_quat = Quaternion(list(quat_1))
            ideal_quat = Quaternion(list(quat_2))
            return Quaternion.absolute_distance(part_quat, ideal_quat)
        boxes_pos = self._boxes_view.get_world_poses()[0] # Returns: [Array of all pos, Array of all rots]
        # obs_dicts = []
        for env_index in range(self._num_envs):
            # env_obs = { 'box_state': [] }
            robot = self._robots[env_index]
            gripper_closed = robot.gripper.is_closed()
            self.obs_buf[env_index, 0] = gripper_closed
            # env_obs['gripper_closed'] = gripper_closed
            robot_state = robot.get_joint_positions()
            self.obs_buf[env_index, 1:7] = robot_state
            ideal_selection = IDEAL_PACKAGING.copy()
            box_pos = boxes_pos[env_index]
            curr_part = self._curr_parts[env_index]
            # evaluate all placed parts plus the currently gripped one
            eval_parts = self._placed_parts[env_index] + [curr_part]
            # box_state = []
            # ideal_pose_for_curr_part = None
            for part_index in range(NUMBER_PARTS):
                if len(eval_parts) <= part_index:
                    # The worst possible distance is 3m and 180deg
                    self.obs_buf[env_index, (13 + 2 * part_index)] = torch.scalar_tensor(3)
                    self.obs_buf[env_index, (14 + 2 * part_index)] = torch.pi
                    # env_obs['box_state'].append([3, torch.pi])
                    continue
                part_pos, part_rot = eval_parts[part_index].get_world_pose()
                # work in box-relative coordinates
                part_pos -= box_pos
                ideal_part = None
                min_dist = 10000000
                # Find closest ideal part
                for pot_part in ideal_selection:
                    dist = torch.linalg.norm(torch.tensor(pot_part[0], device=self._device) - part_pos)
                    if dist < min_dist:
                        ideal_part = pot_part
                        min_dist = dist
                rot_dist = _shortest_rot_dist(part_rot, ideal_part[1])
                self.obs_buf[env_index, (13 + 2 * part_index)] = min_dist
                self.obs_buf[env_index, (14 + 2 * part_index)] = rot_dist
                # env_obs['box_state'].append([min_dist, rot_dist])
                # the last evaluated part is the currently gripped one: also store
                # its full position/rotation error vectors
                if part_index == len(eval_parts) - 1:
                    part_pos_diff = part_pos - torch.tensor(ideal_part[0], device=self._device)
                    part_rot_euler = R.from_quat(part_rot.cpu()).as_euler('xyz', degrees=False)
                    ideal_rot_euler = R.from_quat(ideal_part[1]).as_euler('xyz', degrees=False)
                    part_rot_diff = torch.tensor(ideal_rot_euler - part_rot_euler)
                    self.obs_buf[env_index, 7:10] = part_pos_diff
                    self.obs_buf[env_index, 10:13] = part_rot_diff
                    # env_obs['part_pos_diff'] = part_pos_diff
                    # env_obs['part_rot_diff'] = part_rot_diff
            # obs_dicts.append(env_obs)
        # The return is itrrelevant for Multi Threading:
        # The VecEnvMT Loop calls RLTask.post_physics_step to get all the data from one step.
        # RLTask.post_physics_step is simply returning self.obs_buf, self.rew_buf,...
        # post_physics_step calls
        # - get_observations()
        # - get_states()
        # - calculate_metrics()
        # - is_done()
        # - get_extras()
        # NOTE(review): this returns the row for the *last* loop index only
        # (leaked loop variable) — presumably `return self.obs_buf` was meant;
        # harmless in the MT path per the comment above, but confirm for ST use.
        return self.obs_buf[env_index]
    def pre_physics_step(self, actions) -> None:
        """Apply the policy's joint-delta actions to each env before physics steps.

        Envs flagged in reset_buf are reset instead. On step 1 of an episode the
        part is teleported into the (already re-homed) gripper and grasped.
        `actions[:, 0:6]` are joint deltas scaled by the max joint speed;
        `actions[:, 6]` (gripper open/close) is currently disabled.
        """
        # reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        # if len(reset_env_ids) > 0:
        #     self.reset_idx(reset_env_ids)
        for env_index in range(self._num_envs):
            if self.reset_buf[env_index]:
                self.reset_env(env_index)
                continue
            # Rotate Joints
            robot = self._robots[env_index]
            gripper = robot.gripper
            env_step = self.progress_buf[env_index]
            if env_step == 1:
                # We cannot call this in the same step as reset robot since the world needs
                # to update once to update the gripper position to the new joint rotations
                self.reset_part(env_index)
                gripper.close()
                continue
            joint_rots = robot.get_joint_positions()
            joint_rots += torch.tensor(actions[env_index, 0:6]) * self._max_joint_rot_speed
            robot.set_joint_positions(positions=joint_rots)
            # Open or close Gripper
            # is_closed = gripper.is_closed()
            # gripper_action = actions[env_index, 6]
            # if 0.9 < gripper_action and is_closed:
            #     gripper.open()
            # elif gripper_action < -0.3 and not is_closed:
            #     gripper.close()
# Calculate Rewards
def calculate_metrics(self) -> None:
    """Per-environment reward computation (currently disabled).

    The intended reward — squared deviations of part position, part rotation
    and box state — is kept below as commented-out reference code. For now
    this is a no-op and ``self.rew_buf`` is never written.
    """
    pass
    # part_pos_diffs = self.obs_buf[:, 7:10]
    # part_rot_diffs = self.obs_buf[:, 10:13]
    # box_states = self.obs_buf[:, 13:(2 * NUMBER_PARTS)]
    # self.rew_buf = part_pos_diffs.squared().sum(dim=1) + part_rot_diffs.squared().sum(dim=1) + box_states.squared().sum(dim=1)
    # Terminate: tipped-over parts, dropped parts
    # Success:
    # part_pos, part_rot = self.part.get_world_pose()
    # any_flipped = False
    # for part in self.placed_parts:
    #     part_rot = part.get_world_pose()[1]
    #     if _is_flipped(part_rot):
    #         any_flipped = True
    #         break
    # if part_pos[2] < FALLEN_PART_THRESHOLD or self.max_steps < self.step or any_flipped:
    #     return -MAX_STEP_PUNISHMENT, True
    # box_state, _ = self.compute_box_state()
    # box_deviation = torch.sum(torch.square(box_state))
    # # placed_parts.append(self.part)
    # return -box_deviation, False
    # gripper_pos = self.robot.gripper.get_world_pose()[0]
    # self.step += 1
    # if self.step < LEARNING_STARTS:
    #     return 0, False
    # done = False
    # reward= 0
    # part_pos, part_rot = self.part.get_world_pose()
    # dest_box_pos = self.part.get_world_pose()[0]
    # part_to_dest = np.linalg.norm(dest_box_pos - part_pos) * 100 # In cm
    # print('PART TO BOX:', part_to_dest)
    # if 10 < part_to_dest:
    #     reward -= part_to_dest
    # else: # Part reached box
    #     # reward += (100 + self.max_steps - self.step) * MAX_STEP_PUNISHMENT
    #     ideal_part = _get_closest_part(part_pos)
    #     pos_error = np.linalg.norm(part_pos - ideal_part[0]) * 100
    #     rot_error = ((part_rot - ideal_part[1])**2).mean()
    #     print('PART REACHED BOX:', part_to_dest)
    #     # print('THIS MUST BE TRUE ABOUT THE PUNISHMENT:', pos_error + rot_error, '<', MAX_STEP_PUNISHMENT) # CHeck the average punishment of stage 0 to see how much it tapers off
    #     reward -= pos_error + rot_error
    # # if not done and (part_pos[2] < 0.1 or self.max_steps <= self.step): # Part was dropped or time ran out means end
    # #     reward -= (100 + self.max_steps - self.step) * MAX_STEP_PUNISHMENT
    # #     done = True
    # if done:
    #     print('END REWARD TASK', self.name, ':', reward)
    # return reward, done
def is_done(self):
    """Flags every environment whose episode has reached the step limit."""
    # An environment is due for reset once its progress counter hits 50 steps.
    step_limit_reached = self.progress_buf >= 50
    self.reset_buf = step_limit_reached
def _is_flipped(q1):
"""
Bestimmt, ob die Rotation von q0 zu q1 ein "Umfallen" darstellt,
basierend auf einem Winkel größer als 60 Grad zwischen der ursprünglichen
z-Achse und ihrer Rotation.
:param q0: Ursprüngliches Quaternion.
:param q1: Neues Quaternion.
:return: True, wenn der Winkel größer als 60 Grad ist, sonst False.
"""
q0 = torch.tensor([0, 1, 0, 0])
# Initialer Vektor, parallel zur z-Achse
v0 = torch.tensor([0, 0, 1])
# Konvertiere Quaternions in Rotation-Objekte
rotation0 = R.from_quat(q0)
rotation1 = R.from_quat(q1)
# Berechne die relative Rotation von q0 zu q1
q_rel = rotation1 * rotation0.inv()
# Berechne den rotierten Vektor v1
v1 = q_rel.apply(v0)
# Berechne den Winkel zwischen v0 und v1
cos_theta = np.dot(v0, v1) / (np.linalg.norm(v0) * np.linalg.norm(v1))
angle = np.arccos(np.clip(cos_theta, -1.0, 1.0)) * 180 / np.pi
# Prüfe, ob der Winkel größer als 60 Grad ist
return angle > 60
| 21,994 |
Python
| 41.298077 | 185 | 0.581204 |
gitLSW/robot-cloud/remnants/train_dreamer.py
|
import os

import dreamerv3
from dreamerv3 import embodied
from embodied.envs import from_gym

from gym_env import DreamerEnv

MODEL_NAME = "Dreamer"
MAX_STEPS_PER_EPISODE = 300
SIM_STEP_FREQ_HZ = 60

# See configs.yaml for all options.
config = embodied.Config(dreamerv3.configs['defaults'])
config = config.update(dreamerv3.configs['small'])
# config = config.update(dreamerv3.configs['medium'])
# config = config.update(dreamerv3.configs['large'])
#config = config.update(dreamerv3.configs['xlarge'])
config = config.update({
    'logdir': './progress/' + MODEL_NAME,
    'run.train_ratio': 64,
    'run.log_every': 30,  # Seconds
    'batch_size': 8,
    'batch_length': 16,
    'jax.prealloc': True,
    'jax.debug': True,
    'encoder.mlp_keys': 'vector',
    'decoder.mlp_keys': 'vector',
    'encoder.cnn_keys': 'image',
    'decoder.cnn_keys': 'image',
    'run.eval_every': 10000,
    #'jax.platform': 'cpu',
})
config = embodied.Flags(config).parse()

logdir = embodied.Path(config.logdir)
step = embodied.Counter()
logger = embodied.Logger(step, [
    embodied.logger.TerminalOutput(),
    # embodied.logger.TerminalOutput(config.filter),
    embodied.logger.JSONLOutput(logdir, 'metrics.jsonl'),
    embodied.logger.TensorBoardOutput(logdir),
    # NOTE(review): the second argument looks like a hard-coded W&B credential —
    # consider reading it from the environment instead of source control.
    embodied.logger.WandBOutput(r".*", 'qwertyasd', 'robot-cloud', MODEL_NAME, config),
    # embodied.logger.MLFlowOutput(logdir.name),
])

# Create Isaac environment and open Sim Window
# env = GymEnv(headless=False, experience=f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit')
# https://docs.omniverse.nvidia.com/isaacsim/latest/installation/manual_livestream_clients.html
env = DreamerEnv(headless=False, experience=f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit', enable_livestream=False)

from pack_task import PackTask  # Cannot be imported before Sim has started
task = PackTask(name="Pack", max_steps=MAX_STEPS_PER_EPISODE, sim_s_step_freq=SIM_STEP_FREQ_HZ)
env.set_task(task, backend="numpy", rendering_dt=1 / SIM_STEP_FREQ_HZ)
# env.reset()

# Wrap the Isaac env into Dreamer's batched embodied interface.
env = from_gym.FromGym(env, obs_key='image')
env = dreamerv3.wrap_env(env, config)
env = embodied.BatchEnv([env], parallel=False)

print('Starting Training...')
# env.act_space.discrete = True
# act_space = { 'action': env.act_space }
agent = dreamerv3.Agent(env.obs_space, env.act_space, step, config)
replay = embodied.replay.Uniform(config.batch_length, config.replay_size, logdir / 'replay')
args = embodied.Config(**config.run, logdir=config.logdir, batch_steps=config.batch_size * config.batch_length)
print(args)
embodied.run.train(agent, env, replay, logger, args)
# Fixed typo in the completion message ('Traing' -> 'Training').
print('Finished Training')

# env.close()
| 2,632 |
Python
| 36.084507 | 123 | 0.718465 |
gitLSW/robot-cloud/remnants/ur16e.py
|
from typing import Optional
import os

# Fix: numpy was never imported, but the `np.ndarray` annotations in UR16e's
# signature are evaluated at class-definition time and would raise NameError.
import numpy as np

from omni.isaac.core.robots.robot import Robot
from omni.isaac.manipulators.grippers.surface_gripper import SurfaceGripper
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.prims.rigid_prim import RigidPrim
from omni.isaac.core.utils.extensions import enable_extension, get_extension_path_from_name

# Fix: the extension name was misspelled ('omni.isaac.motion_gneration'), which
# would fail to enable the extension and break the imports below. The correct
# name matches the get_extension_path_from_name() call later in this file.
# (Also dropped an exact duplicate import of Robot.)
enable_extension('omni.isaac.motion_generation')
from omni.isaac.motion_generation import LulaKinematicsSolver, ArticulationKinematicsSolver
from omni.isaac.motion_generation import interface_config_loader
class UR16e(Robot):
    """UR16e manipulator with a suction SurfaceGripper and Lula kinematics solvers.

    Args:
        prim_path (str): Stage path at which the robot prim is created.
        name (str, optional): Scene-unique robot name. Defaults to "ur10_robot".
        usd_path (Optional[str], optional): USD asset for the arm. Defaults to '/assets/ur16e.usd'.
        position (Optional[np.ndarray], optional): Initial world position. Defaults to None.
        orientation (Optional[np.ndarray], optional): Initial world orientation. Defaults to None.
        gripper_usd (Optional[str], optional): USD asset for the gripper. Defaults to "/assets/long_gripper.usd".
    """

    def __init__(
        self,
        prim_path: str,
        name: str = "ur10_robot",
        usd_path: Optional[str] = '/assets/ur16e.usd',
        position: Optional[np.ndarray] = None,
        orientation: Optional[np.ndarray] = None,
        gripper_usd: Optional[str] = "/assets/long_gripper.usd",
    ) -> None:
        prim = get_prim_at_path(prim_path)  # NOTE(review): result unused — presumably a leftover existence check
        self._end_effector = None
        self._gripper = None
        # assets_root_path = get_assets_root_path()
        # usd_path = assets_root_path() + "/Isaac/Robots/UniversalRobots/ur16e/ur16e.usd"
        add_reference_to_stage(usd_path=usd_path, prim_path=prim_path)
        self._end_effector_prim_path = prim_path + "/Gripper"
        super().__init__(prim_path=prim_path, name=name, position=position, orientation=orientation, articulation_controller=None)
        # gripper_usd = assets_root_path + "/Isaac/Robots/UR10/Props/long_gripper.usd"
        add_reference_to_stage(usd_path=gripper_usd, prim_path=self._end_effector_prim_path)
        # Suction gripper mounted along the x-axis, offset 0.1611 from the flange.
        self._gripper = SurfaceGripper(
            end_effector_prim_path=self._end_effector_prim_path, translate=0.1611, direction="x"
        )
        # Load the Lula kinematics descriptors shipped with the motion_generation extension.
        mg_extension_path = get_extension_path_from_name("omni.isaac.motion_generation")
        kinematics_config_dir = os.path.join(mg_extension_path, "motion_policy_configs")
        self._kinematics_solver = LulaKinematicsSolver(
            robot_description_path = kinematics_config_dir + "/universal_robots/ur16e/rmpflow/ur16e_robot_description.yaml",
            urdf_path = kinematics_config_dir + "/universal_robots/ur16e/ur16e.urdf"
        )
        # Kinematics for supported robots can be loaded with a simpler equivalent
        print("Supported Robots with a Lula Kinematics Config:", interface_config_loader.get_supported_robots_with_lula_kinematics())
        # kinematics_config = interface_config_loader.load_supported_lula_kinematics_solver_config("Franka")
        # self._kinematics_solver = LulaKinematicsSolver(**kinematics_config)
        print("Valid frame names at which to compute kinematics:", self._kinematics_solver.get_all_frame_names())
        end_effector_name = "gripper"
        # NOTE(review): self._articulation is assumed to be provided by the Robot
        # base class — confirm the attribute name against the installed Isaac version.
        self._articulation_kinematics_solver = ArticulationKinematicsSolver(self._articulation,self._kinematics_solver, end_effector_name)

    @property
    def gripper(self) -> SurfaceGripper:
        # Read-only access to the suction gripper wrapper.
        return self._gripper

    def initialize(self, physics_sim_view=None) -> None:
        """Initializes the articulation and the gripper once physics is ready."""
        super().initialize(physics_sim_view)
        self._gripper.initialize(physics_sim_view=physics_sim_view, articulation_num_dofs=self.num_dof)
        self.disable_gravity()
        # self._end_effector = RigidPrim(prim_path=self._end_effector_prim_path, name=self.name + "_end_effector")
        # self._end_effector.initialize(physics_sim_view)
        return

    def post_reset(self) -> None:
        # Delegate to the base-class reset behavior.
        Robot.post_reset(self)
| 4,379 |
Python
| 46.096774 | 138 | 0.692396 |
gitLSW/robot-cloud/remnants/gym_env.py
|
from omni.isaac.gym.vec_env import VecEnvBase
class GymEnv(VecEnvBase):
    """This class provides a base interface for connecting RL policies with task implementations.

    APIs provided in this interface follow the interface in gym.Env.
    This class also provides utilities for initializing simulation apps, creating the World,
    and registering a task.
    """

    def __init__(
        self,
        headless: bool,
        sim_device: int = 0,
        enable_livestream: bool = False,
        enable_viewport: bool = False,
        launch_simulation_app: bool = True,
        experience: str = None,
    ) -> None:
        """Initializes RL and task parameters.

        Args:
            headless (bool): Whether to run training headless.
            sim_device (int): GPU device ID for running physics simulation. Defaults to 0.
            enable_livestream (bool): Whether to enable running with livestream.
            enable_viewport (bool): Whether to enable rendering in headless mode.
            launch_simulation_app (bool): Whether to launch the simulation app (required if launching from python). Defaults to True.
            experience (str): Path to the desired kit app file. Defaults to None, which will automatically choose the most suitable app file.
        """
        super().__init__(headless, sim_device, enable_livestream, enable_viewport, launch_simulation_app, experience)

    def is_done(self) -> bool:
        """Returns True if the task is done.

        The base implementation never signals termination; subclasses or the
        registered task are expected to override this.
        """
        # Bug fix: this used to be `raise False`, which raises a TypeError at
        # runtime (only exceptions can be raised). The signature promises a
        # bool, so return one.
        return False

    def step(self, actions):
        """Basic implementation for stepping simulation.

        Can be overriden by inherited Env classes
        to satisfy requirements of specific RL libraries. This method passes actions to task
        for processing, steps simulation, and computes observations, rewards, and resets.

        Args:
            actions (Union[numpy.ndarray, torch.Tensor]): Actions buffer from policy.
        Returns:
            observations(Union[numpy.ndarray, torch.Tensor]): Buffer of observation data.
            rewards(Union[numpy.ndarray, torch.Tensor]): Buffer of rewards data.
            dones(Union[numpy.ndarray, torch.Tensor]): Buffer of resets/dones data.
            truncated(Union[numpy.ndarray, torch.Tensor]): Truncation flags (always zeros here).
            info(dict): Dictionary of extras data.
        """
        if not self._world.is_playing():
            self.close()
        self._task.pre_physics_step(actions)
        self._world.step(render=self._render)
        self.sim_frame_count += 1
        # if not self._world.is_playing():
        #     self.close()
        observations = self._task.get_observations()
        rewards, done = self._task.calculate_metrics()
        # No separate truncation signal: reuse the done buffer's shape, zeroed.
        truncated = done * 0
        info = {}
        return observations, rewards, done, truncated, info
class DreamerEnv(GymEnv):
    """GymEnv variant exposing the 4-tuple ``step`` and bare-observation
    ``reset`` signatures expected by DreamerV3's ``from_gym`` wrapper."""

    def __init__(
        self,
        headless: bool,
        sim_device: int = 0,
        enable_livestream: bool = False,
        enable_viewport: bool = False,
        launch_simulation_app: bool = True,
        experience: str = None,
    ) -> None:
        super().__init__(headless, sim_device, enable_livestream, enable_viewport, launch_simulation_app, experience)

    def step(self, actions):
        # Drop the `truncated` flag from GymEnv's 5-tuple result.
        obs, rew, done, _truncated, info = super().step(actions)
        return obs, rew, done, info

    def reset(self, seed=None, options=None):
        # Dreamer wants only the observation, not the (obs, info) pair.
        obs, _info = super().reset(seed, options)
        return obs
| 3,536 |
Python
| 36.231579 | 141 | 0.632353 |
gitLSW/robot-cloud/remnants/run_sim.py
|
import os
# Launch the Isaac Sim app (with UI) before any other omni.isaac imports.
from omni.isaac.kit import SimulationApp
sim = SimulationApp({"headless": False}, experience=f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit')
from omni.isaac.core.world import World
# One world using meter units and numpy-backed tensors.
world = World(stage_units_in_meters=1.0, backend='numpy')
world.scene.add_default_ground_plane()
from pack_task import PackTask # Cannot be imported before Sim has started
sim_s_step_freq = 60
task = PackTask(name="Pack", max_steps=100000, sim_s_step_freq=sim_s_step_freq)
world.add_task(task)
world.reset()
# Step the simulation forever, rendering every frame.
while True:
    world.step(render=True)
| 550 |
Python
| 29.611109 | 106 | 0.750909 |
gitLSW/robot-cloud/remnants/skrl-tutorial/rl_task.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import asyncio
from abc import abstractmethod
import numpy as np
import omni.isaac.core.utils.warp.tensor as wp_utils
import omni.kit
import omni.usd
import torch
import warp as wp
from gym import spaces
from omni.isaac.cloner import GridCloner
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.utils.prims import define_prim
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.gym.tasks.rl_task import RLTaskInterface
from omniisaacgymenvs.utils.domain_randomization.randomize import Randomizer
from pxr import Gf, UsdGeom, UsdLux
class RLTask(RLTaskInterface):
    """This class provides a PyTorch RL-specific interface for setting up RL tasks.

    It includes utilities for setting up RL task related parameters,
    cloning environments, and data collection for RL algorithms.
    """

    def __init__(self, name, env, offset=None) -> None:
        """Initializes RL parameters, cloner object, and buffers.

        Args:
            name (str): name of the task.
            env (VecEnvBase): an instance of the environment wrapper class to register task.
            offset (Optional[np.ndarray], optional): offset applied to all assets of the task. Defaults to None.
        """
        BaseTask.__init__(self, name=name, offset=offset)

        # NOTE(review): self._cfg / self._task_cfg are assumed to be set by the
        # subclass before this __init__ runs — confirm against task implementations.
        self._rand_seed = self._cfg["seed"]

        # optimization flags for pytorch JIT
        torch._C._jit_set_nvfuser_enabled(False)

        self.test = self._cfg["test"]
        self._device = self._cfg["sim_device"]  # 'gpu' or 'cpu'

        # set up randomizer for domain randomization (DR)
        self._dr_randomizer = Randomizer(self._cfg, self._task_cfg)
        if self._dr_randomizer.randomize:
            import omni.replicator.isaac as dr

            self.dr = dr

        # set up replicator for camera data collection
        self.enable_cameras = self._task_cfg["sim"].get("enable_cameras", False)
        if self.enable_cameras:
            from omni.replicator.isaac.scripts.writers.pytorch_writer import PytorchWriter
            from omni.replicator.isaac.scripts.writers.pytorch_listener import PytorchListener
            import omni.replicator.core as rep

            self.rep = rep
            self.PytorchWriter = PytorchWriter
            self.PytorchListener = PytorchListener

        print("Task Device:", self._device)

        self.randomize_actions = False
        self.randomize_observations = False

        # Clipping bounds and control-rate settings from the task config.
        self.clip_obs = self._task_cfg["env"].get("clipObservations", np.Inf)
        self.clip_actions = self._task_cfg["env"].get("clipActions", np.Inf)
        self.rl_device = self._cfg.get("rl_device", "cuda:0")

        self.control_frequency_inv = self._task_cfg["env"].get("controlFrequencyInv", 1)
        self.rendering_interval = self._task_cfg.get("renderingInterval", 1)

        # parse default viewport camera position and lookat target and resolution (width, height)
        self.camera_position = [10, 10, 3]
        self.camera_target = [0, 0, 0]
        self.viewport_camera_width = 1280
        self.viewport_camera_height = 720
        if "viewport" in self._task_cfg:
            self.camera_position = self._task_cfg["viewport"].get("camera_position", self.camera_position)
            self.camera_target = self._task_cfg["viewport"].get("camera_target", self.camera_target)
            self.viewport_camera_width = self._task_cfg["viewport"].get("viewport_camera_width", self.viewport_camera_width)
            self.viewport_camera_height = self._task_cfg["viewport"].get("viewport_camera_height", self.viewport_camera_height)

        print("RL device: ", self.rl_device)

        self._env = env

        self.is_extension = False

        if not hasattr(self, "_num_agents"):
            self._num_agents = 1  # used for multi-agent environments
        if not hasattr(self, "_num_states"):
            self._num_states = 0

        # initialize data spaces (defaults to gym.Box)
        if not hasattr(self, "action_space"):
            self.action_space = spaces.Box(
                np.ones(self.num_actions, dtype=np.float32) * -1.0, np.ones(self.num_actions, dtype=np.float32) * 1.0
            )
        if not hasattr(self, "observation_space"):
            self.observation_space = spaces.Box(
                np.ones(self.num_observations, dtype=np.float32) * -np.Inf,
                np.ones(self.num_observations, dtype=np.float32) * np.Inf,
            )
        if not hasattr(self, "state_space"):
            self.state_space = spaces.Box(
                np.ones(self.num_states, dtype=np.float32) * -np.Inf,
                np.ones(self.num_states, dtype=np.float32) * np.Inf,
            )

        self.cleanup()

    def cleanup(self) -> None:
        """Prepares torch buffers for RL data collection."""
        # prepare tensors
        self.obs_buf = torch.zeros((self._num_envs, self.num_observations), device=self._device, dtype=torch.float)
        self.states_buf = torch.zeros((self._num_envs, self.num_states), device=self._device, dtype=torch.float)
        self.rew_buf = torch.zeros(self._num_envs, device=self._device, dtype=torch.float)
        # reset_buf starts at ones so every env is reset on the first step.
        self.reset_buf = torch.ones(self._num_envs, device=self._device, dtype=torch.long)
        self.progress_buf = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
        self.extras = {}

    def set_up_scene(
        self, scene, replicate_physics=True, collision_filter_global_paths=None, filter_collisions=True, copy_from_source=False
    ) -> None:
        """Clones environments based on value provided in task config and applies collision filters to mask
        collisions across environments.

        Args:
            scene (Scene): Scene to add objects to.
            replicate_physics (bool): Clone physics using PhysX API for better performance.
            collision_filter_global_paths (Optional[list]): Prim paths of global objects that should not have collision masked.
            filter_collisions (bool): Mask off collision between environments.
            copy_from_source (bool): Copy from source prim when cloning instead of inheriting.
        """
        # Bug fix: the default used to be a mutable list (`[]`) that is mutated
        # below via .append(), leaking ground-plane paths across calls and
        # instances. Use a None sentinel and build a fresh list per call.
        if collision_filter_global_paths is None:
            collision_filter_global_paths = []

        super().set_up_scene(scene)

        self._cloner = GridCloner(spacing=self._env_spacing)
        self._cloner.define_base_env(self.default_base_env_path)
        stage = omni.usd.get_context().get_stage()
        UsdGeom.Xform.Define(stage, self.default_zero_env_path)

        if self._task_cfg["sim"].get("add_ground_plane", True):
            self._ground_plane_path = "/World/defaultGroundPlane"
            collision_filter_global_paths.append(self._ground_plane_path)
            scene.add_default_ground_plane(prim_path=self._ground_plane_path)
        # Clone env_0 into num_envs copies laid out on a grid.
        prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs)
        self._env_pos = self._cloner.clone(
            source_prim_path="/World/envs/env_0", prim_paths=prim_paths, replicate_physics=replicate_physics, copy_from_source=copy_from_source
        )
        self._env_pos = torch.tensor(np.array(self._env_pos), device=self._device, dtype=torch.float)
        if filter_collisions:
            self._cloner.filter_collisions(
                self._env.world.get_physics_context().prim_path,
                "/World/collisions",
                prim_paths,
                collision_filter_global_paths,
            )
        if self._env.render_enabled:
            self.set_initial_camera_params(camera_position=self.camera_position, camera_target=self.camera_target)
            if self._task_cfg["sim"].get("add_distant_light", True):
                self._create_distant_light()
            # initialize capturer for viewport recording
            # this has to be called after initializing replicator for DR
            if self._cfg.get("enable_recording", False) and not self._dr_randomizer.randomize:
                self._env.create_viewport_render_product(resolution=(self.viewport_camera_width, self.viewport_camera_height))

    def set_initial_camera_params(self, camera_position, camera_target):
        """Positions the default perspective camera at startup."""
        from omni.kit.viewport.utility import get_viewport_from_window_name
        from omni.kit.viewport.utility.camera_state import ViewportCameraState

        viewport_api_2 = get_viewport_from_window_name("Viewport")
        viewport_api_2.set_active_camera("/OmniverseKit_Persp")
        camera_state = ViewportCameraState("/OmniverseKit_Persp", viewport_api_2)
        camera_state.set_position_world(Gf.Vec3d(camera_position[0], camera_position[1], camera_position[2]), True)
        camera_state.set_target_world(Gf.Vec3d(camera_target[0], camera_target[1], camera_target[2]), True)

    def _create_distant_light(self, prim_path="/World/defaultDistantLight", intensity=5000):
        """Adds a distant light so cloned scenes are not rendered black."""
        stage = get_current_stage()
        light = UsdLux.DistantLight.Define(stage, prim_path)
        light.CreateIntensityAttr().Set(intensity)

    def initialize_views(self, scene):
        """Optionally implemented by individual task classes to initialize views used in the task.
        This API is required for the extension workflow, where tasks are expected to train on a pre-defined stage.

        Args:
            scene (Scene): Scene to remove existing views and initialize/add new views.
        """
        self._cloner = GridCloner(spacing=self._env_spacing)
        pos, _ = self._cloner.get_clone_transforms(self._num_envs)
        self._env_pos = torch.tensor(np.array(pos), device=self._device, dtype=torch.float)

        if self._env.render_enabled:
            # initialize capturer for viewport recording
            if self._cfg.get("enable_recording", False) and not self._dr_randomizer.randomize:
                self._env.create_viewport_render_product(resolution=(self.viewport_camera_width, self.viewport_camera_height))

    @property
    def default_base_env_path(self):
        """Retrieves default path to the parent of all env prims.

        Returns:
            default_base_env_path(str): Defaults to "/World/envs".
        """
        return "/World/envs"

    @property
    def default_zero_env_path(self):
        """Retrieves default path to the first env prim (index 0).

        Returns:
            default_zero_env_path(str): Defaults to "/World/envs/env_0".
        """
        return f"{self.default_base_env_path}/env_0"

    def reset(self):
        """Flags all environments for reset."""
        self.reset_buf = torch.ones_like(self.reset_buf)

    def post_physics_step(self):
        """Processes RL required computations for observations, states, rewards, resets, and extras.

        Also maintains progress buffer for tracking step count per environment.

        Returns:
            obs_buf(torch.Tensor): Tensor of observation data.
            rew_buf(torch.Tensor): Tensor of rewards data.
            reset_buf(torch.Tensor): Tensor of resets/dones data.
            extras(dict): Dictionary of extras data.
        """
        self.progress_buf[:] += 1

        if self._env.world.is_playing():
            self.get_observations()
            self.get_states()
            self.calculate_metrics()
            self.is_done()
            self.get_extras()

        return self.obs_buf, self.rew_buf, self.reset_buf, self.extras

    @property
    def world(self):
        """Retrieves the World object for simulation.

        Returns:
            world(World): Simulation World.
        """
        return self._env.world

    @property
    def cfg(self):
        """Retrieves the main config.

        Returns:
            cfg(dict): Main config dictionary.
        """
        return self._cfg

    def set_is_extension(self, is_extension):
        # Marks whether the task runs inside the extension workflow.
        self.is_extension = is_extension
class RLTaskWarp(RLTask):
    """RLTask variant whose RL buffers live in NVIDIA Warp arrays instead of torch tensors."""

    def cleanup(self) -> None:
        """Prepares warp buffers (plus one torch zero-state buffer) for RL data collection."""
        # prepare tensors
        self.obs_buf = wp.zeros((self._num_envs, self.num_observations), device=self._device, dtype=wp.float32)
        self.states_buf = wp.zeros((self._num_envs, self.num_states), device=self._device, dtype=wp.float32)
        self.rew_buf = wp.zeros(self._num_envs, device=self._device, dtype=wp.float32)
        self.reset_buf = wp_utils.ones(self._num_envs, device=self._device, dtype=wp.int32)
        self.progress_buf = wp.zeros(self._num_envs, device=self._device, dtype=wp.int32)
        # Returned by get_states() when the task defines no state space.
        self.zero_states_buf_torch = torch.zeros(
            (self._num_envs, self.num_states), device=self._device, dtype=torch.float32
        )
        self.extras = {}

    def reset(self):
        """Flags all environments for reset."""
        # NOTE(review): this launches the reset_progress kernel, which sets
        # progress_buf to 1 rather than touching reset_buf — confirm intent.
        wp.launch(reset_progress, dim=self._num_envs, inputs=[self.progress_buf], device=self._device)

    def post_physics_step(self):
        """Processes RL required computations for observations, states, rewards, resets, and extras.

        Also maintains progress buffer for tracking step count per environment.

        Returns:
            obs_buf(torch.Tensor): Tensor of observation data.
            rew_buf(torch.Tensor): Tensor of rewards data.
            reset_buf(torch.Tensor): Tensor of resets/dones data.
            extras(dict): Dictionary of extras data.
        """
        wp.launch(increment_progress, dim=self._num_envs, inputs=[self.progress_buf], device=self._device)

        if self._env.world.is_playing():
            self.get_observations()
            self.get_states()
            self.calculate_metrics()
            self.is_done()
            self.get_extras()

        # Expose warp buffers to the (torch-based) RL stack without copying.
        obs_buf_torch = wp.to_torch(self.obs_buf)
        rew_buf_torch = wp.to_torch(self.rew_buf)
        reset_buf_torch = wp.to_torch(self.reset_buf)

        return obs_buf_torch, rew_buf_torch, reset_buf_torch, self.extras

    def get_states(self):
        """API for retrieving states buffer, used for asymmetric AC training.

        Returns:
            states_buf(torch.Tensor): States buffer.
        """
        if self.num_states > 0:
            return wp.to_torch(self.states_buf)
        else:
            return self.zero_states_buf_torch

    def set_up_scene(self, scene) -> None:
        """Clones environments based on value provided in task config and applies collision filters to mask
        collisions across environments.

        Args:
            scene (Scene): Scene to add objects to.
        """
        super().set_up_scene(scene)
        # Keep env positions in warp so kernels can consume them directly.
        self._env_pos = wp.from_torch(self._env_pos)
@wp.kernel
def increment_progress(progress_buf: wp.array(dtype=wp.int32)):
    # One thread per environment: advance that environment's step counter by one.
    tid = wp.tid()
    progress_buf[tid] = progress_buf[tid] + 1
@wp.kernel
def reset_progress(progress_buf: wp.array(dtype=wp.int32)):
    # One thread per environment: restart that environment's step counter at 1.
    tid = wp.tid()
    progress_buf[tid] = 1
| 16,200 |
Python
| 42.318182 | 143 | 0.653704 |
gitLSW/robot-cloud/remnants/skrl-tutorial/vec_env_rlgames_mt.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import torch
from omni.isaac.gym.vec_env import TaskStopException, VecEnvMT
from .vec_env_rlgames import VecEnvRLGames
# VecEnv Wrapper for RL training
class VecEnvRLGamesMT(VecEnvRLGames, VecEnvMT):
    """Multi-threaded rl-games wrapper: trainer and simulation run in separate
    threads and exchange buffers through the VecEnvMT queues."""

    def _parse_data(self, data):
        # Unpack the buffer dict produced by the simulation thread. States are
        # clamped here; observations are clamped later in step().
        self._obs = data["obs"]
        self._rew = data["rew"].to(self._task.rl_device)
        self._states = torch.clamp(data["states"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)
        self._resets = data["reset"].to(self._task.rl_device)
        self._extras = data["extras"]

    def step(self, actions):
        """Sends clipped (optionally DR-randomized) actions to the sim thread and
        returns the next (obs dict, rewards, resets, extras) tuple."""
        if self._stop:
            raise TaskStopException()

        if self._task.randomize_actions:
            actions = self._task._dr_randomizer.apply_actions_randomization(
                actions=actions, reset_buf=self._task.reset_buf
            )

        actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device)

        self.send_actions(actions)
        # NOTE(review): get_data() presumably blocks until the sim thread publishes
        # a frame and fills the buffers via _parse_data; the returned dict itself
        # is unused here — confirm against VecEnvMT.
        data = self.get_data()

        if self._task.randomize_observations:
            self._obs = self._task._dr_randomizer.apply_observations_randomization(
                observations=self._obs.to(self._task.rl_device), reset_buf=self._task.reset_buf
            )

        self._obs = torch.clamp(self._obs, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)

        obs_dict = {}
        obs_dict["obs"] = self._obs
        obs_dict["states"] = self._states

        return obs_dict, self._rew, self._resets, self._extras
| 3,109 |
Python
| 42.194444 | 118 | 0.705693 |
gitLSW/robot-cloud/remnants/skrl-tutorial/vec_env_rlgames.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
import numpy as np
import torch
from omni.isaac.gym.vec_env import VecEnvBase
# VecEnv Wrapper for RL training
class VecEnvRLGames(VecEnvBase):
    """rl_games-flavored vectorized-environment wrapper around Isaac Gym's VecEnvBase.

    Adds observation/state/reward clamping and device placement, optional
    periodic episode recording, and the dict-shaped observations
    (``{"obs": ..., "states": ...}``) that rl_games expects.
    """

    def _process_data(self):
        # Clamp observations/states to +/- clip_obs and move every buffer to
        # the RL device so the learner never sees sim-device tensors.
        self._obs = torch.clamp(self._obs, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)
        self._rew = self._rew.to(self._task.rl_device)
        self._states = torch.clamp(self._states, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)
        self._resets = self._resets.to(self._task.rl_device)
        self._extras = self._extras  # no-op: extras are passed through unchanged

    def set_task(self, task, backend="numpy", sim_params=None, init_sim=True, rendering_dt=1.0 / 60.0) -> None:
        """Attach the task, then mirror its state-space metadata on the wrapper."""
        super().set_task(task, backend, sim_params, init_sim, rendering_dt)
        self.num_states = self._task.num_states
        self.state_space = self._task.state_space

    def step(self, actions):
        """Apply ``actions``, advance physics, and return (obs_dict, rew, resets, extras)."""
        # only enable rendering when we are recording, or if the task already has it enabled
        to_render = self._render
        if self._record:
            if not hasattr(self, "step_count"):
                self.step_count = 0
            # start a new recording every `recording_interval` steps
            if self.step_count % self._task.cfg["recording_interval"] == 0:
                self.is_recording = True
                self.record_length = 0
            # stop once the clip reaches `recording_length` steps
            if self.is_recording:
                self.record_length += 1
                if self.record_length > self._task.cfg["recording_length"]:
                    self.is_recording = False
            if self.is_recording:
                to_render = True
            else:
                # headless + no cameras + no livestream -> rendering is pure waste
                if (self._task.cfg["headless"] and not self._task.enable_cameras and not self._task.cfg["enable_livestream"]):
                    to_render = False
            self.step_count += 1

        # optional domain-randomization of actions, then clamp to task limits
        if self._task.randomize_actions:
            actions = self._task._dr_randomizer.apply_actions_randomization(
                actions=actions, reset_buf=self._task.reset_buf
            )
        actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device)

        self._task.pre_physics_step(actions)

        # step physics `control_frequency_inv` times per policy step; render at
        # most once, on the substep aligned with `rendering_interval`
        if (self.sim_frame_count + self._task.control_frequency_inv) % self._task.rendering_interval == 0:
            for _ in range(self._task.control_frequency_inv - 1):
                self._world.step(render=False)
                self.sim_frame_count += 1
            self._world.step(render=to_render)
            self.sim_frame_count += 1
        else:
            for _ in range(self._task.control_frequency_inv):
                self._world.step(render=False)
                self.sim_frame_count += 1

        self._obs, self._rew, self._resets, self._extras = self._task.post_physics_step()

        # optional domain-randomization of observations (applied on the RL device)
        if self._task.randomize_observations:
            self._obs = self._task._dr_randomizer.apply_observations_randomization(
                observations=self._obs.to(device=self._task.rl_device), reset_buf=self._task.reset_buf
            )

        self._states = self._task.get_states()
        self._process_data()

        obs_dict = {"obs": self._obs, "states": self._states}
        return obs_dict, self._rew, self._resets, self._extras

    def reset(self, seed=None, options=None):
        """Resets the task and applies default zero actions to recompute observations and states."""
        now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        print(f"[{now}] Running RL reset")

        self._task.reset()
        # one zero-action step so all buffers (obs/states/rew) are populated
        actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.rl_device)
        obs_dict, _, _, _ = self.step(actions)
        return obs_dict
| 5,196 |
Python
| 43.801724 | 126 | 0.65127 |
gitLSW/robot-cloud/remnants/skrl-tutorial/test_quat.py
|
from pyquaternion import Quaternion
from scipy.spatial.transform import Rotation as R
import numpy as np
# Compare the euler angles of a relative rotation (q1 * q2^-1) against the
# naive elementwise difference of the two euler-angle triples.
#
# Bug fix: pyquaternion's `Quaternion.elements` is scalar-FIRST [w, x, y, z],
# while scipy's `Rotation.from_quat` expects scalar-LAST [x, y, z, w].
# Feeding `q.elements` directly into `from_quat` silently produced wrong
# rotations; `np.roll(..., -1)` reorders [w, x, y, z] -> [x, y, z, w].
for _ in range(3000):
    q1 = Quaternion.random().unit
    q2 = Quaternion.random().unit
    q_diff = q1 * q2.inverse
    q_diff_e = R.from_quat(np.roll(q_diff.elements, -1)).as_euler('xyz', degrees=True)
    e1 = R.from_quat(np.roll(q1.elements, -1)).as_euler('xyz', degrees=True)
    e2 = R.from_quat(np.roll(q2.elements, -1)).as_euler('xyz', degrees=True)
    e_diff = e1 - e2
    # Euler angles do not subtract componentwise, so these generally differ.
    print(q_diff_e == e_diff, q_diff_e, e_diff)
| 500 |
Python
| 26.833332 | 73 | 0.664 |
gitLSW/robot-cloud/remnants/skrl-tutorial/torch_ant_mt_ppo.py
|
import threading
import torch
import torch.nn as nn
# import the skrl components to build the RL system
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.envs.loaders.torch import load_omniverse_isaacgym_env
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed
# seed for reproducibility; called without an argument so skrl chooses the
# seed — presumably drawn randomly, verify against skrl docs if exact
# reproducibility is required
set_seed()  # e.g. `set_seed(42)` for fixed seed
# define shared model (stochastic and deterministic models) using mixins
class Shared(GaussianMixin, DeterministicMixin, Model):
    """Shared policy/value model.

    A single MLP trunk (256 -> 128 -> 64, ELU) feeds two heads: a Gaussian
    policy head (mean layer + learnable log-std) and a deterministic value
    head. The "policy" and "value" roles therefore share parameters.
    """

    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
        # base model first, then both role mixins
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
        DeterministicMixin.__init__(self, clip_actions)

        # shared trunk: Linear+ELU per hidden size
        trunk = []
        width = self.num_observations
        for hidden in (256, 128, 64):
            trunk.append(nn.Linear(width, hidden))
            trunk.append(nn.ELU())
            width = hidden
        self.net = nn.Sequential(*trunk)

        # policy head (mean + state-independent log-std) and value head
        self.mean_layer = nn.Linear(width, self.num_actions)
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
        self.value_layer = nn.Linear(width, 1)

    def act(self, inputs, role):
        # route to the mixin implementing the requested role
        if role == "policy":
            return GaussianMixin.act(self, inputs, role)
        if role == "value":
            return DeterministicMixin.act(self, inputs, role)

    def compute(self, inputs, role):
        features = self.net(inputs["states"])
        if role == "policy":
            return self.mean_layer(features), self.log_std_parameter, {}
        if role == "value":
            return self.value_layer(features), {}
# load and wrap the multi-threaded Omniverse Isaac Gym environment
env = load_omniverse_isaacgym_env(task_name="Ant", multi_threaded=True, timeout=30)
env = wrap_env(env)

device = env.device

# rollout buffer (any skrl memory can back PPO here)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)

# PPO needs a "policy" and a "value" model; both roles share one instance
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models
models = {}
models["policy"] = Shared(env.observation_space, env.action_space, device)
models["value"] = models["policy"]  # same instance: shared model

# agent hyperparameters
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters
cfg = PPO_DEFAULT_CONFIG.copy()
cfg.update({
    "rollouts": 16,  # memory_size
    "learning_epochs": 4,
    "mini_batches": 2,  # 16 * 4096 / 32768
    "discount_factor": 0.99,
    "lambda": 0.95,
    "learning_rate": 3e-4,
    "learning_rate_scheduler": KLAdaptiveRL,
    "learning_rate_scheduler_kwargs": {"kl_threshold": 0.008},
    "random_timesteps": 0,
    "learning_starts": 0,
    "grad_norm_clip": 1.0,
    "ratio_clip": 0.2,
    "value_clip": 0.2,
    "clip_predicted_values": True,
    "entropy_loss_scale": 0.0,
    "value_loss_scale": 1.0,
    "kl_threshold": 0,
    "rewards_shaper": lambda rewards, timestep, timesteps: rewards * 0.01,
    "state_preprocessor": RunningStandardScaler,
    "state_preprocessor_kwargs": {"size": env.observation_space, "device": device},
    "value_preprocessor": RunningStandardScaler,
    "value_preprocessor_kwargs": {"size": 1, "device": device},
})
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 40
cfg["experiment"]["checkpoint_interval"] = 400
cfg["experiment"]["directory"] = "runs/torch/Ant"

agent = PPO(models=models,
            memory=memory,
            cfg=cfg,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)

# sequential trainer driving the agent
cfg_trainer = {"timesteps": 8000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)

# training runs in a background thread; the simulation must own the main thread
threading.Thread(target=trainer.train).start()
env.run()
| 4,650 |
Python
| 37.122951 | 101 | 0.683871 |
gitLSW/robot-cloud/remnants/skrl-tutorial/franka_reach_task.py
|
import torch
import numpy as np
from omniisaacgymenvs.rl_task import RLTask
from omniisaacgymenvs.robots.franka import Franka as Robot
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.utils.prims import get_prim_at_path
from skrl.utils import omniverse_isaacgym_utils
# post_physics_step calls
# - get_observations()
# - get_states()
# - calculate_metrics()
# - is_done()
# - get_extras()
# Full configuration consumed by the task's SimConfig: launcher options at the
# top level, plus a "task" section with env sizing, simulation/PhysX settings
# and per-actor physics overrides.
TASK_CFG = {
    "test": False,
    "device_id": 0,
    "headless": True,
    "sim_device": "gpu",
    "enable_livestream": False,
    "warp": False,
    "seed": 42,
    "task": {
        "name": "ReachingFranka",
        "physics_engine": "physx",
        # environment layout and RL-facing scales/limits
        "env": {
            "numEnvs": 50_000,
            "envSpacing": 1.5,
            "episodeLength": 100,
            "enableDebugVis": False,
            "clipObservations": 1000.0,
            "clipActions": 1.0,
            "controlFrequencyInv": 4,
            "actionScale": 2.5,
            "dofVelocityScale": 0.1,
            "controlSpace": "joint"  # "joint" or "cartesian" (see ReachingFrankaTask)
        },
        # physics scene settings
        "sim": {
            "dt": 1 / 60,
            "use_gpu_pipeline": True,
            "gravity": [0.0, 0.0, -9.81],
            "add_ground_plane": True,
            "use_flatcache": True,
            "enable_scene_query_support": False,
            "enable_cameras": False,
            "default_physics_material": {
                "static_friction": 1.0,
                "dynamic_friction": 1.0,
                "restitution": 0.0
            },
            # PhysX solver options and GPU buffer capacities
            "physx": {
                "worker_thread_count": 4,
                "solver_type": 1,
                "use_gpu": True,
                "solver_position_iteration_count": 4,
                "solver_velocity_iteration_count": 1,
                "contact_offset": 0.005,
                "rest_offset": 0.0,
                "bounce_threshold_velocity": 0.2,
                "friction_offset_threshold": 0.04,
                "friction_correlation_distance": 0.025,
                "enable_sleeping": True,
                "enable_stabilization": True,
                "max_depenetration_velocity": 1000.0,
                "gpu_max_rigid_contact_count": 524288,
                "gpu_max_rigid_patch_count": 33554432,
                "gpu_found_lost_pairs_capacity": 524288,
                "gpu_found_lost_aggregate_pairs_capacity": 262144,
                "gpu_total_aggregate_pairs_capacity": 1048576,
                "gpu_max_soft_body_contacts": 1048576,
                "gpu_max_particle_contacts": 1048576,
                "gpu_heap_capacity": 33554432,
                "gpu_temp_buffer_capacity": 16777216,
                "gpu_max_num_partitions": 8
            },
            # per-actor physics overrides, applied via apply_articulation_settings()
            "robot": {
                "override_usd_defaults": False,
                "fixed_base": False,
                "enable_self_collisions": False,
                "enable_gyroscopic_forces": True,
                "solver_position_iteration_count": 4,
                "solver_velocity_iteration_count": 1,
                "sleep_threshold": 0.005,
                "stabilization_threshold": 0.001,
                "density": -1,
                "max_depenetration_velocity": 1000.0,
                "contact_offset": 0.005,
                "rest_offset": 0.0
            },
            "target": {
                "override_usd_defaults": False,
                "fixed_base": True,
                "make_kinematic": True,
                "enable_self_collisions": False,
                "enable_gyroscopic_forces": True,
                "solver_position_iteration_count": 4,
                "solver_velocity_iteration_count": 1,
                "sleep_threshold": 0.005,
                "stabilization_threshold": 0.001,
                "density": -1,
                "max_depenetration_velocity": 1000.0,
                "contact_offset": 0.005,
                "rest_offset": 0.0
            }
        }
    }
}
class RobotView(ArticulationView):
    """Articulation view over every robot prim matched by ``prim_paths_expr``."""

    def __init__(self, prim_paths_expr: str, name: str = "robot_view") -> None:
        # xform properties are intentionally not reset when building the view
        super().__init__(name=name,
                         prim_paths_expr=prim_paths_expr,
                         reset_xform_properties=False)
class ReachingFrankaTask(RLTask):
    """Franka end-effector reaching task.

    The robot must bring its end effector ("panda_leftfinger") to a randomly
    placed sphere target. Reward is the negative end-effector/target distance.
    Actions are either 7 joint-velocity-style targets ("joint" control space)
    or a 3D cartesian displacement resolved through IK ("cartesian").
    """

    def __init__(self, name, sim_config, env, offset=None) -> None:
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config

        self.dt = self._task_cfg["sim"]["dt"]

        self._num_envs = self._task_cfg["env"]["numEnvs"]
        self._env_spacing = self._task_cfg["env"]["envSpacing"]
        self._action_scale = self._task_cfg["env"]["actionScale"]
        self._dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
        self._max_episode_length = self._task_cfg["env"]["episodeLength"]
        self._control_space = self._task_cfg["env"]["controlSpace"]

        # observation and action space sizes depend on the control space
        self._num_observations = 18
        if self._control_space == "joint":
            self._num_actions = 7
        elif self._control_space == "cartesian":
            self._num_actions = 3
        else:
            raise ValueError("Invalid control space: {}".format(self._control_space))

        # prim name used for the end-effector rigid-body view
        self._end_effector_link = "panda_leftfinger"

        RLTask.__init__(self, name, env)

    def set_up_scene(self, scene) -> None:
        """Spawn robot + target in env 0, clone, and register the views."""
        self.get_robot()
        self.get_target()

        super().set_up_scene(scene)

        # robot view
        self._robots = RobotView(prim_paths_expr="/World/envs/.*/robot", name="robot_view")
        scene.add(self._robots)
        # end-effectors view
        self._end_effectors = RigidPrimView(prim_paths_expr="/World/envs/.*/robot/{}".format(self._end_effector_link), name="end_effector_view")
        scene.add(self._end_effectors)
        # hands view (cartesian) — only needed for IK-based control
        if self._control_space == "cartesian":
            self._hands = RigidPrimView(prim_paths_expr="/World/envs/.*/robot/panda_hand", name="hand_view", reset_xform_properties=False)
            scene.add(self._hands)
        # target view
        self._targets = RigidPrimView(prim_paths_expr="/World/envs/.*/target", name="target_view", reset_xform_properties=False)
        scene.add(self._targets)

        self.init_data()

    def get_robot(self):
        """Create the template Franka and apply the "robot" physics config."""
        robot = Robot(prim_path=self.default_zero_env_path + "/robot",
                      translation=torch.tensor([0.0, 0.0, 0.0]),
                      orientation=torch.tensor([1.0, 0.0, 0.0, 0.0]),
                      name="robot")
        self._sim_config.apply_articulation_settings("robot", get_prim_at_path(robot.prim_path), self._sim_config.parse_actor_config("robot"))

    def get_target(self):
        """Create the template target sphere (visual only; collisions disabled)."""
        target = DynamicSphere(prim_path=self.default_zero_env_path + "/target",
                               name="target",
                               radius=0.025,
                               color=torch.tensor([1, 0, 0]))
        self._sim_config.apply_articulation_settings("target", get_prim_at_path(target.prim_path), self._sim_config.parse_actor_config("target"))
        target.set_collision_enabled(False)

    def init_data(self) -> None:
        """Allocate per-env buffers and the default DOF pose (9 DOFs: 7 arm + 2 fingers)."""
        self.robot_default_dof_pos = torch.tensor(np.radians([0, -45, 0, -135, 0, 90, 45, 0, 0]), device=self._device, dtype=torch.float32)
        self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)

        if self._control_space == "cartesian":
            # NOTE(review): jacobian buffer shaped (envs, 10, 6, 9) — link/DOF
            # counts taken from the Franka articulation; confirm against robot USD
            self.jacobians = torch.zeros((self._num_envs, 10, 6, 9), device=self._device)
            self.hand_pos, self.hand_rot = torch.zeros((self._num_envs, 3), device=self._device), torch.zeros((self._num_envs, 4), device=self._device)

    def get_observations(self) -> dict:
        """Fill obs_buf: [progress, 7 scaled DOF pos, 7 noisy scaled DOF vel, 3 target pos]."""
        robot_dof_pos = self._robots.get_joint_positions(clone=False)
        robot_dof_vel = self._robots.get_joint_velocities(clone=False)
        end_effector_pos, end_effector_rot = self._end_effectors.get_world_poses(clone=False)
        target_pos, target_rot = self._targets.get_world_poses(clone=False)

        # scale DOF positions to [-1, 1] using the joint limits
        dof_pos_scaled = 2.0 * (robot_dof_pos - self.robot_dof_lower_limits) \
                         / (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0
        dof_vel_scaled = robot_dof_vel * self._dof_vel_scale

        # multiplicative noise in [0.5, 1.5) on joint velocities (sim-to-real generalization)
        generalization_noise = torch.rand((dof_vel_scaled.shape[0], 7), device=self._device) + 0.5

        self.obs_buf[:, 0] = self.progress_buf / self._max_episode_length
        self.obs_buf[:, 1:8] = dof_pos_scaled[:, :7]
        self.obs_buf[:, 8:15] = dof_vel_scaled[:, :7] * generalization_noise
        self.obs_buf[:, 15:18] = target_pos - self._env_pos

        # compute distance for calculate_metrics() and is_done()
        self._computed_distance = torch.norm(end_effector_pos - target_pos, dim=-1)

        if self._control_space == "cartesian":
            self.jacobians = self._robots.get_jacobians(clone=False)
            self.hand_pos, self.hand_rot = self._hands.get_world_poses(clone=False)
            self.hand_pos -= self._env_pos

        # The return is irrelevant for Multi Threading:
        # The VecEnvMT loop calls RLTask.post_physics_step to get all the data from one step.
        # RLTask.post_physics_step is simply returning self.obs_buf, self.rew_buf, ...
        return { self._robots.name: {"obs_buf": self.obs_buf } }

    def pre_physics_step(self, actions) -> None:
        """Reset finished envs, then convert actions into joint position targets."""
        reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(reset_env_ids) > 0:
            self.reset_idx(reset_env_ids)

        self.actions = actions.clone().to(self._device)
        env_ids_int32 = torch.arange(self._robots.count, dtype=torch.int32, device=self._device)

        if self._control_space == "joint":
            # incremental joint targets scaled by speed, dt and action scale
            targets = self.robot_dof_targets[:, :7] + self.robot_dof_speed_scales[:7] * self.dt * self.actions * self._action_scale
        elif self._control_space == "cartesian":
            # actions are cm-scale cartesian offsets; resolve via differential IK
            goal_position = self.hand_pos + actions / 100.0
            delta_dof_pos = omniverse_isaacgym_utils.ik(jacobian_end_effector=self.jacobians[:, 8 - 1, :, :7],  # franka hand index: 8
                                                        current_position=self.hand_pos,
                                                        current_orientation=self.hand_rot,
                                                        goal_position=goal_position,
                                                        goal_orientation=None)
            targets = self.robot_dof_targets[:, :7] + delta_dof_pos

        # clamp to joint limits; gripper DOFs (7:) stay closed at 0
        self.robot_dof_targets[:, :7] = torch.clamp(targets, self.robot_dof_lower_limits[:7], self.robot_dof_upper_limits[:7])
        self.robot_dof_targets[:, 7:] = 0
        self._robots.set_joint_position_targets(self.robot_dof_targets, indices=env_ids_int32)

    def reset_idx(self, env_ids) -> None:
        """Re-randomize robot pose and target position for the given envs."""
        indices = env_ids.to(dtype=torch.int32)

        # reset robot: default pose plus uniform noise in [-0.125, 0.125] rad, clamped to limits
        pos = torch.clamp(self.robot_default_dof_pos.unsqueeze(0) + 0.25 * (torch.rand((len(env_ids), self.num_robot_dofs), device=self._device) - 0.5),
                          self.robot_dof_lower_limits, self.robot_dof_upper_limits)
        dof_pos = torch.zeros((len(indices), self._robots.num_dof), device=self._device)
        dof_pos[:, :] = pos
        dof_pos[:, 7:] = 0  # gripper DOFs start closed
        dof_vel = torch.zeros((len(indices), self._robots.num_dof), device=self._device)
        self.robot_dof_targets[env_ids, :] = pos
        self.robot_dof_pos[env_ids, :] = pos

        self._robots.set_joint_position_targets(self.robot_dof_targets[env_ids], indices=indices)
        self._robots.set_joint_positions(dof_pos, indices=indices)
        self._robots.set_joint_velocities(dof_vel, indices=indices)

        # reset target: uniform box of +/-(0.25, 0.25, 0.10) m around (0.50, 0.00, 0.20)
        pos = (torch.rand((len(env_ids), 3), device=self._device) - 0.5) * 2 \
              * torch.tensor([0.25, 0.25, 0.10], device=self._device) \
              + torch.tensor([0.50, 0.00, 0.20], device=self._device)
        self._targets.set_world_poses(pos + self._env_pos[env_ids], indices=indices)

        # bookkeeping
        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

    def post_reset(self):
        """Cache DOF limits/buffers after sim start, then randomize every env."""
        self.num_robot_dofs = self._robots.num_dof
        self.robot_dof_pos = torch.zeros((self.num_envs, self.num_robot_dofs), device=self._device)
        dof_limits = self._robots.get_dof_limits()
        self.robot_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
        self.robot_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
        self.robot_dof_speed_scales = torch.ones_like(self.robot_dof_lower_limits)
        self.robot_dof_targets = torch.zeros((self._num_envs, self.num_robot_dofs), dtype=torch.float, device=self._device)

        # randomize all envs
        indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
        self.reset_idx(indices)

    def calculate_metrics(self) -> None:
        # dense reward: negative end-effector/target distance
        self.rew_buf[:] = -self._computed_distance

    def is_done(self) -> None:
        self.reset_buf.fill_(0)
        # target reached (within 3.5 cm)
        self.reset_buf = torch.where(self._computed_distance <= 0.035, torch.ones_like(self.reset_buf), self.reset_buf)
        # max episode length
        self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
| 14,689 |
Python
| 51.092198 | 152 | 0.519641 |
gitLSW/robot-cloud/remnants/skrl-tutorial/cartpole_task.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.robots.articulations.cartpole import Cartpole
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
import omni.isaac.core.utils.warp as warp_utils
from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp
import numpy as np
import torch
import warp as wp
import math
class CartpoleTask(RLTaskWarp):
    """Cartpole balancing task whose per-env compute (obs, reward, reset,
    randomization) runs in the module-level Warp kernels below."""

    def __init__(self, name, sim_config, env, offset=None) -> None:
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config

        self._num_envs = self._task_cfg["env"]["numEnvs"]
        self._env_spacing = self._task_cfg["env"]["envSpacing"]
        self._cartpole_positions = wp.array([0.0, 0.0, 2.0], dtype=wp.float32)

        self._reset_dist = self._task_cfg["env"]["resetDist"]
        self._max_push_effort = self._task_cfg["env"]["maxEffort"]
        self._max_episode_length = 500

        # obs: [cart pos, cart vel, pole angle, pole vel]; action: cart force
        self._num_observations = 4
        self._num_actions = 1

        RLTaskWarp.__init__(self, name, env)
        return

    def set_up_scene(self, scene) -> None:
        """Spawn the template cartpole, clone envs, and register the view."""
        self.get_cartpole()
        super().set_up_scene(scene)
        self._cartpoles = ArticulationView(prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False)
        scene.add(self._cartpoles)
        return

    def get_cartpole(self):
        cartpole = Cartpole(prim_path=self.default_zero_env_path + "/Cartpole", name="Cartpole", translation=self._cartpole_positions)
        # applies articulation settings from the task configuration yaml file
        self._sim_config.apply_articulation_settings("Cartpole", get_prim_at_path(cartpole.prim_path), self._sim_config.parse_actor_config("Cartpole"))

    def get_observations(self) -> dict:
        """Copy joint state into obs_buf via the get_observations kernel."""
        dof_pos = self._cartpoles.get_joint_positions(clone=False)
        dof_vel = self._cartpoles.get_joint_velocities(clone=False)

        # launches the module-level `get_observations` kernel (same name as this method)
        wp.launch(get_observations, dim=self._num_envs,
                  inputs=[self.obs_buf, dof_pos, dof_vel, self._cart_dof_idx, self._pole_dof_idx], device=self._device)

        observations = {
            self._cartpoles.name: {
                "obs_buf": self.obs_buf
            }
        }
        return observations

    def pre_physics_step(self, actions) -> None:
        """Reset finished envs, then apply scaled cart forces."""
        self.reset_idx()

        actions_wp = wp.from_torch(actions)
        forces = wp.zeros((self._cartpoles.count, self._cartpoles.num_dof), dtype=wp.float32, device=self._device)
        wp.launch(compute_forces, dim=self._num_envs,
                  inputs=[forces, actions_wp, self._cart_dof_idx, self._max_push_effort], device=self._device)
        self._cartpoles.set_joint_efforts(forces)

    def reset_idx(self):
        """Randomize DOF state for every env flagged in reset_buf."""
        reset_env_ids = wp.to_torch(self.reset_buf).nonzero(as_tuple=False).squeeze(-1)
        num_resets = len(reset_env_ids)
        indices = wp.from_torch(reset_env_ids.to(dtype=torch.int32), dtype=wp.int32)

        if num_resets > 0:
            # launches the module-level `reset_idx` kernel (same name as this method)
            wp.launch(reset_idx, num_resets,
                      inputs=[self.dof_pos, self.dof_vel, indices, self.reset_buf, self.progress_buf, self._cart_dof_idx, self._pole_dof_idx, self._rand_seed],
                      device=self._device)

            # apply resets
            self._cartpoles.set_joint_positions(self.dof_pos[indices], indices=indices)
            self._cartpoles.set_joint_velocities(self.dof_vel[indices], indices=indices)

    def post_reset(self):
        """Cache DOF indices, allocate state buffers, and randomize all envs."""
        self._cart_dof_idx = self._cartpoles.get_dof_index("cartJoint")
        self._pole_dof_idx = self._cartpoles.get_dof_index("poleJoint")

        self.dof_pos = wp.zeros((self._num_envs, self._cartpoles.num_dof), device=self._device, dtype=wp.float32)
        self.dof_vel = wp.zeros((self._num_envs, self._cartpoles.num_dof), device=self._device, dtype=wp.float32)

        # randomize all envs
        self.reset_idx()

    def calculate_metrics(self) -> None:
        wp.launch(calculate_metrics, dim=self._num_envs,
                  inputs=[self.obs_buf, self.rew_buf, self._reset_dist], device=self._device)

    def is_done(self) -> None:
        wp.launch(is_done, dim=self._num_envs,
                  inputs=[self.obs_buf, self.reset_buf, self.progress_buf, self._reset_dist, self._max_episode_length],
                  device=self._device)
# Kernel: re-randomize DOF positions/velocities for the envs listed in
# `indices` and clear their progress/reset flags. One thread per reset env.
@wp.kernel
def reset_idx(dof_pos: wp.array(dtype=wp.float32, ndim=2),
              dof_vel: wp.array(dtype=wp.float32, ndim=2),
              indices: wp.array(dtype=wp.int32),
              reset_buf: wp.array(dtype=wp.int32),
              progress_buf: wp.array(dtype=wp.int32),
              cart_dof_idx: int,
              pole_dof_idx: int,
              rand_seed: int):
    i = wp.tid()
    idx = indices[i]  # env index to reset
    rand_state = wp.rand_init(rand_seed, i)

    # randomize DOF positions: cart in [-1, 1] m, pole in +/- pi/8 rad
    dof_pos[idx, cart_dof_idx] = 1.0 * (1.0 - 2.0 * wp.randf(rand_state))
    dof_pos[idx, pole_dof_idx] = 0.125 * warp_utils.PI * (1.0 - 2.0 * wp.randf(rand_state))

    # randomize DOF velocities: cart in [-0.5, 0.5], pole in +/- pi/4
    dof_vel[idx, cart_dof_idx] = 0.5 * (1.0 - 2.0 * wp.randf(rand_state))
    dof_vel[idx, pole_dof_idx] = 0.25 * warp_utils.PI * (1.0 - 2.0 * wp.randf(rand_state))

    # bookkeeping
    progress_buf[idx] = 0
    reset_buf[idx] = 0
# Kernel: scale each env's action into a force on the cart DOF.
@wp.kernel
def compute_forces(forces: wp.array(dtype=wp.float32, ndim=2),
                   actions: wp.array(dtype=wp.float32, ndim=2),
                   cart_dof_idx: int,
                   max_push_effort: float):
    i = wp.tid()
    # action is in [-1, 1]; effort limit gives the physical force magnitude
    forces[i, cart_dof_idx] = max_push_effort * actions[i, 0]
# Kernel: pack per-env joint state into the 4-wide observation buffer
# [cart pos, cart vel, pole angle, pole angular vel].
@wp.kernel
def get_observations(obs_buf: wp.array(dtype=wp.float32, ndim=2),
                     dof_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
                     dof_vel: wp.indexedarray(dtype=wp.float32, ndim=2),
                     cart_dof_idx: int,
                     pole_dof_idx: int):
    i = wp.tid()
    obs_buf[i, 0] = dof_pos[i, cart_dof_idx]
    obs_buf[i, 1] = dof_vel[i, cart_dof_idx]
    obs_buf[i, 2] = dof_pos[i, pole_dof_idx]
    obs_buf[i, 3] = dof_vel[i, pole_dof_idx]
# Kernel: reward = 1 - angle^2 - 0.01*|cart vel| - 0.005*|pole vel|,
# overridden by a -2.0 penalty when the env is out of bounds.
@wp.kernel
def calculate_metrics(obs_buf: wp.array(dtype=wp.float32, ndim=2),
                      rew_buf: wp.array(dtype=wp.float32),
                      reset_dist: float):
    i = wp.tid()
    cart_pos = obs_buf[i, 0]
    cart_vel = obs_buf[i, 1]
    pole_angle = obs_buf[i, 2]
    pole_vel = obs_buf[i, 3]

    rew_buf[i] = 1.0 - pole_angle * pole_angle - 0.01 * wp.abs(cart_vel) - 0.005 * wp.abs(pole_vel)
    # failure penalty: cart beyond reset distance or pole past horizontal
    if wp.abs(cart_pos) > reset_dist or wp.abs(pole_angle) > warp_utils.PI / 2.0:
        rew_buf[i] = -2.0
# Kernel: flag an env for reset when the cart leaves the track, the pole
# falls past horizontal, or the episode exceeds its maximum length.
@wp.kernel
def is_done(obs_buf: wp.array(dtype=wp.float32, ndim=2),
            reset_buf: wp.array(dtype=wp.int32),
            progress_buf: wp.array(dtype=wp.int32),
            reset_dist: float,
            max_episode_length: int):
    i = wp.tid()
    cart_pos = obs_buf[i, 0]
    pole_pos = obs_buf[i, 2]

    if wp.abs(cart_pos) > reset_dist or wp.abs(pole_pos) > warp_utils.PI / 2.0 or progress_buf[i] > max_episode_length:
        reset_buf[i] = 1
    else:
        reset_buf[i] = 0
| 8,664 |
Python
| 38.56621 | 154 | 0.635734 |
gitLSW/robot-cloud/remnants/skrl-tutorial/rlgames_train_mt.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import datetime
import os
import queue
import threading
import traceback
import hydra
from omegaconf import DictConfig
from omni.isaac.gym.vec_env.vec_env_mt import TrainerMT
import omniisaacgymenvs
from omniisaacgymenvs.envs.vec_env_rlgames_mt import VecEnvRLGamesMT
from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv
from omniisaacgymenvs.utils.task_util import initialize_task
from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner
class RLGTrainer:
    """Thin wrapper that wires an Isaac Gym env into an rl_games Runner.

    Lifecycle: construct with hydra config, call launch_rlg_hydra(env) to
    register the env with rl_games, then run() to train/play.
    """

    def __init__(self, cfg, cfg_dict):
        self.cfg = cfg
        self.cfg_dict = cfg_dict

        # ensure checkpoints can be specified as relative paths
        self._bad_checkpoint = False
        if self.cfg.checkpoint:
            self.cfg.checkpoint = retrieve_checkpoint_path(self.cfg.checkpoint)
            if not self.cfg.checkpoint:
                # path could not be resolved; flag for the caller to handle
                self._bad_checkpoint = True

    def launch_rlg_hydra(self, env):
        """Register `env` with rl_games and convert the train config to a dict."""
        # `create_rlgpu_env` is environment construction function which is passed to RL Games and called internally.
        # We use the helper function here to specify the environment config.
        self.cfg_dict["task"]["test"] = self.cfg.test

        # register the rl-games adapter to use inside the runner
        vecenv.register("RLGPU", lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
        env_configurations.register("rlgpu", {"vecenv_type": "RLGPU", "env_creator": lambda **kwargs: env})

        self.rlg_config_dict = omegaconf_to_dict(self.cfg.train)

    def run(self):
        """Build the rl_games Runner, dump the config, and start training/playing."""
        # create runner and set the settings
        runner = Runner(RLGPUAlgoObserver())

        # add evaluation parameters (player monitors the checkpoint directory)
        if self.cfg.evaluation:
            player_config = self.rlg_config_dict["params"]["config"].get("player", {})
            player_config["evaluation"] = True
            player_config["update_checkpoint_freq"] = 100
            player_config["dir_to_monitor"] = os.path.dirname(self.cfg.checkpoint)
            self.rlg_config_dict["params"]["config"]["player"] = player_config

        # experiment outputs live under <package>/runs
        module_path = os.path.abspath(os.path.join(os.path.dirname(omniisaacgymenvs.__file__)))
        self.rlg_config_dict["params"]["config"]["train_dir"] = os.path.join(module_path, "runs")

        # load config
        runner.load(copy.deepcopy(self.rlg_config_dict))
        runner.reset()

        # dump config dict so the run is reproducible
        experiment_dir = os.path.join(module_path, "runs", self.cfg.train.params.config.name)
        os.makedirs(experiment_dir, exist_ok=True)
        with open(os.path.join(experiment_dir, "config.yaml"), "w") as f:
            f.write(OmegaConf.to_yaml(self.cfg))

        time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

        if self.cfg.wandb_activate:
            # Make sure to install WandB if you actually use this.
            import wandb

            run_name = f"{self.cfg.wandb_name}_{time_str}"

            wandb.init(
                project=self.cfg.wandb_project,
                group=self.cfg.wandb_group,
                entity=self.cfg.wandb_entity,
                config=self.cfg_dict,
                sync_tensorboard=True,
                id=run_name,
                resume="allow",
                monitor_gym=True,
            )

        # train or play depending on cfg.test
        runner.run(
            {"train": not self.cfg.test, "play": self.cfg.test, "checkpoint": self.cfg.checkpoint, "sigma": None}
        )

        if self.cfg.wandb_activate:
            wandb.finish()
class Trainer(TrainerMT):
    """Multi-threaded trainer: owns the action/data queues that connect the
    simulation (main thread) with the PPO loop running in a PPOTrainer thread."""

    def __init__(self, trainer, env):
        self.ppo_thread = None
        self.action_queue = None
        self.data_queue = None

        self.trainer = trainer
        self.is_running = False

        self.env = env

        # eagerly set up the task and start the training thread
        self.create_task()
        self.run()

    def create_task(self):
        """Register the env with rl_games and grab the task it exposes."""
        self.trainer.launch_rlg_hydra(self.env)
        # task = initialize_task(self.trainer.cfg_dict, self.env, init_sim=False)
        self.task = self.env.task

    def run(self):
        """Create the size-1 handoff queues, hand them to the env, and start PPO."""
        self.is_running = True

        # size-1 queues force strict lock-step between sim and learner
        self.action_queue = queue.Queue(1)
        self.data_queue = queue.Queue(1)

        if "mt_timeout" in self.trainer.cfg_dict:
            self.env.initialize(self.action_queue, self.data_queue, self.trainer.cfg_dict["mt_timeout"])
        else:
            self.env.initialize(self.action_queue, self.data_queue)

        # daemon thread so a dead main thread cannot be held up by PPO
        self.ppo_thread = PPOTrainer(self.env, self.task, self.trainer)
        self.ppo_thread.daemon = True
        self.ppo_thread.start()

    def stop(self):
        """Signal the env to stop, drain the queues, and join the PPO thread."""
        self.env.stop = True
        self.env.clear_queues()
        if self.action_queue:
            self.action_queue.join()
        if self.data_queue:
            self.data_queue.join()
        if self.ppo_thread:
            self.ppo_thread.join()

        self.action_queue = None
        self.data_queue = None
        self.ppo_thread = None
        self.is_running = False
class PPOTrainer(threading.Thread):
    """Background thread that runs the rl_games training loop.

    Whatever way the loop ends — normal completion, a TaskStopException from
    the simulation side, or an unexpected error — the thread signals the main
    (simulation) thread to stop by clearing ``env.should_run`` and pushing a
    ``None`` action.
    """

    def __init__(self, env, task, trainer):
        super().__init__()
        self.env = env        # VecEnvMT-style environment (owned by main thread)
        self.task = task      # RL task instance exposed by the env
        self.trainer = trainer  # RLGTrainer whose run() drives rl_games

    def run(self):
        # imported here because omni modules are only available at runtime
        from omni.isaac.gym.vec_env import TaskStopException

        print("starting ppo...")

        try:
            self.trainer.run()
        except TaskStopException:
            # simulation requested a stop — not an error
            print("Task Stopped!")
        except Exception:
            # an error occurred on the RL side — log it; shutdown signalling
            # below still runs (previously this cleanup was duplicated in
            # every branch and the bound exception variable was unused)
            print(traceback.format_exc())
        finally:
            # tell the main thread to stop stepping the simulation
            self.env.should_run = False
            self.env.send_actions(None, block=False)
| 7,633 |
Python
| 37.17 | 119 | 0.654395 |
gitLSW/robot-cloud/remnants/skrl-tutorial/robots/usd_utils.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from pxr import UsdLux, UsdPhysics
def set_drive_type(prim_path, drive_type):
    """Attach a UsdPhysics drive ("angular" or "linear") to the joint prim at
    ``prim_path`` and return the resulting DriveAPI handle."""
    return UsdPhysics.DriveAPI.Apply(get_prim_at_path(prim_path), drive_type)
def set_drive_target_position(drive, target_value):
    """Write the drive's position target, creating the attribute on first use."""
    attr = drive.GetTargetPositionAttr()
    if attr:
        attr.Set(target_value)
    else:
        drive.CreateTargetPositionAttr(target_value)
def set_drive_target_velocity(drive, target_value):
    """Write the drive's velocity target, creating the attribute on first use."""
    attr = drive.GetTargetVelocityAttr()
    if attr:
        attr.Set(target_value)
    else:
        drive.CreateTargetVelocityAttr(target_value)
def set_drive_stiffness(drive, stiffness):
    """Write the drive's stiffness gain, creating the attribute on first use."""
    attr = drive.GetStiffnessAttr()
    if attr:
        attr.Set(stiffness)
    else:
        drive.CreateStiffnessAttr(stiffness)
def set_drive_damping(drive, damping):
    """Write the drive's damping gain, creating the attribute on first use."""
    attr = drive.GetDampingAttr()
    if attr:
        attr.Set(damping)
    else:
        drive.CreateDampingAttr(damping)
def set_drive_max_force(drive, max_force):
    """Write the drive's max-force limit, creating the attribute on first use."""
    attr = drive.GetMaxForceAttr()
    if attr:
        attr.Set(max_force)
    else:
        drive.CreateMaxForceAttr(max_force)
def set_drive(prim_path, drive_type, target_type, target_value, stiffness, damping, max_force) -> None:
    """Configure a joint drive in one call: type, target, gains and force limit.

    ``target_type`` selects which target attribute receives ``target_value``
    ("position" or "velocity"); any other value sets no target at all.
    """
    drive = set_drive_type(prim_path, drive_type)
    # Dispatch the target write on the requested target type.
    target_setters = {
        "position": set_drive_target_position,
        "velocity": set_drive_target_velocity,
    }
    setter = target_setters.get(target_type)
    if setter is not None:
        setter(drive, target_value)
    set_drive_stiffness(drive, stiffness)
    set_drive_damping(drive, damping)
    set_drive_max_force(drive, max_force)
| 3,403 |
Python
| 36.406593 | 103 | 0.740229 |
gitLSW/robot-cloud/remnants/skrl-tutorial/robots/franka.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import math
from typing import Optional
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omniisaacgymenvs.robots.usd_utils import set_drive
from pxr import PhysxSchema
class Franka(Robot):
    """Franka Panda articulation wrapper.

    Loads the (instanceable) Franka USD onto the stage and configures position
    drives plus max joint velocities for all 9 DOFs (7 arm joints + 2 gripper
    fingers).
    """

    def __init__(
        self,
        prim_path: str,
        name: Optional[str] = "franka",
        usd_path: Optional[str] = None,
        translation: Optional[torch.Tensor] = None,
        orientation: Optional[torch.Tensor] = None,
    ) -> None:
        """Create the robot prim at ``prim_path`` and set up its joint drives.

        Args:
            prim_path: Stage path the robot USD is referenced under.
            name: Name the robot is registered with.
            usd_path: Optional override for the robot USD; defaults to the
                copy on the Isaac Sim assets server.
            translation: Initial position; defaults to [1, 0, 0].
            orientation: Initial quaternion; defaults to [0, 0, 0, 1].
        """
        self._usd_path = usd_path
        self._name = name
        self._position = torch.tensor([1.0, 0.0, 0.0]) if translation is None else translation
        self._orientation = torch.tensor([0.0, 0.0, 0.0, 1.0]) if orientation is None else orientation
        if self._usd_path is None:
            assets_root_path = get_assets_root_path()
            if assets_root_path is None:
                # BUG FIX: the original called carb.log_error() although `carb`
                # is never imported in this file (NameError), and would then
                # crash on the string concatenation below anyway.  Fail fast
                # with an explicit error instead.
                raise RuntimeError("Could not find Isaac Sim assets folder")
            self._usd_path = assets_root_path + "/Isaac/Robots/Franka/franka_instanceable.usd"
        add_reference_to_stage(self._usd_path, prim_path)
        super().__init__(
            prim_path=prim_path,
            name=name,
            translation=self._position,
            orientation=self._orientation,
            articulation_controller=None,
        )
        dof_paths = [
            "panda_link0/panda_joint1",
            "panda_link1/panda_joint2",
            "panda_link2/panda_joint3",
            "panda_link3/panda_joint4",
            "panda_link4/panda_joint5",
            "panda_link5/panda_joint6",
            "panda_link6/panda_joint7",
            "panda_hand/panda_finger_joint1",
            "panda_hand/panda_finger_joint2",
        ]
        # 7 revolute arm joints followed by 2 prismatic finger joints.
        drive_type = ["angular"] * 7 + ["linear"] * 2
        # Arm defaults given in radians, converted to degrees for the drives;
        # the two trailing values are the finger joint targets.
        default_dof_pos = [math.degrees(x) for x in [0.0, -1.0, 0.0, -2.2, 0.0, 2.4, 0.8]] + [0.02, 0.02]
        # Angular gains are scaled by pi/180 to match the degree-based drives.
        stiffness = [400 * np.pi / 180] * 7 + [10000] * 2
        damping = [80 * np.pi / 180] * 7 + [100] * 2
        max_force = [87, 87, 87, 87, 12, 12, 12, 200, 200]
        max_velocity = [math.degrees(x) for x in [2.175, 2.175, 2.175, 2.175, 2.61, 2.61, 2.61]] + [0.2, 0.2]
        for i, dof in enumerate(dof_paths):
            set_drive(
                prim_path=f"{self.prim_path}/{dof}",
                drive_type=drive_type[i],
                target_type="position",
                target_value=default_dof_pos[i],
                stiffness=stiffness[i],
                damping=damping[i],
                max_force=max_force[i],
            )
            # Cap each joint's velocity on the PhysX joint API.
            PhysxSchema.PhysxJointAPI(get_prim_at_path(f"{self.prim_path}/{dof}")).CreateMaxJointVelocityAttr().Set(
                max_velocity[i]
            )

    def set_franka_properties(self, stage, prim):
        """Disable gravity on every rigid-body link found under ``prim``."""
        for link_prim in prim.GetChildren():
            if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
                rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
                rb.GetDisableGravityAttr().Set(True)
| 3,646 |
Python
| 37.797872 | 116 | 0.599835 |
gitLSW/robot-cloud/remnants/parallel-training/parallel_ppo.py
|
import os
import numpy as np
from gym_env_mt import GymEnvMT
from stable_baselines3 import PPO, DDPG
# from stable_baselines3.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines3.common.vec_env import DummyVecEnv, subproc_vec_env
import wandb
from wandb.integration.sb3 import WandbCallback

# Episode length and simulated step frequency shared by all task instances.
MAX_STEPS_PER_EPISODE = 300
SIM_STEP_FREQ_HZ = 60

# Create Isaac environment and open Sim Window
env = GymEnvMT(max_steps = MAX_STEPS_PER_EPISODE,
               sim_s_step_freq = SIM_STEP_FREQ_HZ,
               headless=False,
               experience=f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit')

# Lay out the task scenes on a grid so they do not overlap.
spacing = 5
offsets = []
tasks_per_side_1 = 1
tasks_per_side_2 = 1
NUM_ENVS = tasks_per_side_1 * tasks_per_side_2
for i in range(tasks_per_side_1):
    for j in range(tasks_per_side_2):
        offsets.append([i * spacing, j * spacing, 0])
task_envs = env.init_tasks(offsets, backend="numpy")

def make_env(env):
    # Zero-arg factory as expected by DummyVecEnv; binds `env` via the parameter.
    return lambda: env

env = DummyVecEnv([make_env(task_env) for task_env in task_envs])

# The noise objects for DDPG
# n_actions = env.action_space.shape[-1]
# action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions))
ddpg_config = {
    # 'learning_starts': 0,
    # 'action_noise': action_noise,
    'learning_rate': 0.001,
    # 'tau': 0.005,
    # 'gamma': 0.99,
    # 'learning_starts': 0,
    # 'train_freq': MAX_STEPS_PER_EPISODE * NUM_ENVS, # How many steps until models get updated
    'batch_size': 256,
    # 'buffer_size': MAX_STEPS_PER_EPISODE * NUM_ENVS,
    'verbose': 1
}

name = 'ppo-pack'
run = wandb.init(
    project=name,
    config=ddpg_config,
    sync_tensorboard=True,  # auto-upload sb3's tensorboard metrics
    # monitor_gym=True,  # auto-upload the videos of agents playing the game
    # save_code=True,  # optional
)
ddpg_config['tensorboard_log'] = f"./progress/runs/{run.id}"

model = None
try:
    # Resume from the last checkpoint if one exists.
    model = PPO.load(f"progress/{name}", env, print_system_info=True, custom_objects=ddpg_config)
    # model.set_parameters(params)
except FileNotFoundError:
    # BUG FIX: the original rebuilt the model in a `finally:` block, which runs
    # unconditionally and therefore discarded any checkpoint just loaded above.
    print('Failed to load model')
    model = PPO("MultiInputPolicy", env, **ddpg_config)

while (True):
    # BUG FIX: tb_log_name said 'DDPG' although this script trains PPO.
    model.learn(total_timesteps=MAX_STEPS_PER_EPISODE * 2, log_interval=NUM_ENVS, tb_log_name='PPO',
                callback=WandbCallback(
                    model_save_path=f"models/{run.id}",
                    verbose=2,
                ))
    print('Saving model')
    # BUG FIX: save to the same path PPO.load() reads from so runs can resume
    # (the original saved to "progress/ddpg" but loaded from "progress/ppo-pack").
    model.save(f"progress/{name}")

# NOTE: unreachable while the loop above runs forever; kept for completeness.
run.finish()
print('Finished Training')
# env.close()
| 2,506 |
Python
| 28.845238 | 101 | 0.678771 |
gitLSW/robot-cloud/remnants/parallel-training/pack_task_table.py
|
import os
import math
import random
import numpy as np
from pxr import Gf, UsdLux, Sdf
from gymnasium import spaces
from omni.isaac.core.utils.extensions import enable_extension
# enable_extension("omni.importer.urdf")
enable_extension("omni.isaac.universal_robots")
enable_extension("omni.isaac.sensor")
# from omni.importer.urdf import _urdf
from omni.isaac.sensor import Camera
from omni.isaac.universal_robots.ur10 import UR10
# from omni.isaac.universal_robots.controllers.pick_place_controller import PickPlaceController
from omni.isaac.core.prims import XFormPrim, RigidPrim, GeometryPrim
from omni.isaac.core.materials.physics_material import PhysicsMaterial
from omni.isaac.core.utils.prims import create_prim, get_prim_at_path
from omni.isaac.core.tasks.base_task import BaseTask
from omni.isaac.gym.tasks.rl_task import RLTaskInterface
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.viewports import set_camera_view
from omni.kit.viewport.utility import get_active_viewport
import omni.isaac.core.objects as objs
import omni.isaac.core.utils.numpy.rotations as rot_utils
from omni.isaac.core.utils.rotations import lookat_to_quatf, gf_quat_to_np_array
from omni.physx.scripts.utils import setRigidBody, setStaticCollider, setCollider, addCollisionGroup
# MESH_APPROXIMATIONS = {
# "none": PhysxSchema.PhysxTriangleMeshCollisionAPI,
# "convexHull": PhysxSchema.PhysxConvexHullCollisionAPI,
# "convexDecomposition": PhysxSchema.PhysxConvexDecompositionCollisionAPI,
# "meshSimplification": PhysxSchema.PhysxTriangleMeshSimplificationCollisionAPI,
# "convexMeshSimplification": PhysxSchema.PhysxTriangleMeshSimplificationCollisionAPI,
# "boundingCube": None,
# "boundingSphere": None,
# "sphereFill": PhysxSchema.PhysxSphereFillCollisionAPI,
# "sdf": PhysxSchema.PhysxSDFMeshCollisionAPI,
# }
# Number of initial steps per episode before actions and rewards kick in
# (see PackTask.pre_physics_step / calculate_metrics).
LEARNING_STARTS = 10
# Stage paths below are task-relative; PackTask.__init__ prefixes them with
# "/<task name>/".
ENV_PATH = "World/Env"
ROBOT_PATH = 'World/UR10e'
ROBOT_POS = np.array([0.0, 0.0, 0.0])
LIGHT_PATH = 'World/Light'
LIGHT_OFFSET = np.array([0, 0, 2])  # light placed above the robot base
# 5.45, 3, 0
START_TABLE_PATH = "World/StartTable"
START_TABLE_POS = np.array([0.36, 0.8, 0])
START_TABLE_HEIGHT = 0.6
START_TABLE_CENTER = START_TABLE_POS + np.array([0, 0, START_TABLE_HEIGHT])
DEST_BOX_PATH = "World/DestinationBox"
DEST_BOX_POS = np.array([0, -0.65, 0])
PARTS_PATH = 'World/Parts'
# Parts spawn slightly above the table centre so they settle onto it.
PARTS_SOURCE = START_TABLE_CENTER + np.array([0, 0, 0.05])
# NUM_PARTS = 5
# Square camera frames fed to the policy.
IMG_RESOLUTION = (128, 128)
CAMERA_PATH = 'World/Camera'
CAM_TARGET_OFFSET = (2.5, 2) # Distance and Height
CAM_MEAN_ANGLE = math.pi # Box=math.pi / 2, Table=3 * math.pi / 2
# CAMERA_POS_START = np.array([-2, 2, 2.5])
# CAMERA_POS_DEST = np.array([2, -2, 2.5])
MAX_STEP_PUNISHMENT = 300
# Ideal (position, quaternion) slot per part, in box-relative coordinates
# (PackTask._get_closest_part subtracts the box pose before comparing).
IDEAL_PACKAGING = [([-0.06, -0.19984, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, -0.14044, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, -0.07827, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, -0.01597, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, 0.04664, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, 0.10918, 0.0803], [0.072, 0.99, 0, 0])]
# Seed Env or DDPG will always be the same !!
class PackTask(BaseTask):
    """
    This class sets up a scene and calls a RL Policy, then evaluates the behavior with rewards

    Args:
        offset (Optional[np.ndarray], optional): offset applied to all assets of the task.
        sim_s_step_freq (int): The amount of simulation steps within a SIMULATED second.
    """
    # Class-level defaults for the episode counters; reset() re-initialises
    # these per episode on the instance.
    stage = 0
    step = 0

    def __init__(self, name, max_steps, offset=None, sim_s_step_freq: int = 60) -> None:
        # Per-task prim paths, namespaced under the task name.
        self._env_path = f"/{name}/{ENV_PATH}"
        # BUG FIX: this was `{"/{name}/{LIGHT_PATH}"}` — a one-element set
        # holding a literal (non-formatted) string; use an f-string like the
        # other paths.
        self._light_path = f"/{name}/{LIGHT_PATH}"
        self._robot_path = f"/{name}/{ROBOT_PATH}"
        self._start_table_path = f"/{name}/{START_TABLE_PATH}"
        self._dest_box_path = f"/{name}/{DEST_BOX_PATH}"
        self._parts_path = f"/{name}/{PARTS_PATH}"
        self._camera_path = f"/{name}/{CAMERA_PATH}"
        # self._num_observations = 1
        # self._num_actions = 1
        self._device = "cpu"
        self.num_envs = 1
        # Robot max turning speed is 191 deg/s -> max rotation per sim step.
        self.__joint_rot_max = (191.0 * math.pi / 180) / sim_s_step_freq
        self.max_steps = max_steps
        self.observation_space = spaces.Dict({
            # The NN will see the Robot via a single video feed that can run from one of two camera positions
            # The NN will receive this feed in rgb, depth and image segmented to highlight objects of interest
            'image': spaces.Box(low=0, high=1, shape=(*IMG_RESOLUTION, 7)),
            # The Robot also receives the rotations of all 6 joints and the gripper state
            # the gripper state is -1 when open and 1 when closed
            'vector': spaces.Box(low=-1, high=1, shape=(7,)),
        })
        # The NN outputs the change in rotation for each joint as a fraction of the max rot speed per timestep (=__joint_rot_max)
        self.action_space = spaces.Box(low=-1, high=1, shape=(7,), dtype=float)
        # trigger __init__ of parent class
        BaseTask.__init__(self, name=name, offset=offset)

    def set_up_scene(self, scene) -> None:
        """Populate the scene: light, pick-up table, destination box, UR10
        robot with gripper, one part, and the observation camera."""
        print('SETUP TASK', self.name)
        super().set_up_scene(scene)
        local_assets = os.getcwd() + '/assets'
        # This is the URL from which the Assets are downloaded
        # Make sure you started and connected to your localhost Nucleus Server via Omniverse !!!
        # assets_root_path = get_assets_root_path()
        # _ = XFormPrim(prim_path=self._env_path, position=-np.array([5, 4.5, 0]))
        # warehouse_path = assets_root_path + "/Isaac/Environments/Simple_Warehouse/warehouse_multiple_shelves.usd"
        # add_reference_to_stage(warehouse_path, self._env_path)
        self.light = create_prim(
            '/World/Light_' + self.name,
            "SphereLight",
            position=ROBOT_POS + LIGHT_OFFSET + self._offset,
            attributes={
                "inputs:radius": 0.01,
                "inputs:intensity": 3e7,
                "inputs:color": (1.0, 1.0, 1.0)
            }
        )
        # table_path = assets_root_path + "/Isaac/Environments/Simple_Room/Props/table_low.usd"
        table_path = local_assets + '/table_low.usd'
        self.table = XFormPrim(prim_path=self._start_table_path, position=START_TABLE_POS, scale=[0.5, START_TABLE_HEIGHT, 0.4])
        add_reference_to_stage(table_path, self._start_table_path)
        setRigidBody(self.table.prim, approximationShape='convexHull', kinematic=True) # Kinematic True means immovable
        # self.table = RigidPrim(rim_path=self._start_table_path, name='TABLE')
        self._task_objects[self._start_table_path] = self.table
        # box_path = assets_root_path + "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxA_02.usd"
        box_path = local_assets + '/SM_CardBoxA_02.usd'
        self.box = XFormPrim(prim_path=self._dest_box_path, position=DEST_BOX_POS, scale=[1, 1, 0.4])
        add_reference_to_stage(box_path, self._dest_box_path)
        setRigidBody(self.box.prim, approximationShape='convexDecomposition', kinematic=True) # Kinematic True means immovable
        self._task_objects[self._dest_box_path] = self.box
        # The UR10e has 6 joints, each with a maximum:
        # turning angle of -360 deg to +360 deg
        # turning angle of max speed is 191deg/s
        self.robot = UR10(prim_path=self._robot_path, name='UR16e', position=ROBOT_POS, attach_gripper=True)
        self._task_objects[self._robot_path] = self.robot
        # self.robot.set_joints_default_state(positions=torch.tensor([-math.pi / 2, -math.pi / 2, -math.pi / 2, -math.pi / 2, math.pi / 2, 0]))
        i = 0
        part_usd_path = local_assets + '/draexlmaier_part.usd'
        part_path = f'{self._parts_path}/Part_{i}'
        self.part = XFormPrim(prim_path=part_path, position=PARTS_SOURCE, orientation=[0, 1, 0, 0])
        add_reference_to_stage(part_usd_path, part_path)
        setRigidBody(self.part.prim, approximationShape='convexDecomposition', kinematic=False) # Kinematic False means movable
        self._task_objects[part_path] = self.part
        cam_start_pos = self.__get_cam_pos(ROBOT_POS, *CAM_TARGET_OFFSET, mean_angle=CAM_MEAN_ANGLE)
        self._camera = Camera(
            prim_path=self._camera_path,
            frequency=20,
            resolution=IMG_RESOLUTION
        )
        self._camera.set_focal_length(2.0)
        self.__move_camera(position=cam_start_pos, target=ROBOT_POS)
        self._task_objects[self._camera_path] = self._camera
        # viewport = get_active_viewport()
        # viewport.set_active_camera(self._camera_path)
        # set_camera_view(eye=ROBOT_POS + np.array([1.5, 6, 1.5]), target=ROBOT_POS, camera_prim_path="/OmniverseKit_Persp")
        self._move_task_objects_to_their_frame()

    def reset(self):
        """(Re-)initialise robot and camera, reset the episode counters and
        return the initial observation dict."""
        # super().cleanup()
        # if not self.robot.handles_initialized():
        self.robot.initialize()
        self._camera.initialize()
        self._camera.add_distance_to_image_plane_to_frame() # depth cam
        self._camera.add_instance_id_segmentation_to_frame() # simulated segmentation NN
        robot_pos = ROBOT_POS + self._offset
        cam_start_pos = self.__get_cam_pos(robot_pos, *CAM_TARGET_OFFSET, mean_angle=CAM_MEAN_ANGLE)
        self.__move_camera(position=cam_start_pos, target=robot_pos)
        self.step = 0
        self.stage = 0
        # self.part.set_world_pose(PARTS_SOURCE + self._offset)
        # 6 joint angles plus the gripper flag (-1 = open).
        default_pose = np.array([-math.pi / 2, -math.pi / 2, -math.pi / 2, -math.pi / 2, math.pi / 2, 0, -1])
        self.robot.gripper.open()
        self.robot.set_joint_positions(positions=default_pose[0:6])
        return {
            'image': np.zeros((*IMG_RESOLUTION, 7)),
            'vector': default_pose
        }

    def __move_camera(self, position, target):
        """Place the camera at ``position`` looking at ``target`` (z-up)."""
        # USD Frame flips target and position, so they have to be flipped here
        quat = gf_quat_to_np_array(lookat_to_quatf(camera=Gf.Vec3f(*target),
                                                   target=Gf.Vec3f(*position),
                                                   up=Gf.Vec3f(0, 0, 1)))
        self._camera.set_world_pose(position=position, orientation=quat, camera_axes='usd')

    def __get_cam_pos(self, center, distance, height, mean_angle = None):
        """Sample a camera position on a circle of radius ``distance`` around
        ``center`` at ``height``; the angle is drawn from a normal distribution
        around ``mean_angle``, or uniformly when no mean is given."""
        angle = None
        if mean_angle:
            angle = np.random.normal(mean_angle, math.sqrt(math.pi / 16)) # Normal distribution, sd=sqrt(pi/16)
        else:
            angle = random.random() * 2 * math.pi
        pos = np.array([distance, 0])
        rot_matr = np.array([[np.cos(angle), -np.sin(angle)],
                             [np.sin(angle), np.cos(angle)]])
        pos = np.matmul(pos, rot_matr)
        pos = np.array([*pos, height])
        return center + pos

    def get_observations(self):
        """Build the observation dict: a 7-channel image (rgb + depth + one-hot
        segmentation) and the 7-dim robot state vector."""
        frame = self._camera.get_current_frame()
        img_rgba = frame['rgba'] # Shape: (Width, Height, 4)
        img_rgb = img_rgba[:, :, :3] / 255.0 # Remove alpha from rgba and scale between 0-1
        img_depth = frame['distance_to_image_plane'] # Shape: (Width, Height)
        if img_depth is not None:
            img_depth = np.clip(img_depth, 0, 2) / 2.0 # Clip depth at 2m and scale between 0-1
            img_depth = img_depth[:, :, np.newaxis]
        else:
            img_depth = np.zeros((*IMG_RESOLUTION, 1))
        # Segmentation: one channel each for robot, parts and destination box.
        one_hot_img_seg = np.zeros((*IMG_RESOLUTION, 3))
        img_seg_dict = frame['instance_id_segmentation']
        if img_seg_dict:
            img_seg_info_dict = img_seg_dict['info']['idToLabels'] # Dict: [pixel label: prim path]
            img_seg = img_seg_dict['data'] # Shape: (Width, Height)
            # Vectorised One-Hot-Encoding
            for label, path in img_seg_info_dict.items():
                label = int(label)
                mask = (img_seg == label) # bool matrix from element-wise comparison
                if path == self._robot_path:
                    one_hot_img_seg[:, :, 0] = mask
                elif path.startswith(self._parts_path):
                    one_hot_img_seg[:, :, 1] = mask
                elif path == self._dest_box_path:
                    one_hot_img_seg[:, :, 2] = mask
        # TODO: CHECK IF get_joint_positions ACCURATELY HANDLES ROTATIONS ABOVE 360deg
        # BUG FIX: the original computed `positions / 2 * math.pi` (i.e. *pi/2);
        # mapping the +/-2*pi joint range into the declared [-1, 1] observation
        # box requires dividing by 2*pi.
        robot_state = np.append(self.robot.get_joint_positions() / (2 * math.pi), 2 * float(self.robot.gripper.is_closed()) - 1)
        return {
            'image': np.concatenate([img_rgb, img_depth, one_hot_img_seg], axis=-1),
            'vector': robot_state
        }

    def pre_physics_step(self, actions) -> None:
        """Apply the policy action: per-joint rotation deltas (fractions of the
        max per-step rotation) plus a gripper open/close command."""
        if self.step < LEARNING_STARTS:
            return
        # Rotate Joints
        joint_rots = self.robot.get_joint_positions()
        joint_rots += np.array(actions[0:6]) * self.__joint_rot_max
        self.robot.set_joint_positions(positions=joint_rots)
        # Open or close the gripper only on a decisive (+/-0.9) command.
        gripper = self.robot.gripper
        is_closed = gripper.is_closed()
        gripper_action = actions[6]
        if 0.9 < gripper_action and not is_closed:
            gripper.close()
        elif gripper_action < -0.9 and is_closed:
            gripper.open()

    def calculate_metrics(self):
        """Compute and return ``(reward, done)`` for the current step.

        Stage 0: drive the gripper towards the part (reward = -distance).
        Stage 1: carry the part to the destination box; on arrival punish the
        placement error against the closest IDEAL_PACKAGING slot and finish.
        Dropping the part or running out of steps ends the episode with a
        large punishment.
        """
        gripper_pos = self.robot.gripper.get_world_pose()[0]
        self.step += 1
        if self.step < LEARNING_STARTS:
            return 0, False
        done = False
        reward = 0
        part_pos, part_rot = self.part.get_world_pose()
        if self.stage == 0:
            gripper_to_part = np.linalg.norm(part_pos - gripper_pos) * 100 # In cm
            reward -= gripper_to_part
            if START_TABLE_HEIGHT + 0.03 < part_pos[2]: # Part was picked up
                reward += MAX_STEP_PUNISHMENT
                self.stage = 1
        elif self.stage == 1:
            # BUG FIX: the original read self.part.get_world_pose() here, so the
            # part-to-destination distance was always 0; measure against the
            # destination box instead.
            dest_box_pos = self.box.get_world_pose()[0]
            part_to_dest = np.linalg.norm(dest_box_pos - part_pos) * 100 # In cm
            print('PART TO BOX:', part_to_dest)
            if 10 < part_to_dest:
                reward -= part_to_dest
            else: # Part reached box
                # reward += (100 + self.max_steps - self.step) * MAX_STEP_PUNISHMENT
                ideal_part = self._get_closest_part(part_pos)
                # Compare in box-relative coordinates, matching IDEAL_PACKAGING.
                rel_part_pos = part_pos - self.box.get_world_pose()[0]
                pos_error = np.linalg.norm(rel_part_pos - ideal_part[0]) * 100
                rot_error = ((part_rot - ideal_part[1])**2).mean()
                print('PART REACHED BOX:', part_to_dest)
                reward -= pos_error + rot_error
                done = True
        # Part was dropped or time ran out -> punish and end the episode.
        if not done and (part_pos[2] < 0.1 or self.max_steps <= self.step):
            reward -= (100 + self.max_steps - self.step) * MAX_STEP_PUNISHMENT
            done = True
        if done:
            print('END REWARD TASK', self.name, ':', reward)
        return reward, done

    def _get_closest_part(self, pos):
        """Return the IDEAL_PACKAGING slot closest to the given WORLD position.

        Works on a box-relative copy of ``pos``; unlike the original this does
        NOT mutate the caller's array in place (the original's `pos -= ...`
        silently rewrote the caller's `part_pos`).
        """
        rel_pos = pos - self.box.get_world_pose()[0]
        closest_part = None
        min_dist = 10000000
        for part in IDEAL_PACKAGING:
            dist = np.linalg.norm(part[0] - rel_pos)
            if dist < min_dist:
                closest_part = part
                min_dist = dist
        return closest_part
| 16,079 |
Python
| 41.88 | 187 | 0.603334 |
gitLSW/robot-cloud/remnants/parallel-training/train_dreamer_mt.py
|
import os
import dreamerv3
from dreamerv3 import embodied
from embodied.envs import from_gym
from gym_env_mt import GymEnvMT
from functools import partial as bind

name = "test"
MAX_STEPS_PER_EPISODE = 300

# See configs.yaml for all options.
config = embodied.Config(dreamerv3.configs['defaults'])
config = config.update(dreamerv3.configs['small'])
# config = config.update(dreamerv3.configs['medium'])
# config = config.update(dreamerv3.configs['large'])
#config = config.update(dreamerv3.configs['xlarge'])
config = config.update({
    'logdir': './logdir/' + name,
    'run.train_ratio': 64,
    'run.log_every': 30,  # Seconds
    'batch_size': 8,
    'batch_length': 16,
    'jax.prealloc': False,
    'encoder.mlp_keys': 'vector',
    'decoder.mlp_keys': 'vector',
    'encoder.cnn_keys': 'image',
    'decoder.cnn_keys': 'image',
    'run.eval_every' : 10000,
    #'jax.platform': 'cpu',
})
config = embodied.Flags(config).parse()

logdir = embodied.Path(config.logdir)
step = embodied.Counter()
logger = embodied.Logger(step, [
    embodied.logger.TerminalOutput(),
    embodied.logger.JSONLOutput(logdir, 'metrics.jsonl'),
    embodied.logger.TensorBoardOutput(logdir),
    #embodied.logger.WandBOutput(r".*",logdir, config),
    # WandBOutputMy(r".*",logdir, config, name),
    # embodied.logger.MLFlowOutput(logdir.name),
])

# Create Isaac environment and open Sim Window
env = GymEnvMT(max_steps = MAX_STEPS_PER_EPISODE,
               sim_s_step_freq = 60,
               headless=False,
               experience=f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit')
# 3x3 grid of task offsets so the scenes do not overlap.
spacing = 5
offsets = [[spacing, spacing, 0], [spacing, 0, 0], [spacing, -spacing, 0],
           [0, spacing, 0], [0, 0, 0], [0, -spacing, 0],
           [-spacing, spacing, 0], [-spacing, 0, 0], [-spacing, -spacing, 0]]
task_envs = env.init_tasks(offsets, backend="numpy")
# env.reset()

def make_env(task_env):
    # Wrap a raw task env for dreamerv3: gym adapter + standard wrappers.
    task_env = from_gym.FromGym(task_env, obs_key='image')
    task_env = dreamerv3.wrap_env(task_env, config)
    return task_env

ctors = []
for task_env in task_envs:
    # BUG FIX: the original used `ctor = lambda: make_env(task_env)`, which
    # late-binds the loop variable — every ctor would have built the SAME
    # (last) task env.  partial() binds the current env at definition time.
    ctor = bind(make_env, task_env)
    ctor = bind(embodied.Parallel, ctor, config.envs.parallel)
    ctors.append(ctor)
task_envs = [ctor() for ctor in ctors]
env = embodied.BatchEnv(task_envs, parallel=True)

print('Starting Training...')
# env.act_space.discrete = True
# act_space = { 'action': env.act_space }
agent = dreamerv3.Agent(env.obs_space, env.act_space, step, config)
replay = embodied.replay.Uniform(config.batch_length, config.replay_size, logdir / 'replay')
args = embodied.Config(**config.run, logdir=config.logdir, batch_steps=config.batch_size * config.batch_length)
print(args)
embodied.run.train(agent, env, replay, logger, args)
print('Finished Training')
# env.close()
| 2,766 |
Python
| 33.160493 | 111 | 0.677151 |
gitLSW/robot-cloud/remnants/parallel-training/parallel_ddpg.py
|
import os
import numpy as np
from gym_env_mt import GymEnvMT
from stable_baselines3 import DDPG
from stable_baselines3.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines3.common.vec_env import DummyVecEnv, subproc_vec_env
import wandb
from wandb.integration.sb3 import WandbCallback

# Episode length and simulated step frequency shared by all task instances.
MAX_STEPS_PER_EPISODE = 300
SIM_STEP_FREQ_HZ = 60

# Create Isaac environment and open Sim Window
env = GymEnvMT(max_steps = MAX_STEPS_PER_EPISODE,
               sim_s_step_freq = SIM_STEP_FREQ_HZ,
               headless=False,
               experience=f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit')

# Lay out the task scenes on a grid so they do not overlap.
spacing = 5
offsets = []
tasks_per_side_1 = 50
tasks_per_side_2 = 50

# USE GRIP CLONER INSTEAD !!!
# CHECK THSI FROM OmniIsaacGymEnvs RLTask:
# self._cloner = GridCloner(spacing=self._env_spacing)
# self._cloner.define_base_env(self.default_base_env_path)
# stage = omni.usd.get_context().get_stage()
# UsdGeom.Xform.Define(stage, self.default_zero_env_path)
#  if self._task_cfg["sim"].get("add_ground_plane", True):
#     self._ground_plane_path = "/World/defaultGroundPlane"
#     collision_filter_global_paths.append(self._ground_plane_path)
#     scene.add_default_ground_plane(prim_path=self._ground_plane_path)
# prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs)
# self._env_pos = self._cloner.clone(
#     source_prim_path="/World/envs/env_0", prim_paths=prim_paths, replicate_physics=replicate_physics, copy_from_source=copy_from_source
# )
# self._env_pos = torch.tensor(np.array(self._env_pos), device=self._device, dtype=torch.float)
# if filter_collisions:
#     self._cloner.filter_collisions(
#         self._env.world.get_physics_context().prim_path,
#         "/World/collisions",
#         prim_paths,
#         collision_filter_global_paths,
#     )
from omni.isaac.cloner import GridCloner

NUM_ENVS = tasks_per_side_1 * tasks_per_side_2
for i in range(tasks_per_side_1):
    for j in range(tasks_per_side_2):
        offsets.append([i * spacing, j * spacing, 0])
task_envs = env.init_tasks(offsets, backend="numpy")

def make_env(env):
    # Zero-arg factory as expected by DummyVecEnv; binds `env` via the parameter.
    return lambda: env

env = DummyVecEnv([make_env(task_env) for task_env in task_envs])

# The noise objects for DDPG
n_actions = env.action_space.shape[-1]
action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions))

ddpg_config = {
    # BUG FIX: 'learning_starts' appeared twice in this dict literal; the
    # duplicate key silently overwrote the first one — keep a single entry.
    'learning_starts': 0,
    'action_noise': action_noise,
    'learning_rate': 0.001,
    'tau': 0.005,
    'gamma': 0.99,
    'train_freq': MAX_STEPS_PER_EPISODE * NUM_ENVS, # How many steps until models get updated
    'batch_size': 256,
    'buffer_size': MAX_STEPS_PER_EPISODE * NUM_ENVS,
    'verbose': 1
}

name = 'ddpg-pack'
run = wandb.init(
    project=name,
    config=ddpg_config,
    sync_tensorboard=True,  # auto-upload sb3's tensorboard metrics
    # monitor_gym=True,  # auto-upload the videos of agents playing the game
    # save_code=True,  # optional
)
ddpg_config['tensorboard_log'] = f"./progress/runs/{run.id}"

model = None
try:
    # Resume from the last checkpoint if one exists.
    model = DDPG.load(f"progress/{name}", env, print_system_info=True, custom_objects=ddpg_config)
    # model.set_parameters(params)
except FileNotFoundError:
    # BUG FIX: the original rebuilt the model in a `finally:` block, which runs
    # unconditionally and therefore discarded any checkpoint just loaded above.
    print('Failed to load model')
    model = DDPG("MultiInputPolicy", env, **ddpg_config)

while (True):
    model.learn(total_timesteps=MAX_STEPS_PER_EPISODE * 2, log_interval=NUM_ENVS, tb_log_name='DDPG',
                callback=WandbCallback(
                    model_save_path=f"models/{run.id}",
                    verbose=2,
                ))
    print('Saving model')
    # BUG FIX: save to the same path DDPG.load() reads from so runs can resume
    # (the original saved to "progress/ddpg" but loaded from "progress/ddpg-pack").
    model.save(f"progress/{name}")

# NOTE: unreachable while the loop above runs forever; kept for completeness.
run.finish()
print('Finished Training')
# env.close()
| 3,818 |
Python
| 32.79646 | 145 | 0.660555 |
gitLSW/robot-cloud/remnants/parallel-training/run_parallel_sim.py
|
import os
import numpy as np
from gym_env_mt import GymEnvMT

MAX_STEPS_PER_EPISODE = 500

# Create Isaac environment and open Sim Window
env = GymEnvMT(max_steps = MAX_STEPS_PER_EPISODE,
               sim_s_step_freq = 60,
               headless=False,
               experience=f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit')

# 3x3 grid of task offsets (same layout/order as listing them explicitly).
spacing = 5
offsets = [[dx * spacing, dy * spacing, 0] for dx in (1, 0, -1) for dy in (1, 0, -1)]
task_envs = env.init_tasks(offsets, backend="numpy")

# Drive every task env forever with a constant all-ones action.
while True:
    for task_env in task_envs:
        task_env.step(np.ones(7))
| 681 |
Python
| 33.099998 | 80 | 0.60793 |
gitLSW/robot-cloud/remnants/parallel-training/pack_task_easy.py
|
import os
import math
import random
import numpy as np
from pxr import Gf, UsdLux, Sdf
from gymnasium import spaces
from omni.isaac.core.utils.extensions import enable_extension
# enable_extension("omni.importer.urdf")
enable_extension("omni.isaac.universal_robots")
enable_extension("omni.isaac.sensor")
# from omni.importer.urdf import _urdf
from omni.isaac.sensor import Camera
from omni.isaac.universal_robots.ur10 import UR10
from omni.isaac.universal_robots import KinematicsSolver
# from omni.isaac.universal_robots.controllers.pick_place_controller import PickPlaceController
import omni.isaac.core.utils.prims as prims_utils
from omni.isaac.core.prims import XFormPrim, RigidPrim, GeometryPrim
from omni.isaac.core.materials.physics_material import PhysicsMaterial
from omni.isaac.core.utils.prims import create_prim, get_prim_at_path
from omni.isaac.core.tasks.base_task import BaseTask
from omni.isaac.gym.tasks.rl_task import RLTaskInterface
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.viewports import set_camera_view
from omni.kit.viewport.utility import get_active_viewport
import omni.isaac.core.objects as objs
import omni.isaac.core.utils.numpy.rotations as rot_utils
from omni.isaac.core.utils.rotations import lookat_to_quatf, gf_quat_to_np_array
from omni.physx.scripts.utils import setRigidBody, setStaticCollider, setCollider, addCollisionGroup
from scipy.spatial.transform import Rotation as R
from pyquaternion import Quaternion
# MESH_APPROXIMATIONS = {
# "none": PhysxSchema.PhysxTriangleMeshCollisionAPI,
# "convexHull": PhysxSchema.PhysxConvexHullCollisionAPI,
# "convexDecomposition": PhysxSchema.PhysxConvexDecompositionCollisionAPI,
# "meshSimplification": PhysxSchema.PhysxTriangleMeshSimplificationCollisionAPI,
# "convexMeshSimplification": PhysxSchema.PhysxTriangleMeshSimplificationCollisionAPI,
# "boundingCube": None,
# "boundingSphere": None,
# "sphereFill": PhysxSchema.PhysxSphereFillCollisionAPI,
# "sdf": PhysxSchema.PhysxSDFMeshCollisionAPI,
# }
LEARNING_STARTS = 10  # warm-up env steps before policy actions take effect
ENV_PATH = "World/Env"
FALLEN_PART_THRESHOLD = 0.2  # z height (m) below which a part counts as dropped
ROBOT_PATH = 'World/UR10e'
ROBOT_POS = np.array([0.0, 0.0, FALLEN_PART_THRESHOLD])
LIGHT_PATH = 'World/Light'
LIGHT_OFFSET = np.array([0, 0, 2])  # light 2 m above the robot base
DEST_BOX_PATH = "World/DestinationBox"
DEST_BOX_POS = np.array([0, -0.65, FALLEN_PART_THRESHOLD])
PART_PATH = 'World/Part'
PART_SOURCE = DEST_BOX_POS + np.array([0, 0, 1.4])  # part spawn point above the box
# NUM_PARTS = 5
PART_PILLAR_PATH = "World/Pillar"
MAX_STEP_PUNISHMENT = 300  # terminal-failure reward penalty magnitude
# Ideal ([x, y, z] position, quaternion) pose for each part slot inside the box.
IDEAL_PACKAGING = [([-0.06, -0.19984, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, -0.14044, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, -0.07827, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, -0.01597, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, 0.04664, 0.0803], [0.072, 0.99, 0, 0]),
                   ([-0.06, 0.10918, 0.0803], [0.072, 0.99, 0, 0])]
NUMBER_PARTS = len(IDEAL_PACKAGING)  # number of slots the box can hold
# Seed Env or DDPG will always be the same !!
class PackTask(BaseTask):
    """Single-robot packing task: a UR10e must place parts into a box.

    The task builds its own scene (robot, destination box, part, support
    pillar), produces observations relative to an ideal packing layout and
    scores actions with a dense deviation penalty.

    Args:
        name: Unique task name; also prefixes all prim paths of this task.
        max_steps: Episode step budget; exceeding it terminates the episode.
        offset (Optional[np.ndarray]): offset applied to all assets of the task.
        sim_s_step_freq (int): simulation steps within a SIMULATED second.
    """

    def __init__(self, name, max_steps, offset=None, sim_s_step_freq: int = 60) -> None:
        self._env_path = f"/{name}/{ENV_PATH}"
        # BUG FIX: this was `{"/{name}/{LIGHT_PATH}"}` — a set containing the
        # literal text (missing f-prefix) instead of the formatted prim path.
        self._light_path = f"/{name}/{LIGHT_PATH}"
        self._robot_path = f"/{name}/{ROBOT_PATH}"
        self._dest_box_path = f"/{name}/{DEST_BOX_PATH}"
        self._part_path = f"/{name}/{PART_PATH}"
        self._pillar_path = f"/{name}/{PART_PILLAR_PATH}"

        self._device = "cpu"
        self.num_envs = 1

        # Per-instance state. These were previously class attributes; the
        # mutable `placed_parts = []` in particular was shared between all
        # parallel task instances.
        self.kinematics_solver = None
        self.placed_parts = []
        self.step = 0
        self.max_steps = max_steps

        # Robot's fastest joint turns at 191 deg/s; convert to rad per sim step.
        self.__joint_rot_max = (191.0 * math.pi / 180) / sim_s_step_freq

        self.observation_space = spaces.Dict({
            'gripper_closed': spaces.Discrete(2),
            # Pos and rot distance of each part currently placed in the box,
            # compared to the currently gripped part.
            'box_state': spaces.Box(low=-3, high=3, shape=(NUMBER_PARTS, 2)),
            'part_state': spaces.Box(low=-3, high=3, shape=(6,))
        })

        # Delta gripper pose (3 pos + 3 euler rot) plus gripper open/close flag.
        self.action_space = spaces.Box(low=-1, high=1, shape=(7,), dtype=float)

        # trigger __init__ of parent class
        BaseTask.__init__(self, name=name, offset=offset)

    def set_up_scene(self, scene) -> None:
        """Builds robot, destination box, part and support pillar prims."""
        print('SETUP TASK', self.name)
        super().set_up_scene(scene)
        local_assets = os.getcwd() + '/assets'

        # Destination box: kinematic rigid body (kinematic=True means immovable).
        box_path = local_assets + '/SM_CardBoxA_02.usd'
        self.box = XFormPrim(prim_path=self._dest_box_path, position=DEST_BOX_POS, scale=[1, 1, 0.4])
        add_reference_to_stage(box_path, self._dest_box_path)
        setRigidBody(self.box.prim, approximationShape='convexDecomposition', kinematic=True)
        self._task_objects[self._dest_box_path] = self.box

        # UR10e arm with gripper. Each of the 6 joints covers -360..+360 deg
        # at a maximum speed of 191 deg/s.
        self.robot = UR10(prim_path=self._robot_path, name='UR10', position=ROBOT_POS, attach_gripper=True)
        self._task_objects[self._robot_path] = self.robot

        # Pillar that props the part up under the gripper until it is grasped;
        # parked far below the ground until reset() positions it.
        self.part_pillar = objs.FixedCuboid(
            name=self._pillar_path,
            prim_path=self._pillar_path,
            position=[0, 0, -100],
            scale=np.array([1, 1, 1])
        )
        scene.add(self.part_pillar)
        self._task_objects[self._pillar_path] = self.part_pillar

        # The part to pack: dynamic rigid body.
        part_usd_path = local_assets + '/draexlmaier_part.usd'
        self.part = XFormPrim(prim_path=self._part_path, position=PART_SOURCE, orientation=[0, 1, 0, 0])
        add_reference_to_stage(part_usd_path, self._part_path)
        setRigidBody(self.part.prim, approximationShape='convexDecomposition', kinematic=False)
        self._task_objects[self._part_path] = self.part

        self._move_task_objects_to_their_frame()

    def reset(self):
        """Re-poses robot and part and returns the initial observation dict."""
        if not self.robot.handles_initialized:
            self.robot.initialize()
        default_pose = np.array([math.pi / 2, -math.pi / 2, -math.pi / 2, -math.pi / 2, math.pi / 2, 0])
        self.robot.set_joint_positions(positions=default_pose)
        if not self.kinematics_solver:
            self.kinematics_solver = KinematicsSolver(robot_articulation=self.robot, attach_gripper=True)
        self.step = 0

        # Park the part right below the gripper, supported by the pillar.
        gripper_pos = np.array(self.robot.gripper.get_world_pose()[0]) - np.array([0, 0, 0.25])
        self.part_pillar.set_world_pose([gripper_pos[0], gripper_pos[1], gripper_pos[2] / 2])
        self.part_pillar.set_local_scale([1, 1, gripper_pos[2]])
        self.part.set_world_pose(gripper_pos, [0, 1, 0, 0])

        box_state, part_state = self._part_observation()
        return {
            'gripper_closed': False,
            'box_state': box_state,
            'part_state': part_state
        }

    def _part_observation(self):
        """Box state plus the gripped part's pose error versus its ideal slot."""
        box_state, current_ideal_pose = self.compute_box_state()
        part_pos, part_rot = self.part.get_world_pose()
        part_pos -= self.box.get_world_pose()[0]
        part_rot_euler = R.from_quat(part_rot).as_euler('xyz', degrees=False)
        ideal_rot_euler = R.from_quat(current_ideal_pose[1]).as_euler('xyz', degrees=False)
        part_pos_diff = current_ideal_pose[0] - part_pos
        part_rot_diff = ideal_rot_euler - part_rot_euler
        return box_state, np.concatenate((part_pos_diff, part_rot_diff), axis=0)

    def get_observations(self):
        """Observation dict for the current simulation state."""
        box_state, part_state = self._part_observation()
        return {
            'gripper_closed': self.robot.gripper.is_closed(),
            'box_state': box_state,
            'part_state': part_state
        }

    def compute_box_state(self):
        """Distance of every box slot to its best-matching part.

        Returns:
            (box_state, current_ideal_pose): box_state is a NUMBER_PARTS x 2
            list of [position distance, rotation distance]; unfilled slots are
            reported as [3, pi]. current_ideal_pose is the ideal (pos, quat)
            slot greedily assigned to the currently handled part.
        """
        box_state = []
        ideal_selection = IDEAL_PACKAGING.copy()
        parts = self.placed_parts + [self.part]
        current_ideal_pose = None

        for i in range(NUMBER_PARTS):
            if len(parts) <= i:
                # Slot not filled yet: maximal position and rotation penalty.
                box_state.append([3, math.pi])
                continue
            part = parts[i]
            part_pos, part_rot = part.get_world_pose()
            part_pos -= self.box.get_world_pose()[0]
            # Greedily match this part to the nearest still-free ideal slot.
            ideal_part = None
            min_dist = 10000000
            for sel_part in ideal_selection:
                dist = np.linalg.norm(sel_part[0] - part_pos)
                if dist < min_dist:
                    ideal_part = sel_part
                    min_dist = dist
            if i == len(parts) - 1:
                current_ideal_pose = ideal_part
            ideal_selection.remove(ideal_part)
            rot_dist = _shortest_rot_dist(part_rot, ideal_part[1])
            box_state.append([min_dist, rot_dist])

        return box_state, current_ideal_pose

    def pre_physics_step(self, actions) -> None:
        """Applies one policy action via inverse kinematics.

        During the first LEARNING_STARTS steps the action is ignored; the
        gripper closes one step before the warm-up ends so the part is
        grasped when learning begins.
        """
        gripper = self.robot.gripper
        if self.step == LEARNING_STARTS - 1:
            gripper.close()
            return
        elif self.step < LEARNING_STARTS:
            return

        gripper_pos = actions[0:3]
        gripper_rot_euler = actions[3:6]
        gripper_action = actions[6]  # open/close flag; currently unused (see below)
        gripper_rot = R.from_euler('xyz', gripper_rot_euler, degrees=False).as_quat()
        movement, success = self.kinematics_solver.compute_inverse_kinematics(gripper_pos, gripper_rot)
        if success:
            self.robot.apply_action(movement)
        # Gripper open/close is intentionally disabled in this easy variant:
        # is_closed = gripper.is_closed()
        # if 0.9 < gripper_action and not is_closed:
        #     gripper.close()
        # elif gripper_action < -0.9 and is_closed:
        #     gripper.open()

    def calculate_metrics(self):
        """Returns (reward, done) for the current step.

        Terminates with -MAX_STEP_PUNISHMENT when the part falls, the step
        budget runs out, or any placed part has tipped over; otherwise the
        reward is the negative squared deviation from the ideal packing.
        """
        self.step += 1
        part_pos = self.part.get_world_pose()[0]

        any_flipped = False
        for placed in self.placed_parts:
            if _is_flipped(placed.get_world_pose()[1]):
                any_flipped = True
                break

        if part_pos[2] < FALLEN_PART_THRESHOLD or self.max_steps < self.step or any_flipped:
            return -MAX_STEP_PUNISHMENT, True

        box_state, _ = self.compute_box_state()
        box_deviation = np.sum(np.square(box_state))
        return -box_deviation, False
def _is_flipped(q1):
"""
Bestimmt, ob die Rotation von q0 zu q1 ein "Umfallen" darstellt,
basierend auf einem Winkel größer als 60 Grad zwischen der ursprünglichen
z-Achse und ihrer Rotation.
:param q0: Ursprüngliches Quaternion.
:param q1: Neues Quaternion.
:return: True, wenn der Winkel größer als 60 Grad ist, sonst False.
"""
q0 = np.array([0, 1, 0, 0])
# Initialer Vektor, parallel zur z-Achse
v0 = np.array([0, 0, 1])
# Konvertiere Quaternions in Rotation-Objekte
rotation0 = R.from_quat(q0)
rotation1 = R.from_quat(q1)
# Berechne die relative Rotation von q0 zu q1
q_rel = rotation1 * rotation0.inv()
# Berechne den rotierten Vektor v1
v1 = q_rel.apply(v0)
# Berechne den Winkel zwischen v0 und v1
cos_theta = np.dot(v0, v1) / (np.linalg.norm(v0) * np.linalg.norm(v1))
angle = np.arccos(np.clip(cos_theta, -1.0, 1.0)) * 180 / np.pi
# Prüfe, ob der Winkel größer als 60 Grad ist
return angle > 60
def _shortest_rot_dist(quat_1, quat_2):
    """Absolute angular distance (radians) between two orientations."""
    return Quaternion.absolute_distance(Quaternion(quat_1), Quaternion(quat_2))
| 16,525 |
Python
| 38.917874 | 185 | 0.609682 |
gitLSW/robot-cloud/remnants/parallel-training/gym_env_mt.py
|
import carb
import uuid
from omni.isaac.gym.vec_env import VecEnvBase
import gymnasium as gym
class GymTaskEnv(gym.Env):
    """Facade that exposes a single task as a standalone Gym environment.

    All actual stepping and resetting is delegated to the shared multi-task
    environment, which owns the simulation world.
    """

    def __init__(self, task, gymEnvMT) -> None:
        self._task = task
        self._env = gymEnvMT
        self.id = task.name
        # Mirror the task's spaces so Gym wrappers can introspect them.
        self.observation_space = task.observation_space
        self.action_space = task.action_space

    def reset(self, seed=None, options=None):
        """Resets only this task inside the shared world."""
        return self._env.reset(self._task, seed)

    def step(self, actions):
        """Steps only this task; physics advances once all tasks have stepped."""
        return self._env.step(actions, self._task)
class GymEnvMT(VecEnvBase):
    # NOTE(review): class-level mutable lists are shared across instances;
    # this only works because a single GymEnvMT is ever created — confirm.
    _tasks = []
    _stepped_tasks = []
    """
    This class handles the interaction between the different GymTaskEnvs and Isaac.
    """

    def __init__(
        self,
        headless: bool,
        sim_device: int = 0,
        enable_livestream: bool = False,
        enable_viewport: bool = False,
        launch_simulation_app: bool = True,
        experience: str = None,
        sim_s_step_freq: float = 60.0,
        max_steps: int = 2000
    ) -> None:
        """Initializes RL and task parameters.

        Args:
            headless (bool): Whether to run training headless.
            sim_device (int): GPU device ID for running physics simulation. Defaults to 0.
            enable_livestream (bool): Whether to enable running with livestream.
            enable_viewport (bool): Whether to enable rendering in headless mode.
            launch_simulation_app (bool): Whether to launch the simulation app (required if launching from python). Defaults to True.
            experience (str): Path to the desired kit app file. Defaults to None, which will automatically choose the most suitable app file.
            sim_s_step_freq (float): Simulation steps per simulated second; the rendering dt is its inverse.
            max_steps (int): Episode step budget handed to every task.
        """
        self.max_steps = max_steps
        self.rendering_dt = 1 / sim_s_step_freq
        super().__init__(headless, sim_device, enable_livestream, enable_viewport, launch_simulation_app, experience)

    def step(self, actions, task):
        """Basic implementation for stepping simulation.
        Can be overriden by inherited Env classes
        to satisfy requirements of specific RL libraries. This method passes actions to task
        for processing, steps simulation, and computes observations, rewards, and resets.

        Args:
            actions (Union[numpy.ndarray, torch.Tensor]): Actions buffer from policy.
            task: The task (registered via init_tasks) these actions belong to.

        Returns:
            observations(Union[numpy.ndarray, torch.Tensor]): Buffer of observation data.
            rewards(Union[numpy.ndarray, torch.Tensor]): Buffer of rewards data.
            dones(Union[numpy.ndarray, torch.Tensor]): Buffer of resets/dones data.
            info(dict): Dictionary of extras data.
        """
        if task.name in self._stepped_tasks:
            # Each task may only step once per physics tick; a second call
            # before all sibling tasks have stepped is a programming error.
            raise ValueError(f"Task {task.name} was already stepped in this timestep")
        self._stepped_tasks.append(task.name)
        task.pre_physics_step(actions)
        # Physics advances only after every registered task queued its action.
        if (len(self._stepped_tasks) == len(self._tasks)):
            self._world.step(render=self._render)
            self._stepped_tasks = []
            self.sim_frame_count += 1
        if not self._world.is_playing():
            self.close()
        info = {}
        observations = task.get_observations()
        rewards, done = task.calculate_metrics()
        truncated = done * 0  # NOTE(review): truncation is never reported
        return observations, rewards, done, truncated, info

    def reset(self, task, seed=None):
        """Resets the task and updates observations.

        Args:
            task: The task to reset.
            seed (Optional[int]): Seed.

        Returns:
            observations(Union[numpy.ndarray, torch.Tensor]): Buffer of observation data.
            # info(dict): Dictionary of extras data.
        """
        if seed is not None:
            print('RESET GRANDCHILD CLASS UNTESTED')
            seed = self.seed(seed)
            # Skips VecEnvBase.reset and calls gym.Env.reset directly to seed
            # the RNGs only — assumes __bases__[0] is VecEnvBase; TODO confirm.
            super(GymEnvMT.__bases__[0], self).reset(seed=seed)
        obs = task.reset()
        info = {}
        # Cannot advance world as resets can happen at any time
        # self._world.step(render=self._render)
        return obs, info  # np.zeros(self.observation_space)

    # Guard so that the world and tasks are only built once.
    tasks_initialized = False

    def init_tasks(self, offsets, backend="numpy", sim_params=None, init_sim=True) -> None:
        """Creates a World object and adds one PackTask per offset.
        Initializes and registers the tasks to the environment interface.
        Triggers task start-up.

        Args:
            offsets: One spatial offset per task to create.
            backend (str): Backend to use for task. Can be "numpy" or "torch". Defaults to "numpy".
            sim_params (dict): Simulation parameters for physics settings. Defaults to None.
            init_sim (Optional[bool]): Automatically starts simulation. Defaults to True.

        Returns:
            A list with one GymTaskEnv facade per created task.
        """
        if self.tasks_initialized:
            return [GymTaskEnv(task, self) for task in self._tasks]
        else:
            self.tasks_initialized = True

        from omni.isaac.core.world import World

        # parse device based on sim_param settings
        if sim_params and "sim_device" in sim_params:
            device = sim_params["sim_device"]
        else:
            device = "cpu"
            physics_device_id = carb.settings.get_settings().get_as_int("/physics/cudaDevice")
            gpu_id = 0 if physics_device_id < 0 else physics_device_id
            if sim_params and "use_gpu_pipeline" in sim_params:
                # GPU pipeline must use GPU simulation
                if sim_params["use_gpu_pipeline"]:
                    device = "cuda:" + str(gpu_id)
            elif sim_params and "use_gpu" in sim_params:
                if sim_params["use_gpu"]:
                    device = "cuda:" + str(gpu_id)

        self._world = World(
            stage_units_in_meters=1.0, rendering_dt=self.rendering_dt, backend=backend, sim_params=sim_params, device=device
        )
        # self._world._current_tasks = dict()
        from pack_task_easy import PackTask
        from omni.isaac.core.utils.viewports import set_camera_view
        self._world.scene.add_default_ground_plane()
        for i, offset in enumerate(offsets):
            task = PackTask(f"Task_{i}", self.max_steps, offset, 1 / self.rendering_dt)
            self._world.add_task(task)
            self._tasks.append(task)
        self._num_envs = len(self._tasks)

        # All tasks share identical spaces; advertise the first task's spaces.
        first_task = next(iter(self._tasks))
        self.observation_space = first_task.observation_space
        self.action_space = first_task.action_space

        if sim_params and "enable_viewport" in sim_params:
            self._render = sim_params["enable_viewport"]

        if init_sim:
            self._world.reset()
            for task in self._tasks:
                task.reset()
        set_camera_view(eye=[-4, -4, 6], target=offsets[len(offsets) - 1], camera_prim_path="/OmniverseKit_Persp")
        return [GymTaskEnv(task, self) for task in self._tasks]

    def set_task(self, task, backend="numpy", sim_params=None, init_sim=True) -> None:
        # Not available for multi task
        raise NotImplementedError()

    def update_task_params(self):
        # Not available for multi task
        raise NotImplementedError()
| 7,479 |
Python
| 36.58794 | 141 | 0.603423 |
gitLSW/robot-cloud/remnants/dependency_fixes/dreamer_setup.py
|
import os
import pathlib
import setuptools
from setuptools import find_namespace_packages
# The dreamerv3 sources are expected to be unpacked into ./temp; the package
# metadata (README, requirements) is read from there at setup time.
path = os.getcwd() + '/temp'

setuptools.setup(
    name='dreamerv3',
    version='1.5.0',
    description='Mastering Diverse Domains through World Models',
    author='Danijar Hafner',
    url='http://github.com/danijar/dreamerv3',
    long_description=pathlib.Path(path + '/README.md').read_text(),
    long_description_content_type='text/markdown',
    packages=find_namespace_packages(exclude=['example.py']),
    include_package_data=True,
    # Pin dependencies exactly as listed by the upstream requirements file.
    install_requires=pathlib.Path(path + '/requirements.txt').read_text().splitlines(),
    classifiers=[
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
)
| 866 |
Python
| 33.679999 | 87 | 0.684758 |
gitLSW/robot-cloud/remnants/dependency_fixes/logger.py
|
import collections
import concurrent.futures
import datetime
import json
import os
import re
import time
import numpy as np
from . import path
class Logger:
    """Collects (step, name, value) metric tuples and fans them out to outputs.

    `step` is any object convertible with int(); its value is multiplied by
    `multiplier` when metrics are recorded.
    """

    def __init__(self, step, outputs, multiplier=1):
        assert outputs, 'Provide a list of logger outputs.'
        self.step = step
        self.outputs = outputs
        self.multiplier = multiplier
        self._last_step = None
        self._last_time = None
        self._metrics = []

    def add(self, mapping, prefix=None):
        """Queues every entry of `mapping` at the current (scaled) step."""
        current = int(self.step) * self.multiplier
        for key, raw in dict(mapping).items():
            full_name = f'{prefix}/{key}' if prefix else key
            value = np.asarray(raw)
            if value.ndim not in (0, 1, 2, 3, 4):
                raise ValueError(
                    f"Shape {value.shape} for name '{full_name}' cannot be "
                    "interpreted as scalar, histogram, image, or video.")
            self._metrics.append((current, full_name, value))

    def scalar(self, name, value):
        self.add({name: value})

    def image(self, name, value):
        self.add({name: value})

    def video(self, name, value):
        self.add({name: value})

    def write(self, fps=False):
        """Flushes queued metrics to every output; optionally logs 'fps'."""
        if fps:
            rate = self._compute_fps()
            if rate is not None:
                self.scalar('fps', rate)
        if self._metrics:
            batch = tuple(self._metrics)
            for sink in self.outputs:
                sink(batch)
            self._metrics.clear()

    def _compute_fps(self):
        """Steps per wall-clock second since the previous call; None at first."""
        now_step = int(self.step) * self.multiplier
        if self._last_step is None:
            self._last_time = time.time()
            self._last_step = now_step
            return None
        elapsed_steps = now_step - self._last_step
        elapsed_time = time.time() - self._last_time
        self._last_time += elapsed_time
        self._last_step = now_step
        return elapsed_steps / elapsed_time
class AsyncOutput:
    """Wraps an output callback, optionally running it on a worker thread."""

    def __init__(self, callback, parallel=True):
        self._callback = callback
        self._parallel = parallel
        if parallel:
            # A single worker keeps summary batches strictly ordered.
            self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
            self._future = None

    def __call__(self, summaries):
        if not self._parallel:
            self._callback(summaries)
            return
        # Wait for the previous batch (re-raising its errors) before submitting.
        if self._future:
            self._future.result()
        self._future = self._executor.submit(self._callback, summaries)
class TerminalOutput:
    """Pretty-prints scalar summaries to the terminal (via rich if available).

    Args:
        pattern: Regex; only scalar names matching it are printed.
        name: Optional run name shown in the header of every report.
    """

    def __init__(self, pattern=r'.*', name=None):
        self._pattern = re.compile(pattern)
        self._name = name
        # Fall back to plain print() when rich is not installed.
        try:
            import rich.console
            self._console = rich.console.Console()
        except ImportError:
            self._console = None

    def __call__(self, summaries):
        step = max(s for s, _, _, in summaries)
        scalars = {k: float(v) for _, k, v in summaries if len(v.shape) == 0}
        scalars = {k: v for k, v in scalars.items() if self._pattern.search(k)}
        formatted = {k: self._format_value(v) for k, v in scalars.items()}
        if self._console:
            if self._name:
                self._console.rule(f'[green bold]{self._name} (Step {step})')
            else:
                self._console.rule(f'[green bold]Step {step}')
            self._console.print(' [blue]/[/blue] '.join(
                f'{k} {v}' for k, v in formatted.items()))
            print('')
        else:
            message = ' / '.join(f'{k} {v}' for k, v in formatted.items())
            message = f'[{step}] {message}'
            if self._name:
                message = f'[{self._name}] {message}'
            print(message, flush=True)

    def _format_value(self, value):
        """Compact human-readable float formatting: '0', '1.5', '1e4', '-1e-3'."""
        value = float(value)
        if value == 0:
            return '0'
        elif 0.01 < abs(value) < 10000:
            value = f'{value:.2f}'
            # Trim trailing zeros and a dangling decimal point (1.50 -> 1.5).
            # BUG FIX: the rstrip('0') line was duplicated; rstrip already
            # removes all trailing zeros in one call.
            value = value.rstrip('0')
            value = value.rstrip('.')
            return value
        else:
            value = f'{value:.1e}'
            value = value.replace('.0e', 'e')
            value = value.replace('+0', '')
            value = value.replace('+', '')
            value = value.replace('-0', '-')
            return value
class JSONLOutput(AsyncOutput):
    """Appends matching scalar summaries to a JSON-lines file."""

    def __init__(
        self, logdir, filename='metrics.jsonl', pattern=r'.*', parallel=True):
        super().__init__(self._write, parallel)
        self._filename = filename
        self._pattern = re.compile(pattern)
        self._logdir = path.Path(logdir)
        self._logdir.mkdirs()

    def _write(self, summaries):
        # Group scalar values by step; one JSON object is written per step.
        grouped = collections.defaultdict(dict)
        for step, name, value in summaries:
            if len(value.shape) == 0 and self._pattern.search(name):
                grouped[step][name] = float(value)
        payload = ''
        for step, scalars in grouped.items():
            payload += json.dumps({'step': step, **scalars}) + '\n'
        with (self._logdir / self._filename).open('a') as f:
            f.write(payload)
class TensorBoardOutput(AsyncOutput):
    """Writes summaries to TensorBoard event files.

    On GCS log directories, event files larger than `maxsize` bytes trigger
    creation of a fresh writer (file rotation).
    """

    def __init__(self, logdir, fps=20, maxsize=1e9, parallel=True):
        super().__init__(self._write, parallel)
        self._logdir = str(logdir)
        if self._logdir.startswith('/gcs/'):
            # Normalize the GCS fuse mount path to a gs:// URL.
            self._logdir = self._logdir.replace('/gcs/', 'gs://')
        self._fps = fps
        self._writer = None
        # Size-based event-file rotation only applies to gs:// directories.
        self._maxsize = self._logdir.startswith('gs://') and maxsize
        if self._maxsize:
            # Background worker that checks the current event-file size.
            self._checker = concurrent.futures.ThreadPoolExecutor(max_workers=1)
            self._promise = None

    def _write(self, summaries):
        import tensorflow as tf
        reset = False
        if self._maxsize:
            # Result of the size check kicked off during the previous write.
            result = self._promise and self._promise.result()
            # print('Current TensorBoard event file size:', result)
            reset = (self._promise and result >= self._maxsize)
            self._promise = self._checker.submit(self._check)
        if not self._writer or reset:
            print('Creating new TensorBoard event file writer.')
            self._writer = tf.summary.create_file_writer(
                self._logdir, flush_millis=1000, max_queue=10000)
            self._writer.set_as_default()
        for step, name, value in summaries:
            try:
                if len(value.shape) == 0:
                    tf.summary.scalar(name, value, step)
                elif len(value.shape) == 1:
                    # Cap histogram inputs at 1024 randomly chosen samples.
                    if len(value) > 1024:
                        value = value.copy()
                        np.random.shuffle(value)
                        value = value[:1024]
                    tf.summary.histogram(name, value, step)
                elif len(value.shape) == 2:
                    tf.summary.image(name, value, step)
                elif len(value.shape) == 3:
                    tf.summary.image(name, value, step)
                elif len(value.shape) == 4:
                    # 4-D tensors are treated as (T, H, W, C) videos.
                    self._video_summary(name, value, step)
            except Exception:
                print('Error writing summary:', name)
                raise
        self._writer.flush()

    def _check(self):
        import tensorflow as tf
        # Size of the newest event file in bytes, or 0 when none exist yet.
        events = tf.io.gfile.glob(self._logdir.rstrip('/') + '/events.out.*')
        return tf.io.gfile.stat(sorted(events)[-1]).length if events else 0

    def _video_summary(self, name, video, step):
        import tensorflow as tf
        import tensorflow.compat.v1 as tf1
        name = name if isinstance(name, str) else name.decode('utf-8')
        if np.issubdtype(video.dtype, np.floating):
            video = np.clip(255 * video, 0, 255).astype(np.uint8)
        try:
            # Keep at most three channels for GIF encoding.
            video = video[:, :, :, :3]
            T, H, W, C = video.shape
            summary = tf1.Summary()
            image = tf1.Summary.Image(height=H, width=W, colorspace=C)
            image.encoded_image_string = _encode_gif(video, self._fps)
            summary.value.add(tag=name, image=image)
            tf.summary.experimental.write_raw_pb(summary.SerializeToString(), step)
        except (IOError, OSError) as e:
            # GIF encoding needs ffmpeg; fall back to a plain image summary.
            print('GIF summaries require ffmpeg in $PATH.', e)
            tf.summary.image(name, video, step)
class WandBOutput:
    """Forwards summaries to Weights & Biases.

    Args:
        pattern: Regex; only scalar names matching it are logged as metrics.
        username: W&B entity.
        project_name: W&B project.
        model_name: Display name of the run.
        config: Mapping logged as the run configuration.
    """

    def __init__(self, pattern, username, project_name, model_name, config):
        self._pattern = re.compile(pattern)
        import wandb
        wandb.init(
            project=project_name,
            name=model_name,
            entity=username,
            # sync_tensorboard=True,
            config=dict(config),
        )
        self._wandb = wandb

    def __call__(self, summaries):
        bystep = collections.defaultdict(dict)
        wandb = self._wandb
        for step, name, value in summaries:
            if len(value.shape) == 0 and self._pattern.search(name):
                bystep[step][name] = float(value)
            elif len(value.shape) == 1:
                bystep[step][name] = wandb.Histogram(value)
            elif len(value.shape) == 2:
                # BUG FIX: a 2-D (H, W) grayscale array has no channel axis,
                # so the previous np.transpose(value, [2, 0, 1]) always raised.
                value = np.clip(255 * value, 0, 255).astype(np.uint8)
                bystep[step][name] = wandb.Image(value)
            elif len(value.shape) == 3:
                value = np.clip(255 * value, 0, 255).astype(np.uint8)
                value = np.transpose(value, [2, 0, 1])
                bystep[step][name] = wandb.Image(value)
            elif len(value.shape) == 4:
                value = value[:, :, :, :3]
                # Sanity check that the channel dimension is last
                assert value.shape[3] in [1, 3, 4], f"Invalid shape: {value.shape}"
                value = np.transpose(value, [0, 3, 1, 2])
                # If the video is a float, convert it to uint8
                if np.issubdtype(value.dtype, np.floating):
                    value = np.clip(255 * value, 0, 255).astype(np.uint8)
                bystep[step][name] = wandb.Video(value)

        for step, metrics in bystep.items():
            self._wandb.log(metrics, step=step)
class MLFlowOutput:
    """Logs scalar summaries to MLflow.

    Args:
        run_name: Run name (falls back to $MLFLOW_RUN_NAME).
        resume_id: Resume tag (falls back to $MLFLOW_RESUME_ID).
        config: Mapping of params logged when resuming an existing run.
        prefix: Optional prefix prepended to every metric name.
        pattern: Regex; only scalar names matching it are logged.
    """

    def __init__(self, run_name=None, resume_id=None, config=None, prefix=None,
                 pattern=r'.*'):
        import mlflow
        self._mlflow = mlflow
        self._prefix = prefix
        # BUG FIX: __call__ filtered with self._pattern, which was never
        # assigned in __init__, so every call raised AttributeError.
        self._pattern = re.compile(pattern)
        self._setup(run_name, resume_id, config)

    def __call__(self, summaries):
        bystep = collections.defaultdict(dict)
        for step, name, value in summaries:
            if len(value.shape) == 0 and self._pattern.search(name):
                name = f'{self._prefix}/{name}' if self._prefix else name
                bystep[step][name] = float(value)
        for step, metrics in bystep.items():
            self._mlflow.log_metrics(metrics, step=step)

    def _setup(self, run_name, resume_id, config):
        """Starts (or resumes) an MLflow run from arguments and env vars."""
        tracking_uri = os.environ.get('MLFLOW_TRACKING_URI', 'local')
        run_name = run_name or os.environ.get('MLFLOW_RUN_NAME')
        resume_id = resume_id or os.environ.get('MLFLOW_RESUME_ID')
        print('MLFlow Tracking URI:', tracking_uri)
        print('MLFlow Run Name: ', run_name)
        print('MLFlow Resume ID: ', resume_id)
        if resume_id:
            runs = self._mlflow.search_runs(None, f'tags.resume_id="{resume_id}"')
            assert len(runs), ('No runs to resume found.', resume_id)
            self._mlflow.start_run(run_name=run_name, run_id=runs['run_id'].iloc[0])
            # NOTE(review): assumes `config` is not None when resuming.
            for key, value in config.items():
                self._mlflow.log_param(key, value)
        else:
            tags = {'resume_id': resume_id or ''}
            self._mlflow.start_run(run_name=run_name, tags=tags)
def _encode_gif(frames, fps):
    """Encodes a sequence of (H, W, C) uint8 frames into GIF bytes via ffmpeg.

    Frames are streamed as raw video over stdin; requires ffmpeg in $PATH.
    Raises IOError with ffmpeg's stderr when the encoder fails.
    """
    from subprocess import Popen, PIPE
    h, w, c = frames[0].shape
    # Only grayscale (1 channel) and RGB (3 channels) input is supported.
    pxfmt = {1: 'gray', 3: 'rgb24'}[c]
    # Two-pass palette generation (palettegen/paletteuse) for GIF quality.
    cmd = ' '.join([
        'ffmpeg -y -f rawvideo -vcodec rawvideo',
        f'-r {fps:.02f} -s {w}x{h} -pix_fmt {pxfmt} -i - -filter_complex',
        '[0:v]split[x][z];[z]palettegen[y];[x]fifo[x];[x][y]paletteuse',
        f'-r {fps:.02f} -f gif -'])
    proc = Popen(cmd.split(' '), stdin=PIPE, stdout=PIPE, stderr=PIPE)
    for image in frames:
        proc.stdin.write(image.tobytes())
    out, err = proc.communicate()
    if proc.returncode:
        raise IOError('\n'.join([' '.join(cmd), err.decode('utf8')]))
    del proc
    return out
| 10,907 |
Python
| 32.563077 | 78 | 0.610159 |
RoboticExplorationLab/Deep-ILC/main.py
|
import argparse
import datetime
import numpy as np
import itertools
from dmc_grad import DMControlEnvWrapper
from envs.cartpole import Cartpole
from envs.airplane import YakPlane
from envs.quadrotor import Quadrotor
from envs.rex_quadrotor import RexQuadrotor
from envs.acrobot import AcrobotEnv
from envs.cheetah import CheetahEnv
from envs.hopper import HopperEnv
from envs.ant import AntEnv
import torch
from sac import SAC
from tensorboardX import SummaryWriter
from replay_memory import ReplayMemory
from utils import compute_jacobian_online, compute_jacobian_batch
from pretrain import pretrain_sac
import ipdb
import time
import sys
from rl_plotter.logger import Logger
parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')
parser.add_argument('--env-name', default="rexquadrotor",
help='Choose environment (cartpole, acrobot, rexquadrotor, halfcheetah, hopper, DmcCheetah, DmcHopper)')
parser.add_argument('--policy', default="Gaussian",
help='Policy Type: Gaussian | Deterministic (default: Gaussian)')
parser.add_argument('--eval', type=bool, default=True,
help='Evaluates a policy a policy every 10 episode (default: True)')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor for reward (default: 0.99)')
parser.add_argument('--tau', type=float, default=0.005, metavar='G',
help='target smoothing coefficient(τ) (default: 0.005)')
parser.add_argument('--lr', type=float, default=0.0003, metavar='G',
help='learning rate (default: 0.0003)')
parser.add_argument('--alpha', type=float, default=0.2, metavar='G',
help='Temperature parameter α determines the relative importance of the entropy\
term against the reward (default: 0.2)')
parser.add_argument('--automatic_entropy_tuning', type=bool, default=False, metavar='G',
help='Automaically adjust α (default: False)')
parser.add_argument('--seed', type=int, default=123456, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
help='batch size (default: 256)')
parser.add_argument('--num_steps', type=int, default=1000001, metavar='N',
help='maximum number of steps (default: 1000000)')
parser.add_argument('--hidden_sizePi', type=int, default=256, metavar='N',
help='hidden size (default: 256)')
parser.add_argument('--hidden_sizeQ', type=int, default=256, metavar='N',
help='hidden size (default: 256)')
parser.add_argument('--updates_per_step', type=int, default=1, metavar='N',
help='model updates per simulator step (default: 1)')
parser.add_argument('--start_steps', type=int, default=1000, metavar='N',
help='Steps sampling random actions (default: 10000)')
parser.add_argument('--target_update_interval', type=int, default=1, metavar='N',
help='Value target update per no. of updates per step (default: 1)')
parser.add_argument('--replay_size', type=int, default=1000000, metavar='N',
help='size of replay buffer (default: 10000000)')
parser.add_argument('--cuda', action="store_true",
help='run on CUDA (default: False)')
parser.add_argument('--eval_interval', type=int, default=40,
help='evaluation interval (default: 40 episodes)')
parser.add_argument('--exp_name', type=str, default='scratch_training_test',
help='name of the experiment')
parser.add_argument('--offline', action="store_true",
help='Train with offline data (i.e without collecting any experiences) (default: False)')
parser.add_argument('--jacobian', action="store_true",
help='Train with approximate model jacobians (default: False)')
parser.add_argument('--pretrain', action="store_true",
help='Pretrain the model in the approximate model(default: False)')
parser.add_argument('--pretrain_jacobian', action="store_true",
help='Pretrain the model in the approximate model with model jacobians (default: False)')
parser.add_argument('--load', action="store_true",
help='Load the model saved with exp-name (default: False)')
parser.add_argument('--test', action="store_true",
help='Run without logging or saving (default: False)')
parser.add_argument('--zeroth', action="store_true",
help='Train with zeroth order jacobians from the approximate model (default: False)')
parser.add_argument('--jac_s_coeff', type=float, default=1.0,
help='Coefficient for the state jacobian loss (default: 1.0)')
args = parser.parse_args()
args.dflex_env = False
args.dmc_env = False
device = torch.device("cuda" if args.cuda else "cpu")
# Environment selection. Each branch builds a pair:
#   env     — the "real" system, deliberately perturbed (friction, damping,
#             mass, drag, ...) relative to the nominal model;
#   env_nom — the unperturbed nominal model used to compute approximate
#             jacobians/gradients for the jacobian-regularized updates.
if args.env_name == 'cartpole':
    T = 100
    dt = 0.05
    # Real cartpole: heavier cart/pole, viscous friction b, actuator deadband, Coulomb friction mu.
    env = Cartpole(mc=1.8, mp=0.6, b=0.08, deadband=0.05, mu=0.6, device=device, dt=dt, max_steps=T) #Cartpole(mu=0.4, device=device)
    # Nominal: frictionless, no deadband, unbounded control.
    env_nom = Cartpole(b=0.0, deadband=0.0, u_max=np.inf, mu=0.0, device=device, dt=dt, max_steps=T)
elif args.env_name == 'acrobot':
    T = 100
    dt = 0.05
    env_nom = AcrobotEnv(1, device=device, dt=dt, T=T)
    # Real acrobot: first link 20% longer and heavier than nominal.
    env = AcrobotEnv(1, device=device, dt=dt, T=T, l1=1.2, m1=1.2)
elif args.env_name == 'quadrotor':
    # Real quadrotor: explicit inertia tensor, reduced thrust gain, drag terms.
    env = Quadrotor(mass=0.75,
               J=([[0.0026, 0.0003, 0.0],
                  [0.0003, 0.0026, 0.0],
                  [0.0, 0.0, 0.005]]),
               motor_dist=0.2,
               kf=0.95,
               km=0.026,
               cross_A_x = 0.3,
               cross_A_y = 0.3,
               cross_A_z = 0.65,
               cd=[0.4, 0.4, 0.4],
               max_steps=100,
               dt=0.05)
    env_nom = Quadrotor(max_steps=100, dt=0.05)
elif args.env_name == 'rexquadrotor':
    env_nom = RexQuadrotor()
    # Real RexQuadrotor: mass doubled (x1.1*2.0 of nominal), motor constants +10%,
    # motor biases -10%, plus aerodynamic drag.
    env = RexQuadrotor(mass=1.1*2.0,
                J=[[0.0165, 0.0, 0.0], [0.0, 0.0165, 0.0],[0.0, 0.0, 0.0234]],
                kf=1.1*0.0244101,
                km=1.1*0.00029958,
                bf=0.9*(-30.48576),
                bm=0.9*(-0.367697),
                cd=[0.3, 0.3, 0.3])
elif args.env_name == 'airplane':
    env = YakPlane(m=0.075*1.5, b=0.45*0.85, lin=True, max_steps=50, dt=0.04)
    dtype = torch.float32
    # Zeroth-order jacobians (finite differences) need double precision.
    if args.zeroth:
        dtype = torch.double
    env_nom = YakPlane(m=0.075*1.2, b=0.45*0.95, lin=True, max_steps=50, dt=0.04, dtype=dtype)
elif args.env_name == 'halfcheetah':
    # Perturbed contact/joint parameters for the "real" dflex cheetah.
    env_params = { 'density' : 1000.0,
                'stiffness' : 0.0,
                'damping' : 0.7,
                'contact_ke': 1.e+5,
                'contact_kd': 5.e+3,
                'contact_kf': 1.e+3,
                'contact_mu': 0.7,
                'limit_ke' : 1.e+3,
                'limit_kd' : 1.e+1,
                'armature' : 0.1}
    # no_grad=False keeps the differentiable-simulation graph available.
    env = CheetahEnv(num_envs = 1, \
                    device = 'cpu', \
                    render = False, \
                    seed = args.seed, \
                    episode_length=1000, \
                    stochastic_init = True, \
                    MM_caching_frequency = 16, \
                    no_grad=False,
                    env_params=env_params)
    # Nominal cheetah: default env_params.
    env_nom = CheetahEnv(num_envs = 1, \
                    device = 'cpu', \
                    render = False, \
                    seed = args.seed, \
                    episode_length=1000, \
                    stochastic_init = True, \
                    MM_caching_frequency = 16, \
                    no_grad=False)
    args.dflex_env = True
elif args.env_name == 'hopper':
    # Perturbed contact/joint parameters for the "real" dflex hopper.
    env_params = { 'density' : 1000.0,
                'stiffness' : 0.0,
                'damping' : 1.6,
                'contact_ke': 1.e+5,
                'contact_kd': 5.e+3,
                'contact_kf': 1.e+3,
                'contact_mu': 0.72,
                'limit_ke' : 1.e+3,
                'limit_kd' : 1.e+1,
                'armature' : 1.0}
    env = HopperEnv(num_envs = 1, \
                    device = 'cpu', \
                    render = False, \
                    seed = args.seed, \
                    episode_length=1000, \
                    stochastic_init = True, \
                    MM_caching_frequency = 16, \
                    no_grad=False,
                    env_params=env_params)
    # Nominal hopper: default env_params.
    env_nom = HopperEnv(num_envs = 1, \
                    device = 'cpu', \
                    render = False, \
                    seed = args.seed, \
                    episode_length=1000, \
                    stochastic_init = True, \
                    MM_caching_frequency = 16, \
                    no_grad=False)
    args.dflex_env = True
elif args.env_name == 'DmcCheetah':
    from dm_control import suite
    args.dmc_env = True
    env = DMControlEnvWrapper(domain_name='cheetah', task_name='run')
    env_nom = DMControlEnvWrapper(domain_name='cheetah', task_name='run')
    # State-jacobian loss disabled for the DMC environment.
    args.jac_s_coeff = 0.0
    # Access the MuJoCo model
    model = env.dm_control_env.physics.model
    # Change damping coefficients for all joints (env only; env_nom stays nominal)
    model.dof_damping = model.dof_damping*0.7
    # Change contact stiffness coefficients
    # model.actuator_gainprm = model.actuator_gainprm
    # Change friction coefficients
    model.geom_friction = model.geom_friction*0.7
# Offline dataset for --offline training; None means transitions are collected online.
# NOTE(review): np.load('data') expects a file literally named "data" in the CWD
# (typically an .npy/.npz archive with state/action/next_state arrays) — confirm path.
if args.offline:
    data = np.load('data')
else:
    data = None
# Reproducibility: seed both torch and numpy RNGs from the CLI seed.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Agent: SAC over the flat observation vector and the env's action space.
agent = SAC(env.observation_space.shape[0], env.action_space, args)
# Saving and Loading: derive the run directory and checkpoint path from the
# environment and experiment names; expose the checkpoint path via args so
# downstream code (e.g. pretraining) can save to it.
save_path = 'runs/SAC_{}_{}'.format(args.env_name,args.exp_name)
ckpt_save_path = save_path+'/checkpoint'
args.ckpt_save_path = ckpt_save_path
if args.load:
    # Fine-tuning a loaded checkpoint: nest outputs under a suffix that
    # records the fine-tuning mode, then restore the networks.
    mode = 'ft_jac_up2' if args.jacobian else 'ft_rl'
    save_path += '/' + mode + '/'
    args.exp_name += '_' + mode + '/'
    agent.load_nets(ckpt_save_path)
# Logging (TensorBoard + rl_plotter); disabled entirely in --test mode.
if args.test:
    writer = None
else:
    writer = SummaryWriter(save_path+f'/seed{args.seed}/')
    config = {'args' : args}
    logger = Logger(log_dir="./rl_plotter_logs", exp_name=args.exp_name, env_name=args.env_name, seed=args.seed, config=config)
# Memory: replay buffer, optionally pre-filled with an offline dataset.
if args.offline:
    # Offline training: load the whole dataset onto the device and precompute
    # model jacobians/gradients for every transition in one batch.
    state, action, next_state = torch.tensor(data['state']).to(device).float(), torch.tensor(data['action']).to(device).float(), torch.tensor(data['next_state']).to(device).float()
    # BUG FIX: the original called the undefined name `compute_jacobians_batch`
    # and indexed with an undefined `i` (both NameErrors); the imported helper
    # is `compute_jacobian_batch` and it operates on the full batch at once.
    # NOTE(review): this assumes `data` is a mutable mapping — np.load returns
    # a read-only NpzFile, so the offline loader may need dict(np.load(...)).
    data['jac_ssa'], data['grad_rsa'] = compute_jacobian_batch(state, action, next_state, env_nom, args)
memory = ReplayMemory(args.replay_size, args.seed, data)
# Training Loop
# Global counters: environment steps taken and gradient updates performed.
total_numsteps = 0
updates = 0
# --pretrain runs SAC inside the nominal model only, saves the best agent,
# and exits without ever touching the "real" environment.
if args.pretrain:
    pretrain_sac(agent, env_nom, writer, args, data)
    agent.refresh_optim()
    agent.reset_agent_to_best()
    sys.exit()
# Main interaction/update loop: collect transitions in the "real" env,
# update SAC (optionally with nominal-model jacobians), and periodically
# evaluate the deterministic policy.
test_init = False
for i_episode in itertools.count(1):
    episode_reward = 0
    episode_steps = 0
    done = False
    states_arr = []
    actions_arr = []
    state = env.reset()
    while not done and not test_init:
        states_arr.append(state)
        # Warm-up with uniform random actions unless resuming/pretrained.
        if args.start_steps > total_numsteps and not args.pretrain and not args.load:
            action = env.action_space.sample() # Sample random action
        else:
            action = agent.select_action(state) # Sample action from policy
        if len(memory) > args.batch_size:
            # Number of updates per step in environment
            for i in range(args.updates_per_step):
                # Update parameters of all the networks
                if args.jacobian:
                    # Jacobian-regularized update also returns diagnostic stats.
                    critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha, stats = agent.update_parameters_jac(memory, args.batch_size, updates)
                else:
                    critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha = agent.update_parameters(memory, args.batch_size, updates)
                if not args.test:
                    writer.add_scalar('loss/critic_1', critic_1_loss, updates)
                    writer.add_scalar('loss/critic_2', critic_2_loss, updates)
                    writer.add_scalar('loss/policy', policy_loss, updates)
                    writer.add_scalar('loss/entropy_loss', ent_loss, updates)
                    writer.add_scalar('entropy_temprature/alpha', alpha, updates)
                    if args.jacobian:
                        # Extra diagnostics: jacobian losses, gradient norms,
                        # and per-batch critic-jacobian magnitudes.
                        writer.add_scalar('loss/jac_act', stats['jac_a_loss'], updates)
                        writer.add_scalar('loss/jac_state', stats['jac_s_loss'], updates)
                        writer.add_scalar('loss/critic_loss', stats['critic_loss'], updates)
                        if 'act_coeff' in stats:
                            writer.add_scalar('entropy_temprature/act_coeff', stats['act_coeff'], updates)
                            writer.add_scalar('entropy_temprature/state_coeff', stats['state_coeff'], updates)
                        writer.add_scalar('grads/grad_q', stats['grad_q'], updates)
                        writer.add_scalar('grads/grad_action', stats['grad_action'], updates)
                        writer.add_scalar('grads/grad_state', stats['grad_state'], updates)
                        writer.add_scalar('grads/policy', stats['policy_grad'], updates)
                        writer.add_scalar('grads/critic_grad', stats['critic_grad'], updates)
                        writer.add_scalar('grads/nq_jac_state', stats['nq_jac_state'].abs().mean(), updates)
                        writer.add_scalar('grads/nq_jac_action', stats['nq_jac_act'].abs().mean(), updates)
                        writer.add_scalar('grads_qf/nq_st_med', stats['nq_jac_state'].abs().mean(dim=-1).median(), updates)
                        writer.add_scalar('grads_qf/nq_act_med', stats['nq_jac_act'].abs().mean(dim=-1).median(), updates)
                        writer.add_scalar('grads_qf/nq_st_max', stats['nq_jac_state'].abs().mean(dim=-1).max(), updates)
                        writer.add_scalar('grads_qf/nq_act_max', stats['nq_jac_act'].abs().mean(dim=-1).max(), updates)
                updates += 1
        # Step the real environment without building an autodiff graph.
        with torch.no_grad():
            next_state, reward, done, info = env.step(action) # Step
        episode_steps += 1
        total_numsteps += 1
        episode_reward += reward
        # Ignore the "done" signal if it comes from hitting the time horizon.
        # (https://github.com/openai/spinningup/blob/master/spinup/algos/sac/sac.py)
        mask = 1 if (episode_steps == env._max_episode_steps and not info["done_inf"]) else float(not done)
        if not args.offline:
            # jac/grads are set to 0 when args.jacobian is False or done_inf is True
            jac_ssa, grad_rsa = compute_jacobian_online(state, action, next_state, env_nom, args, info['done_inf'])
            memory.push(state, action, reward, next_state, mask, jac_ssa, grad_rsa) # Append transition to memory
        state = next_state
        actions_arr.append(action)
    if total_numsteps > args.num_steps:
        break
    if not args.test and not test_init:
        writer.add_scalar('reward/train', episode_reward, i_episode)
    if i_episode % 1 == 0:
        print("Episode: {}, total numsteps: {}, episode steps: {}, reward: {}".format(i_episode, total_numsteps, episode_steps, round(episode_reward, 2)))
    # Periodic evaluation: 40 episodes with the deterministic (mean) policy.
    if (i_episode-1) % args.eval_interval == 0 and args.eval is True:
        test_init=False
        avg_reward = 0.
        episodes = 40
        avg_num_steps = 0.0
        episode_reward_list = []
        for _  in range(episodes):
            state = env.reset()
            episode_reward = 0
            done = False
            num_steps = 0
            while not done:
                action = agent.select_action(state, evaluate=True)
                with torch.no_grad():
                    next_state, reward, done, _ = env.step(action)
                episode_reward += reward
                num_steps +=1
                state = next_state
            episode_reward_list.append(episode_reward)
            avg_reward += episode_reward
            avg_num_steps += num_steps
        avg_reward /= episodes
        avg_num_steps /= episodes
        if not args.test:
            writer.add_scalar('avg_reward/test', avg_reward, i_episode)
            logger.update(score=episode_reward_list, total_steps=total_numsteps)
        print("----------------------------------------")
        print("Test Episodes: {}, Avg. Reward: {}, Avg. Numsteps: {}".format(episodes, round(avg_reward, 2), avg_num_steps))
        print("----------------------------------------")
env.close()
| 16,756 |
Python
| 44.535326 | 180 | 0.569348 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.