NVIDIA-Omniverse/usd-plugin-samples/src/usd-plugins/dynamicPayload/omniMetProvider/omniMetProvider.cpp

// Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/base/tf/token.h>
#include <pxr/base/vt/value.h>
#include <pxr/base/js/json.h>
#include <pxr/usd/sdf/path.h>
#include <pxr/usd/sdf/schema.h>
#include <pxr/usd/sdf/payload.h>
#include <pxr/usd/sdf/primSpec.h>
#include <pxr/usd/sdf/attributeSpec.h>
#include <pxr/usd/usd/tokens.h>
#include <edfDataProviderFactory.h>
#include "omniMetProvider.h"
#include <iostream>
#include <curl/curl.h>
PXR_NAMESPACE_OPEN_SCOPE
EDF_DEFINE_DATAPROVIDER(OmniMetProvider);
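// Provider argument keys understood by this provider (see GetDataLodLevel,
// GetLod1Count, and IsDeferredRead below):
// - dataLodLevel: 0 loads departments only, 1 loads departments plus a
//   capped number of objects each, 2 loads everything
// - deferredRead: when true, data is loaded on demand in ReadChildren
//   rather than up front in Read
// - lod1Count: the per-department object cap used when dataLodLevel is 1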
TF_DEFINE_PUBLIC_TOKENS(
OmniMetProviderProviderArgKeys,
(dataLodLevel)
(deferredRead)
(lod1Count)
);
TF_DEFINE_PRIVATE_TOKENS(
EdfFieldKeys,
(EdfDataParameters)
);
TF_DEFINE_PRIVATE_TOKENS(
OmniMetProviderTypeNames,
(AmaDepartment)
(AmaObject)
);
TF_DEFINE_PRIVATE_TOKENS(
OmniMetProviderFieldKeys,
(departmentId)
(displayName)
(objectID)
(isHighlight)
(accessionNumber)
(accessionYear)
(isPublicDomain)
(primaryImage)
(primaryImageSmall)
(additionalImages)
(constituents)
(department)
(objectName)
(title)
(culture)
(period)
(dynasty)
(reign)
(portfolio)
(artistRole)
(artistPrefix)
(artistDisplayName)
(artistDisplayBio)
(artistSuffix)
(artistAlphaSort)
(artistNationality)
(artistGender)
(artistWikidata_URL)
(artistULAN_URL)
(objectDate)
(objectBeginDate)
(objectEndDate)
(medium)
(dimensions)
(measurements)
(creditLine)
(geographyType)
(city)
(state)
(county)
(country)
(region)
(subregion)
(locale)
(locus)
(excavation)
(river)
(classification)
(rightsAndReproduction)
(linkResource)
(metadataDate)
(repository)
(objectURL)
(objectWikidataURL)
(isTimelineWork)
(galleryNumber)
);
enum struct DataLodLevel
{
Level0 = 0,
Level1 = 1,
Level2 = 2
};
// urls used to retrieve the data
static const std::string DEPARTMENT_URL = "https://collectionapi.metmuseum.org/public/collection/v1/departments";
static const std::string OBJECTS_IN_DEPARTMENT_URL = "https://collectionapi.metmuseum.org/public/collection/v1/objects?departmentIds=";
static const std::string OBJECT_URL = "https://collectionapi.metmuseum.org/public/collection/v1/objects/";
static const SdfPath DATA_ROOT_PATH("/Data");
OmniMetProvider::OmniMetProvider(const EdfDataParameters& parameters) : IEdfDataProvider(parameters)
{
curl_global_init(CURL_GLOBAL_DEFAULT);
}
OmniMetProvider::~OmniMetProvider()
{
curl_global_cleanup();
}
bool OmniMetProvider::Read(std::shared_ptr<IEdfSourceData> sourceData)
{
// this gives the provider a chance to load all data it needs to on first layer read
// if we are parameterized for a deferred read, we do nothing and read on demand
// at first ask, if it's not a deferred read, we load all appropriate content from the
// back-end here
if(!this->IsDeferredRead())
{
// it's not a deferred read, so determine how much data we want to really load
int lodLevel = this->GetDataLodLevel();
if (lodLevel == static_cast<int>(DataLodLevel::Level0))
{
// load the departments
this->_LoadData(false, 0, sourceData);
}
else if (lodLevel == static_cast<int>(DataLodLevel::Level1))
{
// load the departments and their children
// but cap the number of children at the specified level
this->_LoadData(true, this->GetLod1Count(), sourceData);
}
else
{
// max lod level, load everything
this->_LoadData(true, 0, sourceData);
}
}
return true;
}
void OmniMetProvider::_LoadData(bool includeObjects, size_t objectCount, std::shared_ptr<IEdfSourceData> sourceData)
{
// load the department data
std::string departmentData = this->_LoadDepartments();
std::vector<std::pair<std::string, int>> departments = this->_ParseDepartments(departmentData, sourceData);
// do we want to load objects as well?
if (includeObjects)
{
for (auto it = departments.begin(); it != departments.end(); it++)
{
std::vector<std::string> objectData = this->_LoadObjects(TfStringify(it->second), objectCount);
for (auto itt = objectData.begin(); itt != objectData.end(); itt++)
{
this->_ParseObject(*itt, it->first, sourceData);
}
}
}
}
std::string OmniMetProvider::_LoadDepartments()
{
std::string departments;
CURL* departmentCurl = curl_easy_init();
if (departmentCurl != nullptr)
{
CURLcode resultCode;
curl_easy_setopt(departmentCurl, CURLOPT_URL, DEPARTMENT_URL.c_str());
curl_easy_setopt(departmentCurl, CURLOPT_HTTPGET, 1L);
curl_easy_setopt(departmentCurl, CURLOPT_WRITEFUNCTION, OmniMetProvider::_CurlWriteCallback);
// allocate a string that we can append the result onto
std::string* result = new std::string();
curl_easy_setopt(departmentCurl, CURLOPT_WRITEDATA, reinterpret_cast<void*>(result));
resultCode = curl_easy_perform(departmentCurl);
if (resultCode == CURLE_OK)
{
departments = *result;
}
else
{
TF_CODING_ERROR("Unable to load departments from '%s'!", DEPARTMENT_URL.c_str());
}
// done with the callback data
delete result;
// done with the request handle
curl_easy_cleanup(departmentCurl);
}
return departments;
}
std::vector<int> OmniMetProvider::_ParseObjectIds(const std::string& response) const
{
std::vector<int> objectIds;
PXR_NS::JsValue jsValue = PXR_NS::JsParseString(response, nullptr);
if (!jsValue.IsNull())
{
PXR_NS::JsObject rootObject = jsValue.GetJsObject();
PXR_NS::JsObject::iterator it = rootObject.find("objectIDs");
if (it != rootObject.end())
{
PXR_NS::JsArray jsonObjectIdArray = it->second.GetJsArray();
for (auto objectIdIt = jsonObjectIdArray.begin(); objectIdIt != jsonObjectIdArray.end(); objectIdIt++)
{
objectIds.push_back((*objectIdIt).GetInt());
}
}
else
{
TF_CODING_ERROR("Unable to find 'objectIDs' array in returned data '%s'!", response.c_str());
}
}
else
{
TF_CODING_ERROR("Data returned '%s' was not JSON or was empty!", response.c_str());
}
return objectIds;
}
std::vector<std::string> OmniMetProvider::_LoadObjects(const std::string& departmentId, size_t objectCount)
{
// NOTE: this should be updated to make these requests in parallel in the case
// where we aren't doing deferred reads
// ideally we wouldn't want to initialize a new curl handle here, but since this
// call can be made in the parallel prim indexing, we can't share the easy handle
// across threads, so we take the overhead hit here
    std::vector<std::string> objects;
    CURL* objectCurl = curl_easy_init();
    if (objectCurl == nullptr)
    {
        // guard against curl_easy_init failing, mirroring the
        // null-handle check performed in _LoadDepartments
        return objects;
    }
    std::string url = OBJECTS_IN_DEPARTMENT_URL + departmentId;
    std::string* result = new std::string();
    CURLcode resultCode;
curl_easy_setopt(objectCurl, CURLOPT_URL, url.c_str());
curl_easy_setopt(objectCurl, CURLOPT_HTTPGET, 1L);
curl_easy_setopt(objectCurl, CURLOPT_WRITEFUNCTION, OmniMetProvider::_CurlWriteCallback);
curl_easy_setopt(objectCurl, CURLOPT_WRITEDATA, reinterpret_cast<void*>(result));
resultCode = curl_easy_perform(objectCurl);
if (resultCode == CURLE_OK)
{
// process result
std::vector<int> objectIds = this->_ParseObjectIds(*result);
// objectCount = 0 means load all objects
// objectCount > 0 means load max that many objects
size_t counter = 0;
for (auto objectIdIterator = objectIds.begin(); objectIdIterator != objectIds.end() && (objectCount == 0 || counter < objectCount); objectIdIterator++)
{
// reset the URL and result buffer
// NOTE: this should be updated to make these requests in parallel
url = OBJECT_URL + TfStringify(*objectIdIterator);
*result = "";
curl_easy_setopt(objectCurl, CURLOPT_URL, url.c_str());
resultCode = curl_easy_perform(objectCurl);
if (resultCode == CURLE_OK)
{
objects.push_back(*result);
}
counter++;
}
}
// done with the callback data
delete result;
// done with the request handle
curl_easy_cleanup(objectCurl);
return objects;
}
std::vector<std::pair<std::string, int>> OmniMetProvider::_ParseDepartments(const std::string& departmentJson,
std::shared_ptr<IEdfSourceData> sourceData)
{
std::vector<std::pair<std::string, int>> parsedDepartments;
JsValue jsValue = JsParseString(departmentJson, nullptr);
if (!jsValue.IsNull())
{
JsObject rootObject = jsValue.GetJsObject();
JsObject::iterator it = rootObject.find("departments");
if (it != rootObject.end())
{
JsArray departments = it->second.GetJsArray();
std::string parent = DATA_ROOT_PATH.GetAsString();
for (auto departmentIt = departments.begin(); departmentIt != departments.end(); departmentIt++)
{
// for each department, create a prim to represent it
JsObject department = (*departmentIt).GetJsObject();
int departmentId = department[OmniMetProviderFieldKeys->departmentId.GetString()].GetInt();
std::string displayName = department[OmniMetProviderFieldKeys->displayName.GetString()].GetString();
// create the prim
std::string primName = TfMakeValidIdentifier(displayName);
sourceData->CreatePrim(DATA_ROOT_PATH, primName, SdfSpecifier::SdfSpecifierDef,
OmniMetProviderTypeNames->AmaDepartment);
// create the attributes for the prim
SdfPath parentPrim = SdfPath(parent + "/" + primName);
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->departmentId.GetString(),
SdfValueTypeNames->Int, SdfVariability::SdfVariabilityUniform, VtValue(departmentId));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->displayName.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(displayName));
parsedDepartments.push_back(std::make_pair(parentPrim.GetAsString(), departmentId));
}
}
else
{
TF_CODING_ERROR("Unable to find 'departments' array in returned data '%s'!", departmentJson.c_str());
}
}
else
{
TF_CODING_ERROR("Data returned '%s' was not JSON or was empty!", departmentJson.c_str());
}
return parsedDepartments;
}
void OmniMetProvider::_ParseObject(const std::string& objectData, const std::string& parentPath,
std::shared_ptr<IEdfSourceData> sourceData)
{
// from the parent path given and the data contained in the JSON
// object retrieved from the server, we can create the full prim
JsValue jsValue = JsParseString(objectData, nullptr);
if (!jsValue.IsNull())
{
JsObject rootObject = jsValue.GetJsObject();
// the root object contains all of our properties that we now need
// to create a prim spec for the object and a set of property
// specs for it
// NOTE: this code uses the "default value" of a property spec
// to represent the authored value coming from the external system
// We don't need to do sub-composition over the data coming
// from the external system, so we only ever have a value or not.
// If HasDefaultValue is true on the property spec, it means
// there was an authored value that came from the remote system.
// One optimization we could do in the layer above (EdfData) is
// to add schema acquisition and checking in the loop. This would allow us
// to create the property spec or not depending on if the value that came in
// is different from the true fallback declared in the schema
// (but we'd have to change the ask for the property to check whether
// the schema has the property rather than if the property spec exists)
std::string objectName = rootObject[OmniMetProviderFieldKeys->objectName.GetString()].GetString();
std::string primName = TfMakeValidIdentifier(objectName) +
TfStringify(rootObject[OmniMetProviderFieldKeys->objectID.GetString()].GetInt());
// create the prim
SdfPath newPrimParentPath(parentPath);
sourceData->CreatePrim(newPrimParentPath, primName, SdfSpecifier::SdfSpecifierDef,
OmniMetProviderTypeNames->AmaObject);
// set the fact that this prim has an API schema attached to it
// usdGenSchema doesn't generate a public token for the actual
// API schema class name, so we hard code that here
SdfPath parentPrim = SdfPath(parentPath + "/" + primName);
TfTokenVector apiSchemas;
apiSchemas.push_back(TfToken("OmniMetArtistAPI"));
VtValue apiSchemasValue(apiSchemas);
sourceData->SetField(parentPrim, UsdTokens->apiSchemas, apiSchemasValue);
// create the attributes for the prim
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->objectID.GetString(),
SdfValueTypeNames->Int, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->objectID.GetString()].GetInt()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->isHighlight.GetString(),
SdfValueTypeNames->Bool, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->isHighlight.GetString()].GetBool()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->accessionNumber.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->accessionNumber.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->accessionYear.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->accessionYear.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->isPublicDomain.GetString(),
SdfValueTypeNames->Bool, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->isPublicDomain.GetString()].GetBool()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->primaryImage.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->primaryImage.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->primaryImageSmall.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->primaryImageSmall.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->department.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->department.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->title.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->title.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->culture.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->culture.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->period.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->period.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->dynasty.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->dynasty.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->reign.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->reign.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->portfolio.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->portfolio.GetString()].GetString()));
// artist information complying with sample API schema
std::string namespaceFieldPrefix = "omni:met:artist:";
JsObject::const_iterator i = rootObject.find(OmniMetProviderFieldKeys->artistRole.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistRole.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistRole.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistPrefix.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistPrefix.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistPrefix.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistDisplayName.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistDisplayName.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistDisplayName.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistDisplayBio.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistDisplayBio.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistDisplayBio.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistSuffix.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistSuffix.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistSuffix.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistAlphaSort.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistAlphaSort.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistAlphaSort.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistNationality.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistNationality.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistNationality.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistGender.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistGender.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistGender.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistWikidata_URL.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistWikidata_URL.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistWikidata_URL.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistULAN_URL.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistULAN_URL.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistULAN_URL.GetString()].GetString()));
}
// note that there are quite a few additional properties that could be pulled, the above
// represents only a sample of the data that is there - if you'd like to try the rest as an
// exercise, you can enhance the schema attributes and read the remaining ones here
}
else
{
TF_CODING_ERROR("Data returned '%s' was not JSON or was empty!", objectData.c_str());
}
}
bool OmniMetProvider::ReadChildren(const std::string& parentPath, std::shared_ptr<IEdfSourceData> sourceData)
{
// if the parent path is the root, we need to load the departments
// but only if we are in a deferred read scenario
if (this->IsDeferredRead())
{
SdfPath parentPrimPath = SdfPath(parentPath);
int lodLevel = this->GetDataLodLevel();
if (parentPrimPath == DATA_ROOT_PATH)
{
// load the department data
std::cout << "Loading department data..." << std::endl;
std::string departmentData = this->_LoadDepartments();
std::vector<std::pair<std::string, int>> departments = this->_ParseDepartments(departmentData,
sourceData);
}
else
{
VtValue typeNameValue;
if(sourceData->HasField(SdfPath(parentPath), SdfFieldKeys->TypeName, &typeNameValue))
{
if (typeNameValue.UncheckedGet<TfToken>() == OmniMetProviderTypeNames->AmaDepartment &&
this->GetDataLodLevel() != static_cast<int>(DataLodLevel::Level0))
{
// it's a department, we need to load the objects
// associated with the department
std::string departmentIdPath = parentPath + "." + OmniMetProviderFieldKeys->departmentId.GetString();
VtValue departmentId;
if (sourceData->HasAttribute(SdfPath(departmentIdPath), &departmentId))
{
size_t objectCount = 0;
if (lodLevel == static_cast<int>(DataLodLevel::Level1))
{
objectCount = this->GetLod1Count();
}
// load the object data
std::cout << "Loading object data for " + parentPath + "..." << std::endl;
std::vector<std::string> objectData = this->_LoadObjects(TfStringify(departmentId.UncheckedGet<int>()), objectCount);
for (auto it = objectData.begin(); it != objectData.end(); it++)
{
this->_ParseObject(*it, parentPath, sourceData);
}
}
}
}
}
return true;
}
return false;
}
bool OmniMetProvider::IsDataCached() const
{
return !this->IsDeferredRead();
}
int OmniMetProvider::GetDataLodLevel() const
{
int dataLodLevel = 0;
EdfDataParameters parameters = this->GetParameters();
std::unordered_map<std::string, std::string>::const_iterator it = parameters.providerArgs.find(OmniMetProviderProviderArgKeys->dataLodLevel);
if (it != parameters.providerArgs.end())
{
dataLodLevel = TfUnstringify<int>(it->second);
if (dataLodLevel < 0)
{
dataLodLevel = 0;
}
}
return dataLodLevel;
}
size_t OmniMetProvider::GetLod1Count() const
{
    // although the incoming string from the parameter set
    // might be interpretable as a negative integer,
    // a negative count makes no practical sense, so if
    // the value is interpreted as negative, we clamp it to 0
    // and return an unsigned version to the caller
    int lod1Count = 0;
EdfDataParameters parameters = this->GetParameters();
std::unordered_map<std::string, std::string>::const_iterator it = parameters.providerArgs.find(OmniMetProviderProviderArgKeys->lod1Count);
if (it != parameters.providerArgs.end())
{
lod1Count = TfUnstringify<int>(it->second);
if (lod1Count < 0)
{
lod1Count = 0;
}
}
return static_cast<size_t>(lod1Count);
}
bool OmniMetProvider::IsDeferredRead() const
{
bool deferredRead = false;
EdfDataParameters parameters = this->GetParameters();
std::unordered_map<std::string, std::string>::const_iterator it = parameters.providerArgs.find(OmniMetProviderProviderArgKeys->deferredRead);
if (it != parameters.providerArgs.end())
{
deferredRead = TfUnstringify<bool>(it->second);
}
return deferredRead;
}
size_t OmniMetProvider::_CurlWriteCallback(void* data, size_t size, size_t nmemb, void* userp)
{
    // libcurl delivers size * nmemb bytes per invocation and expects the
    // callback to return the number of bytes it consumed; returning any
    // other value signals an error and aborts the transfer
    std::string* result = reinterpret_cast<std::string*>(userp);
    size_t byteCount = size * nmemb;
    result->append(reinterpret_cast<const char*>(data), byteCount);
    return byteCount;
}
PXR_NAMESPACE_CLOSE_SCOPE

NVIDIA-Omniverse/usd-plugin-samples/src/usd-plugins/dynamicPayload/omniMetProvider/omniMetProvider.h

// Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_OMNIMETPROVIDER_OMNIMETPROVIDER_H_
#define OMNI_OMNIMETPROVIDER_OMNIMETPROVIDER_H_
#include <string>
#include <vector>
#include <utility>
#include <pxr/pxr.h>
#include <pxr/base/tf/token.h>
#include <pxr/usd/sdf/layer.h>
#include <pxr/usd/sdf/schema.h>
#include <iEdfDataProvider.h>
PXR_NAMESPACE_OPEN_SCOPE
TF_DECLARE_PUBLIC_TOKENS(
OmniMetProviderProviderArgKeys,
(dataLodLevel)
(deferredRead)
(lod1Count)
);
/// \class OmniMetProvider
///
/// Defines a specific EDF back-end data provider for reading information
/// from the Metropolitan Museum of Art REST APIs and converting that
/// into prim and attribute data that can be processed by USD.
///
class OmniMetProvider : public IEdfDataProvider
{
public:
OmniMetProvider(const EdfDataParameters& parameters);
virtual ~OmniMetProvider();
virtual bool Read(std::shared_ptr<IEdfSourceData> sourceData) override;
virtual bool ReadChildren(const std::string& parentPath, std::shared_ptr<IEdfSourceData> sourceData) override;
virtual bool IsDataCached() const override;
private:
int GetDataLodLevel() const;
size_t GetLod1Count() const;
bool IsDeferredRead() const;
void _LoadData(bool includeObjects, size_t objectCount, std::shared_ptr<IEdfSourceData> sourceData);
std::string _LoadDepartments();
std::vector<std::string> _LoadObjects(const std::string& departmentId, size_t objectCount);
std::vector<std::pair<std::string, int>> _ParseDepartments(const std::string& departmentJson,
std::shared_ptr<IEdfSourceData> sourceData);
void _ParseObject(const std::string& objectData, const std::string& parentPath, std::shared_ptr<IEdfSourceData> sourceData);
// NOTE: these methods are not technically const, since they do change internal state
// in the edfData object's layer data. This is ok, because that object is a cache
// https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#es50-dont-cast-away-const
// the mutable cache state is allowed to change internally and still keep the semantics
// of the object not changing from the outside
void _LoadDepartments(bool includeObjects) const;
void _LoadObjects(const std::string& departmentId, const std::string& parentPath) const;
bool _IsDepartmentDataCached() const;
bool _IsObjectDataCached(const std::string& parentPath) const;
void _ParseDepartments(const std::string& response) const;
std::vector<int> _ParseObjectIds(const std::string& response) const;
void _ParseObject(const std::string& parentPath, const std::string& response) const;
static size_t _CurlWriteCallback(void* data, size_t size, size_t nmemb, void* userp);
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif

NVIDIA-Omniverse/usd-plugin-samples/tools/packman/packmanconf.py

# Use this file to bootstrap packman into your Python environment (3.10.x). Simply
# add the path by doing sys.path.insert to where packmanconf.py is located and then execute:
#
# >>> import packmanconf
# >>> packmanconf.init()
#
# It will use the configured remote(s) and the version of packman in the same folder,
# giving you full access to the packman API via the following module
#
# >> import packmanapi
# >> dir(packmanapi)
import os
import platform
import sys
def init():
"""Call this function to initialize the packman configuration.
Calls to the packman API will work after successfully calling this function.
Note:
This function only needs to be called once during the execution of your
program. Calling it repeatedly is harmless but wasteful.
Compatibility with your Python interpreter is checked and upon failure
the function will report what is required.
Example:
>>> import packmanconf
>>> packmanconf.init()
>>> import packmanapi
>>> packmanapi.set_verbosity_level(packmanapi.VERBOSITY_HIGH)
"""
major = sys.version_info[0]
minor = sys.version_info[1]
if major != 3 or minor != 10:
raise RuntimeError(
f"This version of packman requires Python 3.10.x, but {major}.{minor} was provided"
)
conf_dir = os.path.dirname(os.path.abspath(__file__))
os.environ["PM_INSTALL_PATH"] = conf_dir
packages_root = get_packages_root(conf_dir)
version = get_version(conf_dir)
module_dir = get_module_dir(conf_dir, packages_root, version)
sys.path.insert(1, module_dir)
def get_packages_root(conf_dir: str) -> str:
root = os.getenv("PM_PACKAGES_ROOT")
if not root:
platform_name = platform.system()
if platform_name == "Windows":
drive, _ = os.path.splitdrive(conf_dir)
root = os.path.join(drive, "packman-repo")
elif platform_name == "Darwin":
# macOS
            root = os.path.join(
                os.path.expanduser("~"), "Library/Application Support/packman-cache"
            )
elif platform_name == "Linux":
try:
                cache_root = os.environ["XDG_CACHE_HOME"]
except KeyError:
cache_root = os.path.join(os.path.expanduser("~"), ".cache")
            root = os.path.join(cache_root, "packman")
else:
raise RuntimeError(f"Unsupported platform '{platform_name}'")
# make sure the path exists:
os.makedirs(root, exist_ok=True)
return root
def get_module_dir(conf_dir, packages_root: str, version: str) -> str:
module_dir = os.path.join(packages_root, "packman-common", version)
if not os.path.exists(module_dir):
import tempfile
tf = tempfile.NamedTemporaryFile(delete=False)
target_name = tf.name
tf.close()
url = f"http://bootstrap.packman.nvidia.com/packman-common@{version}.zip"
print(f"Downloading '{url}' ...")
import urllib.request
urllib.request.urlretrieve(url, target_name)
from importlib.machinery import SourceFileLoader
# import module from path provided
script_path = os.path.join(conf_dir, "bootstrap", "install_package.py")
ip = SourceFileLoader("install_package", script_path).load_module()
print("Unpacking ...")
ip.install_package(target_name, module_dir)
os.unlink(tf.name)
return module_dir
def get_version(conf_dir: str):
path = os.path.join(conf_dir, "packman")
if not os.path.exists(path): # in dev repo fallback
path += ".sh"
with open(path, "rt", encoding="utf8") as launch_file:
for line in launch_file.readlines():
if line.startswith("PM_PACKMAN_VERSION"):
_, value = line.split("=")
return value.strip()
raise RuntimeError(f"Unable to find 'PM_PACKMAN_VERSION' in '{path}'")

NVIDIA-Omniverse/usd-plugin-samples/tools/packman/config.packman.xml

<config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>

NVIDIA-Omniverse/usd-plugin-samples/tools/packman/bootstrap/install_package.py

# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import os
import stat
import time
from typing import Any, Callable
RENAME_RETRY_COUNT = 100
RENAME_RETRY_DELAY = 0.1
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
def remove_directory_item(path):
if os.path.islink(path) or os.path.isfile(path):
try:
os.remove(path)
except PermissionError:
# make sure we have access and try again:
os.chmod(path, stat.S_IRWXU)
os.remove(path)
else:
# try first to delete the dir because this will work for folder junctions, otherwise we would follow the junctions and cause destruction!
clean_out_folder = False
try:
# make sure we have access preemptively - this is necessary because recursing into a directory without permissions
# will only lead to heart ache
os.chmod(path, stat.S_IRWXU)
os.rmdir(path)
except OSError:
clean_out_folder = True
if clean_out_folder:
# we should make sure the directory is empty
names = os.listdir(path)
for name in names:
fullname = os.path.join(path, name)
remove_directory_item(fullname)
# now try to again get rid of the folder - and not catch if it raises:
os.rmdir(path)
class StagingDirectory:
def __init__(self, staging_path):
self.staging_path = staging_path
self.temp_folder_path = None
os.makedirs(staging_path, exist_ok=True)
def __enter__(self):
self.temp_folder_path = tempfile.mkdtemp(prefix="ver-", dir=self.staging_path)
return self
def get_temp_folder_path(self):
return self.temp_folder_path
# this function renames the temp staging folder to folder_name, it is required that the parent path exists!
def promote_and_rename(self, folder_name):
abs_dst_folder_name = os.path.join(self.staging_path, folder_name)
os.rename(self.temp_folder_path, abs_dst_folder_name)
def __exit__(self, type, value, traceback):
# Remove temp staging folder if it's still there (something went wrong):
path = self.temp_folder_path
if os.path.isdir(path):
remove_directory_item(path)
def rename_folder(staging_dir: StagingDirectory, folder_name: str):
try:
staging_dir.promote_and_rename(folder_name)
except OSError as exc:
# if we failed to rename because the folder now exists we can assume that another packman process
# has managed to update the package before us - in all other cases we re-raise the exception
abs_dst_folder_name = os.path.join(staging_dir.staging_path, folder_name)
if os.path.exists(abs_dst_folder_name):
logger.warning(
f"Directory {abs_dst_folder_name} already present, package installation already completed"
)
else:
raise
def call_with_retry(
op_name: str, func: Callable, retry_count: int = 3, retry_delay: float = 20
) -> Any:
retries_left = retry_count
while True:
try:
return func()
except (OSError, IOError) as exc:
logger.warning(f"Failure while executing {op_name} [{str(exc)}]")
if retries_left:
retry_str = "retry" if retries_left == 1 else "retries"
logger.warning(
f"Retrying after {retry_delay} seconds"
f" ({retries_left} {retry_str} left) ..."
)
time.sleep(retry_delay)
else:
logger.error("Maximum retries exceeded, giving up")
raise
retries_left -= 1
def rename_folder_with_retry(staging_dir: StagingDirectory, folder_name):
dst_path = os.path.join(staging_dir.staging_path, folder_name)
call_with_retry(
f"rename {staging_dir.get_temp_folder_path()} -> {dst_path}",
lambda: rename_folder(staging_dir, folder_name),
RENAME_RETRY_COUNT,
RENAME_RETRY_DELAY,
)
def install_package(package_path, install_path):
staging_path, version = os.path.split(install_path)
with StagingDirectory(staging_path) as staging_dir:
output_folder = staging_dir.get_temp_folder_path()
with zipfile.ZipFile(package_path, allowZip64=True) as zip_file:
zip_file.extractall(output_folder)
# attempt the rename operation
rename_folder_with_retry(staging_dir, version)
print(f"Package successfully installed to {install_path}")
if __name__ == "__main__":
executable_paths = os.getenv("PATH")
paths_list = executable_paths.split(os.path.pathsep) if executable_paths else []
target_path_np = os.path.normpath(sys.argv[2])
target_path_np_nc = os.path.normcase(target_path_np)
for exec_path in paths_list:
if os.path.normcase(os.path.normpath(exec_path)) == target_path_np_nc:
raise RuntimeError(f"packman will not install to executable path '{exec_path}'")
install_package(sys.argv[1], target_path_np)

NVIDIA-Omniverse/kit-osc/README.md

# OSC Omniverse Kit Extension [omni.osc]
Omniverse Kit extension for sending and receiving OSC (Open Sound Control) messages.

*The OSC control surface app running on the iPad is [TouchOSC](https://hexler.net/touchosc).*
# Getting Started
Open the Community tab under Extensions window (`Window > Extensions`), search for `OSC`, and install and enable the `omni.osc` extension.

## Running the server
After installing and enabling the extension, you should see the following window.

Enter the private IP address of the computer running your Kit application and the desired port, then click `Start`. If you are prompted to configure your Windows Firewall, ensure that the Kit application is allowed to communicate with other devices on the private network.

You can find the private IP address of your computer by running `ipconfig` in the Windows terminal.

If you run the server on `localhost`, that means the server can only receive messages from OSC clients running on the same machine. If you want to receive messages from OSC clients running on other devices on the same network, you must run the server on an IP address that is visible to those devices.
Once the server is running, confirm that it can successfully receive messages by inspecting the verbose console logs. It might be helpful to filter only the logs that originate from `omni.osc`.

## Receiving messages with Python
Below is a python snippet that demonstrates how to handle OSC messages received by the server. It assumes that the OSC server configured above is running. You can paste and run the below snippet directly into the Omniverse Script Editor for testing.
```python
import carb
import carb.events
import omni.osc
def on_event(event: carb.events.IEvent) -> None:
addr, args = omni.osc.osc_message_from_carb_event(event)
carb.log_info(f"Received OSC message: [{addr}, {args}]")
sub = omni.osc.subscribe_to_osc_event_stream(on_event)
```
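The returned subscription must stay referenced for the callback to keep firing; dropping the last reference cancels it. When you are done, release it explicitly. A minimal teardown sketch, assuming the standard `carb.events.ISubscription` interface:

```python
# Release the subscription once the callback is no longer needed
sub.unsubscribe()
sub = None
```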
## Receiving messages with ActionGraph
Search for `OSC` in the Action Graph nodes list and add the `On OSC Message` node to your graph. The node takes a single input,
the OSC address path that this node will handle. This input can be a valid regular expression. Note that this input field does *not* support
OSC pattern matching expressions. The node outputs an OmniGraph bundle with two attributes named `address` and `arguments` which you
can access by using the `Extract Attribute` node.

You can find example USD stages that demonstrate how to configure an ActionGraph using this extension at [exts/omni.osc/data/examples](/exts/omni.osc/data/examples).
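To drive such a graph for testing, you can send matching messages from the Script Editor. For example, if the node's address input is the regular expression `/scale.*`, both messages below would be handled. This is a minimal sketch; the IP address, port, and `/scale` addresses are assumptions and should match your running server and graph:

```python
from pythonosc import udp_client

# Assumes the omni.osc server is listening on 127.0.0.1:3334
client = udp_client.SimpleUDPClient("127.0.0.1", 3334)

# Both addresses match the regular expression /scale.*
# Note: the On OSC Message node only handles lists of floating-point arguments
client.send_message("/scale", [1.0, 2.0, 3.0])
client.send_message("/scale/x", [0.5])
```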
## Sending messages from Python
Since `omni.osc` depends on [python-osc](https://pypi.org/project/python-osc/), you can import this module directly in
your own Python code to send OSC messages. Please see the [documentation](https://python-osc.readthedocs.io/en/latest/) for additional
information and support.
```python
import random
from pythonosc import udp_client
client = udp_client.SimpleUDPClient("127.0.0.1", 3334)
client.send_message("/scale", [random.random(), random.random(), random.random()])
```
You can paste and run the above snippet directly into the Omniverse Script Editor for testing.
## Sending messages from ActionGraph
This is not currently implemented.
## Limitations & Known Issues
- OSC Bundles are currently not supported.
- The OmniGraph `On OSC Message` node can only handle OSC messages containing lists of floating-point arguments.
# Help
The below sections should help you diagnose any potential issues you may encounter while working with `omni.osc` extension.
## Unable to receive messages
1. First, enable verbose logs in the console (filter by the `omni.osc` extension). The server will log any messages received.
2. Confirm that the computer running the Kit application and the device sending the OSC messages are on the same network.
3. Confirm that kit.exe is allowed to communicate with the private network through the Windows Defender Firewall. Note that
you may have multiple instances of kit.exe on this list. When in doubt, ensure that all of them have the appropriate permission.

4. Confirm that the Windows Defender Firewall allows incoming UDP traffic to the port in use.
5. Confirm that the device sending the OSC messages is sending the messages via UDP to the correct IP address and port.
6. Use a tool such as [wireshark](https://www.wireshark.org/) to confirm that the computer running the Kit application is receiving UDP traffic from the device.
## Unable to send messages
1. Confirm that the computer running the Kit application and the device receiving the OSC messages are on the same network.
2. Confirm that kit.exe is allowed to communicate with the private network through the Windows Defender Firewall.
3. Confirm that the device receiving the OSC messages is able to receive incoming UDP traffic at the port in use.
# Contributing
The source code for this repository is provided as-is and we are not accepting outside contributions.
# License
- The code in this repository is licensed under the Apache License 2.0. See [LICENSE](/LICENSE).
- python-osc is licensed under the Unlicense. See [exts/omni.osc/vendor/LICENSE-python-osc](/exts/omni.osc/vendor/LICENSE-python-osc).
# Resources
- [https://opensoundcontrol.stanford.edu/spec-1_0.html](https://opensoundcontrol.stanford.edu/spec-1_0.html)
- [https://en.wikipedia.org/wiki/Open_Sound_Control](https://en.wikipedia.org/wiki/Open_Sound_Control)
- [https://python-osc.readthedocs.io/en/latest/](https://python-osc.readthedocs.io/en/latest/)

NVIDIA-Omniverse/kit-osc/tools/scripts/link_app.py

import os
import argparse
import sys
import json
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")

NVIDIA-Omniverse/kit-osc/tools/packman/config.packman.xml

<config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>

NVIDIA-Omniverse/kit-osc/tools/packman/bootstrap/install_package.py

# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import shutil
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(
package_src_path, allowZip64=True
) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning(
"Directory %s already present, packaged installation aborted" % package_dst_path
)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])

NVIDIA-Omniverse/kit-osc/exts/omni.osc/config/extension.toml

[package]
# Semantic Versionning is used: https://semver.org/
version = "0.3.1"
# The title and description fields are primarily for displaying extension info in UI
title = "OSC (Open Sound Control)"
description="Send and receive OSC (Open Sound Control) messages"
authors = ["NVIDIA"]
repository = "https://github.com/NVIDIA-Omniverse/kit-osc"
readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"
icon = "data/icon.png"
preview_image = "data/preview.png"
# One of categories for UI.
category = "Other"
# Keywords for the extension
keywords = ["kit", "osc"]
[dependencies]
"omni.kit.uiapp" = {}
"omni.kit.pipapi" = {}
"omni.graph" = {}
"omni.graph.bundle.action" = {}
# Main python module this extension provides; it will be publicly available as "import omni.osc".
[[python.module]]
name = "omni.osc"
[python.pipapi]
archiveDirs = ["vendor"]
[settings.exts."omni.osc"]
address = "localhost"
port = 3334
[[test]]
dependencies = ["omni.graph", "omni.kit.test"]

NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/extension.py

# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Any, List
import carb
import carb.events
import carb.profiler
import omni.ext
import omni.kit.app
from pythonosc.dispatcher import Dispatcher
from .core import carb_event_payload_from_osc_message, push_to_osc_event_stream
from .menu import OscMenu
from .server import DaemonOSCUDPServer
from .window import OscWindow
class OmniOscExt(omni.ext.IExt):
def on_startup(self, ext_id):
def on_start(host: str, port: int) -> bool:
return self.server.start(host, port)
def on_stop() -> bool:
return self.server.stop()
def toggle_window_visible(_arg0, _arg1) -> None:
"""
Toggle the window visibility from the editor menu item
"""
self.window.visible = not self.window.visible
self.server = OmniOscExt.create_server()
# The main UI window
default_addr = carb.settings.get_settings().get("exts/omni.osc/address")
default_port = carb.settings.get_settings().get("exts/omni.osc/port")
self.window = OscWindow(
on_start=on_start, on_stop=on_stop, default_addr=default_addr, default_port=default_port
)
# The editor menu entry that toggles the window visibility
self.menu = OscMenu(on_click=toggle_window_visible)
# Toggle the editor menu entry when the user closes the window
self.window.set_visibility_changed_fn(lambda visible: self.menu.set_item_value(visible))
def on_shutdown(self):
self.window = None
self.menu = None
if self.server is not None:
self.server.stop()
self.server = None
    @staticmethod
    def create_server() -> DaemonOSCUDPServer:
"""
Create a server that routes all OSC messages to a carbonite event stream
"""
@carb.profiler.profile
def on_osc_msg(addr: str, *args: List[Any]) -> None:
"""
OSC message handler
"""
carb.log_verbose(f"OSC message: [{addr}, {args}]")
payload = carb_event_payload_from_osc_message(addr, args)
push_to_osc_event_stream(payload)
# Server
dispatcher = Dispatcher()
dispatcher.set_default_handler(on_osc_msg)
return DaemonOSCUDPServer(dispatcher)

NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/__init__.py

# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.kit.pipapi
# python-osc:
# - SWIPAT request: http://nvbugs/3684871
# - A copy of the source is forked to https://github.com/NVIDIA-Omniverse/python-osc
# - The dependency vendored and installed from exts/omni.osc/vendor/python_osc-1.8.0-py3-none-any.whl
omni.kit.pipapi.install(
package="python-osc", module="pythonosc", use_online_index=False, ignore_cache=True, ignore_import_check=False
)
from pythonosc import * # noqa: F401
from .core import * # noqa: F401,F403
from .extension import * # noqa: F401,F403
from .server import * # noqa: F401,F403
# NOTE(jshrake): omni.graph is an optional dependency so handle the case
# that the below import fails
try:
from .ogn import *
except Exception as e:
print(f"omni.osc failed to import OGN due to {e}")
pass

NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/core.py

## Copyright © 2022 NVIDIA CORPORATION & AFFILIATES. ALL RIGHTS RESERVED.
##
## This software product is a proprietary product of Nvidia Corporation and its affiliates
## (the "Company") and all right, title, and interest in and to the software
## product, including all associated intellectual property rights, are and
## shall remain exclusively with the Company.
##
## This software product is governed by the End User License Agreement
## provided with the software product.
from typing import Callable, Tuple
import carb
import carb.events
import omni.ext
import omni.kit.app
OSC_EVENT_TYPE_NAME: str = "omni.osc"
OSC_EVENT_TYPE: int = carb.events.type_from_string(OSC_EVENT_TYPE_NAME)
OSC_MESSAGE_ADDRESS_STR = "address"
OSC_MESSAGE_ARGUMENTS_STR = "arguments"
def get_osc_event_stream() -> carb.events._events.IEventStream:
"""
Returns the OSC event stream
"""
return omni.kit.app.get_app().get_message_bus_event_stream()
def push_to_osc_event_stream(payload: dict) -> None:
"""
Push a payload to the OSC event stream
"""
get_osc_event_stream().push(OSC_EVENT_TYPE, sender=0, payload=payload)
def subscribe_to_osc_event_stream(
cb: Callable[[carb.events._events.IEvent], None]
) -> carb.events._events.ISubscription:
"""
Returns a Carbonite event subscription to the OSC event stream
"""
return get_osc_event_stream().create_subscription_to_pop_by_type(OSC_EVENT_TYPE, cb)
def carb_event_payload_from_osc_message(address: str, args: list) -> dict:
"""
Return a carbonite event payload suitable for pushing to the OSC event stream
"""
return {OSC_MESSAGE_ADDRESS_STR: address, OSC_MESSAGE_ARGUMENTS_STR: args}
def osc_message_from_carb_event(e: carb.events.IEvent) -> Tuple[str, list]:
"""
Return the OSC message address and arguments extracted from a carbonite event payload
"""
return (e.payload[OSC_MESSAGE_ADDRESS_STR], e.payload[OSC_MESSAGE_ARGUMENTS_STR])

NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/server.py

# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import threading
import carb
import carb.events
from pythonosc import osc_server
from pythonosc.dispatcher import Dispatcher
class DaemonOSCUDPServer:
"""
Run a python-osc BlockingOSCUDPServer in a separate thread.
Usage::
import omni.osc.core as osc
dispatcher = osc.Dispatcher()
        dispatcher.set_default_handler(lambda addr, *args: print(f"{addr}: {args}"))
server = osc.DaemonOSCUDPServer(dispatcher)
server.start("192.168.0.1", 3434)
# ...
server.stop()
"""
def __init__(self, dispatcher: Dispatcher):
self.dispatcher: Dispatcher = dispatcher
self.server: osc_server.BlockingOSCUDPServer = None
self.thread: threading.Thread = None
def running(self) -> bool:
"""
Returns true if the server is running
"""
return self.thread is not None and self.thread.is_alive()
def start(self, addr: str, port: int) -> bool:
"""
Start the OSC server on the specified address and port.
Does nothing if the server is already running.
"""
if not self.running():
carb.log_info(f"Starting OSC server on {addr}:{port}")
try:
self.server = osc_server.BlockingOSCUDPServer((addr, port), dispatcher=self.dispatcher)
self.thread = threading.Thread(target=lambda: self.server.serve_forever())
# NOTE(jshrake): Running the thread in daemon mode ensures that the thread and server
# are properly disposed of in the event that the main thread exits unexpectedly.
self.thread.daemon = True
self.thread.start()
except Exception as e:
carb.log_error(f"Error starting OSC server: {e}")
else:
carb.log_info("OSC server already running")
return self.running()
def stop(self) -> bool:
"""
Stops the OSC server.
"""
if self.running():
carb.log_info("Stopping OSC server")
try:
self.server.shutdown()
self.thread.join()
except Exception as e:
carb.log_error(f"Error stopping OSC server: {e}")
finally:
self.server = None
self.thread = None
else:
carb.log_info("OSC server not running")
return self.running()

NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/menu.py

# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.kit.ui
MENU_PATH = "Window/OSC"
class OscMenu:
def __init__(self, on_click):
editor_menu = omni.kit.ui.get_editor_menu()
if not editor_menu:
return
editor_menu.add_item(menu_path=MENU_PATH, on_click=on_click, toggle=True, value=True)
def set_item_value(self, val: bool) -> None:
editor_menu = omni.kit.ui.get_editor_menu()
if not editor_menu:
return
editor_menu.set_value(MENU_PATH, val)
def __del__(self):
editor_menu = omni.kit.ui.get_editor_menu()
if not editor_menu:
return
if editor_menu.has_item(MENU_PATH):
editor_menu.remove_item(MENU_PATH)

NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/window.py

# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Callable
import omni.ui as ui
OnStartCallback = Callable[[str, int], bool]
OnStopCallback = Callable[[], bool]
class OscWindow(ui.Window):
def __init__(
self, default_addr: str, default_port: int, on_start: OnStartCallback, on_stop: OnStopCallback
) -> None:
super().__init__("OSC UDP Server", width=300, height=300)
def start() -> None:
"""
Callback when the user presses the start button
"""
is_running = on_start(addr.as_string, port.as_int)
running.set_value(is_running)
def stop() -> None:
"""
Callback when the user presses the stop button
"""
is_running = on_stop()
running.set_value(is_running)
def update_running_label(label: ui.Label, running: bool) -> None:
"""
Keep the UI label up to date with the state of the server
"""
if running:
label.text = f"Running UDP server @ {addr.as_string}:{port.as_int}"
label.set_style({"color": "green"})
else:
label.text = "Stopped"
label.set_style({"color": "red"})
def toggle_enabled(field: ui.AbstractField, running: bool) -> None:
"""
Enable or disable the input field based on the state of the server
"""
field.enabled = not running
color = "gray" if running else "white"
field.set_style({"color": color})
# Settings
addr = ui.SimpleStringModel(default_addr)
port = ui.SimpleIntModel(default_port)
running = ui.SimpleBoolModel(False)
with self.frame:
with ui.VStack():
label = ui.Label("", height=20)
update_running_label(label, running.get_value_as_bool())
running.add_value_changed_fn(lambda m: update_running_label(label, m.get_value_as_bool()))
with ui.VStack(height=20):
with ui.HStack():
ui.Label("Address:")
addr_field = ui.StringField(addr)
toggle_enabled(addr_field, running.get_value_as_bool())
running.add_value_changed_fn(lambda m: toggle_enabled(addr_field, m.get_value_as_bool()))
ui.Spacer(height=2)
with ui.HStack():
ui.Label("Port:")
port_field = ui.IntField(port)
toggle_enabled(port_field, running.get_value_as_bool())
running.add_value_changed_fn(lambda m: toggle_enabled(port_field, m.get_value_as_bool()))
with ui.VStack():
ui.Button("Start", clicked_fn=start)
ui.Button("Stop", clicked_fn=stop)
| 3,323 | Python | 39.536585 | 113 | 0.560036 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/ogn/__init__.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""
Dynamically import every file in a directory tree that looks like a Python Ogn Node.
This includes linked directories, which is the mechanism by which nodes can be hot-reloaded from the source tree.
"""
# Required to register nodes in Kit 104
try:
import omni.graph.core as og
og.register_ogn_nodes(__file__, "omni.osc")
except Exception:
# Swallow any exceptions
pass
| 817 | Python | 37.952379 | 113 | 0.774786 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/ogn/nodes/OgnOnOscEvent.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""
This is the implementation of the OGN node defined in OgnOnOscEvent.ogn
This implementation is inspired by the OgnOnCustomEvent node
See https://gitlab-master.nvidia.com/omniverse/kit/-/blob/master/kit/source/extensions/omni.graph.action/nodes/OgnOnCustomEvent.py # noqa E501
"""
import re
from typing import Any, List, Union
import carb
import carb.events
import carb.profiler
import omni.graph.core as og
import omni.osc
from omni.osc.core import OSC_MESSAGE_ADDRESS_STR, OSC_MESSAGE_ARGUMENTS_STR
from .. import OgnOnOscEventDatabase
class OgnOnOscEventInternalState:
"""Convenience class for maintaining per-node state information"""
def __init__(self):
"""Instantiate the per-node state information."""
# This subscription object controls the lifetime of our callback, it will be
# cleaned up automatically when our node is destroyed
self.sub = None
# Set when the callback has triggered
self.is_set = False
# The last event received
self.event: Union[None, carb.events.IEvent] = None
# The node instance handle
self.node = None
# The regex used to match the OSC address path
self.osc_path_regex = ""
# The compiled regex pattern
self.osc_path_regex_pattern = None
@carb.profiler.profile
def on_event(self, event: carb.events.IEvent):
"""The event callback"""
if event is None:
return
# Only handle messages with a path that matches the OSC address path regex
osc_addr, _ = omni.osc.osc_message_from_carb_event(event)
if self.osc_path_regex_pattern is None or not self.osc_path_regex_pattern.match(osc_addr):
return
self.is_set = True
self.event = event
# Tell the evaluator we need to be computed
if self.node.is_valid():
self.node.request_compute()
@carb.profiler.profile
def first_time_subscribe(self, node: og.Node, osc_path_regex: str) -> bool:
"""Checked call to set up carb subscription
Args:
node: The node instance
            osc_path_regex: The OSC address path regex to match incoming messages against
Returns:
True if we subscribed, False if we are already subscribed
"""
if self.osc_path_regex != osc_path_regex:
# osc path regex changed since we last subscribed, re-compile
try:
self.osc_path_regex_pattern = re.compile(osc_path_regex)
self.osc_path_regex = osc_path_regex
except Exception as e:
carb.log_error(f"Error compiling OSC Address Path Regex '{osc_path_regex}': {e}")
if self.sub is None:
self.sub = omni.osc.subscribe_to_osc_event_stream(self.on_event)
self.node = node
return True
return False
def try_pop_event(self) -> Union[None, carb.events.IEvent]:
"""Pop the last event received, or None if there is no event to pop"""
if self.is_set:
self.is_set = False
event = self.event
self.event = None
return event
return None
# ======================================================================
class OgnOnOscEvent:
"""
This node triggers when an OSC event is received that matches the OSC address path regex.
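    For example, an address path regex of "/filter" matches messages sent to the
    /filter OSC address; matching uses re.match against the incoming path.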
"""
@staticmethod
def internal_state():
"""Returns an object that will contain per-node state information"""
return OgnOnOscEventInternalState()
@staticmethod
def release(node):
state = OgnOnOscEventDatabase.OgnOnOscEventDatabase.per_node_internal_state(node)
if state.sub:
state.sub.unsubscribe()
state.sub = None
@staticmethod
def check_all_args_are_floats(args: List[Any]) -> bool:
"""
Returns true if the OSC message arguments has the shape of List[float]
"""
all_args_are_float = all(isinstance(arg, float) for arg in args)
return all_args_are_float
@staticmethod
@carb.profiler.profile
def compute(db: og.Database) -> bool:
state: OgnOnOscEventInternalState = db.internal_state
osc_path_regex = db.inputs.path
state.first_time_subscribe(db.node, osc_path_regex)
event = state.try_pop_event()
if event is None:
return False
try:
addr, args = omni.osc.osc_message_from_carb_event(event)
# Populate the output bundle
bundle: og._impl.bundles.BundleContents = db.outputs.message
bundle.clear()
# Update the address attribute
addr_attribute = bundle.insert((og.Type(og.BaseDataType.TOKEN), OSC_MESSAGE_ADDRESS_STR))
addr_attribute.value = addr
# Update the arguments attribute
all_args_are_floats = OgnOnOscEvent.check_all_args_are_floats(args)
# NOTE(jshrake): This node currently only supports OSC arguments shaped like a List[Float]
if all_args_are_floats:
if len(args) == 1:
# Argument list contains a single element, write it as a double
args_attribute = bundle.insert((og.Type(og.BaseDataType.DOUBLE), OSC_MESSAGE_ARGUMENTS_STR))
args_attribute.value = args[0]
elif len(args) > 1:
                    # Argument list contains multiple elements, write it as a list
args_attribute = bundle.insert((og.Type(og.BaseDataType.DOUBLE, tuple_count=len(args), array_depth=0), OSC_MESSAGE_ARGUMENTS_STR))
args_attribute.value = args
else:
carb.log_warn(f"OnOscMessage node expected OSC message arguments to be of type List[Float], instead got {args}")
return False
db.outputs.execOut = og.ExecutionAttributeState.ENABLED
except Exception as e:
carb.log_error(f"Error in OgnOnOscEvent::compute: {e}")
return False
return True
| 6,464 | Python | 37.254438 | 150 | 0.629332 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/tests/tests.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import asyncio
import omni.kit.test
import omni.osc
class Test(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
pass
# After running each test
async def tearDown(self):
pass
async def test_can_start_and_stop_server(self):
server = omni.osc.DaemonOSCUDPServer(None)
is_running = server.start("localhost", 12345)
self.assertTrue(is_running)
await asyncio.sleep(0.1)
is_running = server.running()
self.assertTrue(is_running)
is_running = server.stop()
self.assertFalse(is_running)
async def test_server_can_receive_messages(self):
server = omni.osc.OmniOscExt.create_server()
is_running = server.start("localhost", 3337)
self.assertTrue(is_running)
self.count = 0
def on_event(e) -> None:
addr, _ = omni.osc.osc_message_from_carb_event(e)
self.assertEqual(e.type, omni.osc.core.OSC_EVENT_TYPE)
self.assertEqual(addr, "/filter")
self.count += 1
sub = omni.osc.subscribe_to_osc_event_stream(on_event)
total_msg_count = 10
def send_messages():
import random
from pythonosc import udp_client
client = udp_client.SimpleUDPClient(address="127.0.0.1", port=3337)
self.assertTrue(client is not None)
for _ in range(total_msg_count):
client.send_message("/filter", random.random())
send_messages()
# Wait a few seconds for the server to receive the messages
await asyncio.sleep(3)
# Manually pump the stream so our subscription callback executes
omni.osc.get_osc_event_stream().pump()
self.assertEqual(self.count, total_msg_count)
| 2,226 | Python | 34.919354 | 79 | 0.655436 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/tests/__init__.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from .tests import * # noqa: F401,F403
| 467 | Python | 45.799995 | 76 | 0.798715 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [0.3.1] - 2023-09-28
### Changed
- Update CHANGELOG
## [0.3.0] - 2023-09-26
### Changed
- Fix OGN node registration for Kit 105.1
## [0.2.0] - 2022-09-12
### Changed
- The `On OSC Message` OmniGraph node now outputs a Bundle typed value rather than an Unknown typed value.
- Users can extract the "address" and the "arguments" of the OSC message with the `Extract Attribute` node.
## [0.1.1] - 2022-09-12
### Changed
- Updated documentation.
## [0.1.0] - 2022-09-02
### Added
- Initial release.
| 600 | Markdown | 22.115384 | 107 | 0.671667 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/docs/README.md | # omni.osc
Omniverse Kit extension for sending and receiving OSC (Open Sound Control) messages. | 96 | Markdown | 31.333323 | 84 | 0.802083 |
AccelerationAgency/omniverse-extensions/exts/taa.google.spreadsheet.api/taa/google/spreadsheet/api/extension.py | import omni.ext
import omni.ui as ui
import omni.kit.commands
from typing import List
from pxr import Gf
import omni.kit.pipapi
import omni.usd
omni.kit.pipapi.install('google-api-python-client')
omni.kit.pipapi.install('google-auth-httplib2')
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
SPACING = 4
LABEL_WIDTH = 120
class MyExtension(omni.ext.IExt):
data = {'translate_x': 0, 'translate_y': 0, 'translate_z': 0, 'rotate_x': 0, 'rotate_y': 0, 'rotate_z': 0, 'scale_x': 0, 'scale_y': 0, 'scale_z': 0}
subscription = None
stage = None
google_sheet = None
label_width = 50
_source_prim_model = ui.SimpleStringModel()
# lifecycle
def on_startup(self, ext_id):
print("[taa.google.spreadsheet.api] Extension starting up")
self.stage = omni.usd.get_context().get_stage()
self._window = ui.Window("TAA Google Spreadsheet API", width=400, height=270)
with self._window.frame:
with ui.VStack(height=0, spacing=SPACING):
with ui.CollapsableFrame("Source", name="group"):
with ui.VStack(height=0, spacing=SPACING):
with ui.HStack():
ui.Label("Prim", name="attribute_name", width=LABEL_WIDTH)
ui.StringField(model=self._source_prim_model)
ui.Button(" S ", width=0, height=0, style={"margin": 0}, clicked_fn=self._on_get_selection, tooltip="Get From Selection")
ui.Spacer(height= 12)
with ui.CollapsableFrame("Settings", name="group"):
with ui.VStack(height=0, spacing=SPACING):
ui.Label('Spreadsheet ID', height=20)
self.spreadsheet_id_field = ui.StringField(height=20)
ui.Label('Range', height=20)
self.range_field = ui.StringField(height=20)
ui.Label('API Key', height=20)
self.api_key_field = ui.StringField(height=20)
ui.Spacer(height= 12)
self.startButton = ui.Button("Start", height=54, clicked_fn=lambda: self.start(), style={"background_color": "green"})
self.stopButton = ui.Button("Stop", height=54, clicked_fn=lambda: self.stop(), style={"color": "red"})
ui.Spacer(height= 12)
self.statusLabel = ui.Label('Click start to begin', height=14, style={"font_size": 12})
self.stopButton.visible = False
print("[taa.google.spreadsheet.api] Extension start up complete")
def on_shutdown(self):
print("Extension shutting down")
self.stop()
print("Extension shutdown complete")
# custom methods
def _on_get_selection(self):
print('_on_get_selection', self.get_selection())
self._source_prim_model.as_string = ", ".join(self.get_selection())
def get_selection(self) -> List[str]:
return omni.usd.get_context().get_selection().get_selected_prim_paths()
def apply_changes(self, frame):
try:
            # load the data from the Google Spreadsheet every few seconds; this API is rate limited
            frameNumber = int(frame.payload["SWHFrameNumber"])
            if frameNumber % 180 != 0: return
print('applying changes')
self.read_data()
# act on all selected prims
paths = self.list_paths_of_selected_prims()
for path in paths:
# get reference to the prim on stage, making sure that it's valid
prim = self.stage.GetPrimAtPath(path)
                if not prim.IsValid(): continue
# transform the prim based on the settings in the Google Spreadsheet
self.move_prim(prim)
self.rotate_prim(prim)
self.scale_prim(prim)
print('changes applied successfully')
except Exception as err:
print(err)
def read_config(self):
try:
spreadsheetId = self.spreadsheet_id_field.model.get_value_as_string()
range = self.range_field.model.get_value_as_string()
api_key = self.api_key_field.model.get_value_as_string()
return (spreadsheetId, range, api_key)
except Exception as err:
print(err)
def read_data(self):
try:
spreadsheetId, range, api_key = self.read_config()
            if self.google_sheet is None:
service = build('sheets', 'v4', developerKey=api_key)
self.google_sheet = service.spreadsheets()
result = self.google_sheet.values().get(spreadsheetId=spreadsheetId, range=range).execute()
values = result.get('values', [])
data = toJSON(values)
# normalize and clean data
self.data["shape"] = data.setdefault('shape', 'Cube')
self.data["size"] = float(data.setdefault('size', 100))
self.data["radius"] = float(data.setdefault('radius', 100))
self.data["translate_x"] = float(data.setdefault('translate_x', 0))
self.data["translate_y"] = float(data.setdefault('translate_y', 0))
self.data["translate_z"] = float(data.setdefault('translate_z', 0))
self.data["rotate_x"] = float(data.setdefault('rotate_x', 0))
self.data["rotate_y"] = float(data.setdefault('rotate_y', 0))
self.data["rotate_z"] = float(data.setdefault('rotate_z', 0))
self.data["scale_x"] = float(data.setdefault('scale_x', 1))
self.data["scale_y"] = float(data.setdefault('scale_y', 1))
self.data["scale_z"] = float(data.setdefault('scale_z', 1))
except HttpError as err:
print(err)
def move_prim(self, prim):
try:
x = self.data.get('translate_x')
y = self.data.get('translate_y')
z = self.data.get('translate_z')
omni.kit.commands.execute('TransformPrimSRT',
path=prim.GetPath(),
new_translation=Gf.Vec3d(x, y, z),
)
except Exception as err:
print("Failed to move prim", err)
def rotate_prim(self, prim):
try:
x = self.data.get('rotate_x')
y = self.data.get('rotate_y')
z = self.data.get('rotate_z')
omni.kit.commands.execute('TransformPrimSRT',
path=prim.GetPath(),
new_rotation_euler=Gf.Vec3d(x, y, z),
)
except Exception as err:
print("Failed to rotate prime", err)
def scale_prim(self, prim):
try:
x = self.data.get('scale_x')
y = self.data.get('scale_y')
z = self.data.get('scale_z')
omni.kit.commands.execute('TransformPrimSRT',
path=prim.GetPath(),
new_scale=Gf.Vec3d(x, y, z),
)
except Exception as err:
print("Failed to scale prim", err)
    def list_paths_of_selected_prims(self):
        try:
            # prefer explicit paths typed into the UI field; fall back to the current selection
            paths = [i.strip() for i in self._source_prim_model.as_string.split(",") if i.strip()]
            if not paths:
                paths = self.get_selection()
            return paths
        except Exception as err:
            print(err)
def start(self):
self.read_data()
def on_update_apply(frame): self.apply_changes(frame)
self.subscription = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(on_update_apply)
self.startButton.visible = False
self.stopButton.visible = True
self.statusLabel.text = "Status: started"
def stop(self):
if self.subscription: del self.subscription
self.startButton.visible = True
self.stopButton.visible = False
self.statusLabel.text = "Status: stopped"
"""
Utility functions
"""
def toJSON(values):
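    """Convert spreadsheet rows of [key, value] pairs into a dict, skipping malformed rows."""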
json = {}
if not values:
return json
    for row in values:
        # skip rows that are missing a key or a value
        if len(row) < 2 or not row[0] or not row[1]:
            continue
        json[row[0]] = row[1]
return json
| 8,802 | Python | 27.124601 | 152 | 0.527153 |
AccelerationAgency/omniverse-extensions/exts/taa.google.spreadsheet.api/taa/google/spreadsheet/api/__init__.py | from .extension import *
| 25 | Python | 11.999994 | 24 | 0.76 |
AccelerationAgency/omniverse-extensions/exts/taa.google.spreadsheet.api/config/extension.toml | [package]
version = "1.0.0"
title = "TAA - Google Spreadsheet API"
description="An exploration into using Google Spreadsheet data to objects on the stage"
readme = "docs/README.md"
repository = ""
category = "Other"
keywords = ["taa", "google", "spreadsheet", "api", "example"]
icon = "data/taa-logo.png"
[dependencies]
"omni.kit.uiapp" = {}
[[python.module]]
name = "taa.google.spreadsheet.api" | 399 | TOML | 23.999999 | 87 | 0.696742 |
AccelerationAgency/omniverse-extensions/exts/taa.omniverse.cameracreator/taa/omniverse/cameracreator/extension.py | import omni.ext
import omni.ui as ui
import omni.kit.commands as commands
import omni.kit.viewport_legacy
class MyExtension(omni.ext.IExt):
# Lifecycle
def on_startup(self, ext_id):
print("[taa.omniverse.viewport] Extension starting up")
self._window = ui.Window("TAA Quick Camera", width=200, height = 200)
with self._window.frame:
with ui.VStack(height = 0, spacing = 4):
self.perspectiveButton = ui.Button("Perspective", height=40, clicked_fn=lambda: self.create_perspective_camera(), style={"background_color":"black"})
self.topButton = ui.Button("Top", height=40, clicked_fn=lambda: self.create_top_camera(), style={"background_color":"black"})
self.frontButton = ui.Button("Front", height=40, clicked_fn=lambda: self.create_front_camera(), style={"background_color":"black"})
self.rightButton = ui.Button("Right", height=40, clicked_fn=lambda: self.create_right_camera(), style={"background_color":"black"})
print("[taa.omniverse.viewport] Extension start up complete")
def on_shutdown(self):
print("[taa.omniverse.viewport] Extension shutting down")
self.stop()
print("[taa.omniverse.viewport] Extension shutdown complete")
# Custom methods
def set_camera(self, path):
omni.kit.viewport_legacy.get_viewport_interface().get_viewport_window().set_active_camera(path)
def rename_camera(self, name):
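        # Kit has no direct rename command; moving the prim to a new path effectively renames it.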
cameraPath = omni.kit.viewport_legacy.get_viewport_interface().get_viewport_window().get_active_camera()
omni.kit.commands.execute('MovePrims', paths_to_move={cameraPath: f'/World/Camera_{name}'})
def create_perspective_camera(self):
print("[taa.omniverse.viewport] Creating new perspective camera")
self.set_camera("/OmniverseKit_Persp")
commands.execute('DuplicateFromActiveViewportCameraCommand', viewport_name='Viewport')
self.rename_camera("Perspective")
def create_top_camera(self):
print("[taa.omniverse.viewport] Creating new top-down camera")
self.set_camera("/OmniverseKit_Top")
commands.execute('DuplicateFromActiveViewportCameraCommand', viewport_name='Viewport')
self.rename_camera("Top")
def create_front_camera(self):
print("[taa.omniverse.viewport] Creating new front view camera")
self.set_camera("/OmniverseKit_Front")
commands.execute('DuplicateFromActiveViewportCameraCommand', viewport_name='Viewport')
self.rename_camera("Front")
def create_right_camera(self):
print("[taa.omniverse.viewport] Creating new right view camera")
self.set_camera("/OmniverseKit_Right")
commands.execute('DuplicateFromActiveViewportCameraCommand', viewport_name='Viewport')
self.rename_camera("Right")
def start(self):
print("[taa.omniverse.viewport] Starting...")
def stop(self):
print("[taa.omniverse.viewport] Stopping...")
| 2,974 | Python | 44.76923 | 165 | 0.675521 |
AccelerationAgency/omniverse-extensions/exts/taa.omniverse.cameracreator/taa/omniverse/cameracreator/__init__.py | from .extension import *
| 26 | Python | 7.999997 | 24 | 0.730769 |
AccelerationAgency/omniverse-extensions/exts/taa.omniverse.cameracreator/config/extension.toml | [package]
version = "1.0.0"
title = "TAA - Omniverse Camera Creator"
description = "An simple extension that lets you quickly create cameras with a single click."
readme = "docs/README.md"
repository = ""
category = "Other"
keywords = ["taa", "viewport", "create", "camera", "view"]
icon = "data/taa-logo.png"
[dependencies]
"omni.kit.uiapp" = {}
[[python.module]]
name = "taa.omniverse.cameracreator" | 405 | TOML | 24.374998 | 93 | 0.693827 |
ilanhuang/audio2face-streamgpt-public/README.md | # Stream-GPT
Stream-GPT is an Omniverse Extension that uses OpenAI's chat models (GPT-4 by default) to create a virtual assistant. It allows users to interact with the assistant through both text and voice, and the assistant responds in kind. The extension uses OpenAI's Whisper ASR system to transcribe audio input and Eleven Labs' API to convert the assistant's text responses into audio.
## Getting Started
### Prerequisites
- Python 3.6 or higher
- Omniverse Kit
- Omniverse Audio2Face
- OpenAI API key
- Eleven Labs API key
### Installation
1. Clone the repository:
```bash
git clone https://github.com/ilanhuang/audio2face-stream-chatgpt.git
```
2. Install the required Python packages:
```bash
pip install -r requirements.txt
```
3. Update the `sys.path.append` in `extension.py` with the correct path to the `streaming_server` directory in your local clone of the repository.
```python
sys.path.append("C:\\Users\\YourUsername\\path\\to\\stream-gpt\\pkg\\audio2face-2022.2.1\\exts\\omni.audio2face.player\omni\\audio2face\\player\\scripts\\streaming_server")
```
4. Add the custom extension to Omniverse:
- Go to the "Windows" tab on the top of the screen.
- Scroll down to "Extensions".
- Click on the gear icon to open the Extensions settings.
- Click on the "+" button to add a new path to the custom extension.
- A window will pop up when you turn on the extension.
5. Set your OpenAI and Eleven Labs API keys, as well as the voice_id, model_id, and the Audio2Face's audioplayer's prim path (instance_name) in the extension's settings:
- Open the extension and click on the "Settings" button.
- Enter your OpenAI API key, Eleven Labs API key, voice_id, model_id and instance name in the corresponding fields. (A text file in the repository lists the available voice ids.)
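If you prefer to pre-seed these values instead of typing them into the dialog, the extension reads them from Kit's persistent settings. A minimal sketch (run from Kit's Script Editor; every value below is a placeholder you must replace with your own):
```python
import carb.settings
settings = carb.settings.get_settings()
# Setting paths mirror those used in window.py; the values are placeholders.
settings.set_string("/persistent/exts/omni.example.streamgpt/APIKey_OPEN_AI", "<your-openai-key>")
settings.set_string("/persistent/exts/omni.example.streamgpt/APIKey_ELEVEN_LABS", "<your-elevenlabs-key>")
settings.set_string("/persistent/exts/omni.example.streamgpt/VOICE_ID", "<voice-id>")
settings.set_string("/persistent/exts/omni.example.streamgpt/MODEL_ID", "<model-id>")
settings.set_string("/persistent/exts/omni.example.streamgpt/INSTANCE_NAME", "<audio2face-player-prim-path>")
```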
## Usage
Once the application is running, you can interact with the virtual assistant through the UI. You can type your prompts into the text field and click on the "Send" button or use the "Record Audio" button to speak your prompts. The assistant will respond in the chat log and through your speakers.
You can also add a system to the GPT virtual assistant by typing it in the "System" field in the UI.
All interactions made with the extension are saved in a folder named "chat_logs" for future reference. | 2,294 | Markdown | 40.727272 | 358 | 0.762424 |
ilanhuang/audio2face-streamgpt-public/tools/scripts/link_app.py | import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
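    # Ask the locally running Omniverse Launcher (HTTP API on port 33480) which apps are installed.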
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
| 2,814 | Python | 32.117647 | 133 | 0.562189 |
ilanhuang/audio2face-streamgpt-public/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 | XML | 34.333328 | 123 | 0.691943 |
ilanhuang/audio2face-streamgpt-public/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
import tempfile
import zipfile
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
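    """Minimal context manager that creates a temporary directory and removes it on exit."""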
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning("Directory %s already present, packaged installation aborted" % package_dst_path)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
| 1,844 | Python | 33.166666 | 108 | 0.703362 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/recording_transcription.py | #Stream-GPT
#GNU - GLP Licence
#Copyright (C) <year> <Huang I Lan & Erks - Virtual Studio>
#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import pyaudio
import wave
import keyboard
import time
from time import sleep
import openai
import datetime
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
def save_file(filepath, content):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
def timestamp_to_datetime(unix_time):
return datetime.datetime.fromtimestamp(unix_time).strftime("%A, %B %d, %Y at %I:%M%p %Z")
def record_client_voice(output_filename, recording_status):
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
frames = []
p = pyaudio.PyAudio()
stream = None
try:
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
start_time = time.time()
min_duration = 0.1
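        # Keep capturing while the caller reports recording; min_duration guarantees
        # at least a short clip so the resulting WAV file is never empty.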
while recording_status() or time.time() - start_time < min_duration:
data = stream.read(CHUNK)
frames.append(data)
except Exception as e:
print(f"Error while recording audio: {e}")
finally:
if stream is not None:
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(output_filename, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
return output_filename
def transcribe_audio_to_text(file_path):
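    # Upload the recorded audio file to OpenAI's hosted Whisper model and return the transcript text.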
with open(file_path, 'rb') as audio_file:
transcript_response = openai.Audio.transcribe("whisper-1", audio_file)
return transcript_response["text"] | 2,508 | Python | 32.013157 | 240 | 0.64673 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/transmission.py | #Stream-GPT
#GNU - GLP Licence
#Copyright (C) <year> <Huang I Lan & Erks - Virtual Studio>
#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
import grpc
import os
import soundfile
import numpy as np
import audio2face_pb2
import audio2face_pb2_grpc
import sounddevice as sd
import time
from typing import Iterator
import requests
import queue
import threading
import carb
def generate_stream(text: str, voice_id: str, model_id: str, api_key: str, stream_chunk_size: int = 2048) -> Iterator[bytes]:
url = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}/stream"
data = dict(text=text, model_id=model_id, voice_settings=None)
headers = {"xi-api-key": api_key}
response = requests.post(url, json=data, headers=headers, stream=True)
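    # Yield the MP3 bytes as they arrive so playback can begin before synthesis finishes.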
for chunk in response.iter_content(chunk_size=stream_chunk_size):
if chunk:
yield chunk
def read_api_key_from_file(file_path: str) -> str:
with open(file_path, 'r') as f:
return f.read().strip()
def text_to_audio_stream(text, instance_name, api_key):
print("text_to_audio_stream: start")
settings = carb.settings.get_settings()
voice_id = settings.get_as_string("/persistent/exts/omni.example.streamgpt/VOICE_ID")
model_id = settings.get_as_string("/persistent/exts/omni.example.streamgpt/MODEL_ID")
audio_stream = generate_stream(text, voice_id, model_id, api_key)
current_dir = os.path.dirname(os.path.realpath(__file__))
audio_filename = os.path.join(current_dir, "temp_audio_response.mp3")
with open(audio_filename, 'wb') as f:
for chunk in audio_stream:
f.write(chunk)
audio_data, samplerate = soundfile.read(audio_filename, dtype="float32")
if len(audio_data.shape) > 1:
audio_data = np.average(audio_data, axis=1)
url = "localhost:50051"
audio_queue = queue.Queue()
audio_queue.put(audio_data)
def audio_streamer():
while not audio_queue.empty():
audio_chunk = audio_queue.get()
push_audio_track_stream(url, audio_chunk, samplerate, instance_name)
audio_thread = threading.Thread(target=audio_streamer)
audio_thread.start()
os.remove(audio_filename)
print("text_to_audio_stream: end")
def push_audio_track_stream(url, audio_data, samplerate, instance_name):
print("push_audio_track_stream: start")
chunk_size = samplerate // 10
sleep_between_chunks = 0.04
with grpc.insecure_channel(url) as channel:
print("Channel created")
stub = audio2face_pb2_grpc.Audio2FaceStub(channel)
def make_generator():
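            # First send a start marker describing the stream, then raw float32 PCM chunks.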
start_marker = audio2face_pb2.PushAudioRequestStart(
samplerate=samplerate,
instance_name=instance_name,
block_until_playback_is_finished=False,
)
yield audio2face_pb2.PushAudioStreamRequest(start_marker=start_marker)
for i in range(len(audio_data) // chunk_size + 1):
try:
time.sleep(sleep_between_chunks)
chunk = audio_data[i * chunk_size : i * chunk_size + chunk_size]
yield audio2face_pb2.PushAudioStreamRequest(audio_data=chunk.astype(np.float32).tobytes())
except Exception as e:
print(f"Error in generator function: {e}")
break
request_generator = make_generator()
print("Sending audio data...")
response = stub.PushAudioStream(request_generator)
if response.success:
print("SUCCESS")
else:
print(f"ERROR: {response.message}")
print("Channel closed") | 4,203 | Python | 39.038095 | 240 | 0.66738 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/extension.py | #Stream-GPT
#GNU - GLP Licence
#Copyright (C) <year> <Huang I Lan & Erks - Virtual Studio>
#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
import omni.ext
import sys
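# NOTE: update this path to the streaming_server folder of your local Audio2Face install (see README).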
sys.path.append("C:\\Users\\ERKS 2\\Documents\\Omniverse\\ov\\pkg\\audio2face-2022.2.1\\exts\\omni.audio2face.player\omni\\audio2face\\player\\scripts\\streaming_server")
import openai
import carb
from .window import AudioChatWindow
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
openai.api_key = AudioChatWindow.get_openai_api_key()
self._window = AudioChatWindow("VIRTUAL ASSISTANT", width=400, height=525)
def on_shutdown(self):
self._window.destroy()
self._window = None
| 1,821 | Python | 55.937498 | 240 | 0.741351 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/__init__.py | from .extension import *
| 25 | Python | 11.999994 | 24 | 0.76 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/chatbot.py | #Stream-GPT
#GNU - GLP Licence
#Copyright (C) <year> <Huang I Lan & Erks - Virtual Studio>
#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import openai
import json
import numpy as np
from numpy.linalg import norm
import re
from time import time,sleep
from uuid import uuid4
import datetime
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
def save_file(filepath, content):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
def load_json(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return json.load(infile)
def save_json(filepath, payload):
with open(filepath, 'w', encoding='utf-8') as outfile:
json.dump(payload, outfile, ensure_ascii=False, sort_keys=True, indent=2)
def timestamp_to_datetime(unix_time):
return datetime.datetime.fromtimestamp(unix_time).strftime("%A, %B %d, %Y at %I:%M%p %Z")
def gpt3_embedding(content, engine='text-embedding-ada-002'):
content = content.encode(encoding='ASCII',errors='ignore').decode() # fix any UNICODE errors
response = openai.Embedding.create(input=content,engine=engine)
vector = response['data'][0]['embedding'] # this is a normal list
return vector
def chatgpt_completion(messages, model="gpt-4", temp=0.0, top_p=1.0, tokens=400, freq_pen=0.0, pres_pen=0.0):
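    # Run the chat completion, then log the exchange (with token usage) to a timestamped file under chat_logs/.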
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temp,
max_tokens=tokens,
top_p=top_p,
frequency_penalty=freq_pen,
presence_penalty=pres_pen,)
text = response['choices'][0]['message']['content']
tokens_used = response['usage']['total_tokens']
filename = 'chat_%s_aibot.json' % time()
script_dir = os.path.dirname(os.path.realpath(__file__))
chat_logs_path = os.path.join(script_dir, 'chat_logs')
if not os.path.exists(chat_logs_path):
os.makedirs(chat_logs_path)
input_message = messages[-1]['content']
log_content = f"User:\n{input_message}\n\nAi_Bot:\n{text}\n\nTokens used: {tokens_used}"
save_file(os.path.join(chat_logs_path, filename), log_content)
return text
def flatten_convo(conversation):
convo = ''
for i in conversation:
convo += '%s: %s\n' % (i['role'].upper(), i['content'])
return convo.strip()
def set_openai_api_key(api_key):
openai.api_key = api_key
def set_system_content(content):
global system_content
system_content = content
if __name__ == '__main__':
    convo_length = 30
    # api_key and system_content are not defined when this module is run directly;
    # as a minimal assumption, read them from the environment instead.
    set_openai_api_key(os.environ.get('OPENAI_API_KEY', ''))
    system_content = os.environ.get('GPT_SYSTEM_CONTENT', 'You are a helpful assistant.')
    conversation = list()
    conversation.append({'role': 'system', 'content': system_content})
counter = 0
while True:
# get user input, save to file
a = input('\n\nCLIENT: ')
conversation.append({'role': 'user', 'content': a})
filename = 'chat_%s_client.txt' % time()
if not os.path.exists('chat_logs'):
os.makedirs('chat_logs')
save_file('chat_logs/%s' % filename, a)
flat = flatten_convo(conversation)
# generate a response
response = chatgpt_completion(conversation)
conversation.append({'role': 'assistant', 'content': response})
print('\n\nAI_Bot: %s' % response)
# increment counter and consolidate memories
counter += 2
if counter >= 10:
# reset conversation
conversation = list()
conversation.append({'role': 'system', 'content': system_content})
| 4,226 | Python | 35.128205 | 240 | 0.643871 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/window.py | #Stream-GPT
#GNU - GLP Licence
#Copyright (C) <year> <Huang I Lan & Erks - Virtual Studio>
#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import omni.ui as ui
import omni.kit.commands
from omni.kit.window.popup_dialog.form_dialog import FormDialog
from .recording_transcription import record_client_voice, transcribe_audio_to_text
from .chatbot import chatgpt_completion, set_system_content
from .transmission import text_to_audio_stream
import threading
import time
import tempfile
import datetime
import carb
def save_file(filepath, content):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
def timestamp_to_datetime(unix_time):
return datetime.datetime.fromtimestamp(unix_time).strftime("%A, %B %d, %Y at %I:%M%p %Z")
class AudioChatWindow(ui.Window):
def _build_fn(self):
with self.frame:
with ui.VStack():
with ui.ScrollingFrame(height=ui.Percent(75)):
self.chat_log = ui.Label("", word_wrap=True)
with ui.HStack(height=ui.Percent(10)):
ui.StringField(model=self._prompt_model, multiline=True)
with ui.HStack(height=ui.Percent(10)):
self.record_audio_button = ui.Button("Record Audio", height=40, clicked_fn=lambda *_args, **_kwargs: self._toggle_record_audio())
ui.Button("Send", height=40, clicked_fn=lambda: self._send_text_prompt())
with ui.HStack():
ui.Button("Settings", tooltip="Configure API Key, Instance name and Default System", width=0, height=0, clicked_fn=lambda: self._open_settings())
system_settings_button = ui.Button("System", height=0, width=0)
system_settings_button.set_clicked_fn(lambda: self.show_system_settings_menu())
def __init__(self, title: str, **kwargs) -> None:
self.conversation = [{"role": "system", "content": ""}]
self.system_content_model = ui.SimpleStringModel()
self.lock = threading.Lock()
super().__init__(title, **kwargs)
self._prompt_model = ui.SimpleStringModel()
self.frame.set_build_fn(self._build_fn)
def show_system_settings_menu(self):
self.system_settings_menu = ui.Menu("")
with self.system_settings_menu:
ui.StringField(model=self.system_content_model, multiline=True)
self.system_settings_menu.show()
def _process_conversation(self, user_content):
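        # A changed system prompt invalidates the running conversation, so start it fresh.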
current_system_content = self.system_content_model.get_value_as_string().strip()
if current_system_content != self.conversation[0]['content']:
self.reset_chat()
set_system_content(current_system_content)
self.conversation.append({"role": "user", "content": user_content})
response = chatgpt_completion(self.conversation)
self.chat_log.text += f"\nUser: {user_content}\nAssistant: {response}"
settings = carb.settings.get_settings()
instance_name = settings.get_as_string("/persistent/exts/omni.example.streamgpt/INSTANCE_NAME")
threading.Thread(target=text_to_audio_stream, args=(response, instance_name, self.get_elevenlabs_api_key())).start()
def _record_and_transcribe_audio(self):
output_filename = "recorded_audio.wav"
        record_client_voice(output_filename, self.recording_status)
transcript = transcribe_audio_to_text(output_filename)
self._send_audio_transcript(transcript)
def _send_audio_transcript(self, transcript):
self.chat_log.text += "\nThinking..."
threading.Thread(target=self._process_conversation, args=(transcript,)).start()
def reset_chat(self):
self.chat_log.text = ""
self.conversation = [{"role": "system", "content": self.system_content_model.get_value_as_string().strip()}]
def _save_settings(self, dialog):
values = dialog.get_values()
settings = carb.settings.get_settings()
settings.set_string("/persistent/exts/omni.example.streamgpt/APIKey_OPEN_AI", values["APIKey_OPEN_AI"])
settings.set_string("/persistent/exts/omni.example.streamgpt/APIKey_ELEVEN_LABS", values["APIKey_ELEVEN_LABS"])
settings.set_string("/persistent/exts/omni.example.streamgpt/VOICE_ID", values["ELEVEN_LABS_VOICE_ID"])
settings.set_string("/persistent/exts/omni.example.streamgpt/MODEL_ID", values["ELEVEN_LABS_MODEL_ID"])
settings.set_string("/persistent/exts/omni.example.streamgpt/INSTANCE_NAME", values["INSTANCE_NAME"])
dialog.hide()
def _open_settings(self):
settings = carb.settings.get_settings()
apikey_open_ai = settings.get_as_string("/persistent/exts/omni.example.streamgpt/APIKey_OPEN_AI")
apikey_eleven_labs = settings.get_as_string("/persistent/exts/omni.example.streamgpt/APIKey_ELEVEN_LABS")
voice_id = settings.get_as_string("/persistent/exts/omni.example.streamgpt/VOICE_ID")
model_id = settings.get_as_string("/persistent/exts/omni.example.streamgpt/MODEL_ID")
instance_name = settings.get_as_string("/persistent/exts/omni.example.streamgpt/INSTANCE_NAME")
if apikey_open_ai == "":
apikey_open_ai = "Enter OPEN-AI API Key Here"
if apikey_eleven_labs == "":
apikey_eleven_labs = "Enter ELEVEN-LABS API Key Here"
if instance_name == "":
instance_name = "Enter Instance Name Here"
if voice_id == "":
voice_id = "Enter Eleven Labs Voice ID Here"
if model_id == "":
model_id = "Enter Eleven Labs Model ID Here"
field_defs = [
FormDialog.FieldDef("APIKey_OPEN_AI", "OPEN-AI API Key: ", ui.StringField, apikey_open_ai),
FormDialog.FieldDef("APIKey_ELEVEN_LABS", "ELEVEN-LABS API Key: ", ui.StringField, apikey_eleven_labs),
FormDialog.FieldDef("ELEVEN_LABS_VOICE_ID", "Voice ID: ", ui.StringField, voice_id),
FormDialog.FieldDef("ELEVEN_LABS_MODEL_ID", "Model ID: ", ui.StringField, model_id),
FormDialog.FieldDef("INSTANCE_NAME", "Instance Name: ", ui.StringField, instance_name),
]
dialog = FormDialog(
title="Settings",
message="Your Settings: ",
field_defs=field_defs,
ok_handler=lambda dialog: self._save_settings(dialog))
dialog.show()
@staticmethod
def get_openai_api_key():
settings = carb.settings.get_settings()
return settings.get_as_string("/persistent/exts/omni.example.streamgpt/APIKey_OPEN_AI")
def get_elevenlabs_api_key(self):
settings = carb.settings.get_settings()
return settings.get_as_string("/persistent/exts/omni.example.streamgpt/APIKey_ELEVEN_LABS")
def _send_text_prompt(self):
prompt = self._prompt_model.get_value_as_string()
self.chat_log.text += "\nThinking..."
threading.Thread(target=self._process_conversation, args=(prompt,)).start()
self._prompt_model.set_value("")
def _toggle_record_audio(self):
if not hasattr(self, "recording"):
self.recording = False
self.recording = not self.recording
if self.recording:
self.record_audio_button.text = "Stop Recording"
else:
self.record_audio_button.text = "Record Audio"
threading.Thread(target=self._record_and_transcribe_audio_alternative).start()
def recording_status(self):
return self.recording
def _record_and_transcribe_audio_alternative(self):
with self.lock:
temp_audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
temp_audio_filename = temp_audio_file.name
temp_audio_file.close()
recorded_audio_filename = record_client_voice(temp_audio_filename, self.recording_status)
transcript = transcribe_audio_to_text(recorded_audio_filename)
os.remove(temp_audio_filename)
if transcript.strip():
self._send_audio_transcript(transcript)
def destroy(self):
super().destroy()
self._prompt_model = None | 9,174 | Python | 47.036649 | 240 | 0.645193 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/pytransform/__init__.py | # These module alos are used by protection code, so that protection
# code needn't import anything
import os
import platform
import sys
import struct
# ctypes was added in Python 2.5, so pytransform doesn't work
# with earlier Python versions
#
from ctypes import cdll, c_char, c_char_p, c_int, c_void_p, \
pythonapi, py_object, PYFUNCTYPE, CFUNCTYPE
from fnmatch import fnmatch
#
# Support Platforms
#
plat_path = 'platforms'
plat_table = (
('windows', ('windows', 'cygwin*')),
('darwin', ('darwin',)),
('ios', ('ios',)),
('linux', ('linux*',)),
('freebsd', ('freebsd*', 'openbsd*', 'isilon onefs')),
('poky', ('poky',)),
)
arch_table = (
('x86', ('i?86', )),
('x86_64', ('x64', 'x86_64', 'amd64', 'intel')),
('arm', ('armv5',)),
('armv6', ('armv6l',)),
('armv7', ('armv7l',)),
('ppc64', ('ppc64le',)),
('mips32', ('mips',)),
('aarch32', ('aarch32',)),
('aarch64', ('aarch64', 'arm64'))
)
#
# Hardware type
#
HT_HARDDISK, HT_IFMAC, HT_IPV4, HT_IPV6, HT_DOMAIN = range(5)
#
# Global
#
_pytransform = None
class PytransformError(Exception):
pass
def dllmethod(func):
def wrap(*args, **kwargs):
return func(*args, **kwargs)
return wrap
@dllmethod
def version_info():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('version_info', _pytransform))
return dlfunc()
@dllmethod
def init_pytransform():
major, minor = sys.version_info[0:2]
# Python2.5 no sys.maxsize but sys.maxint
# bitness = 64 if sys.maxsize > 2**32 else 32
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_void_p)
init_module = prototype(('init_module', _pytransform))
ret = init_module(major, minor, pythonapi._handle)
if (ret & 0xF000) == 0x1000:
raise PytransformError('Initialize python wrapper failed (%d)'
% (ret & 0xFFF))
return ret
@dllmethod
def init_runtime():
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
_init_runtime = prototype(('init_runtime', _pytransform))
return _init_runtime(0, 0, 0, 0)
@dllmethod
def encrypt_code_object(pubkey, co, flags, suffix=''):
_pytransform.set_option(6, suffix.encode())
prototype = PYFUNCTYPE(py_object, py_object, py_object, c_int)
dlfunc = prototype(('encrypt_code_object', _pytransform))
return dlfunc(pubkey, co, flags)
@dllmethod
def generate_license_key(prikey, keysize, rcode):
prototype = PYFUNCTYPE(py_object, c_char_p, c_int, c_char_p)
dlfunc = prototype(('generate_license_key', _pytransform))
return dlfunc(prikey, keysize, rcode) if sys.version_info[0] == 2 \
else dlfunc(prikey, keysize, rcode.encode())
@dllmethod
def get_registration_code():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('get_registration_code', _pytransform))
return dlfunc()
@dllmethod
def get_expired_days():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('get_expired_days', _pytransform))
return dlfunc()
@dllmethod
def clean_obj(obj, kind):
prototype = PYFUNCTYPE(c_int, py_object, c_int)
dlfunc = prototype(('clean_obj', _pytransform))
return dlfunc(obj, kind)
def clean_str(*args):
tdict = {
'str': 0,
'bytearray': 1,
'unicode': 2
}
for obj in args:
k = tdict.get(type(obj).__name__)
if k is None:
raise RuntimeError('Can not clean object: %s' % obj)
clean_obj(obj, k)
def get_hd_info(hdtype, name=None):
if hdtype not in range(HT_DOMAIN + 1):
raise RuntimeError('Invalid parameter hdtype: %s' % hdtype)
size = 256
t_buf = c_char * size
buf = t_buf()
cname = c_char_p(0 if name is None
                     else name.encode('utf-8') if hasattr(name, 'encode')
else name)
if (_pytransform.get_hd_info(hdtype, buf, size, cname) == -1):
raise PytransformError('Get hardware information failed')
return buf.value.decode()
def show_hd_info():
return _pytransform.show_hd_info()
def assert_armored(*names):
prototype = PYFUNCTYPE(py_object, py_object)
dlfunc = prototype(('assert_armored', _pytransform))
def wrapper(func):
def wrap_execute(*args, **kwargs):
dlfunc(names)
return func(*args, **kwargs)
return wrap_execute
return wrapper
def check_armored(*names):
try:
prototype = PYFUNCTYPE(py_object, py_object)
prototype(('assert_armored', _pytransform))(names)
return True
except RuntimeError:
return False
def get_license_info():
info = {
'ISSUER': None,
'EXPIRED': None,
'HARDDISK': None,
'IFMAC': None,
'IFIPV4': None,
'DOMAIN': None,
'DATA': None,
'CODE': None,
}
rcode = get_registration_code().decode()
if rcode.startswith('*VERSION:'):
index = rcode.find('\n')
info['ISSUER'] = rcode[9:index].split('.')[0].replace('-sn-1.txt', '')
rcode = rcode[index+1:]
index = 0
if rcode.startswith('*TIME:'):
from time import ctime
index = rcode.find('\n')
info['EXPIRED'] = ctime(float(rcode[6:index]))
index += 1
if rcode[index:].startswith('*FLAGS:'):
index += len('*FLAGS:') + 1
info['FLAGS'] = ord(rcode[index - 1])
prev = None
start = index
for k in ['HARDDISK', 'IFMAC', 'IFIPV4', 'DOMAIN', 'FIXKEY', 'CODE']:
index = rcode.find('*%s:' % k)
if index > -1:
if prev is not None:
info[prev] = rcode[start:index]
prev = k
start = index + len(k) + 2
info['CODE'] = rcode[start:]
i = info['CODE'].find(';')
if i > 0:
info['DATA'] = info['CODE'][i+1:]
info['CODE'] = info['CODE'][:i]
return info
def get_license_code():
return get_license_info()['CODE']
def get_user_data():
return get_license_info()['DATA']
def _match_features(patterns, s):
for pat in patterns:
if fnmatch(s, pat):
return True
def _gnu_get_libc_version():
try:
prototype = CFUNCTYPE(c_char_p)
ver = prototype(('gnu_get_libc_version', cdll.LoadLibrary('')))()
return ver.decode().split('.')
except Exception:
pass
def format_platform(platid=None):
if platid:
return os.path.normpath(platid)
plat = platform.system().lower()
mach = platform.machine().lower()
for alias, platlist in plat_table:
if _match_features(platlist, plat):
plat = alias
break
if plat == 'linux':
cname, cver = platform.libc_ver()
if cname == 'musl':
plat = 'musl'
elif cname == 'libc':
plat = 'android'
elif cname == 'glibc':
v = _gnu_get_libc_version()
if v and len(v) >= 2 and (int(v[0]) * 100 + int(v[1])) < 214:
plat = 'centos6'
for alias, archlist in arch_table:
if _match_features(archlist, mach):
mach = alias
break
if plat == 'windows' and mach == 'x86_64':
bitness = struct.calcsize('P'.encode()) * 8
if bitness == 32:
mach = 'x86'
return os.path.join(plat, mach)
# Load _pytransform library
def _load_library(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
path = os.path.dirname(__file__) if path is None \
else os.path.normpath(path)
plat = platform.system().lower()
for alias, platlist in plat_table:
if _match_features(platlist, plat):
plat = alias
break
name = '_pytransform' + suffix
if plat == 'linux':
filename = os.path.abspath(os.path.join(path, name + '.so'))
elif plat in ('darwin', 'ios'):
filename = os.path.join(path, name + '.dylib')
elif plat == 'windows':
filename = os.path.join(path, name + '.dll')
elif plat in ('freebsd', 'poky'):
filename = os.path.join(path, name + '.so')
else:
filename = None
if platid is not None and os.path.isfile(platid):
filename = platid
elif platid is not None or not os.path.exists(filename) or not is_runtime:
libpath = platid if platid is not None and os.path.isabs(platid) else \
os.path.join(path, plat_path, format_platform(platid))
filename = os.path.join(libpath, os.path.basename(filename))
if filename is None:
raise PytransformError('Platform %s not supported' % plat)
if not os.path.exists(filename):
raise PytransformError('Could not find "%s"' % filename)
try:
m = cdll.LoadLibrary(filename)
except Exception as e:
if sys.flags.debug:
print('Load %s failed:\n%s' % (filename, e))
raise
# Removed from v4.6.1
# if plat == 'linux':
# m.set_option(-1, find_library('c').encode())
if not os.path.abspath('.') == os.path.abspath(path):
m.set_option(1, path.encode() if sys.version_info[0] == 3 else path)
elif (not is_runtime) and sys.platform.startswith('cygwin'):
path = os.environ['PYARMOR_CYGHOME']
m.set_option(1, path.encode() if sys.version_info[0] == 3 else path)
# Required from Python3.6
m.set_option(2, sys.byteorder.encode())
if sys.flags.debug:
m.set_option(3, c_char_p(1))
m.set_option(4, c_char_p(not is_runtime))
# Disable advanced mode by default
m.set_option(5, c_char_p(not advanced))
# Set suffix for private package
if suffix:
m.set_option(6, suffix.encode())
return m
def pyarmor_init(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
global _pytransform
_pytransform = _load_library(path, is_runtime, platid, suffix, advanced)
return init_pytransform()
def pyarmor_runtime(path=None, suffix='', advanced=0):
if _pytransform is not None:
return
try:
pyarmor_init(path, is_runtime=1, suffix=suffix, advanced=advanced)
init_runtime()
except Exception as e:
if sys.flags.debug or hasattr(sys, '_catch_pyarmor'):
raise
sys.stderr.write("%s\n" % str(e))
sys.exit(1)
# ----------------------------------------------------------
# End of pytransform
# ----------------------------------------------------------
#
# Unused
#
@dllmethod
def generate_license_file(filename, priname, rcode, start=-1, count=1):
prototype = PYFUNCTYPE(c_int, c_char_p, c_char_p, c_char_p, c_int, c_int)
dlfunc = prototype(('generate_project_license_files', _pytransform))
return dlfunc(filename.encode(), priname.encode(), rcode.encode(),
start, count) if sys.version_info[0] == 3 \
else dlfunc(filename, priname, rcode, start, count)
#
# Not available from v5.6
#
def generate_capsule(licfile):
prikey, pubkey, prolic = _generate_project_capsule()
capkey, newkey = _generate_pytransform_key(licfile, pubkey)
return prikey, pubkey, capkey, newkey, prolic
@dllmethod
def _generate_project_capsule():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('generate_project_capsule', _pytransform))
return dlfunc()
@dllmethod
def _generate_pytransform_key(licfile, pubkey):
prototype = PYFUNCTYPE(py_object, c_char_p, py_object)
dlfunc = prototype(('generate_pytransform_key', _pytransform))
return dlfunc(licfile.encode() if sys.version_info[0] == 3 else licfile,
pubkey)
#
# Deprecated functions from v5.1
#
@dllmethod
def encrypt_project_files(proname, filelist, mode=0):
prototype = PYFUNCTYPE(c_int, c_char_p, py_object, c_int)
dlfunc = prototype(('encrypt_project_files', _pytransform))
return dlfunc(proname.encode(), filelist, mode)
def generate_project_capsule(licfile):
prikey, pubkey, prolic = _generate_project_capsule()
capkey = _encode_capsule_key_file(licfile)
return prikey, pubkey, capkey, prolic
@dllmethod
def _encode_capsule_key_file(licfile):
prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
dlfunc = prototype(('encode_capsule_key_file', _pytransform))
return dlfunc(licfile.encode(), None)
@dllmethod
def encrypt_files(key, filelist, mode=0):
t_key = c_char * 32
prototype = PYFUNCTYPE(c_int, t_key, py_object, c_int)
dlfunc = prototype(('encrypt_files', _pytransform))
return dlfunc(t_key(*key), filelist, mode)
@dllmethod
def generate_module_key(pubname, key):
t_key = c_char * 32
prototype = PYFUNCTYPE(py_object, c_char_p, t_key, c_char_p)
dlfunc = prototype(('generate_module_key', _pytransform))
return dlfunc(pubname.encode(), t_key(*key), None)
#
# Compatible for PyArmor v3.0
#
@dllmethod
def old_init_runtime(systrace=0, sysprofile=1, threadtrace=0, threadprofile=1):
'''Only for old version, before PyArmor 3'''
pyarmor_init(is_runtime=1)
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
_init_runtime = prototype(('init_runtime', _pytransform))
return _init_runtime(systrace, sysprofile, threadtrace, threadprofile)
@dllmethod
def import_module(modname, filename):
'''Only for old version, before PyArmor 3'''
prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
_import_module = prototype(('import_module', _pytransform))
return _import_module(modname.encode(), filename.encode())
@dllmethod
def exec_file(filename):
'''Only for old version, before PyArmor 3'''
prototype = PYFUNCTYPE(c_int, c_char_p)
_exec_file = prototype(('exec_file', _pytransform))
return _exec_file(filename.encode())
| 13,587 | Python | 27.07438 | 79 | 0.60499 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/tests/__init__.py | from .test_hello_world import * | 31 | Python | 30.999969 | 31 | 0.774194 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/tests/test_hello_world.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import stream.gptchat
# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
pass
# After running each test
async def tearDown(self):
pass
# Actual test, notice it is "async" function, so "await" can be used if needed
async def test_hello_public_function(self):
result = stream.gptchat.some_public_function(4)
self.assertEqual(result, 256)
async def test_window_button(self):
# Find a label in our window
label = ui_test.find("My Window//Frame/**/Label[*]")
# Find buttons in our window
add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
# Click reset button
await reset_button.click()
self.assertEqual(label.widget.text, "empty")
await add_button.click()
self.assertEqual(label.widget.text, "count: 1")
await add_button.click()
self.assertEqual(label.widget.text, "count: 2")
| 1,664 | Python | 34.425531 | 142 | 0.68149 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.2"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["Huang I Lan - Erks Virtual Studio"]
# The title and description fields are primarily for displaying extension info in UI
title = "stream-gpt"
description="Extension for NVIDIA Omniverse that provides a simple chatbot UI to record audio inputs, transcribe them, use transcriptions as chat GPT prompts, generate responses, convert responses to audio, and transmit them to Audio2Face via gRPC, while maintaining your original scripting style and modular system.."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Chatbot"
# Keywords for the extension
keywords = ["Chat_GPT", "AI_assistant"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
[python.pipapi]
requirements = [
"pyaudio",
"openai",
"keyboard",
"soundfile",
"elevenlabs",
"pydub",
"gtts",
]
# Allow going to online index if package can't be found locally (not recommended)
use_online_index = true
# Main python module this extension provides, it will be publicly available as "import stream.gptchat".
[[python.module]]
name = "stream.gptchat"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 2,071 | TOML | 32.967213 | 318 | 0.740222 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.2] - 2023-07-06
- Upgraded the UI to allow users to add API keys, Voice_ID, Voice_Models, and Instance Name directly from the UI, eliminating the need for hardcoding.
## [1.0.0] - 2023-04-13
- Initial version of extension UI template with a window.
| 355 | Markdown | 28.666664 | 150 | 0.715493 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/docs/README.md | # Stream-GPT
Stream-GPT is an Omniverse Extension that uses OpenAI's GPT-3 model to create a virtual assistant. It allows users to interact with the assistant through both text and voice, and the assistant responds in kind. The extension uses OpenAI's Whisper ASR system to transcribe audio input and Eleven Labs' API to convert the assistant's text responses into audio.
## Getting Started
### Prerequisites
- OpenAI API key
- Eleven Labs API key
### SET UP
1. Set your OpenAI and Eleven Labs API keys, as well as the voice_id, model_id, and the prim path of the Audio2Face audio player (instance_name), in the extension's settings:
- Open the extension and click on the "Settings" button.
- Enter your OpenAI API key, Eleven Labs API key, voice_id, model_id and instance name in the corresponding fields. (A text file in the repository lists the available voice ids.)
## Usage
Once the application is running, you can interact with the virtual assistant through the UI. You can type your prompts into the text field and click on the "Send" button or use the "Record Audio" button to speak your prompts. The assistant will respond in the chat log and through your speakers.
You can also add a system to the GPT virtual assistant by typing it in the "System" field in the UI.
All interactions made with the extension are saved in a folder named "chat_logs" for future reference.
| 1,389 | Markdown | 46.931033 | 358 | 0.773938 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/docs/index.rst | stream.gpt
#############################
Example of Python only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule::"stream-gpt"
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 321 | reStructuredText | 14.333333 | 43 | 0.604361 |
ilanhuang/audio2face-streamgpt-public/UE5_install_files/extension.toml | [package]
version = "104.10.8"
title = "Audio2Face Exporter"
authors = ["NVIDIA"]
description="Custom Kit exporter for audio2face"
repository = ""
keywords = ["audio2face"]
category = "Animation"
readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"
preview_image = "data/preview.png"
icon = "data/icon.png"
[dependencies]
"omni.ui" = {optional = true}
"omni.kit.window.filepicker" = {optional = true}
"omni.graph" = {}
"omni.graph.tools" = {}
"omni.kit.menu.utils" = {optional = true}
"omni.kit.window.viewport" = {optional = true}
"omni.kit.viewport.utility" = {optional = true}
"omni.client" = {}
"omni.anim.shared" = {}
"omni.deform.shared" = {}
"omni.audio2face.common" = {}
"omni.audio2face.ui.common" = {optional = true}
"omni.audio2face.tool" = {}
"omni.services.core"={}
[[python.module]]
name = "omni.audio2face.exporter"
[[test]]
dependencies = [
"omni.kit.renderer.core",
"omni.ui",
"omni.kit.window.filepicker",
"omni.kit.menu.utils",
"omni.kit.window.viewport",
"omni.kit.viewport.utility",
"omni.audio2face.ui.common"
]
timeout = 900
stdoutFailPatterns.exclude = [
"*failed to upload minidump*", # Exclude grahics leaks until fixed
]
[package.writeTarget]
kit = true
platform = true
[python.pipapi]
requirements = ['python-osc']
use_online_index = true | 1,310 | TOML | 22.836363 | 71 | 0.681679 |
ilanhuang/audio2face-streamgpt-public/UE5_install_files/from pythonosc import udp_client.py | from pythonosc import udp_client
blend = ["eyeBlinkLeft", "eyeLookDownLeft", "eyeLookInLeft", "eyeLookOutLeft", "eyeLookUpLeft", "eyeSquintLeft", "eyeWideLeft", "eyeBlinkRight", "eyeLookDownRight", "eyeLookInRight", "eyeLookOutRight", "eyeLookUpRight", "eyeSquintRight", "eyeWideRight", "jawForward", "jawLeft", "jawRight", "jawOpen", "mouthClose", "mouthFunnel", "mouthPucker", "mouthLeft", "mouthRight", "mouthSmileLeft", "mouthSmileRight", "mouthFrownLeft", "mouthFrownRight", "mouthDimpleLeft", "mouthDimpleRight", "mouthStretchLeft", "mouthStretchRight", "mouthRollLower", "mouthRollUpper", "mouthShrugLower", "mouthShrugUpper", "mouthPressLeft", "mouthPressRight", "mouthLowerDownLeft", "mouthLowerDownRight", "mouthUpperUpLeft", "mouthUpperUpRight", "browDownLeft", "browDownRight", "browInnerUp", "browOuterUpLeft", "browOuterUpRight", "cheekPuff", "cheekSquintLeft", "cheekSquintRight", "noseSneerLeft", "noseSneerRight", "tongueOut"]
client = udp_client.SimpleUDPClient('127.0.0.1', 5008)
# `outWeight` is produced by the FACS solver (see facsSolver.py); it is not
# defined in this standalone snippet.
osc_array = outWeight.tolist()
count = 0
for i in osc_array:
    client.send_message('/' + str(blend[count]), i)
    count += 1
# The lines below belong in the extension's extension.toml, not in this
# Python file:
# [python.pipapi]
# requirements = ['python-osc']
# use_online_index = true
 | 1,267 | Python | 89.571422 | 910 | 0.708761 |
ilanhuang/audio2face-streamgpt-public/UE5_install_files/facsSolver.py | import numpy as np
from omni.audio2face.common import log_error, log_info, log_warn
from scipy.optimize import lsq_linear
from pythonosc import udp_client
class FacsSolver:
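    # Solves per-frame FACS blendshape weights from captured mesh points and
    # streams the result over OSC (see computeFacsWeights below).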
def __init__(self, neutral_mat, delta_mat):
self.weightRegulCoeff = 3.5
self.weightRegulCoeff_scale = 10.0
self.prevRegulCoeff = 3.5
self.prevRegulCoeff_scale = 100.0
self.sparseRegulCoeff = 1.0
self.sparseRegulCoeff_scale = 0.25
self.symmetryRegulCoeff = 1.0
self.symmetryRegulCoeff_scale = 10.0
self.neutral_mat = neutral_mat
self.delta_mat_orig = delta_mat
self.delta_mat = delta_mat
self.numPoses_orig = self.delta_mat_orig.shape[1]
self.numPoses = self.numPoses_orig
self.lb_orig = np.zeros(self.numPoses_orig)
self.ub_orig = self.lb_orig + 1.0
self.lb = self.lb_orig.copy()
self.ub = self.ub_orig.copy()
self.activeIdxMap = range(self.numPoses_orig)
self.activePosesBool = np.array([True for pi in range(self.numPoses_orig)], dtype=bool)
self.cancelPoseIndices = np.array([-1 for pi in range(self.numPoses_orig)], dtype=int)
self.symmetryPoseIndices = np.array([-1 for pi in range(self.numPoses_orig)], dtype=int)
self.cancelList = []
self.symmetryList = []
self.symShapeMat = np.zeros((self.numPoses_orig, self.numPoses_orig))
self.prevWeights = np.zeros(self.numPoses_orig)
# TODO L1 implementation
l1RegulMat = np.ones((1, self.numPoses))
self.l1RegulMat = np.dot(l1RegulMat.T, l1RegulMat)
self.compute_A_mat()
def compute_A_mat(self):
self.A = (
np.dot(self.delta_mat.T, self.delta_mat)
+ self.weightRegulCoeff * self.weightRegulCoeff_scale * np.eye(self.numPoses)
+ self.prevRegulCoeff * self.prevRegulCoeff_scale * np.eye(self.numPoses)
+ self.sparseRegulCoeff ** 2 * self.sparseRegulCoeff_scale * self.l1RegulMat
+ self.symmetryRegulCoeff * self.symmetryRegulCoeff_scale * self.symShapeMat
)
self.A = self.A.astype(np.float64)
def set_activePoses(self, activePosesBool):
self.activePosesBool = activePosesBool
# 1 - simple approach
# self.ub *= np.array(self.activePosesBool)
# 2- less computation way
self.delta_mat = self.delta_mat_orig[:, self.activePosesBool]
self.numPoses = self.delta_mat.shape[1]
self.lb = self.lb_orig[self.activePosesBool]
self.ub = self.ub_orig[self.activePosesBool]
self.prevWeights = np.zeros(self.numPoses)
self.activeIdxMap = []
cnt = 0
for idx in range(self.numPoses_orig):
if self.activePosesBool[idx]:
self.activeIdxMap.append(cnt)
cnt += 1
else:
self.activeIdxMap.append(-1)
# update L1 regularization mat
l1RegulMat = np.ones((1, self.numPoses))
self.l1RegulMat = np.dot(l1RegulMat.T, l1RegulMat)
# update cancel pair index
self.set_cancelPoses(self.cancelPoseIndices)
# update symmetry pair index
self.set_symmetryPoses(self.symmetryPoseIndices) # update self.A here
def set_cancelPoses(self, cancelPoseIndices):
self.cancelPoseIndices = cancelPoseIndices
# filter out cancel shapes
self.cancelList = []
maxIdx = np.max(self.cancelPoseIndices)
if maxIdx < 0:
return
for ci in range(maxIdx + 1):
cancelIndices = np.where(self.cancelPoseIndices == ci)[0]
        if len(cancelIndices) > 2:
            log_warn("There are more than 2 poses for cancel index %d" % ci)
            break
        elif len(cancelIndices) < 2:
            log_warn("There are fewer than 2 poses for cancel index %d" % ci)
            break
self.cancelList.append(cancelIndices)
# print ('cancel shape list', self.cancelList)
activeCancelList = []
for pIdx1, pIdx2 in self.cancelList:
if self.activePosesBool[pIdx1] and self.activePosesBool[pIdx2]:
activeCancelList.append([self.activeIdxMap[pIdx1], self.activeIdxMap[pIdx2]])
# print (activeCancelList)
self.cancelList = activeCancelList
def set_symmetryPoses(self, symmetryPoseIndices):
self.symmetryPoseIndices = symmetryPoseIndices
self.symmetryList = []
maxIdx = np.max(self.symmetryPoseIndices)
if maxIdx < 0:
self.symShapeMat = np.zeros((self.numPoses, self.numPoses))
else:
for ci in range(maxIdx + 1):
symmetryIndices = np.where(self.symmetryPoseIndices == ci)[0]
                if len(symmetryIndices) > 2:
                    log_warn("There are more than 2 poses for symmetry index %d" % ci)
                    break
                elif len(symmetryIndices) < 2:
                    log_warn("There are fewer than 2 poses for symmetry index %d" % ci)
                    break
self.symmetryList.append(symmetryIndices)
activeSymmetryList = []
for pIdx1, pIdx2 in self.symmetryList:
if self.activePosesBool[pIdx1] and self.activePosesBool[pIdx2]:
activeSymmetryList.append([self.activeIdxMap[pIdx1], self.activeIdxMap[pIdx2]])
self.symmetryList = activeSymmetryList
symShapeMat = np.zeros((len(self.symmetryList), self.numPoses))
for si, [pose1Idx, pose2Idx] in enumerate(self.symmetryList):
symShapeMat[si, pose1Idx] = 1.0
symShapeMat[si, pose2Idx] = -1.0
self.symShapeMat = np.dot(symShapeMat.T, symShapeMat)
self.compute_A_mat()
def set_l2_regularization(self, L2=3.5):
self.weightRegulCoeff = L2
self.compute_A_mat()
def set_tempo_regularization(self, temporal=3.5):
self.prevRegulCoeff = temporal
self.compute_A_mat()
def set_l1_regularization(self, L1=1.0):
self.sparseRegulCoeff = L1
self.compute_A_mat()
def set_symmetry_regularization(self, value=1.0):
self.symmetryRegulCoeff = value
self.compute_A_mat()
def computeFacsWeights(self, point_mat):
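        # Solve min ||delta_mat @ w - target_delta||^2 (plus the L2, temporal,
        # L1 and symmetry regularizers folded into self.A) subject to
        # lb <= w <= ub, using bounded-variable least squares (BVLS).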
target_delta_mat = point_mat - self.neutral_mat
B = (
np.dot(self.delta_mat.T, target_delta_mat).flatten()
+ self.prevRegulCoeff * self.prevRegulCoeff_scale * self.prevWeights
)
B = B.astype(np.float64)
res = lsq_linear(self.A, B, bounds=(self.lb, self.ub), lsmr_tol="auto", verbose=0, method="bvls")
# print ('first pass:', res.x)
if len(self.cancelList) > 0:
# check cancelling poses -
ub = self.ub.copy()
lb = self.lb.copy()
for pose1Idx, pose2Idx in self.cancelList:
if res.x[pose1Idx] >= res.x[pose2Idx]:
ub[pose2Idx] = 1e-10
else:
ub[pose1Idx] = 1e-10
res = lsq_linear(self.A, B, bounds=(lb, ub), lsmr_tol="auto", verbose=0, method="bvls")
self.prevWeights = res.x
# print ('second pass:', res.x)
outWeight = np.zeros(self.numPoses_orig)
outWeight[self.activePosesBool] = res.x
outWeight = outWeight * (outWeight > 1.0e-9)
# print (outWeight)
blend = ["eyeBlinkLeft", "eyeLookDownLeft", "eyeLookInLeft", "eyeLookOutLeft", "eyeLookUpLeft", "eyeSquintLeft", "eyeWideLeft", "eyeBlinkRight", "eyeLookDownRight", "eyeLookInRight", "eyeLookOutRight", "eyeLookUpRight", "eyeSquintRight", "eyeWideRight", "jawForward", "jawLeft", "jawRight", "jawOpen", "mouthClose", "mouthFunnel", "mouthPucker", "mouthLeft", "mouthRight", "mouthSmileLeft", "mouthSmileRight", "mouthFrownLeft", "mouthFrownRight", "mouthDimpleLeft", "mouthDimpleRight", "mouthStretchLeft", "mouthStretchRight", "mouthRollLower", "mouthRollUpper", "mouthShrugLower", "mouthShrugUpper", "mouthPressLeft", "mouthPressRight", "mouthLowerDownLeft", "mouthLowerDownRight", "mouthUpperUpLeft", "mouthUpperUpRight", "browDownLeft", "browDownRight", "browInnerUp", "browOuterUpLeft", "browOuterUpRight", "cheekPuff", "cheekSquintLeft", "cheekSquintRight", "noseSneerLeft", "noseSneerRight", "tongueOut"]
try:
client = udp_client.SimpleUDPClient('127.0.0.1', 27008)
osc_array = outWeight.tolist()
count = 0
for i in osc_array:
client.send_message('/' + str(blend[count]), i)
count += 1
except Exception as e:
log_error(f"Error in OSC communication: {e}") | 8,708 | Python | 41.276699 | 918 | 0.614378 |
matthias-research/omni.fun/README.md | # omni.fun
A simple plugin for nvidia's omniverse
| 50 | Markdown | 15.999995 | 38 | 0.78 |
matthias-research/omni.fun/exts/omni.fun/config/extension.toml |
[package]
# Semantic Versioning is used: https://semver.org/
version = "0.1.0"
authors = ["Ten Minute Physics"]
title = "Fun"
description="Ten Minute Physics domniverse extension"
readme = "docs/README.md"
repository="https://github.com/matthias-research/omni.fun"
category = "sim"
keywords = ["simulation"]
changelog="docs/CHANGELOG.md"
preview_image = "data/preview.png"
icon = "data/icon.png"
# Watch the .ogn files for hot reloading (only works for Python files)
[fswatcher.patterns]
include = ["*.ogn", "*.py"]
exclude = ["Ogn*Database.py", "*/ogn*"]
[dependencies]
"omni.kit.test" = {}
"omni.kit.menu.utils" = {}
"omni.timeline" = {}
"omni.usd" = {}
# Main python module this extension provides, it will be publicly available as "import omni.fun".
[[python.module]]
name = "omni.fun"
| 797 | TOML | 24.741935 | 98 | 0.697616 |
matthias-research/omni.fun/exts/omni.fun/config/extension.gen.toml | [package]
[package.target]
python = ["cp37"]
[package.publish]
date = 1635811509
kitVersion = "103.0+master.0.75457a67.gitlab"
| 127 | TOML | 17.285712 | 45 | 0.732283 |
matthias-research/omni.fun/exts/omni.fun/omni/fun/__init__.py | from .scripts.extension import *
| 33 | Python | 15.999992 | 32 | 0.787879 |
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/sim.py | # Copyright 2022 Matthias Müller - Ten Minute Physics,
# https://www.youtube.com/c/TenMinutePhysics
# www.matthiasMueller.info/tenMinutePhysics
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import math
import warp as wp
from pxr import Usd, UsdGeom, Gf, Sdf
from .usdutils import *
gravity = -9.81
@wp.struct
class SimData:
sphere_radius: wp.array(dtype=float)
sphere_mass: wp.array(dtype=float)
sphere_pos: wp.array(dtype=wp.vec3)
sphere_rot: wp.array(dtype=wp.quat)
sphere_lin_vel: wp.array(dtype=wp.vec3)
sphere_ang_vel: wp.array(dtype=wp.vec3)
sphere_pos_corr: wp.array(dtype=wp.vec3)
sphere_lin_corr: wp.array(dtype=wp.vec3)
sphere_ang_corr: wp.array(dtype=wp.vec3)
sphere_num_corr: wp.array(dtype=int)
sphere_lower_bounds: wp.array(dtype=wp.vec3)
sphere_upper_bounds: wp.array(dtype=wp.vec3)
sphere_bvh_id: wp.uint64
obj_mesh_id: wp.uint64
obj_tri_ids: wp.array(dtype=int)
obj_orig_pos: wp.array(dtype=wp.vec3)
obj_pos: wp.array(dtype=wp.vec3)
obj_prev_pos: wp.array(dtype=wp.vec3)
obj_transforms: wp.array(dtype=wp.mat44)
obj_pos_transform_nr: wp.array(dtype=int)
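# All simulation state lives in flat warp arrays gathered into this single
# struct, so each kernel below receives the full state as one argument.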
@wp.kernel
def dev_integrate(
dt: float,
gravity: wp.vec3,
bounds_margin: float,
sim: SimData):
sphere_nr = wp.tid()
pos = sim.sphere_pos[sphere_nr]
lin_vel = sim.sphere_lin_vel[sphere_nr]
rot = sim.sphere_rot[sphere_nr]
ang_vel = sim.sphere_ang_vel[sphere_nr]
# move state forward in time
lin_vel = lin_vel + gravity * dt
pos = pos + lin_vel * dt
qt = wp.quat(ang_vel[0], ang_vel[1], ang_vel[2], 0.0) * (dt * 0.5)
rot = wp.normalize(rot + qt * rot)
sim.sphere_pos[sphere_nr] = pos
sim.sphere_lin_vel[sphere_nr] = lin_vel
sim.sphere_rot[sphere_nr] = rot
# compute bounding box for bvh
pred_pos = pos + lin_vel * dt
lower = wp.vec3(wp.min(pos[0], pred_pos[0]), wp.min(pos[1], pred_pos[1]), wp.min(pos[2], pred_pos[2]))
upper = wp.vec3(wp.max(pos[0], pred_pos[0]), wp.max(pos[1], pred_pos[1]), wp.max(pos[2], pred_pos[2]))
m = bounds_margin + sim.sphere_radius[sphere_nr]
sim.sphere_lower_bounds[sphere_nr] = lower - wp.vec3(m, m, m)
sim.sphere_upper_bounds[sphere_nr] = upper + wp.vec3(m, m, m)
@wp.kernel
def dev_handle_sphere_sphere_collisions(
restitution: float,
sim: SimData):
sphere0 = wp.tid()
eps = 0.00001
pos0 = sim.sphere_pos[sphere0]
radius0 = sim.sphere_radius[sphere0]
m0 = sim.sphere_mass[sphere0]
w0 = 1.0 / (m0 + eps)
    vel0 = sim.sphere_lin_vel[sphere0]
    ang0 = sim.sphere_ang_vel[sphere0]
lower = sim.sphere_lower_bounds[sphere0]
upper = sim.sphere_upper_bounds[sphere0]
    query = wp.bvh_query_aabb(sim.sphere_bvh_id, lower, upper)
sphere1 = int(0)
while (wp.bvh_query_next(query, sphere1)):
if sphere1 < sphere0: # handle each pair only once!
pos1 = sim.sphere_pos[sphere1]
radius1 = sim.sphere_radius[sphere1]
m1 = sim.sphere_mass[sphere1]
w1 = 1.0 / (m1 + eps)
            vel1 = sim.sphere_lin_vel[sphere1]
            ang1 = sim.sphere_ang_vel[sphere1]
min_dist = radius0 + radius1
pos_normal = wp.normalize(pos1 - pos0)
dist = wp.dot(pos_normal, pos1 - pos0)
if dist < min_dist:
# bounce
wp.atomic_add(sim.sphere_num_corr, sphere0, 1)
wp.atomic_add(sim.sphere_num_corr, sphere1, 1)
pos_corr = pos_normal / (w0 + w1) * (min_dist - dist + eps)
                wp.atomic_add(sim.sphere_pos_corr, sphere0, -w0 * pos_corr)
                wp.atomic_add(sim.sphere_pos_corr, sphere1, +w1 * pos_corr)
vn0 = wp.dot(vel0, pos_normal)
vn1 = wp.dot(vel1, pos_normal)
new_vn0 = (m0 * vn0 + m1 * vn1 - m1 * (vn0 - vn1) * restitution) / (m0 + m1)
new_vn1 = (m0 * vn0 + m1 * vn1 - m0 * (vn1 - vn0) * restitution) / (m0 + m1)
new_vn0 = wp.min(0.0, new_vn0)
new_vn1 = wp.max(0.0, new_vn1)
lin_corr0 = pos_normal * (new_vn0 - vn0)
lin_corr1 = pos_normal * (new_vn1 - vn1)
wp.atomic_add(sim.sphere_lin_corr, sphere0, lin_corr0)
wp.atomic_add(sim.sphere_lin_corr, sphere1, lin_corr1)
vel0 = vel0 + lin_corr0
vel1 = vel1 + lin_corr1
# friction
ang_normal = wp.normalize(ang0 * m0 + ang1 * m1)
                ang_normal = wp.normalize(ang_normal - pos_normal * wp.dot(pos_normal, ang_normal))
vt0 = wp.dot(vel0, wp.cross(ang_normal, pos_normal))
vt1 = wp.dot(vel1, wp.cross(ang_normal, pos_normal))
omega0 = wp.dot(ang0, ang_normal)
omega1 = wp.dot(ang1, ang_normal)
# v0 + (o0 - do*w0) * r0 = v1 + (o1 + do*w1) * r1
domega = (vt1 + omega1 * radius1 - vt0 - omega0 * radius0) / (radius0 * w0 + radius1 * w1)
ang_corr0 = ang_normal * (omega0 - domega * w0) - ang0
ang_corr1 = ang_normal * (omega1 + domega * w1) - ang1
ang0 = ang0 + ang_corr0
ang1 = ang1 + ang_corr1
wp.atomic_add(sim.sphere_ang_corr, sphere0, ang_corr0)
wp.atomic_add(sim.sphere_ang_corr, sphere1, ang_corr1)
@wp.kernel
def dev_update_obj_pos(sim: SimData):
    id = wp.tid()
    trans_nr = sim.obj_pos_transform_nr[id]
    pos = wp.transform_point(sim.obj_transforms[trans_nr], sim.obj_orig_pos[id])
    sim.obj_prev_pos[id] = sim.obj_pos[id]
    sim.obj_pos[id] = pos
@wp.kernel
def dev_handle_sphere_obj_collisions(
dt: float,
restitution: float,
sim: SimData):
sphere_nr = wp.tid()
pos = sim.sphere_pos[sphere_nr]
radius = sim.sphere_radius[sphere_nr]
    vel = sim.sphere_lin_vel[sphere_nr]
    ang = sim.sphere_ang_vel[sphere_nr]
inside = float(0.0)
face_nr = int(0)
u = float(0.0)
v = float(0.0)
found = wp.mesh_query_point(sim.obj_mesh_id, pos, radius, inside, face_nr, u, v)
if not found:
return
id0 = sim.obj_tri_ids[3 * face_nr]
id1 = sim.obj_tri_ids[3 * face_nr + 1]
id2 = sim.obj_tri_ids[3 * face_nr + 2]
p0 = sim.obj_pos[id0]
p1 = sim.obj_pos[id1]
p2 = sim.obj_pos[id2]
closest = u * p0 + v * p1 + (1.0 - u - v) * p2
pos_normal = wp.normalize(pos - closest)
dist = wp.dot(pos_normal, pos - closest)
if dist >= radius:
return
sim.sphere_pos[sphere_nr] = pos - pos_normal * (radius - dist)
    v0 = (p0 - sim.obj_prev_pos[id0]) / dt
    v1 = (p1 - sim.obj_prev_pos[id1]) / dt
    v2 = (p2 - sim.obj_prev_pos[id2]) / dt
    v_mesh = u * v0 + v * v1 + (1.0 - u - v) * v2
vn_sphere = wp.dot(sim.sphere_lin_vel[sphere_nr], pos_normal)
vn_mesh = wp.dot(v_mesh, pos_normal)
new_vn = wp.min(vn_mesh - (vn_sphere - vn_mesh) * restitution, 0.0)
    sim.sphere_lin_vel[sphere_nr] = vel + pos_normal * (new_vn - vn_sphere)
# friction
    # project the spin axis onto the contact tangent plane
    ang_normal = wp.normalize(ang - pos_normal * wp.dot(pos_normal, ang))
vt = wp.dot(vel, wp.cross(ang_normal, pos_normal))
omega = wp.dot(ang, ang_normal)
# vel + (omega + do) * r = v_mesh
    vt_mesh = wp.dot(v_mesh, wp.cross(ang_normal, pos_normal))
    domega = (vt_mesh - vt - omega * radius) / radius
    ang = ang + ang_normal * domega
    sim.sphere_ang_vel[sphere_nr] = ang
@wp.kernel
def dev_apply_corrections(
sim: SimData):
sphere_nr = wp.tid()
num = sim.sphere_num_corr[sphere_nr]
if num > 0:
s = 1.0 / float(num)
sim.sphere_pos[sphere_nr] += sim.sphere_pos_corr[sphere_nr] * s
sim.sphere_lin_vel[sphere_nr] += sim.sphere_lin_corr[sphere_nr] * s
sim.sphere_ang_vel[sphere_nr] += sim.sphere_ang_corr[sphere_nr] * s
class Sim():
def __init__(self, stage):
self.paused = True
self.stage = stage
self.device = 'cuda'
self.prim_cache = UsdGeom.XformCache()
self.dev_sim_data = SimData()
self.host_sim_data = SimData()
self.spheres_bvh = None
self.obj_mesh = None
self.sphere_usd_meshes = []
self.obj_usd_prims = []
self.obj_usd_transforms = []
self.initialized = False
        self.time_step = 1.0 / 30.0
        self.num_substeps = 5
        self.restitution = 0.1
        self.jacobi_scale = 0.25
        self.gravity = wp.vec3(0.0, gravity, 0.0)  # assumes a Y-up stage
        self.bounds_margin = 0.01  # slack added to the sphere BVH bounds
        self.num_spheres = 0
        self.frame_nr = 0
def init(self):
if not self.stage:
return
obj_pos = []
obj_pos_transform_nr = []
obj_tri_ids = []
sphere_pos = []
sphere_radius = []
        sphere_mass = []
self.sphere_usd_meshes = []
self.sphere_usd_transforms = []
s = 4.0 / 3.0 * 3.141592
print("traversing stage")
for prim in self.stage.Traverse():
if prim.GetTypeName() == "Mesh":
mesh = UsdGeom.Mesh(prim)
name = mesh.GetName()
points = mesh.GetPointsAttr().Get(0.0)
if name.find("sphere") != 0 or name.find("Sphere") != 0:
# create a sphere
trans_mat, trans_t = get_global_transform(prim, 0.0, False)
trans_points = points @ trans_mat
min = np.min(trans_points, axis = 0)
max = np.max(trans_points, axis = 0)
radius = np.max(max - min) * 0.5
sphere_radius.append(radius)
sphere_pos.append(trans_t)
                    mass = s * radius * radius * radius
                    sphere_mass.append(mass)
clone = clone_prim(self.stage, prim)
self.sphere_usd_meshes.append(UsdGeom.Mesh(clone))
                    self.sphere_usd_transforms.append(clone)
else:
                obj_nr = len(self.obj_usd_prims)
                self.obj_usd_prims.append(prim)
                # create obstacle points
                mesh_points = np.array(points)
                first_pos = len(obj_pos)
                for i in range(len(mesh_points)):
                    p = mesh_points[i]
                    obj_pos.append(wp.vec3(*p))
                    obj_pos_transform_nr.append(obj_nr)
                # create obstacle triangles
                mesh_poly_indices = mesh.GetFaceVertexIndicesAttr().Get(0.0)
                mesh_face_sizes = mesh.GetFaceVertexCountsAttr().Get(0.0)
first_index = 0
for i in range(len(mesh_face_sizes)):
face_size = mesh_face_sizes[i]
for j in range(1, face_size-1):
obj_tri_ids.append(first_pos + mesh_poly_indices[first_index])
obj_tri_ids.append(first_pos + mesh_poly_indices[first_index + j])
obj_tri_ids.append(first_pos + mesh_poly_indices[first_index + j + 1])
first_index += face_size
# create objects warp buffers
if len(obj_pos) > 0:
self.dev_sim_data.obj_pos = wp.array(obj_pos, dtype=wp.vec3, device=self.device)
            self.dev_sim_data.obj_prev_pos = wp.array(obj_pos, dtype=wp.vec3, device=self.device)
            self.dev_sim_data.obj_pos_transform_nr = wp.array(obj_pos_transform_nr, dtype=int, device=self.device)
            self.dev_sim_data.obj_tri_ids = wp.array(obj_tri_ids, dtype=int, device=self.device)
            self.obj_mesh = wp.Mesh(self.dev_sim_data.obj_pos, self.dev_sim_data.obj_tri_ids)
            self.dev_sim_data.obj_mesh_id = self.obj_mesh.id
            num_objs = len(self.obj_usd_prims)
            mat = wp.mat44()
            self.obj_transforms = np.array([mat] * num_objs)
            self.dev_sim_data.obj_transforms = wp.zeros(shape=(num_objs), dtype=wp.mat44, device=self.device)
# create sphere warp buffers
self.num_spheres = len(sphere_pos)
if self.num_spheres > 0:
            identity = [wp.quat_identity() for _ in range(self.num_spheres)]
            self.dev_sim_data.sphere_radius = wp.array(sphere_radius, dtype=float, device=self.device)
            self.dev_sim_data.sphere_mass = wp.array(sphere_mass, dtype=float, device=self.device)
            self.dev_sim_data.sphere_pos = wp.array(sphere_pos, dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_rot = wp.array(identity, dtype=wp.quat, device=self.device)
            self.dev_sim_data.sphere_lin_vel = wp.zeros(shape=(self.num_spheres), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_ang_vel = wp.zeros(shape=(self.num_spheres), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_pos_corr = wp.zeros(shape=(self.num_spheres), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_lin_corr = wp.zeros(shape=(self.num_spheres), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_ang_corr = wp.zeros(shape=(self.num_spheres), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_num_corr = wp.zeros(shape=(self.num_spheres), dtype=int, device=self.device)
            self.dev_sim_data.sphere_lower_bounds = wp.zeros(shape=(self.num_spheres), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_upper_bounds = wp.zeros(shape=(self.num_spheres), dtype=wp.vec3, device=self.device)
            self.host_sim_data.sphere_pos = wp.array(sphere_pos, dtype=wp.vec3, device="cpu")
            self.host_sim_data.sphere_rot = wp.zeros(shape=(self.num_spheres), dtype=wp.quat, device="cpu")
            # zero time step to initialize sphere bounds
            wp.launch(kernel = dev_integrate,
                inputs = [0.0, wp.vec3(0.0, 0.0, 0.0), self.bounds_margin, self.dev_sim_data],
                dim = self.num_spheres, device=self.device)
self.sphere_bvh = wp.Bvh(self.dev_sim_data.sphere_lower_bounds, self.dev_sim_data.sphere_upper_bounds)
self.dev_sim_data.sphere_bvh_id = self.sphere_bvh.id
    def simulate(self):
        if self.paused:
            return
        self.frame_nr += 1
        print("simulating", self.frame_nr)
# update objects
        if len(self.obj_usd_prims) > 0:
            for i in range(len(self.obj_usd_prims)):
                self.obj_transforms[i] = get_global_transform(self.obj_usd_prims[i], 0.0, True)
            wp.copy(self.dev_sim_data.obj_transforms, wp.array(self.obj_transforms, dtype=wp.mat44, copy=False, device="cpu"))
            wp.launch(kernel = dev_update_obj_pos,
                inputs = [self.dev_sim_data],
                dim = len(self.dev_sim_data.obj_pos), device=self.device)
            self.obj_mesh.refit()
#simulate spheres
wp.launch(kernel = dev_integrate,
            inputs = [self.time_step, self.gravity, self.bounds_margin, self.dev_sim_data],
dim = self.num_spheres, device=self.device)
self.sphere_bvh.refit()
self.dev_sim_data.sphere_pos_corr.zero_()
self.dev_sim_data.sphere_lin_corr.zero_()
self.dev_sim_data.sphere_ang_corr.zero_()
self.dev_sim_data.sphere_num_corr.zero_()
wp.launch(kernel = dev_handle_sphere_sphere_collisions,
inputs = [self.restitution, self.dev_sim_data],
dim = self.num_spheres, device=self.device)
wp.launch(kernel = dev_apply_corrections,
inputs = [self.dev_sim_data],
dim = self.num_spheres, device=self.device)
wp.launch(kernel = dev_handle_sphere_obj_collisions,
inputs = [self.time_step, self.restitution, self.dev_sim_data],
dim = self.num_spheres, device=self.device)
# update stage
        wp.copy(self.host_sim_data.sphere_pos, self.dev_sim_data.sphere_pos)
        wp.copy(self.host_sim_data.sphere_rot, self.dev_sim_data.sphere_rot)
        pos = self.host_sim_data.sphere_pos.numpy()
        quat = self.host_sim_data.sphere_rot.numpy()
        for i in range(self.num_spheres):
            set_transform(self.sphere_usd_meshes[i], pos[i], quat[i])
def reset(self):
hide_clones(self.stage)
self.paused = True
| 16,580 | Python | 34.734914 | 462 | 0.5769 |
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/extension.py | # Copyright 2022 Matthias Müller - Ten Minute Physics,
# https://www.youtube.com/c/TenMinutePhysics
# www.matthiasMueller.info/tenMinutePhysics
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import omni.ext
import omni.kit.app
import omni.kit.ui
import omni.kit.window.file
import os
import omni.usd
from omni import ui
from pxr import Usd
from .controls import ControlsWindow
from .sim import Sim
EXAMPLES_PATH = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../data/scenes"))
class OmniFunExtension(omni.ext.IExt):
def on_startup(self, ext_id):
print("fun on_startup")
setattr(self, "controls", None)
setattr(self, "sim", None)
stage = omni.usd.get_context().get_stage()
self.sim = Sim(stage)
self.sim.init()
editor_menu = omni.kit.ui.get_editor_menu()
self.menu_items = []
if editor_menu:
self.controls_menu = editor_menu.add_item(
f"Window/Fun/Controls",
lambda _, value: self.show_controls(value),
toggle=True, value=False
)
self.menu_items.append(editor_menu.add_item(
f"Window/Fun/SimpleScene",
lambda _, value: self.load_example("simple.usd"),
toggle=False, value=False
))
# self.show_controls(True)
# set callbacks
self.update_event_stream = omni.kit.app.get_app_interface().get_update_event_stream()
self.stage_event_sub = omni.usd.get_context().get_stage_event_stream().create_subscription_to_pop(self.on_event)
def on_shutdown(self):
print("fun on_shutdown")
self.menu_items = None
self.update_event_stream = None
self.stage_event_sub = None
if self.sim:
self.sim.reset()
self.show_controls(False)
    def init_callback(self, state):
        if state:
            stage = omni.usd.get_context().get_stage()
            self.sim = Sim(stage)
            self.sim.init()
            self.update_event_sub = self.update_event_stream.create_subscription_to_pop(self.on_update)
else:
if self.sim:
self.sim.reset()
self.sim = None
def play_callback(self, state):
if self.sim:
self.sim.paused = not state
def on_update(self, dt):
if self.sim:
self.sim.simulate()
def set_controls_menu(self, visible):
omni.kit.ui.get_editor_menu().set_value(f"Window/Fun/Controls", visible)
def show_controls(self, is_visible):
if is_visible:
if not hasattr(self, "controls"):
setattr(self, "controls", None)
if self.controls is None:
self.controls = ControlsWindow(
init_callback=self.init_callback,
play_callback=self.play_callback)
self.controls.create_window(lambda visible: self.set_controls_menu(visible))
self.controls.show_window()
else:
self.controls.show_window()
elif self.controls:
self.controls.destroy_window()
self.controls = None
def on_event(self, event):
if event.type == int(omni.usd.StageEventType.CLOSED):
if self.sim:
self.sim.reset()
if event.type == int(omni.usd.StageEventType.OPENED):
if self.sim:
self.sim.init()
def load_example(self, scene_name):
def new_stage():
stage_path = os.path.normpath(os.path.join(EXAMPLES_PATH, scene_name))
omni.usd.get_context().open_stage(stage_path)
if self.sim:
self.sim.init()
omni.kit.window.file.prompt_if_unsaved_stage(new_stage)
| 4,788 | Python | 35.007519 | 462 | 0.618421 |
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/gpu.py | # Copyright 2022 Matthias Müller - Ten Minute Physics,
# https://www.youtube.com/c/TenMinutePhysics
# www.matthiasMueller.info/tenMinutePhysics
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import warp as wp
@wp.struct
class SimData:
spheres_pos: wp.array(dtype=wp.vec3)
spheres_prev_pos: wp.array(dtype=wp.vec3)
spheres_pos_corr: wp.array(dtype=wp.vec3)
spheres_vel: wp.array(dtype=wp.vec3)
spheres_radius: wp.array(dtype=float)
spheres_inv_mass: wp.array(dtype=float)
mesh_id: wp.uint64
mesh_verts: wp.array(dtype=wp.vec3)
mesh_tri_ids: wp.array(dtype=int)
@wp.func
def closest_point_on_triangle(
p: wp.vec3, p0: wp.vec3, p1: wp.vec3, p2: wp.vec3):
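    # returns the barycentric coordinates of the point on triangle
    # (p0, p1, p2) that is closest to p, via edge-projection case analysis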
e0 = p1 - p0
e1 = p2 - p0
tmp = p0 - p
a = wp.dot(e0, e0)
b = wp.dot(e0, e1)
c = wp.dot(e1, e1)
d = wp.dot(e0, tmp)
e = wp.dot(e1, tmp)
coords = wp.vec3(b*e - c*d, b*d - a*e, a*c - b*b)
x = 0.0
y = 0.0
z = 0.0
if coords[0] <= 0.0:
if c != 0.0:
y = -e / c
elif coords[1] <= 0.0:
if a != 0.0:
x = -d / a
elif coords[0] + coords[1] > coords[2]:
den = a + c - b - b
num = c + e - b - d
if den != 0.0:
x = num / den
y = 1.0 - x
else:
if coords[2] != 0.0:
x = coords[0] / coords[2]
y = coords[1] / coords[2]
x = wp.clamp(x, 0.0, 1.0)
y = wp.clamp(y, 0.0, 1.0)
bary = wp.vec3(1.0 - x - y, x, y)
return bary
@wp.kernel
def dev_integrate_spheres(
dt: float,
gravity: wp.vec3,
data: SimData):
sphere_nr = wp.tid()
w = data.spheres_inv_mass[sphere_nr]
if w > 0.0:
data.spheres_vel[sphere_nr] += gravity * dt
data.spheres_prev_pos[sphere_nr] = data.spheres_pos[sphere_nr]
data.spheres_pos[sphere_nr] += data.spheres_vel[sphere_nr] * dt
def integrate_spheres(num_spheres: int, dt: float, gravity: wp.vec3, data: SimData, device):
wp.launch(kernel = dev_integrate_spheres,
inputs = [dt, gravity, data], dim=num_spheres, device=device)
@wp.kernel
def dev_update_spheres(
dt: float,
jacobi_scale: float,
data: SimData):
sphere_nr = wp.tid()
w = data.spheres_inv_mass[sphere_nr]
if w > 0.0:
        data.spheres_pos[sphere_nr] = data.spheres_pos[sphere_nr] + jacobi_scale * data.spheres_pos_corr[sphere_nr]
data.spheres_vel[sphere_nr] = (data.spheres_pos[sphere_nr] - data.spheres_prev_pos[sphere_nr]) / dt
def update_spheres(num_spheres: int, dt: float, jacobi_scale: float, data: SimData, device):
wp.launch(kernel = dev_update_spheres,
inputs = [dt, jacobi_scale, data], dim=num_spheres, device=device)
@wp.kernel
def dev_solve_mesh_collisions(
data: SimData):
sphere_nr = wp.tid()
w = data.spheres_inv_mass[sphere_nr]
if w > 0.0:
pos = data.spheres_pos[sphere_nr]
r = data.spheres_radius[sphere_nr]
# query bounding volume hierarchy
bounds_lower = pos - wp.vec3(r, r, r)
bounds_upper = pos + wp.vec3(r, r, r)
query = wp.mesh_query_aabb(data.mesh_id, bounds_lower, bounds_upper)
tri_nr = int(0)
while (wp.mesh_query_aabb_next(query, tri_nr)):
p0 = data.mesh_verts[data.mesh_tri_ids[3*tri_nr]]
p1 = data.mesh_verts[data.mesh_tri_ids[3*tri_nr + 1]]
p2 = data.mesh_verts[data.mesh_tri_ids[3*tri_nr + 2]]
hit = closest_point_on_triangle(pos, p0, p1, p2)
n = pos - hit
d = wp.length(n)
if d < r:
n = wp.normalize(n)
data.spheres_pos[sphere_nr] = data.spheres_pos[sphere_nr] + n * (r - d)
def solve_mesh_collisions(num_spheres: int, data: SimData, device):
wp.launch(kernel = dev_solve_mesh_collisions,
inputs = [data], dim=num_spheres, device=device)
@wp.kernel
def dev_solve_sphere_collisions(
num_spheres: int,
data: SimData):
i0 = wp.tid()
p0 = data.spheres_pos[i0]
r0 = data.spheres_radius[i0]
w0 = data.spheres_inv_mass[i0]
    # simple O(n^2) collision detection
for i1 in range(num_spheres):
if i1 > i0:
p1 = data.spheres_pos[i1]
r1 = data.spheres_radius[i1]
w1 = data.spheres_inv_mass[i1]
w = w0 + w1
if w > 0.0:
n = p1 - p0
d = wp.length(n)
                n = wp.normalize(n)
if d < r0 + r1:
corr = n * (r0 + r1 - d) / w
                    data.spheres_pos_corr[i0] = data.spheres_pos_corr[i0] - corr * w0
                    data.spheres_pos_corr[i1] = data.spheres_pos_corr[i1] + corr * w1
def solve_sphere_collisions(num_spheres: int, data: SimData, device):
wp.launch(kernel = dev_solve_sphere_collisions,
inputs = [num_spheres, data], dim=num_spheres, device=device)
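# Minimal driver sketch (hypothetical, not part of the extension): assumes
# `data` is a fully populated SimData whose arrays match `num_spheres`.
def step(num_spheres: int, dt: float, jacobi_scale: float, data: SimData, device):
    gravity = wp.vec3(0.0, -9.81, 0.0)  # assumes a Y-up world
    data.spheres_pos_corr.zero_()  # clear Jacobi corrections from the last step
    integrate_spheres(num_spheres, dt, gravity, data, device)
    solve_mesh_collisions(num_spheres, data, device)
    solve_sphere_collisions(num_spheres, data, device)
    update_spheres(num_spheres, dt, jacobi_scale, data, device)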
| 6,034 | Python | 32.342541 | 462 | 0.586841 |
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/controls.py | import carb
import omni.ui
import omni.usd
import omni.kit.app
from pxr import Usd, Sdf
from . import sim
class ControlsWindow:
def __init__(self, init_callback=None, play_callback=None):
self._window = None
self.buttons = [
[None, init_callback, False, "Init", "Reset"],
[None, play_callback, False, "Play", "Pause"]]
def __bool__(self):
return self._window is not None
def create_window(self, visibility_changed_fn):
window_flags = omni.ui.WINDOW_FLAGS_NO_SCROLLBAR
self._window = omni.ui.Window("Fun Controls", flags=window_flags, width=400, height=400, dockPreference=omni.ui.DockPreference.RIGHT_TOP)
self._window.set_visibility_changed_fn(visibility_changed_fn)
self.rebuild_ui()
def show_window(self):
self._window.visible = True
def hide_window(self):
self._window.visible = False
def destroy_window(self):
if self._window:
self._window.visible = False
self._window.destroy()
self._window = None
def button_pressed(self, button):
state = not button[2]
button[2] = state
button[0].text = button[4] if state else button[3]
button[1](state)
    def set_parameter(self, param_name, val):
        if param_name == "gravity":
            sim.gravity = val  # update the module-level constant in sim.py
def rebuild_ui(self):
ui = omni.ui
row_height = 20
v_spacing = 10
h_spacing = 20
if self._window and self._window.visible:
with self._window.frame:
with ui.VStack(spacing=v_spacing, padding=50):
with ui.HStack(spacing=h_spacing, height=row_height):
for button in self.buttons:
button[0] = ui.Button(
button[3], width=100, height=15, margin=10,
clicked_fn=lambda button=button: self.button_pressed(button))
with ui.HStack(spacing=h_spacing, height=row_height):
ui.Label("Gravity", width=ui.Percent(50), height=10, name="Gravity")
slider = ui.FloatSlider(min=0.0,max=10.0, width=ui.Percent(50))
slider.model.add_value_changed_fn(
lambda val, param_name="gravity": self.set_parameter(param_name, val.get_value_as_float()))
| 2,487 | Python | 29.341463 | 145 | 0.554483 |
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/usdutils.py | from pxr import Usd, UsdGeom, Gf, UsdShade
import numpy as np
import warp as wp
prim_cache = None
def get_global_transform(prim, time, return_mat44):
    global prim_cache
    if prim_cache is None:
        prim_cache = UsdGeom.XformCache()
prim_cache.SetTime(time)
m = prim_cache.GetLocalToWorldTransform(prim)
if return_mat44:
return wp.mat44(
m[0][0], m[1][0], m[2][0], m[3][0],
m[0][1], m[1][1], m[2][1], m[3][1],
m[0][2], m[1][2], m[2][2], m[3][2],
m[0][3], m[1][3], m[2][3], m[3][3])
else:
A = np.array([[m[0][0], m[0][1], m[0][2]], [m[1][0], m[1][1], m[1][2]], [m[2][0], m[2][1], m[2][2]]])
b = np.array([m[3][0], m[3][1], m[3][2]])
return A, b
def set_transform(mesh, trans, quat):
usd_mat = Gf.Matrix4d()
    usd_mat.SetRotateOnly(Gf.Quatd(quat[3], quat[0], quat[1], quat[2]))  # warp quats are (x, y, z, w); Gf.Quatd expects (w, x, y, z)
usd_mat.SetTranslateOnly(Gf.Vec3d(*trans))
xform = UsdGeom.Xform(mesh)
xform.GetOrderedXformOps()[0].Set(usd_mat)
def clone_primvar(prim, prim_clone, name, time=0.0):
try:
attr = UsdGeom.Primvar(prim.GetAttribute(name))
prim_clone.CreatePrimvar(name, attr.GetTypeName(), attr.GetInterpolation()).Set(attr.Get(time))
except:
pass
def clone_prim(stage, prim):
vis = prim.GetAttribute("visibility")
if vis:
vis.Set("invisible")
mesh = UsdGeom.Mesh(prim)
clone_prim_path = '/' + str(prim.GetPath()).replace("/", "_") + '_clone'
UsdGeom.Mesh.Define(stage, clone_prim_path)
prim_clone = UsdGeom.Mesh(stage.GetPrimAtPath(clone_prim_path))
mesh_clone = UsdGeom.Mesh(prim_clone)
stage.GetPrimAtPath(clone_prim_path).SetActive(True)
xform = UsdGeom.Xform(mesh_clone)
xform.ClearXformOpOrder()
xform.AddXformOp(UsdGeom.XformOp.TypeTransform)
    trans_mat, trans_t = get_global_transform(prim, 0.0, False)
trans_points = mesh.GetPointsAttr().Get(0.0) @ trans_mat + trans_t
normal_mat = np.array([\
trans_mat[0,:] / np.linalg.norm(trans_mat[0,:]), \
trans_mat[1,:] / np.linalg.norm(trans_mat[1,:]), \
trans_mat[2,:] / np.linalg.norm(trans_mat[2,:])])
trans_normals = mesh.GetNormalsAttr().Get(0.0) @ normal_mat
mesh_clone.GetPointsAttr().Set(trans_points)
mesh_clone.GetNormalsAttr().Set(trans_normals)
mesh_clone.SetNormalsInterpolation(mesh.GetNormalsInterpolation())
mesh_clone.GetFaceVertexIndicesAttr().Set(mesh.GetFaceVertexIndicesAttr().Get(0.0))
mesh_clone.GetFaceVertexCountsAttr().Set(mesh.GetFaceVertexCountsAttr().Get(0.0))
mesh_clone.GetCornerIndicesAttr().Set(mesh.GetCornerIndicesAttr().Get(0.0))
mesh_clone.GetCornerSharpnessesAttr().Set(mesh.GetCornerSharpnessesAttr().Get(0.0))
mesh_clone.GetCreaseIndicesAttr().Set(mesh.GetCreaseIndicesAttr().Get(0.0))
mesh_clone.GetCreaseLengthsAttr().Set(mesh.GetCreaseLengthsAttr().Get(0.0))
mesh_clone.GetCreaseSharpnessesAttr().Set(mesh.GetCreaseSharpnessesAttr().Get(0.0))
mesh_clone.GetSubdivisionSchemeAttr().Set(mesh.GetSubdivisionSchemeAttr().Get(0.0))
mesh_clone.GetInterpolateBoundaryAttr().Set(mesh.GetInterpolateBoundaryAttr().Get(0.0))
mesh_clone.GetFaceVaryingLinearInterpolationAttr().Set(mesh.GetFaceVaryingLinearInterpolationAttr().Get(0.0))
mesh_clone.GetTriangleSubdivisionRuleAttr().Set(mesh.GetTriangleSubdivisionRuleAttr().Get(0.0))
mesh_clone.GetHoleIndicesAttr().Set(mesh.GetHoleIndicesAttr().Get(0.0))
for attr in prim.GetAttributes():
type = str(attr.GetTypeName())
if type.find("texCoord") >= 0:
clone_primvar(prim, prim_clone, attr.GetName())
try:
mat = UsdShade.MaterialBindingAPI(prim).GetDirectBinding().GetMaterial()
UsdShade.MaterialBindingAPI(prim_clone).Bind(mat)
except:
pass
return prim_clone
def hide_clones(stage):
if stage is None:
return
for prim in stage.Traverse():
if str(prim.GetName()).find("_clone") >= 0:
prim.SetActive(False)
else:
vis = prim.GetAttribute("visibility")
if vis:
vis.Set("inherited")
| 4,122 | Python | 34.543103 | 113 | 0.643862 |
matthias-research/omni.fun/exts/omni.fun/docs/CHANGELOG.md | # CHANGELOG
## [0.1.0] - 2022-08-15
- Initial publish for alpha testing
| 77 | Markdown | 7.666666 | 35 | 0.636364 |
matthias-research/omni.fun/exts/omni.fun/docs/README.md | # Fun [omni.fun]
A simple plugin from Ten Minute Physics.
## Documentation
None
## Source Code
None
| 109 | Markdown | 6.333333 | 40 | 0.688073 |
qcr/benchbot_sim_omni/pip_package_fix.py | import subprocess
import sys
print("HACK FIX FOR BROKEN PACKAGES")
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
def uninstall(package):
subprocess.check_call([sys.executable, "-m", "pip", "uninstall", "--yes", package])
uninstall("click")
install("click")
uninstall("typing-extensions")
install("typing-extensions")
| 375 | Python | 27.923075 | 87 | 0.717333 |
qcr/benchbot_sim_omni/run.py | import flask
import numpy as np
import os
import signal
from builtins import print as bprint
from gevent import event, pywsgi, signal
from pathlib import Path
from spatialmath import SE3, UnitQuaternion
print("STARTING RUN.PY IN BENCHBOT_SIM_OMNI")
DEFAULT_POSE = np.array([1, 0, 0, 0, 0, 0, 0])
DIRTY_EPSILON_DIST = 1
DIRTY_EPSILON_YAW = 2
DIRTY_FILE = '/tmp/benchbot_dirty'
MAP_PRIM_PATH = '/env'
ROBOT_NAME = 'robot'
ROBOT_PRIM_PATH = '/%s' % ROBOT_NAME
ROBOT_COMPONENTS = {
'clock': '/ROS_Clock',
'diff_base': '%s/ROS_DifferentialBase' % ROBOT_PRIM_PATH,
'lidar': '%s/ROS_Lidar' % ROBOT_PRIM_PATH,
'rgbd': '%s/ROS_Camera_Stereo_Left' % ROBOT_PRIM_PATH,
'tf_sensors': '%s/ROS_Carter_Sensors_Broadcaster' % ROBOT_PRIM_PATH,
'tf': '%s/ROS_Carter_Broadcaster' % ROBOT_PRIM_PATH
}
UPDATE_DELAY_SECS = 3.0
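# Poses are stored as [qw, qx, qy, qz, x, y, z]; the helpers below convert
# them (and dynamic-control transforms) to spatialmath SE3 objects.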
def _dc_tf_to_SE3(tf):
r = np.array(tf.r)
return SE3(np.array(tf.p)) * UnitQuaternion(r[3], r[0:3]).SE3()
def _to_SE3(pose):
return SE3(pose[4::]) * UnitQuaternion(pose[0], pose[1:4]).SE3()
def disable_component(prop_path):
from omni.kit.commands import execute
from pxr import Sdf
print("DISABLING '%s.enabled'" % prop_path)
execute("ChangeProperty",
prop_path=Sdf.Path("%s.enabled" % prop_path),
value=False,
prev=None)
def print(*args, **kwargs):
bprint(*args, **kwargs, flush=True)
class SimulatorDaemon:
def __init__(self, port):
self.address = 'localhost:%s' % port
self.inst = None
self.sim = None
self.sim_i = 0
self.sim_collided = False
self.sim_dirty = False
self.map_usd = None
self.robot_usd = None
self.start_pose = None
self._map_usd = None
self._robot_usd = None
self._start_pose = None
self._dc = None
self._robot = None
self._robot_dc = None
def check_dirty(self):
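        # pose delta between the requested start pose and the robot's current
        # pose (from dynamic control); "dirty" once it exceeds the epsilons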
delta = (_to_SE3(self.start_pose).inv() *
_dc_tf_to_SE3(self._dc.get_rigid_body_pose(self._robot_dc)))
return (np.linalg.norm(delta.t[0:2]) > DIRTY_EPSILON_DIST or
np.abs(delta.rpy(unit='deg')[2]) > DIRTY_EPSILON_YAW)
    def check_collided(self):
        # TODO collision checking is not implemented yet; always reports False
        return False
def open_usd(self):
# Bail early if we can't act
if self.inst is None:
print("No simulator running. "
"Stored environment USD, but not opening.")
return
if self.map_usd is None:
print("No environment USD selected. Returning.")
return
# Imports must go after bail early checks pass as they throw errors
# when called in an "inappropriate state" (no idea what that
# corresponds to...)
from omni.isaac.core.utils.stage import open_stage, update_stage
# Stop simulation if running
self.stop_simulation()
# Update the map
if self.map_usd != self._map_usd:
self._dc = None
self._start_pose = None
self._robot = None
self._robot_dc = None
self._robot_usd = None
open_stage(usd_path=self.map_usd)
update_stage()
self._map_usd = self.map_usd
else:
print("Skipping map load; already loaded.")
# Attempt to replace the robot
self.place_robot()
def place_robot(self):
# Bail early if we can't act
if self.inst is None:
print("No simulator running. "
"Stored robot USD & pose, but not opening.")
return
if self.robot_usd is None:
print("No robot USD selected. Returning.")
return
# Imports must go after bail early checks pass as they throw errors
# when called in an "inappropriate state" (no idea what that
# corresponds to...)
from omni.isaac.core.robots import Robot
from omni.isaac.core.utils.stage import (add_reference_to_stage,
update_stage)
# Stop simulation if running
self.stop_simulation()
# Add robot to the environment at the requested pose
p = DEFAULT_POSE if self.start_pose is None else self.start_pose
if self.robot_usd != self._robot_usd:
add_reference_to_stage(usd_path=self.robot_usd,
prim_path=ROBOT_PRIM_PATH)
self._robot = Robot(prim_path=ROBOT_PRIM_PATH, name=ROBOT_NAME)
update_stage()
self._robot_usd = self.robot_usd
else:
print("Skipping robot load; already loaded.")
        if self._start_pose is None or (p != self._start_pose).any():
self._robot.set_world_pose(position=p[4::],
orientation=p[:4])
update_stage()
self._start_pose = p
else:
print("Skipping robot move; already at requested pose.")
# Disable auto-publishing of all robot components (we'll manually
# publish at varying frequencies instead)
for p in ROBOT_COMPONENTS.values():
disable_component(p)
# Attempt to start the simulation
self.start_simulation()
def run(self):
f = flask.Flask('benchbot_sim_omni')
@f.route('/', methods=['GET'])
def __hello():
return flask.jsonify("Hello, I am the Omniverse Sim Daemon")
@f.route('/open_environment', methods=['POST'])
def __open_env():
r = flask.request.json
if 'environment' in r:
self.map_usd = r['environment']
self.open_usd()
return flask.jsonify({})
@f.route('/place_robot', methods=['POST'])
def __place_robot():
r = flask.request.json
if 'robot' in r:
self.robot_usd = r['robot']
if 'start_pose' in r:
# Probably should be regexing...
self.start_pose = np.array([
float(x.strip()) for x in r['start_pose'].replace(
'[', '').replace(']', '').split(',')
])
self.place_robot()
return flask.jsonify({})
@f.route('/restart_sim', methods=['POST'])
def __restart_sim():
self.stop_simulation()
self.start_simulation()
return flask.jsonify({})
@f.route('/start', methods=['POST'])
def __start_inst():
self.start_instance()
return flask.jsonify({})
@f.route('/start_sim', methods=['POST'])
def __start_sim():
self.start_simulation()
return flask.jsonify({})
@f.route('/started', methods=['GET'])
def __started():
# TODO note there is a race condition (returns true before a /start
# job finishes)
return flask.jsonify({'started': self.inst is not None})
@f.route('/stop_sim', methods=['POST'])
def __stop_sim():
self.stop_simulation()
return flask.jsonify({})
# Start long-running server
server = pywsgi.WSGIServer(self.address, f)
evt = event.Event()
for s in [signal.SIGINT, signal.SIGQUIT, signal.SIGTERM]:
signal.signal(s, lambda n, frame: evt.set())
server.start()
while not evt.is_set():
evt.wait(0.001)
self.tick_simulator()
# Cleanup
self.stop_instance()
def start_instance(self):
print("STARTING INSTANCE!!")
if not self.inst is None:
print("Instance already running. Please /stop first.")
return
env = {} if self.map_usd is None else {"open_usd": self.map_usd}
from omni.isaac.kit import SimulationApp
# Start the simulator
self.inst = SimulationApp({
"renderer": "RayTracedLighting",
"headless": False,
**env
})
# Import all required modules, and configure application
from omni.isaac.core.utils.extensions import enable_extension
enable_extension("omni.isaac.ros_bridge")
# Attempt to place the robot if we had a map
if env:
self.place_robot()
def start_simulation(self):
if self.sim is not None:
self.stop_simulation()
if self.inst is None or self.map_usd is None or self.robot_usd is None:
print("Can't start simulation. Missing some required state.")
return
from omni.isaac.core import SimulationContext
self.sim_i = 0
self.sim_collided = False
self.sim_dirty = False
self.sim = SimulationContext()
self.sim.play()
from omni.isaac.dynamic_control import _dynamic_control
self._dc = _dynamic_control.acquire_dynamic_control_interface()
self._robot_dc = self._dc.get_articulation_root_body(
self._dc.get_object(ROBOT_PRIM_PATH))
def stop_instance(self):
if self.inst is None:
print("No instance is running to stop.")
return
self.stop_simulation()
self.inst.close()
self.inst = None
def stop_simulation(self):
if self.sim is None:
print("Skipping. No running simulation to stop")
return
if self.inst is None:
print("Skipping. No running simulator found.")
return
self.sim.stop()
self.sim = None # TODO maybe could reuse with more guarding logic?
def tick_simulator(self):
# Tick simulator steps. Does less now than in 2021.2.1 due to new action graph
if self.inst is None:
return
if self.sim is None:
self.inst.update()
return
self.sim.step()
        # Every 6 steps (~10 Hz at a 60 Hz sim rate): check for a dirty scene
if self.sim_i % 6 == 0:
if not self.sim_dirty:
self.sim_dirty = self.check_dirty()
if self.sim_dirty:
Path(DIRTY_FILE).touch()
        # Every 60 steps (~1 Hz at a 60 Hz sim rate): check for collisions
if self.sim_i % 60 == 0:
self.sim_collided = self.check_collided()
self.sim_i += 1
if __name__ == '__main__':
print("inside run.py __main__")
sd = SimulatorDaemon(port=os.environ.get('PORT'))
sd.run()
| 10,394 | Python | 30.122754 | 86 | 0.554166 |
qcr/benchbot_sim_omni/README.md | **NOTE: this software is part of the BenchBot software stack. For a complete working BenchBot system, please install the BenchBot software stack by following the instructions [here](https://github.com/qcr/benchbot).**
# BenchBot Simulator for Omniverse-powered Isaac Sim
[](http://benchbot.org)
[](https://qcr.github.io)

[](./LICENSE.txt)

The BenchBot Simulator bindings for Omniverse-powered Isaac Sim provide a simple `run` script that makes powerful photorealistic simulations available in ROS, and controllable through a basic HTTP API.
Through a single script, this package provides:
- creation of, and management of, a running [Omniverse-powered Isaac Sim](https://developer.nvidia.com/isaac-sim) instance
- a simple HTTP API for programmatically loading environments, placing robots, and controlling simulations
- ROS interfaces for common mobile robot topics: transforms, odometry, command velocity, RGB images, depth images, laser scans
The configuration is currently Carter-specific, but could easily be extended in the future to target other robots. All simulator interactions also come from a simple Python script, which can serve as a starting point for more complex projects.
## Installation
**Please see the note at the top of the page; the BenchBot ecosystem contains much more than just these bindings**
There is no installation step for the bindings themselves: simply install Isaac Sim, clone this repository, and install the Python dependencies:
1. Follow the instructions on the [NVIDIA Isaac Sim documentation site](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html) for [installing Isaac Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_basic.html)
2. Clone this repository:
```
git clone https://github.com/qcr/benchbot_sim_omni
```
3. Install declared Python dependencies:
```
pip install -r ./.custom_deps
```
## Running and using the simulator bindings
Simulator bindings are run through the `run` script, which will start a blank instance of the simulator with the HTTP API bound on port 10001 by default:
```
./run
```
A simulation in environment `my_env.usd`, with robot `my_robot.usd` at position `(0,0,0)` and quaternion (w,x,y,z) `(1,0,0,0)`, can then be started with the following two curl commands:
```
curl localhost:10001/open_environment \
-H "Content-Type: application/json" \
-d '{"environment": "my_env.usd"}'
curl localhost:10001/place_robot \
-H "Content-Type: application/json" \
-d '{"robot": "my_robot.usd", "start_pose": "1,0,0,0,0,0,0"}'
```
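The same interaction can also be scripted. Below is a minimal sketch using Python's `requests` package (not a dependency of these bindings; any HTTP client works, and the endpoints simply mirror the curl calls above):
```
import requests

base = "http://localhost:10001"  # default daemon address

# Open an environment, then place a robot in it
requests.post(base + "/open_environment",
              json={"environment": "my_env.usd"})
requests.post(base + "/place_robot",
              json={"robot": "my_robot.usd",
                    "start_pose": "1,0,0,0,0,0,0"})

# Check whether the simulator instance reports as started
print(requests.get(base + "/started").json())
```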
Full documentation of configuration options and HTTP API routes is available through the script's `--help` flag:
```
user@pc:~/benchbot_sim_omni/$ ./run --help
run -- BenchBot simulator daemon for Omniverse-powered Isaac Sim
USAGE:
Start the daemon:
run
run -p /path/to/python.sh -P 8080
Print this help information:
run [-h|--help]
OPTION DETAILS:
-h, --help
Show this help menu.
-P,--port
Port the daemon will bind to. Default port of 10001 will
be used if not provided.
-p,--python-sh-path
Path to the 'python.sh' environment script included with your Isaac
Sim installation. Will recursively search for the script in the
current directory if this flag is not provided.
INTERACTING WITH THE DAEMON:
The daemon responds to HTTP requests.
Following routes are supported:
/
Returns a greeting message
/open_environment
Opens a new environment, with USD path specified via 'environment'
data field
/place_robot
Places a robot at a specified pose. Robot USD is specified via
'robot' data field, and start pose via a comma-separated 7-tuple in
        the 'start_pose' field. Format for the pose is:
quat_w,quat_x,quat_y,quat_z,pos_x,pos_y,pos_z
/start
Starts a simulator instance (happens by default when first opened)
    /start_sim
        Starts the simulation in a running simulator instance
    /started
        Returns whether a simulator instance is currently running
    /stop_sim
        Stops the currently running simulation if it exists
    /restart_sim
        Restarts the simulation (a stop followed by a start; generally not needed)
FURTHER DETAILS:
Please contact the authors of BenchBot for support or to report bugs:
[email protected]
```
## Using this simulator with the BenchBot Robot Controller
The [BenchBot Robot Controller](https://github.com/qcr/benchbot_robot_controller) is a wrapping ROS / HTTP hybrid script that manages running robots and their required subprocesses. It is ultimately fed configurations from [BenchBot add-ons](https://github.com/qcr/benchbot_addons) through our [BenchBot supervisor](https://github.com/qcr/benchbot_supervisor) service.
These details are superfluous to these BenchBot simulator bindings, but are provided here for context. This context may be helpful if looking for examples of more complex interactions with the simulator bindings. For example, the `carter_sim_omni.yaml` file in the [robots_sim_omni](https://github.com/benchbot-addons/robots_sim_omni) BenchBot add-on may be of interest.
| 5,559 | Markdown | 41.442748 | 370 | 0.729808 |
AndrePatri/OmniRoboGym/pyproject.toml | [build-system]
requires = ["flit_core >=2,<4"]
build-backend = "flit_core.buildapi"
[project]
name = "omni_robo_gym"
version = "0.1.0"
description = ""
authors = [{name = "AndrePatri", email = "[email protected]"}]
readme = "README.md"
license = {file = "LICENSE"} | 276 | TOML | 24.181816 | 73 | 0.666667 |
AndrePatri/OmniRoboGym/omnirobogym_mamba_env.yml | name: omni_robo_gym_isaac2023.1.1
channels:
- defaults
- pytorch
- nvidia
- conda-forge
- omnia
- robostack-staging
- AndrePatri
dependencies:
- python=3.10
- pip
- pytorch == 2.0.1
- torchvision
- torchaudio
- cuda-toolkit=11.7
- compilers
- cmake
- make
- quaternion
- anaconda-client
- yaml-cpp
- pybind11
- gtest
- eigen3
- posix_ipc=1.0.4
- rospkg=1.5.0
- ros-humble-xacro
- empy
- python-devtools
- perf_sleep
- pyqt
- pyqtgraph
- pip:
- flit
- nvidia-cublas-cu11==11.11.3.6
- gym==0.26.2
- gymnasium==0.28.1
- stable_baselines3[extra]==2.0.0a10
- box2d-py
- tensorboard
- tensorboard-plugin-wit
- protobuf
- matplotlib
- scipy
- urdf-parser-py
- multiprocess
| 789 | YAML | 15.122449 | 40 | 0.593156 |
AndrePatri/OmniRoboGym/meta.yaml | package:
name: omni_robo_gym
version: 0.1.0
source:
path: . # Path to the directory containing your built distribution artifacts
requirements:
build:
- python=3.7
- flit
run:
- python=3.7
about:
home: https://github.com/AndrePatri/CoClusterBridge
license: MIT
  summary: Custom implementations of Tasks and Gyms for Omniverse Isaac Sim based on Gymnasium. Provides easy URDF/SRDF import and cloning, and simulation configuration exploiting the Omniverse API
extra:
recipe-maintainers:
- AndrePatri
| 537 | YAML | 20.519999 | 189 | 0.722533 |
AndrePatri/OmniRoboGym/README.md | # OmniRoboGym
Wrapper on top of [Omniverse Isaac Sim](https://developer.nvidia.com/isaac-sim), a photo-realistic GPU accelerated simulator from NVIDIA.
The aim of the package is to provide an easy interface for loading floating-base robots and their configurations from URDF and SRDF into Isaac Sim, cloning them with the Isaac Sim API and, in general, simplifying simulation setup for RL-based robotics applications. | 402 | Markdown | 79.599984 | 248 | 0.80597 |
AndrePatri/OmniRoboGym/LICENSE.md | GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.
| 18,092 | Markdown | 52.214706 | 77 | 0.785541 |
AndrePatri/OmniRoboGym/omni_robo_gym/envs/isaac_env.py | # Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected])
#
# This file is part of OmniRoboGym and distributed under the General Public License version 2 license.
#
# OmniRoboGym is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# OmniRoboGym is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>.
#
from omni.isaac.kit import SimulationApp
import os
import signal
import carb
import torch
from abc import ABC, abstractmethod
from typing import Union, Tuple, Dict
from SharsorIPCpp.PySharsorIPC import VLevel
from SharsorIPCpp.PySharsorIPC import LogType
from SharsorIPCpp.PySharsorIPC import Journal
import numpy as np
# import gymnasium as gym
# class IsaacSimEnv(gym.Env):
class IsaacSimEnv():
def __init__(
self,
headless: bool,
sim_device: int = 0,
enable_livestream: bool = False,
enable_viewport: bool = False,
debug = False
) -> None:
""" Initializes RL and task parameters.
Args:
headless (bool): Whether to run training headless.
sim_device (int): GPU device ID for running physics simulation. Defaults to 0.
enable_livestream (bool): Whether to enable running with livestream.
enable_viewport (bool): Whether to enable rendering in headless mode.
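        Example (illustrative sketch; assumes a working Isaac Sim install):
            env = IsaacSimEnv(headless=True, sim_device=0)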
"""
self.debug = debug
experience = f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.omnirobogym.kit'
# experience = ""
if headless:
info = f"Will run in headless mode."
Journal.log(self.__class__.__name__,
"__init__",
info,
LogType.STAT,
throw_when_excep = True)
if enable_livestream:
experience = ""
elif enable_viewport:
exception = f"Using viewport is not supported yet."
Journal.log(self.__class__.__name__,
"__init__",
exception,
LogType.EXCEP,
throw_when_excep = True)
else:
experience = f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.omnirobogym.headless.kit'
# experience = f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.gym.headless.kit'
self._simulation_app = SimulationApp({"headless": headless,
"physics_gpu": sim_device},
experience=experience)
info = "Using IsaacSim experience file @ " + experience
Journal.log(self.__class__.__name__,
"__init__",
info,
LogType.STAT,
throw_when_excep = True)
# carb.settings.get_settings().set("/persistent/omnihydra/useSceneGraphInstancing", True)
if enable_livestream:
info = "Livestream enabled"
Journal.log(self.__class__.__name__,
"__init__",
info,
LogType.STAT,
throw_when_excep = True)
from omni.isaac.core.utils.extensions import enable_extension
self._simulation_app.set_setting("/app/livestream/enabled", True)
self._simulation_app.set_setting("/app/window/drawMouse", True)
self._simulation_app.set_setting("/app/livestream/proto", "ws")
self._simulation_app.set_setting("/app/livestream/websocket/framerate_limit", 120)
self._simulation_app.set_setting("/ngx/enabled", False)
enable_extension("omni.kit.livestream.native")
enable_extension("omni.services.streaming.manager")
# handle ctrl+c event
signal.signal(signal.SIGINT, self.signal_handler)
self._render = not headless or enable_livestream or enable_viewport
self._record = False
self.step_counter = 0 # step counter
self._world = None
self.metadata = None
self.gpu_pipeline_enabled = False
def signal_handler(self, sig, frame):
self.close()
def set_task(self,
task,
backend="torch",
sim_params=None,
init_sim=True) -> None:
""" Creates a World object and adds Task to World.
Initializes and registers task to the environment interface.
Triggers task start-up.
Args:
task (RLTask): The task to register to the env.
backend (str): Backend to use for task. Can be "numpy" or "torch". Defaults to "numpy".
sim_params (dict): Simulation parameters for physics settings. Defaults to None.
init_sim (Optional[bool]): Automatically starts simulation. Defaults to True.
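        Example (illustrative only; `my_task` is a placeholder for an
        already-constructed task instance, and the keys shown mirror those
        parsed by this method):
            sim_params = {"physics_dt": 1.0/120.0,
                          "rendering_dt": 1.0/60.0,
                          "use_gpu_pipeline": True,
                          "sim_device": "cuda:0"}
            env.set_task(my_task, backend="torch", sim_params=sim_params)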
"""
from omni.isaac.core.world import World
# parse device based on sim_param settings
if sim_params and "sim_device" in sim_params:
device = sim_params["sim_device"]
else:
device = "cpu"
physics_device_id = carb.settings.get_settings().get_as_int("/physics/cudaDevice")
gpu_id = 0 if physics_device_id < 0 else physics_device_id
if sim_params and "use_gpu_pipeline" in sim_params:
# GPU pipeline must use GPU simulation
if sim_params["use_gpu_pipeline"]:
device = "cuda:" + str(gpu_id)
elif sim_params and "use_gpu" in sim_params:
if sim_params["use_gpu"]:
device = "cuda:" + str(gpu_id)
self.gpu_pipeline_enabled = sim_params["use_gpu_pipeline"]
info = "Using device: " + str(device)
Journal.log(self.__class__.__name__,
"__init__",
info,
LogType.STAT,
throw_when_excep = True)
if (sim_params is None):
info = f"No sim params provided -> defaults will be used."
Journal.log(self.__class__.__name__,
"set_task",
info,
LogType.STAT,
throw_when_excep = True)
sim_params = {}
# defaults for integration and rendering dt
if not("physics_dt" in sim_params):
sim_params["physics_dt"] = 1.0/60.0
dt = sim_params["physics_dt"]
info = f"Using default integration_dt of {dt} s."
Journal.log(self.__class__.__name__,
"set_task",
info,
LogType.STAT,
throw_when_excep = True)
if not("rendering_dt" in sim_params):
sim_params["rendering_dt"] = sim_params["physics_dt"]
dt = sim_params["rendering_dt"]
info = f"Using default rendering_dt of {dt} s."
Journal.log(self.__class__.__name__,
"set_task",
info,
LogType.STAT,
throw_when_excep = True)
self._world = World(
stage_units_in_meters=1.0,
physics_dt=sim_params["physics_dt"],
rendering_dt=sim_params["rendering_dt"], # dt between rendering steps. Note: rendering means rendering a frame of
# the current application and not only rendering a frame to the viewports/ cameras.
            # So UI elements of Isaac Sim will be refreshed with this dt as well if running non-headless
backend=backend,
device=str(device),
physics_prim_path="/physicsScene",
set_defaults = False, # set to True to use the defaults settings [physics_dt = 1.0/ 60.0,
            # stage units in meters = 0.01 (i.e. in cm), rendering_dt = 1.0 / 60.0, gravity = -9.81 m/s^2
# ccd_enabled, stabilization_enabled, gpu dynamics turned off,
# broadcast type is MBP, solver type is TGS]
sim_params=sim_params
)
self._sim_params = sim_params
big_info = "[World] Creating task " + task.name + "\n" + \
"use_gpu_pipeline: " + str(sim_params["use_gpu_pipeline"]) + "\n" + \
"device: " + str(device) + "\n" +\
"backend: " + str(backend) + "\n" +\
"integration_dt: " + str(sim_params["physics_dt"]) + "\n" + \
"rendering_dt: " + str(sim_params["rendering_dt"]) + "\n" \
Journal.log(self.__class__.__name__,
"set_task",
big_info,
LogType.STAT,
throw_when_excep = True)
## we get the physics context to expose additional low-level ##
# settings of the simulation
self._physics_context = self._world.get_physics_context()
self._physics_scene_path = self._physics_context.prim_path
self._physics_context.enable_gpu_dynamics(True)
self._physics_context.enable_stablization(True)
self._physics_scene_prim = self._physics_context.get_current_physics_scene_prim()
self._solver_type = self._physics_context.get_solver_type()
# we set parameters, depending on sim_params dict
if "gpu_max_rigid_contact_count" in sim_params:
self._physics_context.set_gpu_max_rigid_contact_count(sim_params["gpu_max_rigid_contact_count"])
if "gpu_max_rigid_patch_count" in sim_params:
self._physics_context.set_gpu_max_rigid_patch_count(sim_params["gpu_max_rigid_patch_count"])
if "gpu_found_lost_pairs_capacity" in sim_params:
self._physics_context.set_gpu_found_lost_pairs_capacity(sim_params["gpu_found_lost_pairs_capacity"])
if "gpu_found_lost_aggregate_pairs_capacity" in sim_params:
self._physics_context.set_gpu_found_lost_aggregate_pairs_capacity(sim_params["gpu_found_lost_aggregate_pairs_capacity"])
if "gpu_total_aggregate_pairs_capacity" in sim_params:
self._physics_context.set_gpu_total_aggregate_pairs_capacity(sim_params["gpu_total_aggregate_pairs_capacity"])
if "gpu_max_soft_body_contacts" in sim_params:
self._physics_context.set_gpu_max_soft_body_contacts(sim_params["gpu_max_soft_body_contacts"])
if "gpu_max_particle_contacts" in sim_params:
self._physics_context.set_gpu_max_particle_contacts(sim_params["gpu_max_particle_contacts"])
if "gpu_heap_capacity" in sim_params:
self._physics_context.set_gpu_heap_capacity(sim_params["gpu_heap_capacity"])
if "gpu_temp_buffer_capacity" in sim_params:
self._physics_context.set_gpu_temp_buffer_capacity(sim_params["gpu_temp_buffer_capacity"])
if "gpu_max_num_partitions" in sim_params:
self._physics_context.set_gpu_max_num_partitions(sim_params["gpu_max_num_partitions"])
# overwriting defaults
# self._physics_context.set_gpu_max_rigid_contact_count(2 * self._physics_context.get_gpu_max_rigid_contact_count())
# self._physics_context.set_gpu_max_rigid_patch_count(2 * self._physics_context.get_gpu_max_rigid_patch_count())
# self._physics_context.set_gpu_found_lost_pairs_capacity(2 * self._physics_context.get_gpu_found_lost_pairs_capacity())
# self._physics_context.set_gpu_found_lost_aggregate_pairs_capacity(20 * self._physics_context.get_gpu_found_lost_aggregate_pairs_capacity())
# self._physics_context.set_gpu_total_aggregate_pairs_capacity(20 * self._physics_context.get_gpu_total_aggregate_pairs_capacity())
# self._physics_context.set_gpu_heap_capacity(2 * self._physics_context.get_gpu_heap_capacity())
# self._physics_context.set_gpu_temp_buffer_capacity(20 * self._physics_context.get_gpu_heap_capacity())
# self._physics_context.set_gpu_max_num_partitions(20 * self._physics_context.get_gpu_temp_buffer_capacity())
# GPU buffers
self._gpu_max_rigid_contact_count = self._physics_context.get_gpu_max_rigid_contact_count()
self._gpu_max_rigid_patch_count = self._physics_context.get_gpu_max_rigid_patch_count()
self._gpu_found_lost_pairs_capacity = self._physics_context.get_gpu_found_lost_pairs_capacity()
self._gpu_found_lost_aggregate_pairs_capacity = self._physics_context.get_gpu_found_lost_aggregate_pairs_capacity()
self._gpu_total_aggregate_pairs_capacity = self._physics_context.get_gpu_total_aggregate_pairs_capacity()
self._gpu_max_soft_body_contacts = self._physics_context.get_gpu_max_soft_body_contacts()
self._gpu_max_particle_contacts = self._physics_context.get_gpu_max_particle_contacts()
self._gpu_heap_capacity = self._physics_context.get_gpu_heap_capacity()
self._gpu_temp_buffer_capacity = self._physics_context.get_gpu_temp_buffer_capacity()
# self._gpu_max_num_partitions = physics_context.get_gpu_max_num_partitions() # BROKEN->method does not exist
big_info2 = "[physics context]:" + "\n" + \
"gpu_max_rigid_contact_count: " + str(self._gpu_max_rigid_contact_count) + "\n" + \
"gpu_max_rigid_patch_count: " + str(self._gpu_max_rigid_patch_count) + "\n" + \
"gpu_found_lost_pairs_capacity: " + str(self._gpu_found_lost_pairs_capacity) + "\n" + \
"gpu_found_lost_aggregate_pairs_capacity: " + str(self._gpu_found_lost_aggregate_pairs_capacity) + "\n" + \
"gpu_total_aggregate_pairs_capacity: " + str(self._gpu_total_aggregate_pairs_capacity) + "\n" + \
"gpu_max_soft_body_contacts: " + str(self._gpu_max_soft_body_contacts) + "\n" + \
"gpu_max_particle_contacts: " + str(self._gpu_max_particle_contacts) + "\n" + \
"gpu_heap_capacity: " + str(self._gpu_heap_capacity) + "\n" + \
"gpu_temp_buffer_capacity: " + str(self._gpu_temp_buffer_capacity)
Journal.log(self.__class__.__name__,
"set_task",
big_info2,
LogType.STAT,
throw_when_excep = True)
self._scene = self._world.scene
from omni.usd import get_context
self._stage = get_context().get_stage()
from pxr import UsdLux, Sdf, Gf, UsdPhysics, PhysicsSchemaTools
# add lighting
distantLight = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/World/DistantLight"))
distantLight.CreateIntensityAttr(500)
self._world._current_tasks = dict() # resets registered tasks
self._task = task
self._task.set_world(self._world)
self._task.configure_scene()
self._world.add_task(self._task)
self._num_envs = self._task.num_envs
if sim_params and "enable_viewport" in sim_params:
self._render = sim_params["enable_viewport"]
Journal.log(self.__class__.__name__,
"set_task",
"[render]: " + str(self._render),
LogType.STAT,
throw_when_excep = True)
# if init_sim:
        #     self._world.reset() # after the first reset we can get all quantities
        #     # from the scene
        #     self._task.post_initialization_steps() # performs initialization
        #     # steps after the first world reset was called
def render(self, mode="human") -> None:
""" Step the renderer.
Args:
mode (str): Select mode of rendering based on OpenAI environments.
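        Example (sketch; "rgb_array" requires a render product created via
        create_viewport_render_product() beforehand):
            env.create_viewport_render_product()
            frame = env.render(mode="rgb_array")  # H x W x 3 uint8 array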
"""
if mode == "human":
self._world.render()
return None
elif mode == "rgb_array":
# check if viewport is enabled -- if not, then complain because we won't get any data
if not self._render or not self._record:
exception = f"Cannot render '{mode}' when rendering is not enabled. Please check the provided" + \
"arguments to the environment class at initialization."
Journal.log(self.__class__.__name__,
"__init__",
exception,
LogType.EXCEP,
throw_when_excep = True)
# obtain the rgb data
rgb_data = self._rgb_annotator.get_data()
# convert to numpy array
rgb_data = np.frombuffer(rgb_data, dtype=np.uint8).reshape(*rgb_data.shape)
# return the rgb data
return rgb_data[:, :, :3]
else:
# gym.Env.render(self, mode=mode)
return None
def create_viewport_render_product(self, resolution=(1280, 720)):
"""Create a render product of the viewport for rendering."""
try:
import omni.replicator.core as rep
# create render product
self._render_product = rep.create.render_product("/OmniverseKit_Persp", resolution)
# create rgb annotator -- used to read data from the render product
self._rgb_annotator = rep.AnnotatorRegistry.get_annotator("rgb", device="cpu")
self._rgb_annotator.attach([self._render_product])
self._record = True
except Exception as e:
carb.log_info("omni.replicator.core could not be imported. Skipping creation of render product.")
carb.log_info(str(e))
def close(self) -> None:
""" Closes simulation.
"""
if self._simulation_app.is_running():
self._simulation_app.close()
return
@abstractmethod
def step(self,
actions = None):
""" Basic implementation for stepping simulation"""
pass
@abstractmethod
def reset(self):
""" Usually resets the task and updates observations +
# other custom operations. """
pass
@property
def num_envs(self):
""" Retrieves number of environments.
Returns:
num_envs(int): Number of environments.
"""
return self._num_envs
@property
def simulation_app(self):
"""Retrieves the SimulationApp object.
Returns:
simulation_app(SimulationApp): SimulationApp.
"""
return self._simulation_app
@property
def get_world(self):
"""Retrieves the World object for simulation.
Returns:
world(World): Simulation World.
"""
return self._world
@property
def task(self):
"""Retrieves the task.
Returns:
task(BaseTask): Task.
"""
return self._task
@property
def render_enabled(self):
"""Whether rendering is enabled.
Returns:
render(bool): is render enabled.
"""
return self._render
| 19,383 | Python | 39.299376 | 149 | 0.579735 |
AndrePatri/OmniRoboGym/omni_robo_gym/tasks/isaac_task.py | # Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected])
#
# This file is part of OmniRoboGym and distributed under the General Public License version 2 license.
#
# OmniRoboGym is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# OmniRoboGym is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>.
#
from omni.isaac.core.tasks.base_task import BaseTask
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.viewports import set_camera_view
from omni.isaac.core.world import World
import omni.kit
import numpy as np
import torch
from omni.importer.urdf import _urdf
from omni.isaac.core.utils.prims import move_prim
from omni.isaac.cloner import GridCloner
import omni.isaac.core.utils.prims as prim_utils
# from omni.isaac.sensor import ContactSensor
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.scenes.scene import Scene
from omni_robo_gym.utils.jnt_imp_cntrl import OmniJntImpCntrl
from omni_robo_gym.utils.homing import OmniRobotHomer
from omni_robo_gym.utils.contact_sensor import OmniContactSensors
from omni_robo_gym.utils.terrains import RlTerrains
from omni_robo_gym.utils.math_utils import quat_to_omega, quaternion_difference, rel_vel
from abc import abstractmethod
from typing import List, Dict
from SharsorIPCpp.PySharsorIPC import LogType
from SharsorIPCpp.PySharsorIPC import Journal
class IsaacTask(BaseTask):
def __init__(self,
name: str,
integration_dt: float,
robot_names: List[str],
robot_pkg_names: List[str] = None,
contact_prims: Dict[str, List] = None,
contact_offsets: Dict[str, Dict[str, np.ndarray]] = None,
sensor_radii: Dict[str, Dict[str, np.ndarray]] = None,
num_envs = 1,
device = "cuda",
cloning_offset: np.array = None,
fix_base: List[bool] = None,
self_collide: List[bool] = None,
merge_fixed: List[bool] = None,
replicate_physics: bool = True,
solver_position_iteration_count: int = 4,
solver_velocity_iteration_count: int = 1,
solver_stabilization_thresh: float = 1e-5,
offset=None,
env_spacing = 5.0,
spawning_radius = 1.0,
use_flat_ground = True,
default_jnt_stiffness = 300.0,
default_jnt_damping = 20.0,
default_wheel_stiffness = 0.0,
default_wheel_damping = 10.0,
override_art_controller = False,
dtype = torch.float64,
debug_enabled: bool = False,
verbose = False,
use_diff_velocities = False) -> None:
self.torch_dtype = dtype
self._debug_enabled = debug_enabled
self._verbose = verbose
self.use_diff_velocities = use_diff_velocities
self.num_envs = num_envs
self._override_art_controller = override_art_controller
self._integration_dt = integration_dt # just used for contact reporting
self.torch_device = torch.device(device) # defaults to "cuda" ("cpu" also valid)
self.using_gpu = False
if self.torch_device == torch.device("cuda"):
self.using_gpu = True
        self.robot_names = robot_names # these are (potentially) custom names used to identify each robot
self.robot_pkg_names = robot_pkg_names # will be used to search for URDF and SRDF packages
self.scene_setup_completed = False
if self.robot_pkg_names is None:
self.robot_pkg_names = self.robot_names # if not provided, robot_names are the same as robot_pkg_names
else:
# check dimension consistency
if len(robot_names) != len(robot_pkg_names):
exception = "The provided robot names list must match the length " + \
"of the provided robot package names"
raise Exception(exception)
if fix_base is None:
self._fix_base = [False] * len(self.robot_names)
else:
# check dimension consistency
if len(fix_base) != len(robot_pkg_names):
exception = "The provided fix_base list of boolean must match the length " + \
"of the provided robot package names"
raise Exception(exception)
self._fix_base = fix_base
if self_collide is None:
self._self_collide = [False] * len(self.robot_names)
else:
# check dimension consistency
if len(self_collide) != len(robot_pkg_names):
exception = "The provided self_collide list of boolean must match the length " + \
"of the provided robot package names"
raise Exception(exception)
self._self_collide = self_collide
if merge_fixed is None:
self._merge_fixed = [False] * len(self.robot_names)
else:
# check dimension consistency
if len(merge_fixed) != len(robot_pkg_names):
exception = "The provided merge_fixed list of boolean must match the length " + \
"of the provided robot package names"
raise Exception(exception)
self._merge_fixed = merge_fixed
self._urdf_paths = {}
self._srdf_paths = {}
self._robots_art_views = {}
self._robots_articulations = {}
self._robots_geom_prim_views = {}
self._solver_position_iteration_count = solver_position_iteration_count # solver position iteration count
# -> higher number makes simulation more accurate
self._solver_velocity_iteration_count = solver_velocity_iteration_count
        self._solver_stabilization_thresh = solver_stabilization_thresh # threshold for kin. energy below which an articulation
        # "goes to sleep", i.e. it's not simulated anymore until some action wakes it up
# potentially, each robot could have its own setting for the solver (not supported yet)
self._solver_position_iteration_counts = {}
self._solver_velocity_iteration_counts = {}
self._solver_stabilization_threshs = {}
self.robot_bodynames = {}
self.robot_n_links = {}
self.robot_n_dofs = {}
self.robot_dof_names = {}
self._root_p = {}
self._root_q = {}
self._jnts_q = {}
self._root_p_prev = {} # used for num differentiation
self._root_q_prev = {} # used for num differentiation
self._jnts_q_prev = {} # used for num differentiation
self._root_p_default = {}
self._root_q_default = {}
self._jnts_q_default = {}
self._root_v = {}
self._root_v_default = {}
self._root_omega = {}
self._root_omega_default = {}
self._jnts_v = {}
self._jnts_v_default = {}
self._jnts_eff_default = {}
self._root_pos_offsets = {}
self._root_q_offsets = {}
        self.distr_offset = {} # describes how robots within each env are distributed
self.jnt_imp_controllers = {}
self.homers = {}
# default jnt impedance settings
self.default_jnt_stiffness = default_jnt_stiffness
self.default_jnt_damping = default_jnt_damping
self.default_wheel_stiffness = default_wheel_stiffness
self.default_wheel_damping = default_wheel_damping
self.use_flat_ground = use_flat_ground
self.spawning_radius = spawning_radius # [m] -> default distance between roots of robots in a single
# environment
        self._calc_robot_distrib() # computes the offsets of robots within each env.
self._env_ns = "/World/envs"
self._env_spacing = env_spacing # [m]
self._template_env_ns = self._env_ns + "/env_0"
self._cloner = GridCloner(spacing=self._env_spacing)
self._cloner.define_base_env(self._env_ns)
prim_utils.define_prim(self._template_env_ns)
self._envs_prim_paths = self._cloner.generate_paths(self._env_ns + "/env",
self.num_envs)
self._cloning_offset = cloning_offset
if self._cloning_offset is None:
self._cloning_offset = np.array([[0, 0, 0]] * self.num_envs)
self._replicate_physics = replicate_physics
self._world_initialized = False
self._ground_plane_prim_path = "/World/terrain"
self._world = None
self._world_scene = None
self._world_physics_context = None
self.omni_contact_sensors = {}
self.contact_prims = contact_prims
for robot_name in contact_prims:
self.omni_contact_sensors[robot_name] = OmniContactSensors(
name = robot_name,
n_envs = self.num_envs,
contact_prims = contact_prims,
contact_offsets = contact_offsets,
sensor_radii = sensor_radii,
device = self.torch_device,
dtype = self.torch_dtype,
enable_debug=self._debug_enabled)
# trigger __init__ of parent class
BaseTask.__init__(self,
name=name,
offset=offset)
        self.xrdf_cmd_vals = [] # by default empty; needs to be overridden by
# child class
def update_jnt_imp_control_gains(self,
robot_name: str,
jnt_stiffness: float,
jnt_damping: float,
wheel_stiffness: float,
wheel_damping: float,
env_indxs: torch.Tensor = None):
# updates joint imp. controller with new impedance values
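        # Illustrative usage (hypothetical gain values):
        #   task.update_jnt_imp_control_gains("myrobot",
        #       jnt_stiffness=400.0, jnt_damping=30.0,
        #       wheel_stiffness=0.0, wheel_damping=15.0)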
if self._debug_enabled:
for_robots = ""
if env_indxs is not None:
if not isinstance(env_indxs, torch.Tensor):
msg = "Provided env_indxs should be a torch tensor of indexes!"
Journal.log(self.__class__.__name__,
"update_jnt_imp_control_gains",
msg,
LogType.EXCEP,
throw_when_excep = True)
if self.using_gpu:
if not env_indxs.device.type == "cuda":
error = "Provided env_indxs should be on GPU!"
Journal.log(self.__class__.__name__,
"_step_jnt_imp_control",
error,
LogType.EXCEP,
True)
else:
if not env_indxs.device.type == "cpu":
error = "Provided env_indxs should be on CPU!"
Journal.log(self.__class__.__name__,
"_step_jnt_imp_control",
error,
LogType.EXCEP,
True)
for_robots = f"for robot {robot_name}, indexes: " + str(env_indxs.tolist())
if self._verbose:
Journal.log(self.__class__.__name__,
"update_jnt_imp_control_gains",
f"updating joint impedances " + for_robots,
LogType.STAT,
throw_when_excep = True)
# set jnt imp gains for the whole robot
if env_indxs is None:
gains_pos = torch.full((self.num_envs, \
self.jnt_imp_controllers[robot_name].n_dofs),
jnt_stiffness,
device = self.torch_device,
dtype=self.torch_dtype)
gains_vel = torch.full((self.num_envs, \
self.jnt_imp_controllers[robot_name].n_dofs),
jnt_damping,
device = self.torch_device,
dtype=self.torch_dtype)
else:
gains_pos = torch.full((env_indxs.shape[0], \
self.jnt_imp_controllers[robot_name].n_dofs),
jnt_stiffness,
device = self.torch_device,
dtype=self.torch_dtype)
gains_vel = torch.full((env_indxs.shape[0], \
self.jnt_imp_controllers[robot_name].n_dofs),
jnt_damping,
device = self.torch_device,
dtype=self.torch_dtype)
self.jnt_imp_controllers[robot_name].set_gains(
pos_gains = gains_pos,
vel_gains = gains_vel,
robot_indxs = env_indxs)
# in case of wheels
wheels_indxs = self.jnt_imp_controllers[robot_name].get_jnt_idxs_matching(
name_pattern="wheel")
if wheels_indxs is not None:
if env_indxs is None:
# wheels are velocity-controlled
wheels_pos_gains = torch.full((self.num_envs, len(wheels_indxs)),
wheel_stiffness,
device = self.torch_device,
dtype=self.torch_dtype)
wheels_vel_gains = torch.full((self.num_envs, len(wheels_indxs)),
wheel_damping,
device = self.torch_device,
dtype=self.torch_dtype)
else:
# wheels are velocity-controlled
wheels_pos_gains = torch.full((env_indxs.shape[0], len(wheels_indxs)),
wheel_stiffness,
device = self.torch_device,
dtype=self.torch_dtype)
wheels_vel_gains = torch.full((env_indxs.shape[0], len(wheels_indxs)),
wheel_damping,
device = self.torch_device,
dtype=self.torch_dtype)
self.jnt_imp_controllers[robot_name].set_gains(
pos_gains = wheels_pos_gains,
vel_gains = wheels_vel_gains,
jnt_indxs=wheels_indxs,
robot_indxs = env_indxs)
def update_root_offsets(self,
robot_name: str,
env_indxs: torch.Tensor = None):
if self._debug_enabled:
for_robots = ""
if env_indxs is not None:
if not isinstance(env_indxs, torch.Tensor):
msg = "Provided env_indxs should be a torch tensor of indexes!"
Journal.log(self.__class__.__name__,
"update_root_offsets",
msg,
LogType.EXCEP,
throw_when_excep = True)
if self.using_gpu:
if not env_indxs.device.type == "cuda":
error = "Provided env_indxs should be on GPU!"
Journal.log(self.__class__.__name__,
"_step_jnt_imp_control",
error,
LogType.EXCEP,
True)
else:
if not env_indxs.device.type == "cpu":
error = "Provided env_indxs should be on CPU!"
Journal.log(self.__class__.__name__,
"_step_jnt_imp_control",
error,
LogType.EXCEP,
True)
for_robots = f"for robot {robot_name}, indexes: " + str(env_indxs.tolist())
if self._verbose:
Journal.log(self.__class__.__name__,
"update_root_offsets",
f"updating root offsets " + for_robots,
LogType.STAT,
throw_when_excep = True)
# only planar position used
if env_indxs is None:
self._root_pos_offsets[robot_name][:, 0:2] = self._root_p[robot_name][:, 0:2]
self._root_q_offsets[robot_name][:, :] = self._root_q[robot_name]
else:
self._root_pos_offsets[robot_name][env_indxs, 0:2] = self._root_p[robot_name][env_indxs, 0:2]
self._root_q_offsets[robot_name][env_indxs, :] = self._root_q[robot_name][env_indxs, :]
def synch_default_root_states(self,
robot_name: str = None,
env_indxs: torch.Tensor = None):
if self._debug_enabled:
for_robots = ""
if env_indxs is not None:
if not isinstance(env_indxs, torch.Tensor):
msg = "Provided env_indxs should be a torch tensor of indexes!"
Journal.log(self.__class__.__name__,
"synch_default_root_states",
msg,
LogType.EXCEP,
throw_when_excep = True)
if self.using_gpu:
if not env_indxs.device.type == "cuda":
error = "Provided env_indxs should be on GPU!"
Journal.log(self.__class__.__name__,
"_step_jnt_imp_control",
error,
LogType.EXCEP,
True)
else:
if not env_indxs.device.type == "cpu":
error = "Provided env_indxs should be on CPU!"
Journal.log(self.__class__.__name__,
"_step_jnt_imp_control",
error,
LogType.EXCEP,
True)
for_robots = f"for robot {robot_name}, indexes: " + str(env_indxs.tolist())
if self._verbose:
Journal.log(self.__class__.__name__,
"synch_default_root_states",
f"updating default root states " + for_robots,
LogType.STAT,
throw_when_excep = True)
if env_indxs is None:
self._root_p_default[robot_name][:, :] = self._root_p[robot_name]
self._root_q_default[robot_name][:, :] = self._root_q[robot_name]
else:
self._root_p_default[robot_name][env_indxs, :] = self._root_p[robot_name][env_indxs, :]
self._root_q_default[robot_name][env_indxs, :] = self._root_q[robot_name][env_indxs, :]
def post_initialization_steps(self):
print("Performing post-initialization steps")
        self._world_initialized = True # used by other methods which need to run
# only when the world was initialized
# populates robot info fields
self._fill_robot_info_from_world()
# initializes homing managers
self._init_homing_managers()
# initializes robot state data
self._init_robots_state()
# default robot state
self._set_robots_default_jnt_config()
self._set_robots_root_default_config()
# initializes joint impedance controllers
self._init_jnt_imp_control()
# update solver options
self._update_art_solver_options()
self.reset()
self._custom_post_init()
self._get_solver_info() # get again solver option before printing everything
self._print_envs_info() # debug prints
def apply_collision_filters(self,
physicscene_path: str,
coll_root_path: str):
self._cloner.filter_collisions(physicsscene_path = physicscene_path,
collision_root_path = coll_root_path,
prim_paths=self._envs_prim_paths,
global_paths=[self._ground_plane_prim_path] # can collide with these prims
)
def reset_jnt_imp_control(self,
robot_name: str,
env_indxs: torch.Tensor = None):
if self._debug_enabled:
for_robots = ""
if env_indxs is not None:
if not isinstance(env_indxs, torch.Tensor):
Journal.log(self.__class__.__name__,
"reset_jnt_imp_control",
"Provided env_indxs should be a torch tensor of indexes!",
LogType.EXCEP,
throw_when_excep = True)
if self.using_gpu:
if not env_indxs.device.type == "cuda":
error = "Provided env_indxs should be on GPU!"
Journal.log(self.__class__.__name__,
"_step_jnt_imp_control",
error,
LogType.EXCEP,
True)
else:
if not env_indxs.device.type == "cpu":
error = "Provided env_indxs should be on CPU!"
Journal.log(self.__class__.__name__,
"_step_jnt_imp_control",
error,
LogType.EXCEP,
True)
for_robots = f"for robot {robot_name}, indexes: " + str(env_indxs)
if self._verbose:
Journal.log(self.__class__.__name__,
"reset_jnt_imp_control",
f"resetting joint impedances " + for_robots,
LogType.STAT,
throw_when_excep = True)
# resets all internal data, refs to defaults
self.jnt_imp_controllers[robot_name].reset(robot_indxs = env_indxs)
# restore current state
if env_indxs is None:
self.jnt_imp_controllers[robot_name].update_state(pos = self._jnts_q[robot_name][:, :],
vel = self._jnts_v[robot_name][:, :],
eff = None,
robot_indxs = None)
else:
self.jnt_imp_controllers[robot_name].update_state(pos = self._jnts_q[robot_name][env_indxs, :],
vel = self._jnts_v[robot_name][env_indxs, :],
eff = None,
robot_indxs = env_indxs)
# restore default gains
self.update_jnt_imp_control_gains(robot_name = robot_name,
jnt_stiffness = self.default_jnt_stiffness,
jnt_damping = self.default_jnt_damping,
wheel_stiffness = self.default_wheel_stiffness,
wheel_damping = self.default_wheel_damping,
env_indxs = env_indxs)
# restore jnt imp refs to homing
if env_indxs is None:
self.jnt_imp_controllers[robot_name].set_refs(pos_ref=self.homers[robot_name].get_homing()[:, :],
robot_indxs = None)
else:
self.jnt_imp_controllers[robot_name].set_refs(pos_ref=self.homers[robot_name].get_homing()[env_indxs, :],
robot_indxs = env_indxs)
# actually applies reset commands to the articulation
# self.jnt_imp_controllers[robot_name].apply_cmds()
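# hedged usage sketch (illustrative names, not from the original sources):
# after e.g. a failure on some environments, reset only those controllers:
# failed = torch.tensor([0, 3, 7], dtype=torch.long, device=task.torch_device)
# task.reset_jnt_imp_control(robot_name="my_robot", env_indxs=failed)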
def set_world(self,
world: World):
if not isinstance(world, World):
Journal.log(self.__class__.__name__,
"configure_scene",
"world should be an instance of omni.isaac.core.world.World!",
LogType.EXCEP,
throw_when_excep = True)
self._world = world
self._world_scene = self._world.scene
self._world_physics_context = self._world.get_physics_context()
def set_up_scene(self,
scene: Scene):
super().set_up_scene(scene)
def configure_scene(self) -> None:
# this is called automatically by the environment BEFORE
# initializing the simulation
if self._world is None:
Journal.log(self.__class__.__name__,
"configure_scene",
"Did you call the set_world() method??",
LogType.EXCEP,
throw_when_excep = True)
if not self.scene_setup_completed:
for i in range(len(self.robot_names)):
robot_name = self.robot_names[i]
robot_pkg_name = self.robot_pkg_names[i]
fix_base = self._fix_base[i]
self_collide = self._self_collide[i]
merge_fixed = self._merge_fixed[i]
self._generate_rob_descriptions(robot_name=robot_name,
robot_pkg_name=robot_pkg_name)
self._import_urdf(robot_name,
fix_base=fix_base,
self_collide=self_collide,
merge_fixed=merge_fixed)
Journal.log(self.__class__.__name__,
"set_up_scene",
"cloning environments...",
LogType.STAT,
throw_when_excep = True)
self._cloner.clone(
source_prim_path=self._template_env_ns,
prim_paths=self._envs_prim_paths,
replicate_physics=self._replicate_physics,
position_offsets = self._cloning_offset
) # we can clone the environment in which all the robots are
Journal.log(self.__class__.__name__,
"set_up_scene",
"finishing scene setup...",
LogType.STAT,
throw_when_excep = True)
for i in range(len(self.robot_names)):
robot_name = self.robot_names[i]
self._robots_art_views[robot_name] = ArticulationView(name = robot_name + "ArtView",
prim_paths_expr = self._env_ns + "/env_.*"+ "/" + robot_name + "/base_link",
reset_xform_properties=False)
self._robots_articulations[robot_name] = self._world_scene.add(self._robots_art_views[robot_name])
# self._robots_geom_prim_views[robot_name] = GeometryPrimView(name = robot_name + "GeomView",
# prim_paths_expr = self._env_ns + "/env*"+ "/" + robot_name,
# # prepare_contact_sensors = True
# )
# self._robots_geom_prim_views[robot_name].apply_collision_apis() # to be able to apply contact sensors
if self.use_flat_ground:
self._world_scene.add_default_ground_plane(z_position=0,
name="terrain",
prim_path= self._ground_plane_prim_path,
static_friction=1.0,
dynamic_friction=1.0,
restitution=0.2)
else:
self.terrains = RlTerrains(get_current_stage())
self.terrains.get_obstacles_terrain(terrain_size=40,
num_obs=100,
max_height=0.4,
min_size=0.5,
max_size=5.0)
# delete_prim(self._ground_plane_prim_path + "/SphereLight") # we remove the default spherical light
# set default camera viewport position and target
self._set_initial_camera_params()
self.apply_collision_filters(self._world_physics_context.prim_path,
"/World/collisions")
# init contact sensors
self._init_contact_sensors() # IMPORTANT: this has to be called
# after calling the clone() method and initializing articulation views!!!
self._world.reset() # reset world to make art views available
self.post_initialization_steps()
self.scene_setup_completed = True
def post_reset(self):
pass
def reset(self,
env_indxs: torch.Tensor = None,
robot_names: List[str] =None):
# we first reset all target articulations to their default state
rob_names = robot_names if (robot_names is not None) else self.robot_names
# resets the state of target robot and env to the defaults
self.reset_state(env_indxs=env_indxs,
robot_names=rob_names)
# and jnt imp. controllers
for i in range(len(rob_names)):
self.reset_jnt_imp_control(robot_name=rob_names[i],
env_indxs=env_indxs)
def reset_state(self,
env_indxs: torch.Tensor = None,
robot_names: List[str] =None):
rob_names = robot_names if (robot_names is not None) else self.robot_names
if env_indxs is not None:
if self._debug_enabled:
if self.using_gpu:
if not env_indxs.device.type == "cuda":
error = "Provided env_indxs should be on GPU!"
Journal.log(self.__class__.__name__,
"_step_jnt_imp_control",
error,
LogType.EXCEP,
True)
else:
if not env_indxs.device.type == "cpu":
error = "Provided env_indxs should be on CPU!"
Journal.log(self.__class__.__name__,
"_step_jnt_imp_control",
error,
LogType.EXCEP,
True)
for i in range(len(rob_names)):
robot_name = rob_names[i]
# root q
self._robots_art_views[robot_name].set_world_poses(positions = self._root_p_default[robot_name][env_indxs, :],
orientations=self._root_q_default[robot_name][env_indxs, :],
indices = env_indxs)
# jnts q
self._robots_art_views[robot_name].set_joint_positions(positions = self._jnts_q_default[robot_name][env_indxs, :],
indices = env_indxs)
# jnts v
self._robots_art_views[robot_name].set_joint_velocities(velocities = self._jnts_v_default[robot_name][env_indxs, :],
indices = env_indxs)
# root v and omega
concatenated_vel = torch.cat((self._root_v_default[robot_name][env_indxs, :],
self._root_omega_default[robot_name][env_indxs, :]), dim=1)
self._robots_art_views[robot_name].set_velocities(velocities = concatenated_vel,
indices = env_indxs)
# jnts eff
self._robots_art_views[robot_name].set_joint_efforts(efforts = self._jnts_eff_default[robot_name][env_indxs, :],
indices = env_indxs)
else:
for i in range(len(rob_names)):
robot_name = rob_names[i]
# root q
self._robots_art_views[robot_name].set_world_poses(positions = self._root_p_default[robot_name][:, :],
orientations=self._root_q_default[robot_name][:, :],
indices = None)
# jnts q
self._robots_art_views[robot_name].set_joint_positions(positions = self._jnts_q_default[robot_name][:, :],
indices = None)
# jnts v
self._robots_art_views[robot_name].set_joint_velocities(velocities = self._jnts_v_default[robot_name][:, :],
indices = None)
# root v and omega
concatenated_vel = torch.cat((self._root_v_default[robot_name][:, :],
self._root_omega_default[robot_name][:, :]), dim=1)
self._robots_art_views[robot_name].set_velocities(velocities = concatenated_vel,
indices = None)
# jnts eff
self._robots_art_views[robot_name].set_joint_efforts(efforts = self._jnts_eff_default[robot_name][:, :],
indices = None)
# we update the robots state
self.get_states(env_indxs=env_indxs,
robot_names=rob_names)
def close(self):
pass
def root_pos_offsets(self,
robot_name: str,
env_idxs: torch.Tensor = None):
if env_idxs is None:
return self._root_pos_offsets[robot_name]
else:
return self._root_pos_offsets[robot_name][env_idxs, :]
def root_q_offsets(self,
robot_name: str,
env_idxs: torch.Tensor = None):
if env_idxs is None:
return self._root_q_offsets[robot_name]
else:
return self._root_q_offsets[robot_name][env_idxs, :]
def root_p(self,
robot_name: str,
env_idxs: torch.Tensor = None):
if env_idxs is None:
return self._root_p[robot_name]
else:
return self._root_p[robot_name][env_idxs, :]
def root_p_rel(self,
robot_name: str,
env_idxs: torch.Tensor = None):
rel_pos = torch.sub(self.root_p(robot_name=robot_name,
env_idxs=env_idxs),
self.root_pos_offsets(robot_name=robot_name,
env_idxs=env_idxs))
return rel_pos
def root_q(self,
robot_name: str,
env_idxs: torch.Tensor = None):
if env_idxs is None:
return self._root_q[robot_name]
else:
return self._root_q[robot_name][env_idxs, :]
def root_q_rel(self,
robot_name: str,
env_idxs: torch.Tensor = None):
rel_q = quaternion_difference(self.root_q_offsets(robot_name=robot_name,
env_idxs=env_idxs),
self.root_q(robot_name=robot_name,
env_idxs=env_idxs))
return rel_q
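# note: since quaternion_difference(q_offset, q_meas) = q_meas * conj(q_offset),
# the returned rel_q is the rotation that maps the stored offset orientation
# onto the measured root orientation.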
def root_v(self,
robot_name: str,
env_idxs: torch.Tensor = None):
if env_idxs is None:
return self._root_v[robot_name]
else:
return self._root_v[robot_name][env_idxs, :]
def root_v_rel(self,
robot_name: str,
env_idxs: torch.Tensor = None):
v_rel = rel_vel(offset_q0_q1=self.root_q_offsets(robot_name=robot_name,
env_idxs=env_idxs),
v0=self.root_v(robot_name=robot_name, env_idxs=env_idxs))
return v_rel
def root_omega(self,
robot_name: str,
env_idxs: torch.Tensor = None):
if env_idxs is None:
return self._root_omega[robot_name]
else:
return self._root_omega[robot_name][env_idxs, :]
def root_omega_rel(self,
robot_name: str,
env_idxs: torch.Tensor = None):
omega_rel = rel_vel(offset_q0_q1=self.root_q_offsets(robot_name=robot_name,
env_idxs=env_idxs),
v0=self.root_omega(robot_name=robot_name, env_idxs=env_idxs))
return omega_rel
def jnts_q(self,
robot_name: str,
env_idxs: torch.Tensor = None):
if env_idxs is None:
return self._jnts_q[robot_name]
else:
return self._jnts_q[robot_name][env_idxs, :]
def jnts_v(self,
robot_name: str,
env_idxs: torch.Tensor = None):
if env_idxs is None:
return self._jnts_v[robot_name]
else:
return self._jnts_v[robot_name][env_idxs, :]
def integration_dt(self):
return self._integration_dt
@abstractmethod
def _xrdf_cmds(self) -> Dict:
# this has to be implemented by the user depending on the arguments
# the xacro description of the robot takes. The output is a dictionary
# mapping each robot name to its list of xacro commands.
# Example implementation:
# def _xrdf_cmds(self):
#   cmds = {}
#   cmds[self.robot_names[0]] = []
#   xrdf_cmd_vals = [True, True, True, False, False, True]
#   legs = "true" if xrdf_cmd_vals[0] else "false"
#   big_wheel = "true" if xrdf_cmd_vals[1] else "false"
#   upper_body = "true" if xrdf_cmd_vals[2] else "false"
#   velodyne = "true" if xrdf_cmd_vals[3] else "false"
#   realsense = "true" if xrdf_cmd_vals[4] else "false"
#   floating_joint = "true" if xrdf_cmd_vals[5] else "false" # horizon needs a floating joint
#   cmds[self.robot_names[0]].append("legs:=" + legs)
#   cmds[self.robot_names[0]].append("big_wheel:=" + big_wheel)
#   cmds[self.robot_names[0]].append("upper_body:=" + upper_body)
#   cmds[self.robot_names[0]].append("velodyne:=" + velodyne)
#   cmds[self.robot_names[0]].append("realsense:=" + realsense)
#   cmds[self.robot_names[0]].append("floating_joint:=" + floating_joint)
#   return cmds
pass
@abstractmethod
def pre_physics_step(self,
actions,
robot_name: str) -> None:
# apply actions to simulated robot
# to be overriden by child class depending
# on specific needs
pass
def _generate_srdf(self,
robot_name: str,
robot_pkg_name: str):
# we generate the URDF where the description package is located
import rospkg
rospackage = rospkg.RosPack()
descr_path = rospackage.get_path(robot_pkg_name + "_srdf")
srdf_path = descr_path + "/srdf"
xacro_name = robot_pkg_name
xacro_path = srdf_path + "/" + xacro_name + ".srdf.xacro"
self._srdf_paths[robot_name] = self._descr_dump_path + "/" + robot_name + ".srdf"
if self._xrdf_cmds() is not None:
cmds = self._xrdf_cmds()[robot_name]
if cmds is None:
xacro_cmd = ["xacro"] + [xacro_path] + ["-o"] + [self._srdf_paths[robot_name]]
else:
xacro_cmd = ["xacro"] + [xacro_path] + cmds + ["-o"] + [self._srdf_paths[robot_name]]
if self._xrdf_cmds() is None:
xacro_cmd = ["xacro"] + [xacro_path] + ["-o"] + [self._srdf_paths[robot_name]]
import subprocess
try:
xacro_gen = subprocess.check_call(xacro_cmd)
except Exception:
Journal.log(self.__class__.__name__,
"_generate_srdf",
"Failed to generate " + robot_name + "\'s SRDF!!!",
LogType.EXCEP,
throw_when_excep = True)
def _generate_urdf(self,
robot_name: str,
robot_pkg_name: str):
# we generate the URDF where the description package is located
import rospkg
rospackage = rospkg.RosPack()
descr_path = rospackage.get_path(robot_pkg_name + "_urdf")
urdf_path = descr_path + "/urdf"
xacro_name = robot_pkg_name
xacro_path = urdf_path + "/" + xacro_name + ".urdf.xacro"
self._urdf_paths[robot_name] = self._descr_dump_path + "/" + robot_name + ".urdf"
if self._xrdf_cmds() is not None:
cmds = self._xrdf_cmds()[robot_name]
if cmds is None:
xacro_cmd = ["xacro"] + [xacro_path] + ["-o"] + [self._urdf_paths[robot_name]]
else:
xacro_cmd = ["xacro"] + [xacro_path] + cmds + ["-o"] + [self._urdf_paths[robot_name]]
if self._xrdf_cmds() is None:
xacro_cmd = ["xacro"] + [xacro_path] + ["-o"] + [self._urdf_paths[robot_name]]
import subprocess
try:
xacro_gen = subprocess.check_call(xacro_cmd)
except Exception:
Journal.log(self.__class__.__name__,
"_generate_urdf",
"Failed to generate " + robot_name + "\'s URDF!!!",
LogType.EXCEP,
throw_when_excep = True)
def _generate_rob_descriptions(self,
robot_name: str,
robot_pkg_name: str):
self._descr_dump_path = "/tmp/" + f"{self.__class__.__name__}"
Journal.log(self.__class__.__name__,
"update_root_offsets",
"generating URDF for robot "+ f"{robot_name}, of type {robot_pkg_name}...",
LogType.STAT,
throw_when_excep = True)
self._generate_urdf(robot_name=robot_name,
robot_pkg_name=robot_pkg_name)
Journal.log(self.__class__.__name__,
"update_root_offsets",
"generating SRDF for robot "+ f"{robot_name}, of type {robot_pkg_name}...",
LogType.STAT,
throw_when_excep = True)
# we also generate SRDF files, which are useful for control
self._generate_srdf(robot_name=robot_name,
robot_pkg_name=robot_pkg_name)
def _import_urdf(self,
robot_name: str,
import_config: omni.importer.urdf._urdf.ImportConfig = None,
fix_base = False,
self_collide = False,
merge_fixed = True):
Journal.log(self.__class__.__name__,
"update_root_offsets",
"importing robot URDF",
LogType.STAT,
throw_when_excep = True)
if import_config is None:
# build a fresh config per call (a shared default instance would be
# mutated by the settings below and leak across calls)
import_config = _urdf.ImportConfig()
_urdf.acquire_urdf_interface()
# we overwrite some settings which are bound to be fixed
import_config.merge_fixed_joints = merge_fixed # makes sim more stable
# in case of fixed joints with light objects
import_config.import_inertia_tensor = True
# import_config.convex_decomp = False
import_config.fix_base = fix_base
import_config.self_collision = self_collide
# import_config.distance_scale = 1
# import_config.make_default_prim = True
# import_config.create_physics_scene = True
# import_config.default_drive_strength = 1047.19751
# import_config.default_position_drive_damping = 52.35988
# import_config.default_drive_type = _urdf.UrdfJointTargetType.JOINT_DRIVE_POSITION
# import URDF
success, robot_prim_path_default = omni.kit.commands.execute(
"URDFParseAndImportFile",
urdf_path=self._urdf_paths[robot_name],
import_config=import_config,
)
robot_base_prim_path = self._template_env_ns + "/" + robot_name
# moving default prim to base prim path for cloning
move_prim(robot_prim_path_default, # from
robot_base_prim_path) # to
return success
def _init_contact_sensors(self):
for i in range(0, len(self.robot_names)):
robot_name = self.robot_names[i]
# creates base contact sensor (which is then cloned)
self.omni_contact_sensors[robot_name].create_contact_sensors(
self._world,
self._env_ns
)
def _init_robots_state(self):
for i in range(0, len(self.robot_names)):
robot_name = self.robot_names[i]
pose = self._robots_art_views[robot_name].get_world_poses(
clone = True) # tuple: (pos, quat)
# root p (measured, previous, default)
self._root_p[robot_name] = pose[0]
self._root_p_prev[robot_name] = torch.clone(pose[0])
self._root_p_default[robot_name] = torch.clone(pose[0]) + self.distr_offset[robot_name]
# root q (measured, previous, default)
self._root_q[robot_name] = pose[1] # root orientation
self._root_q_prev[robot_name] = torch.clone(pose[1])
self._root_q_default[robot_name] = torch.clone(pose[1])
# jnt q (measured, previous, default)
self._jnts_q[robot_name] = self._robots_art_views[robot_name].get_joint_positions(
clone = True) # joint positions
self._jnts_q_prev[robot_name] = self._robots_art_views[robot_name].get_joint_positions(
clone = True)
self._jnts_q_default[robot_name] = self.homers[robot_name].get_homing(clone=True)
# root v (measured, default)
self._root_v[robot_name] = self._robots_art_views[robot_name].get_linear_velocities(
clone = True) # root lin. velocity
self._root_v_default[robot_name] = torch.full((self._root_v[robot_name].shape[0], self._root_v[robot_name].shape[1]),
0.0,
dtype=self.torch_dtype,
device=self.torch_device)
# root omega (measured, default)
self._root_omega[robot_name] = self._robots_art_views[robot_name].get_angular_velocities(
clone = True) # root ang. velocity
self._root_omega_default[robot_name] = torch.full((self._root_omega[robot_name].shape[0], self._root_omega[robot_name].shape[1]),
0.0,
dtype=self.torch_dtype,
device=self.torch_device)
# joints v (measured, default)
self._jnts_v[robot_name] = self._robots_art_views[robot_name].get_joint_velocities(
clone = True) # joint velocities
self._jnts_v_default[robot_name] = torch.full((self._jnts_v[robot_name].shape[0], self._jnts_v[robot_name].shape[1]),
0.0,
dtype=self.torch_dtype,
device=self.torch_device)
self._jnts_eff_default[robot_name] = torch.full((self._jnts_v[robot_name].shape[0], self._jnts_v[robot_name].shape[1]),
0.0,
dtype=self.torch_dtype,
device=self.torch_device)
self._root_pos_offsets[robot_name] = torch.zeros((self.num_envs, 3),
device=self.torch_device) # reference position offsets
self._root_q_offsets[robot_name] = torch.zeros((self.num_envs, 4),
device=self.torch_device)
self._root_q_offsets[robot_name][:, 0] = 1.0 # init to valid identity quaternion
self.update_root_offsets(robot_name)
def _calc_robot_distrib(self):
import math
# we distribute robots in a single env. along the
# circumference of a circle of given radius
n_robots = len(self.robot_names)
offset_baseangle = 2 * math.pi / n_robots
for i in range(n_robots):
offset_angle = offset_baseangle * (i + 1)
robot_offset_wrt_center = torch.tensor([self.spawning_radius * math.cos(offset_angle),
self.spawning_radius * math.sin(offset_angle), 0],
device=self.torch_device,
dtype=self.torch_dtype)
# list with n references to the original tensor
tensor_list = [robot_offset_wrt_center] * self.num_envs
self.distr_offset[self.robot_names[i]] = torch.stack(tensor_list, dim=0)
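# e.g. with 4 robots and spawning_radius = 1.0, the offsets land at angles
# pi/2, pi, 3*pi/2 and 2*pi on the circle, i.e. (0, 1, 0), (-1, 0, 0),
# (0, -1, 0) and (1, 0, 0), replicated across all environments.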
def _get_robots_state(self,
env_indxs: torch.Tensor = None,
robot_names: List[str] = None,
dt: float = None,
reset: bool = False):
rob_names = robot_names if (robot_names is not None) else self.robot_names
if env_indxs is not None:
for i in range(0, len(rob_names)):
robot_name = rob_names[i]
pose = self._robots_art_views[robot_name].get_world_poses(
clone = True,
indices=env_indxs) # tuple: (pos, quat)
self._root_p[robot_name][env_indxs, :] = pose[0]
self._root_q[robot_name][env_indxs, :] = pose[1] # root orientation
self._jnts_q[robot_name][env_indxs, :] = self._robots_art_views[robot_name].get_joint_positions(
clone = True,
indices=env_indxs) # joint positions
if dt is None:
# we get velocities from the simulation. This is not good since
# these can actually represent artifacts which do not have physical meaning.
# It's better to obtain them by differentiation to avoid issues with controllers, etc...
self._root_v[robot_name][env_indxs, :] = self._robots_art_views[robot_name].get_linear_velocities(
clone = True,
indices=env_indxs) # root lin. velocity
self._root_omega[robot_name][env_indxs, :] = self._robots_art_views[robot_name].get_angular_velocities(
clone = True,
indices=env_indxs) # root ang. velocity
self._jnts_v[robot_name][env_indxs, :] = self._robots_art_views[robot_name].get_joint_velocities(
clone = True,
indices=env_indxs) # joint velocities
else:
# differentiate numerically
if not reset:
self._root_v[robot_name][env_indxs, :] = (self._root_p[robot_name][env_indxs, :] - \
self._root_p_prev[robot_name][env_indxs, :]) / dt
self._root_omega[robot_name][env_indxs, :] = quat_to_omega(self._root_q[robot_name][env_indxs, :],
self._root_q_prev[robot_name][env_indxs, :],
dt)
self._jnts_v[robot_name][env_indxs, :] = (self._jnts_q[robot_name][env_indxs, :] - \
self._jnts_q_prev[robot_name][env_indxs, :]) / dt
else:
# to avoid issues when differentiating numerically
self._root_v[robot_name][env_indxs, :].zero_()
self._root_omega[robot_name][env_indxs, :].zero_()
self._jnts_v[robot_name][env_indxs, :].zero_()
# update "previous" data for numerical differentiation
self._root_p_prev[robot_name][env_indxs, :] = self._root_p[robot_name][env_indxs, :]
self._root_q_prev[robot_name][env_indxs, :] = self._root_q[robot_name][env_indxs, :]
self._jnts_q_prev[robot_name][env_indxs, :] = self._jnts_q[robot_name][env_indxs, :]
else:
# updating data for all environments
for i in range(0, len(rob_names)):
robot_name = rob_names[i]
pose = self._robots_art_views[robot_name].get_world_poses(
clone = True) # tuple: (pos, quat)
self._root_p[robot_name][:, :] = pose[0]
self._root_q[robot_name][:, :] = pose[1] # root orientation
self._jnts_q[robot_name][:, :] = self._robots_art_views[robot_name].get_joint_positions(
clone = True) # joint positions
if dt is None:
# we get velocities from the simulation. This is not good since
# these can actually represent artifacts which do not have physical meaning.
# It's better to obtain them by differentiation to avoid issues with controllers, etc...
self._root_v[robot_name][:, :] = self._robots_art_views[robot_name].get_linear_velocities(
clone = True) # root lin. velocity
self._root_omega[robot_name][:, :] = self._robots_art_views[robot_name].get_angular_velocities(
clone = True) # root ang. velocity
self._jnts_v[robot_name][:, :] = self._robots_art_views[robot_name].get_joint_velocities(
clone = True) # joint velocities
else:
# differentiate numerically
if not reset:
self._root_v[robot_name][:, :] = (self._root_p[robot_name][:, :] - \
self._root_p_prev[robot_name][:, :]) / dt
self._root_omega[robot_name][:, :] = quat_to_omega(self._root_q[robot_name][:, :],
self._root_q_prev[robot_name][:, :],
dt)
self._jnts_v[robot_name][:, :] = (self._jnts_q[robot_name][:, :] - \
self._jnts_q_prev[robot_name][:, :]) / dt
# self._jnts_v[robot_name][:, :].zero_()
else:
# to avoid issues when differentiating numerically
self._root_v[robot_name][:, :].zero_()
self._root_omega[robot_name][:, :].zero_()
self._jnts_v[robot_name][:, :].zero_()
# update "previous" data for numerical differentiation
self._root_p_prev[robot_name][:, :] = self._root_p[robot_name][:, :]
self._root_q_prev[robot_name][:, :] = self._root_q[robot_name][:, :]
self._jnts_q_prev[robot_name][:, :] = self._jnts_q[robot_name][:, :]
def get_states(self,
env_indxs: torch.Tensor = None,
robot_names: List[str] = None):
if self.use_diff_velocities:
self._get_robots_state(dt = self.integration_dt(),
env_indxs = env_indxs,
robot_names = robot_names) # updates robot states
# but velocities are obtained via num. differentiation
else:
self._get_robots_state(env_indxs = env_indxs,
robot_names = robot_names) # velocities directly from simulator (can
# introduce relevant artifacts, making them unrealistic)
def _custom_post_init(self):
# can be overridden by child class
pass
def _set_robots_default_jnt_config(self):
# sets Isaac's internal joint defaults. Useful when resetting
# whole scenes or views; single-env resets have to be implemented
# manually
# we use the homing of the robots
if (self._world_initialized):
for i in range(0, len(self.robot_names)):
robot_name = self.robot_names[i]
homing = self.homers[robot_name].get_homing()
self._robots_art_views[robot_name].set_joints_default_state(positions= homing,
velocities = torch.zeros((homing.shape[0], homing.shape[1]), \
dtype=self.torch_dtype, device=self.torch_device),
efforts = torch.zeros((homing.shape[0], homing.shape[1]), \
dtype=self.torch_dtype, device=self.torch_device))
else:
Journal.log(self.__class__.__name__,
"_set_robots_default_jnt_config",
"Before calling __set_robots_default_jnt_config(), you need to reset the World" + \
" at least once and call post_initialization_steps()",
LogType.EXCEP,
throw_when_excep = True)
def _set_robots_root_default_config(self):
if (self._world_initialized):
for i in range(0, len(self.robot_names)):
robot_name = self.robot_names[i]
self._robots_art_views[robot_name].set_default_state(positions = self._root_p_default[robot_name],
orientations = self._root_q_default[robot_name])
else:
Journal.log(self.__class__.__name__,
"_generate_urdf",
"Before calling _set_robots_root_default_config(), you need to reset the World" + \
" at least once and call post_initialization_steps()",
LogType.EXCEP,
throw_when_excep = True)
return True
def _get_solver_info(self):
for i in range(0, len(self.robot_names)):
robot_name = self.robot_names[i]
self._solver_position_iteration_counts[robot_name] = self._robots_art_views[robot_name].get_solver_position_iteration_counts()
self._solver_velocity_iteration_counts[robot_name] = self._robots_art_views[robot_name].get_solver_velocity_iteration_counts()
self._solver_stabilization_threshs[robot_name] = self._robots_art_views[robot_name].get_stabilization_thresholds()
def _update_art_solver_options(self):
# sets new solver iteration options for specific articulations
self._get_solver_info() # gets current solver info for the articulations of the
# environments, so that dictionaries are filled properly
if (self._world_initialized):
for i in range(0, len(self.robot_names)):
robot_name = self.robot_names[i]
# increase by a factor
self._solver_position_iteration_counts[robot_name] = torch.full((self.num_envs,), self._solver_position_iteration_count)
self._solver_velocity_iteration_counts[robot_name] = torch.full((self.num_envs,), self._solver_velocity_iteration_count)
self._solver_stabilization_threshs[robot_name] = torch.full((self.num_envs,), self._solver_stabilization_thresh)
self._robots_art_views[robot_name].set_solver_position_iteration_counts(self._solver_position_iteration_counts[robot_name])
self._robots_art_views[robot_name].set_solver_velocity_iteration_counts(self._solver_velocity_iteration_counts[robot_name])
self._robots_art_views[robot_name].set_stabilization_thresholds(self._solver_stabilization_threshs[robot_name])
self._get_solver_info() # gets again solver info for articulation, so that it's possible to debug if
# the operation was successful
else:
Journal.log(self.__class__.__name__,
"_set_robots_default_jnt_config",
"Before calling update_art_solver_options(), you need to reset the World at least once!",
LogType.EXCEP,
throw_when_excep = True)
def _print_envs_info(self):
if (self._world_initialized):
print("TASK INFO:")
for i in range(0, len(self.robot_names)):
robot_name = self.robot_names[i]
task_info = f"[{robot_name}]" + "\n" + \
"bodies: " + str(self._robots_art_views[robot_name].body_names) + "\n" + \
"n. prims: " + str(self._robots_art_views[robot_name].count) + "\n" + \
"prims names: " + str(self._robots_art_views[robot_name].prim_paths) + "\n" + \
"n. bodies: " + str(self._robots_art_views[robot_name].num_bodies) + "\n" + \
"n. dofs: " + str(self._robots_art_views[robot_name].num_dof) + "\n" + \
"dof names: " + str(self._robots_art_views[robot_name].dof_names) + "\n" + \
"solver_position_iteration_counts: " + str(self._solver_position_iteration_counts[robot_name]) + "\n" + \
"solver_velocity_iteration_counts: " + str(self._solver_velocity_iteration_counts[robot_name]) + "\n" + \
"stabiliz. thresholds: " + str(self._solver_stabilization_threshs[robot_name])
# print("dof limits: " + str(self._robots_art_views[robot_name].get_dof_limits()))
# print("effort modes: " + str(self._robots_art_views[robot_name].get_effort_modes()))
# print("dof gains: " + str(self._robots_art_views[robot_name].get_gains()))
# print("dof max efforts: " + str(self._robots_art_views[robot_name].get_max_efforts()))
# print("dof gains: " + str(self._robots_art_views[robot_name].get_gains()))
# print("physics handle valid: " + str(self._robots_art_views[robot_name].is_physics_handle_valid())
Journal.log(self.__class__.__name__,
"_print_envs_info",
task_info,
LogType.STAT,
throw_when_excep = True)
else:
Journal.log(self.__class__.__name__,
"_set_robots_default_jnt_config",
"Before calling __print_envs_info(), you need to reset the World at least once!",
LogType.EXCEP,
throw_when_excep = True)
def _fill_robot_info_from_world(self):
if self._world_initialized:
for i in range(0, len(self.robot_names)):
robot_name = self.robot_names[i]
self.robot_bodynames[robot_name] = self._robots_art_views[robot_name].body_names
self.robot_n_links[robot_name] = self._robots_art_views[robot_name].num_bodies
self.robot_n_dofs[robot_name] = self._robots_art_views[robot_name].num_dof
self.robot_dof_names[robot_name] = self._robots_art_views[robot_name].dof_names
else:
Journal.log(self.__class__.__name__,
"_fill_robot_info_from_world",
"Before calling _fill_robot_info_from_world(), you need to reset the World at least once!",
LogType.EXCEP,
throw_when_excep = True)
def _init_homing_managers(self):
if self._world_initialized:
for i in range(0, len(self.robot_names)):
robot_name = self.robot_names[i]
self.homers[robot_name] = OmniRobotHomer(articulation=self._robots_art_views[robot_name],
srdf_path=self._srdf_paths[robot_name],
device=self.torch_device,
dtype=self.torch_dtype)
else:
exception = "you should reset the World at least once and call the " + \
"post_initialization_steps() method before initializing the " + \
"homing manager."
Journal.log(self.__class__.__name__,
"_init_homing_managers",
exception,
LogType.EXCEP,
throw_when_excep = True)
def _init_jnt_imp_control(self):
if self._world_initialized:
for i in range(0, len(self.robot_names)):
robot_name = self.robot_names[i]
# creates impedance controller
self.jnt_imp_controllers[robot_name] = OmniJntImpCntrl(articulation=self._robots_art_views[robot_name],
default_pgain = self.default_jnt_stiffness, # defaults
default_vgain = self.default_jnt_damping,
override_art_controller=self._override_art_controller,
filter_dt = None,
filter_BW = 50,
device= self.torch_device,
dtype=self.torch_dtype,
enable_safety=True,
enable_profiling=self._debug_enabled,
urdf_path=self._urdf_paths[robot_name],
debug_checks = self._debug_enabled)
self.reset_jnt_imp_control(robot_name)
else:
exception = "you should reset the World at least once and call the " + \
"post_initialization_steps() method before initializing the " + \
"joint impedance controller."
Journal.log(self.__class__.__name__,
"_init_homing_managers",
exception,
LogType.EXCEP,
throw_when_excep = True)
def _set_initial_camera_params(self,
camera_position=[10, 10, 3],
camera_target=[0, 0, 0]):
set_camera_view(eye=camera_position,
target=camera_target,
camera_prim_path="/OmniverseKit_Persp")
| 68,642 | Python | 47.995717 | 142 | 0.49324 |
AndrePatri/OmniRoboGym/omni_robo_gym/tests/test_lunar_lander_stable_bs3.py | # Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected])
#
# This file is part of OmniRoboGym and distributed under the General Public License version 2 license.
#
# OmniRoboGym is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# OmniRoboGym is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>.
#
import gymnasium as gym
from stable_baselines3 import DQN
from stable_baselines3.common.evaluation import evaluate_policy
# Create environment
env = gym.make("LunarLander-v2", render_mode="rgb_array")
# Instantiate the agent
model = DQN("MlpPolicy", env, verbose=1)
# Train the agent and display a progress bar
model.learn(total_timesteps=int(2e5), progress_bar=True)
# Save the agent
model.save("dqn_lunar")
del model # delete trained model to demonstrate loading
# Load the trained agent
# NOTE: if you have loading issue, you can pass `print_system_info=True`
# to compare the system on which the model was trained vs the current one
# model = DQN.load("dqn_lunar", env=env, print_system_info=True)
model = DQN.load("dqn_lunar", env=env)
# Evaluate the agent
# NOTE: If you use wrappers with your environment that modify rewards,
# this will be reflected here. To evaluate with original rewards,
# wrap environment in a "Monitor" wrapper before other wrappers.
mean_reward, std_reward = evaluate_policy(model, model.get_env(), n_eval_episodes=10)
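# report the evaluation outcome (illustrative addition, not in the original script)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")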
# Enjoy trained agent
vec_env = model.get_env()
obs = vec_env.reset()
n_pred_iterations = 100000
for i in range(n_pred_iterations):
action, _states = model.predict(obs, deterministic=True)
obs, rewards, dones, info = vec_env.step(action)
vec_env.render("human")
| 2,169 | Python | 38.454545 | 102 | 0.751498 |
AndrePatri/OmniRoboGym/omni_robo_gym/tests/create_terrain_demo.py | # Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected])
#
# This file is part of OmniRoboGym and distributed under the General Public License version 2 license.
#
# OmniRoboGym is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# OmniRoboGym is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPT_DIR)
import omni
from omni.isaac.kit import SimulationApp
import numpy as np
simulation_app = SimulationApp({"headless": False})
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.utils.prims import define_prim
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.materials import PreviewSurface
from omni.isaac.cloner import GridCloner
from pxr import UsdLux, UsdShade, Sdf
from omni_robo_gym.utils.terrain_utils import *
from omni_robo_gym.utils.terrains import RlTerrains
class TerrainsTest(BaseTask):
def __init__(self,
name) -> None:
BaseTask.__init__(self, name=name)
self._device = "cpu"
def set_up_scene(self,
scene) -> None:
self._stage = get_current_stage()
distantLight = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/World/DistantLight"))
distantLight.CreateIntensityAttr(2000)
self.terrains = RlTerrains(self._stage)
self.terrains.get_obstacles_terrain(
terrain_size = 40.0,
num_obs = 200,
max_height = 0.5,
min_size = 0.5,
max_size = 5.0,)
super().set_up_scene(scene)
return
def post_reset(self):
pass
def get_observations(self):
pass
def calculate_metrics(self) -> None:
pass
def is_done(self) -> None:
pass
if __name__ == "__main__":
world = World(
stage_units_in_meters=1.0,
rendering_dt=1.0/60.0,
backend="torch",
device="cpu",
)
terrain_creation_task = TerrainsTest(name="CustomTerrain",
)
world.add_task(terrain_creation_task)
world.reset()
while simulation_app.is_running():
if world.is_playing():
if world.current_time_step_index == 0:
world.reset(soft=True)
world.step(render=True)
else:
world.step(render=True)
simulation_app.close() | 4,763 | Python | 33.773722 | 102 | 0.672475 |
AndrePatri/OmniRoboGym/omni_robo_gym/utils/contact_sensor.py | import torch
import numpy as np
from omni.isaac.sensor import ContactSensor
from typing import List, Dict
from omni.isaac.core.world import World
from omni.isaac.core.prims import RigidPrimView, RigidContactView
from SharsorIPCpp.PySharsorIPC import LogType
from SharsorIPCpp.PySharsorIPC import Journal
class OmniContactSensors:
def __init__(self,
name: str, # robot name for which contact sensors are to be created
n_envs: int, # number of environments
contact_prims: Dict[str, List] = None,
contact_offsets: Dict[str, Dict[str, np.ndarray]] = None,
sensor_radii: Dict[str, Dict[str, np.ndarray]] = None,
device = "cuda",
dtype = torch.float64,
enable_debug: bool = False,
filter_paths: List[str] = ["/World/terrain/GroundPlane/CollisionPlane"]):
# contact sensors abstraction for a single robot
# over multiple environments
self._filter_paths = filter_paths
self._enable_debug = enable_debug
self.n_envs = n_envs
self.device = device
if self.device == "cuda":
self.using_gpu = True
else:
self.using_gpu = False
self.dtype = dtype
self.name = name
self.contact_radius_default = 0.003
# parses contact dictionaries and checks for issues
self._parse_contact_dicts(self.name,
contact_prims,
contact_offsets,
sensor_radii)
self.n_sensors = len(self.contact_prims)
self.in_contact = torch.full((n_envs, self.n_sensors),
False,
device = self.device,
dtype=torch.bool)
self.force_norm = torch.full((n_envs, self.n_sensors),
-1.0,
device = self.device,
dtype=self.dtype)
self.n_contacts = torch.full((n_envs, self.n_sensors),
0,
device = self.device,
dtype=torch.int)
self.contact_sensors = [[None] * self.n_sensors for _ in range(n_envs)] # outer: environment,
# inner: contact sensor, ordered as in contact_prims
# (a list comprehension is needed here so that each environment gets its own
# inner list instead of n_envs references to the same one)
self.contact_geom_prim_views = [None] * self.n_sensors
# self.contact_views = [None] * self.n_sensors
def _parse_contact_dicts(self,
name: str,
contact_prims: Dict[str, List],
contact_offsets: Dict[str, Dict[str, np.ndarray]],
sensor_radii: Dict[str, Dict[str, np.ndarray]]):
try:
self.contact_prims = contact_prims[name]
except KeyError:
Journal.log(self.__class__.__name__,
"_parse_contact_dicts",
f"Could not find key {name} in contact_prims dictionary.",
LogType.EXCEP,
throw_when_excep = True)
try:
self.contact_offsets = contact_offsets[name]
except KeyError:
Journal.log(self.__class__.__name__,
"_parse_contact_dicts",
f"Could not find key {name} in contact_offsets dictionary.",
LogType.EXCEP,
throw_when_excep = True)
try:
self.sensor_radii = sensor_radii[name]
except KeyError:
Journal.log(self.__class__.__name__,
"_parse_contact_dicts",
f"Could not find key {name} in sensor_radii dictionary.",
LogType.EXCEP,
throw_when_excep = True)
contact_offsets_ok = all(item in self.contact_offsets for item in self.contact_prims)
sensor_radii_ok = all(item in self.sensor_radii for item in self.contact_prims)
if not contact_offsets_ok:
warning = f"Provided contact_offsets dictionary does not posses all the necessary keys. " + \
f"It should contain all of [{' '.join(self.contact_prims)}]. \n" + \
f"Resetting all offsets to zero..."
Journal.log(self.__class__.__name__,
"_parse_contact_dicts",
warning,
LogType.WARN,
throw_when_excep = True)
for i in range(0, len(self.contact_prims)):
self.contact_offsets[self.contact_prims[i]] = np.array([0.0, 0.0, 0.0])
if not sensor_radii_ok:
warning = f"Provided sensor_radii dictionary does not posses all the necessary keys. " + \
f"It should contain all of [{' '.join(self.contact_prims)}]. \n" + \
f"Resetting all radii to {self.contact_radius_default} ..."
Journal.log(self.__class__.__name__,
"_parse_contact_dicts",
warning,
LogType.WARN,
throw_when_excep = True)
for i in range(0, len(self.contact_prims)):
self.sensor_radii[self.contact_prims[i]] = self.contact_radius_default
def create_contact_sensors(self,
world: World,
envs_namespace: str):
robot_name = self.name
contact_link_names = self.contact_prims
for sensor_idx in range(0, self.n_sensors):
# we create views of the contact links for all envs
if self.contact_geom_prim_views[sensor_idx] is None:
self.contact_geom_prim_views[sensor_idx] = RigidPrimView(prim_paths_expr=envs_namespace + "/env_.*/" + robot_name + \
"/" + contact_link_names[sensor_idx],
name= self.name + "RigidPrimView" + contact_link_names[sensor_idx],
contact_filter_prim_paths_expr= self._filter_paths,
prepare_contact_sensors=True,
track_contact_forces = True,
disable_stablization = False,
reset_xform_properties=False,
max_contact_count = self.n_envs
)
world.scene.add(self.contact_geom_prim_views[sensor_idx])
# for env_idx in range(0, self.n_envs):
# # env_idx = 0 # create contact sensors for base env only
# for sensor_idx in range(0, self.n_sensors):
# contact_link_prim_path = envs_namespace + f"/env_{env_idx}" + \
# "/" + robot_name + \
# "/" + contact_link_names[sensor_idx]
# sensor_prim_path = contact_link_prim_path + \
# "/contact_sensor" # contact sensor prim path
# print(f"[{self.__class__.__name__}]" + f"[{self.journal.status}]" + ": creating contact sensor at " +
# f"{sensor_prim_path}...")
# contact_sensor = ContactSensor(
# prim_path=sensor_prim_path,
# name=f"{robot_name}{env_idx}_{contact_link_names[sensor_idx]}_contact_sensor",
# min_threshold=0,
# max_threshold=10000000,
# radius=self.sensor_radii[contact_link_names[sensor_idx]],
# translation=self.contact_offsets[contact_link_names[sensor_idx]],
# position=None
# )
# self.contact_sensors[env_idx][sensor_idx] = world.scene.add(contact_sensor)
# self.contact_sensors[env_idx][sensor_idx].add_raw_contact_data_to_frame()
# print(f"[{self.__class__.__name__}]" + f"[{self.journal.status}]" + ": contact sensor at " +
# f"{sensor_prim_path} created.")
def get(self,
dt: float,
contact_link: str,
env_indxs: torch.Tensor = None,
clone = False):
index = -1
try:
index = self.contact_prims.index(contact_link)
except ValueError:
exception = f"Could not find contact link {contact_link} in contact list {' '.join(self.contact_prims)}."
Journal.log(self.__class__.__name__,
"get",
exception,
LogType.EXCEP,
throw_when_excep = True)
if env_indxs is None:
return self.contact_geom_prim_views[index].get_net_contact_forces(clone = clone,
dt = dt).view(self.n_envs, 3)
else:
if self._enable_debug:
if env_indxs is not None:
if not isinstance(env_indxs, torch.Tensor):
msg = "Provided env_indxs should be a torch tensor of indexes!"
Journal.log(self.__class__.__name__,
"get",
msg,
LogType.EXCEP,
throw_when_excep = True)
if not len(env_indxs.shape) == 1:
msg = "Provided robot_indxs should be a 1D torch tensor!"
Journal.log(self.__class__.__name__,
"get",
msg,
LogType.EXCEP,
throw_when_excep = True)
if self.using_gpu:
if not env_indxs.device.type == "cuda":
error = "Provided env_indxs should be on GPU!"
Journal.log(self.__class__.__name__,
"_step_jnt_imp_control",
error,
LogType.EXCEP,
True)
else:
if not env_indxs.device.type == "cpu":
error = "Provided env_indxs should be on CPU!"
Journal.log(self.__class__.__name__,
"_step_jnt_imp_control",
error,
LogType.EXCEP,
True)
return self.contact_geom_prim_views[index].get_net_contact_forces(clone = clone,
dt = dt).view(self.n_envs, 3)[env_indxs, :] | 10,792 | Python | 43.415638 | 133 | 0.47424 |
AndrePatri/OmniRoboGym/omni_robo_gym/utils/math_utils.py | import torch
import time
import torch.nn.functional as F
def normalize_quaternion(q):
# Normalizes the quaternion
return q / torch.norm(q, dim=-1, keepdim=True)
def quaternion_difference(q1, q2):
""" Compute the quaternion difference needed to rotate from q1 to q2 """
def quat_conjugate(q):
# Computes the conjugate of a quaternion
w, x, y, z = q.unbind(-1)
return torch.stack([w, -x, -y, -z], dim=-1)
q1_conj = quat_conjugate(q1)
return quaternion_multiply(q2, q1_conj)
def quaternion_multiply(q1, q2):
""" Multiply two quaternions. """
w1, x1, y1, z1 = q1.unbind(-1)
w2, x2, y2, z2 = q2.unbind(-1)
return torch.stack([
w1*w2 - x1*x2 - y1*y2 - z1*z2,
w1*x2 + x1*w2 + y1*z2 - z1*y2,
w1*y2 - x1*z2 + y1*w2 + z1*x2,
w1*z2 + x1*y2 - y1*x2 + z1*w2
], dim=-1)
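# minimal sanity check (sketch): multiplying by the identity quaternion
# [1, 0, 0, 0] leaves the input unchanged:
# q = torch.tensor([0.7071, 0.7071, 0.0, 0.0])
# identity = torch.tensor([1.0, 0.0, 0.0, 0.0])
# assert torch.allclose(quaternion_multiply(identity, q), q)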
def quaternion_to_angular_velocity(q_diff, dt):
""" Convert a quaternion difference to an angular velocity vector. """
angle = 2 * torch.arccos(q_diff[..., 0].clamp(-1.0, 1.0)) # Clamping for numerical stability
axis = q_diff[..., 1:]
norm = axis.norm(dim=-1, keepdim=True)
norm = torch.where(norm > 0, norm, torch.ones_like(norm))
axis = axis / norm
angle = angle.unsqueeze(-1) # Add an extra dimension for broadcasting
return (angle / dt) * axis
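# note: this implements omega = (theta / dt) * n, where theta = 2*arccos(w)
# is the rotation angle of the difference quaternion and n its (normalized)
# rotation axis.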
def quat_to_omega(q0, q1, dt):
""" Convert quaternion pairs to angular velocities """
if q0.shape != q1.shape:
raise ValueError("Tensor shapes do not match in quat_to_omega.")
# Normalize quaternions and compute differences
q0_normalized = normalize_quaternion(q0)
q1_normalized = normalize_quaternion(q1)
q_diff = quaternion_difference(q0_normalized, q1_normalized)
return quaternion_to_angular_velocity(q_diff, dt)
def rel_vel(offset_q0_q1,
v0):
# Calculate relative linear velocity in frame q1 from linear velocity in frame q0 using quaternions.
# Ensure the quaternion is normalized
offset_q0_q1 = F.normalize(offset_q0_q1, p=2, dim=0)
# Convert the linear velocity vector to a quaternion
v0_q = torch.cat([torch.zeros(1, dtype=v0.dtype, device=v0.device), v0])
# Rotate the linear velocity quaternion using the orientation offset quaternion
rotated_velocity_quaternion = quaternion_multiply(offset_q0_q1, v0_q)
offset_q0_q1_inverse = torch.cat([offset_q0_q1[0:1], -offset_q0_q1[1:]])
# Multiply by the conjugate of the orientation offset quaternion to obtain the result in frame f1
v1_q = quaternion_multiply(rotated_velocity_quaternion, offset_q0_q1_inverse)
# Extract the linear velocity vector from the quaternion result
v1 = v1_q[1:]
return v1
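# sketch: rel_vel applies the rotation q * v * q^-1; with a 90 deg yaw offset
# q = [cos(pi/4), 0, 0, sin(pi/4)], the vector v0 = [1, 0, 0] maps to
# approximately [0, 1, 0].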
# Example usage
n_envs = 100 # Number of environments
dt = 0.1 # Time step
# Random example tensors for initial and final orientations
q_initial = torch.randn(n_envs, 4)
q_final = torch.randn(n_envs, 4)
start_time = time.perf_counter()
# Convert to angular velocities
omega = quat_to_omega(q_initial, q_final, dt)
end_time = time.perf_counter()
elapsed_time = end_time - start_time
print(f"Time taken to compute angular velocities: {elapsed_time:.6f} seconds")
| 3,149 | Python | 32.870967 | 104 | 0.668466 |
AndrePatri/OmniRoboGym/omni_robo_gym/utils/terrain_utils.py | # Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected])
#
# This file is part of OmniRoboGym and distributed under the General Public License version 2 license.
#
# OmniRoboGym is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# OmniRoboGym is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from numpy.random import choice
from scipy import interpolate
from math import sqrt
from omni.isaac.core.prims import XFormPrim
from pxr import UsdPhysics, Sdf, Gf, PhysxSchema
def random_uniform_terrain(terrain, min_height, max_height, step=1, downsampled_scale=None,):
"""
Generate a uniform noise terrain
Parameters:
terrain (SubTerrain): the terrain
min_height (float): the minimum height of the terrain [meters]
max_height (float): the maximum height of the terrain [meters]
step (float): minimum height change between two points [meters]
downsampled_scale (float): distance between two randomly sampled points (must be larger than or equal to terrain.horizontal_scale)
Returns:
terrain (SubTerrain): updated terrain
"""
if downsampled_scale is None:
downsampled_scale = terrain.horizontal_scale
# switch parameters to discrete units
min_height = int(min_height / terrain.vertical_scale)
max_height = int(max_height / terrain.vertical_scale)
step = int(step / terrain.vertical_scale)
heights_range = np.arange(min_height, max_height + step, step)
height_field_downsampled = np.random.choice(heights_range, (int(terrain.width * terrain.horizontal_scale / downsampled_scale), int(
terrain.length * terrain.horizontal_scale / downsampled_scale)))
x = np.linspace(0, terrain.width * terrain.horizontal_scale, height_field_downsampled.shape[0])
y = np.linspace(0, terrain.length * terrain.horizontal_scale, height_field_downsampled.shape[1])
f = interpolate.interp2d(y, x, height_field_downsampled, kind='linear')
x_upsampled = np.linspace(0, terrain.width * terrain.horizontal_scale, terrain.width)
y_upsampled = np.linspace(0, terrain.length * terrain.horizontal_scale, terrain.length)
z_upsampled = np.rint(f(y_upsampled, x_upsampled))
terrain.height_field_raw += z_upsampled.astype(np.int16)
return terrain
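# hedged usage sketch (assumes a SubTerrain helper with the fields used above,
# as in the Isaac Gym terrain utilities this module mirrors):
# terrain = SubTerrain("terrain", width=256, length=256,
#                      vertical_scale=0.005, horizontal_scale=0.1)
# random_uniform_terrain(terrain, min_height=-0.05, max_height=0.05,
#                        step=0.005, downsampled_scale=0.5)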
def sloped_terrain(terrain, slope=1):
"""
Generate a sloped terrain
Parameters:
terrain (SubTerrain): the terrain
slope (int): positive or negative slope
Returns:
terrain (SubTerrain): updated terrain
"""
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = xx.reshape(terrain.width, 1)
max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * terrain.width)
terrain.height_field_raw[:, np.arange(terrain.length)] += (max_height * xx / terrain.width).astype(terrain.height_field_raw.dtype)
return terrain
def pyramid_sloped_terrain(terrain, slope=1, platform_size=1.):
"""
Generate a sloped terrain
Parameters:
terrain (terrain): the terrain
slope (int): positive or negative slope
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
terrain (SubTerrain): updated terrain
"""
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
center_x = int(terrain.width / 2)
center_y = int(terrain.length / 2)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = (center_x - np.abs(center_x-xx)) / center_x
yy = (center_y - np.abs(center_y-yy)) / center_y
xx = xx.reshape(terrain.width, 1)
yy = yy.reshape(1, terrain.length)
max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * (terrain.width / 2))
terrain.height_field_raw += (max_height * xx * yy).astype(terrain.height_field_raw.dtype)
platform_size = int(platform_size / terrain.horizontal_scale / 2)
x1 = terrain.width // 2 - platform_size
x2 = terrain.width // 2 + platform_size
y1 = terrain.length // 2 - platform_size
y2 = terrain.length // 2 + platform_size
min_h = min(terrain.height_field_raw[x1, y1], 0)
max_h = max(terrain.height_field_raw[x1, y1], 0)
terrain.height_field_raw = np.clip(terrain.height_field_raw, min_h, max_h)
return terrain
def discrete_obstacles_terrain(terrain, max_height, min_size, max_size, num_rects, platform_size=1.):
"""
Generate a terrain with randomly placed rectangular obstacles
Parameters:
terrain (terrain): the terrain
max_height (float): maximum height of the obstacles (range=[-max, -max/2, max/2, max]) [meters]
min_size (float): minimum size of a rectangle obstacle [meters]
max_size (float): maximum size of a rectangle obstacle [meters]
num_rects (int): number of randomly generated obstacles
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
max_height = int(max_height / terrain.vertical_scale)
min_size = int(min_size / terrain.horizontal_scale)
max_size = int(max_size / terrain.horizontal_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
(i, j) = terrain.height_field_raw.shape
height_range = [-max_height, -max_height // 2, max_height // 2, max_height]
width_range = range(min_size, max_size, 4)
length_range = range(min_size, max_size, 4)
for _ in range(num_rects):
width = np.random.choice(width_range)
length = np.random.choice(length_range)
start_i = np.random.choice(range(0, i-width, 4))
start_j = np.random.choice(range(0, j-length, 4))
terrain.height_field_raw[start_i:start_i+width, start_j:start_j+length] = np.random.choice(height_range)
x1 = (terrain.width - platform_size) // 2
x2 = (terrain.width + platform_size) // 2
y1 = (terrain.length - platform_size) // 2
y2 = (terrain.length + platform_size) // 2
terrain.height_field_raw[x1:x2, y1:y2] = 0
return terrain
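# hedged usage sketch (SubTerrain as above; values are illustrative):
# terrain = SubTerrain("terrain", width=400, length=400,
#                      vertical_scale=0.005, horizontal_scale=0.1)
# discrete_obstacles_terrain(terrain, max_height=0.5, min_size=1.0,
#                            max_size=5.0, num_rects=20, platform_size=3.0)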
def wave_terrain(terrain, num_waves=1, amplitude=1.):
"""
Generate a wavy terrain
Parameters:
terrain (terrain): the terrain
num_waves (int): number of sine waves across the terrain length
amplitude (float): amplitude of the waves [meters]
Returns:
terrain (SubTerrain): updated terrain
"""
amplitude = int(0.5*amplitude / terrain.vertical_scale)
if num_waves > 0:
div = terrain.length / (num_waves * np.pi * 2)
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = xx.reshape(terrain.width, 1)
yy = yy.reshape(1, terrain.length)
terrain.height_field_raw += (amplitude*np.cos(yy / div) + amplitude*np.sin(xx / div)).astype(
terrain.height_field_raw.dtype)
return terrain
def stairs_terrain(terrain, step_width, step_height):
"""
Generate a stairs terrain
Parameters:
terrain (terrain): the terrain
step_width (float): the width of the step [meters]
step_height (float): the height of the step [meters]
Returns:
terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
step_width = int(step_width / terrain.horizontal_scale)
step_height = int(step_height / terrain.vertical_scale)
num_steps = terrain.width // step_width
height = step_height
for i in range(num_steps):
terrain.height_field_raw[i * step_width: (i + 1) * step_width, :] += height
height += step_height
return terrain
def pyramid_stairs_terrain(terrain, step_width, step_height, platform_size=1.):
"""
Generate stairs
Parameters:
terrain (terrain): the terrain
step_width (float): the width of the step [meters]
step_height (float): the step_height [meters]
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
step_width = int(step_width / terrain.horizontal_scale)
step_height = int(step_height / terrain.vertical_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
height = 0
start_x = 0
stop_x = terrain.width
start_y = 0
stop_y = terrain.length
while (stop_x - start_x) > platform_size and (stop_y - start_y) > platform_size:
start_x += step_width
stop_x -= step_width
start_y += step_width
stop_y -= step_width
height += step_height
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = height
return terrain
def stepping_stones_terrain(terrain, stone_size, stone_distance, max_height, platform_size=1., depth=-10):
"""
Generate a stepping stones terrain
Parameters:
terrain (terrain): the terrain
stone_size (float): horizontal size of the stepping stones [meters]
stone_distance (float): distance between stones (i.e size of the holes) [meters]
max_height (float): maximum height of the stones (positive and negative) [meters]
platform_size (float): size of the flat platform at the center of the terrain [meters]
depth (float): depth of the holes (default=-10.) [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
stone_size = int(stone_size / terrain.horizontal_scale)
stone_distance = int(stone_distance / terrain.horizontal_scale)
max_height = int(max_height / terrain.vertical_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
height_range = np.arange(-max_height-1, max_height, step=1)
start_x = 0
start_y = 0
terrain.height_field_raw[:, :] = int(depth / terrain.vertical_scale)
if terrain.length >= terrain.width:
while start_y < terrain.length:
stop_y = min(terrain.length, start_y + stone_size)
start_x = np.random.randint(0, stone_size)
# fill first hole
stop_x = max(0, start_x - stone_distance)
terrain.height_field_raw[0: stop_x, start_y: stop_y] = np.random.choice(height_range)
# fill row
while start_x < terrain.width:
stop_x = min(terrain.width, start_x + stone_size)
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = np.random.choice(height_range)
start_x += stone_size + stone_distance
start_y += stone_size + stone_distance
elif terrain.width > terrain.length:
while start_x < terrain.width:
stop_x = min(terrain.width, start_x + stone_size)
start_y = np.random.randint(0, stone_size)
# fill first hole
stop_y = max(0, start_y - stone_distance)
terrain.height_field_raw[start_x: stop_x, 0: stop_y] = np.random.choice(height_range)
# fill column
while start_y < terrain.length:
stop_y = min(terrain.length, start_y + stone_size)
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = np.random.choice(height_range)
start_y += stone_size + stone_distance
start_x += stone_size + stone_distance
x1 = (terrain.width - platform_size) // 2
x2 = (terrain.width + platform_size) // 2
y1 = (terrain.length - platform_size) // 2
y2 = (terrain.length + platform_size) // 2
terrain.height_field_raw[x1:x2, y1:y2] = 0
return terrain
def convert_heightfield_to_trimesh(height_field_raw, horizontal_scale, vertical_scale, slope_threshold=None):
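    """
    Convert a heightfield array to a triangle mesh represented by vertices and triangles.
    If slope_threshold is provided, surfaces whose slope exceeds the threshold are
    corrected into (almost) vertical walls by shifting the affected grid points.
    Parameters:
        height_field_raw (np.array): input heightfield [discrete units]
        horizontal_scale (float): horizontal scale of the heightfield [meters]
        vertical_scale (float): vertical scale of the heightfield [meters]
        slope_threshold (float): slope threshold above which surfaces are made vertical (default=None: no correction)
    Returns:
        vertices (np.array): (num_vertices, 3) array with the mesh vertex positions [meters]
        triangles (np.array): (num_triangles, 3) array with the vertex indices of each triangle
    """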
hf = height_field_raw
num_rows = hf.shape[0]
num_cols = hf.shape[1]
y = np.linspace(0, (num_cols-1)*horizontal_scale, num_cols)
x = np.linspace(0, (num_rows-1)*horizontal_scale, num_rows)
yy, xx = np.meshgrid(y, x)
if slope_threshold is not None:
slope_threshold *= horizontal_scale / vertical_scale
move_x = np.zeros((num_rows, num_cols))
move_y = np.zeros((num_rows, num_cols))
move_corners = np.zeros((num_rows, num_cols))
move_x[:num_rows-1, :] += (hf[1:num_rows, :] - hf[:num_rows-1, :] > slope_threshold)
move_x[1:num_rows, :] -= (hf[:num_rows-1, :] - hf[1:num_rows, :] > slope_threshold)
move_y[:, :num_cols-1] += (hf[:, 1:num_cols] - hf[:, :num_cols-1] > slope_threshold)
move_y[:, 1:num_cols] -= (hf[:, :num_cols-1] - hf[:, 1:num_cols] > slope_threshold)
move_corners[:num_rows-1, :num_cols-1] += (hf[1:num_rows, 1:num_cols] - hf[:num_rows-1, :num_cols-1] > slope_threshold)
move_corners[1:num_rows, 1:num_cols] -= (hf[:num_rows-1, :num_cols-1] - hf[1:num_rows, 1:num_cols] > slope_threshold)
xx += (move_x + move_corners*(move_x == 0)) * horizontal_scale
yy += (move_y + move_corners*(move_y == 0)) * horizontal_scale
# create triangle mesh vertices and triangles from the heightfield grid
vertices = np.zeros((num_rows*num_cols, 3), dtype=np.float32)
vertices[:, 0] = xx.flatten()
vertices[:, 1] = yy.flatten()
vertices[:, 2] = hf.flatten() * vertical_scale
triangles = -np.ones((2*(num_rows-1)*(num_cols-1), 3), dtype=np.uint32)
for i in range(num_rows - 1):
ind0 = np.arange(0, num_cols-1) + i*num_cols
ind1 = ind0 + 1
ind2 = ind0 + num_cols
ind3 = ind2 + 1
start = 2*i*(num_cols-1)
stop = start + 2*(num_cols-1)
triangles[start:stop:2, 0] = ind0
triangles[start:stop:2, 1] = ind3
triangles[start:stop:2, 2] = ind1
triangles[start+1:stop:2, 0] = ind0
triangles[start+1:stop:2, 1] = ind2
triangles[start+1:stop:2, 2] = ind3
return vertices, triangles
def add_terrain_to_stage(stage, vertices, triangles, position=None, orientation=None):
num_faces = triangles.shape[0]
terrain_mesh = stage.DefinePrim("/World/terrain",
"Mesh")
terrain_mesh.GetAttribute("points").Set(vertices)
terrain_mesh.GetAttribute("faceVertexIndices").Set(triangles.flatten())
terrain_mesh.GetAttribute("faceVertexCounts").Set(np.asarray([3]*num_faces))
terrain = XFormPrim(prim_path="/World/terrain",
name="terrain",
position=position,
orientation=orientation)
UsdPhysics.CollisionAPI.Apply(terrain.prim)
# collision_api = UsdPhysics.MeshCollisionAPI.Apply(terrain.prim)
# collision_api.CreateApproximationAttr().Set("meshSimplification")
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(terrain.prim)
physx_collision_api.GetContactOffsetAttr().Set(0.02)
physx_collision_api.GetRestOffsetAttr().Set(0.00)
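# Example pipeline (sketch; assumes a valid USD stage, e.g. from
# omni.usd.get_context().get_stage()):
#
#   terrain = SubTerrain(width=256, length=256,
#                        vertical_scale=0.005, horizontal_scale=0.25)
#   terrain = pyramid_stairs_terrain(terrain, step_width=0.75, step_height=0.1)
#   vertices, triangles = convert_heightfield_to_trimesh(terrain.height_field_raw,
#                                                        horizontal_scale=0.25,
#                                                        vertical_scale=0.005,
#                                                        slope_threshold=1.5)
#   add_terrain_to_stage(stage, vertices, triangles)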
class SubTerrain:
def __init__(self, terrain_name="terrain", width=256, length=256, vertical_scale=1.0, horizontal_scale=1.0):
self.terrain_name = terrain_name
self.vertical_scale = vertical_scale
self.horizontal_scale = horizontal_scale
self.width = width
self.length = length
self.height_field_raw = np.zeros((self.width, self.length), dtype=np.int16)
| 15,700 | Python | 42.016438 | 135 | 0.648535 |
AndrePatri/OmniRoboGym/omni_robo_gym/utils/rt_factor.py | import time
class RtFactor():
def __init__(self,
dt_nom: float,
window_size: int):
self._it_counter = 0
self._dt_nom = dt_nom
self._start_time = time.perf_counter()
self._current_rt_factor = 0.0
self._window_size = window_size
self._real_time = 0
self._nom_time = 0
def update(self):
self._real_time = time.perf_counter() - self._start_time
self._it_counter += 1
self._nom_time += self._dt_nom
self._current_rt_factor = self._nom_time / self._real_time
def reset_due(self):
return (self._it_counter+1) % self._window_size == 0
def get_avrg_step_time(self):
return self._real_time / self._window_size
def get_dt_nom(self):
return self._dt_nom
def get_nom_time(self):
        return self._nom_time
def get(self):
return self._current_rt_factor
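    # Typical usage (sketch): call update() once per simulation step and read
    # the current real-time factor with get(), e.g.
    #   rt = RtFactor(dt_nom=1.0/400.0, window_size=100)
    #   while running:
    #       sim.step()  # hypothetical stepping call
    #       rt.update()
    #       if rt.reset_due():
    #           print(rt.get())
    #           rt.reset()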
def reset(self):
self._it_counter = 0
self._nom_time = 0
self._start_time = time.perf_counter() | 1,096 | Python | 17.913793 | 66 | 0.530109 |
AndrePatri/OmniRoboGym/omni_robo_gym/utils/urdf_helpers.py | import xml.etree.ElementTree as ET
class UrdfLimitsParser:
def __init__(self, urdf_path, joint_names,
backend = "numpy",
device = "cpu"):
self.urdf_path = urdf_path
self.joint_names = joint_names
self.limits_matrix = None
self.backend = backend
self.device = device
if self.backend == "numpy" and \
self.device != "cpu":
raise Exception("When using numpy backend, only cpu device is supported!")
self.parse_urdf()
def parse_urdf(self):
tree = ET.parse(self.urdf_path)
root = tree.getroot()
num_joints = len(self.joint_names)
self.limits_matrix = None
self.inf = None
if self.backend == "numpy":
import numpy as np
self.limits_matrix = np.full((num_joints, 6), np.nan)
self.inf = np.inf
elif self.backend == "torch":
import torch
self.limits_matrix = torch.full((num_joints, 6), torch.nan, device=self.device)
self.inf = torch.inf
else:
raise Exception("Backend not supported")
for joint_name in self.joint_names:
joint_element = root.find(".//joint[@name='{}']".format(joint_name))
            if joint_element is not None:
                limit_element = joint_element.find('limit')
                if limit_element is None:
                    # no <limit> element (e.g. continuous joints): leave the NaN defaults
                    continue
                jnt_index = self.joint_names.index(joint_name)
# position limits
q_lower = float(limit_element.get('lower', - self.inf))
q_upper = float(limit_element.get('upper', self.inf))
# effort limits
effort_limit = float(limit_element.get('effort', self.inf))
# vel limits
velocity_limit = float(limit_element.get('velocity', self.inf))
self.limits_matrix[jnt_index, 0] = q_lower
self.limits_matrix[jnt_index, 3] = q_upper
self.limits_matrix[jnt_index, 1] = - abs(velocity_limit)
self.limits_matrix[jnt_index, 4] = abs(velocity_limit)
self.limits_matrix[jnt_index, 2] = - abs(effort_limit)
self.limits_matrix[jnt_index, 5] = abs(effort_limit)
def get_limits_matrix(self):
return self.limits_matrix
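# Example usage (sketch; the URDF path and joint names are purely illustrative):
#   parser = UrdfLimitsParser(urdf_path="/path/to/robot.urdf",
#                             joint_names=["joint1", "joint2"],
#                             backend="torch",
#                             device="cuda")
#   limits = parser.get_limits_matrix()
#   # columns: [q_min, v_min, eff_min, q_max, v_max, eff_max]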
| 2,425 | Python | 28.228915 | 91 | 0.524536 |
AndrePatri/OmniRoboGym/omni_robo_gym/utils/homing.py | # Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected])
#
# This file is part of OmniRoboGym and distributed under the General Public License version 2 license.
#
# OmniRoboGym is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# OmniRoboGym is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>.
#
from omni.isaac.core.articulations.articulation_view import ArticulationView
import torch
import xml.etree.ElementTree as ET
from SharsorIPCpp.PySharsorIPC import LogType
from SharsorIPCpp.PySharsorIPC import Journal
class OmniRobotHomer:
def __init__(self,
articulation: ArticulationView,
srdf_path: str,
backend = "torch",
device: torch.device = torch.device("cpu"),
dtype = torch.float64):
self.torch_dtype = dtype
if not articulation.initialized:
exception = f"the provided articulation is not initialized properly!"
Journal.log(self.__class__.__name__,
"__init__",
exception,
LogType.EXCEP,
throw_when_excep = True)
self._articulation = articulation
self.srdf_path = srdf_path
self._device = device
self.num_robots = self._articulation.count
self.n_dofs = self._articulation.num_dof
self.jnts_names = self._articulation.dof_names
self.joint_idx_map = {}
for joint in range(0, self.n_dofs):
self.joint_idx_map[self.jnts_names[joint]] = joint
        if (backend != "torch"):
            Journal.log(self.__class__.__name__,
                "__init__",
                "forcing torch backend. Other backends are not yet supported.",
                LogType.WARN,
                throw_when_excep = False)
self._backend = "torch"
self._homing = torch.full((self.num_robots, self.n_dofs),
0.0,
device = self._device,
dtype=self.torch_dtype) # homing configuration
# open srdf and parse the homing field
with open(srdf_path, 'r') as file:
self._srdf_content = file.read()
try:
self._srdf_root = ET.fromstring(self._srdf_content)
# Now 'root' holds the root element of the XML tree.
# You can navigate through the XML tree to extract the tags and their values.
# Example: To find all elements with a specific tag, you can use:
# elements = root.findall('.//your_tag_name')
# Example: If you know the specific structure of your .SRDF file, you can extract
# the data accordingly, for instance:
# for child in root:
# if child.tag == 'some_tag_name':
# tag_value = child.text
# # Do something with the tag value.
# elif child.tag == 'another_tag_name':
# # Handle another tag.
        except ET.ParseError as e:
            Journal.log(self.__class__.__name__,
                "__init__",
                "could not read SRDF properly!!",
                LogType.WARN,
                throw_when_excep = False)
# Find all the 'joint' elements within 'group_state' with the name attribute and their values
joints = self._srdf_root.findall(".//group_state[@name='home']/joint")
self._homing_map = {}
for joint in joints:
joint_name = joint.attrib['name']
joint_value = joint.attrib['value']
self._homing_map[joint_name] = float(joint_value)
self._assign2homing()
def _assign2homing(self):
for joint in list(self._homing_map.keys()):
if joint in self.joint_idx_map:
self._homing[:, self.joint_idx_map[joint]] = torch.full((self.num_robots, 1),
self._homing_map[joint],
device = self._device,
dtype=self.torch_dtype).flatten()
            else:
                Journal.log(self.__class__.__name__,
                    "_assign2homing",
                    "joint " + f"{joint}" + " is not present in the articulation. It will be ignored.",
                    LogType.WARN,
                    throw_when_excep = False)
def get_homing(self,
clone: bool = False):
if not clone:
return self._homing
else:
return self._homing.clone()
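# Example usage (sketch; the articulation view and SRDF path are assumed to
# exist and be initialized):
#   homer = OmniRobotHomer(articulation=robot_art_view,
#                          srdf_path="/path/to/robot.srdf",
#                          device=torch.device("cuda"))
#   homing = homer.get_homing(clone=True)  # (num_robots, n_dofs) tensor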
| 5,070 | Python | 36.286764 | 144 | 0.554438 |
AndrePatri/OmniRoboGym/omni_robo_gym/utils/jnt_imp_cntrl.py | # Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected])
#
# This file is part of OmniRoboGym and distributed under the General Public License version 2 license.
#
# OmniRoboGym is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# OmniRoboGym is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>.
#
import torch
from typing import List
from enum import Enum
from omni.isaac.core.articulations.articulation_view import ArticulationView
from omni_robo_gym.utils.urdf_helpers import UrdfLimitsParser
import time
from SharsorIPCpp.PySharsorIPC import LogType
from SharsorIPCpp.PySharsorIPC import Journal
class FirstOrderFilter:
# a class implementing a simple first order filter
def __init__(self,
dt: float,
filter_BW: float = 0.1,
rows: int = 1,
cols: int = 1,
device: torch.device = torch.device("cpu"),
dtype = torch.double):
self._torch_dtype = dtype
self._torch_device = device
self._dt = dt
self._rows = rows
self._cols = cols
self._filter_BW = filter_BW
import math
self._gain = 2 * math.pi * self._filter_BW
self.yk = torch.zeros((self._rows, self._cols), device = self._torch_device,
dtype=self._torch_dtype)
self.ykm1 = torch.zeros((self._rows, self._cols), device = self._torch_device,
dtype=self._torch_dtype)
self.refk = torch.zeros((self._rows, self._cols), device = self._torch_device,
dtype=self._torch_dtype)
self.refkm1 = torch.zeros((self._rows, self._cols), device = self._torch_device,
dtype=self._torch_dtype)
self._kh2 = self._gain * self._dt / 2.0
self._coeff_ref = self._kh2 * 1/ (1 + self._kh2)
self._coeff_km1 = (1 - self._kh2) / (1 + self._kh2)
def update(self,
refk: torch.Tensor = None):
if refk is not None:
self.refk[:, :] = refk
self.yk[:, :] = torch.add(torch.mul(self.ykm1, self._coeff_km1),
torch.mul(torch.add(self.refk, self.refkm1),
self._coeff_ref))
self.refkm1[:, :] = self.refk
self.ykm1[:, :] = self.yk
    def reset(self,
            idxs: torch.Tensor = None):
        if idxs is None:
            # reset the filter state for all rows (i.e. all envs)
            self.yk[:, :] = torch.zeros((self._rows, self._cols),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
            self.ykm1[:, :] = torch.zeros((self._rows, self._cols),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
            self.refk[:, :] = torch.zeros((self._rows, self._cols),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
            self.refkm1[:, :] = torch.zeros((self._rows, self._cols),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
        else:
            # only reset the rows selected by idxs
            self.yk[idxs, :] = torch.zeros((idxs.shape[0], self._cols),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
            self.ykm1[idxs, :] = torch.zeros((idxs.shape[0], self._cols),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
            self.refk[idxs, :] = torch.zeros((idxs.shape[0], self._cols),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
            self.refkm1[idxs, :] = torch.zeros((idxs.shape[0], self._cols),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
def get(self):
return self.yk
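# The update() above is a Tustin (bilinear) discretization of a first-order
# low-pass filter with bandwidth filter_BW [Hz]:
#   y_k = (1 - kh2)/(1 + kh2) * y_{k-1} + kh2/(1 + kh2) * (r_k + r_{k-1}),
# where kh2 = (2*pi*filter_BW)*dt/2.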
class JntSafety:
def __init__(self,
urdf_parser: UrdfLimitsParser):
self.limits_parser = urdf_parser
self.limit_matrix = self.limits_parser.get_limits_matrix()
def apply(self, q_cmd=None, v_cmd=None, eff_cmd=None):
if q_cmd is not None:
self.saturate_tensor(q_cmd, position=True)
if v_cmd is not None:
self.saturate_tensor(v_cmd, velocity=True)
if eff_cmd is not None:
self.saturate_tensor(eff_cmd, effort=True)
def has_nan(self,
tensor):
return torch.any(torch.isnan(tensor))
def saturate_tensor(self, tensor, position=False, velocity=False, effort=False):
if self.has_nan(tensor):
exception = f"Found nan elements in provided tensor!!"
Journal.log(self.__class__.__name__,
"saturate_tensor",
exception,
LogType.EXCEP,
throw_when_excep = False)
# Replace NaN values with infinity, so that we can clamp it
tensor[:, :] = torch.nan_to_num(tensor, nan=torch.inf)
if position:
tensor[:, :] = torch.clamp(tensor[:, :], min=self.limit_matrix[:, 0], max=self.limit_matrix[:, 3])
elif velocity:
tensor[:, :] = torch.clamp(tensor[:, :], min=self.limit_matrix[:, 1], max=self.limit_matrix[:, 4])
elif effort:
tensor[:, :] = torch.clamp(tensor[:, :], min=self.limit_matrix[:, 2], max=self.limit_matrix[:, 5])
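# Minimal sketch of the intended use of JntSafety (names are illustrative):
#   safety = JntSafety(urdf_parser=UrdfLimitsParser(urdf_path, joint_names))
#   safety.apply(q_cmd=pos_refs, v_cmd=vel_refs, eff_cmd=eff_refs)  # clamps in place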
class OmniJntImpCntrl:
class IndxState(Enum):
NONE = -1
VALID = 1
INVALID = 0
def __init__(self,
articulation: ArticulationView,
default_pgain = 300.0,
default_vgain = 30.0,
backend = "torch",
device: torch.device = torch.device("cpu"),
filter_BW = 50.0, # [Hz]
filter_dt = None, # should correspond to the dt between samples
override_art_controller = False,
init_on_creation = False,
dtype = torch.double,
enable_safety = True,
urdf_path: str = None,
enable_profiling: bool = False,
debug_checks: bool = False): # [s]
self._torch_dtype = dtype
self._torch_device = device
self.enable_profiling = enable_profiling
self._debug_checks = debug_checks
# debug data
self.profiling_data = {}
self.profiling_data["time_to_update_state"] = -1.0
self.profiling_data["time_to_set_refs"] = -1.0
self.profiling_data["time_to_apply_cmds"] = -1.0
self.start_time = None
if self.enable_profiling:
self.start_time = time.perf_counter()
self.enable_safety = enable_safety
self.limiter = None
self.robot_limits = None
self.urdf_path = urdf_path
self.override_art_controller = override_art_controller # whether to override Isaac's internal joint
# articulation PD controller or not
self.init_art_on_creation = init_on_creation # init. articulation's gains and refs as soon as the controller
# is created
self.gains_initialized = False
self.refs_initialized = False
self._default_pgain = default_pgain
self._default_vgain = default_vgain
self._filter_BW = filter_BW
self._filter_dt = filter_dt
self._articulation_view = articulation # used to actually apply control
# signals to the robot
if not self._articulation_view.initialized:
exception = f"the provided articulation_view is not initialized properly!"
Journal.log(self.__class__.__name__,
"__init__",
exception,
LogType.EXCEP,
throw_when_excep = True)
self._valid_signal_types = ["pos_ref", "vel_ref", "eff_ref", # references
"pos", "vel", "eff", # measurements (necessary if overriding Isaac's art. controller)
"pgain", "vgain"]
self.num_robots = self._articulation_view.count
self.n_dofs = self._articulation_view.num_dof
self.jnts_names = self._articulation_view.dof_names
if (backend != "torch"):
warning = f"Only supported backend is torch!!!"
Journal.log(self.__class__.__name__,
"__init__",
warning,
LogType.WARN,
throw_when_excep = True)
self._backend = "torch"
if self.enable_safety:
if self.urdf_path is None:
exception = "If enable_safety is set to True, a urdf_path should be provided too!"
Journal.log(self.__class__.__name__,
"__init__",
exception,
LogType.EXCEP,
throw_when_excep = True)
self.robot_limits = UrdfLimitsParser(urdf_path=self.urdf_path,
joint_names=self.jnts_names,
backend=self._backend,
device=self._torch_device)
self.limiter = JntSafety(urdf_parser=self.robot_limits)
self._pos_err = None
self._vel_err = None
self._pos = None
self._vel = None
self._eff = None
self._imp_eff = None
self._filter_available = False
if filter_dt is not None:
self._filter_BW = filter_BW
self._filter_dt = filter_dt
self._pos_ref_filter = FirstOrderFilter(dt=self._filter_dt,
filter_BW=self._filter_BW,
rows=self.num_robots,
cols=self.n_dofs,
device=self._torch_device,
dtype=self._torch_dtype)
self._vel_ref_filter = FirstOrderFilter(dt=self._filter_dt,
filter_BW=self._filter_BW,
rows=self.num_robots,
cols=self.n_dofs,
device=self._torch_device,
dtype=self._torch_dtype)
self._eff_ref_filter = FirstOrderFilter(dt=self._filter_dt,
filter_BW=self._filter_BW,
rows=self.num_robots,
cols=self.n_dofs,
device=self._torch_device,
dtype=self._torch_dtype)
self._filter_available = True
else:
warning = f"No filter dt provided -> reference filter will not be used!"
Journal.log(self.__class__.__name__,
"__init__",
warning,
LogType.WARN,
throw_when_excep = True)
self.reset() # initialize data
def update_state(self,
pos: torch.Tensor = None,
vel: torch.Tensor = None,
eff: torch.Tensor = None,
robot_indxs: torch.Tensor = None,
jnt_indxs: torch.Tensor = None):
if self.enable_profiling:
self.start_time = time.perf_counter()
selector = self._gen_selector(robot_indxs=robot_indxs,
jnt_indxs=jnt_indxs) # only checks and throws
# if debug_checks
if pos is not None:
self._validate_signal(signal = pos,
selector = selector,
name="pos") # does nothing if not debug_checks
self._pos[selector] = pos
if vel is not None:
self._validate_signal(signal = vel,
selector = selector,
name="vel")
self._vel[selector] = vel
if eff is not None:
self._validate_signal(signal = eff,
selector = selector,
name="eff")
self._eff[selector] = eff
if self.enable_profiling:
self.profiling_data["time_to_update_state"] = \
time.perf_counter() - self.start_time
def set_gains(self,
pos_gains: torch.Tensor = None,
vel_gains: torch.Tensor = None,
robot_indxs: torch.Tensor = None,
jnt_indxs: torch.Tensor = None):
selector = self._gen_selector(robot_indxs=robot_indxs,
jnt_indxs=jnt_indxs) # only checks and throws
# if debug_checks
if pos_gains is not None:
self._validate_signal(signal = pos_gains,
selector = selector,
name="pos_gains")
self._pos_gains[selector] = pos_gains
if not self.override_art_controller:
self._articulation_view.set_gains(kps = self._pos_gains)
if vel_gains is not None:
self._validate_signal(signal = vel_gains,
selector = selector,
name="vel_gains")
self._vel_gains[selector] = vel_gains
if not self.override_art_controller:
self._articulation_view.set_gains(kds = self._vel_gains)
def set_refs(self,
eff_ref: torch.Tensor = None,
pos_ref: torch.Tensor = None,
vel_ref: torch.Tensor = None,
robot_indxs: torch.Tensor = None,
jnt_indxs: torch.Tensor = None):
if self.enable_profiling:
self.start_time = time.perf_counter()
selector = self._gen_selector(robot_indxs=robot_indxs,
jnt_indxs=jnt_indxs) # only checks and throws
# if debug_checks
if eff_ref is not None:
self._validate_signal(signal = eff_ref,
selector = selector,
name="eff_ref")
self._eff_ref[selector] = eff_ref
if pos_ref is not None:
self._validate_signal(signal = pos_ref,
selector = selector,
name="pos_ref")
self._pos_ref[selector] = pos_ref
if vel_ref is not None:
self._validate_signal(signal = vel_ref,
selector = selector,
name="vel_ref")
self._vel_ref[selector] = vel_ref
if self.enable_profiling:
self.profiling_data["time_to_set_refs"] = time.perf_counter() - self.start_time
def apply_cmds(self,
filter = False):
# initialize gains and refs if not done previously
if self.enable_profiling:
self.start_time = time.perf_counter()
if not self.gains_initialized:
self._apply_init_gains_to_art()
if not self.refs_initialized:
self._apply_init_refs_to_art()
if filter and self._filter_available:
self._pos_ref_filter.update(self._pos_ref)
self._vel_ref_filter.update(self._vel_ref)
self._eff_ref_filter.update(self._eff_ref)
# we first filter, then apply safety
eff_ref_filt = self._eff_ref_filter.get()
pos_ref_filt = self._pos_ref_filter.get()
vel_ref_filt = self._vel_ref_filter.get()
if self.limiter is not None:
# saturating ref cmds
self.limiter.apply(q_cmd=pos_ref_filt,
v_cmd=vel_ref_filt,
eff_cmd=eff_ref_filt)
if not self.override_art_controller:
# using omniverse's articulation PD controller
self._articulation_view.set_joint_efforts(eff_ref_filt)
self._articulation_view.set_joint_position_targets(pos_ref_filt)
self._articulation_view.set_joint_velocity_targets(vel_ref_filt)
else:
# impedance torque computed explicitly
self._pos_err = torch.sub(self._pos_ref_filter.get(), self._pos)
self._vel_err = torch.sub(self._vel_ref_filter.get(), self._vel)
self._imp_eff = torch.add(self._eff_ref_filter.get(),
torch.add(
torch.mul(self._pos_gains,
self._pos_err),
torch.mul(self._vel_gains,
self._vel_err)))
# torch.cuda.synchronize()
            # we also make the resulting imp eff safe
            if self.limiter is not None:
                self.limiter.apply(eff_cmd=self._imp_eff)
# apply only effort (comprehensive of all imp. terms)
self._articulation_view.set_joint_efforts(self._imp_eff)
else:
# we first apply safety to reference joint cmds
if self.limiter is not None:
self.limiter.apply(q_cmd=self._pos_ref,
v_cmd=self._vel_ref,
eff_cmd=self._eff_ref)
if not self.override_art_controller:
# using omniverse's articulation PD controller
self._articulation_view.set_joint_efforts(self._eff_ref)
self._articulation_view.set_joint_position_targets(self._pos_ref)
self._articulation_view.set_joint_velocity_targets(self._vel_ref)
else:
# impedance torque computed explicitly
self._pos_err = torch.sub(self._pos_ref, self._pos)
self._vel_err = torch.sub(self._vel_ref, self._vel)
self._imp_eff = torch.add(self._eff_ref,
torch.add(
torch.mul(self._pos_gains,
self._pos_err),
torch.mul(self._vel_gains,
self._vel_err)))
# torch.cuda.synchronize()
# we also make the resulting imp eff safe
if self.limiter is not None:
self.limiter.apply(eff_cmd=self._imp_eff)
# apply only effort (comprehensive of all imp. terms)
self._articulation_view.set_joint_efforts(self._imp_eff)
if self.enable_profiling:
self.profiling_data["time_to_apply_cmds"] = \
time.perf_counter() - self.start_time
def get_jnt_names_matching(self,
name_pattern: str):
return [jnt for jnt in self.jnts_names if name_pattern in jnt]
def get_jnt_idxs_matching(self,
name_pattern: str):
jnts_names = self.get_jnt_names_matching(name_pattern)
jnt_idxs = [self.jnts_names.index(jnt) for jnt in jnts_names]
        if len(jnt_idxs) != 0:
return torch.tensor(jnt_idxs,
dtype=torch.int64,
device=self._torch_device)
else:
return None
def pos_gains(self):
return self._pos_gains
def vel_gains(self):
return self._vel_gains
def eff_ref(self):
return self._eff_ref
def pos_ref(self):
return self._pos_ref
def vel_ref(self):
return self._vel_ref
def pos_err(self):
return self._pos_err
def vel_err(self):
return self._vel_err
def pos(self):
return self._pos
def vel(self):
return self._vel
def eff(self):
return self._eff
def imp_eff(self):
return self._imp_eff
def reset(self,
robot_indxs: torch.Tensor = None):
self.gains_initialized = False
self.refs_initialized = False
self._all_dofs_idxs = torch.tensor([i for i in range(0, self.n_dofs)],
dtype=torch.int64,
device=self._torch_device)
self._all_robots_idxs = torch.tensor([i for i in range(0, self.num_robots)],
dtype=torch.int64,
device=self._torch_device)
if robot_indxs is None: # reset all data
# we assume diagonal joint impedance gain matrices, so we can save on memory and only store the diagonal
self._pos_gains = torch.full((self.num_robots, self.n_dofs),
self._default_pgain,
device = self._torch_device,
dtype=self._torch_dtype)
self._vel_gains = torch.full((self.num_robots, self.n_dofs),
self._default_vgain,
device = self._torch_device,
dtype=self._torch_dtype)
self._eff_ref = torch.zeros((self.num_robots, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
self._pos_ref = torch.zeros((self.num_robots, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
self._vel_ref = torch.zeros((self.num_robots, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
self._pos_err = torch.zeros((self.num_robots, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
self._vel_err = torch.zeros((self.num_robots, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
self._pos = torch.zeros((self.num_robots, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
self._vel = torch.zeros((self.num_robots, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
self._eff = torch.zeros((self.num_robots, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
self._imp_eff = torch.zeros((self.num_robots, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
if self._filter_available:
self._pos_ref_filter.reset()
self._vel_ref_filter.reset()
self._eff_ref_filter.reset()
else: # only reset some robots
if self._debug_checks:
self._validate_selectors(robot_indxs=robot_indxs) # throws if checks not satisfied
n_envs = robot_indxs.shape[0]
# we assume diagonal joint impedance gain matrices, so we can save on memory and only store the diagonal
self._pos_gains[robot_indxs, :] = torch.full((n_envs, self.n_dofs),
self._default_pgain,
device = self._torch_device,
dtype=self._torch_dtype)
self._vel_gains[robot_indxs, :] = torch.full((n_envs, self.n_dofs),
self._default_vgain,
device = self._torch_device,
dtype=self._torch_dtype)
self._eff_ref[robot_indxs, :] = 0
self._pos_ref[robot_indxs, :] = 0
self._vel_ref[robot_indxs, :] = 0
# if self.override_art_controller:
# saving memory (these are not necessary if not overriding Isaac's art. controller)
self._pos_err[robot_indxs, :] = torch.zeros((n_envs, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
self._vel_err[robot_indxs, :] = torch.zeros((n_envs, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
self._pos[robot_indxs, :] = torch.zeros((n_envs, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
self._vel[robot_indxs, :] = torch.zeros((n_envs, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
self._eff[robot_indxs, :] = torch.zeros((n_envs, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
self._imp_eff[robot_indxs, :] = torch.zeros((n_envs, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
if self._filter_available:
self._pos_ref_filter.reset(idxs = robot_indxs)
self._vel_ref_filter.reset(idxs = robot_indxs)
self._eff_ref_filter.reset(idxs = robot_indxs)
if self.init_art_on_creation:
# will use updated gains/refs based on reset (non updated gains/refs will be the same)
self._apply_init_gains_to_art()
self._apply_init_refs_to_art()
def _apply_init_gains_to_art(self):
if not self.gains_initialized:
if not self.override_art_controller:
self._articulation_view.set_gains(kps = self._pos_gains,
kds = self._vel_gains)
else:
# settings Isaac's PD controller gains to 0
no_gains = torch.zeros((self.num_robots, self.n_dofs), device = self._torch_device,
dtype=self._torch_dtype)
self._articulation_view.set_gains(kps = no_gains,
kds = no_gains)
self.gains_initialized = True
def _apply_init_refs_to_art(self):
if not self.refs_initialized:
if not self.override_art_controller:
self._articulation_view.set_joint_efforts(self._eff_ref)
self._articulation_view.set_joint_position_targets(self._pos_ref)
self._articulation_view.set_joint_velocity_targets(self._vel_ref)
else:
self._articulation_view.set_joint_efforts(self._eff_ref)
self.refs_initialized = True
def _validate_selectors(self,
robot_indxs: torch.Tensor = None,
jnt_indxs: torch.Tensor = None):
        if robot_indxs is not None:
            robot_indxs_shape = robot_indxs.shape
            if not (len(robot_indxs_shape) == 1 and \
                robot_indxs.dtype == torch.int64 and \
                bool(torch.min(robot_indxs) >= 0) and \
                bool(torch.max(robot_indxs) < self.num_robots) and \
                robot_indxs.device.type == self._torch_device.type): # sanity checks
                error = "Mismatch in provided selector \n" + \
                    "robot_indxs_shape -> " + f"{len(robot_indxs_shape)}" + " VS" + " expected -> " + f"{1}" + "\n" + \
                    "robot_indxs.dtype -> " + f"{robot_indxs.dtype}" + " VS" + " expected -> " + f"{torch.int64}" + "\n" + \
                    "torch.min(robot_indxs) >= 0 -> " + f"{bool(torch.min(robot_indxs) >= 0)}" + " VS" + f" {True}" + "\n" + \
                    "torch.max(robot_indxs) < self.num_robots -> " + f"{torch.max(robot_indxs)}" + " VS" + f" {self.num_robots}" + "\n" + \
                    "robot_indxs.device -> " + f"{robot_indxs.device.type}" + " VS" + " expected -> " + f"{self._torch_device.type}" + "\n"
Journal.log(self.__class__.__name__,
"_validate_selectors",
error,
LogType.EXCEP,
throw_when_excep = True)
        if jnt_indxs is not None:
            jnt_indxs_shape = jnt_indxs.shape
            if not (len(jnt_indxs_shape) == 1 and \
                jnt_indxs.dtype == torch.int64 and \
                bool(torch.min(jnt_indxs) >= 0) and \
                bool(torch.max(jnt_indxs) < self.n_dofs) and \
                jnt_indxs.device.type == self._torch_device.type): # sanity checks
                error = "Mismatch in provided selector \n" + \
                    "jnt_indxs_shape -> " + f"{len(jnt_indxs_shape)}" + " VS" + " expected -> " + f"{1}" + "\n" + \
                    "jnt_indxs.dtype -> " + f"{jnt_indxs.dtype}" + " VS" + " expected -> " + f"{torch.int64}" + "\n" + \
                    "torch.min(jnt_indxs) >= 0 -> " + f"{bool(torch.min(jnt_indxs) >= 0)}" + " VS" + f" {True}" + "\n" + \
                    "torch.max(jnt_indxs) < self.n_dofs -> " + f"{torch.max(jnt_indxs)}" + " VS" + f" {self.n_dofs}" + "\n" + \
                    "jnt_indxs.device -> " + f"{jnt_indxs.device.type}" + " VS" + " expected -> " + f"{self._torch_device.type}" + "\n"
Journal.log(self.__class__.__name__,
"_validate_selectors",
error,
LogType.EXCEP,
throw_when_excep = True)
def _validate_signal(self,
signal: torch.Tensor,
selector: torch.Tensor = None,
name: str = "signal"):
if self._debug_checks:
signal_shape = signal.shape
selector_shape = selector[0].shape
if not (signal_shape[0] == selector_shape[0] and \
signal_shape[1] == selector_shape[1] and \
signal.device.type == self._torch_device.type and \
signal.dtype == self._torch_dtype):
big_error = f"Mismatch in provided signal [{name}" + "] and/or selector \n" + \
"signal rows -> " + f"{signal_shape[0]}" + " VS" + " expected rows -> " + f"{selector_shape[0]}" + "\n" + \
"signal cols -> " + f"{signal_shape[1]}" + " VS" + " expected cols -> " + f"{selector_shape[1]}" + "\n" + \
"signal dtype -> " + f"{signal.dtype}" + " VS" + " expected -> " + f"{self._torch_dtype}" + "\n" + \
"signal device -> " + f"{signal.device.type}" + " VS" + " expected type -> " + f"{self._torch_device.type}"
Journal.log(self.__class__.__name__,
"_validate_signal",
big_error,
LogType.EXCEP,
throw_when_excep = True)
def _gen_selector(self,
robot_indxs: torch.Tensor = None,
jnt_indxs: torch.Tensor = None):
if self._debug_checks:
self._validate_selectors(robot_indxs=robot_indxs,
jnt_indxs=jnt_indxs) # throws if not valid
if robot_indxs is None:
robot_indxs = self._all_robots_idxs
if jnt_indxs is None:
jnt_indxs = self._all_dofs_idxs
return torch.meshgrid((robot_indxs, jnt_indxs),
indexing="ij")
| 32,884 | Python | 40.157697 | 139 | 0.485282 |
AndrePatri/OmniRoboGym/omni_robo_gym/utils/terrains.py | # Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected])
#
# This file is part of OmniRoboGym and distributed under the General Public License version 2 license.
#
# OmniRoboGym is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# OmniRoboGym is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>.
#
import os, sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPT_DIR)
import numpy as np
from omni_robo_gym.utils.terrain_utils import *
from pxr import Usd
class RlTerrains():
def __init__(self,
stage: Usd.Stage):
self._stage = stage
def get_wave_terrain(self,
terrain_size = 40,
num_waves = 10,
amplitude = 1,
position = np.array([0.0, 0.0, 0.0])):
# creates a terrain
num_terrains = 1
terrain_width = terrain_size
terrain_length = terrain_size
horizontal_scale = 0.25 # [m]
vertical_scale = 0.005 # [m]
num_rows = int(terrain_width/horizontal_scale)
num_cols = int(terrain_length/horizontal_scale)
heightfield = np.zeros((num_terrains * num_rows,
num_cols), dtype=np.int16)
def new_sub_terrain():
return SubTerrain(width=num_rows,
length=num_cols,
vertical_scale=vertical_scale,
horizontal_scale=horizontal_scale)
heightfield[0:num_rows, :] = wave_terrain(new_sub_terrain(), num_waves=num_waves,
amplitude=amplitude).height_field_raw
vertices, triangles = convert_heightfield_to_trimesh(heightfield,
horizontal_scale=horizontal_scale,
vertical_scale=vertical_scale,
slope_threshold=1.5)
position = np.array([-terrain_width/2.0, terrain_length/2.0, 0]) + position
orientation = np.array([0.70711, 0.0, 0.0, -0.70711])
add_terrain_to_stage(stage=self._stage,
vertices=vertices,
triangles=triangles,
position=position,
orientation=orientation)
def get_sloped_terrain(self,
terrain_size = 40,
slope = -0.5,
position = np.array([0.0, 0.0, 0.0])):
# creates a terrain
num_terrains = 1
terrain_width = terrain_size
terrain_length = terrain_size
horizontal_scale = 0.25 # [m]
vertical_scale = 0.005 # [m]
num_rows = int(terrain_width/horizontal_scale)
num_cols = int(terrain_length/horizontal_scale)
heightfield = np.zeros((num_terrains * num_rows,
num_cols), dtype=np.int16)
def new_sub_terrain():
return SubTerrain(width=num_rows,
length=num_cols,
vertical_scale=vertical_scale,
horizontal_scale=horizontal_scale)
heightfield[0:num_rows, :] = pyramid_sloped_terrain(new_sub_terrain(),
slope=slope).height_field_raw
vertices, triangles = convert_heightfield_to_trimesh(heightfield,
horizontal_scale=horizontal_scale,
vertical_scale=vertical_scale,
slope_threshold=1.5)
position = np.array([-terrain_width/2.0, terrain_length/2.0, 0]) + position
orientation = np.array([0.70711, 0.0, 0.0, -0.70711])
add_terrain_to_stage(stage=self._stage,
vertices=vertices,
triangles=triangles,
position=position,
orientation=orientation)
def get_stairs_terrain(self,
terrain_size = 40,
step_width = 0.75,
step_height = -0.5,
position = np.array([0.0, 0.0, 0.0])):
# creates a terrain
num_terrains = 1
terrain_width = terrain_size
terrain_length = terrain_size
horizontal_scale = 0.25 # [m]
vertical_scale = 0.005 # [m]
num_rows = int(terrain_width/horizontal_scale)
num_cols = int(terrain_length/horizontal_scale)
heightfield = np.zeros((num_terrains * num_rows,
num_cols), dtype=np.int16)
def new_sub_terrain():
return SubTerrain(width=num_rows,
length=num_cols,
vertical_scale=vertical_scale,
horizontal_scale=horizontal_scale)
heightfield[0:num_rows, :] = stairs_terrain(new_sub_terrain(), step_width=step_width,
step_height=step_height).height_field_raw
vertices, triangles = convert_heightfield_to_trimesh(heightfield,
horizontal_scale=horizontal_scale,
vertical_scale=vertical_scale,
slope_threshold=1.5)
position = np.array([-terrain_width/2.0, terrain_length/2.0, 0]) + position
orientation = np.array([0.70711, 0.0, 0.0, -0.70711])
add_terrain_to_stage(stage=self._stage,
vertices=vertices,
triangles=triangles,
position=position,
orientation=orientation)
def get_random_terrain(self,
terrain_size = 40,
min_height = -0.2,
max_height = 0.2,
step = 0.2,
downsampled_scale=0.5,
position = np.array([0.0, 0.0, 0.0])):
# creates a terrain
num_terrains = 1
terrain_width = terrain_size
terrain_length = terrain_size
horizontal_scale = 0.25 # [m]
vertical_scale = 0.005 # [m]
num_rows = int(terrain_width/horizontal_scale)
num_cols = int(terrain_length/horizontal_scale)
heightfield = np.zeros((num_terrains * num_rows,
num_cols), dtype=np.int16)
def new_sub_terrain():
return SubTerrain(width=num_rows,
length=num_cols,
vertical_scale=vertical_scale,
horizontal_scale=horizontal_scale)
heightfield[0:num_rows, :] = random_uniform_terrain(new_sub_terrain(),
min_height=min_height, max_height=max_height,
step=step,
downsampled_scale=downsampled_scale).height_field_raw
vertices, triangles = convert_heightfield_to_trimesh(heightfield,
horizontal_scale=horizontal_scale,
vertical_scale=vertical_scale,
slope_threshold=1.5)
position = np.array([-terrain_width/2.0, terrain_length/2.0, 0]) + position
orientation = np.array([0.70711, 0.0, 0.0, -0.70711])
add_terrain_to_stage(stage=self._stage,
vertices=vertices,
triangles=triangles,
position=position,
orientation=orientation)
def get_obstacles_terrain(self,
terrain_size = 40.0,
num_obs = 50,
max_height = 0.5,
min_size = 0.5,
max_size = 5.0,
position = np.array([0.0, 0.0, 0.0])):
# create all available terrain types
        num_terrains = 1
terrain_width = terrain_size
terrain_length = terrain_size
horizontal_scale = 0.25 # [m]
vertical_scale = 0.005 # [m]
num_rows = int(terrain_width/horizontal_scale)
num_cols = int(terrain_length/horizontal_scale)
        heightfield = np.zeros((num_terrains*num_rows, num_cols), dtype=np.int16)
def new_sub_terrain():
return SubTerrain(width=num_rows, length=num_cols, vertical_scale=vertical_scale, horizontal_scale=horizontal_scale)
heightfield[0:num_rows, :] = discrete_obstacles_terrain(new_sub_terrain(),
max_height=max_height,
min_size=min_size,
max_size=max_size,
num_rects=num_obs).height_field_raw
vertices, triangles = convert_heightfield_to_trimesh(heightfield, horizontal_scale=horizontal_scale, vertical_scale=vertical_scale, slope_threshold=1.5)
position = np.array([-terrain_width/2.0, terrain_length/2.0, 0]) + position
orientation = np.array([0.70711, 0.0, 0.0, -0.70711])
add_terrain_to_stage(stage=self._stage, vertices=vertices, triangles=triangles, position=position, orientation=orientation)
    def post_reset(self):
        pass
def get_observations(self):
pass
def calculate_metrics(self) -> None:
pass
def is_done(self) -> None:
pass
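# Example usage (sketch; assumes a running Isaac Sim / Omniverse context):
#   import omni.usd
#   terrains = RlTerrains(omni.usd.get_context().get_stage())
#   terrains.get_obstacles_terrain(terrain_size=40.0,
#                                  num_obs=50,
#                                  max_height=0.5,
#                                  min_size=0.5,
#                                  max_size=5.0)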
| 9,922 | Python | 36.730038 | 160 | 0.528926 |
AndrePatri/OmniRoboGym/docs/isaac2023.1.0_issues.md | ### Some bugs of Isaac2023.1.0 which can be easily fixed
#### 1.0 Nucleus blocking function makes startup super slow
Easy temporary fix: modify /home/username/.local/share/ov/pkg/isaac_sim-2023.1.0/exts/omni.isaac.core/omni/isaac/core/utils/nucleus.py .
Change lines 178 to 198, which contain the `check_server` function, to the following:
```python
def check_server(server: str, path: str, timeout: float = 10.0) -> bool:
"""Check a specific server for a path
Args:
server (str): Name of Nucleus server
        path (str): Path to search
        timeout (float): timeout for the check, in seconds (unused in this patched version)
Returns:
bool: True if folder is found
"""
carb.log_info("Checking path: {}{}".format(server, path))
# Increase hang detection timeout
if "localhost" not in server:
omni.client.set_hang_detection_time_ms(10000)
result, _ = omni.client.stat("{}{}".format(server, path))
if result == Result.OK:
carb.log_info("Success: {}{}".format(server, path))
return True
carb.log_info("Failure: {}{} not accessible".format(server, path))
return False
```
#### 2.0 Grid Cloner bug
See `docs/grid_cloner_bugfix/grid_cloner.py` for more details
#### 3.0 Contact sensor bug
When cloning environments, it's not possible to create contact sensors on the cloned environments because of a failed collision-API-enabled flag check. Removing the check seems to resolve the problem without any major or noticeable issues.
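A minimal sketch of the kind of patch meant here (the exact file layout and check differ between builds, so the snippet below is purely illustrative of the idea, not the literal extension code):

```python
# in the extension's contact sensor creation path (hypothetical excerpt):
# if not collision_enabled:   # <- flag is wrongly False on cloned prims
#     carb.log_error("Collision API not enabled on the sensor's parent prim")
#     return None
# commenting out (or bypassing) this early-return lets contact sensors be
# created on cloned environments as well.
```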
| 1,413 | Markdown | 39.399999 | 240 | 0.683652 |
AndrePatri/OmniRoboGym/docs/grid_cloner_bugfix/grid_cloner.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from typing import List, Union
import numpy as np
import omni.usd
import torch
from omni.isaac.cloner import Cloner
from pxr import Gf, UsdGeom
class GridCloner(Cloner):
""" This is a specialized Cloner class that will automatically generate clones in a grid fashion. """
def __init__(self, spacing: float, num_per_row: int = -1):
"""
Args:
spacing (float): Spacing between clones.
num_per_row (int): Number of clones to place in a row. Defaults to sqrt(num_clones).
"""
self._spacing = spacing
self._num_per_row = num_per_row
Cloner.__init__(self)
def clone(
self,
source_prim_path: str,
prim_paths: List[str],
position_offsets: np.ndarray = None,
orientation_offsets: np.ndarray = None,
replicate_physics: bool = False,
base_env_path: str = None,
root_path: str = None,
copy_from_source: bool = False
):
""" Creates clones in a grid fashion. Positions of clones are computed automatically.
Args:
source_prim_path (str): Path of source object.
prim_paths (List[str]): List of destination paths.
position_offsets (np.ndarray): Positions to be applied as local translations on top of computed clone position.
Defaults to None, no offset will be applied.
orientation_offsets (np.ndarray): Orientations to be applied as local rotations for each clone.
Defaults to None, no offset will be applied.
replicate_physics (bool): Uses omni.physics replication. This will replicate physics properties directly for paths beginning with root_path and skip physics parsing for anything under the base_env_path.
base_env_path (str): Path to namespace for all environments. Required if replicate_physics=True and define_base_env() not called.
root_path (str): Prefix path for each environment. Required if replicate_physics=True and generate_paths() not called.
copy_from_source: (bool): Setting this to False will inherit all clones from the source prim; any changes made to the source prim will be reflected in the clones.
Setting this to True will make copies of the source prim when creating new clones; changes to the source prim will not be reflected in clones. Defaults to False. Note that setting this to True will take longer to execute.
Returns:
positions (List): Computed positions of all clones.
"""
num_clones = len(prim_paths)
self._num_per_row = int(np.sqrt(num_clones)) if self._num_per_row == -1 else self._num_per_row
num_rows = np.ceil(num_clones / self._num_per_row)
num_cols = np.ceil(num_clones / num_rows)
row_offset = 0.5 * self._spacing * (num_rows - 1)
col_offset = 0.5 * self._spacing * (num_cols - 1)
stage = omni.usd.get_context().get_stage()
positions = []
orientations = []
for i in range(num_clones):
# compute transform
row = i // num_cols
col = i % num_cols
x = row_offset - row * self._spacing
y = col * self._spacing - col_offset
up_axis = UsdGeom.GetStageUpAxis(stage)
position = [x, y, 0] if up_axis == UsdGeom.Tokens.z else [x, 0, y]
orientation = Gf.Quatd.GetIdentity()
if position_offsets is not None:
translation = position_offsets[i] + position
else:
translation = position
if orientation_offsets is not None:
orientation = (
Gf.Quatd(orientation_offsets[i][0].item(), Gf.Vec3d(orientation_offsets[i][1:].tolist()))
* orientation
)
else:
orientation = [
orientation.GetReal(),
orientation.GetImaginary()[0],
orientation.GetImaginary()[1],
orientation.GetImaginary()[2],
]
positions.append(translation)
orientations.append(orientation)
super().clone(
source_prim_path=source_prim_path,
prim_paths=prim_paths,
positions=positions,
orientations=orientations,
replicate_physics=replicate_physics,
base_env_path=base_env_path,
root_path=root_path,
copy_from_source=copy_from_source,
)
return positions
| 5,073 | Python | 40.590164 | 246 | 0.606742 |
AndrePatri/OmniRoboGym/docs/contact_sensor_bugfix/contact_sensor.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import sys
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.sensor import ContactSensor
from omni.isaac.cloner import GridCloner
import omni.isaac.core.utils.prims as prim_utils
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
my_world = World(stage_units_in_meters=1.0)
my_world.scene.add_default_ground_plane()
asset_path = assets_root_path + "/Isaac/Robots/Ant/ant.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/envs/env_0/Ant")
ant = my_world.scene.add(Articulation(prim_path="/World/envs/env_0/Ant/torso", name="ant", translation=np.array([0, 0, 1.5])))
ant_foot_prim_names = ["right_back_foot", "left_back_foot", "front_right_foot", "front_left_foot"]
translations = np.array(
[[0.38202, -0.40354, -0.0887], [-0.4, -0.40354, -0.0887], [-0.4, 0.4, -0.0887], [0.4, 0.4, -0.0887]]
)
# moving def prim
# move_prim(robot_prim_path_default, # from
# robot_base_prim_path) # to
num_envs = 3
env_ns = "/World/envs"
env_spacing = 15 # [m]
template_env_ns = env_ns + "/env_0"
cloner = GridCloner(spacing=env_spacing)
cloner.define_base_env(env_ns)
envs_prim_paths = cloner.generate_paths(env_ns + "/env",
num_envs)
cloner.clone(
source_prim_path=template_env_ns,
prim_paths=envs_prim_paths,
replicate_physics=True,
position_offsets = None
)
ant_sensors = []
for i in range(4):
ant_sensors.append(
my_world.scene.add(
ContactSensor(
prim_path="/World/envs/env_0/Ant/" + ant_foot_prim_names[i] + "/contact_sensor",
name="ant_contact_sensor_{}".format(i),
min_threshold=0,
max_threshold=10000000,
radius=0.1,
translation=translations[i],
)
)
)
ant_sensors[0].add_raw_contact_data_to_frame()
ant_sensors2 = []
for i in range(4):
ant_sensors2.append(
my_world.scene.add(
ContactSensor(
prim_path="/World/envs/env_1/Ant/" + ant_foot_prim_names[i] + "/contact_sensor",
name="ant_contact_sensor2_{}".format(i),
min_threshold=0,
max_threshold=10000000,
radius=0.1,
translation=translations[i],
)
)
)
ant_sensors2[0].add_raw_contact_data_to_frame()
my_world.reset()
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
print(ant_sensors2[0].get_current_frame())
if my_world.current_time_step_index == 0:
my_world.reset()
simulation_app.close()
| 3,638 | Python | 30.370689 | 126 | 0.657779 |
AndrePatri/OmniRoboGym/docs/sim_substepping_reset_issue/test_substepping_when_reset.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import numpy as np
import torch
def get_device(sim_params):
if "sim_device" in sim_params:
device = sim_params["sim_device"]
else:
device = "cpu"
physics_device_id = carb.settings.get_settings().get_as_int("/physics/cudaDevice")
gpu_id = 0 if physics_device_id < 0 else physics_device_id
if sim_params and "use_gpu_pipeline" in sim_params:
# GPU pipeline must use GPU simulation
if sim_params["use_gpu_pipeline"]:
device = "cuda:" + str(gpu_id)
elif sim_params and "use_gpu" in sim_params:
if sim_params["use_gpu"]:
device = "cuda:" + str(gpu_id)
return device
def sim_parameters():
# simulation parameters
sim_params = {}
# device settings
sim_params["use_gpu_pipeline"] = True # disabling gpu pipeline is necessary to be able
# to retrieve some quantities from the simulator which, otherwise, would have random values
sim_params["use_gpu"] = True # does this actually do anything?
if sim_params["use_gpu_pipeline"]:
sim_params["device"] = "cuda"
else:
sim_params["device"] = "cpu"
device = sim_params["device"]
# sim_params["dt"] = 1.0/100.0 # physics_dt?
sim_params["physics_dt"] = 1.0/400.0 # physics_dt?
sim_params["rendering_dt"] = sim_params["physics_dt"]
sim_params["substeps"] = 1 # number of physics steps to be taken for for each rendering step
sim_params["gravity"] = np.array([0.0, 0.0, -9.81])
sim_params["enable_scene_query_support"] = False
sim_params["use_fabric"] = True # Enable/disable reading of physics buffers directly. Default is True.
sim_params["replicate_physics"] = True
# sim_params["worker_thread_count"] = 4
sim_params["solver_type"] = 1 # 0: PGS, 1:TGS, defaults to TGS. PGS faster but TGS more stable
sim_params["enable_stabilization"] = True
# sim_params["bounce_threshold_velocity"] = 0.2
# sim_params["friction_offset_threshold"] = 0.04
# sim_params["friction_correlation_distance"] = 0.025
# sim_params["enable_sleeping"] = True
# Per-actor settings ( can override in actor_options )
sim_params["solver_position_iteration_count"] = 4 # defaults to 4
sim_params["solver_velocity_iteration_count"] = 1 # defaults to 1
sim_params["sleep_threshold"] = 0.0 # Mass-normalized kinetic energy threshold below which an actor may go to sleep.
# Allowed range [0, max_float).
sim_params["stabilization_threshold"] = 1e-5
# Per-body settings ( can override in actor_options )
# sim_params["enable_gyroscopic_forces"] = True
# sim_params["density"] = 1000 # density to be used for bodies that do not specify mass or density
# sim_params["max_depenetration_velocity"] = 100.0
# sim_params["solver_velocity_iteration_count"] = 1
# GPU buffers settings
# sim_params["gpu_max_rigid_contact_count"] = 512 * 1024
# sim_params["gpu_max_rigid_patch_count"] = 80 * 1024
# sim_params["gpu_found_lost_pairs_capacity"] = 1024
# sim_params["gpu_found_lost_aggregate_pairs_capacity"] = 1024
# sim_params["gpu_total_aggregate_pairs_capacity"] = 1024
# sim_params["gpu_max_soft_body_contacts"] = 1024 * 1024
# sim_params["gpu_max_particle_contacts"] = 1024 * 1024
# sim_params["gpu_heap_capacity"] = 64 * 1024 * 1024
# sim_params["gpu_temp_buffer_capacity"] = 16 * 1024 * 1024
# sim_params["gpu_max_num_partitions"] = 8
return sim_params
def reset_state(art_view,
idxs: torch.Tensor):
# root q
art_view.set_world_poses(positions = root_p_default[idxs, :],
orientations=root_q_default[idxs, :],
indices = idxs)
# jnts q
art_view.set_joint_positions(positions = jnts_q_default[idxs, :],
indices = idxs)
# root v and omega
art_view.set_joint_velocities(velocities = jnts_v_default[idxs, :],
indices = idxs)
# jnts v
concatenated_vel = torch.cat((root_v_default[idxs, :],
root_omega_default[idxs, :]), dim=1)
art_view.set_velocities(velocities = concatenated_vel,
indices = idxs)
# jnts eff
art_view.set_joint_efforts(efforts = jnts_eff_default[idxs, :],
indices = idxs)
def get_robot_state(
art_view):
pose = art_view.get_world_poses(
clone = True) # tuple: (pos, quat)
# root p (measured, previous, default)
root_p = pose[0]
# root q (measured, previous, default)
root_q = pose[1] # root orientation
# jnt q (measured, previous, default)
jnts_q = art_view.get_joint_positions(
clone = True) # joint positions
# root v (measured, default)
root_v= art_view.get_linear_velocities(
clone = True) # root lin. velocity
# root omega (measured, default)
root_omega = art_view.get_angular_velocities(
clone = True) # root ang. velocity
# joints v (measured, default)
jnts_v = art_view.get_joint_velocities(
clone = True) # joint velocities
jnts_eff = art_view.get_measured_joint_efforts(clone = True)
return root_p, root_q, jnts_q, root_v, root_omega, jnts_v, jnts_eff
from omni.isaac.kit import SimulationApp
import carb
import os
experience = f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.omnirobogym.headless.kit'
sim_params = sim_parameters()
num_envs = 2
headless = True
simulation_app = SimulationApp({"headless": headless,
"physics_gpu": 0},
experience=experience)
from omni.isaac.core import World
from omni.isaac.core.articulations import ArticulationView
from omni.importer.urdf import _urdf
# urdf import config
import_config = _urdf.ImportConfig()
import_config.merge_fixed_joints = True
import_config.import_inertia_tensor = True
import_config.fix_base = False
import_config.self_collision = False
my_world = World(stage_units_in_meters=1.0,
physics_dt=sim_params["physics_dt"],
rendering_dt=sim_params["rendering_dt"],
backend="torch",
device=str(get_device(sim_params=sim_params)),
physics_prim_path="/physicsScene",
set_defaults = False,
sim_params=sim_params)
# create initial robot
import omni.isaac.core.utils.prims as prim_utils
# create GridCloner instance
env_ns = "/World/envs"
template_env_ns = env_ns + "/env" # a single env. may contain multiple robots
base_env = template_env_ns + "_0"
base_robot_path = base_env + "/panda"
# get path to resource
from omni.isaac.core.utils.extensions import get_extension_path_from_name
extension_path = get_extension_path_from_name("omni.importer.urdf")
# import URDF at default prim path
import omni.kit
success, robot_prim_path_default = omni.kit.commands.execute(
"URDFParseAndImportFile",
urdf_path=extension_path + "/data/urdf/robots/franka_description/robots/panda_arm.urdf",
import_config=import_config,
)
# moving default prim to base prim path (for potential cloning)
from omni.isaac.core.utils.prims import move_prim
prim_utils.define_prim(base_env)
move_prim(robot_prim_path_default, # from
base_robot_path) # to
# cloning
from omni.isaac.cloner import GridCloner
cloner = GridCloner(spacing=6)
_envs_prim_paths = cloner.generate_paths(template_env_ns, num_envs)
position_offsets = np.array([[0.0, 0.0, 0.6]] * num_envs)
cloner.clone(
source_prim_path=base_env,
prim_paths=_envs_prim_paths,
base_env_path=base_env,
position_offsets=position_offsets,
replicate_physics=True
)
# Prim paths structure:
# World/envs/env_0/panda/panda_link0/...
# note: this prim-path structure applies to Isaac Sim 2023.1.0
art_view = ArticulationView(name = "Panda" + "ArtView",
prim_paths_expr = env_ns + "/env_.*"+ "/panda/panda_link0",
reset_xform_properties=False # required as per doc. when cloning
)
# note: in this version the cloned robots are not placed at different locations
my_world.scene.add(art_view)
ground_plane_prim_path = "/World/terrain"
my_world.scene.add_default_ground_plane(z_position=0,
name="terrain",
prim_path= ground_plane_prim_path,
static_friction=0.5,
dynamic_friction=0.5,
restitution=0.8)
cloner.filter_collisions(physicsscene_path = my_world.get_physics_context().prim_path,
collision_root_path = "/World/collisions",
prim_paths=_envs_prim_paths,
global_paths=[ground_plane_prim_path] # can collide with these prims
)
my_world.reset()
# init default state from measurements
root_p, root_q, jnts_q, root_v, \
root_omega, jnts_v, jnts_eff = get_robot_state(art_view)
root_p_default = torch.clone(root_p)
root_q_default = torch.clone(root_q)
jnts_q_default = torch.clone(jnts_q)
jnts_v_default = torch.clone(jnts_v)
root_omega_default = torch.clone(root_omega)
root_v_default = torch.clone(root_v)
jnts_eff_default = torch.clone(jnts_eff).zero_()
# default values
root_p_default[:, 0] = 0
root_p_default[:, 1] = 0
root_p_default[:, 2] = 0.5
root_q_default[:, 0] = 0.0
root_q_default[:, 1] = 0.0
root_q_default[:, 2] = 0.0
root_q_default[:, 3] = 1.0
jnts_q_default[:, :] = 1.0
jnts_v_default[:, :] = 0.0
root_omega_default[:, :] = 0.0
root_v_default[:, :] = 0.0
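# zero out the default PD gains below so the joints behave as purely effort-controlled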
no_gains = torch.zeros((num_envs, jnts_eff_default.shape[1]), device = get_device(sim_params),
dtype=torch.float32)
art_view.set_gains(kps = no_gains,
kds = no_gains)
print("Extension path: " + str(extension_path))
print("Prim paths: " + str(art_view.prim_paths))
reset_every_n_steps = 100
just_reset = False
for i in range(0, 1000):
    if ((i + 1) % reset_every_n_steps) == 0:
print("resetting to default")
reset_state(art_view,
torch.tensor([0], dtype=torch.int))
just_reset = True
my_world.step()
# retrieve state
root_p, root_q, jnts_q, root_v, \
root_omega, jnts_v, jnts_eff = get_robot_state(art_view)
# if just_reset:
    # check we have reset correctly
print("measured")
print(jnts_q)
print("default")
print(jnts_q_default)
simulation_app.close() | 11,081 | Python | 34.06962 | 120 | 0.624222 |
NVIDIA-AI-IOT/synthetic_data_generation_training_workflow/CLA.md | ## Individual Contributor License Agreement (CLA)
**Thank you for submitting your contributions to this project.**
By signing this CLA, you agree that the following terms apply to all of your past, present and future contributions
to the project.
### License.
You hereby represent that all present, past and future contributions are governed by the
[MIT License](https://opensource.org/licenses/MIT)
copyright statement.
This entails that to the extent possible under law, you transfer all copyright and related or neighboring rights
of the code or documents you contribute to the project itself or its maintainers.
Furthermore, you also represent that you have the authority to perform the above waiver
with respect to the entirety of your contributions.
### Moral Rights.
To the fullest extent permitted under applicable law, you hereby waive, and agree not to
assert, all of your “moral rights” in or relating to your contributions for the benefit of the project.
### Third Party Content.
If your Contribution includes or is based on any source code, object code, bug fixes, configuration changes, tools,
specifications, documentation, data, materials, feedback, information or other works of authorship that were not
authored by you (“Third Party Content”) or if you are aware of any third party intellectual property or proprietary
rights associated with your Contribution (“Third Party Rights”),
then you agree to include with the submission of your Contribution full details respecting such Third Party
Content and Third Party Rights, including, without limitation, identification of which aspects of your
Contribution contain Third Party Content or are associated with Third Party Rights, the owner/author of the
Third Party Content and Third Party Rights, where you obtained the Third Party Content, and any applicable
third party license terms or restrictions respecting the Third Party Content and Third Party Rights. For greater
certainty, the foregoing obligations respecting the identification of Third Party Content and Third Party Rights
do not apply to any portion of a Project that is incorporated into your Contribution to that same Project.
### Representations.
You represent that, other than the Third Party Content and Third Party Rights identified by
you in accordance with this Agreement, you are the sole author of your Contributions and are legally entitled
to grant the foregoing licenses and waivers in respect of your Contributions. If your Contributions were
created in the course of your employment with your past or present employer(s), you represent that such
employer(s) has authorized you to make your Contributions on behalf of such employer(s) or such employer
(s) has waived all of their right, title or interest in or to your Contributions.
### Disclaimer.
To the fullest extent permitted under applicable law, your Contributions are provided on an "as is"
basis, without any warranties or conditions, express or implied, including, without limitation, any implied
warranties or conditions of non-infringement, merchantability or fitness for a particular purpose. You are not
required to provide support for your Contributions, except to the extent you desire to provide support.
### No Obligation.
You acknowledge that the maintainers of this project are under no obligation to use or incorporate your contributions
into the project. The decision to use or incorporate your contributions into the project will be made at the
sole discretion of the maintainers or their authorized delegates. | 3,543 | Markdown | 60.103447 | 117 | 0.812024 |
NVIDIA-AI-IOT/synthetic_data_generation_training_workflow/README.md | # Synthetic Data Generation and Training with Sim Ready Assets
This project provides a workflow for training computer vision models with synthetic data. We will use Isaac Sim with Omniverse Replicator to generate data for our use case and objects of interest. To ensure seamless compatibility with model training, the generated data is in the KITTI format.
These steps can be followed on a cloud/remote GPU instance or locally.
## How to use this repository
- [Guide](local/README.md) for running the workflow locally
- [Guide](cloud/README.md) for running on a cloud/remote instance
## Workflow Components:
* Generating Data: Use Isaac Sim to generate data
* Training: We will use the TAO Toolkit; however, users can train a model in a framework of their choice with the generated data
### SDG
- Using the `palletjack` assets from the Warehouse Sim Ready Asset collection
- Carry out Domain Randomization in the scene with Replicator:
- Various attributes of the scene like lighting, textures, object pose and materials can be modified
- Important to generate a good quality dataset to ensure model detects objects in the real world
- Data output in KITTI format
- We will use the KITTI Writer for generating annotations
  - Possible to implement a custom writer (useful when data is expected in a certain format for your model); a minimal sketch is included after the sample images below
- Sample generated images:
<p>
<img src="images/sample_synthetic/21.png" height="256"/>
<img src="images/sample_synthetic/653.png" height="256"/>
</p>
<p>
<img src="images/sample_synthetic/896.png" height="256"/>
<img src="images/sample_synthetic/1545.png" height="256"/>
</p>
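As a rough illustration of the custom writer mentioned above, the sketch below registers a toy writer with Replicator that saves RGB frames together with tight 2D bounding boxes as JSON. It assumes the `omni.replicator.core` custom-writer API (`Writer`, `AnnotatorRegistry`, `BackendDispatch`) of recent Isaac Sim releases; the writer name and file naming are illustrative only:

```python
import json

import omni.replicator.core as rep
from omni.replicator.core import AnnotatorRegistry, BackendDispatch, Writer


class PalletjackJSONWriter(Writer):
    """Toy writer: saves RGB frames plus tight 2D bounding boxes as JSON."""

    def __init__(self, output_dir):
        self._backend = BackendDispatch({"paths": {"out_dir": output_dir}})
        self._frame_id = 0
        # the annotators this writer consumes on every frame
        self.annotators = [
            AnnotatorRegistry.get_annotator("rgb"),
            AnnotatorRegistry.get_annotator("bounding_box_2d_tight"),
        ]

    def write(self, data):
        # save the rendered RGB frame
        self._backend.write_image(f"rgb_{self._frame_id}.png", data["rgb"])
        # save the matching bounding boxes as JSON
        bboxes = data["bounding_box_2d_tight"]["data"].tolist()
        self._backend.write_blob(f"bbox_{self._frame_id}.json",
                                 json.dumps(bboxes).encode("utf-8"))
        self._frame_id += 1


# makes the writer retrievable via rep.WriterRegistry.get("PalletjackJSONWriter")
rep.WriterRegistry.register(PalletjackJSONWriter)
```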
### Training
- TAO: Outline of steps
- Generating Tfrecords
- Model training and evaluation
    - Model backbone selection
- Hyperparameters specified via `spec` file (provided with repo)
- Running inference with trained model
- Sample real world detections on LOCO dataset images:
<p>
<img src="images/real_world_results/1564562568.298206.jpg" height="256"/>
<img src="images/real_world_results/1564562843.0618184.jpg" height="256"/>
</p>
<p>
<img src="images/real_world_results/593768,3659.jpg" height="256"/>
<img src="images/real_world_results/510196244,1362.jpg" height="256"/>
</p>
<p>
<img src="images/real_world_results/1574675156.7667925.jpg" height="256"/>
<img src="images/real_world_results/426023,9672.jpg" height="256"/>
</p>
### Deployment
- Perform Optimizations: Pruning and QAT with TAO to reduce model size and improve performance
- Deploy on an NVIDIA Jetson-powered robot with Isaac ROS or DeepStream
## References:
- Real world images from the [LOCO dataset](https://github.com/tum-fml/loco) are used for visualizing model performance
| 2,771 | Markdown | 36.972602 | 294 | 0.738001 |
NVIDIA-AI-IOT/synthetic_data_generation_training_workflow/LICENSE.md | SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: MIT
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
| 1,167 | Markdown | 54.619045 | 97 | 0.796058 |
NVIDIA-AI-IOT/synthetic_data_generation_training_workflow/cloud/README.md | # Requirements
- Access to a cloud/remote GPU instance (workflow tested on a `g4dn` AWS EC2 instance with T4 GPU)
- Docker setup instructions are provided in the notebooks
- Entire workflow can be run in `headless` mode (SDG script and training)
## Synthetic Data Generation
- Use the Isaac Sim docker container for running the Data Generation [script](../palletjack_sdg/palletjack_datagen.sh)
- We will generate data for warehouse `palletjack` objects in KITTI format
- Follow the steps in the `cloud_sdg` notebook
- This generated data can be used to train your own model (framework and architecture of your choice), in this workflow we demonstrate using TAO for training
## Training with TAO Toolkit
- The `training/cloud_train` notebook provides a walkthrough of the steps:
- Setting up TAO docker container
- Downloading pre-trained model, we will use the `DetectNet_v2` model with a `resnet_18` backbone
- Running TAO training with `spec` files provided
- Visualizing model performance on real world data
- Visualize model metric with Tensorboard
<img src="../images/tensorboard/tensorboard_resized_palletjack.png"/>
## Next steps
### Generating Synthetic Data for your use case
- Make changes in the Domain Randomization under the Synthetic Data Generation [script](../palletjack_sdg/standalone_palletjack_sdg.py) (a short Replicator sketch follows this list)
- Add additional objects of interest in the scene (similar to how palletjacks are added, you can add forklifts, ladders etc.) to generate data
- Use different models for training with TAO (for object detection, you can use YOLO, SSD or EfficientDet)
- Replicator provides Semantic Segmentation, Instance Segmentation, Depth and various other ground truth annotations along with RGB. You can also write your own ground truth annotator (e.g. Pose Estimation; refer to this [sample](https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_replicator_offline_pose_estimation.html)). These annotations can be used for training a model in a framework of your choice
- Exploring the option of using synthetic + real data for training a network can be particularly useful for covering corner cases
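
As a rough sketch of the first two points, new object classes can be added with the same Replicator calls the SDG script already uses for palletjacks. The forklift USD path below is a placeholder; replace it with a real asset from your library:

```python
import omni.replicator.core as rep

# hypothetical asset path - replace with a real forklift USD from your asset library
FORKLIFT_URL = "omniverse://localhost/NVIDIA/Assets/Forklift.usd"

# spawn two forklifts with a semantic class label so the writer annotates them
forklifts = rep.create.from_usd(FORKLIFT_URL, semantics=[("class", "forklift")], count=2)
forklift_group = rep.create.group([forklifts])

with rep.trigger.on_frame(num_frames=500):
    # randomize the pose on every frame, mirroring the palletjack randomization
    with forklift_group:
        rep.modify.pose(
            position=rep.distribution.uniform((-6, -6, 0), (6, 12, 0)),
            rotation=rep.distribution.uniform((0, 0, 0), (0, 0, 360)),
        )
```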
### Deploying Trained Models
- The trained model can be pruned and optimized for inference with TAO
- This can then be deployed on a robot with NVIDIA Jetson | 2,308 | Markdown | 66.911763 | 396 | 0.786395 |
NVIDIA-AI-IOT/synthetic_data_generation_training_workflow/local/README.md | # Requirements
- Install [Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/install_workstation.html)
- Training via TAO Toolkit Docker container (TAO setup instructions in `local_train` notebook)
## Synthetic Data Generation
- Provide the path of your Isaac Sim installation folder in the `generate_data.sh` script
- Make the script an executable after adding the Isaac Sim Path (`chmod +x generate_data.sh`)
- Run the script (`./generate_data.sh`)
- We will generate data for the `palletjack` class of objects with annotations in KITTI format
- This generated data can be used to train your own model (framework and architecture of your choice)
## Training with TAO Toolkit
- The data generated in the previous step can be directly fed to TAO for training
- The `local_train` notebook provides a walkthrough of the steps:
- Setting up TAO docker container
- Downloading pre-trained model, we will use the `DetectNet_v2` model with a `resnet_18` backbone
- Running TAO training with `spec` files provided
- Visualizing model performance on real world data
- Visualize model metric with Tensorboard
<img src="../images/tensorboard/tensorboard_resized_palletjack.png"/>
## Next steps
### Generating Synthetic Data for your use case
- Make changes in the Domain Randomization under the Synthetic Data Generation [script](../palletjack_sdg/standalone_palletjack_sdg.py)
- Add additional objects of interest in the scene (similar to how palletjacks are added, you can add forklifts, ladders etc.) to generate data
- Use different models for training with TAO (for object detection, you can use YOLO, SSD or EfficientDet)
- Replicator provides Semantic Segmentation, Instance Segmentation, Depth and various other ground truth annotations along with RGB. You can also write your own ground truth annotator (e.g. Pose Estimation; refer to this [sample](https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_replicator_offline_pose_estimation.html)). These annotations can be used for training a model in a framework of your choice (a minimal annotator sketch follows this list)
- Exploring the option of using synthetic + real data for training a network can be particularly useful for covering corner cases
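
For the annotation point above, this is a minimal sketch of attaching extra ground-truth annotators to a Replicator render product. The annotator names follow the Replicator registry, and `cam` is assumed to be the camera already created in the SDG script:

```python
import omni.replicator.core as rep

# assumes `cam` is the Replicator camera created in the SDG script
render_product = rep.create.render_product(cam, (960, 544))

# request semantic segmentation and depth in addition to RGB
seg = rep.AnnotatorRegistry.get_annotator("semantic_segmentation")
depth = rep.AnnotatorRegistry.get_annotator("distance_to_camera")
seg.attach([render_product])
depth.attach([render_product])

# after stepping the orchestrator, data can be read back as numpy arrays
rep.orchestrator.step()
seg_data = seg.get_data()      # {"data": ..., "info": {"idToLabels": ...}}
depth_data = depth.get_data()  # float32 distance-to-camera image
```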
### Deploying Trained Models
- The trained model can be pruned and optimized for inference with TAO
- This can then be deployed on a robot with NVIDIA Jetson | 2,370 | Markdown | 66.742855 | 396 | 0.7827 |
NVIDIA-AI-IOT/synthetic_data_generation_training_workflow/palletjack_sdg/standalone_palletjack_sdg.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from omni.isaac.kit import SimulationApp
import os
import argparse
parser = argparse.ArgumentParser("Dataset generator")
parser.add_argument("--headless", type=bool, default=False, help="Launch script headless, default is False")
parser.add_argument("--height", type=int, default=544, help="Height of image")
parser.add_argument("--width", type=int, default=960, help="Width of image")
parser.add_argument("--num_frames", type=int, default=1000, help="Number of frames to record")
parser.add_argument("--distractors", type=str, default="warehouse",
help="Options are 'warehouse' (default), 'additional' or None")
parser.add_argument("--data_dir", type=str, default=os.getcwd() + "/_palletjack_data",
help="Location where data will be output")
args, unknown_args = parser.parse_known_args()
# This is the config used to launch simulation.
CONFIG = {"renderer": "RayTracedLighting", "headless": args.headless,
"width": args.width, "height": args.height, "num_frames": args.num_frames}
simulation_app = SimulationApp(launch_config=CONFIG)
## This is the path which has the background scene in which objects will be added.
ENV_URL = "/Isaac/Environments/Simple_Warehouse/warehouse.usd"
import carb
import omni
import omni.usd
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import get_current_stage, open_stage
from pxr import Semantics
import omni.replicator.core as rep
from omni.isaac.core.utils.semantics import get_semantics
# Increase subframes if shadows/ghosting appears of moving objects
# See known issues: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator.html#known-issues
rep.settings.carb_settings("/omni/replicator/RTSubframes", 4)
# This is the location of the palletjacks in the simready asset library
PALLETJACKS = ["http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Equipment/Pallet_Trucks/Scale_A/PalletTruckScale_A01_PR_NVD_01.usd",
"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Equipment/Pallet_Trucks/Heavy_Duty_A/HeavyDutyPalletTruck_A01_PR_NVD_01.usd",
"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Equipment/Pallet_Trucks/Low_Profile_A/LowProfilePalletTruck_A01_PR_NVD_01.usd"]
# The warehouse distractors which will be added to the scene and randomized
DISTRACTORS_WAREHOUSE = 2 * ["/Isaac/Environments/Simple_Warehouse/Props/S_TrafficCone.usd",
"/Isaac/Environments/Simple_Warehouse/Props/S_WetFloorSign.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_A_01.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_A_02.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_A_03.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_B_01.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_B_01.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_B_03.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_C_02.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticA_02.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticB_01.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticA_02.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticA_02.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticD_01.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticE_01.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_BucketPlastic_B.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_1262.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_1268.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_1482.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_1683.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_291.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxD_01_1454.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxD_01_1513.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_A_04.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_B_03.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_B_05.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_C_02.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_E_02.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_PushcartA_02.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_RackPile_04.usd",
"/Isaac/Environments/Simple_Warehouse/Props/SM_RackPile_03.usd"]
## Additional distractors which can be added to the scene
DISTRACTORS_ADDITIONAL = ["/Isaac/Environments/Hospital/Props/Pharmacy_Low.usd",
"/Isaac/Environments/Hospital/Props/SM_BedSideTable_01b.usd",
"/Isaac/Environments/Hospital/Props/SM_BooksSet_26.usd",
"/Isaac/Environments/Hospital/Props/SM_BottleB.usd",
"/Isaac/Environments/Hospital/Props/SM_BottleA.usd",
"/Isaac/Environments/Hospital/Props/SM_BottleC.usd",
"/Isaac/Environments/Hospital/Props/SM_Cart_01a.usd",
"/Isaac/Environments/Hospital/Props/SM_Chair_02a.usd",
"/Isaac/Environments/Hospital/Props/SM_Chair_01a.usd",
"/Isaac/Environments/Hospital/Props/SM_Computer_02b.usd",
"/Isaac/Environments/Hospital/Props/SM_Desk_04a.usd",
"/Isaac/Environments/Hospital/Props/SM_DisposalStand_02.usd",
"/Isaac/Environments/Hospital/Props/SM_FirstAidKit_01a.usd",
"/Isaac/Environments/Hospital/Props/SM_GasCart_01c.usd",
"/Isaac/Environments/Hospital/Props/SM_Gurney_01b.usd",
"/Isaac/Environments/Hospital/Props/SM_HospitalBed_01b.usd",
"/Isaac/Environments/Hospital/Props/SM_MedicalBag_01a.usd",
"/Isaac/Environments/Hospital/Props/SM_Mirror.usd",
"/Isaac/Environments/Hospital/Props/SM_MopSet_01b.usd",
"/Isaac/Environments/Hospital/Props/SM_SideTable_02a.usd",
"/Isaac/Environments/Hospital/Props/SM_SupplyCabinet_01c.usd",
"/Isaac/Environments/Hospital/Props/SM_SupplyCart_01e.usd",
"/Isaac/Environments/Hospital/Props/SM_TrashCan.usd",
"/Isaac/Environments/Hospital/Props/SM_Washbasin.usd",
"/Isaac/Environments/Hospital/Props/SM_WheelChair_01a.usd",
"/Isaac/Environments/Office/Props/SM_WaterCooler.usd",
"/Isaac/Environments/Office/Props/SM_TV.usd",
"/Isaac/Environments/Office/Props/SM_TableC.usd",
"/Isaac/Environments/Office/Props/SM_Recliner.usd",
"/Isaac/Environments/Office/Props/SM_Personenleitsystem_Red1m.usd",
"/Isaac/Environments/Office/Props/SM_Lamp02_162.usd",
"/Isaac/Environments/Office/Props/SM_Lamp02.usd",
"/Isaac/Environments/Office/Props/SM_HandDryer.usd",
"/Isaac/Environments/Office/Props/SM_Extinguisher.usd"]
# The textures which will be randomized for the wall and floor
TEXTURES = ["/Isaac/Materials/Textures/Patterns/nv_asphalt_yellow_weathered.jpg",
"/Isaac/Materials/Textures/Patterns/nv_tile_hexagonal_green_white.jpg",
"/Isaac/Materials/Textures/Patterns/nv_rubber_woven_charcoal.jpg",
"/Isaac/Materials/Textures/Patterns/nv_granite_tile.jpg",
"/Isaac/Materials/Textures/Patterns/nv_tile_square_green.jpg",
"/Isaac/Materials/Textures/Patterns/nv_marble.jpg",
"/Isaac/Materials/Textures/Patterns/nv_brick_reclaimed.jpg",
"/Isaac/Materials/Textures/Patterns/nv_concrete_aged_with_lines.jpg",
"/Isaac/Materials/Textures/Patterns/nv_wooden_wall.jpg",
"/Isaac/Materials/Textures/Patterns/nv_stone_painted_grey.jpg",
"/Isaac/Materials/Textures/Patterns/nv_wood_shingles_brown.jpg",
"/Isaac/Materials/Textures/Patterns/nv_tile_hexagonal_various.jpg",
"/Isaac/Materials/Textures/Patterns/nv_carpet_abstract_pattern.jpg",
"/Isaac/Materials/Textures/Patterns/nv_wood_siding_weathered_green.jpg",
"/Isaac/Materials/Textures/Patterns/nv_animalfur_pattern_greys.jpg",
"/Isaac/Materials/Textures/Patterns/nv_artificialgrass_green.jpg",
"/Isaac/Materials/Textures/Patterns/nv_bamboo_desktop.jpg",
"/Isaac/Materials/Textures/Patterns/nv_brick_reclaimed.jpg",
"/Isaac/Materials/Textures/Patterns/nv_brick_red_stacked.jpg",
"/Isaac/Materials/Textures/Patterns/nv_fireplace_wall.jpg",
"/Isaac/Materials/Textures/Patterns/nv_fabric_square_grid.jpg",
"/Isaac/Materials/Textures/Patterns/nv_granite_tile.jpg",
"/Isaac/Materials/Textures/Patterns/nv_marble.jpg",
"/Isaac/Materials/Textures/Patterns/nv_gravel_grey_leaves.jpg",
"/Isaac/Materials/Textures/Patterns/nv_plastic_blue.jpg",
"/Isaac/Materials/Textures/Patterns/nv_stone_red_hatch.jpg",
"/Isaac/Materials/Textures/Patterns/nv_stucco_red_painted.jpg",
"/Isaac/Materials/Textures/Patterns/nv_rubber_woven_charcoal.jpg",
"/Isaac/Materials/Textures/Patterns/nv_stucco_smooth_blue.jpg",
"/Isaac/Materials/Textures/Patterns/nv_wood_shingles_brown.jpg",
"/Isaac/Materials/Textures/Patterns/nv_wooden_wall.jpg"]
def update_semantics(stage, keep_semantics=[]):
""" Remove semantics from the stage except for keep_semantic classes"""
for prim in stage.Traverse():
if prim.HasAPI(Semantics.SemanticsAPI):
processed_instances = set()
            for prop in prim.GetProperties():
                is_semantic = Semantics.SemanticsAPI.IsSemanticsAPIPath(prop.GetPath())
                if is_semantic:
                    instance_name = prop.SplitName()[1]
if instance_name in processed_instances:
# Skip repeated instance, instances are iterated twice due to their two semantic properties (class, data)
continue
processed_instances.add(instance_name)
sem = Semantics.SemanticsAPI.Get(prim, instance_name)
type_attr = sem.GetSemanticTypeAttr()
data_attr = sem.GetSemanticDataAttr()
                    # keep semantics only for the data classes needed by the model
                    if data_attr.Get() not in keep_semantics:
                        # remove semantics of all other prims
                        prim.RemoveProperty(type_attr.GetName())
                        prim.RemoveProperty(data_attr.GetName())
                        prim.RemoveAPI(Semantics.SemanticsAPI, instance_name)
# needed for loading textures correctly
def prefix_with_isaac_asset_server(relative_path):
assets_root_path = get_assets_root_path()
if assets_root_path is None:
raise Exception("Nucleus server not found, could not access Isaac Sim assets folder")
return assets_root_path + relative_path
def full_distractors_list(distractor_type="warehouse"):
"""Distractor type allowed are warehouse, additional or None. They load corresponding objects and add
them to the scene for DR"""
full_dist_list = []
if distractor_type == "warehouse":
for distractor in DISTRACTORS_WAREHOUSE:
full_dist_list.append(prefix_with_isaac_asset_server(distractor))
elif distractor_type == "additional":
for distractor in DISTRACTORS_ADDITIONAL:
full_dist_list.append(prefix_with_isaac_asset_server(distractor))
else:
print("No Distractors being added to the current scene for SDG")
return full_dist_list
def full_textures_list():
full_tex_list = []
for texture in TEXTURES:
full_tex_list.append(prefix_with_isaac_asset_server(texture))
return full_tex_list
def add_palletjacks():
rep_obj_list = [rep.create.from_usd(palletjack_path, semantics=[("class", "palletjack")], count=2) for palletjack_path in PALLETJACKS]
rep_palletjack_group = rep.create.group(rep_obj_list)
return rep_palletjack_group
def add_distractors(distractor_type="warehouse"):
full_distractors = full_distractors_list(distractor_type)
distractors = [rep.create.from_usd(distractor_path, count=1) for distractor_path in full_distractors]
distractor_group = rep.create.group(distractors)
return distractor_group
# This will handle replicator
def run_orchestrator():
rep.orchestrator.run()
# Wait until started
while not rep.orchestrator.get_is_started():
simulation_app.update()
# Wait until stopped
while rep.orchestrator.get_is_started():
simulation_app.update()
rep.BackendDispatch.wait_until_done()
rep.orchestrator.stop()
def main():
# Open the environment in a new stage
print(f"Loading Stage {ENV_URL}")
open_stage(prefix_with_isaac_asset_server(ENV_URL))
stage = get_current_stage()
# Run some app updates to make sure things are properly loaded
for i in range(100):
if i % 10 == 0:
print(f"App uppdate {i}..")
simulation_app.update()
textures = full_textures_list()
rep_palletjack_group = add_palletjacks()
rep_distractor_group = add_distractors(distractor_type=args.distractors)
# We only need labels for the palletjack objects
update_semantics(stage=stage, keep_semantics=["palletjack"])
# Create camera with Replicator API for gathering data
cam = rep.create.camera(clipping_range=(0.1, 1000000))
# trigger replicator pipeline
with rep.trigger.on_frame(num_frames=CONFIG["num_frames"]):
# Move the camera around in the scene, focus on the center of warehouse
with cam:
rep.modify.pose(position=rep.distribution.uniform((-9.2, -11.8, 0.4), (7.2, 15.8, 4)),
look_at=(0, 0, 0))
# Get the Palletjack body mesh and modify its color
with rep.get.prims(path_pattern="SteerAxles"):
rep.randomizer.color(colors=rep.distribution.uniform((0, 0, 0), (1, 1, 1)))
# Randomize the pose of all the added palletjacks
with rep_palletjack_group:
rep.modify.pose(position=rep.distribution.uniform((-6, -6, 0), (6, 12, 0)),
rotation=rep.distribution.uniform((0, 0, 0), (0, 0, 360)),
scale=rep.distribution.uniform((0.01, 0.01, 0.01), (0.01, 0.01, 0.01)))
# Modify the pose of all the distractors in the scene
with rep_distractor_group:
rep.modify.pose(position=rep.distribution.uniform((-6, -6, 0), (6, 12, 0)),
rotation=rep.distribution.uniform((0, 0, 0), (0, 0, 360)),
scale=rep.distribution.uniform(1, 1.5))
# Randomize the lighting of the scene
with rep.get.prims(path_pattern="RectLight"):
rep.modify.attribute("color", rep.distribution.uniform((0, 0, 0), (1, 1, 1)))
rep.modify.attribute("intensity", rep.distribution.normal(100000.0, 600000.0))
rep.modify.visibility(rep.distribution.choice([True, False, False, False, False, False, False]))
# select floor material
random_mat_floor = rep.create.material_omnipbr(diffuse_texture=rep.distribution.choice(textures),
roughness=rep.distribution.uniform(0, 1),
metallic=rep.distribution.choice([0, 1]),
emissive_texture=rep.distribution.choice(textures),
emissive_intensity=rep.distribution.uniform(0, 1000),)
with rep.get.prims(path_pattern="SM_Floor"):
rep.randomizer.materials(random_mat_floor)
# select random wall material
random_mat_wall = rep.create.material_omnipbr(diffuse_texture=rep.distribution.choice(textures),
roughness=rep.distribution.uniform(0, 1),
metallic=rep.distribution.choice([0, 1]),
emissive_texture=rep.distribution.choice(textures),
emissive_intensity=rep.distribution.uniform(0, 1000),)
with rep.get.prims(path_pattern="SM_Wall"):
rep.randomizer.materials(random_mat_wall)
# Set up the writer
writer = rep.WriterRegistry.get("KittiWriter")
# output directory of writer
output_directory = args.data_dir
print("Outputting data to ", output_directory)
# use writer for bounding boxes, rgb and segmentation
writer.initialize(output_dir=output_directory,
omit_semantic_type=True,)
    # attach camera render product to the writer so that data is output
RESOLUTION = (CONFIG["width"], CONFIG["height"])
render_product = rep.create.render_product(cam, RESOLUTION)
writer.attach(render_product)
# run rep pipeline
run_orchestrator()
simulation_app.update()
if __name__ == "__main__":
try:
main()
except Exception as e:
carb.log_error(f"Exception: {e}")
import traceback
traceback.print_exc()
finally:
simulation_app.close()
| 20,199 | Python | 52.439153 | 191 | 0.634388 |
abizovnuralem/go2_omniverse/terrain_cfg.py | # Copyright (c) 2024, RoboVerse community
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from terrain_generator_cfg import TerrainGeneratorCfg
import omni.isaac.orbit.terrains as terrain_gen
ROUGH_TERRAINS_CFG = TerrainGeneratorCfg(
size=(8.0, 8.0),
border_width=0.0,
num_rows=1,
num_cols=2,
horizontal_scale=0.1,
vertical_scale=0.005,
slope_threshold=0.75,
use_cache=False,
sub_terrains={
"pyramid_stairs": terrain_gen.MeshPyramidStairsTerrainCfg(
proportion=0.2,
step_height_range=(0.05, 0.23),
step_width=0.3,
platform_width=3.0,
border_width=1.0,
holes=False,
),
"pyramid_stairs_inv": terrain_gen.MeshInvertedPyramidStairsTerrainCfg(
proportion=0.2,
step_height_range=(0.05, 0.23),
step_width=0.3,
platform_width=3.0,
border_width=1.0,
holes=False,
),
},
) | 2,217 | Python | 38.607142 | 80 | 0.700947 |
abizovnuralem/go2_omniverse/agent_cfg.py | # Copyright (c) 2024, RoboVerse community
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
unitree_go2_agent_cfg = {
'seed': 42,
'device': 'cuda',
'num_steps_per_env': 24,
'max_iterations': 15000,
'empirical_normalization': False,
'policy': {
'class_name': 'ActorCritic',
'init_noise_std': 1.0,
'actor_hidden_dims': [512, 256, 128],
'critic_hidden_dims': [512, 256, 128],
'activation': 'elu'
},
'algorithm': {
'class_name': 'PPO',
'value_loss_coef': 1.0,
'use_clipped_value_loss': True,
'clip_param': 0.2,
'entropy_coef': 0.01,
'num_learning_epochs': 5,
'num_mini_batches': 4,
'learning_rate': 0.001,
'schedule': 'adaptive',
'gamma': 0.99,
'lam': 0.95,
'desired_kl': 0.01,
'max_grad_norm': 1.0
},
'save_interval': 50,
'experiment_name': 'unitree_go2_rough',
'run_name': '',
'logger': 'tensorboard',
'neptune_project': 'orbit',
'wandb_project': 'orbit',
'resume': False,
'load_run': '.*',
'load_checkpoint': 'model_.*.pt'
} | 2,562 | Python | 40.338709 | 80 | 0.613193 |
abizovnuralem/go2_omniverse/terrain_generator_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Configuration classes defining the different terrains available. Each configuration class must
inherit from ``omni.isaac.orbit.terrains.terrains_cfg.TerrainConfig`` and define the following attributes:
- ``name``: Name of the terrain. This is used for the prim name in the USD stage.
- ``function``: Function to generate the terrain. This function must take as input the terrain difficulty
  and the configuration parameters and return a tuple with a list of ``trimesh`` mesh objects and the terrain origin.
"""
from __future__ import annotations
import numpy as np
import trimesh
from collections.abc import Callable
from dataclasses import MISSING
from typing import Literal
from omni.isaac.orbit.utils import configclass
@configclass
class FlatPatchSamplingCfg:
"""Configuration for sampling flat patches on the sub-terrain.
For a given sub-terrain, this configuration specifies how to sample flat patches on the terrain.
The sampled flat patches can be used for spawning robots, targets, etc.
Please check the function :meth:`~omni.isaac.orbit.terrains.utils.find_flat_patches` for more details.
"""
num_patches: int = MISSING
"""Number of patches to sample."""
patch_radius: float | list[float] = MISSING
"""Radius of the patches.
A list of radii can be provided to check for patches of different sizes. This is useful to deal with
cases where the terrain may have holes or obstacles in some areas.
"""
x_range: tuple[float, float] = (-1e6, 1e6)
"""The range of x-coordinates to sample from. Defaults to (-1e6, 1e6).
This range is internally clamped to the size of the terrain mesh.
"""
y_range: tuple[float, float] = (-1e6, 1e6)
"""The range of y-coordinates to sample from. Defaults to (-1e6, 1e6).
This range is internally clamped to the size of the terrain mesh.
"""
z_range: tuple[float, float] = (-1e6, 1e6)
"""Allowed range of z-coordinates for the sampled patch. Defaults to (-1e6, 1e6)."""
max_height_diff: float = MISSING
"""Maximum allowed height difference between the highest and lowest points on the patch."""
@configclass
class SubTerrainBaseCfg:
"""Base class for terrain configurations.
All the sub-terrain configurations must inherit from this class.
The :attr:`size` attribute is the size of the generated sub-terrain. Based on this, the terrain must
extend from :math:`(0, 0)` to :math:`(size[0], size[1])`.
"""
function: Callable[[float, SubTerrainBaseCfg], tuple[list[trimesh.Trimesh], np.ndarray]] = MISSING
"""Function to generate the terrain.
This function must take as input the terrain difficulty and the configuration parameters and
return a tuple with a list of ``trimesh`` mesh objects and the terrain origin.
"""
proportion: float = 1.0
"""Proportion of the terrain to generate. Defaults to 1.0.
This is used to generate a mix of terrains. The proportion corresponds to the probability of sampling
the particular terrain. For example, if there are two terrains, A and B, with proportions 0.3 and 0.7,
respectively, then the probability of sampling terrain A is 0.3 and the probability of sampling terrain B
is 0.7.
"""
size: tuple[float, float] = MISSING
"""The width (along x) and length (along y) of the terrain (in m)."""
flat_patch_sampling: dict[str, FlatPatchSamplingCfg] | None = None
"""Dictionary of configurations for sampling flat patches on the sub-terrain. Defaults to None,
in which case no flat patch sampling is performed.
The keys correspond to the name of the flat patch sampling configuration and the values are the
corresponding configurations.
"""
@configclass
class TerrainGeneratorCfg:
"""Configuration for the terrain generator."""
seed: int | None = None
"""The seed for the random number generator. Defaults to None,
in which case the seed is not set."""
curriculum: bool = False
"""Whether to use the curriculum mode. Defaults to False.
If True, the terrains are generated based on their difficulty parameter. Otherwise,
they are randomly generated.
"""
size: tuple[float, float] = MISSING
"""The width (along x) and length (along y) of each sub-terrain (in m).
Note:
This value is passed on to all the sub-terrain configurations.
"""
border_width: float = 0.0
"""The width of the border around the terrain (in m). Defaults to 0.0."""
num_rows: int = 1
"""Number of rows of sub-terrains to generate. Defaults to 1."""
num_cols: int = 1
"""Number of columns of sub-terrains to generate. Defaults to 1."""
color_scheme: Literal["height", "random", "none"] = "none"
"""Color scheme to use for the terrain. Defaults to "none".
The available color schemes are:
- "height": Color based on the height of the terrain.
- "random": Random color scheme.
- "none": No color scheme.
"""
horizontal_scale: float = 0.1
"""The discretization of the terrain along the x and y axes (in m). Defaults to 0.1.
This value is passed on to all the height field sub-terrain configurations.
"""
vertical_scale: float = 0.005
"""The discretization of the terrain along the z axis (in m). Defaults to 0.005.
This value is passed on to all the height field sub-terrain configurations.
"""
slope_threshold: float | None = 0.75
"""The slope threshold above which surfaces are made vertical. Defaults to 0.75.
    If None, no correction is applied.
This value is passed on to all the height field sub-terrain configurations.
"""
sub_terrains: dict[str, SubTerrainBaseCfg] = MISSING
"""Dictionary of sub-terrain configurations.
The keys correspond to the name of the sub-terrain configuration and the values are the corresponding
configurations.
"""
difficulty_range: tuple[float, float] = (0.0, 1.0)
"""The range of difficulty values for the sub-terrains. Defaults to (0.0, 1.0).
If curriculum is enabled, the terrains will be generated based on this range in ascending order
of difficulty. Otherwise, the terrains will be generated based on this range in a random order.
"""
use_cache: bool = False
"""Whether to load the terrain from cache if it exists. Defaults to True."""
cache_dir: str = "/tmp/orbit/terrains"
"""The directory where the terrain cache is stored. Defaults to "/tmp/orbit/terrains"."""
| 6,616 | Python | 35.15847 | 109 | 0.702086 |
abizovnuralem/go2_omniverse/main.py | # Copyright (c) 2024, RoboVerse community
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script to play a checkpoint if an RL agent from RSL-RL."""
from __future__ import annotations
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.orbit.app import AppLauncher
# local imports
import cli_args # isort: skip
# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default="Isaac-Velocity-Rough-Unitree-Go2-v0", help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
import omni
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros2_bridge", True)
"""Rest everything follows."""
import os
import math
import gymnasium as gym
import torch
import carb
import usdrt.Sdf
from omni.isaac.orbit_tasks.utils import get_checkpoint_path
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlVecEnvWrapper
)
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_assets.unitree import UNITREE_GO2_CFG
from omni.isaac.orbit.envs import RLTaskEnvCfg
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.managers import CurriculumTermCfg as CurrTerm
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sensors import ContactSensorCfg, RayCasterCfg, patterns, CameraCfg
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.utils.noise import AdditiveUniformNoiseCfg as Unoise
import omni.isaac.orbit_tasks.locomotion.velocity.mdp as mdp
import omni.appwindow # Contains handle to keyboard
from rsl_rl.runners import OnPolicyRunner
from typing import Literal
from dataclasses import MISSING
from omnigraph import create_front_cam_omnigraph
from agent_cfg import unitree_go2_agent_cfg
from terrain_cfg import ROUGH_TERRAINS_CFG
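# teleop command [vx, vy, yaw rate], updated by the keyboard callback below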
base_command = [0, 0, 0]
@configclass
class MySceneCfg(InteractiveSceneCfg):
"""Configuration for the terrain scene with a legged robot."""
# ground terrain
terrain = TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="generator",
terrain_generator=ROUGH_TERRAINS_CFG,
max_init_terrain_level=5,
collision_group=-1,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
),
visual_material=sim_utils.MdlFileCfg(
mdl_path="{NVIDIA_NUCLEUS_DIR}/Materials/Base/Architecture/Shingles_01.mdl",
project_uvw=True,
),
debug_vis=False,
)
# robots
robot: ArticulationCfg = MISSING
# sensors
camera = CameraCfg(
prim_path="{ENV_REGEX_NS}/Robot/base/front_cam",
update_period=0.1,
height=480,
width=640,
data_types=["rgb", "distance_to_image_plane"],
spawn=sim_utils.PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
),
offset=CameraCfg.OffsetCfg(pos=(0.510, 0.0, 0.015), rot=(0.5, -0.5, 0.5, -0.5), convention="ros"),
)
height_scanner = RayCasterCfg(
prim_path="{ENV_REGEX_NS}/Robot/base",
offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
attach_yaw_only=True,
pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
debug_vis=False,
mesh_prim_paths=["/World/ground"],
)
contact_forces = ContactSensorCfg(prim_path="{ENV_REGEX_NS}/Robot/.*", history_length=3, track_air_time=True)
# lights
light = AssetBaseCfg(
prim_path="/World/light",
spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
)
sky_light = AssetBaseCfg(
prim_path="/World/skyLight",
spawn=sim_utils.DomeLightCfg(color=(0.13, 0.13, 0.13), intensity=1000.0),
)
def constant_commands(env: RLTaskEnvCfg) -> torch.Tensor:
    """The generated command from the command generator."""
    global base_command
    return torch.tensor([base_command], device=env.device).repeat(env.num_envs, 1)
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@configclass
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
# observation terms (order preserved)
base_lin_vel = ObsTerm(func=mdp.base_lin_vel)
base_ang_vel = ObsTerm(func=mdp.base_ang_vel)
projected_gravity = ObsTerm(
func=mdp.projected_gravity,
noise=Unoise(n_min=-0.05, n_max=0.05),
)
velocity_commands = ObsTerm(func=constant_commands)
joint_pos = ObsTerm(func=mdp.joint_pos_rel)
joint_vel = ObsTerm(func=mdp.joint_vel_rel)
actions = ObsTerm(func=mdp.last_action)
height_scan = ObsTerm(
func=mdp.height_scan,
params={"sensor_cfg": SceneEntityCfg("height_scanner")},
clip=(-1.0, 1.0),
)
def __post_init__(self):
self.enable_corruption = True
self.concatenate_terms = True
# observation groups
policy: PolicyCfg = PolicyCfg()
@configclass
class ActionsCfg:
"""Action specifications for the MDP."""
joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True)
@configclass
class CommandsCfg:
"""Command specifications for the MDP."""
base_velocity = mdp.UniformVelocityCommandCfg(
asset_name="robot",
resampling_time_range=(0.0, 0.0),
rel_standing_envs=0.02,
rel_heading_envs=1.0,
heading_command=True,
heading_control_stiffness=0.5,
debug_vis=True,
ranges=mdp.UniformVelocityCommandCfg.Ranges(
lin_vel_x=(0.0, 0.0), lin_vel_y=(0.0, 0.0), ang_vel_z=(0.0, 0.0), heading=(0, 0)
),
)
@configclass
class RewardsCfg:
"""Reward terms for the MDP."""
# -- task
track_lin_vel_xy_exp = RewTerm(
func=mdp.track_lin_vel_xy_exp, weight=1.0, params={"command_name": "base_velocity", "std": math.sqrt(0.25)}
)
track_ang_vel_z_exp = RewTerm(
func=mdp.track_ang_vel_z_exp, weight=0.5, params={"command_name": "base_velocity", "std": math.sqrt(0.25)}
)
# -- penalties
lin_vel_z_l2 = RewTerm(func=mdp.lin_vel_z_l2, weight=-2.0)
ang_vel_xy_l2 = RewTerm(func=mdp.ang_vel_xy_l2, weight=-0.05)
dof_torques_l2 = RewTerm(func=mdp.joint_torques_l2, weight=-1.0e-5)
dof_acc_l2 = RewTerm(func=mdp.joint_acc_l2, weight=-2.5e-7)
action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-0.01)
feet_air_time = RewTerm(
func=mdp.feet_air_time,
weight=0.125,
params={
"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*FOOT"),
"command_name": "base_velocity",
"threshold": 0.5,
},
)
undesired_contacts = RewTerm(
func=mdp.undesired_contacts,
weight=-1.0,
params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*THIGH"), "threshold": 1.0},
)
# -- optional penalties
flat_orientation_l2 = RewTerm(func=mdp.flat_orientation_l2, weight=0.0)
dof_pos_limits = RewTerm(func=mdp.joint_pos_limits, weight=0.0)
@configclass
class TerminationsCfg:
"""Termination terms for the MDP."""
time_out = DoneTerm(func=mdp.time_out, time_out=True)
base_contact = DoneTerm(
func=mdp.illegal_contact,
params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names="base"), "threshold": 1.0},
)
@configclass
class EventCfg:
"""Configuration for events."""
# startup
physics_material = EventTerm(
func=mdp.randomize_rigid_body_material,
mode="startup",
params={
"asset_cfg": SceneEntityCfg("robot", body_names=".*"),
"static_friction_range": (0.8, 0.8),
"dynamic_friction_range": (0.6, 0.6),
"restitution_range": (0.0, 0.0),
"num_buckets": 64,
},
)
@configclass
class CurriculumCfg:
"""Curriculum terms for the MDP."""
terrain_levels = CurrTerm(func=mdp.terrain_levels_vel)
@configclass
class ViewerCfg:
"""Configuration of the scene viewport camera."""
eye: tuple[float, float, float] = (7.5, 7.5, 7.5)
lookat: tuple[float, float, float] = (0.0, 0.0, 0.0)
cam_prim_path: str = "/OmniverseKit_Persp"
resolution: tuple[int, int] = (1920, 1080)
origin_type: Literal["world", "env", "asset_root"] = "world"
env_index: int = 0
asset_name: str | None = None
@configclass
class LocomotionVelocityRoughEnvCfg(RLTaskEnvCfg):
"""Configuration for the locomotion velocity-tracking environment."""
# Scene settings
scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=2.5)
viewer: ViewerCfg = ViewerCfg()
# Basic settings
observations: ObservationsCfg = ObservationsCfg()
actions: ActionsCfg = ActionsCfg()
commands: CommandsCfg = CommandsCfg()
# MDP settings
rewards: RewardsCfg = RewardsCfg()
terminations: TerminationsCfg = TerminationsCfg()
events: EventCfg = EventCfg()
curriculum: CurriculumCfg = CurriculumCfg()
def __post_init__(self):
"""Post initialization."""
# general settings
self.decimation = 4
self.episode_length_s = 20.0
# simulation settings
self.sim.dt = 0.005
self.sim.disable_contact_processing = True
self.sim.physics_material = self.scene.terrain.physics_material
# update sensor update periods
# we tick all the sensors based on the smallest update period (physics update period)
if self.scene.height_scanner is not None:
self.scene.height_scanner.update_period = self.decimation * self.sim.dt
if self.scene.contact_forces is not None:
self.scene.contact_forces.update_period = self.sim.dt
# check if terrain levels curriculum is enabled - if so, enable curriculum for terrain generator
# this generates terrains with increasing difficulty and is useful for training
if getattr(self.curriculum, "terrain_levels", None) is not None:
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.curriculum = True
else:
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.curriculum = False
@configclass
class UnitreeGo2RoughEnvCfg(LocomotionVelocityRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
self.scene.robot = UNITREE_GO2_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/base"
# reduce action scale
self.actions.joint_pos.scale = 0.25
# rewards
self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot"
self.rewards.feet_air_time.weight = 0.01
self.rewards.undesired_contacts = None
self.rewards.dof_torques_l2.weight = -0.0002
self.rewards.track_lin_vel_xy_exp.weight = 1.5
self.rewards.track_ang_vel_z_exp.weight = 0.75
self.rewards.dof_acc_l2.weight = -2.5e-7
# terminations
self.terminations.base_contact.params["sensor_cfg"].body_names = "base"
        # create ROS 2 camera stream omnigraph
create_front_cam_omnigraph()
def sub_keyboard_event(event, *args, **kwargs) -> bool:
global base_command
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
if event.input.name == 'W':
base_command = [1, 0, 0]
if event.input.name == 'S':
base_command = [-1, 0, 0]
if event.input.name == 'A':
base_command = [0, 1, 0]
if event.input.name == 'D':
base_command = [0, -1, 0]
if event.input.name == 'Q':
base_command = [0, 0, 1]
if event.input.name == 'E':
base_command = [0, 0, -1]
elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
base_command = [0, 0, 0]
return True
def main():
# acquire input interface
_input = carb.input.acquire_input_interface()
_appwindow = omni.appwindow.get_default_app_window()
_keyboard = _appwindow.get_keyboard()
_sub_keyboard = _input.subscribe_to_keyboard_events(_keyboard, sub_keyboard_event)
"""Play with RSL-RL agent."""
# parse configuration
env_cfg = UnitreeGo2RoughEnvCfg()
env_cfg.scene.num_envs = 1
agent_cfg: RslRlOnPolicyRunnerCfg = unitree_go2_agent_cfg
# create isaac environment
env = gym.make(args_cli.task, cfg=env_cfg)
# wrap around environment for rsl-rl
env = RslRlVecEnvWrapper(env)
# specify directory for logging experiments
log_root_path = os.path.join("logs", "rsl_rl", agent_cfg["experiment_name"])
log_root_path = os.path.abspath(log_root_path)
print(f"[INFO] Loading experiment from directory: {log_root_path}")
resume_path = get_checkpoint_path(log_root_path, agent_cfg["load_run"], agent_cfg["load_checkpoint"])
print(f"[INFO]: Loading model checkpoint from: {resume_path}")
# load previously trained model
    ppo_runner = OnPolicyRunner(env, agent_cfg, log_dir=None, device=agent_cfg["device"])
    ppo_runner.load(resume_path)
# obtain the trained policy for inference
policy = ppo_runner.get_inference_policy(device=env.unwrapped.device)
# reset environment
obs, _ = env.get_observations()
# simulate environment
while simulation_app.is_running():
# run everything in inference mode
with torch.inference_mode():
# agent stepping
actions = policy(obs)
# env stepping
obs, _, _, _ = env.step(actions)
# close the simulator
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close() | 16,627 | Python | 34.529914 | 118 | 0.669333 |
abizovnuralem/go2_omniverse/omnigraph.py | # Copyright (c) 2024, RoboVerse community
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import omni
import omni.graph.core as og
def create_front_cam_omnigraph():
"""Define the OmniGraph for the Isaac Sim environment."""
keys = og.Controller.Keys
    graph_path = "/ROS_front_cam"
(camera_graph, _, _, _) = og.Controller.edit(
{
"graph_path": graph_path,
"evaluator_name": "execution",
"pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_SIMULATION,
},
{
keys.CREATE_NODES: [
("OnPlaybackTick", "omni.graph.action.OnPlaybackTick"),
("IsaacCreateRenderProduct", "omni.isaac.core_nodes.IsaacCreateRenderProduct"),
("ROS2CameraHelper", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
],
keys.SET_VALUES: [
("IsaacCreateRenderProduct.inputs:cameraPrim", "/World/envs/env_0/Robot/base/front_cam"),
("IsaacCreateRenderProduct.inputs:enabled", True),
("ROS2CameraHelper.inputs:type", "rgb"),
("ROS2CameraHelper.inputs:topicName", "unitree_go2/front_cam/rgb"),
("ROS2CameraHelper.inputs:frameId", "unitree_go2"),
],
keys.CONNECT: [
("OnPlaybackTick.outputs:tick", "IsaacCreateRenderProduct.inputs:execIn"),
("IsaacCreateRenderProduct.outputs:execOut", "ROS2CameraHelper.inputs:execIn"),
("IsaacCreateRenderProduct.outputs:renderProductPath", "ROS2CameraHelper.inputs:renderProductPath"),
],
},
) | 2,912 | Python | 45.238095 | 116 | 0.67239 |
abizovnuralem/go2_omniverse/cli_args.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import argparse
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import RslRlOnPolicyRunnerCfg
def add_rsl_rl_args(parser: argparse.ArgumentParser):
"""Add RSL-RL arguments to the parser.
Args:
parser: The parser to add the arguments to.
"""
# create a new argument group
arg_group = parser.add_argument_group("rsl_rl", description="Arguments for RSL-RL agent.")
# -- experiment arguments
arg_group.add_argument(
"--experiment_name", type=str, default=None, help="Name of the experiment folder where logs will be stored."
)
arg_group.add_argument("--run_name", type=str, default=None, help="Run name suffix to the log directory.")
# -- load arguments
arg_group.add_argument("--resume", type=bool, default=None, help="Whether to resume from a checkpoint.")
arg_group.add_argument("--load_run", type=str, default=None, help="Name of the run folder to resume from.")
arg_group.add_argument("--checkpoint", type=str, default=None, help="Checkpoint file to resume from.")
# -- logger arguments
arg_group.add_argument(
"--logger", type=str, default=None, choices={"wandb", "tensorboard", "neptune"}, help="Logger module to use."
)
arg_group.add_argument(
"--log_project_name", type=str, default=None, help="Name of the logging project when using wandb or neptune."
)
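# A minimal usage sketch (not part of the original file); the task name and the extra
# --seed argument are illustrative assumptions:
#   parser = argparse.ArgumentParser()
#   parser.add_argument("--seed", type=int, default=None)
#   add_rsl_rl_args(parser)
#   args_cli = parser.parse_args()
#   agent_cfg = parse_rsl_rl_cfg("Isaac-Velocity-Rough-Unitree-Go2-v0", args_cli)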
def parse_rsl_rl_cfg(task_name: str, args_cli: argparse.Namespace) -> RslRlOnPolicyRunnerCfg:
"""Parse configuration for RSL-RL agent based on inputs.
Args:
task_name: The name of the environment.
args_cli: The command line arguments.
Returns:
The parsed configuration for RSL-RL agent based on inputs.
"""
from omni.isaac.orbit_tasks.utils.parse_cfg import load_cfg_from_registry
# load the default configuration
rslrl_cfg: RslRlOnPolicyRunnerCfg = load_cfg_from_registry(task_name, "rsl_rl_cfg_entry_point")
# override the default configuration with CLI arguments
if args_cli.seed is not None:
rslrl_cfg.seed = args_cli.seed
if args_cli.resume is not None:
rslrl_cfg.resume = args_cli.resume
if args_cli.load_run is not None:
rslrl_cfg.load_run = args_cli.load_run
if args_cli.checkpoint is not None:
rslrl_cfg.load_checkpoint = args_cli.checkpoint
if args_cli.run_name is not None:
rslrl_cfg.run_name = args_cli.run_name
if args_cli.logger is not None:
rslrl_cfg.logger = args_cli.logger
# set the project name for wandb and neptune
if rslrl_cfg.logger in {"wandb", "neptune"} and args_cli.log_project_name:
rslrl_cfg.wandb_project = args_cli.log_project_name
rslrl_cfg.neptune_project = args_cli.log_project_name
return rslrl_cfg
| 2,981 | Python | 38.759999 | 117 | 0.688695 |
abizovnuralem/go2_omniverse/README.md | # Welcome to the Unitree Go2 Omniverse Project!
I am thrilled to announce that the Unitree Go2 robot has been integrated with Nvidia Isaac Sim (Orbit), marking a major step forward in robotics research and development. The combination of these two cutting-edge technologies opens up a world of possibilities for creating and testing algorithms in a variety of simulated environments.
Get ready to take your research to the next level with this powerful new resource at your fingertips!
Real-time Go2 balancing:
<p align="center">
<img width="1280" height="600" src="https://github.com/abizovnuralem/go2_omniverse/assets/33475993/60c2233a-7586-49b6-a134-a7bddc4dd9ae" alt='Go2'>
</p>
Go2 ROS2 camera stream:
<p align="center">
<img width="1200" height="440" src="https://github.com/abizovnuralem/go2_omniverse/assets/33475993/c740147b-ce00-4d7c-94de-0140be135e3e" alt='Go2'>
</p>
## Project Roadmap
1. PPO balancing algorithm :white_check_mark:
2. Keyboard real time control :white_check_mark:
3. Camera stream to ROS2 :white_check_mark:
4. Lidar stream to ROS2
5. IMU data stream to ROS2
6. URDF real-time joint sync
## Your feedback and support mean the world to us.
If you're as enthusiastic about this project as we are, please consider giving it a :star: star on our GitHub repository.
Your encouragement fuels our passion and helps us develop our roadmap further. We welcome any help or suggestions you can offer!
Together, let's push the boundaries of what's possible with the Unitree Go2 and ROS2!
## System requirements
You need Ubuntu 20.04 with Nvidia Isaac Sim and Nvidia Orbit installed.
The full installation instructions:
```
https://isaac-orbit.github.io/orbit/source/setup/installation.html
```
Also, you need to install ROS2 on your system and configure it:
```
https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_ros.html#isaac-sim-app-install-ros
```
## Usage
From inside the repo folder, run:
```
conda activate orbit
python main.py
```
## Development
To contribute or modify the project, start from the setup resources above when implementing additional features or improving the existing codebase. PRs are welcome!
## License
This project is licensed under the BSD 2-clause License - see the [LICENSE](https://github.com/abizovnuralem/go2_omniverse/blob/master/LICENSE) file for details.
| 2,341 | Markdown | 33.441176 | 343 | 0.773601 |
abizovnuralem/go2_omniverse/logs/rsl_rl/unitree_go2_rough/2024-04-06_02-37-07/params/agent.yaml | seed: 42
device: cuda
num_steps_per_env: 24
max_iterations: 15000
empirical_normalization: false
policy:
class_name: ActorCritic
init_noise_std: 1.0
actor_hidden_dims:
- 512
- 256
- 128
critic_hidden_dims:
- 512
- 256
- 128
activation: elu
algorithm:
class_name: PPO
value_loss_coef: 1.0
use_clipped_value_loss: true
clip_param: 0.2
entropy_coef: 0.01
num_learning_epochs: 5
num_mini_batches: 4
learning_rate: 0.001
schedule: adaptive
gamma: 0.99
lam: 0.95
desired_kl: 0.01
max_grad_norm: 1.0
save_interval: 50
experiment_name: unitree_go2_rough
run_name: ''
logger: tensorboard
neptune_project: orbit
wandb_project: orbit
resume: false
load_run: .*
load_checkpoint: model_.*.pt
| 727 | YAML | 16.756097 | 34 | 0.700138 |
PatrickPalmer/Omniverse-Connect-cmake/README.md | # Omniverse Connector Sample using CMake Build generator
NVidia provided [instructions](https://forums.developer.nvidia.com/t/creating-an-omniverse-usd-app-from-the-connect-sample/189557) for hand-wiring the Omniverse Connect Sample into a Visual Studio project. For more structured C++ projects, cmake is common, so this repo codifies the steps in the NVidia document into a cmake project. It should be considered a lightweight, simple integration, though, and not the level you'd expect if NVidia USD were packaged for distribution: a proper USD CMake module would use modern CMake, with optional loading of USD components and target properties. But this is enough to get started. Currently hardwired to Connect Sample v200.0.0.
## Setup
* Windows 10.
* Visual Studio 2019.
* cmake v3.21 or greater.
* NVidia Omniverse with the Connect Sample installed locally.
  * Hardwired to version 200.0.0.
  * Installed in the default local user's home directory in %LOCALAPPDATA%/ov/pkg.
  * Run build.bat in the Connect Sample directory to download the required header and library files for Omniverse Client and USD.
## Build
```
mkdir build
cd build
cmake -G "Visual Studio 16 2019" -A x64 ..
```
NVidia suggests copying the NVidia USD and Omniverse Client libraries locally. By default, this isn't done; to enable it, add the COPY_CONNECT_LOCALLY option to the cmake invocation to copy the libraries into the build deps directory.
```
cmake -G "Visual Studio 16 2019" -A x64 -DCOPY_CONNECT_LOCALLY=ON ..
```
If the Omniverse Client libraries are not installed in the default location of %LOCALAPPDATA%\ov\pkg, set the OmniverseConnectSample_ROOT variable.
```
cmake -G "Visual Studio 16 2019" -A x64 -DOmniverseConnectSample_ROOT=D:/Omniverse/Library/connectsample-200.0.0 ..
```
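Once built, the sample app takes a single USD stage URL argument. A minimal sketch, assuming the CMake target is named SimpleApp; the stage URL is illustrative and requires a running Nucleus server:
```
SimpleApp.exe omniverse://localhost/Users/test/helloworld.usd
```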
## Reference
* https://forums.developer.nvidia.com/t/creating-an-omniverse-usd-app-from-the-connect-sample/189557
| 1,907 | Markdown | 45.536584 | 686 | 0.769795 |
PatrickPalmer/Omniverse-Connect-cmake/SimpleApp/Main.cpp |
#include <string>
#include <vector>
#include <iostream>
#include <iomanip>
#include "OmniClient.h"
#include "pxr/usd/usd/stage.h"
#include "pxr/usd/usd/prim.h"
#include "pxr/usd/usd/primRange.h"
#include "pxr/usd/usdGeom/metrics.h"
using namespace pxr;
static void OmniClientConnectionStatusCallbackImpl(void* userData, const char* url, OmniClientConnectionStatus status) noexcept
{
std::cout << "Connection Status: " << omniClientGetConnectionStatusString(status) << " [" << url << "]" << std::endl;
if (status == eOmniClientConnectionStatus_ConnectError)
{
// We shouldn't just exit here - we should clean up a bit, but we're going to do it anyway
std::cout << "[ERROR] Failed connection, exiting." << std::endl;
exit(-1);
}
}
// Startup Omniverse
static bool startOmniverse()
{
// Register a function to be called whenever the library wants to print something to a log
omniClientSetLogCallback(
[](char const* threadName, char const* component, OmniClientLogLevel level, char const* message)
{
std::cout << "[" << omniClientGetLogLevelString(level) << "] " << message << std::endl;
});
// The default log level is "Info", set it to "Debug" to see all messages
omniClientSetLogLevel(eOmniClientLogLevel_Info);
// Initialize the library and pass it the version constant defined in OmniClient.h
// This allows the library to verify it was built with a compatible version. It will
// return false if there is a version mismatch.
if (!omniClientInitialize(kOmniClientVersion))
{
return false;
}
omniClientRegisterConnectionStatusCallback(nullptr, OmniClientConnectionStatusCallbackImpl);
return true;
}
int main(int argc, char* argv[])
{
if (argc != 2)
{
std::cout << "Please provide an Omniverse stage URL to read." << std::endl;
return -1;
}
startOmniverse();
UsdStageRefPtr stage = UsdStage::Open(argv[1]);
if (!stage)
{
std::cout << "Failure to open stage. Exiting." << std::endl;
return -2;
}
// Print the up-axis
std::cout << "Stage up-axis: " << UsdGeomGetStageUpAxis(stage) << std::endl;
// Print the stage's linear units, or "meters per unit"
std::cout << "Meters per unit: " << std::setprecision(5) << UsdGeomGetStageMetersPerUnit(stage) << std::endl;
auto range = stage->Traverse();
for (const auto& node : range)
{
std::cout << "Node: " << node.GetPath() << std::endl;
}
// The stage is a sophisticated object that needs to be destroyed properly.
// Since stage is a smart pointer we can just reset it
stage.Reset();
    omniClientShutdown();
    return 0;
}
| 2,538 | C++ | 26.597826 | 127 | 0.702522 |
An-u-rag/synthetic-visual-dataset-generation/main.py | import numpy as np
import os
import json
# class_name_to_id_mapping = {"Cow": 0,
# "Chicken": 1,
# "Sheep": 2,
# "Goat": 3,
# "Pig": 4}
class_name_to_id_mapping = {"cow_1": 0,
"cow_2": 1,
"cow_3": 2,
"cow_4": 3,
"cow_5": 4,
"pig_clean": 5,
"pig_dirty": 6
}
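# class ids 0-6 above correspond, in order, to the 'names' list in the YAML written
# at the bottom of this script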
# Convert the info dict to the required yolo format and write it to disk
def convert_to_yolov5(info_dict, image_file, name_file):
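    """Convert one Replicator bounding-box .npy file into a YOLOv5 label file.
    info_dict: path to a bounding_box_2d_tight_*.npy file whose rows hold
        (semantic_id, x_min, y_min, x_max, y_max), as used below
    image_file: the opened PIL image of the same frame, used to normalise coordinates
    name_file: path to the matching *_labels_*.json mapping semantic ids to class names
    """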
    print_buffer = []
    data = np.load(info_dict)
    image_w, image_h = image_file.size
class_id = {}
    with open(name_file, 'r') as info_name:
        data_name = json.load(info_name)
        # map each semantic id in the labels json to its numeric YOLO class id
        for k, v in data_name.items():
            class_id[k] = class_name_to_id_mapping[v["class"]]
for b in data:
# Transform the bbox co-ordinates as per the format required by YOLO v5
b_center_x = (b[1] + b[3]) / 2
b_center_y = (b[2] + b[4]) / 2
b_width = (b[3] - b[1])
b_height = (b[4] - b[2])
# Normalise the co-ordinates by the dimensions of the image
b_center_x /= image_w
b_center_y /= image_h
b_width /= image_w
b_height /= image_h
        # Write the bbox details to the buffer, mapping the semantic id to its class id
        print_buffer.append(
            "{} {:.3f} {:.3f} {:.3f} {:.3f}".format(class_id.get(str(b[0])), b_center_x, b_center_y, b_width, b_height))
    # Name of the label file, derived from the matching .npy file name
    path_pic = "C:/Users/xyche/Downloads/dataset"
    save_file_name = os.path.join(path_pic, info_dict.replace("bounding_box_2d_tight_", "rgb_").replace("npy", "txt"))
    # Save the annotation to disk (use a context manager so the file handle is closed)
    with open(save_file_name, "w") as f:
        print("\n".join(print_buffer), file=f)
from PIL import Image
# Convert and save the annotations for every .npy file in the dataset folder
path_pic = "C:/Users/xyche/Downloads/dataset"
datanames = os.listdir(path_pic)
for i in datanames:
    if os.path.splitext(i)[1] == '.npy':
        info_dict = os.path.join(path_pic, i)
        image_file = i.replace("bounding_box_2d_tight_", "rgb_").replace("npy", "png")
        image_file = Image.open(os.path.join(path_pic, image_file))
        info_name = i.replace("bounding_box_2d_tight_", "bounding_box_2d_tight_labels_").replace("npy", "json")
        name_file = os.path.join(path_pic, info_name)
        convert_to_yolov5(info_dict, image_file, name_file)
from sklearn.model_selection import train_test_split
# Read images and annotations (skip metadata.txt, which is not a label file)
images = [os.path.join(path_pic, x) for x in os.listdir(path_pic) if x[-3:] == "png"]
annotations = [os.path.join(path_pic, x) for x in os.listdir(path_pic) if x[-3:] == "txt" and x != 'metadata.txt']
images.sort()
annotations.sort()
# Split the dataset into train-valid-test splits
train_images, val_images, train_annotations, val_annotations = train_test_split(images, annotations, test_size=0.2,
random_state=1)
val_images, test_images, val_annotations, test_annotations = train_test_split(val_images, val_annotations,
test_size=0.5, random_state=1)
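# the two-stage split above yields 80% train, 10% val, 10% test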
path1 = 'C:/Users/xyche/Downloads/dataset'
os.mkdir(path1 + '/images')
os.mkdir(path1 + '/labels')
file_name = ['/train', '/val', '/test']
path2 = 'C:/Users/xyche/Downloads/dataset/images'
for name in file_name:
os.mkdir(path2 + name)
path3 = 'C:/Users/xyche/Downloads/dataset/labels'
for name in file_name:
os.mkdir(path3 + name)
import shutil
# Utility function to copy files into their split folder
def move_files_to_folder(list_of_files, destination_folder):
    for f in list_of_files:
        shutil.copy(f, destination_folder)
# Move the splits into their folders
move_files_to_folder(train_images, 'C:/Users/xyche/Downloads/dataset/images/train/')
move_files_to_folder(val_images, 'C:/Users/xyche/Downloads/dataset/images/val/')
move_files_to_folder(test_images, 'C:/Users/xyche/Downloads/dataset/images/test/')
move_files_to_folder(train_annotations, 'C:/Users/xyche/Downloads/dataset/labels/train/')
move_files_to_folder(val_annotations, 'C:/Users/xyche/Downloads/dataset/labels/val/')
move_files_to_folder(test_annotations, 'C:/Users/xyche/Downloads/dataset/labels/test/')
import yaml
desired_caps = {
'train': 'C:/Users/xyche/Downloads/dataset/images/train/',
'val': 'C:/Users/xyche/Downloads/dataset/images/val/',
'test': 'C:/Users/xyche/Downloads/dataset/images/test/',
# number of classes
'nc': 7,
# class names
#'names': ['Sam', 'Lucy', 'Ross', 'Mary', 'Elon', 'Alex', 'Max']
'names': ['0', '1', '2', '3', '4', '5', '6']
}
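# note: 'nc' and the length of 'names' must match the 7 entries in class_name_to_id_mapping above
# A follow-up sketch (an assumption, not part of the original script): once dataset.yaml is
# written below, a local YOLOv5 checkout could be trained on these splits roughly as:
#   python train.py --img 640 --data C:/Users/xyche/Downloads/dataset/dataset.yaml --weights yolov5s.pt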
curpath = 'C:/Users/xyche/Downloads/dataset'
yamlpath = os.path.join(curpath, "dataset.yaml")
with open(yamlpath, "w", encoding="utf-8") as f:
yaml.dump(desired_caps, f) | 6,421 | Python | 37.22619 | 120 | 0.583242 |