// File: tudelft/autoGDMplus/gaden_ws/src/gaden/map_server/src/image_loader.cpp
/*
* Copyright (c) 2008, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file contains helper functions for loading images as maps.
*
* Author: Brian Gerkey
*/
#include <cstring>
#include <stdexcept>
#include <stdlib.h>
#include <stdio.h>
// We use SDL_image to load the image from disk
#include <SDL/SDL_image.h>
// Use Bullet's Quaternion object to create one from Euler angles
#include <LinearMath/btQuaternion.h>
#include "map_server/image_loader.h"
// compute linear index for given map coords
#define MAP_IDX(sx, i, j) ((sx) * (j) + (i))
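// Illustrative note (not in the original source): MAP_IDX flattens 2-D map
// coordinates into the row-major linear index used by nav_msgs/OccupancyGrid.data.
// For a map 5 cells wide, cell (i=2, j=3) lands at 5*3 + 2 = 17:
//   MAP_IDX(5, 2, 3) == 17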
namespace map_server
{
void
loadMapFromFile(nav_msgs::GetMap::Response* resp,
const char* fname, double res, bool negate,
double occ_th, double free_th, double* origin,
MapMode mode)
{
SDL_Surface* img;
unsigned char* pixels;
unsigned char* p;
unsigned char value;
int rowstride, n_channels, avg_channels;
unsigned int i,j;
int k;
double occ;
int alpha;
int color_sum;
double color_avg;
// Load the image using SDL. If we get NULL back, the image load failed.
if(!(img = IMG_Load(fname)))
{
std::string errmsg = std::string("failed to open image file \"") +
std::string(fname) + std::string("\": ") + IMG_GetError();
throw std::runtime_error(errmsg);
}
// Copy the image data into the map structure
resp->map.info.width = img->w;
resp->map.info.height = img->h;
resp->map.info.resolution = res;
resp->map.info.origin.position.x = *(origin);
resp->map.info.origin.position.y = *(origin+1);
resp->map.info.origin.position.z = 0.0;
btQuaternion q;
// setEulerZYX(yaw, pitch, roll)
q.setEulerZYX(*(origin+2), 0, 0);
resp->map.info.origin.orientation.x = q.x();
resp->map.info.origin.orientation.y = q.y();
resp->map.info.origin.orientation.z = q.z();
resp->map.info.origin.orientation.w = q.w();
// Allocate space to hold the data
resp->map.data.resize(resp->map.info.width * resp->map.info.height);
// Get values that we'll need to iterate through the pixels
rowstride = img->pitch;
n_channels = img->format->BytesPerPixel;
// NOTE: Trinary mode still overrides here to preserve existing behavior.
// Alpha will be averaged in with color channels when using trinary mode.
if (mode==TRINARY || !img->format->Amask)
avg_channels = n_channels;
else
avg_channels = n_channels - 1;
// Copy pixel data into the map structure
pixels = (unsigned char*)(img->pixels);
for(j = 0; j < resp->map.info.height; j++)
{
for (i = 0; i < resp->map.info.width; i++)
{
// Compute mean of RGB for this pixel
p = pixels + j*rowstride + i*n_channels;
color_sum = 0;
for(k=0;k<avg_channels;k++)
color_sum += *(p + (k));
color_avg = color_sum / (double)avg_channels;
if (n_channels == 1)
alpha = 1;
else
alpha = *(p+n_channels-1);
if(negate)
color_avg = 255 - color_avg;
if(mode==RAW){
value = color_avg;
resp->map.data[MAP_IDX(resp->map.info.width,i,resp->map.info.height - j - 1)] = value;
continue;
}
// If negate is true, we consider blacker pixels free, and whiter
// pixels occupied. Otherwise, it's vice versa.
occ = (255 - color_avg) / 255.0;
// Apply thresholds to RGB means to determine occupancy values for
// map. Note that we invert the graphics-ordering of the pixels to
// produce a map with cell (0,0) in the lower-left corner.
if(occ > occ_th)
value = +100;
else if(occ < free_th)
value = 0;
else if(mode==TRINARY || alpha < 1.0)
value = -1;
else {
double ratio = (occ - free_th) / (occ_th - free_th);
value = 1 + 98 * ratio;
}
resp->map.data[MAP_IDX(resp->map.info.width,i,resp->map.info.height - j - 1)] = value;
}
}
SDL_FreeSurface(img);
}
}
// File: tudelft/autoGDMplus/gaden_ws/src/gaden/map_server/src/map_saver.cpp
/*
* map_saver
* Copyright (c) 2008, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <ORGANIZATION> nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <cstdio>
#include "ros/ros.h"
#include "ros/console.h"
#include "nav_msgs/GetMap.h"
#include "tf2/LinearMath/Matrix3x3.h"
#include "geometry_msgs/Quaternion.h"
using namespace std;
/**
* @brief Map generation node.
*/
class MapGenerator
{
public:
MapGenerator(const std::string& mapname, int threshold_occupied, int threshold_free)
: mapname_(mapname), saved_map_(false), threshold_occupied_(threshold_occupied), threshold_free_(threshold_free)
{
ros::NodeHandle n;
ROS_INFO("Waiting for the map");
map_sub_ = n.subscribe("map", 1, &MapGenerator::mapCallback, this);
}
void mapCallback(const nav_msgs::OccupancyGridConstPtr& map)
{
ROS_INFO("Received a %d X %d map @ %.3f m/pix",
map->info.width,
map->info.height,
map->info.resolution);
std::string mapdatafile = mapname_ + ".pgm";
ROS_INFO("Writing map occupancy data to %s", mapdatafile.c_str());
FILE* out = fopen(mapdatafile.c_str(), "w");
if (!out)
{
ROS_ERROR("Couldn't save map file to %s", mapdatafile.c_str());
return;
}
fprintf(out, "P5\n# CREATOR: map_saver.cpp %.3f m/pix\n%d %d\n255\n",
map->info.resolution, map->info.width, map->info.height);
for(unsigned int y = 0; y < map->info.height; y++) {
for(unsigned int x = 0; x < map->info.width; x++) {
unsigned int i = x + (map->info.height - y - 1) * map->info.width;
if (map->data[i] >= 0 && map->data[i] <= threshold_free_) { // free: [0, threshold_free]
fputc(254, out);
} else if (map->data[i] >= threshold_occupied_) { // occupied: [threshold_occupied, 100]
fputc(000, out);
} else { // unknown: everything in between (or -1)
fputc(205, out);
}
}
}
fclose(out);
std::string mapmetadatafile = mapname_ + ".yaml";
ROS_INFO("Writing map occupancy data to %s", mapmetadatafile.c_str());
FILE* yaml = fopen(mapmetadatafile.c_str(), "w");
/*
resolution: 0.100000
origin: [0.000000, 0.000000, 0.000000]
#
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
*/
geometry_msgs::Quaternion orientation = map->info.origin.orientation;
tf2::Matrix3x3 mat(tf2::Quaternion(
orientation.x,
orientation.y,
orientation.z,
orientation.w
));
double yaw, pitch, roll;
mat.getEulerYPR(yaw, pitch, roll);
fprintf(yaml, "image: %s\nresolution: %f\norigin: [%f, %f, %f]\nnegate: 0\noccupied_thresh: 0.65\nfree_thresh: 0.196\n\n",
mapdatafile.c_str(), map->info.resolution, map->info.origin.position.x, map->info.origin.position.y, yaw);
fclose(yaml);
ROS_INFO("Done\n");
saved_map_ = true;
}
std::string mapname_;
ros::Subscriber map_sub_;
bool saved_map_;
int threshold_occupied_;
int threshold_free_;
};
#define USAGE "Usage: \n" \
" map_saver -h\n"\
" map_saver [--occ <threshold_occupied>] [--free <threshold_free>] [-f <mapname>] [ROS remapping args]"
int main(int argc, char** argv)
{
ros::init(argc, argv, "map_saver");
std::string mapname = "map";
int threshold_occupied = 65;
int threshold_free = 25;
for(int i=1; i<argc; i++)
{
if(!strcmp(argv[i], "-h"))
{
puts(USAGE);
return 0;
}
else if(!strcmp(argv[i], "-f"))
{
if(++i < argc)
mapname = argv[i];
else
{
puts(USAGE);
return 1;
}
}
else if (!strcmp(argv[i], "--occ"))
{
if (++i < argc)
{
threshold_occupied = std::atoi(argv[i]);
if (threshold_occupied < 1 || threshold_occupied > 100)
{
ROS_ERROR("threshold_occupied must be between 1 and 100");
return 1;
}
}
else
{
puts(USAGE);
return 1;
}
}
else if (!strcmp(argv[i], "--free"))
{
if (++i < argc)
{
threshold_free = std::atoi(argv[i]);
if (threshold_free < 0 || threshold_free > 100)
{
ROS_ERROR("threshold_free must be between 0 and 100");
return 1;
}
}
else
{
puts(USAGE);
return 1;
}
}
else
{
puts(USAGE);
return 1;
}
}
if (threshold_occupied <= threshold_free)
{
ROS_ERROR("threshold_free must be smaller than threshold_occupied");
return 1;
}
MapGenerator mg(mapname, threshold_occupied, threshold_free);
while(!mg.saved_map_ && ros::ok())
ros::spinOnce();
return 0;
}
// File: tudelft/autoGDMplus/gaden_ws/src/gaden/map_server/src/main.cpp
/*
* Copyright (c) 2008, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* Author: Brian Gerkey */
#define USAGE "\nUSAGE: map_server <map.yaml>\n" \
" map.yaml: map description file\n" \
"DEPRECATED USAGE: map_server <map> <resolution>\n" \
" map: image file to load\n"\
" resolution: map resolution [meters/pixel]"
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <boost/filesystem.hpp>
#include "ros/ros.h"
#include "ros/console.h"
#include "map_server/image_loader.h"
#include "nav_msgs/MapMetaData.h"
#include "nav_msgs/LoadMap.h"
#include "yaml-cpp/yaml.h"
#ifdef HAVE_YAMLCPP_GT_0_5_0
// The >> operator disappeared in yaml-cpp 0.5, so this function is
// added to provide support for code written under the yaml-cpp 0.3 API.
template<typename T>
void operator >> (const YAML::Node& node, T& i)
{
i = node.as<T>();
}
#endif
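// Illustrative note (not in the original source): with the shim above, the
// yaml-cpp 0.3 extraction syntax used throughout this file keeps working
// against yaml-cpp >= 0.5, e.g.:
//   double res;
//   doc["resolution"] >> res;  // equivalent to res = doc["resolution"].as<double>();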
class MapServer
{
public:
/** Trivial constructor */
MapServer(const std::string& fname, double res)
{
std::string mapfname = "";
double origin[3];
int negate;
double occ_th, free_th;
MapMode mode = TRINARY;
ros::NodeHandle private_nh("~");
private_nh.param("frame_id", frame_id_, std::string("map"));
//When called this service returns a copy of the current map
get_map_service_ = nh_.advertiseService("static_map", &MapServer::mapCallback, this);
//Change the currently published map
change_map_srv_ = nh_.advertiseService("change_map", &MapServer::changeMapCallback, this);
// Latched publisher for metadata
metadata_pub_ = nh_.advertise<nav_msgs::MapMetaData>("map_metadata", 1, true);
// Latched publisher for data
map_pub_ = nh_.advertise<nav_msgs::OccupancyGrid>("map", 1, true);
deprecated_ = (res != 0);
if (!deprecated_) {
if (!loadMapFromYaml(fname))
{
exit(-1);
}
} else {
if (!loadMapFromParams(fname, res))
{
exit(-1);
}
}
}
private:
ros::NodeHandle nh_;
ros::Publisher map_pub_;
ros::Publisher metadata_pub_;
ros::ServiceServer get_map_service_;
ros::ServiceServer change_map_srv_;
bool deprecated_;
std::string frame_id_;
/** Callback invoked when someone requests our service */
bool mapCallback(nav_msgs::GetMap::Request &req,
nav_msgs::GetMap::Response &res )
{
// request is empty; we ignore it
// = operator is overloaded to make deep copy (tricky!)
res = map_resp_;
ROS_INFO("Sending map");
return true;
}
/** Callback invoked when someone requests to change the map */
bool changeMapCallback(nav_msgs::LoadMap::Request &request,
nav_msgs::LoadMap::Response &response )
{
if (loadMapFromYaml(request.map_url))
{
response.result = response.RESULT_SUCCESS;
ROS_INFO("Changed map to %s", request.map_url.c_str());
}
else
{
response.result = response.RESULT_UNDEFINED_FAILURE;
}
return true;
}
/** Load a map given all the values needed to understand it
*/
bool loadMapFromValues(std::string map_file_name, double resolution,
int negate, double occ_th, double free_th,
double origin[3], MapMode mode)
{
ROS_INFO("Loading map from image \"%s\"", map_file_name.c_str());
try {
map_server::loadMapFromFile(&map_resp_, map_file_name.c_str(),
resolution, negate, occ_th, free_th,
origin, mode);
} catch (std::runtime_error& e) {
ROS_ERROR("%s", e.what());
return false;
}
// Make sure we get a consistent time in simulation
ros::Time::waitForValid();
map_resp_.map.info.map_load_time = ros::Time::now();
map_resp_.map.header.frame_id = frame_id_;
map_resp_.map.header.stamp = ros::Time::now();
ROS_INFO("Read a %d X %d map @ %.3lf m/cell",
map_resp_.map.info.width,
map_resp_.map.info.height,
map_resp_.map.info.resolution);
meta_data_message_ = map_resp_.map.info;
//Publish latched topics
metadata_pub_.publish( meta_data_message_ );
map_pub_.publish( map_resp_.map );
return true;
}
/** Load a map using the deprecated method
*/
bool loadMapFromParams(std::string map_file_name, double resolution)
{
ros::NodeHandle private_nh("~");
int negate;
double occ_th;
double free_th;
double origin[3];
private_nh.param("negate", negate, 0);
private_nh.param("occupied_thresh", occ_th, 0.65);
private_nh.param("free_thresh", free_th, 0.196);
origin[0] = origin[1] = origin[2] = 0.0;
return loadMapFromValues(map_file_name, resolution, negate, occ_th, free_th, origin, TRINARY);
}
/** Load a map given a path to a yaml file
*/
bool loadMapFromYaml(std::string path_to_yaml)
{
std::string mapfname;
MapMode mode;
double res;
int negate;
double occ_th;
double free_th;
double origin[3];
std::ifstream fin(path_to_yaml.c_str());
if (fin.fail()) {
ROS_ERROR("Map_server could not open %s.", path_to_yaml.c_str());
return false;
}
#ifdef HAVE_YAMLCPP_GT_0_5_0
// The document loading process changed in yaml-cpp 0.5.
YAML::Node doc = YAML::Load(fin);
#else
YAML::Parser parser(fin);
YAML::Node doc;
parser.GetNextDocument(doc);
#endif
try {
doc["resolution"] >> res;
} catch (YAML::InvalidScalar &) {
ROS_ERROR("The map does not contain a resolution tag or it is invalid.");
return false;
}
try {
doc["negate"] >> negate;
} catch (YAML::InvalidScalar &) {
ROS_ERROR("The map does not contain a negate tag or it is invalid.");
return false;
}
try {
doc["occupied_thresh"] >> occ_th;
} catch (YAML::InvalidScalar &) {
ROS_ERROR("The map does not contain an occupied_thresh tag or it is invalid.");
return false;
}
try {
doc["free_thresh"] >> free_th;
} catch (YAML::InvalidScalar &) {
ROS_ERROR("The map does not contain a free_thresh tag or it is invalid.");
return false;
}
try {
std::string modeS = "";
doc["mode"] >> modeS;
if(modeS=="trinary")
mode = TRINARY;
else if(modeS=="scale")
mode = SCALE;
else if(modeS=="raw")
mode = RAW;
else{
ROS_ERROR("Invalid mode tag \"%s\".", modeS.c_str());
return false;
}
} catch (YAML::Exception &) {
ROS_DEBUG("The map does not contain a mode tag or it is invalid... assuming Trinary");
mode = TRINARY;
}
try {
doc["origin"][0] >> origin[0];
doc["origin"][1] >> origin[1];
doc["origin"][2] >> origin[2];
} catch (YAML::InvalidScalar &) {
ROS_ERROR("The map does not contain an origin tag or it is invalid.");
return false;
}
try {
doc["image"] >> mapfname;
// TODO: make this path-handling more robust
if(mapfname.size() == 0)
{
ROS_ERROR("The image tag cannot be an empty string.");
return false;
}
boost::filesystem::path mapfpath(mapfname);
if (!mapfpath.is_absolute())
{
boost::filesystem::path dir(path_to_yaml);
dir = dir.parent_path();
mapfpath = dir / mapfpath;
mapfname = mapfpath.string();
}
} catch (YAML::InvalidScalar &) {
ROS_ERROR("The map does not contain an image tag or it is invalid.");
return false;
}
return loadMapFromValues(mapfname, res, negate, occ_th, free_th, origin, mode);
}
/** The map data is cached here, to be sent out to service callers
*/
nav_msgs::MapMetaData meta_data_message_;
nav_msgs::GetMap::Response map_resp_;
/*
void metadataSubscriptionCallback(const ros::SingleSubscriberPublisher& pub)
{
pub.publish( meta_data_message_ );
}
*/
};
int main(int argc, char **argv)
{
ros::init(argc, argv, "map_server", ros::init_options::AnonymousName);
ros::NodeHandle nh("~");
if(argc != 3 && argc != 2)
{
ROS_ERROR("%s", USAGE);
exit(-1);
}
if (argc != 2) {
ROS_WARN("Using deprecated map server interface. Please switch to new interface.");
}
std::string fname(argv[1]);
double res = (argc == 2) ? 0.0 : atof(argv[2]);
try
{
MapServer ms(fname, res);
ros::spin();
}
catch(std::runtime_error& e)
{
ROS_ERROR("map_server exception: %s", e.what());
return -1;
}
return 0;
}
// File: tudelft/autoGDMplus/gaden_ws/src/gaden/map_server/include/map_server/image_loader.h
/*
* Copyright (c) 2008, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MAP_SERVER_MAP_SERVER_H
#define MAP_SERVER_MAP_SERVER_H
/*
* Author: Brian Gerkey
*/
#include "nav_msgs/GetMap.h"
/** Map mode
* Default: TRINARY -
* value >= occ_th - Occupied (100)
* value <= free_th - Free (0)
* otherwise - Unknown
* SCALE -
* alpha < 1.0 - Unknown
* value >= occ_th - Occupied (100)
* value <= free_th - Free (0)
* otherwise - values in (free_th, occ_th) are
* linearly mapped onto (0, 100)
* RAW -
* value = value
*/
enum MapMode {TRINARY, SCALE, RAW};
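/* Worked example (illustrative, not part of the original header): with the
 * default thresholds occ_th=0.65 and free_th=0.196, a pixel whose normalized
 * darkness is occ=0.4 falls between both thresholds, so in SCALE mode it is
 * mapped linearly:
 *   ratio = (0.4 - 0.196) / (0.65 - 0.196) ≈ 0.449
 *   value = 1 + 98 * ratio ≈ 45
 * In TRINARY mode the same pixel would simply be reported as Unknown (-1).
 */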
namespace map_server
{
/** Read the image from file and fill out the resp object, for later
* use when our services are requested.
*
* @param resp The map will be written into here
* @param fname The image file to read from
* @param res The resolution of the map (gets stored in resp)
* @param negate If true, then whiter pixels are occupied, and blacker
* pixels are free
* @param occ_th Threshold above which pixels are occupied
* @param free_th Threshold below which pixels are free
* @param origin Triple specifying 2-D pose of lower-left corner of image
* @param mode Map mode
* @throws std::runtime_error If the image file can't be loaded
* */
void loadMapFromFile(nav_msgs::GetMap::Response* resp,
const char* fname, double res, bool negate,
double occ_th, double free_th, double* origin,
MapMode mode=TRINARY);
}
#endif
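/* Illustrative usage sketch (caller-side names are assumptions for the example):
 *   nav_msgs::GetMap::Response resp;
 *   double origin[3] = {0.0, 0.0, 0.0};
 *   map_server::loadMapFromFile(&resp, "map.pgm", 0.05, false, 0.65, 0.196, origin, TRINARY);
 */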
// File: tudelft/autoGDMplus/gaden_ws/src/gaden/simulated_gas_sensor_array/src/fake_gas_sensor_array.h
#include <ros/ros.h>
#include <std_msgs/Float32.h>
#include <visualization_msgs/Marker.h>
#include <nav_msgs/Odometry.h>
#include <geometry_msgs/PoseWithCovarianceStamped.h>
#include <tf/transform_listener.h>
#include <olfaction_msgs/gas_sensor_array.h>
#include <gaden_player/GasPosition.h>
#include <boost/format.hpp>
#include <cstdlib>
#include <math.h>
#include <vector>
#include <fstream>
#include <iostream>
//Gas Types
#define ETHANOL_ID 0
#define METHANE_ID 1
#define HYDROGEN_ID 2
#define PROPANOL_ID 3
#define CHLORIDE_ID 4
#define FLURORINE_ID 5
#define ACETONE_ID 6
#define NEON_ID 7
#define HELIUM_ID 8
#define HOTAIR_ID 9
//Sensor Types
#define TGS2620_ID 0
#define TGS2600_ID 1
#define TGS2611_ID 2
#define TGS2610_ID 3
#define TGS2612_ID 4
#define PID_ID 30
// Parameters
int num_sensors; //number of sensors in the array
std::vector<int> sensor_models;
double pub_rate;
std::string topic_id;
std::string frame_id;
std::string fixed_frame;
bool use_PID_correction_factors;
//Parameters to model the MOX response
struct gas_sensor
{
bool first_reading; //First reading is set to baseline always
float RS_R0; //Ideal sensor response based on sensitivity
float sensor_output; //MOX model response
float previous_sensor_output; //The response in (t-1)
};
std::vector<gas_sensor> sensor_array;
// Vars
int ch_id; //Chemical ID
bool notified; //to notify about errors just once
//functions:
void loadNodeParameters(ros::NodeHandle private_nh);
float simulate_mox_as_line_loglog(gaden_player::GasPositionResponse GT_gas_concentrations, int s_idx);
float simulate_pid(gaden_player::GasPositionResponse GT_gas_concentrations);
//------------------------ SENSOR CHARACTERIZATION PARAMS ----------------------------------//
std::string labels[5] = {"TGS2620", "TGS2600", "TGS2611", "TGS2610", "TGS2612"};
float R0[5] = {3000, 50000, 3740, 3740, 4500}; //[Ohms] Reference resistance (see datasheets)
//Time constants (Rise, Decay)
float tau_value[5][7][2] = //5 sensors, 7 gases , 2 Time Constants
{
{ //TGS2620
{2.96, 15.71}, //ethanol
{2.96, 15.71}, //methane
{2.96, 15.71}, //hydrogen
{2.96, 15.71}, //propanol
{2.96, 15.71}, //chlorine
{2.96, 15.71}, //fluorine
{2.96, 15.71} //Acetone
},
{ //TGS2600
{4.8, 18.75}, //ethanol
{4.8, 18.75}, //methane
{4.8, 18.75}, //hydrogen
{4.8, 18.75}, //propanol
{4.8, 18.75}, //chlorine
{4.8, 18.75}, //fluorine
{4.8, 18.75} //Acetone
},
{ //TGS2611
{3.44, 6.35}, //ethanol
{3.44, 6.35}, //methane
{3.44, 6.35}, //hydrogen
{3.44, 6.35}, //propanol
{3.44, 6.35}, //chlorine
{3.44, 6.35}, //fluorine
{3.44, 6.35} //Acetone
},
{ //TGS2610
{3.44, 6.35}, //ethanol
{3.44, 6.35}, //methane
{3.44, 6.35}, //hydrogen
{3.44, 6.35}, //propanol
{3.44, 6.35}, //chlorine
{3.44, 6.35}, //fluorine
{3.44, 6.35} //Acetone
},
{ //TGS2612
{3.44, 6.35}, //ethanol
{3.44, 6.35}, //methane
{3.44, 6.35}, //hydrogen
{3.44, 6.35}, //propanol
{3.44, 6.35}, //chlorine
{3.44, 6.35}, //fluorine
{3.44, 6.35} //Acetone
}
};
// MOX sensitivity. Extracted from datasheets and curve fitting
//--------------------------------------------------------------
float Sensitivity_Air[5] = {21, 1, 8.8, 10.3, 19.5}; //RS/R0 when exposed to clean air (datasheet)
// RS/R0 = A*conc^B (a line in the loglog scale)
float sensitivity_lineloglog[5][7][2]={ //5 Sensors, 7 Gases, 2 Constants: A, B
{ //TGS2620
{62.32, -0.7155}, //Ethanol
{120.6, -0.4877}, //Methane
{24.45, -0.5546}, //Hydrogen
{120.6, -0.4877}, //propanol (To review)
{120.6, -0.4877}, //chlorine (To review)
{120.6, -0.4877}, //fluorine (To review)
{120.6, -0.4877} //Acetone (To review)
},
{ //TGS2600
{0.6796, -0.3196}, //ethanol
{1.018, -0.07284}, //methane
{0.6821, -0.3532}, //hydrogen
{1.018, -0.07284}, //propanol (To review)
{1.018, -0.07284}, //chlorine (To review)
{1.018, -0.07284}, //fluorine (To review)
{1.018, -0.07284} //Acetone (To review)
},
{ //TGS2611
{51.11, -0.3658}, //ethanol
{38.46, -0.4289}, //methane
{41.3, -0.3614}, //hydrogen
{38.46, -0.4289}, //propanol (To review)
{38.46, -0.4289}, //chlorine (To review)
{38.46, -0.4289}, //fluorine (To review)
{38.46, -0.4289} //Acetone (To review)
},
{ //TGS2610
{106.1, -0.5008}, //ethanol
{63.91, -0.5372}, //methane
{66.78, -0.4888}, //hydrogen
{63.91, -0.5372}, //propanol (To review)
{63.91, -0.5372}, //chlorine (To review)
{63.91, -0.5372}, //fluorine (To review)
{63.91, -0.5372} //Acetone (To review)
},
{ //TGS2612
{31.35, -0.09115}, //ethanol
{146.2, -0.5916}, //methane
{19.5, 0.0}, //hydrogen
{146.2, -0.5916}, //propanol (To review)
{146.2, -0.5916}, //chlorine (To review)
{146.2, -0.5916}, //fluorine (To review)
{146.2, -0.5916} //Acetone (To review)
}
};
//PID correction factors for gas concentration
//--------------------------------------------
//Ethanol, Methane, Hydrogen, Propanol, Chlorine, Fluorine, Acetone
// http://www.intlsensor.com/pdf/pidcorrectionfactors.pdf
// Here we simulate a lamp of 11.7eV to increase the range of detectable gases
// A 0.0 means the PID is not responsive to that gas
float PID_correction_factors[7] = {10.47, 0.0, 0.0, 2.7, 1.0, 0.0, 1.4};
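/* Worked example (illustrative, not part of the original header): with
 * use_PID_correction_factors enabled, the simulated PID reading sums each gas
 * concentration divided by its correction factor. For 100 ppm of ethanol
 * (factor 10.47) mixed with methane (factor 0.0):
 *   reading = 100 / 10.47 ≈ 9.55 ppm   (the methane term is skipped)
 * Without correction factors, raw concentrations are summed directly.
 */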
// File: tudelft/autoGDMplus/gaden_ws/src/gaden/simulated_gas_sensor_array/src/fake_gas_sensor_array.cpp
/*-------------------------------------------------------------------------------
* This node simulates the response of a MOX gas sensor given the GT gas concentration
* of the gases it is exposed to (request to simulation_player or dispersion_simulation)
* - Gas concentration should be given in [ppm]
* - The sensor response can be set to: Resistance of the sensor (Rs), Resistance-ratio (Rs/R0), or Voltage (0-5V)
* - Sensitivity to different gases is set based on the manufacturer datasheet
* - Time constants for the dynamic response are set based on real experiments
*
* - Response to mixture of gases is set based on datasheet.
* -----------------------------------------------------------------------------------------------*/
#include "fake_gas_sensor_array.h"
int main( int argc, char** argv )
{
ros::init(argc, argv, "fake_gas_sensor_aray");
ros::NodeHandle n;
ros::NodeHandle pn("~");
//Read parameters
loadNodeParameters(pn);
//Publishers
ros::Publisher enose_pub = n.advertise<olfaction_msgs::gas_sensor_array>(topic_id, 500);
//Service to request gas concentration
ros::ServiceClient client = n.serviceClient<gaden_player::GasPosition>("/odor_value");
//Configure sensor_array
sensor_array.resize(num_sensors);
for (int i=0;i<num_sensors; i++)
{
sensor_array[i].first_reading = true;
}
// Loop
tf::TransformListener listener;
ros::Rate r(pub_rate);
notified = false;
while (ros::ok())
{
//Vars
tf::StampedTransform transform;
bool know_sensor_pose = true;
//Get pose of the sensor_aray in the /map reference
try
{
listener.lookupTransform(fixed_frame.c_str(), frame_id.c_str(),
ros::Time(0), transform);
}
catch (tf::TransformException ex)
{
ROS_ERROR("%s",ex.what());
know_sensor_pose = false;
ros::Duration(1.0).sleep();
}
if (know_sensor_pose)
{
//Current sensor pose
float x_pos = transform.getOrigin().x();
float y_pos = transform.getOrigin().y();
float z_pos = transform.getOrigin().z();
// Get Gas concentration at current position (for each gas present)
// Service request to the simulator
gaden_player::GasPosition srv;
srv.request.x.push_back(x_pos);
srv.request.y.push_back(y_pos);
srv.request.z.push_back(z_pos);
if (client.call(srv))
{
/*
for (int i=0; i<srv.response.gas_type.size(); i++)
{
ROS_INFO("[FakeMOX] %s:%.4f at (%.2f,%.2f,%.2f)",srv.response.gas_type[i].c_str(), srv.response.gas_conc[i],srv.request.x, srv.request.y, srv.request.z );
}
*/
olfaction_msgs::gas_sensor_array enose_msg;
enose_msg.header.frame_id = frame_id;
enose_msg.header.stamp = ros::Time::now();
//For each sensor in the array, simulate its response
for (int s=0; s<num_sensors; s++)
{
//Simulate Gas_Sensor response given this GT values of the concentration!
olfaction_msgs::gas_sensor sensor_msg;
sensor_msg.header.frame_id = frame_id;
sensor_msg.header.stamp = ros::Time::now();
switch (sensor_models[s])
{
case 0: //MOX TGS2620
sensor_msg.technology = sensor_msg.TECH_MOX;
sensor_msg.manufacturer = sensor_msg.MANU_FIGARO;
sensor_msg.mpn = sensor_msg.MPN_TGS2620;
sensor_msg.raw_units = sensor_msg.UNITS_OHM;
sensor_msg.raw = simulate_mox_as_line_loglog(srv.response, s);
sensor_msg.raw_air = Sensitivity_Air[sensor_models[s]]*R0[sensor_models[s]];
sensor_msg.calib_A = sensitivity_lineloglog[sensor_models[s]][0][0]; //Calib for Ethanol
sensor_msg.calib_B = sensitivity_lineloglog[sensor_models[s]][0][1]; //Calib for Ethanol
break;
case 1: //MOX TGS2600
sensor_msg.technology = sensor_msg.TECH_MOX;
sensor_msg.manufacturer = sensor_msg.MANU_FIGARO;
sensor_msg.mpn = sensor_msg.MPN_TGS2600;
sensor_msg.raw_units = sensor_msg.UNITS_OHM;
sensor_msg.raw = simulate_mox_as_line_loglog(srv.response, s);
sensor_msg.raw_air = Sensitivity_Air[sensor_models[s]]*R0[sensor_models[s]];
sensor_msg.calib_A = sensitivity_lineloglog[sensor_models[s]][0][0]; //Calib for Ethanol
sensor_msg.calib_B = sensitivity_lineloglog[sensor_models[s]][0][1]; //Calib for Ethanol
break;
case 2: //MOX TGS2611
sensor_msg.technology = sensor_msg.TECH_MOX;
sensor_msg.manufacturer = sensor_msg.MANU_FIGARO;
sensor_msg.mpn = sensor_msg.MPN_TGS2611;
sensor_msg.raw_units = sensor_msg.UNITS_OHM;
sensor_msg.raw = simulate_mox_as_line_loglog(srv.response, s);
sensor_msg.raw_air = Sensitivity_Air[sensor_models[s]]*R0[sensor_models[s]];
sensor_msg.calib_A = sensitivity_lineloglog[sensor_models[s]][0][0]; //Calib for Ethanol
sensor_msg.calib_B = sensitivity_lineloglog[sensor_models[s]][0][1]; //Calib for Ethanol
break;
case 3: //MOX TGS2610
sensor_msg.technology = sensor_msg.TECH_MOX;
sensor_msg.manufacturer = sensor_msg.MANU_FIGARO;
sensor_msg.mpn = sensor_msg.MPN_TGS2610;
sensor_msg.raw_units = sensor_msg.UNITS_OHM;
sensor_msg.raw = simulate_mox_as_line_loglog(srv.response, s);
sensor_msg.raw_air = Sensitivity_Air[sensor_models[s]]*R0[sensor_models[s]];
sensor_msg.calib_A = sensitivity_lineloglog[sensor_models[s]][0][0]; //Calib for Ethanol
sensor_msg.calib_B = sensitivity_lineloglog[sensor_models[s]][0][1]; //Calib for Ethanol
break;
case 4: //MOX TGS2612
sensor_msg.technology = sensor_msg.TECH_MOX;
sensor_msg.manufacturer = sensor_msg.MANU_FIGARO;
sensor_msg.mpn = sensor_msg.MPN_TGS2612;
sensor_msg.raw_units = sensor_msg.UNITS_OHM;
sensor_msg.raw = simulate_mox_as_line_loglog(srv.response, s);
sensor_msg.raw_air = Sensitivity_Air[sensor_models[s]]*R0[sensor_models[s]];
sensor_msg.calib_A = sensitivity_lineloglog[sensor_models[s]][0][0]; //Calib for Ethanol
sensor_msg.calib_B = sensitivity_lineloglog[sensor_models[s]][0][1]; //Calib for Ethanol
break;
case 30: //PID miniRaeLite
sensor_msg.technology = sensor_msg.TECH_PID;
sensor_msg.manufacturer = sensor_msg.MANU_RAE;
sensor_msg.mpn = sensor_msg.MPN_MINIRAELITE;
sensor_msg.raw_units = sensor_msg.UNITS_PPM;
sensor_msg.raw = simulate_pid(srv.response);
sensor_msg.raw_air = 0.0;
sensor_msg.calib_A = 0.0;
sensor_msg.calib_B = 0.0;
break;
default:
break;
}
//append sensor observation to the array
enose_msg.sensors.push_back(sensor_msg);
}//end for each sensor in the array
//Publish simulated enose (Array of sensors)
enose_pub.publish(enose_msg);
notified = false;
}
else
{
if (!notified)
{
ROS_WARN("[fake_gas_sensor_array] Cannot read Gas Concentrations from GADEN simulator.");
notified = true;
}
}
}
ros::spinOnce();
r.sleep();
}
}
// Simulate MOX response: Sensitivity + Dynamic response
// RS = R0*( A * conc^B )
// This method employs a curve fit based on a line in the loglog scale to set the sensitivity
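// Worked example (illustrative, not from the original source): a TGS2620
// (R0 = 3000 Ohm, A = 62.32, B = -0.7155 for ethanol) exposed to 100 ppm:
//   RS/R0 = 62.32 * 100^(-0.7155) ≈ 2.31  ->  RS ≈ 2.31 * 3000 ≈ 6.9 kOhm
// (before clamping against the clean-air baseline and low-pass filtering)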
float simulate_mox_as_line_loglog(gaden_player::GasPositionResponse GT_gas_concentrations, int s_idx)
{
if (sensor_array[s_idx].first_reading)
{
//Init sensor to its Baseline lvl
sensor_array[s_idx].sensor_output = Sensitivity_Air[sensor_models[s_idx]]; //RS_R0 value at air
sensor_array[s_idx].previous_sensor_output = sensor_array[s_idx].sensor_output;
sensor_array[s_idx].first_reading = false;
}
else
{
//1. Set Sensor Output based on gas concentrations (gas type dependent)
//---------------------------------------------------------------------
// RS/R0 = A*conc^B (a line in the loglog scale)
float resistance_variation = 0.0;
//Handle multiple gases
for (int i=0; i<GT_gas_concentrations.positions[0].concentration.size(); i++)
{
int gas_id;
if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"ethanol"))
gas_id = 0;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"methane"))
gas_id = 1;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"hydrogen"))
gas_id = 2;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"propanol"))
gas_id = 3;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"chlorine"))
gas_id = 4;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"fluorine"))
gas_id = 5;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"acetone"))
gas_id = 6;
else
{
ROS_ERROR("[fake_mox] MOX response is not configured for this gas type!");
return 0.0;
}
//JUST FOR VIDEO DEMO
/*
if (input_sensor_model == 0)
{
GT_gas_concentrations.gas_conc[i] *= 10;
}
else if (input_sensor_model ==2)
{
GT_gas_concentrations.gas_conc[i] *= 20;
}
*/
//Value of RS/R0 for the given gas and concentration
sensor_array[s_idx].RS_R0 = sensitivity_lineloglog[sensor_models[s_idx]][gas_id][0] * pow(GT_gas_concentrations.positions[0].concentration[i], sensitivity_lineloglog[sensor_models[s_idx]][gas_id][1]);
//Ensure we never exceed the baseline level (max allowed)
if (sensor_array[s_idx].RS_R0 > Sensitivity_Air[sensor_models[s_idx]])
sensor_array[s_idx].RS_R0= Sensitivity_Air[sensor_models[s_idx]];
//Increment with respect to the Baseline
resistance_variation += Sensitivity_Air[sensor_models[s_idx]] - sensor_array[s_idx].RS_R0;
}
//Calculate final RS_R0 given the final resistance variation
sensor_array[s_idx].RS_R0 = Sensitivity_Air[sensor_models[s_idx]] - resistance_variation;
//Ensure a minimum sensor resistance
if (sensor_array[s_idx].RS_R0 <= 0.0)
sensor_array[s_idx].RS_R0 = 0.01;
//2. Simulate transient response (dynamic behaviour, tau_r and tau_d)
//---------------------------------------------------------------------
float tau;
if (sensor_array[s_idx].RS_R0 < sensor_array[s_idx].previous_sensor_output) //rise
tau = tau_value[sensor_models[s_idx]][0][0];
else //decay
tau = tau_value[sensor_models[s_idx]][0][1];
// Use a first-order low-pass filter
//alpha = Δt/(tau+Δt), with Δt = 1/pub_rate
float alpha = (1/pub_rate) / (tau+(1/pub_rate));
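// Worked numbers (illustrative): at pub_rate = 5 Hz, Δt = 0.2 s; for the
// TGS2620 rise constant tau = 2.96 s, alpha = 0.2/(2.96+0.2) ≈ 0.063, so
// each new reading moves the filtered output ~6% of the way toward RS/R0.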
//filtered response (uses previous estimation):
sensor_array[s_idx].sensor_output = (alpha*sensor_array[s_idx].RS_R0) + (1-alpha)*sensor_array[s_idx].previous_sensor_output;
//Update values
sensor_array[s_idx].previous_sensor_output = sensor_array[s_idx].sensor_output;
}
// Return Sensor response for current time instant as the Sensor Resistance in Ohms
return (sensor_array[s_idx].sensor_output * R0[sensor_models[s_idx]]);
}
// Simulate PID response : Weighted Sum of all gases
float simulate_pid(gaden_player::GasPositionResponse GT_gas_concentrations)
{
//Handle multiple gases
float accumulated_conc = 0.0;
for (int i=0; i<GT_gas_concentrations.positions[0].concentration.size(); i++)
{
if (use_PID_correction_factors)
{
int gas_id;
if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"ethanol"))
gas_id = 0;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"methane"))
gas_id = 1;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"hydrogen"))
gas_id = 2;
else
{
ROS_ERROR("[fake_PID] PID response is not configured for this gas type!");
return 0.0;
}
if (PID_correction_factors[gas_id] != 0)
accumulated_conc += GT_gas_concentrations.positions[0].concentration[i] / PID_correction_factors[gas_id];
}
else
accumulated_conc += GT_gas_concentrations.positions[0].concentration[i];
}
return accumulated_conc;
}
//Load Sensor parameters
void loadNodeParameters(ros::NodeHandle private_nh)
{
//Num sensors in the array
private_nh.param<int>("num_sensors", num_sensors, 1);
//Sensor models
sensor_models.resize(num_sensors);
for (int i=0;i<num_sensors; i++)
{
//Get model of sensor (i)
std::string paramName = boost::str( boost::format("sensor_model_%i") % i);
private_nh.param<int>(paramName.c_str(),sensor_models[i], TGS2620_ID);
}
//Publication rate (Hz)
private_nh.param<double>("pub_rate", pub_rate, 5.0);
//sensor_array_topic_id
private_nh.param<std::string>("topic_id", topic_id, "/enose");
//sensor_array_frame_id
private_nh.param<std::string>("frame_id", frame_id, "enose_frame");
//fixed frame
private_nh.param<std::string>("fixed_frame", fixed_frame, "/map");
//PID_correction_factors
private_nh.param<bool>("use_PID_correction_factors", use_PID_correction_factors, false);
}
// File: tudelft/autoGDMplus/gaden_ws/src/gaden/gaden_preprocessing/src/preprocessing.cpp
#include <ros/ros.h>
#include <std_msgs/Bool.h>
#include <string>
#include <fstream>
#include <stdlib.h>
#include <vector>
#include <sstream>
#include <iostream>
#include <eigen3/Eigen/Dense>
#include <boost/format.hpp>
#include <boost/thread/mutex.hpp>
#include <queue>
#include <stack>
#include <TriangleBoxIntersection.h>
#include <stdint.h>
enum cell_state {non_initialized=0, empty=1, occupied=2, outlet=3, edge=4};
struct Point{
float x; float y; float z;
Point(){}
Point(float x, float y, float z){
this->x = x; this->y =y; this->z=z;
}
};
struct Triangle{
Point p1; Point p2; Point p3;
Triangle(){}
Triangle(Point p1, Point p2, Point p3){
this->p1=p1; this->p2=p2; this->p3=p3;
}
Point& operator[](int i){
if(i==0)
return p1;
else if (i==1)
return p2;
else if(i==2)
return p3;
else{
std::cout<<"Indexing error when accessing the points in triangle! Index must be >= 2";
return p1;
}
}
};
//dimensions of the environment [m]
float env_min_x;
float env_min_y;
float env_min_z;
float env_max_x;
float env_max_y;
float env_max_z;
float roundFactor;
//length of the sides of the cell [m]
float cell_size;
float floor_height;
std::vector<std::vector<std::vector<int> > > env;
bool compare_cell(int x, int y, int z, cell_state value){
if(x<0 || x>=env.size() ||
y<0 || y>=env[0].size() ||
z<0 || z>=env[0][0].size()){
return false;
}
else{
return env[x][y][z]==value;
}
}
void changeWorldFile(std::string filename){
std::ifstream input(filename);
std::stringstream ss;
std::string line;
while(getline(input, line)){
if(line.substr(0,8)=="floorMap"){
//ignore the floorMap bit, we are replacing it entirely
while(getline(input, line) && line!=")"){}
ss<<
"floorMap # load an environment bitmap\n"<<
"(\n"<<
" name \"SimulatedMap\"\n"<<
" bitmap \"../../occupancy.pgm\"\n"<<
" size ["<<(env_max_x-env_min_x)<<" "<<(env_max_y-env_min_y)<<" "<<(env_max_z-env_min_z) <<"] #m \n"<<
" pose ["<<(env_max_x-env_min_x)/2+env_min_x<<" "<<(env_max_y-env_min_y)/2+env_min_y<<" "<<floor_height<<" 0] #Coordinates (m) of the Center of the image_map\n"<<
")\n";
}
else{
ss<<line<<"\n";
}
}
input.close();
std::ofstream out(filename);
out<<ss.rdbuf();
out.close();
}
void printMap(std::string filename, int scale, bool block_outlets){
std::ofstream outfile(filename.c_str());
outfile << "P2\n"
<< scale * env[0].size() << " " << scale * env.size() << "\n" <<"1\n";
//things are repeated to scale them up (the image is too small!)
int height = (floor_height-env_min_z)/cell_size; //an xy slice of the 3D environment is used as a geometric map for navigation
for (int row = env.size()-1; row >= 0; row--)
{
for (int j = 0; j < scale; j++)
{
for (int col = 0; col <env[0].size() ; col++)
{
for (int i = 0; i < scale; i++)
{
auto& cell = env[row][col][height];
bool outletTerm = cell == cell_state::outlet && !block_outlets;
outfile << ( cell == cell_state::empty || outletTerm ? 1 : 0) << " ";
}
}
outfile << "\n";
}
}
outfile.close();
}
void printEnv(std::string filename, int scale)
{
std::ofstream outfile(filename.c_str());
outfile << "#env_min(m) " << env_min_x << " " << env_min_y << " " << env_min_z << "\n";
outfile << "#env_max(m) " << env_max_x << " " << env_max_y << " " << env_max_z << "\n";
outfile << "#num_cells " << env[0].size() << " " << env.size() << " " << env[0][0].size() << "\n";
outfile << "#cell_size(m) " << cell_size << "\n";
//things are repeated to scale them up (the image is too small!)
for (int height = 0; height < env[0][0].size(); height++)
{
for (int col = 0; col <env[0].size(); col++)
{
for (int j = 0; j < scale; j++)
{
for (int row = 0; row <env.size(); row++)
{
for (int i = 0; i < scale; i++)
{
outfile << (env[row][col][height]==cell_state::empty? 0 :
(env[row][col][height]==cell_state::outlet? 2 :
1))
<< " ";
}
}
outfile << "\n";
}
}
outfile << ";\n";
}
outfile.close();
}
void printWind(std::vector<double> U,
std::vector<double> V,
std::vector<double> W, std::string filename){
std::ofstream fileU(boost::str(boost::format("%s_U") % filename).c_str());
std::ofstream fileV(boost::str(boost::format("%s_V") % filename).c_str());
std::ofstream fileW(boost::str(boost::format("%s_W") % filename).c_str());
//write a header code so the filament_simulator knows the file is in binary format
int code=999;
fileU.write((char*) &code, sizeof(int));
fileV.write((char*) &code, sizeof(int));
fileW.write((char*) &code, sizeof(int));
fileU.write((char*) U.data(), sizeof(double) * U.size());
fileV.write((char*) V.data(), sizeof(double) * V.size());
fileW.write((char*) W.data(), sizeof(double) * W.size());
fileU.close();
fileV.close();
fileW.close();
}
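// Illustrative sketch (not part of the original tool): reading back one of the
// three component files written by printWind(). The file name and cell count
// are assumptions for the example; the leading int is the 999 "binary format"
// marker written above, followed by a flat array of doubles indexed as in
// indexFrom3D() below.
static std::vector<double> readWindComponent(const std::string &filename, size_t num_cells){
std::ifstream infile(filename.c_str(), std::ios_base::binary);
int code;
infile.read((char*) &code, sizeof(int)); // expect 999
std::vector<double> data(num_cells);
infile.read((char*) data.data(), sizeof(double) * num_cells);
return data;
}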
void printYaml(std::string output){
std::ofstream yaml(boost::str(boost::format("%s/occupancy.yaml") % output.c_str()));
yaml << "image: occupancy.pgm\n"
<< "resolution: " << cell_size/10
<< "\norigin: [" << env_min_x << ", " << env_min_y << ", " << 0 << "]\n"
<< "occupied_thresh: 0.9\n"
<< "free_thresh: 0.1\n"
<< "negate: 0";
yaml.close();
}
float min_val(float x, float y, float z) {
float min =x;
if (y < min)
min=y;
if(z < min)
min=z;
return min;
}
float max_val(float x, float y, float z) {
float max= x;
if (y > max)
max=y;
if(z > max)
max=z;
return max;
}
bool eq(float x, float y){
return std::abs(x-y)<0.01;
}
std::vector<Eigen::Vector3d> cubePoints(const Eigen::Vector3d &query_point){
std::vector<Eigen::Vector3d> points;
points.push_back(query_point);
points.push_back(Eigen::Vector3d(query_point.x()-cell_size/2,
query_point.y()-cell_size/2,
query_point.z()-cell_size/2));
points.push_back(Eigen::Vector3d(query_point.x()-cell_size/2,
query_point.y()-cell_size/2,
query_point.z()+cell_size/2));
points.push_back(Eigen::Vector3d(query_point.x()-cell_size/2,
query_point.y()+cell_size/2,
query_point.z()-cell_size/2));
points.push_back(Eigen::Vector3d(query_point.x()-cell_size/2,
query_point.y()+cell_size/2,
query_point.z()+cell_size/2));
points.push_back(Eigen::Vector3d(query_point.x()+cell_size/2,
query_point.y()-cell_size/2,
query_point.z()-cell_size/2));
points.push_back(Eigen::Vector3d(query_point.x()+cell_size/2,
query_point.y()-cell_size/2,
query_point.z()+cell_size/2));
points.push_back(Eigen::Vector3d(query_point.x()+cell_size/2,
query_point.y()+cell_size/2,
query_point.z()-cell_size/2));
points.push_back(Eigen::Vector3d(query_point.x()+cell_size/2,
query_point.y()+cell_size/2,
query_point.z()+cell_size/2));
return points;
}
bool pointInTriangle(const Eigen::Vector3d& query_point,
const Eigen::Vector3d& triangle_vertex_0,
const Eigen::Vector3d& triangle_vertex_1,
const Eigen::Vector3d& triangle_vertex_2)
{
// u=P2−P1
Eigen::Vector3d u = triangle_vertex_1 - triangle_vertex_0;
// v=P3−P1
Eigen::Vector3d v = triangle_vertex_2 - triangle_vertex_0;
// n=u×v
Eigen::Vector3d n = u.cross(v);
bool anyProyectionInTriangle=false;
std::vector<Eigen::Vector3d> cube= cubePoints(query_point);
for(const Eigen::Vector3d &vec : cube){
// w=P−P1
Eigen::Vector3d w = vec - triangle_vertex_0;
// Barycentric coordinates of the projection P′of P onto T:
// γ=[(u×w)⋅n]/n²
float gamma = u.cross(w).dot(n) / n.dot(n);
// β=[(w×v)⋅n]/n²
float beta = w.cross(v).dot(n) / n.dot(n);
float alpha = 1 - gamma - beta;
// The point P′ lies inside T if:
bool proyectionInTriangle= ((0 <= alpha) && (alpha <= 1) &&
(0 <= beta) && (beta <= 1) &&
(0 <= gamma) && (gamma <= 1));
anyProyectionInTriangle=anyProyectionInTriangle||proyectionInTriangle;
}
n.normalize();
//we consider that the triangle goes through the cell if the projection of any
//of the corner points of the cell's cube onto the triangle's plane falls inside the triangle
return anyProyectionInTriangle;
}
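// Worked example (illustrative): for the triangle P1=(0,0,0), P2=(1,0,0),
// P3=(0,1,0) and the query point P=(0.25,0.25,0):
//   u=(1,0,0), v=(0,1,0), n=(0,0,1), w=(0.25,0.25,0)
//   gamma = 0.25, beta = 0.25, alpha = 0.5  -> all in [0,1], so P' lies inside.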
bool parallel (const Point &vec){
return (eq(vec.y,0)
&&eq(vec.z,0))||
(eq(vec.x,0)
&&eq(vec.z,0))||
(eq(vec.x,0)
&&eq(vec.y,0));
}
void occupy(std::vector<Triangle> &triangles,
const std::vector<Point> &normals,
cell_state value_to_write){
std::cout<<"Processing the mesh...\n0%\n";
int numberOfProcessedTriangles=0; //for logging, doesn't actually do anything
boost::mutex mtx;
//Let's occupy the environment!
#pragma omp parallel for
for(int i= 0;i<triangles.size();i++){
//We try to find all the cells that some triangle goes through
int x1 = roundf((triangles[i].p1.x-env_min_x)*(roundFactor))/(cell_size*(roundFactor));
int y1 = roundf((triangles[i].p1.y-env_min_y)*(roundFactor))/(cell_size*(roundFactor));
int z1 = roundf((triangles[i].p1.z-env_min_z)*(roundFactor))/(cell_size*(roundFactor));
int x2 = roundf((triangles[i].p2.x-env_min_x)*(roundFactor))/(cell_size*(roundFactor));
int y2 = roundf((triangles[i].p2.y-env_min_y)*(roundFactor))/(cell_size*(roundFactor));
int z2 = roundf((triangles[i].p2.z-env_min_z)*(roundFactor))/(cell_size*(roundFactor));
int x3 = roundf((triangles[i].p3.x-env_min_x)*(roundFactor))/(cell_size*(roundFactor));
int y3 = roundf((triangles[i].p3.y-env_min_y)*(roundFactor))/(cell_size*(roundFactor));
int z3 = roundf((triangles[i].p3.z-env_min_z)*(roundFactor))/(cell_size*(roundFactor));
int min_x = min_val(x1,x2,x3);
int min_y = min_val(y1,y2,y3);
int min_z = min_val(z1,z2,z3);
int max_x = max_val(x1,x2,x3);
int max_y = max_val(y1,y2,y3);
int max_z = max_val(z1,z2,z3);
//is the triangle right at the boundary between two cells (in any axis)?
bool xLimit = eq(std::fmod(max_val(triangles[i][0].x,triangles[i][1].x,triangles[i][2].x)-env_min_x, cell_size),0)
||eq(std::fmod(max_val(triangles[i][0].x,triangles[i][1].x,triangles[i][2].x)-env_min_x, cell_size),cell_size);
bool yLimit = eq(std::fmod(max_val(triangles[i][0].y,triangles[i][1].y,triangles[i][2].y)-env_min_y, cell_size),0)
||eq(std::fmod(max_val(triangles[i][0].y,triangles[i][1].y,triangles[i][2].y)-env_min_y, cell_size),cell_size);
bool zLimit = eq(std::fmod(max_val(triangles[i][0].z,triangles[i][1].z,triangles[i][2].z)-env_min_z, cell_size),0)
||eq(std::fmod(max_val(triangles[i][0].z,triangles[i][1].z,triangles[i][2].z)-env_min_z, cell_size),cell_size);
bool isParallel =parallel(normals[i]);
for (int row = min_x; row <= max_x && row < env[0].size(); row++)
{
for (int col = min_y; col <= max_y && col < env.size(); col++)
{
for (int height = min_z; height <= max_z && height < env[0][0].size(); height++)
{
//check if the triangle goes through this cell
//special case for triangles that are parallel to the coordinate axes because the discretization can cause
//problems if they fall right on the boundary of two cells
if (
(isParallel && pointInTriangle(Eigen::Vector3d(row * cell_size + env_min_x+cell_size/2,
col * cell_size + env_min_y+cell_size/2,
height * cell_size + env_min_z+cell_size/2),
Eigen::Vector3d(triangles[i][0].x, triangles[i][0].y, triangles[i][0].z),
Eigen::Vector3d(triangles[i][1].x, triangles[i][1].y, triangles[i][1].z),
Eigen::Vector3d(triangles[i][2].x, triangles[i][2].y, triangles[i][2].z)))
||
triBoxOverlap(
Eigen::Vector3d(row * cell_size + env_min_x+cell_size/2,
col * cell_size + env_min_y+cell_size/2,
height * cell_size + env_min_z+cell_size/2),
Eigen::Vector3d(cell_size/2, cell_size/2, cell_size/2),
Eigen::Vector3d(triangles[i][0].x, triangles[i][0].y, triangles[i][0].z),
Eigen::Vector3d(triangles[i][1].x, triangles[i][1].y, triangles[i][1].z),
Eigen::Vector3d(triangles[i][2].x, triangles[i][2].y, triangles[i][2].z)))
{
mtx.lock();
cell_state previous_value = (cell_state) env[col][row][height];
env[col][row][height] = value_to_write;
if(value_to_write==cell_state::occupied){
//if the "limit" flags are activated, AND we are on the offending cells,
//AND the cell has not previously been marked as normally occupied by a different triangle,
//AND the cell is not on the very limit of the environment, mark the cell as "edge" for later cleanup
bool limitOfproblematicTriangle=(xLimit&&row==max_x)||
(yLimit&&col==max_y)||
(zLimit&&height==max_z);
bool endOfTheEnvironment = (col==0 || col==env.size()-1 ||
row==0 || row==env[0].size()-1 ||
height==0 || height==env[0][0].size()-1);
if( !endOfTheEnvironment &&
limitOfproblematicTriangle &&
previous_value!=cell_state::occupied){
env[col][row][height]=cell_state::edge;
}
}
mtx.unlock();
}
}
}
}
//log progress
if(i>numberOfProcessedTriangles+triangles.size()/10){
mtx.lock();
std::cout<<(100*i)/triangles.size()<<"%\n";
numberOfProcessedTriangles=i;
mtx.unlock();
}
}
}
void parse(std::string filename, cell_state value_to_write){
bool ascii = false;
if (FILE *file = fopen(filename.c_str(), "r"))
{
//File exists!, keep going!
char buffer[6];
fgets(buffer, 6, file);
if(std::string(buffer).find("solid")!=std::string::npos)
ascii=true;
fclose(file);
}else{
std::cout<< "File " << filename << " does not exist\n";
return;
}
std::vector<Triangle> triangles;
std::vector<Point> normals;
if(ascii){
//first, we count how many triangles there are (we need to do this before reading the data
// to create a vector of the right size)
std::ifstream countfile(filename.c_str());
std::string line;
int count = 0;
while (std::getline(countfile, line)){
if(line.find("facet normal") != std::string::npos){
count++;
}
}
countfile.close();
//each triangles[i] contains the three vertices of triangle i
triangles.resize(count);
normals.resize(count);
//let's read the data
std::ifstream infile(filename.c_str());
std::getline(infile, line);
int i =0;
while (line.find("endsolid")==std::string::npos)
{
while (line.find("facet normal") == std::string::npos){std::getline(infile, line);}
size_t pos = line.find("facet");
line.erase(0, pos + 12);
float aux;
std::stringstream ss(line);
ss >> std::skipws >> aux;
normals[i].x = roundf(aux * roundFactor) / roundFactor;
ss >> std::skipws >> aux;
normals[i].y = roundf(aux * roundFactor) / roundFactor;
ss >> std::skipws >> aux;
normals[i].z = roundf(aux * roundFactor) / roundFactor;
std::getline(infile, line);
for(int j=0;j<3;j++){
std::getline(infile, line);
size_t pos = line.find("vertex ");
line.erase(0, pos + 7);
std::stringstream ss(line);
ss >> std::skipws >> aux;
triangles[i][j].x = roundf(aux * roundFactor) / roundFactor;
ss >> std::skipws >> aux;
triangles[i][j].y = roundf(aux * roundFactor) / roundFactor;
ss >> std::skipws >> aux;
triangles[i][j].z = roundf(aux * roundFactor) / roundFactor;
}
i++;
//skipping lines here makes checking for the end of the file more convenient
std::getline(infile, line);
std::getline(infile, line);
while(std::getline(infile, line)&&line.length()==0);
}
infile.close();
}
else{
std::ifstream infile(filename.c_str(), std::ios_base::binary);
infile.seekg(80 * sizeof(uint8_t), std::ios_base::cur); //skip the header
uint32_t num_triangles;
infile.read((char*) &num_triangles, sizeof(uint32_t));
triangles.resize(num_triangles);
normals.resize(num_triangles);
for(int i = 0; i < num_triangles; i++){
infile.read((char*) &normals[i], 3 * sizeof(float)); //read the normal vector
for(int j=0; j<3;j++){
infile.read((char*) &triangles[i][j], 3 * sizeof(float)); //read the point
}
infile.seekg(sizeof(uint16_t), std::ios_base::cur); //skip the attribute data
}
infile.close();
}
//OK, we have read the data, let's do something with it
occupy(triangles, normals, value_to_write);
}
void findDimensions(std::string filename){
bool ascii = false;
if (FILE *file = fopen(filename.c_str(), "r"))
{
//File exists!, keep going!
char buffer[6];
fgets(buffer, 6, file);
if(std::string(buffer).find("solid")!=std::string::npos)
ascii=true;
fclose(file);
}else{
std::cout<< "File " << filename << " does not exist\n";
return;
}
if(ascii){
//let's read the data
std::string line;
std::ifstream infile(filename.c_str());
std::getline(infile, line);
int i =0;
while (line.find("endsolid")==std::string::npos)
{
while (std::getline(infile, line) && line.find("outer loop") == std::string::npos);
for(int j=0;j<3;j++){
float x, y, z;
std::getline(infile, line);
size_t pos = line.find("vertex ");
line.erase(0, pos + 7);
std::stringstream ss(line);
float aux;
ss >> std::skipws >> aux;
x = roundf(aux * roundFactor) / roundFactor;
ss >> std::skipws >> aux;
y = roundf(aux * roundFactor) / roundFactor;
ss >> std::skipws >> aux;
z = roundf(aux * roundFactor) / roundFactor;
env_max_x = env_max_x>=x?env_max_x:x;
env_max_y = env_max_y>=y?env_max_y:y;
env_max_z = env_max_z>=z?env_max_z:z;
env_min_x = env_min_x<=x?env_min_x:x;
env_min_y = env_min_y<=y?env_min_y:y;
env_min_z = env_min_z<=z?env_min_z:z;
}
i++;
//skipping three lines here makes checking for the end of the file more convenient
std::getline(infile, line);
std::getline(infile, line);
while(std::getline(infile, line)&&line.length()==0);
}
infile.close();
}
else{
std::ifstream infile(filename.c_str(), std::ios_base::binary);
infile.seekg(80 * sizeof(uint8_t), std::ios_base::cur); //skip the header
uint32_t num_triangles;
infile.read((char*) &num_triangles, sizeof(uint32_t));
for(int i = 0; i < num_triangles; i++){
infile.seekg(3 * sizeof(float), std::ios_base::cur); //skip the normal vector
for(int j=0; j<3;j++){
float x, y ,z;
infile.read((char*) &x, sizeof(float));
infile.read((char*) &y, sizeof(float));
infile.read((char*) &z, sizeof(float));
env_max_x = env_max_x>=x?env_max_x:x;
env_max_y = env_max_y>=y?env_max_y:y;
env_max_z = env_max_z>=z?env_max_z:z;
env_min_x = env_min_x<=x?env_min_x:x;
env_min_y = env_min_y<=y?env_min_y:y;
env_min_z = env_min_z<=z?env_min_z:z;
}
infile.seekg(sizeof(uint16_t), std::ios_base::cur); //skip the attribute data
}
}
std::cout<<"Dimensions are:\n"<<
"x: ("<<env_min_x<<", "<<env_max_x<<")\n"<<
"y: ("<<env_min_y<<", "<<env_max_y<<")\n"<<
"z: ("<<env_min_z<<", "<<env_max_z<<")\n";
}
int indexFrom3D(int x, int y, int z){
return x + y*env[0].size() + z*env[0].size()*env.size();
}
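// Illustrative note (not in the original source): this is the x-fastest
// linearization used for the wind vectors, assuming env[0].size() is the
// number of x cells (as used by openFoam_to_gaden below). E.g. in a grid
// with 4 x-cells and 3 y-cells, cell (x=1, y=2, z=1) maps to
//   1 + 2*4 + 1*4*3 = 21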
void openFoam_to_gaden(std::string filename)
{
//let's parse the file
std::ifstream infile(filename.c_str());
std::string line;
//ignore the first line (column names)
std::getline(infile, line);
std::vector<double> U(env[0].size()*env.size()*env[0][0].size());
std::vector<double> V(env[0].size()*env.size()*env[0][0].size());
std::vector<double> W(env[0].size()*env.size()*env[0][0].size());
std::vector<double> v(6);
int x_idx = 0;
int y_idx = 0;
int z_idx = 0;
while (std::getline(infile, line))
{
if (line.length()!=0)
{
for (int i = 0; i < 6; i++)
{
size_t pos = line.find(",");
v[i] = atof(line.substr(0, pos).c_str());
line.erase(0, pos + 1);
}
//assign each of the points we have information about to the nearest cell
x_idx = (int)roundf((v[3] - env_min_x) / cell_size*roundFactor)/roundFactor;
y_idx = (int)roundf((v[4] - env_min_y) / cell_size*roundFactor)/roundFactor;
z_idx = (int)roundf((v[5] - env_min_z) / cell_size*roundFactor)/roundFactor;
U[ indexFrom3D(x_idx, y_idx,z_idx) ] = v[0];
V[ indexFrom3D(x_idx, y_idx,z_idx) ] = v[1];
W[ indexFrom3D(x_idx, y_idx,z_idx) ] = v[2];
}
}
std::cout << "env_size: " << env.size() << ' ' << env[0].size() << ' ' << env[0][0].size() << '\n';
std::cout << "U.size():" << U.size()<< '\n';
std::cout << "V.size():" << V.size()<< '\n';
std::cout << "W.size():" << W.size()<< '\n';
infile.close();
printWind(U,V,W,filename);
}
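// Expected CSV layout (one point per row, header line discarded above),
// assuming the usual ParaView/OpenFOAM export order U,V,W followed by the
// point coordinates -- the exact column names in the header do not matter:
//   U,V,W,Points:0,Points:1,Points:2
//   0.12,0.03,-0.01,1.25,0.75,1.00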
void fill(int x, int y, int z, cell_state new_value, cell_state value_to_overwrite){
std::queue<Eigen::Vector3i> q;
q.push(Eigen::Vector3i(x, y, z));
env[x][y][z]=new_value;
while(!q.empty()){
Eigen::Vector3i point = q.front();
q.pop();
if(compare_cell(point.x()+1, point.y(), point.z(), value_to_overwrite)){ // x+1, y, z
env[point.x()+1][point.y()][point.z()]=new_value;
q.push(Eigen::Vector3i(point.x()+1, point.y(), point.z()));
}
if(compare_cell(point.x()-1, point.y(), point.z(), value_to_overwrite)){ // x-1, y, z
env[point.x()-1][point.y()][point.z()]=new_value;
q.push(Eigen::Vector3i(point.x()-1, point.y(), point.z()));
}
if(compare_cell(point.x(), point.y()+1, point.z(), value_to_overwrite)){ // x, y+1, z
env[point.x()][point.y()+1][point.z()]=new_value;
q.push(Eigen::Vector3i(point.x(), point.y()+1, point.z()));
}
if(compare_cell(point.x(), point.y()-1, point.z(), value_to_overwrite)){ // x, y-1, z
env[point.x()][point.y()-1][point.z()]=new_value;
q.push(Eigen::Vector3i(point.x(), point.y()-1, point.z()));
}
if(compare_cell(point.x(), point.y(), point.z()+1, value_to_overwrite)){ // x, y, z+1
env[point.x()][point.y()][point.z()+1]=new_value;
q.push(Eigen::Vector3i(point.x(), point.y(), point.z()+1));
}
if(compare_cell(point.x(), point.y(), point.z()-1, value_to_overwrite)){ // x, y, z-1
env[point.x()][point.y()][point.z()-1]=new_value;
q.push(Eigen::Vector3i(point.x(), point.y(), point.z()-1));
}
}
}
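// Usage sketch: main() seeds this flood fill from the user-supplied free point,
// swapping x and y to match the env[y][x][z] layout:
//   fill((empty_point_y-env_min_y)/cell_size,
//        (empty_point_x-env_min_x)/cell_size,
//        (empty_point_z-env_min_z)/cell_size,
//        cell_state::empty, cell_state::non_initialized);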
void clean(){
for(int col=0;col<env.size();col++){
for(int row=0;row<env[0].size();row++){
for(int height=0;height<env[0][0].size();height++){
if(env[col][row][height]==cell_state::edge){
if(compare_cell(col+1, row, height, cell_state::empty)||
compare_cell(col, row+1, height, cell_state::empty)||
compare_cell(col, row, height+1, cell_state::empty)||
(compare_cell(col+1, row+1, height, cell_state::empty)
&&env[col][row+1][height]==cell_state::edge
&&env[col+1][row][height]==cell_state::edge))
{
env[col][row][height]=cell_state::empty;
}else
{
env[col][row][height]=cell_state::occupied;
}
}
}
}
}
}
int main(int argc, char **argv){
ros::init(argc, argv, "preprocessing");
int numModels;
ros::NodeHandle nh;
ros::NodeHandle private_nh("~");
ros::Publisher pub = nh.advertise<std_msgs::Bool>("preprocessing_done",5,true);
private_nh.param<float>("cell_size", cell_size, 1); //size of the cells
roundFactor=100.0/cell_size;
//stl file with the model of the outlets
std::string outlet; int numOutletModels;
//path to the csv file where we want to write the occupancy map
std::string output;
private_nh.param<std::string>("output_path", output, "");
//--------------------------
//OCCUPANCY
//--------------------------
private_nh.param<int>("number_of_models", numModels, 2); // number of CAD models
std::vector<std::string> CADfiles;
for(int i = 0; i< numModels; i++){
std::string paramName = boost::str( boost::format("model_%i") % i); //each of the stl models
std::string filename;
private_nh.param<std::string>(paramName, filename, "");
        CADfiles.push_back(filename);
}
for (int i = 0; i < CADfiles.size(); i++)
{
findDimensions(CADfiles[i]);
}
    // eliminate floating point rounding error, which would otherwise result in incorrect cell counts
// float y_cells_float =((env_max_y-env_min_y)*(roundFactor)/(cell_size*(roundFactor)));
// float y_cells_round = roundf(y_cells_float*100)/100;
// float x_cells_float =((env_max_x-env_min_x)*(roundFactor)/(cell_size*(roundFactor)));
// float x_cells_round = roundf(x_cells_float*100)/100;
// float z_cells_float =((env_max_z-env_min_z)*(roundFactor)/(cell_size*(roundFactor)));
// float z_cells_round = roundf(z_cells_float*100)/100;
//x and y are interchanged!!!!!! it goes env[y][x][z]
//I cannot for the life of me remember why I did that, but there must have been a reason
// env = std::vector<std::vector<std::vector<int> > > (ceil(y_cells_round),
// std::vector<std::vector<int> >(ceil(x_cells_round),
// std::vector<int>(ceil(z_cells_round), 0)));
env = std::vector<std::vector<std::vector<int> > > (ceil((env_max_y-env_min_y)*(roundFactor)/(cell_size*(roundFactor))),
std::vector<std::vector<int> >(ceil((env_max_x - env_min_x)*(roundFactor)/(cell_size*(roundFactor))),
std::vector<int>(ceil((env_max_z - env_min_z)*(roundFactor)/(cell_size*(roundFactor))), 0)));
ros::Time start = ros::Time::now();
for (int i = 0; i < numModels; i++)
{
parse(CADfiles[i], cell_state::occupied);
}
std::cout <<"Took "<< ros::Time::now().toSec()-start.toSec()<<" seconds \n";
float empty_point_x;
private_nh.param<float>("empty_point_x", empty_point_x, 1);
float empty_point_y;
private_nh.param<float>("empty_point_y", empty_point_y, 1);
float empty_point_z;
private_nh.param<float>("empty_point_z", empty_point_z, 1);
//--------------------------
//OUTLETS
//--------------------------
private_nh.param<int>("number_of_outlet_models", numOutletModels, 1); // number of CAD models
std::vector<std::string> outletFiles;
for(int i = 0; i< numOutletModels; i++){
std::string paramName = boost::str( boost::format("outlets_model_%i") % i); //each of the stl models
std::string filename;
private_nh.param<std::string>(paramName, filename, "");
        outletFiles.push_back(filename);
}
for (int i=0;i<numOutletModels; i++){
parse(outletFiles[i], cell_state::outlet);
}
std::cout<<"Filling...\n";
//Mark all the empty cells reachable from the empty_point as aux_empty
//the ones that cannot be reached will be marked as occupied when printing
fill((empty_point_y-env_min_y)/cell_size,
(empty_point_x-env_min_x)/cell_size,
(empty_point_z-env_min_z)/cell_size,
cell_state::empty, cell_state::non_initialized);
//get rid of the cells marked as "edge", since those are not truly occupied
clean();
private_nh.param<float>("floor_height", floor_height, 0); // number of CAD models
printMap(boost::str(boost::format("%s/occupancy.pgm") % output.c_str()), 10, private_nh.param<bool>("block_outlets", false) );
std::string worldFile;
private_nh.param<std::string>("worldFile", worldFile, ""); // number of CAD models
if(worldFile!="")
changeWorldFile(worldFile);
//output - path, occupancy vector, scale
printEnv(boost::str(boost::format("%s/OccupancyGrid3D.csv") % output.c_str()), 1);
printYaml(output);
//-------------------------
//WIND
//-------------------------
bool uniformWind;
private_nh.param<bool>("uniformWind", uniformWind, false);
//path to the point cloud files with the wind data
std::string windFileName;
private_nh.param<std::string>("wind_files", windFileName, "");
int idx = 0;
if(uniformWind){
//let's parse the file
std::ifstream infile(windFileName);
std::string line;
std::vector<double> U(env[0].size()*env.size()*env[0][0].size());
std::vector<double> V(env[0].size()*env.size()*env[0][0].size());
std::vector<double> W(env[0].size()*env.size()*env[0][0].size());
while(std::getline(infile, line)){
std::vector<double> v;
for (int i = 0; i < 3; i++)
{
size_t pos = line.find(",");
v.push_back(atof(line.substr(0, pos).c_str()));
line.erase(0, pos + 1);
}
for(int i = 0; i< env[0].size();i++){
for(int j = 0; j< env.size();j++){
for(int k = 0; k< env[0][0].size();k++){
if(env[j][i][k]==cell_state::empty){
U[ indexFrom3D(i, j, k) ] = v[0];
V[ indexFrom3D(i, j, k) ] = v[1];
W[ indexFrom3D(i, j, k) ] = v[2];
}
}
}
}
infile.close();
printWind(U,V,W, boost::str(boost::format("%s_%i.csv") % windFileName % idx).c_str());
idx++;
}
}else{
while (FILE *file = fopen(boost::str(boost::format("%s_%i.csv") % windFileName % idx).c_str(), "r"))
{
fclose(file);
openFoam_to_gaden(boost::str(boost::format("%s_%i.csv") % windFileName % idx).c_str());
idx++;
}
}
ROS_INFO("Preprocessing done");
std_msgs::Bool b;
b.data=true;
pub.publish(b);
}
| 34,170 | C++ | 38.097254 | 182 | 0.504712 |
tudelft/autoGDMplus/gaden_ws/src/gaden/gaden_preprocessing/include/TriangleBoxIntersection.h | #pragma once
/*------------------------------------------------
Adapted from https://gist.github.com/jflipts/fc68d4eeacfcc04fbdb2bf38e0911850#file-triangleboxintersection-h
--------------------------------------------------*/
#include <cmath>
#include <eigen3/Eigen/Dense>
inline void findMinMax(float x0, float x1, float x2, float &min, float &max) {
min = max = x0;
if (x1 < min)
min = x1;
if (x1 > max)
max = x1;
if (x2 < min)
min = x2;
if (x2 > max)
max = x2;
}
inline bool planeBoxOverlap(Eigen::Vector3d normal, Eigen::Vector3d vert, Eigen::Vector3d maxbox) {
Eigen::Vector3d vmin, vmax;
float v;
for (size_t q = 0; q < 3; q++) {
v = vert[q];
if (normal[q] > 0.0f) {
vmin[q] = -maxbox[q] - v;
vmax[q] = maxbox[q] - v;
} else {
vmin[q] = maxbox[q] - v;
vmax[q] = -maxbox[q] - v;
}
}
if (normal.dot(vmin) > 0.0f)
return false;
if (normal.dot(vmax) >= 0.0f)
return true;
return false;
}
/*======================== X-tests ========================*/
inline bool axisTestX01(float a, float b, float fa, float fb, const Eigen::Vector3d &v0,
const Eigen::Vector3d &v2, const Eigen::Vector3d &boxhalfsize, float &rad, float &min,
float &max, float &p0, float &p2) {
p0 = a * v0.y() - b * v0.z();
p2 = a * v2.y() - b * v2.z();
if (p0 < p2) {
min = p0;
max = p2;
} else {
min = p2;
max = p0;
}
rad = fa * boxhalfsize.y() + fb * boxhalfsize.z();
if (min > rad || max < -rad)
return false;
return true;
}
inline bool axisTestX2(float a, float b, float fa, float fb, const Eigen::Vector3d &v0,
const Eigen::Vector3d &v1, const Eigen::Vector3d &boxhalfsize, float &rad, float &min,
float &max, float &p0, float &p1) {
p0 = a * v0.y() - b * v0.z();
p1 = a * v1.y() - b * v1.z();
if (p0 < p1) {
min = p0;
max = p1;
} else {
min = p1;
max = p0;
}
rad = fa * boxhalfsize.y() + fb * boxhalfsize.z();
if (min > rad || max < -rad)
return false;
return true;
}
/*======================== Y-tests ========================*/
inline bool axisTestY02(float a, float b, float fa, float fb, const Eigen::Vector3d &v0,
const Eigen::Vector3d &v2, const Eigen::Vector3d &boxhalfsize, float &rad, float &min,
float &max, float &p0, float &p2) {
p0 = -a * v0.x() + b * v0.z();
p2 = -a * v2.x() + b * v2.z();
if (p0 < p2) {
min = p0;
max = p2;
} else {
min = p2;
max = p0;
}
rad = fa * boxhalfsize.x() + fb * boxhalfsize.z();
if (min > rad || max < -rad)
return false;
return true;
}
inline bool axisTestY1(float a, float b, float fa, float fb, const Eigen::Vector3d &v0,
const Eigen::Vector3d &v1, const Eigen::Vector3d &boxhalfsize, float &rad, float &min,
float &max, float &p0, float &p1) {
p0 = -a * v0.x() + b * v0.z();
p1 = -a * v1.x() + b * v1.z();
if (p0 < p1) {
min = p0;
max = p1;
} else {
min = p1;
max = p0;
}
rad = fa * boxhalfsize.x() + fb * boxhalfsize.z();
if (min > rad || max < -rad)
return false;
return true;
}
/*======================== Z-tests ========================*/
inline bool axisTestZ12(float a, float b, float fa, float fb, const Eigen::Vector3d &v1,
const Eigen::Vector3d &v2, const Eigen::Vector3d &boxhalfsize, float &rad, float &min,
float &max, float &p1, float &p2) {
p1 = a * v1.x() - b * v1.y();
p2 = a * v2.x() - b * v2.y();
if (p1 < p2) {
min = p1;
max = p2;
} else {
min = p2;
max = p1;
}
rad = fa * boxhalfsize.x() + fb * boxhalfsize.y();
if (min > rad || max < -rad)
return false;
return true;
}
inline bool axisTestZ0(float a, float b, float fa, float fb, const Eigen::Vector3d &v0,
const Eigen::Vector3d &v1, const Eigen::Vector3d &boxhalfsize, float &rad, float &min,
float &max, float &p0, float &p1) {
p0 = a * v0.x() - b * v0.y();
p1 = a * v1.x() - b * v1.y();
if (p0 < p1) {
min = p0;
max = p1;
} else {
min = p1;
max = p0;
}
rad = fa * boxhalfsize.x() + fb * boxhalfsize.y();
if (min > rad || max < -rad)
return false;
return true;
}
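/* Separating-axis summary: a triangle and an axis-aligned box are disjoint iff
some axis separates their projections. triBoxOverlap() below checks the 13
candidate axes: the 3 box face normals ("Bullet 1"), the triangle normal
("Bullet 2"), and the 9 cross products of triangle edges with the box axes
("Bullet 3", the axisTest* helpers above). A sketch of a typical call, with
cell center c and half-extent h assumed to come from the caller:
    Eigen::Vector3d c(cx, cy, cz), h(half, half, half);
    bool hit = triBoxOverlap(c, h, v0, v1, v2); */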
bool triBoxOverlap(Eigen::Vector3d boxcenter, Eigen::Vector3d boxhalfsize, Eigen::Vector3d tv0, Eigen::Vector3d tv1,
Eigen::Vector3d tv2) {
/* use separating axis theorem to test overlap between triangle and box */
/* need to test for overlap in these directions: */
/* 1) the {x,y,z}-directions (actually, since we use the AABB of the triangle */
/* we do not even need to test these) */
/* 2) normal of the triangle */
    /*    3) crossproduct(edge from tri, {x,y,z}-direction) */
/* this gives 3x3=9 more tests */
Eigen::Vector3d v0, v1, v2;
float min, max, p0, p1, p2, rad, fex, fey, fez;
Eigen::Vector3d normal, e0, e1, e2;
/* This is the fastest branch on Sun */
/* move everything so that the boxcenter is in (0,0,0) */
v0 = tv0 - boxcenter;
v1 = tv1 - boxcenter;
v2 = tv2 - boxcenter;
/* compute triangle edges */
e0 = v1 - v0;
e1 = v2 - v1;
e2 = v0 - v2;
/* Bullet 3: */
/* test the 9 tests first (this was faster) */
fex = fabsf(e0.x());
fey = fabsf(e0.y());
fez = fabsf(e0.z());
if (!axisTestX01(e0.z(), e0.y(), fez, fey, v0, v2, boxhalfsize, rad, min, max, p0, p2))
return false;
if (!axisTestY02(e0.z(), e0.x(), fez, fex, v0, v2, boxhalfsize, rad, min, max, p0, p2))
return false;
if (!axisTestZ12(e0.y(), e0.x(), fey, fex, v1, v2, boxhalfsize, rad, min, max, p1, p2))
return false;
fex = fabsf(e1.x());
fey = fabsf(e1.y());
fez = fabsf(e1.z());
if (!axisTestX01(e1.z(), e1.y(), fez, fey, v0, v2, boxhalfsize, rad, min, max, p0, p2))
return false;
if (!axisTestY02(e1.z(), e1.x(), fez, fex, v0, v2, boxhalfsize, rad, min, max, p0, p2))
return false;
if (!axisTestZ0(e1.y(), e1.x(), fey, fex, v0, v1, boxhalfsize, rad, min, max, p0, p1))
return false;
fex = fabsf(e2.x());
fey = fabsf(e2.y());
fez = fabsf(e2.z());
if (!axisTestX2(e2.z(), e2.y(), fez, fey, v0, v1, boxhalfsize, rad, min, max, p0, p1))
return false;
if (!axisTestY1(e2.z(), e2.x(), fez, fex, v0, v1, boxhalfsize, rad, min, max, p0, p1))
return false;
if (!axisTestZ12(e2.y(), e2.x(), fey, fex, v1, v2, boxhalfsize, rad, min, max, p1, p2))
return false;
/* Bullet 1: */
/* first test overlap in the {x,y,z}-directions */
/* find min, max of the triangle each direction, and test for overlap in */
/* that direction -- this is equivalent to testing a minimal AABB around */
/* the triangle against the AABB */
/* test in X-direction */
findMinMax(v0.x(), v1.x(), v2.x(), min, max);
if (min > boxhalfsize.x() || max < -boxhalfsize.x())
return false;
/* test in Y-direction */
findMinMax(v0.y(), v1.y(), v2.y(), min, max);
if (min > boxhalfsize.y() || max < -boxhalfsize.y())
return false;
/* test in Z-direction */
findMinMax(v0.z(), v1.z(), v2.z(), min, max);
if (min > boxhalfsize.z() || max < -boxhalfsize.z())
return false;
/* Bullet 2: */
/* test if the box intersects the plane of the triangle */
/* compute plane equation of triangle: normal*x+d=0 */
normal = e0.cross(e1);
if (!planeBoxOverlap(normal, v0, boxhalfsize))
return false;
    return true; /* box and triangle overlap */
} | 7,029 | C | 27.811475 | 116 | 0.577038 |
tudelft/autoGDMplus/gaden_ws/src/gaden/gaden_player/package.xml | <package>
<name>gaden_player</name>
<version>1.0.0</version>
<description> This package loads the log files (results) from the gaden_simulator pkg, and provides services to evaluate the gas concentration and wind vector at a given location. The goal is to provide a fast (real time) service for the simulated MOX and Anemometer sensors.</description>
<maintainer email="[email protected]">Javier Monroy</maintainer>
<license>GPLv3</license>
<author>Javier Monroy</author>
<!-- Dependencies which this package needs to build itself. -->
<buildtool_depend>catkin</buildtool_depend>
<!-- Dependencies needed to compile this package. -->
<build_depend>roscpp</build_depend>
<build_depend>visualization_msgs</build_depend>
<build_depend>std_msgs</build_depend>
<build_depend>nav_msgs</build_depend>
<build_depend>tf</build_depend>
<build_depend>message_generation</build_depend>
<!-- Dependencies needed after this package is compiled. -->
<run_depend>roscpp</run_depend>
<run_depend>visualization_msgs</run_depend>
<run_depend>std_msgs</run_depend>
<run_depend>tf</run_depend>
<run_depend>nav_msgs</run_depend>
<run_depend>message_runtime</run_depend>
</package>
| 1,206 | XML | 39.233332 | 291 | 0.737148 |
tudelft/autoGDMplus/gaden_ws/src/gaden/gaden_player/src/simulation_player.h | #include <ros/ros.h>
#include <nav_msgs/Odometry.h>
#include <geometry_msgs/PoseWithCovarianceStamped.h>
#include <tf/transform_listener.h>
#include <std_msgs/Float32.h>
#include <std_msgs/Float32MultiArray.h>
#include <visualization_msgs/Marker.h>
#include <gaden_player/GasPosition.h>
#include <gaden_player/WindPosition.h>
#include <cstdlib>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <iostream>
#include <fstream>
#include <string>
#include <time.h>
#include <map>
#include <boost/iostreams/filter/zlib.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/iostreams/copy.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
struct Filament{
public:
double x, y, z, sigma;
Filament(double a, double b, double c, double d){
x=a;
y=b;
z=c;
sigma=d;
}
};
// CLASS for every simulation to run. If two gas sources are needed, just create 2 instances!
class sim_obj
{
public:
sim_obj(std::string filepath, bool load_wind_info);
~sim_obj();
std::string gas_type;
std::string simulation_filename;
int environment_cells_x, environment_cells_y, environment_cells_z;
double environment_cell_size;
double source_pos_x, source_pos_y, source_pos_z;
double env_min_x; //[m]
double env_max_x; //[m]
double env_min_y; //[m]
double env_max_y; //[m]
double env_min_z; //[m]
double env_max_z; //[m]
bool load_wind_data;
std::vector<double> C; //3D Gas concentration
std::vector<double> U; //3D Wind U
std::vector<double> V; //3D Wind V
std::vector<double> W; //3D Wind W
bool first_reading;
bool filament_log;
double total_moles_in_filament;
double num_moles_all_gases_in_cm3;
std::map<int, Filament> activeFilaments;
std::vector<uint8_t> Env;
//methods
void configure_environment();
void readEnvFile();
void load_data_from_logfile(int sim_iteration);
void load_ascii_file(std::stringstream &decompressed);
void load_binary_file(std::stringstream &decompressed);
double get_gas_concentration(float x, float y, float z);
double concentration_from_filament(float x, float y, float z, Filament fil);
bool check_environment_for_obstacle(double start_x, double start_y, double start_z,
double end_x, double end_y, double end_z);
int check_pose_with_environment(double pose_x, double pose_y, double pose_z);
void get_wind_value(float x, float y, float z, double &u, double &v, double &w);
void get_concentration_as_markers(visualization_msgs::Marker &mkr_points);
void read_headers(std::stringstream &inbuf, std::string &line);
void load_wind_file(int wind_index);
int last_wind_idx=-1;
void read_concentration_line(std::string line);
std::vector<std::vector<double> > heatmap;
void updateHeatmap();
void writeHeatmapImage();
int indexFrom3D(int x, int y, int z);
std::string gasTypesByCode[14] = {
"ethanol",
"methane",
"hydrogen",
"propanol",
"chlorine",
"flurorine",
"acetone",
"neon",
"helium",
"biogas",
"butane",
"carbon dioxide",
"carbon monoxide",
"smoke"
};
};
// ---------------------- MAIN--------------------//
// Parameters
double player_freq;
int num_simulators;
bool verbose;
std::vector<std::string> simulation_data;
std::vector<sim_obj> player_instances; //To handle N simulations at a time.
int initial_iteration, loop_from_iteration, loop_to_iteration;
bool allow_looping;
std::string occupancyFile;
bool createHeatmapImage;
std::string heatmapPath;
float heatmapHeight;
double heatmapThreshold;
int heatMapIterations;
//Visualization
ros::Publisher marker_pub;
visualization_msgs::Marker mkr_gas_points; //We will create an array of particles according to cell concentration
//functions:
void loadNodeParameters(ros::NodeHandle private_nh);
void init_all_simulation_instances();
void load_all_data_from_logfiles(int sim_iteration);
void display_current_gas_distribution();
| 4,550 | C | 29.543624 | 135 | 0.617582 |
tudelft/autoGDMplus/gaden_ws/src/gaden/gaden_player/src/simulation_player.cpp | /*--------------------------------------------------------------------------------
* Pkg for playing the simulation results of the "filament_simulator" pkg.
* It allows to run on real time, and provide services to simulated sensors (gas, wind)
* It supports loading several simulations at a time, which allows multiple gas sources and gas types
* It also generates a point cloud representing the gas concentration [ppm] on the 3D environment
--------------------------------------------------------------------------------*/
#include <boost/format.hpp>
#include "simulation_player.h"
//--------------- SERVICES CALLBACKS----------------------//
gaden_player::GasInCell get_all_gases_single_cell(float x, float y, float z, const std::vector<std::string>& gas_types)
{
std::vector<double> srv_response_gas_concs(num_simulators);
std::map<std::string,double> concentrationByGasType;
for(int i=0;i<gas_types.size();i++)
concentrationByGasType[gas_types[i]] = 0;
//Get all gas concentrations and gas types (from all instances)
for (int i=0;i<num_simulators; i++)
concentrationByGasType[player_instances[i].gas_type] += player_instances[i].get_gas_concentration(x, y, z);
//Configure Response
gaden_player::GasInCell response;
for (int i = 0; i< gas_types.size();i++)
{
response.concentration.push_back( concentrationByGasType[gas_types[i]] );
}
return response;
}
bool get_gas_value_srv(gaden_player::GasPosition::Request &req, gaden_player::GasPosition::Response &res)
{
std::set<std::string> gas_types;
for (int i=0;i<num_simulators; i++)
gas_types.insert(player_instances[i].gas_type);
    std::vector<std::string> gas_types_v(gas_types.begin(), gas_types.end());
    res.gas_type = gas_types_v;
    for(int i=0;i<req.x.size(); i++)
    {
        res.positions.push_back( get_all_gases_single_cell(req.x[i], req.y[i], req.z[i], gas_types_v) );
}
return true;
}
bool get_wind_value_srv(gaden_player::WindPosition::Request &req, gaden_player::WindPosition::Response &res)
{
//Since the wind fields are identical among different instances, return just the information from instance[0]
for(int i = 0; i<req.x.size(); i++){
double u, v, w;
player_instances[0].get_wind_value(req.x[i], req.y[i], req.z[i], u, v, w);
res.u.push_back(u);
res.v.push_back(v);
res.w.push_back(w);
}
return true;
}
//------------------------ MAIN --------------------------//
int main( int argc, char** argv )
{
ros::init(argc, argv, "simulation_player");
ros::NodeHandle n;
ros::NodeHandle pn("~");
//Read Node Parameters
loadNodeParameters(pn);
//Publishers
marker_pub = n.advertise<visualization_msgs::Marker>("Gas_Distribution", 1);
//Services offered
ros::ServiceServer serviceGas = n.advertiseService("odor_value", get_gas_value_srv);
ros::ServiceServer serviceWind = n.advertiseService("wind_value", get_wind_value_srv);
//Init variables
init_all_simulation_instances();
ros::Time time_last_loaded_file = ros::Time::now();
srand(time(NULL));// initialize random seed
//Init Markers for RVIZ visualization
mkr_gas_points.header.frame_id = "map";
mkr_gas_points.header.stamp = ros::Time::now();
mkr_gas_points.ns = "Gas_Dispersion";
mkr_gas_points.action = visualization_msgs::Marker::ADD;
mkr_gas_points.type = visualization_msgs::Marker::POINTS; //Marker type
mkr_gas_points.id = 0; //One marker with multiple points.
mkr_gas_points.scale.x = 0.025;
mkr_gas_points.scale.y = 0.025;
mkr_gas_points.scale.z = 0.025;
mkr_gas_points.pose.orientation.w = 1.0;
// Loop
ros::Rate r(100); //Set max rate at 100Hz (for handling services - Top Speed!!)
int iteration_counter = initial_iteration;
while (ros::ok())
{
if( (ros::Time::now() - time_last_loaded_file).toSec() >= 1/player_freq )
{
if (verbose)
ROS_INFO("[Player] Playing simulation iteration %i", iteration_counter);
//Read Gas and Wind data from log_files
load_all_data_from_logfiles(iteration_counter); //On the first time, we configure gas type, source pos, etc.
display_current_gas_distribution(); //Rviz visualization
iteration_counter++;
//Looping?
if (allow_looping)
{
if (iteration_counter >= loop_to_iteration)
{
iteration_counter = loop_from_iteration;
if (verbose)
ROS_INFO("[Player] Looping");
}
}
time_last_loaded_file = ros::Time::now();
}
//Attend service request at max rate!
//This allows sensors to have higher sampling rates than the simulation update
ros::spinOnce();
r.sleep();
}
}
//Load Node parameters
void loadNodeParameters(ros::NodeHandle private_nh)
{
//player_freq
private_nh.param<bool>("verbose", verbose, false);
//player_freq
private_nh.param<double>("player_freq", player_freq, 1); //Hz
//Number of simulators to load (For simulating multiple gases and multiple sources)
private_nh.param<int>("num_simulators", num_simulators, 1);
if (verbose)
{
ROS_INFO("[Player] player_freq %.2f", player_freq);
ROS_INFO("[Player] num_simulators: %i", num_simulators);
}
//FilePath for simulated data
simulation_data.resize(num_simulators);
for (int i=0;i<num_simulators; i++)
{
//Get location of simulation data for instance (i)
std::string paramName = boost::str( boost::format("simulation_data_%i") % i);
private_nh.param<std::string>(paramName.c_str(),simulation_data[i], "");
if (verbose)
ROS_INFO("[Player] simulation_data_%i: %s", i, simulation_data[i].c_str());
}
// Initial iteration
private_nh.param<int>("initial_iteration", initial_iteration, 1);
private_nh.param<std::string>("occupancyFile", occupancyFile, "");
private_nh.param<bool>("createHeatmapImage", createHeatmapImage, false);
private_nh.param<std::string>("heatmapPath",heatmapPath, "");
private_nh.param<float>("heatmapHeight", heatmapHeight, 0.5);
private_nh.param<int>("heatMapIterations", heatMapIterations, 100);
private_nh.param<double>("heatmapThreshold", heatmapThreshold, 0.5);
// Loop
private_nh.param<bool>("allow_looping", allow_looping, false);
private_nh.param<int>("loop_from_iteration", loop_from_iteration, 1);
private_nh.param<int>("loop_to_iteration", loop_to_iteration, 1);
}
//Init
void init_all_simulation_instances()
{
ROS_INFO("[Player] Initializing %i instances",num_simulators);
// At least one instance is needed which loads the wind field data!
sim_obj so(simulation_data[0], true);
player_instances.push_back(so);
//Create other instances, but do not save wind information! It is the same for all instances
for (int i=1;i<num_simulators;i++)
{
sim_obj so(simulation_data[i], false);
player_instances.push_back(so);
}
//Set size for service responses
}
//Load new Iteration of the Gas&Wind State on the 3d environment
void load_all_data_from_logfiles(int sim_iteration)
{
//Load corresponding data for each instance (i.e for every gas source)
for (int i=0;i<num_simulators;i++)
{
if (verbose)
ROS_INFO("[Player] Loading new data to instance %i (iteration %i)",i,sim_iteration);
player_instances[i].load_data_from_logfile(sim_iteration);
}
}
//Display in RVIZ the gas distribution
void display_current_gas_distribution()
{
//Remove previous data points
mkr_gas_points.points.clear();
mkr_gas_points.colors.clear();
for (int i=0;i<num_simulators;i++)
{
player_instances[i].get_concentration_as_markers(mkr_gas_points);
}
//Display particles
marker_pub.publish(mkr_gas_points);
}
//==================================== SIM_OBJ ==============================//
// Constructor
sim_obj::sim_obj(std::string filepath, bool load_wind_info)
{
gas_type = "unknown";
simulation_filename = filepath;
environment_cells_x = environment_cells_y = environment_cells_z = 0;
environment_cell_size = 0.0; //m
source_pos_x = source_pos_y = source_pos_z = 0.0; //m
load_wind_data = load_wind_info;
first_reading = true;
filament_log=false;
}
sim_obj::~sim_obj(){}
void sim_obj::read_concentration_line(std::string line){
size_t pos;
double conc, u, v, w;
int x, y, z;
//A line has the format x y z conc u v w
pos = line.find(" ");
x = atoi(line.substr(0, pos).c_str());
line.erase(0, pos + 1);
pos = line.find(" ");
y = atoi(line.substr(0, pos).c_str());
line.erase(0, pos + 1);
pos = line.find(" ");
z = atoi(line.substr(0, pos).c_str());
line.erase(0, pos + 1);
pos = line.find(" ");
conc = atof(line.substr(0, pos).c_str());
line.erase(0, pos + 1);
pos = line.find(" ");
u = atof(line.substr(0, pos).c_str());
line.erase(0, pos + 1);
pos = line.find(" ");
v = atof(line.substr(0, pos).c_str());
w = atof(line.substr(pos + 1).c_str());
//Save data to internal storage
C[indexFrom3D(x,y,z)] = conc / 1000;
if (load_wind_data)
{
U[indexFrom3D(x,y,z)] = u / 1000;
V[indexFrom3D(x,y,z)] = v / 1000;
W[indexFrom3D(x,y,z)] = w / 1000;
}
}
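//Example of an ASCII log line as parsed above; values appear to be stored
//multiplied by 1000, hence the /1000 scaling:
//  "12 3 5 2500 120 -40 0" -> cell (12,3,5), conc 2.5, wind (0.12,-0.04,0.0)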
void sim_obj::read_headers(std::stringstream &inbuf, std::string &line){
std::getline(inbuf, line);
//Line 1 (min values of environment)
size_t pos = line.find(" ");
line.erase(0, pos + 1);
pos = line.find(" ");
env_min_x = atof(line.substr(0, pos).c_str());
line.erase(0, pos + 1);
pos = line.find(" ");
env_min_y = atof(line.substr(0, pos).c_str());
env_min_z = atof(line.substr(pos + 1).c_str());
std::getline(inbuf, line);
//Line 2 (max values of environment)
pos = line.find(" ");
line.erase(0, pos + 1);
pos = line.find(" ");
env_max_x = atof(line.substr(0, pos).c_str());
line.erase(0, pos + 1);
pos = line.find(" ");
env_max_y = atof(line.substr(0, pos).c_str());
env_max_z = atof(line.substr(pos + 1).c_str());
std::getline(inbuf, line);
//Get Number of cells (X,Y,Z)
pos = line.find(" ");
line.erase(0, pos + 1);
pos = line.find(" ");
environment_cells_x = atoi(line.substr(0, pos).c_str());
line.erase(0, pos + 1);
pos = line.find(" ");
environment_cells_y = atoi(line.substr(0, pos).c_str());
environment_cells_z = atoi(line.substr(pos + 1).c_str());
std::getline(inbuf, line);
//Get Cell_size
pos = line.find(" ");
line.erase(0, pos + 1);
pos = line.find(" ");
environment_cell_size = atof(line.substr(0, pos).c_str());
std::getline(inbuf, line);
//Get GasSourceLocation
pos = line.find(" ");
line.erase(0, pos + 1);
pos = line.find(" ");
source_pos_x = atof(line.substr(0, pos).c_str());
line.erase(0, pos + 1);
pos = line.find(" ");
source_pos_y = atof(line.substr(0, pos).c_str());
source_pos_z = atof(line.substr(pos + 1).c_str());
std::getline(inbuf, line);
//Get Gas_Type
pos = line.find(" ");
gas_type = line.substr(pos + 1);
//Configure instances
configure_environment();
std::getline(inbuf, line);
std::getline(inbuf, line);
std::getline(inbuf, line);
}
//Load a new file with Gas+Wind data
void sim_obj::load_data_from_logfile(int sim_iteration)
{
std::string filename = boost::str( boost::format("%s/iteration_%i") % simulation_filename.c_str() % sim_iteration);
FILE* fileCheck;
if ((fileCheck =fopen(filename.c_str(),"rb"))==NULL){
ROS_ERROR("File %s does not exist\n", filename.c_str());
return;
}
fclose(fileCheck);
std::ifstream infile(filename, std::ios_base::binary);
boost::iostreams::filtering_streambuf<boost::iostreams::input> inbuf;
inbuf.push(boost::iostreams::zlib_decompressor());
inbuf.push(infile);
std::stringstream decompressed;
boost::iostreams::copy(inbuf,decompressed);
//if the file starts with a 1, the contents are in binary
int check=0;
decompressed.read((char*) &check, sizeof(int));
if (check==1){
filament_log=true;
load_binary_file(decompressed);
}
else
load_ascii_file(decompressed);
infile.close();
static int iterationCounter=0;
if(createHeatmapImage){
if(iterationCounter<heatMapIterations)
updateHeatmap();
else if(iterationCounter==heatMapIterations)
writeHeatmapImage();
iterationCounter++;
}
}
void sim_obj::load_ascii_file(std::stringstream& decompressed){
std::string line;
size_t pos;
double conc, u, v, w;
int x, y, z;
if (first_reading)
{
read_headers(decompressed, line);
first_reading=false;
}
else{
//if already initialized, skip the header
for(int i=0; i<8; i++){
std::getline(decompressed, line);
}
}
do
{
read_concentration_line(line);
}while (std::getline(decompressed, line));
}
void sim_obj::load_binary_file(std::stringstream& decompressed){
if(first_reading){
double bufferD [5];
decompressed.read((char*) &env_min_x, sizeof(double));
decompressed.read((char*) &env_min_y, sizeof(double));
decompressed.read((char*) &env_min_z, sizeof(double));
decompressed.read((char*) &env_max_x, sizeof(double));
decompressed.read((char*) &env_max_y, sizeof(double));
decompressed.read((char*) &env_max_z, sizeof(double));
decompressed.read((char*) &environment_cells_x, sizeof(int));
decompressed.read((char*) &environment_cells_y, sizeof(int));
decompressed.read((char*) &environment_cells_z, sizeof(int));
decompressed.read((char*) &environment_cell_size, sizeof(double));
decompressed.read((char*) &bufferD, 5*sizeof(double));
int gt;
        decompressed.read((char*) &gt, sizeof(int));
gas_type=gasTypesByCode[gt];
decompressed.read((char*) &total_moles_in_filament, sizeof(double));
decompressed.read((char*) &num_moles_all_gases_in_cm3, sizeof(double));
configure_environment();
first_reading=false;
}else{
//skip headers
decompressed.seekg(14*sizeof(double) + 5*sizeof(int));
}
int wind_index;
decompressed.read((char*) &wind_index, sizeof(int));
activeFilaments.clear();
int filament_index;
double x, y, z, stdDev;
while(decompressed.peek()!=EOF){
decompressed.read((char*) &filament_index, sizeof(int));
decompressed.read((char*) &x, sizeof(double));
decompressed.read((char*) &y, sizeof(double));
decompressed.read((char*) &z, sizeof(double));
decompressed.read((char*) &stdDev, sizeof(double));
std::pair<int, Filament> pair(filament_index, Filament(x, y, z, stdDev));
activeFilaments.insert(pair);
}
load_wind_file(wind_index);
}
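//Binary log layout consumed above (after zlib decompression):
//  [int 1, binary marker] [6 doubles: env min/max bounds] [3 ints: cell counts]
//  [double: cell size] [5 doubles: source position and related header fields]
//  [int: gas type code] [2 doubles: moles per filament, moles of air per cm3]
//  [int: wind snapshot index] then, until EOF, one record per active filament:
//  [int index][3 doubles x,y,z][double sigma]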
void sim_obj::load_wind_file(int wind_index){
if(wind_index==last_wind_idx)
return;
last_wind_idx=wind_index;
std::ifstream infile(boost::str( boost::format("%s/wind/wind_iteration_%i") % simulation_filename.c_str() % wind_index), std::ios_base::binary);
infile.read((char*) U.data(), sizeof(double)* U.size());
infile.read((char*) V.data(), sizeof(double)* U.size());
infile.read((char*) W.data(), sizeof(double)* U.size());
infile.close();
}
//Get Gas concentration at location (x,y,z)
double sim_obj::get_gas_concentration(float x, float y, float z)
{
int xx,yy,zz;
xx = (int)ceil((x - env_min_x)/environment_cell_size);
yy = (int)ceil((y - env_min_y)/environment_cell_size);
zz = (int)ceil((z - env_min_z)/environment_cell_size);
    //reject indices equal to the cell count too; they would read out of range
    if(xx<0 || xx>=environment_cells_x
        || yy<0 || yy>=environment_cells_y
        || zz<0 || zz>=environment_cells_z)
{
ROS_ERROR("Requested gas concentration at a point outside the environment (%f, %f, %f). Are you using the correct coordinates?\n", x, y ,z);
return 0;
}
double gas_conc=0;
if(filament_log){
for(auto it = activeFilaments.begin(); it!=activeFilaments.end(); it++){
Filament fil = it->second;
double distSQR = (x-fil.x)*(x-fil.x) + (y-fil.y)*(y-fil.y) + (z-fil.z)*(z-fil.z);
double limitDistance = fil.sigma*5/100;
if(distSQR < limitDistance * limitDistance && check_environment_for_obstacle(x, y, z, fil.x, fil.y, fil.z)){
gas_conc += concentration_from_filament(x, y, z, fil);
}
}
}else{
//Get cell idx from point location
//Get gas concentration from that cell
gas_conc = C[indexFrom3D(xx,yy,zz)];
}
return gas_conc;
}
double sim_obj::concentration_from_filament(float x, float y, float z, Filament filament){
//calculate how much gas concentration does one filament contribute to the queried location
double sigma = filament.sigma;
double distance_cm = 100 * sqrt( pow(x-filament.x,2) + pow(y-filament.y,2) + pow(z-filament.z,2) );
double num_moles_target_cm3 = (total_moles_in_filament /
(sqrt(8*pow(M_PI,3)) * pow(sigma,3) )) * exp( -pow(distance_cm,2)/(2*pow(sigma,2)) );
double ppm = num_moles_target_cm3/num_moles_all_gases_in_cm3 * 1000000; //parts of target gas per million
return ppm;
}
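//Sanity check: at the filament centre (distance_cm = 0) with sigma equal to
//the simulator's filament_initial_std, the expression reduces to
//  (total_moles_in_filament / (sqrt(8*pi^3)*sigma^3)) / num_moles_all_gases_in_cm3 * 1e6,
//which recovers the configured ppm at the filament center by construction.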
bool sim_obj::check_environment_for_obstacle(double start_x, double start_y, double start_z,
double end_x, double end_y, double end_z)
{
// Check whether one of the points is outside the valid environment or is not free
if(check_pose_with_environment(start_x, start_y, start_z) != 0) { return false; }
if(check_pose_with_environment( end_x, end_y , end_z) != 0) { return false; }
// Calculate normal displacement vector
double vector_x = end_x - start_x;
double vector_y = end_y - start_y;
double vector_z = end_z - start_z;
double distance = sqrt(vector_x*vector_x + vector_y*vector_y + vector_z*vector_z);
vector_x = vector_x/distance;
vector_y = vector_y/distance;
vector_z = vector_z/distance;
// Traverse path
int steps = ceil( distance / environment_cell_size ); // Make sure no two iteration steps are separated more than 1 cell
double increment = distance/steps;
for(int i=1; i<steps-1; i++)
{
// Determine point in space to evaluate
double pose_x = start_x + vector_x*increment*i;
double pose_y = start_y + vector_y*increment*i;
double pose_z = start_z + vector_z*increment*i;
        // Determine cell to evaluate (some cells might get evaluated twice due to the current code)
int x_idx = floor( (pose_x-env_min_x)/environment_cell_size );
int y_idx = floor( (pose_y-env_min_y)/environment_cell_size );
int z_idx = floor( (pose_z-env_min_z)/environment_cell_size );
// Check if the cell is occupied
if(Env[indexFrom3D(x_idx,y_idx,z_idx)] != 0) { return false; }
}
// Direct line of sight confirmed!
return true;
}
int sim_obj::check_pose_with_environment(double pose_x, double pose_y, double pose_z)
{
//1.1 Check that pose is within the boundingbox environment
if (pose_x<env_min_x || pose_x>env_max_x || pose_y<env_min_y || pose_y>env_max_y || pose_z<env_min_z || pose_z>env_max_z)
return 1;
//Get 3D cell of the point
int x_idx = (pose_x-env_min_x)/environment_cell_size;
int y_idx = (pose_y-env_min_y)/environment_cell_size;
int z_idx = (pose_z-env_min_z)/environment_cell_size;
if (x_idx >= environment_cells_x || y_idx >= environment_cells_y || z_idx >= environment_cells_z)
return 1;
//1.2. Return cell occupancy (0=free, 1=obstacle, 2=outlet)
return Env[indexFrom3D(x_idx,y_idx,z_idx)];
}
//Get Wind vector at location (x,y,z)
void sim_obj::get_wind_value(float x, float y, float z, double &u, double &v, double &w)
{
if (load_wind_data)
{
int xx,yy,zz;
xx = (int)ceil((x - env_min_x)/environment_cell_size);
yy = (int)ceil((y - env_min_y)/environment_cell_size);
zz = (int)ceil((z - env_min_z)/environment_cell_size);
        //reject indices equal to the cell count too; they would read out of range
        if(xx<0 || xx>=environment_cells_x
            || yy<0 || yy>=environment_cells_y
            || zz<0 || zz>=environment_cells_z)
{
ROS_ERROR("Requested gas concentration at a point outside the environment. Are you using the correct coordinates?\n");
return;
}
//Set wind vectors from that cell
u = U[indexFrom3D(xx,yy,zz)];
v = V[indexFrom3D(xx,yy,zz)];
w = W[indexFrom3D(xx,yy,zz)];
}
else
{
if (verbose)
ROS_WARN("[Player] Request to provide Wind information when No Wind data is available!!");
}
}
void sim_obj::readEnvFile()
{
if(occupancyFile==""){
ROS_ERROR(" [GADEN_PLAYER] No occupancy file specified. Use the parameter \"occupancyFile\" to input the path to the OccupancyGrid3D.csv file.\n");
return;
}
Env.resize(environment_cells_x * environment_cells_y * environment_cells_z);
//open file
std::ifstream infile(occupancyFile.c_str());
std::string line;
//discard the header
std::getline(infile, line);
std::getline(infile, line);
std::getline(infile, line);
std::getline(infile, line);
int x_idx = 0;
int y_idx = 0;
int z_idx = 0;
while ( std::getline(infile, line) )
{
std::stringstream ss(line);
if (z_idx >=environment_cells_z)
{
ROS_ERROR("Trying to read:[%s]",line.c_str());
}
if (line == ";")
{
//New Z-layer
z_idx++;
x_idx = 0;
y_idx = 0;
}
else
{ //New line with constant x_idx and all the y_idx values
            double f;
            while (ss >> std::skipws >> f) //get one double value; the loop stops cleanly when the line runs out
            {
                Env[indexFrom3D(x_idx,y_idx,z_idx)] = f;
                y_idx++;
            }
//Line has ended
x_idx++;
y_idx = 0;
}
}
infile.close();
}
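//OccupancyGrid3D.csv layout assumed above: 4 header lines, then for each z
//slice one text line per x index holding all y values for that row
//(0=free, 1=occupied, 2=outlet), with a lone ";" line separating z slices.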
//Init instances (for running multiple simulations)
void sim_obj::configure_environment()
{
//ROS_INFO("Configuring Enviroment");
//ROS_INFO("\t\t Dimension: [%i,%i,%i] cells", environment_cells_x, environment_cells_y, environment_cells_z);
//ROS_INFO("\t\t Gas_Type: %s", gas_type.c_str());
//ROS_INFO("\t\t FilePath: %s", simulation_filename.c_str());
//ROS_INFO("\t\t Source at location: [%.4f,%.4f,%.4f][m] ", source_pos_x, source_pos_y, source_pos_z);
//ROS_INFO("\t\t Loading Wind Data: %i", load_wind_data);
//Resize Gas Concentration container
C.resize(environment_cells_x * environment_cells_y * environment_cells_z);
//Resize Wind info container (if necessary)
if (load_wind_data)
{
U.resize(environment_cells_x * environment_cells_y * environment_cells_z);
V.resize(environment_cells_x * environment_cells_y * environment_cells_z);
W.resize(environment_cells_x * environment_cells_y * environment_cells_z);
}
readEnvFile();
if(createHeatmapImage)
heatmap = std::vector<std::vector<double> >(environment_cells_x, std::vector<double>(environment_cells_y));
}
void sim_obj::get_concentration_as_markers(visualization_msgs::Marker &mkr_points)
{
if(!filament_log){
//For every cell, generate as much "marker points" as [ppm]
for (int i=0;i<environment_cells_x;i++)
{
for (int j=0;j<environment_cells_y;j++)
{
for (int k=0;k<environment_cells_z;k++)
{
geometry_msgs::Point p; //Location of point
std_msgs::ColorRGBA color; //Color of point
double gas_value = C[indexFrom3D(i,j,k)];
for (int N=0;N<(int)round(gas_value/2);N++)
{
                        //Set point position (cell center + random offset)
p.x = env_min_x + (i+0.5)*environment_cell_size + ((rand()%100)/100.0f)*environment_cell_size;
p.y = env_min_y + (j+0.5)*environment_cell_size + ((rand()%100)/100.0f)*environment_cell_size;
p.z = env_min_z + (k+0.5)*environment_cell_size + ((rand()%100)/100.0f)*environment_cell_size;
//Set color of particle according to gas type
color.a = 1.0;
if (!strcmp(gas_type.c_str(),"ethanol"))
{
color.r=0.2; color.g=0.9; color.b=0;
}
else if (!strcmp(gas_type.c_str(),"methane"))
{
color.r=0.9; color.g=0.1; color.b=0.1;
}
else if (!strcmp(gas_type.c_str(),"hydrogen"))
{
color.r=0.2; color.g=0.1; color.b=0.9;
}
else if (!strcmp(gas_type.c_str(),"propanol"))
{
color.r=0.8; color.g=0.8; color.b=0;
}
else if (!strcmp(gas_type.c_str(),"chlorine"))
{
color.r=0.8; color.g=0; color.b=0.8;
}
else if (!strcmp(gas_type.c_str(),"flurorine"))
{
color.r=0.0; color.g=0.8; color.b=0.8;
}
else if (!strcmp(gas_type.c_str(),"acetone"))
{
color.r=0.9; color.g=0.2; color.b=0.2;
}
else if (!strcmp(gas_type.c_str(),"neon"))
{
color.r=0.9; color.g=0; color.b=0;
}
else if (!strcmp(gas_type.c_str(),"helium"))
{
color.r=0.9; color.g=0; color.b=0;
}
else if (!strcmp(gas_type.c_str(),"hot_air"))
{
color.r=0.9; color.g=0; color.b=0;
}
else
{
ROS_INFO("[player] Setting Defatul Color");
color.r = 0.9; color.g = 0;color.b = 0;
}
//Add particle marker
mkr_points.points.push_back(p);
mkr_points.colors.push_back(color);
}
}
}
}
}
else{
for(auto it = activeFilaments.begin(); it!=activeFilaments.end(); it++){
geometry_msgs::Point p; //Location of point
std_msgs::ColorRGBA color; //Color of point
Filament filament = it->second;
for (int i=0; i<5; i++){
p.x=(filament.x)+((std::rand()%1000)/1000.0 -0.5) * filament.sigma/200;
p.y=(filament.y)+((std::rand()%1000)/1000.0 -0.5) * filament.sigma/200;
p.z=(filament.z)+((std::rand()%1000)/1000.0 -0.5) * filament.sigma/200;
color.a=1;
color.r=0;
color.g=1;
color.b=0;
//Add particle marker
mkr_points.points.push_back(p);
mkr_points.colors.push_back(color);
}
}
}
}
int sim_obj::indexFrom3D(int x, int y, int z){
return x + y*environment_cells_x + z*environment_cells_x*environment_cells_y;
}
void sim_obj::updateHeatmap(){
#pragma omp parallel for collapse(2)
for(int i = 0; i<heatmap.size(); i++){
for(int j=0; j<heatmap[0].size(); j++){
double p_x = env_min_x + (i+0.5)*environment_cell_size;
double p_y = env_min_y + (j+0.5)*environment_cell_size;
double g_c = get_gas_concentration(p_x, p_y, heatmapHeight);
if(g_c>heatmapThreshold)
heatmap[i][j]++;
}
}
}
void sim_obj::writeHeatmapImage(){
//std::ofstream pgmFile ("/home/pepe/catkin_ws/asd");
//pgmFile<<"P2\n";
//pgmFile<<heatmap[0].size()<<" "<<heatmap.size()<<"\n";
//pgmFile<<"255\n";
//for(int i = 0; i<heatmap.size(); i++){
// for(int j=0; j<heatmap[0].size(); j++){
// pgmFile<< (int)((heatmap[i][j]/heatMapIterations) * 255) <<" ";
// }
// pgmFile<<"\n";
//}
cv::Mat image(cv::Size(heatmap.size(), heatmap[0].size()), CV_32F, cv::Scalar(0) );
#pragma omp parallel for collapse(2)
for(int i = 0; i<heatmap.size(); i++){
for(int j=0; j<heatmap[0].size(); j++){
image.at<float>(heatmap[0].size()-1-j, i) = (heatmap[i][j]/heatMapIterations) * 255;
}
}
cv::imwrite(heatmapPath, image);
std::cout<<"Heatmap image saved\n";
} | 29,208 | C++ | 32.728637 | 155 | 0.570837 |
tudelft/autoGDMplus/gaden_ws/src/gaden/gaden_filament_simulator/package.xml | <package>
<name>gaden_filament_simulator</name>
<version>1.0.0</version>
  <description>Implements the filament-based gas dispersal simulation. Gases with different densities, such as ethanol, methane, and hydrogen, can be simulated.</description>
<maintainer email="[email protected]">Javier Monroy</maintainer>
<license>GPLv3</license>
<author>Javier Monroy</author>
<buildtool_depend>catkin</buildtool_depend>
<!-- Dependencies needed to compile this package. -->
<build_depend>roscpp</build_depend>
<build_depend>visualization_msgs</build_depend>
<build_depend>std_msgs</build_depend>
<build_depend>nav_msgs</build_depend>
<!-- Dependencies needed after this package is compiled. -->
<run_depend>roscpp</run_depend>
<run_depend>visualization_msgs</run_depend>
<run_depend>std_msgs</run_depend>
<run_depend>nav_msgs</run_depend>
</package>
| 884 | XML | 30.607142 | 173 | 0.735294 |
tudelft/autoGDMplus/gaden_ws/src/gaden/gaden_filament_simulator/src/filament_simulator.cpp | /*---------------------------------------------------------------------------------------
* MAIN Node for the simulation of gas dispersal using a Filament-based approach.
* This node loads the wind field (usually from CFD simulation), and simulates over it
* different filaments to spread gas particles.
*
* Each filament is composed of a fixed number of gas molecules (Q)
* Each filament is determined by its center position and width.
* The width of a filament increases over time (Turbulent and molecular difussion)
* The position of a filament is updated with the wind.
*
* The gas concentration at a given point is the sum of the concentration of all filaments.
*
* Thus, the gas concentration at the source location is determined by the number of molecules/filament and the number of filaments.
*
* A log file is recorded for every snapshot (time-step) with information about the gas
* concentration and wind vector for every cell (3D) of the environment.
*
* The node implements the filament-base gas dispersal simulation. At each time step, the puffs
* of filaments are sequentially released at a source location. Each puff is composed of n filaments.
* Filaments are affected by turbulence and molecular diffusion along its path while being transported
* by advection with the wind. The 3-dimensional positions of these filaments are represented by the points
* of the “visualization msgs/markers”. At each time step, “Dispersal_Simulation” node calculates or
* determines the positions of n filaments. Gas plumes are simulated with or without acceleration.
*
* It is very time consuming, and currently it runs in just one thread (designed to run offline).
*
 * TODO: Change std::vector to std::list for the FILAMENTS
---------------------------------------------------------------------------------------*/
#include "filament_simulator/filament_simulator.h"
//==========================//
// Constructor //
//==========================//
CFilamentSimulator::CFilamentSimulator()
{
//Read parameters
loadNodeParameters();
//Create directory to save results (if needed)
if (save_results && !boost::filesystem::exists(results_location)){
if (!boost::filesystem::create_directories(results_location))
ROS_ERROR("[filament] Could not create result directory: %s", results_location.c_str());
        if (!boost::filesystem::create_directories(boost::str(boost::format("%s/wind") % results_location) ) )
            ROS_ERROR("[filament] Could not create result directory: %s/wind", results_location.c_str());
}
//Set Publishers and Subscribers
//-------------------------------
marker_pub = n.advertise<visualization_msgs::Marker>("filament_visualization", 1);
// Wait preprocessing Node to finish?
preprocessing_done = false;
if(wait_preprocessing)
{
prepro_sub = n.subscribe("preprocessing_done", 1, &CFilamentSimulator::preprocessingCB, this);
while(ros::ok() && !preprocessing_done)
{
ros::Duration(0.5).sleep();
ros::spinOnce();
if (verbose) ROS_INFO("[filament] Waiting for node GADEN_preprocessing to end.");
}
}
//Init variables
//-----------------
sim_time = 0.0; //Start at time = 0(sec)
sim_time_last_wind = -2*windTime_step; //Force to load wind-data on startup
current_wind_snapshot = 0; //Start with wind_iter= 0;
current_simulation_step = 0; //Start with iter= 0;
last_saved_step = -1;
wind_notified = false; //To warn the user (only once) that no more wind data is found!
wind_finished = false;
//Init the Simulator
initSimulator();
//Fluid Dynamics Eq
    /*-----------------
* Ideal gas equation:
* PV = nRT
* P is the pressure of the gas (atm)
* V is the volume of the gas (cm^3)
* n is the amount of substance of gas (mol) = m/M where m=mass of the gas [g] and M is the molar mass
     * R is the ideal, or universal, gas constant, equal to the product of the Boltzmann constant and the Avogadro constant. (82.057338 cm^3·atm/mol·K)
* T is the temperature of the gas (kelvin)
*/
double R = 82.057338; //[cm³·atm/mol·K] Gas Constant
    filament_initial_vol = pow(6*filament_initial_std,3); //[cm³] -> We approximate the infinite volume of the 3D Gaussian as 6 sigmas.
    env_cell_vol = pow(cell_size*100,3); //[cm³] Volume of a cell
filament_numMoles = (envPressure*filament_initial_vol)/(R*envTemperature);//[mol] Num of moles of Air in that volume
env_cell_numMoles = (envPressure*env_cell_vol)/(R*envTemperature); //[mol] Num of moles of Air in that volume
    //The moles of target_gas in a Filament are distributed following a 3D Gaussian
//Given the ppm value at the center of the filament, we approximate the total number of gas moles in that filament.
double numMoles_in_cm3 = envPressure/(R*envTemperature); //[mol of all gases/cm³]
double filament_moles_cm3_center = filament_ppm_center/pow(10,6)* numMoles_in_cm3; //[moles of target gas / cm³]
filament_numMoles_of_gas = filament_moles_cm3_center * ( sqrt(8*pow(3.14159,3))*pow(filament_initial_std,3) ); //total number of moles in a filament
if (verbose) ROS_INFO("[filament] filament_initial_vol [cm3]: %f",filament_initial_vol);
if (verbose) ROS_INFO("[filament] env_cell_vol [cm3]: %f",env_cell_vol);
if (verbose) ROS_INFO("[filament] filament_numMoles [mol]: %E",filament_numMoles);
if (verbose) ROS_INFO("[filament] env_cell_numMoles [mol]: %E",env_cell_numMoles);
if (verbose) ROS_INFO("[filament] filament_numMoles_of_gas [mol]: %E",filament_numMoles_of_gas);
//Init visualization
//-------------------
filament_marker.header.frame_id = fixed_frame;
filament_marker.ns = "filaments";
filament_marker.action = visualization_msgs::Marker::ADD;
filament_marker.id = 0;
filament_marker.type = visualization_msgs::Marker::POINTS;
filament_marker.color.a = 1;
}
CFilamentSimulator::~CFilamentSimulator()
{
}
//==============================//
// GADEN_preprocessing CB //
//==============================//
void CFilamentSimulator::preprocessingCB(const std_msgs::Bool& b)
{
preprocessing_done = true;
}
//==========================//
// Load Params //
//==========================//
void CFilamentSimulator::loadNodeParameters()
{
ros::NodeHandle private_nh("~");
// Verbose
private_nh.param<bool>("verbose", verbose, false);
// Wait PreProcessing
private_nh.param<bool>("wait_preprocessing", wait_preprocessing, false);
// Simulation Time (sec)
private_nh.param<double>("sim_time", max_sim_time, 20.0);
// Time increment between Gas snapshots (sec)
private_nh.param<double>("time_step", time_step, 1.0);
// Number of iterations to carry on = max_sim_time/time_step
numSteps = floor(max_sim_time/time_step);
// Num of filaments/sec
private_nh.param<int>("num_filaments_sec", numFilaments_sec, 100);
private_nh.param<bool>("variable_rate", variable_rate, false);
numFilaments_step = numFilaments_sec * time_step;
numFilament_aux=0;
total_number_filaments = numFilaments_step * numSteps;
current_number_filaments = 0;
private_nh.param<int>("filament_stop_steps", filament_stop_steps, 0);
filament_stop_counter= 0;
// Gas concentration at the filament center - 3D gaussian [ppm]
private_nh.param<double>("ppm_filament_center", filament_ppm_center, 20);
// [cm] Sigma of the filament at t=0-> 3DGaussian shape
private_nh.param<double>("filament_initial_std", filament_initial_std, 1.5);
// [cm²/s] Growth ratio of the filament_std
private_nh.param<double>("filament_growth_gamma", filament_growth_gamma, 10.0);
// [cm] Sigma of the white noise added on each iteration
private_nh.param<double>("filament_noise_std", filament_noise_std, 0.1);
// Gas Type ID
private_nh.param<int>("gas_type", gasType, 1);
// Environment temperature (necessary for molecules/cm3 -> ppm)
private_nh.param<double>("temperature", envTemperature, 298.0);
    // Environment pressure (necessary for molecules/cm3 -> ppm)
private_nh.param<double>("pressure", envPressure, 1.0);
// Gas concentration units (0= molecules/cm3, 1=ppm)
private_nh.param<int>("concentration_unit_choice", gasConc_unit, 1);
//WIND DATA
//----------
//CFD wind files location
private_nh.param<std::string>("wind_data", wind_files_location, "");
//(sec) Time increment between Wind snapshots --> Determines when to load a new wind field
private_nh.param<double>("wind_time_step", windTime_step, 1.0);
// Loop
private_nh.param<bool>("allow_looping", allow_looping, false);
private_nh.param<int>("loop_from_step", loop_from_step, 1);
private_nh.param<int>("loop_to_step", loop_to_step, 100);
//ENVIRONMENT
//-----------
// Occupancy gridmap 3D location
private_nh.param<std::string>("occupancy3D_data", occupancy3D_data, "");
    //fixed frame (to display the gas particles on RVIZ)
private_nh.param<std::string>("fixed_frame", fixed_frame, "/map");
    //Source position (x,y,z)
private_nh.param<double>("source_position_x", gas_source_pos_x, 1.0);
private_nh.param<double>("source_position_y", gas_source_pos_y, 1.0);
private_nh.param<double>("source_position_z", gas_source_pos_z, 1.0);
// Simulation results.
private_nh.param<int>("save_results", save_results, 1);
private_nh.param<std::string>("results_location", results_location, "");
if (save_results && !boost::filesystem::exists(results_location)){
if (!boost::filesystem::create_directories(results_location))
ROS_ERROR("[filament] Could not create result directory: %s", results_location.c_str());
}
//create a sub-folder for this specific simulation
results_location=boost::str(boost::format("%s/FilamentSimulation_gasType_%i_sourcePosition_%.2f_%.2f_%.2f") % results_location % gasType % gas_source_pos_x % gas_source_pos_y % gas_source_pos_z );
private_nh.param<double>("results_min_time", results_min_time, 0.0);
private_nh.param<double>("results_time_step", results_time_step, 1.0);
if (verbose)
{
ROS_INFO("[filament] The data provided in the roslaunch file is:");
ROS_INFO("[filament] Simulation Time %f(s)",sim_time);
ROS_INFO("[filament] Gas Time Step: %f(s)",time_step);
ROS_INFO("[filament] Num_steps: %d",numSteps);
ROS_INFO("[filament] Number of filaments: %d",numFilaments_sec);
ROS_INFO("[filament] PPM filament center %f",filament_ppm_center);
ROS_INFO("[filament] Gas type: %d",gasType);
ROS_INFO("[filament] Concentration unit: %d",gasConc_unit);
ROS_INFO("[filament] Wind_time_step: %f(s)", windTime_step);
ROS_INFO("[filament] Fixed frame: %s",fixed_frame.c_str());
ROS_INFO("[filament] Source position: (%f,%f,%f)",gas_source_pos_x, gas_source_pos_y, gas_source_pos_z);
if (save_results)
ROS_INFO("[filament] Saving results to %s",results_location.c_str());
}
}
//==========================//
// //
//==========================//
void CFilamentSimulator::initSimulator()
{
if (verbose) ROS_INFO("[filament] Initializing Simulator... Please Wait!");
//1. Load Environment and Configure Matrices
if (FILE *file = fopen(occupancy3D_data.c_str(), "r"))
{
        //File exists, keep going!
fclose(file);
if (verbose) ROS_INFO("[filament] Loading 3D Occupancy GridMap");
read_3D_file(occupancy3D_data, Env, true, false);
}
else
{
ROS_ERROR("[filament] File %s Does Not Exists!",occupancy3D_data.c_str());
}
//2. Load the first Wind snapshot from file (all 3 components U,V,W)
read_wind_snapshot(current_simulation_step);
//3. Initialize the filaments vector to its max value (to avoid increasing the size at runtime)
if (verbose) ROS_INFO("[filament] Initializing Filaments");
filaments.resize(total_number_filaments, CFilament(0.0, 0.0, 0.0, filament_initial_std));
}
//Resize a 3D matrix composed of vectors. This operation is only performed once!
void CFilamentSimulator::configure3DMatrix(std::vector< double > &A)
{
A.resize(env_cells_x* env_cells_y * env_cells_z);
}
//==========================//
// //
//==========================//
void CFilamentSimulator::read_wind_snapshot(int idx)
{
if (last_wind_idx==idx)
return;
//configure filenames to read
std::string U_filename = boost::str( boost::format("%s%i.csv_U") % wind_files_location % idx );
std::string V_filename = boost::str( boost::format("%s%i.csv_V") % wind_files_location % idx );
std::string W_filename = boost::str( boost::format("%s%i.csv_W") % wind_files_location % idx );
if (verbose) ROS_INFO("Reading Wind Snapshot %s",U_filename.c_str());
//read data to 3D matrices
if (FILE *file = fopen(U_filename.c_str(), "r"))
{
        //File exists, keep going!
fclose(file);
last_wind_idx=idx;
if (verbose) ROS_INFO("[filament] Loading Wind Snapshot %i", idx);
//binary format files start with the code "999"
std::ifstream ist(U_filename, std::ios_base::binary);
int check=0;
ist.read((char*) &check, sizeof(int));
ist.close();
read_3D_file(U_filename, U, false, (check==999));
read_3D_file(V_filename, V, false, (check==999));
read_3D_file(W_filename, W, false, (check==999));
if(!wind_finished){
//dump the binary wind data to file
std::string out_filename = boost::str( boost::format("%s/wind/wind_iteration_%i") % results_location % idx);
FILE * file = fopen(out_filename.c_str(), "wb");
if (file==NULL){
ROS_ERROR("CANNOT OPEN WIND LOG FILE\n");
exit(1);
}
fclose(file);
            std::ofstream wind_File(out_filename.c_str(), std::ios::binary);
wind_File.write((char*) U.data(), sizeof(double) * U.size());
wind_File.write((char*) V.data(), sizeof(double) * V.size());
wind_File.write((char*) W.data(), sizeof(double) * W.size());
wind_File.close();
}
}
else
{
//No more wind data. Keep current info.
if (!wind_notified)
{
ROS_WARN("[filament] File %s Does Not Exists!",U_filename.c_str());
ROS_WARN("[filament] No more wind data available. Using last Wind snapshopt as SteadyState.");
wind_notified = true;
wind_finished = true;
}
}
}
//==========================//
// //
//==========================//
void CFilamentSimulator::read_3D_file(std::string filename, std::vector< double > &A, bool hasHeader, bool binary)
{
if(binary){
std::ifstream infile(filename, std::ios_base::binary);
infile.seekg(sizeof(int));
infile.read((char*) A.data(), sizeof(double)* A.size());
infile.close();
return;
}
//open file
std::ifstream infile(filename.c_str());
std::string line;
int line_counter = 0;
//If header -> read 4 Header lines & configure all matrices to given dimensions!
if (hasHeader)
{
//Line 1 (min values of environment)
std::getline(infile, line);
line_counter++;
size_t pos = line.find(" ");
line.erase(0, pos+1);
pos = line.find(" ");
env_min_x = atof(line.substr(0, pos).c_str());
line.erase(0, pos+1);
pos = line.find(" ");
env_min_y = atof(line.substr(0, pos).c_str());
env_min_z = atof(line.substr(pos+1).c_str());
//Line 2 (max values of environment)
std::getline(infile, line);
line_counter++;
pos = line.find(" ");
line.erase(0, pos+1);
pos = line.find(" ");
env_max_x = atof(line.substr(0, pos).c_str());
line.erase(0, pos+1);
pos = line.find(" ");
env_max_y = atof(line.substr(0, pos).c_str());
env_max_z = atof(line.substr(pos+1).c_str());
        //Line 3 (Num cells on each dimension)
std::getline(infile, line);
line_counter++;
pos = line.find(" ");
line.erase(0, pos+1);
pos = line.find(" ");
env_cells_x = atoi(line.substr(0, pos).c_str());
line.erase(0, pos+1);
pos = line.find(" ");
        env_cells_y = atoi(line.substr(0, pos).c_str());
        env_cells_z = atoi(line.substr(pos+1).c_str());
//Line 4 cell_size (m)
std::getline(infile, line);
line_counter++;
pos = line.find(" ");
cell_size = atof(line.substr(pos+1).c_str());
if (verbose) ROS_INFO("[filament] Env dimensions (%.2f,%.2f,%.2f) to (%.2f,%.2f,%.2f)",env_min_x, env_min_y, env_min_z, env_max_x, env_max_y, env_max_z );
if (verbose) ROS_INFO("[filament] Env size in cells (%d,%d,%d) - with cell size %f [m]",env_cells_x,env_cells_y,env_cells_z, cell_size);
//Reserve memory for the 3D matrices: U,V,W,C and Env, according to provided num_cells of the environment.
//It also init them to 0.0 values
configure3DMatrix(U);
configure3DMatrix(V);
configure3DMatrix(W);
configure3DMatrix(C);
configure3DMatrix(Env);
}
//Read file line by line
int x_idx = 0;
int y_idx = 0;
int z_idx = 0;
while ( std::getline(infile, line) )
{
line_counter++;
std::stringstream ss(line);
if (z_idx >=env_cells_z)
{
ROS_ERROR("Trying to read:[%s]",line.c_str());
}
if (line == ";")
{
//New Z-layer
z_idx++;
x_idx = 0;
y_idx = 0;
}
else
{ //New line with constant x_idx and all the y_idx values
while (!ss.fail())
{
double f;
ss >> f; //get one double value
if (!ss.fail())
{
A[indexFrom3D(x_idx,y_idx,z_idx)] = f;
y_idx++;
}
}
//Line has ended
x_idx++;
y_idx = 0;
}
}
//End of file.
if (verbose)
ROS_INFO("End of File");
infile.close();
}
// Add new filaments. On each step add a total of "numFilaments_step"
void CFilamentSimulator::add_new_filaments(double radius_arround_source)
{
numFilament_aux+=numFilaments_step;
// Release rate
int filaments_to_release=floor(numFilament_aux);
if (variable_rate)
{
filaments_to_release = (int) round( random_number(0.0, filaments_to_release) );
}
else
{
if(filament_stop_counter==filament_stop_steps){
filament_stop_counter=0;
}else{
filament_stop_counter++;
filaments_to_release=0;
}
}
for (int i=0; i<filaments_to_release; i++)
{
double x, y, z;
do{
            //Set position of the new filament within the specified radius around the gas source location
x = gas_source_pos_x + random_number(-1,1)*radius_arround_source;
y = gas_source_pos_y + random_number(-1,1)*radius_arround_source;
z = gas_source_pos_z + random_number(-1,1)*radius_arround_source;
}while(check_pose_with_environment(x,y,z)!=0);
        /*Instead of adding new filaments to the filaments vector on each iteration (push_back),
        we initially resized the filaments vector to the max number of filaments (numSteps*numFilaments_step).
        Here we just "activate" the filaments corresponding to this step.*/
filaments[current_number_filaments+i].activate_filament(x, y, z, sim_time);
}
}
// Here we estimate the gas concentration on each cell of the 3D env
// based on the active filaments and their 3DGaussian shapes
// For that we employ Farrell's Concentration Eq
void CFilamentSimulator::update_gas_concentration_from_filament(int fil_i)
{
// We run over all the active filaments, and update the gas concentration of the cells that are close to them.
    // Ideally a filament spreads over the entire environment, but in practice, since filaments are modeled as 3D Gaussians,
    // we can establish a cut-off radius of 3*sigma.
// To avoid resolution problems, we evaluate each filament according to the minimum between:
// the env_cell_size and filament_sigma. This way we ensure a filament is always well evaluated (not only one point).
double grid_size_m = std::min(cell_size, (filaments[fil_i].sigma/100)); //[m] grid size to evaluate the filament
// Compute at which increments the Filament has to be evaluated.
    // If the sigma of the Filament is very big (i.e. the Filament is very flat), then use the world's cell_size.
    // If the Filament is very small (i.e. it only spans one or a few world cells), then use increments equal to sigma
// in order to have several evaluations fall in the same cell.
int num_evaluations = ceil(6*(filaments[fil_i].sigma/100) / grid_size_m);
// How many times the Filament has to be evaluated depends on the final grid_size_m.
// The filament's grid size is multiplied by 6 because we evaluate it over +-3 sigma
// If the filament is very small (i.e. grid_size_m = sigma), then the filament is evaluated only 6 times
// If the filament is very big and spans several cells, then it has to be evaluated for each cell (which will be more than 6)
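    // For reference, the "FARRELLS Eq." evaluated below is the isotropic 3D Gaussian from Farrell et al. (2002):
    //      C(d) = Q / ((2*pi)^(3/2) * sigma^3) * exp(-d^2 / (2*sigma^2))    [moles/cm3]
    // with Q = filament_numMoles_of_gas, sigma in [cm], and d the distance [cm] to the filament center.
    // Note that sqrt(8*pi^3) == (2*pi)^(3/2), which is the form used in the code.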
// EVALUATE IN ALL THREE AXIS
for (int i=0; i<=num_evaluations; i++)
{
for (int j=0; j<=num_evaluations; j++)
{
for (int k=0; k<=num_evaluations; k++)
{
//get point to evaluate [m]
double x = (filaments[fil_i].pose_x - 3*(filaments[fil_i].sigma/100)) + i*grid_size_m;
double y = (filaments[fil_i].pose_y - 3*(filaments[fil_i].sigma/100)) + j*grid_size_m;
double z = (filaments[fil_i].pose_z - 3*(filaments[fil_i].sigma/100)) + k*grid_size_m;
                // Distance from evaluated_point to filament_center (in [cm])
double distance_cm = 100 * sqrt( pow(x-filaments[fil_i].pose_x,2) + pow(y-filaments[fil_i].pose_y,2) + pow(z-filaments[fil_i].pose_z,2) );
// FARRELLS Eq.
//Evaluate the concentration of filament fil_i at given point (moles/cm³)
double num_moles_cm3 = (filament_numMoles_of_gas / (sqrt(8*pow(3.14159,3)) * pow(filaments[fil_i].sigma,3) )) * exp( -pow(distance_cm,2)/(2*pow(filaments[fil_i].sigma,2)) );
                //Multiply by the volume of the grid cell
double num_moles = num_moles_cm3 * pow(grid_size_m*100,3); //[moles]
//Valid point? If either OUT of the environment, or through a wall, treat it as invalid
bool path_is_obstructed = check_environment_for_obstacle(filaments[fil_i].pose_x, filaments[fil_i].pose_y, filaments[fil_i].pose_z, x,y,z );
if (!path_is_obstructed)
{
//Get 3D cell of the evaluated point
int x_idx = floor( (x-env_min_x)/cell_size );
int y_idx = floor( (y-env_min_y)/cell_size );
int z_idx = floor( (z-env_min_z)/cell_size );
//Accumulate concentration in corresponding env_cell
if (gasConc_unit==0)
{
mtx.lock();
C[indexFrom3D(x_idx,y_idx,z_idx)] += num_moles; //moles
mtx.unlock();
}
else
{
mtx.lock();
double num_ppm = (num_moles/env_cell_numMoles)*pow(10,6); //[ppm]
C[indexFrom3D(x_idx,y_idx,z_idx)] += num_ppm; //ppm
mtx.unlock();
}
}
}
}
}
    //Update gas concentration markers
}
//==========================//
// //
//==========================//
void CFilamentSimulator::update_gas_concentration_from_filaments(){
//First, set all cells to 0.0 gas concentration (clear previous state)
#pragma omp parallel for collapse(3)
for (size_t i=0; i<env_cells_x; i++)
{
for (size_t j=0; j<env_cells_y; j++)
{
for (size_t k=0; k<env_cells_z; k++)
{
C[indexFrom3D(i,j,k)] = 0.0;
}
}
}
#pragma omp parallel for
for (int i = 0; i < current_number_filaments; i++)
{
if (filaments[i].valid)
{
update_gas_concentration_from_filament(i);
}
}
}
//Check if a given 3D pose falls in:
// 0 = free space
// 1 = obstacle, wall, or outside the environment
// 2 = outlet (useful to disable filaments)
int CFilamentSimulator::check_pose_with_environment(double pose_x, double pose_y, double pose_z)
{
    //1.1 Check that the pose is within the environment bounding box
if (pose_x<env_min_x || pose_x>env_max_x || pose_y<env_min_y || pose_y>env_max_y || pose_z<env_min_z || pose_z>env_max_z)
return 1;
//Get 3D cell of the point
int x_idx = (pose_x-env_min_x)/cell_size;
int y_idx = (pose_y-env_min_y)/cell_size;
int z_idx = (pose_z-env_min_z)/cell_size;
if (x_idx >= env_cells_x || y_idx >= env_cells_y || z_idx >= env_cells_z)
return 1;
//1.2. Return cell occupancy (0=free, 1=obstacle, 2=outlet)
return Env[indexFrom3D(x_idx,y_idx,z_idx)];
}
//==========================//
// //
//==========================//
bool CFilamentSimulator::check_environment_for_obstacle(double start_x, double start_y, double start_z,
double end_x, double end_y, double end_z)
{
const bool PATH_OBSTRUCTED = true;
const bool PATH_UNOBSTRUCTED = false;
// Check whether one of the points is outside the valid environment or is not free
if(check_pose_with_environment(start_x, start_y, start_z) != 0) { return PATH_OBSTRUCTED; }
if(check_pose_with_environment( end_x, end_y , end_z) != 0) { return PATH_OBSTRUCTED; }
// Calculate normal displacement vector
double vector_x = end_x - start_x;
double vector_y = end_y - start_y;
double vector_z = end_z - start_z;
double distance = sqrt(vector_x*vector_x + vector_y*vector_y + vector_z*vector_z);
vector_x = vector_x/distance;
vector_y = vector_y/distance;
vector_z = vector_z/distance;
// Traverse path
int steps = ceil( distance / cell_size ); // Make sure no two iteration steps are separated more than 1 cell
double increment = distance/steps;
for(int i=1; i<steps-1; i++)
{
// Determine point in space to evaluate
double pose_x = start_x + vector_x*increment*i;
double pose_y = start_y + vector_y*increment*i;
double pose_z = start_z + vector_z*increment*i;
        // Determine cell to evaluate (some cells might get evaluated twice due to the current code)
int x_idx = floor( (pose_x-env_min_x)/cell_size );
int y_idx = floor( (pose_y-env_min_y)/cell_size );
int z_idx = floor( (pose_z-env_min_z)/cell_size );
// Check if the cell is occupied
if(Env[indexFrom3D(x_idx,y_idx,z_idx)] != 0) { return PATH_OBSTRUCTED; }
}
// Direct line of sight confirmed!
return PATH_UNOBSTRUCTED;
}
//Update the filaments location in the 3D environment
// According to the Farrell filament model, a filament is affected by three components of the wind flow:
// 1. Va (large scale wind)  -> Advection -> movement of the filament as a whole by the wind -> from CFD
// 2. Vm (middle scale wind) -> movement of the filament with respect to the center of the "plume" -> modeled as white noise
// 3. Vd (small scale wind)  -> diffusion or change of the filament shape (growth with time)
// We also consider Gravity and Buoyancy Forces given the gas molecular mass
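// As a rough sketch of the per-step update implemented below (Delta_t = time_step):
//      pos   <- pos + Va*Delta_t                         (advection, from the CFD wind field)
//      pos   <- pos + N(0, filament_noise_std)           (stochastic middle-scale term, per axis)
//      sigma <- sqrt(filament_initial_std^2 + filament_growth_gamma*(sim_time - birth_time))
// where each partial position update is only applied if the resulting pose stays in free space.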
void CFilamentSimulator::update_filament_location(int i)
{
    //Estimate filament acceleration due to gravity & buoyancy force (for the given gas_type):
double g = 9.8;
double specific_gravity_air = 1; //[dimensionless]
double accel = g * ( specific_gravity_air - SpecificGravity[gasType] ) / SpecificGravity[gasType];
double newpos_x, newpos_y, newpos_z;
//Update the location of all active filaments
//ROS_INFO("[filament] Updating %i filaments of %lu",current_number_filaments, filaments.size());
try
{
//Get 3D cell of the filament center
int x_idx = floor( (filaments[i].pose_x-env_min_x)/cell_size );
int y_idx = floor( (filaments[i].pose_y-env_min_y)/cell_size );
int z_idx = floor( (filaments[i].pose_z-env_min_z)/cell_size );
//1. Simulate Advection (Va)
// Large scale wind-eddies -> Movement of a filament as a whole by wind
//------------------------------------------------------------------------
newpos_x = filaments[i].pose_x + U[indexFrom3D(x_idx,y_idx,z_idx)] * time_step;
newpos_y = filaments[i].pose_y + V[indexFrom3D(x_idx,y_idx,z_idx)] * time_step;
newpos_z = filaments[i].pose_z + W[indexFrom3D(x_idx,y_idx,z_idx)] * time_step;
//Check filament location
int valid_location = check_pose_with_environment(newpos_x, newpos_y, newpos_z);
switch (valid_location)
{
case 0:
//Free and valid location... update filament position
filaments[i].pose_x = newpos_x;
filaments[i].pose_y = newpos_y;
filaments[i].pose_z = newpos_z;
break;
case 2:
//The location corresponds to an outlet! Delete filament!
filaments[i].valid = false;
break;
default:
//The location falls in an obstacle -> Illegal movement (Do not apply advection)
break;
}
        //2. Simulate Gravity & Buoyancy Force
//------------------------------------
//OLD approach: using accelerations (pure gas)
//newpos_z = filaments[i].pose_z + 0.5*accel*pow(time_step,2);
//Approximation from "Terminal Velocity of a Bubble Rise in a Liquid Column", World Academy of Science, Engineering and Technology 28 2007
double ro_air = 1.205; //[kg/m³] density of air
double mu = 19*pow(10,-6); //[kg/s·m] dynamic viscosity of air
double terminal_buoyancy_velocity = (g * (1-SpecificGravity[gasType])*ro_air * filament_ppm_center*pow(10,-6) ) / (18* mu);
//newpos_z = filaments[i].pose_z + terminal_buoyancy_velocity*time_step;
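        // NOTE: with the displacement line above commented out, newpos_z still holds the advected
        // z value from step 1 at this point, so the checks below mostly re-validate that position.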
//Check filament location
if (check_pose_with_environment(filaments[i].pose_x, filaments[i].pose_y, newpos_z ) == 0){
filaments[i].pose_z = newpos_z;
}else if(check_pose_with_environment(filaments[i].pose_x, filaments[i].pose_y, newpos_z ) == 2){
filaments[i].valid = false;
}
//3. Add some variability (stochastic process)
static thread_local std::mt19937 engine;
static thread_local std::normal_distribution<> dist{0, filament_noise_std};
newpos_x = filaments[i].pose_x + dist(engine);
newpos_y = filaments[i].pose_y + dist(engine);
newpos_z = filaments[i].pose_z + dist(engine);
//Check filament location
if (check_pose_with_environment(newpos_x, newpos_y, newpos_z ) == 0)
{
filaments[i].pose_x = newpos_x;
filaments[i].pose_y = newpos_y;
filaments[i].pose_z = newpos_z;
}
//4. Filament growth with time (this affects the posterior estimation of gas concentration at each cell)
        // Vd (small-scale wind eddies) -> diffusion or change of the filament shape (growth with time)
// R = sigma of a 3D gaussian -> Increasing sigma with time
//------------------------------------------------------------------------
filaments[i].sigma = sqrt(pow(filament_initial_std,2) + filament_growth_gamma*(sim_time-filaments[i].birth_time));
}catch(...)
{
ROS_ERROR("Exception Updating Filaments!");
return;
}
}
//==========================//
// //
//==========================//
void CFilamentSimulator::update_filaments_location()
{
#pragma omp parallel for
for (int i = 0; i < current_number_filaments; i++)
{
if (filaments[i].valid)
{
update_filament_location(i);
}
}
current_number_filaments+=floor(numFilament_aux);
numFilament_aux-=floor(numFilament_aux);
}
//==========================//
// //
//==========================//
void CFilamentSimulator::publish_markers()
{
//1. Clean old markers
filament_marker.points.clear();
filament_marker.colors.clear();
filament_marker.header.stamp = ros::Time::now();
filament_marker.pose.orientation.w = 1.0;
//width of points: scale.x is point width, scale.y is point height
filament_marker.scale.x = cell_size/4;
filament_marker.scale.y = cell_size/4;
filament_marker.scale.z = cell_size/4;
//2. Add a marker for each filament!
for (int i=0; i<current_number_filaments; i++)
{
geometry_msgs::Point point;
std_msgs::ColorRGBA color;
//Set filament pose
point.x = filaments[i].pose_x;
point.y = filaments[i].pose_y;
point.z = filaments[i].pose_z;
//Set filament color
color.a = 1;
if (filaments[i].valid)
{
color.r = 0;
color.g = 0;
color.b = 1;
}
else
{
color.r = 1;
color.g = 0;
color.b = 0;
}
//Add marker
filament_marker.points.push_back(point);
filament_marker.colors.push_back(color);
}
//Publish marker of the filaments
marker_pub.publish(filament_marker);
}
//==========================//
// //
//==========================//
double CFilamentSimulator::random_number(double min_val, double max_val)
{
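    // Note: rand() % 100 quantizes the output to 100 discrete levels, i.e. a resolution of
    // (max_val - min_val)/100; coarse, but apparently sufficient for the uses in this node.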
double n = (double)(rand() % 100); //int random number [0, 100)
n = n/100.0f; //random number [0, 1)
n = n * (max_val - min_val); //random number [0, max-min)
n = n+ min_val; //random number [min, max)
return n;
}
bool eq (double a, double b){
    return fabs(a-b)<0.001;
}
//Saves current Wind + GasConcentration to file
// These files will be later used in the "player" node.
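// Snapshot binary layout (zlib-compressed), as written below:
//   int    header (always 1)
//   double env_min_x, env_min_y, env_min_z
//   double env_max_x, env_max_y, env_max_z
//   int    env_cells_x, env_cells_y, env_cells_z
//   double cell_size (written three times, one per axis; cells are cubes)
//   double gas_source_pos_x, gas_source_pos_y, gas_source_pos_z
//   int    gasType
//   double filament_numMoles_of_gas
//   double num_moles_all_gases_in_cm3
//   int    last_wind_idx
//   then, per active filament: int index, double pose_x, pose_y, pose_z, sigma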
void CFilamentSimulator::save_state_to_file()
{
last_saved_step++;
//Configure file name for saving the current snapshot
std::string out_filename = boost::str( boost::format("%s/iteration_%i") % results_location % last_saved_step);
FILE * file = fopen(out_filename.c_str(), "wb");
if (file==NULL){
ROS_ERROR("CANNOT OPEN LOG FILE\n");
exit(1);
}
fclose(file);
boost::iostreams::filtering_streambuf<boost::iostreams::input> inbuf;
std::stringstream ist;
inbuf.push(boost::iostreams::zlib_compressor());
inbuf.push(ist);
int h = 1;
ist.write((char*) &h, sizeof(int));
ist.write((char*) &env_min_x, sizeof(double));
ist.write((char*) &env_min_y, sizeof(double));
ist.write((char*) &env_min_z, sizeof(double));
ist.write((char*) &env_max_x, sizeof(double));
ist.write((char*) &env_max_y, sizeof(double));
ist.write((char*) &env_max_z, sizeof(double));
ist.write((char*) &env_cells_x, sizeof(int));
ist.write((char*) &env_cells_y, sizeof(int));
ist.write((char*) &env_cells_z, sizeof(int));
ist.write((char*) &cell_size, sizeof(double));
ist.write((char*) &cell_size, sizeof(double));
ist.write((char*) &cell_size, sizeof(double));
ist.write((char*) &gas_source_pos_x, sizeof(double));
ist.write((char*) &gas_source_pos_y, sizeof(double));
ist.write((char*) &gas_source_pos_z, sizeof(double));
ist.write((char*)&gasType, sizeof(int));
    //constants to work out the gas concentration from the filament locations
ist.write((char*) &filament_numMoles_of_gas, sizeof(double));
double num_moles_all_gases_in_cm3=env_cell_numMoles/env_cell_vol;
ist.write((char*) &num_moles_all_gases_in_cm3, sizeof(double));
ist.write((char*) &last_wind_idx, sizeof(int)); //index of the wind file (they are stored separately under (results_location)/wind/... )
for(int i=0;i<filaments.size();i++){
if(filaments[i].valid){
ist.write((char*) &i, sizeof(int));
ist.write((char*) &filaments[i].pose_x, sizeof(double));
ist.write((char*) &filaments[i].pose_y, sizeof(double));
ist.write((char*) &filaments[i].pose_z, sizeof(double));
ist.write((char*) &filaments[i].sigma, sizeof(double));
}
}
    std::ofstream fi(out_filename, std::ios::binary);
boost::iostreams::copy(inbuf,fi);
fi.close();
}
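// Linear index into the flattened 3D matrices: x is the fastest-varying dimension, then y, then z.
// E.g. cell (x,y,z) of an (Nx,Ny,Nz) grid maps to x + y*Nx + z*Nx*Ny.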
int CFilamentSimulator::indexFrom3D(int x, int y, int z){
return x + y*env_cells_x + z*env_cells_x*env_cells_y;
}
//==============================//
// MAIN //
//==============================//
int main(int argc, char **argv)
{
// Init ROS-NODE
ros::init(argc, argv, "new_filament_simulator");
//Create simulator obj and initialize it
CFilamentSimulator sim;
// Initiate Random Number generator with current time
srand(time(NULL));
//--------------
// LOOP
//--------------
while (ros::ok() && (sim.current_simulation_step<sim.numSteps) )
{
//ROS_INFO("[filament] Simulating step %i (sim_time = %.2f)", sim.current_simulation_step, sim.sim_time);
        //0. Load wind snapshot (if necessary and available)
if ( sim.sim_time-sim.sim_time_last_wind >= sim.windTime_step)
{
// Time to update wind!
sim.sim_time_last_wind = sim.sim_time;
if (sim.allow_looping)
{
// Load wind-data
sim.read_wind_snapshot(sim.current_wind_snapshot);
// Update idx
if (sim.current_wind_snapshot >= sim.loop_to_step){
sim.current_wind_snapshot = sim.loop_from_step;
sim.wind_finished = true;
}
else
sim.current_wind_snapshot++;
}
else
            sim.read_wind_snapshot(floor(sim.sim_time/sim.windTime_step)); //Always increasing
}
//1. Create new filaments close to the source location
// On each iteration num_filaments (See params) are created
sim.add_new_filaments(sim.cell_size);
//2. Publish markers for RVIZ
sim.publish_markers();
//3. Update filament locations
sim.update_filaments_location();
//4. Save data (if necessary)
if ( (sim.save_results==1) && (sim.sim_time>=sim.results_min_time) )
{
if ( floor(sim.sim_time/sim.results_time_step) != sim.last_saved_step ){
sim.save_state_to_file();
}
}
//5. Update Simulation state
sim.sim_time = sim.sim_time + sim.time_step; //sec
sim.current_simulation_step++;
ros::spinOnce();
}
}
| 37,202 | C++ | 34.910232 | 197 | 0.625988 |
tudelft/autoGDMplus/gaden_ws/src/gaden/gaden_filament_simulator/src/filament.cpp | /*---------------------------------------------------------------------------------------
 * Very simple implementation of the class Filament
 * A gas source releases patches/puffs of gas. Each patch or puff is composed of N filaments.
 * A filament is a 3D shape which contains Q molecules of gas.
 * The filament is affected by advection (i.e. its center moves with the wind field),
 * but also by diffusion (its width increases with time) and a stochastic process (random behaviour).
 * These three effects can be related to the size of the turbulent wind eddies.
 * See: Filament-Based Atmospheric Dispersion Model to Achieve Short Time-Scale Structure of Odor Plumes, Farrell et al, 2002
 ---------------------------------------------------------------------------------------*/
#include "filament_simulator/filament.h"
//Default Constructor
CFilament::CFilament()
{
//Create a new filament!
pose_x = 0.0; //[m] Filament center pose
pose_y = 0.0; //[m] Filament center pose
pose_z = 0.0; //[m] Filament center pose
    sigma = 0.01;          //[cm] The sigma of a 3D Gaussian (controls the shape of the filament)
birth_time = 0.0;
valid = false;
}
//Overload Constructor
CFilament::CFilament(double x, double y, double z, double sigma_filament)
{
//Create a new filament!
pose_x = x; //[m] Filament center pose
pose_y = y; //[m] Filament center pose
pose_z = z; //[m] Filament center pose
    sigma = sigma_filament;    //[cm] The sigma of a 3D Gaussian (controls the shape of the filament)
birth_time = 0.0;
valid = false;
}
CFilament::~CFilament()
{
}
void CFilament::activate_filament(double x, double y, double z, double birth)
{
    //Activate the filament at the given location
pose_x = x;
pose_y = y;
pose_z = z;
birth_time = birth;
valid = true;
}
void CFilament::deactivate_filament()
{
    //Deactivate the filament
valid = false;
}
| 2,018 | C++ | 33.810344 | 124 | 0.590188 |
tudelft/autoGDMplus/gaden_ws/src/gaden/gaden_filament_simulator/include/filament_simulator/filament_simulator.h | #ifndef CFilamentSimulator_H
#define CFilamentSimulator_H
#include <omp.h>
#include <ros/ros.h>
#include <std_msgs/Bool.h>
#include <tf/transform_broadcaster.h>
#include <tf/transform_listener.h>
#include <visualization_msgs/Marker.h>
#include "filament_simulator/filament.h"
#include <stdlib.h>     /* srand, rand */
#include <math.h>       /* fabs, floor, ceil, pow, sqrt */
#include <iostream>
#include <fstream>
#include <random>
#include <boost/format.hpp>
#include <boost/filesystem.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/interprocess/streams/bufferstream.hpp>
#include <boost/iostreams/filter/zlib.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/copy.hpp>
class CFilamentSimulator
{
public:
CFilamentSimulator();
~CFilamentSimulator();
void add_new_filaments(double radius_arround_source);
void read_wind_snapshot(int idx);
void update_gas_concentration_from_filaments();
void update_gas_concentration_from_filament(int fil_i);
void update_filaments_location();
void update_filament_location(int i);
void publish_markers();
void save_state_to_file();
//Variables
int current_wind_snapshot;
int current_simulation_step;
double sim_time;
int last_saved_step;
//Parameters
bool verbose;
bool wait_preprocessing;
bool preprocessing_done;
    double max_sim_time;                //(sec) Time to run this simulation
int numSteps; //Number of gas iterations to simulate
double time_step; //(sec) Time increment between gas snapshots --> Simul_time = snapshots*time_step
int numFilaments_sec; //Num of filaments released per second
bool variable_rate; //If true the number of released filaments would be random(0,numFilaments_sec)
int filament_stop_steps; //Number of steps to wait between the release of filaments (to force a patchy plume)
int filament_stop_counter;
double numFilaments_step; //Num of filaments released per time_step
double numFilament_aux;
int current_number_filaments;
int total_number_filaments; //total number of filaments to use along the simulation (for efficiency -> avoids push_back)
double filament_ppm_center; //[ppm] Gas concentration at the center of the 3D gaussian (filament)
double filament_initial_std; //[cm] Sigma of the filament at t=0-> 3DGaussian shape
double filament_growth_gamma; //[cm²/s] Growth ratio of the filament_std
    double filament_noise_std;          //STD to add some "variability" to the filament location
int gasType; //Gas type to simulate
double envTemperature; //Temp in Kelvins
double envPressure; //Pressure in Atm
int gasConc_unit; //Get gas concentration in [molecules/cm3] or [ppm]
//Wind
std::string wind_files_location; //Location of the wind information
double windTime_step; //(sec) Time increment between wind snapshots
double sim_time_last_wind; //(sec) Simulation Time of the last updated of wind data
bool allow_looping;
int loop_from_step;
int loop_to_step;
    //Environment
std::string occupancy3D_data; //Location of the 3D Occupancy GridMap of the environment
std::string fixed_frame; //Frame where to publish the markers
int env_cells_x; //cells
int env_cells_y; //cells
int env_cells_z; //cells
double env_min_x; //[m]
double env_max_x; //[m]
double env_min_y; //[m]
double env_max_y; //[m]
double env_min_z; //[m]
double env_max_z; //[m]
double cell_size; //[m]
//Gas Source Location (for releasing the filaments)
double gas_source_pos_x; //[m]
double gas_source_pos_y; //[m]
double gas_source_pos_z; //[m]
//Results
int save_results; //True or false
std::string results_location; //Location for results logfiles
double results_time_step; //(sec) Time increment between saving results
double results_min_time; //(sec) time after which start saving results
bool wind_finished;
boost::mutex mtx;
protected:
void loadNodeParameters();
void initSimulator();
void configure3DMatrix(std::vector< double > &A);
void read_3D_file(std::string filename, std::vector< double > &A, bool hasHeader, bool binary);
int check_pose_with_environment(double pose_x, double pose_y, double pose_z);
bool check_environment_for_obstacle(double start_x, double start_y, double start_z, double end_x, double end_y, double end_z);
double random_number(double min_val, double max_val);
void preprocessingCB(const std_msgs::Bool& b);
//Subscriptions & Publishers
ros::Publisher marker_pub; //For visualization of the filaments!
ros::Subscriber prepro_sub; // In case we require the preprocessing node to finish.
//Vars
ros::NodeHandle n;
std::vector< double > U, V, W, C, Env;
std::vector<CFilament> filaments;
visualization_msgs::Marker filament_marker;
bool wind_notified;
int last_wind_idx=-1;
// SpecificGravity [dimensionless] with respect AIR
double SpecificGravity[14] = {
// Molecular gas mass [g/mol]
// SpecificGravity(Air) = 1 (as reference)
// Specific gravity is the ratio of the density of a substance to the density of a reference substance; equivalently,
// it is the ratio of the mass of a substance to the mass of a reference substance for the same given volume.
1.0378, //ethanol (heavier than air)
0.5537, //methane (lighter than air)
0.0696, //hydrogen (lighter than air)
1.4529, //acetone (heavier than air)
//To be updated
        1.23,   //propanol   //gases heavier than air
2.48, //chlorine
1.31, //fluorine
0.7, //neon //gases lighter than air
0.138, //helium
0.8, //sewage, biogas
2.0061, //butane
0.967, //carbon monoxide
1.52, //carbon dioxide
0.89 //smoke
};
//Fluid Dynamics
double filament_initial_vol;
double env_cell_vol;
double filament_numMoles; //Number of moles in a filament (of any gas or air)
double filament_numMoles_of_gas; //Number of moles of target gas in a filament
double env_cell_numMoles; //Number of moles in a cell (3D volume)
int indexFrom3D(int x, int y, int z);
};
#endif
| 6,800 | C | 40.218182 | 132 | 0.625735 |
tudelft/autoGDMplus/gaden_ws/src/gaden/gaden_filament_simulator/include/filament_simulator/filament.h | #ifndef FILAMENT_H
#define FILAMENT_H
class CFilament
{
public:
CFilament();
CFilament(double x, double y, double z, double sigma_filament);
~CFilament();
void activate_filament(double x, double y, double z, double birth);
void deactivate_filament();
//Parameters of the filament
//--------------------------
double pose_x; // Center of the filament (m)
double pose_y; // Center of the filament (m)
double pose_z; // Center of the filament (m)
    double sigma;          // [cm] The sigma of a 3D Gaussian (controls the shape of the filament)
bool valid; // Is filament valid?
double birth_time; // Time at which the filament is released (set as active)
};
#endif
| 769 | C | 31.083332 | 100 | 0.595579 |
tudelft/autoGDMplus/gaden_ws/src/gaden/utils/paraview.py | from paraview.simple import *
import sys
import os
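# Usage sketch (typically run with ParaView's pvpython interpreter):
#   pvpython paraview.py <path/to/case.foam> <output_name> [-d]
# argv[1] is the OpenFOAM case file, argv[2] names both the output directory and the
# CSV file prefix, and the optional "-d" flag selects a decomposed (parallel) case.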
path = sys.argv[1]
# create a new 'OpenFOAMReader'
foamReader = OpenFOAMReader(FileName=path)
# Properties modified on foamReader
if len(sys.argv)==4 and sys.argv[3]=="-d":
foamReader.CaseType = 'Decomposed Case'
foamReader.UpdatePipeline()
foamReader.UpdatePipelineInformation()
# Properties modified on foamReader
foamReader.MeshRegions = ['internalMesh']
foamReader.CellArrays = ['U']
foamReader.UpdatePipeline()
# create a new 'Cell Centers'
cellCenters1 = CellCenters(Input=foamReader)
# Properties modified on cellCenters1
cellCenters1.VertexCells = 1
cellCenters1.UpdatePipeline()
# save data
directory=sys.argv[2]
if not os.path.exists(directory):
os.makedirs(directory)
SaveData(directory+"/"+sys.argv[2]+".csv", proxy=cellCenters1, WriteAllTimeSteps=1)
| 827 | Python | 21.999999 | 83 | 0.771463 |
tudelft/autoGDMplus/gaden_ws/src/gaden/simulated_gas_sensor/src/fake_gas_sensor.h | #include <ros/ros.h>
#include <std_msgs/Float32.h>
#include <visualization_msgs/Marker.h>
#include <nav_msgs/Odometry.h>
#include <geometry_msgs/PoseWithCovarianceStamped.h>
#include <tf/transform_listener.h>
#include <olfaction_msgs/gas_sensor.h>
#include <gaden_player/GasPosition.h>
#include <cstdlib>
#include <math.h>
#include <vector>
#include <fstream>
#include <iostream>
//Gas Types
#define ETHANOL_ID 0
#define METHANE_ID 1
#define HYDROGEN_ID 2
#define PROPANOL_ID 3
#define CHLORIDE_ID 4
#define FLURORINE_ID 5
#define ACETONE_ID 6
#define NEON_ID 7
#define HELIUM_ID 8
#define HOTAIR_ID 9
//Sensor Types
#define TGS2620_ID 0
#define TGS2600_ID 1
#define TGS2611_ID 2
#define TGS2610_ID 3
#define TGS2612_ID 4
//Default Values
#define NODE_NAME "fake_mox"
#define DEFAULT_GAS_TYPE METHANE_ID
#define DEFAULT_SENSOR_NAME "mox_sensor"
#define DEFAULT_SENSOR_MODEL TGS2620_ID
#define DEFAULT_SENSOR_FRAME "mox_frame"
#define DEFAULT_FIXED_FRAME "map"
// Sensor Parameters
int input_sensor_model;
std::string input_sensor_frame;
std::string input_fixed_frame;
bool use_PID_correction_factors;
//MOX model params
bool first_reading; //First reading is set to baseline always
float RS_R0; //Ideal sensor response based on sensitivity
float sensor_output; //MOX model response
float previous_sensor_output; //The response in (t-1)
float node_rate; //(Hz) Execution freq. Useful for the MOX model
bool notified;                  //to notify about errors just once
// Vars
int ch_id; //Chemical ID
//functions:
void loadNodeParameters(ros::NodeHandle private_nh);
float simulate_mox_as_line_loglog(gaden_player::GasPositionResponse GT_gas_concentrations);
float simulate_pid(gaden_player::GasPositionResponse GT_gas_concentrations);
//------------------------ SENSOR CHARACTERIZATION PARAMS ----------------------------------//
std::string labels[5] = {"TGS2620", "TGS2600", "TGS2611", "TGS2610", "TGS2612"};
float R0[5] = {3000, 50000, 3740, 3740, 4500}; //[Ohms] Reference resistance (see datasheets)
//Time constants (Rise, Decay)
float tau_value[5][7][2] = //5 sensors, 7 gases , 2 Time Constants
{
{ //TGS2620
{2.96, 15.71}, //ethanol
{2.96, 15.71}, //methane
{2.96, 15.71}, //hydrogen
{2.96, 15.71}, //propanol
{2.96, 15.71}, //chlorine
{2.96, 15.71}, //fluorine
{2.96, 15.71} //Acetone
},
{ //TGS2600
{4.8, 18.75}, //ethanol
{4.8, 18.75}, //methane
{4.8, 18.75}, //hydrogen
{4.8, 18.75}, //propanol
{4.8, 18.75}, //chlorine
{4.8, 18.75}, //fluorine
{4.8, 18.75} //Acetone
},
{ //TGS2611
{3.44, 6.35}, //ethanol
{3.44, 6.35}, //methane
{3.44, 6.35}, //hydrogen
{3.44, 6.35}, //propanol
{3.44, 6.35}, //chlorine
{3.44, 6.35}, //fluorine
{3.44, 6.35} //Acetone
},
{ //TGS2610
{3.44, 6.35}, //ethanol
{3.44, 6.35}, //methane
{3.44, 6.35}, //hydrogen
{3.44, 6.35}, //propanol
{3.44, 6.35}, //chlorine
{3.44, 6.35}, //fluorine
{3.44, 6.35} //Acetone
},
{ //TGS2612
{3.44, 6.35}, //ethanol
{3.44, 6.35}, //methane
{3.44, 6.35}, //hydrogen
{3.44, 6.35}, //propanol
{3.44, 6.35}, //chlorine
{3.44, 6.35}, //fluorine
{3.44, 6.35} //Acetone
}
};
// MOX sensitivity. Extracted from datasheets and curve fitting
//--------------------------------------------------------------
float Sensitivity_Air[5] = {21, 1, 8.8, 10.3, 19.5}; //RS/R0 when exposed to clean air (datasheet)
// RS/R0 = A*conc^B (a line in the loglog scale)
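// Worked example (values from the table below): for a TGS2620 exposed to ~100 ppm of ethanol,
// RS/R0 = 62.32 * 100^(-0.7155) ~= 2.3, well below its clean-air ratio of 21.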
float sensitivity_lineloglog[5][7][2]={ //5 Sensors, 7 Gases, 2 Constants: A, B
{ //TGS2620
{62.32, -0.7155}, //Ethanol
{120.6, -0.4877}, //Methane
{24.45, -0.5546}, //Hydrogen
{120.6, -0.4877}, //propanol (To review)
{120.6, -0.4877}, //chlorine (To review)
{120.6, -0.4877}, //fluorine (To review)
{120.6, -0.4877} //Acetone (To review)
},
{ //TGS2600
{0.6796, -0.3196}, //ethanol
{1.018, -0.07284}, //methane
{0.6821, -0.3532}, //hydrogen
{1.018, -0.07284}, //propanol (To review)
{1.018, -0.07284}, //chlorine (To review)
{1.018, -0.07284}, //fluorine (To review)
{1.018, -0.07284} //Acetone (To review)
},
{ //TGS2611
{51.11, -0.3658}, //ethanol
{38.46, -0.4289}, //methane
{41.3, -0.3614}, //hydrogen
{38.46, -0.4289}, //propanol (To review)
{38.46, -0.4289}, //chlorine (To review)
{38.46, -0.4289}, //fluorine (To review)
{38.46, -0.4289} //Acetone (To review)
},
{ //TGS2610
{106.1, -0.5008}, //ethanol
{63.91, -0.5372}, //methane
{66.78, -0.4888}, //hydrogen
{63.91, -0.5372}, //propanol (To review)
{63.91, -0.5372}, //chlorine (To review)
{63.91, -0.5372}, //fluorine (To review)
{63.91, -0.5372} //Acetone (To review)
},
{ //TGS2612
{31.35, -0.09115}, //ethanol
{146.2, -0.5916}, //methane
{19.5, 0.0}, //hydrogen
{146.2, -0.5916}, //propanol (To review)
{146.2, -0.5916}, //chlorine (To review)
{146.2, -0.5916}, //fluorine (To review)
{146.2, -0.5916} //Acetone (To review)
}
};
//PID correction factors for gas concentration
//--------------------------------------------
//Ethanol, Methane, Hydrogen, Propanol, Chlorine, Fluorine, Acetone
// http://www.intlsensor.com/pdf/pidcorrectionfactors.pdf
// Here we simulate a lamp of 11.7eV to increase the range of detectable gases
// A 0.0 means the PID is not responsive to that gas
float PID_correction_factors[7] = {10.47, 0.0, 0.0, 2.7, 1.0, 0.0, 1.4};
// OLD - DEPRECATED
// MOX sensitivity. Extracted from datasheets and curve fitting
// RS/R0 = A*e^(B*conc) + C*e^(D*conc)
float sensitivity_2exp[3][3][4]={ //3 Sensors, 3 Gases, 4 Constants: A, B, C, D
{ //TGS2620
{6.018,-0.01662, 0.9832,-0.0005651}, //ethanol
{18.79,-0.01062, 6.138, -0.0002136 }, //methane
{3.884 ,-0.0127,0.8745,-0.0003222 }, //hydrogen
},
{ //TGS2600
{0.4668,-0.3249, 0.3293, -0.01051 }, //ethanol
{0.2202, -0.1122, 0.8356, -0.001932}, //methane
{0.0,0.0,0.0,0.0}, //hydrogen
},
{ //TGS2611
{4.766,-0.001639, 3.497, -7.348e-05 }, //ethanol
        {3.286, -0.002211, 1.806, -0.000103},   //methane
{4.535, -0.001723, 2.69, -5.191e-05}, //hydrogen
}
};
| 6,747 | C | 29.672727 | 103 | 0.551653 |
tudelft/autoGDMplus/gaden_ws/src/gaden/simulated_gas_sensor/src/fake_gas_sensor.cpp | /*-------------------------------------------------------------------------------
* This node simulates the response of a MOX gas sensor given the GT gas concentration
* of the gases it is exposed to (request to simulation_player or dispersion_simulation)
* - Gas concentration should be given in [ppm]
* - The Pkg response can be set to: Resistance of the sensor (Rs), Resistance-ratio (Rs/R0), or Voltage (0-5V)
 * - Sensitivity to different gases is set based on the manufacturer datasheet
* - Time constants for the dynamic response are set based on real experiments
*
* - Response to mixture of gases is set based on datasheet.
* -----------------------------------------------------------------------------------------------*/
#include "fake_gas_sensor.h"
int main( int argc, char** argv )
{
ros::init(argc, argv, NODE_NAME);
ros::NodeHandle n;
ros::NodeHandle pn("~");
//Read parameters
loadNodeParameters(pn);
//Publishers
ros::Publisher sensor_read_pub = n.advertise<olfaction_msgs::gas_sensor>("Sensor_reading", 500);
ros::Publisher marker_pub = n.advertise<visualization_msgs::Marker>("Sensor_display", 100);
//Service to request gas concentration
ros::ServiceClient client = n.serviceClient<gaden_player::GasPosition>("/odor_value");
//Init Visualization data (marker)
//---------------------------------
// sensor = sphere
    // connector = stick from the floor to the sensor
visualization_msgs::Marker sensor,connector;
sensor.header.frame_id = input_fixed_frame.c_str();
sensor.ns = "sensor_visualization";
sensor.action = visualization_msgs::Marker::ADD;
sensor.type = visualization_msgs::Marker::SPHERE;
sensor.id = 0;
sensor.scale.x = 0.1;
sensor.scale.y = 0.1;
sensor.scale.z = 0.1;
sensor.color.r = 2.0f;
sensor.color.g = 1.0f;
sensor.color.a = 1.0;
connector.header.frame_id = input_fixed_frame.c_str();
connector.ns = "sensor_visualization";
connector.action = visualization_msgs::Marker::ADD;
connector.type = visualization_msgs::Marker::CYLINDER;
connector.id = 1;
connector.scale.x = 0.1;
connector.scale.y = 0.1;
connector.color.a = 1.0;
connector.color.r = 1.0f;
connector.color.b = 1.0f;
connector.color.g = 1.0f;
// Loop
tf::TransformListener listener;
node_rate = 5; //Hz
ros::Rate r(node_rate);
first_reading = true;
notified = false;
while (ros::ok())
{
//Vars
tf::StampedTransform transform;
bool know_sensor_pose = true;
//Get pose of the sensor in the /map reference
try
{
listener.lookupTransform(input_fixed_frame.c_str(), input_sensor_frame.c_str(),
ros::Time(0), transform);
}
        catch (tf::TransformException& ex)
{
ROS_ERROR("%s",ex.what());
know_sensor_pose = false;
ros::Duration(1.0).sleep();
}
if (know_sensor_pose)
{
//Current sensor pose
float x_pos = transform.getOrigin().x();
float y_pos = transform.getOrigin().y();
float z_pos = transform.getOrigin().z();
// Get Gas concentration at current position (of each gas present)
// Service request to the simulator
gaden_player::GasPosition srv;
srv.request.x.push_back(x_pos);
srv.request.y.push_back(y_pos);
srv.request.z.push_back(z_pos);
if (client.call(srv))
{
/*
for (int i=0; i<srv.response.gas_type.size(); i++)
{
ROS_INFO("[FakeMOX] %s:%.4f at (%.2f,%.2f,%.2f)",srv.response.gas_type[i].c_str(), srv.response.gas_conc[i],srv.request.x, srv.request.y, srv.request.z );
}
*/
//Simulate Gas_Sensor response given this GT values of the concentration!
olfaction_msgs::gas_sensor sensor_msg;
sensor_msg.header.frame_id = input_sensor_frame;
sensor_msg.header.stamp = ros::Time::now();
switch (input_sensor_model)
{
case 0: //MOX TGS2620
sensor_msg.technology = sensor_msg.TECH_MOX;
sensor_msg.manufacturer = sensor_msg.MANU_FIGARO;
sensor_msg.mpn = sensor_msg.MPN_TGS2620;
sensor_msg.raw_units = sensor_msg.UNITS_OHM;
sensor_msg.raw = simulate_mox_as_line_loglog(srv.response);
sensor_msg.raw_air = Sensitivity_Air[input_sensor_model]*R0[input_sensor_model];
sensor_msg.calib_A = sensitivity_lineloglog[input_sensor_model][0][0]; //Calib for Ethanol
sensor_msg.calib_B = sensitivity_lineloglog[input_sensor_model][0][1]; //Calib for Ethanol
break;
case 1: //MOX TGS2600
sensor_msg.technology = sensor_msg.TECH_MOX;
sensor_msg.manufacturer = sensor_msg.MANU_FIGARO;
sensor_msg.mpn = sensor_msg.MPN_TGS2600;
sensor_msg.raw_units = sensor_msg.UNITS_OHM;
sensor_msg.raw = simulate_mox_as_line_loglog(srv.response);
sensor_msg.raw_air = Sensitivity_Air[input_sensor_model]*R0[input_sensor_model];
sensor_msg.calib_A = sensitivity_lineloglog[input_sensor_model][0][0]; //Calib for Ethanol
sensor_msg.calib_B = sensitivity_lineloglog[input_sensor_model][0][1]; //Calib for Ethanol
break;
case 2: //MOX TGS2611
sensor_msg.technology = sensor_msg.TECH_MOX;
sensor_msg.manufacturer = sensor_msg.MANU_FIGARO;
sensor_msg.mpn = sensor_msg.MPN_TGS2611;
sensor_msg.raw_units = sensor_msg.UNITS_OHM;
sensor_msg.raw = simulate_mox_as_line_loglog(srv.response);
sensor_msg.raw_air = Sensitivity_Air[input_sensor_model]*R0[input_sensor_model];
sensor_msg.calib_A = sensitivity_lineloglog[input_sensor_model][0][0]; //Calib for Ethanol
sensor_msg.calib_B = sensitivity_lineloglog[input_sensor_model][0][1]; //Calib for Ethanol
break;
case 3: //MOX TGS2610
sensor_msg.technology = sensor_msg.TECH_MOX;
sensor_msg.manufacturer = sensor_msg.MANU_FIGARO;
sensor_msg.mpn = sensor_msg.MPN_TGS2610;
sensor_msg.raw_units = sensor_msg.UNITS_OHM;
sensor_msg.raw = simulate_mox_as_line_loglog(srv.response);
sensor_msg.raw_air = Sensitivity_Air[input_sensor_model]*R0[input_sensor_model];
sensor_msg.calib_A = sensitivity_lineloglog[input_sensor_model][0][0]; //Calib for Ethanol
sensor_msg.calib_B = sensitivity_lineloglog[input_sensor_model][0][1]; //Calib for Ethanol
break;
case 4: //MOX TGS2612
sensor_msg.technology = sensor_msg.TECH_MOX;
sensor_msg.manufacturer = sensor_msg.MANU_FIGARO;
sensor_msg.mpn = sensor_msg.MPN_TGS2612;
sensor_msg.raw_units = sensor_msg.UNITS_OHM;
sensor_msg.raw = simulate_mox_as_line_loglog(srv.response);
sensor_msg.raw_air = Sensitivity_Air[input_sensor_model]*R0[input_sensor_model];
sensor_msg.calib_A = sensitivity_lineloglog[input_sensor_model][0][0]; //Calib for Ethanol
sensor_msg.calib_B = sensitivity_lineloglog[input_sensor_model][0][1]; //Calib for Ethanol
break;
case 30: //PID miniRaeLite
sensor_msg.technology = sensor_msg.TECH_PID;
sensor_msg.manufacturer = sensor_msg.MANU_RAE;
sensor_msg.mpn = sensor_msg.MPN_MINIRAELITE;
sensor_msg.raw_units = sensor_msg.UNITS_PPM;
sensor_msg.raw = simulate_pid(srv.response);
sensor_msg.raw_air = 0.0;
sensor_msg.calib_A = 0.0;
sensor_msg.calib_B = 0.0;
break;
default:
break;
}
//Publish simulated sensor reading
sensor_read_pub.publish(sensor_msg);
notified = false;
}
else
{
if (!notified)
{
ROS_WARN("[fake_gas_sensor] Cannot read Gas Concentrations from simulator.");
notified = true;
}
}
//Publish RVIZ sensor pose
sensor.header.stamp = ros::Time::now();
sensor.pose.position.x = x_pos;
sensor.pose.position.y = y_pos;
sensor.pose.position.z = z_pos;
marker_pub.publish(sensor);
connector.header.stamp = ros::Time::now();
connector.scale.z = z_pos;
connector.pose.position.x = x_pos;
connector.pose.position.y = y_pos;
connector.pose.position.z = float(z_pos)/2;
marker_pub.publish(connector);
}
ros::spinOnce();
r.sleep();
}
}
// Simulate MOX response: Sensitivity + Dynamic response
// RS = R0*( A * conc^B )
// This method employs a curve fit based on a line in the loglog scale to set the sensitivity
float simulate_mox_as_line_loglog(gaden_player::GasPositionResponse GT_gas_concentrations)
{
if (first_reading)
{
//Init sensor to its Baseline lvl
sensor_output = Sensitivity_Air[input_sensor_model]; //RS_R0 value at air
previous_sensor_output = sensor_output;
first_reading = false;
}
else
{
//1. Set Sensor Output based on gas concentrations (gas type dependent)
//---------------------------------------------------------------------
// RS/R0 = A*conc^B (a line in the loglog scale)
float resistance_variation = 0.0;
//Handle multiple gases
for (int i=0; i<GT_gas_concentrations.positions[0].concentration.size(); i++)
{
int gas_id;
if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"ethanol"))
gas_id = 0;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"methane"))
gas_id = 1;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"hydrogen"))
gas_id = 2;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"propanol"))
gas_id = 3;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"chlorine"))
gas_id = 4;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"fluorine"))
gas_id = 5;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"acetone"))
gas_id = 6;
else
{
ROS_ERROR("[fake_mox] MOX response is not configured for this gas type!");
return 0.0;
}
//JUST FOR VIDEO DEMO
/*
if (input_sensor_model == 0)
{
GT_gas_concentrations.gas_conc[i] *= 10;
}
else if (input_sensor_model ==2)
{
GT_gas_concentrations.gas_conc[i] *= 20;
}
*/
//Value of RS/R0 for the given gas and concentration
RS_R0 = sensitivity_lineloglog[input_sensor_model][gas_id][0] * pow(GT_gas_concentrations.positions[0].concentration[i], sensitivity_lineloglog[input_sensor_model][gas_id][1]);
//ROS_INFO("Sensor model: %d - Gas conc: %f", input_sensor_model, GT_gas_concentrations.positions[0].concentration[i]);
            //Ensure we never exceed the baseline level (max allowed)
if (RS_R0 > Sensitivity_Air[input_sensor_model])
RS_R0 = Sensitivity_Air[input_sensor_model];
//Increment with respect the Baseline
resistance_variation += Sensitivity_Air[input_sensor_model] - RS_R0;
}
//Calculate final RS_R0 given the final resistance variation
RS_R0 = Sensitivity_Air[input_sensor_model] - resistance_variation;
        //Ensure a minimum sensor resistance
if (RS_R0 <= 0.0)
RS_R0 = 0.01;
//2. Simulate transient response (dynamic behaviour, tau_r and tau_d)
//---------------------------------------------------------------------
float tau;
if (RS_R0 < previous_sensor_output) //rise
tau = tau_value[input_sensor_model][0][0];
else //decay
tau = tau_value[input_sensor_model][0][1];
// Use a low pass filter
//alpha value = At/(tau+At)
float alpha = (1/node_rate) / (tau+(1/node_rate));
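        // e.g. with node_rate = 5 Hz (At = 0.2 s) and the TGS2620 rise constant tau = 2.96 s,
        // alpha = 0.2 / (2.96 + 0.2) ~= 0.063, i.e. each sample moves the output ~6% towards RS_R0.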
//filtered response (uses previous estimation):
sensor_output = (alpha*RS_R0) + (1-alpha)*previous_sensor_output;
//Update values
previous_sensor_output = sensor_output;
}
// Return Sensor response for current time instant as the Sensor Resistance in Ohms
return (sensor_output * R0[input_sensor_model]);
}
// Simulate PID response : Weighted Sum of all gases
float simulate_pid(gaden_player::GasPositionResponse GT_gas_concentrations)
{
//Handle multiple gases
float accumulated_conc = 0.0;
for (int i=0; i<GT_gas_concentrations.positions[0].concentration.size(); i++)
{
if (use_PID_correction_factors)
{
int gas_id;
if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"ethanol"))
gas_id = 0;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"methane"))
gas_id = 1;
else if (!strcmp(GT_gas_concentrations.gas_type[i].c_str(),"hydrogen"))
gas_id = 2;
else
{
ROS_ERROR("[fake_PID] PID response is not configured for this gas type!");
return 0.0;
}
if (PID_correction_factors[gas_id] != 0)
accumulated_conc += GT_gas_concentrations.positions[0].concentration[i] / PID_correction_factors[gas_id];
}
else
accumulated_conc += GT_gas_concentrations.positions[0].concentration[i];
}
return accumulated_conc;
}
// ===============================//
// Load Node Parameters //
// ===============================//
void loadNodeParameters(ros::NodeHandle private_nh)
{
//fixed frame
private_nh.param<std::string>("fixed_frame", input_fixed_frame, "map");
//Sensor Model
private_nh.param<int>("sensor_model", input_sensor_model, DEFAULT_SENSOR_MODEL);
//sensor_frame
private_nh.param<std::string>("sensor_frame", input_sensor_frame, DEFAULT_SENSOR_FRAME);
//PID_correction_factors
private_nh.param<bool>("use_PID_correction_factors", use_PID_correction_factors, false);
ROS_INFO("The data provided in the roslaunch file is:");
ROS_INFO("Sensor model: %d", input_sensor_model);
ROS_INFO("Fixed frame: %s",input_fixed_frame.c_str());
ROS_INFO("Sensor frame: %s",input_sensor_frame.c_str());
}
| 15,494 | C++ | 39.883905 | 188 | 0.550471 |
tudelft/autoGDMplus/gaden_ws/src/gaden/test_env/decompress/src/comp.cpp | #include <fstream>
#include <iostream>
#include <vector>
#include <bits/stdc++.h>
#include <boost/format.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filter/zlib.hpp>
#include <boost/iostreams/copy.hpp>
int main(int argc, char *argv[]){
using namespace std;
if(argc!=3){
cout << "Correct format is \"decompress inputFile outputFile\"";
}else{
ifstream infile(argv[1], ios_base::binary);
boost::iostreams::filtering_streambuf<boost::iostreams::input> inbuf;
inbuf.push(boost::iostreams::zlib_decompressor());
inbuf.push(infile);
ofstream out(argv[2]);
boost::iostreams::copy(inbuf,out);
}
}
| 705 | C++ | 26.153845 | 77 | 0.655319 |
tudelft/autoGDMplus/gaden_ws/src/gaden/test_env/decompress/src/toASCII.cpp | #include <fstream>
#include <sstream>
#include <iostream>
#include <vector>
#include <bits/stdc++.h>
#include <boost/format.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filter/zlib.hpp>
#include <boost/iostreams/copy.hpp>
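// Utility sketch: zlib-decompresses a GADEN gas snapshot (the "iteration_N" files written by
// the filament simulator) and dumps its header plus per-filament records as readable ASCII.
// Usage: toASCII inputFile outputFile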
int main(int argc, char *argv[]){
if(argc!=3){
std::cout << "Correct format is \"toASCII inputFile outputFile\"";
return -1;
}
std::ifstream infile(argv[1], std::ios_base::binary);
boost::iostreams::filtering_streambuf<boost::iostreams::input> inbuf;
inbuf.push(boost::iostreams::zlib_decompressor());
inbuf.push(infile);
std::stringstream decompressed;
boost::iostreams::copy(inbuf,decompressed);
std::ofstream outFile (argv[2], std::ios::out);
double bufferD;
int bufferInt;
decompressed.read((char*) &bufferInt, sizeof(int));
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<"env_min "<<bufferD;
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<" "<<bufferD;
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<" "<<bufferD<<"\n";
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<"env_max "<<bufferD;
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<" "<<bufferD;
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<" "<<bufferD<<"\n";
decompressed.read((char*) &bufferInt, sizeof(int));
outFile<<"NumCells_XYZ "<<bufferInt;
int cells_x=bufferInt;
decompressed.read((char*) &bufferInt, sizeof(int));
outFile<<" "<<bufferInt;
int cells_y=bufferInt;
decompressed.read((char*) &bufferInt, sizeof(int));
outFile<<" "<<bufferInt<<"\n";
int cells_z=bufferInt;
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<"CellSizes_XYZ "<<bufferD;
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<" "<<bufferD;
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<" "<<bufferD<<"\n";
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<"GasSourceLocation_XYZ "<<bufferD;
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<" "<<bufferD;
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<" "<<bufferD<<"\n";
decompressed.read((char*) &bufferInt, sizeof(int));
outFile<<"GasType "<<bufferInt<<"\n";
outFile<<"Number of moles per filament ";
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<bufferD<<"\nMoles of all gases in cm3 ";
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<bufferD<<"\n";
decompressed.read((char*) &bufferInt, sizeof(int));
outFile<<bufferInt<<"\n";
while(decompressed.peek()!=EOF){
decompressed.read((char*) &bufferInt, sizeof(int));
outFile<<bufferInt<<" ";
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<bufferD<<" ";
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<bufferD<<" ";
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<bufferD<<" ";
decompressed.read((char*) &bufferD, sizeof(double));
outFile<<bufferD<<"\n";
}
outFile.close();
return 0;
}
| 3,274 | C++ | 31.75 | 74 | 0.635614 |
tudelft/autoGDMplus/gaden_ws/src/gaden/gaden_gui/gui.py | from tkinter import *
from tkinter import filedialog
import re
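# Small Tkinter front-end that collects simulation settings and writes the corresponding
# GADEN launch files; this first section builds GADEN_preprocessing.launch.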
root = Tk()
root.geometry('780x700')
left=Frame(root, width=350)
right=Frame(root, width=350)
left.grid(row=0, column=0, sticky=N)
right.grid(row=0, column=1, sticky=N)
leftList=[]
modelFiles=[]
windF = ""
###################################################################################################################################
# GADEN_preprocessing.launch
###################################################################################################################################
def getModelFile(ent):
file= filedialog.askopenfilename(filetypes=(("STL files", "*.stl"), ("All files", "*.*") ) )
ent.delete(0,END)
ent.insert(0, file)
def getCSVFile(ent):
file= filedialog.askopenfilename(filetypes=(("CSV files", "*.csv"), ("All files", "*.*") ) )
ent.delete(0,END)
ent.insert(0, file)
def addFile():
entryModel = Entry(left)
modelLabel = Label(left, text="Model "+str(len(modelFiles)))
modelFileButton = Button(left, text="...", command=lambda: getModelFile(entryModel))
#rearrange the grid to fit the new entry field
leftList.insert(len(modelFiles)+2,[modelLabel,entryModel,modelFileButton])
modelFiles.append(entryModel)
for row in range(0,len(leftList)):
for col in range(0,len(leftList[row])):
leftList[row][col].grid(row=row, column=col)
#entry field for size of cell side
cellSizeLabel= Label(left, text="Cell size: ")
cellSizeEntry = Entry(left)
#label for list of models
modelsLabel= Label(left, text="STL models of environment")
#button to create more filepath entry fields
addFileButton= Button(left, text="+", command=addFile)
#empty point coordinates
emptyPointLabel=Label(left, text="Coordinates of empty point")
xEmptyPoint = Entry(left)
yEmptyPoint = Entry(left)
zEmptyPoint = Entry(left)
xEmptyPoint.insert(END, "x")
yEmptyPoint.insert(END, "y")
zEmptyPoint.insert(END, "z")
#wind files
windLabel = Label(left, text="Wind files")
homogeneous=BooleanVar()
homogeneous.set(False)
check = Checkbutton(left, text="Homogeneous", variable=homogeneous, onvalue=True, offvalue=False)
windEntry=Entry(left)
windFileButton = Button(left, text="...", command=lambda: getCSVFile(windEntry))
leftList.append([cellSizeLabel, cellSizeEntry])
leftList.append([modelsLabel])
leftList.append([addFileButton])
leftList.append([emptyPointLabel])
leftList.append([xEmptyPoint])
leftList.append([yEmptyPoint])
leftList.append([zEmptyPoint])
leftList.append([windLabel])
leftList.append([check,windEntry,windFileButton])
#create the first file entry field for CAD models automatically
addFile()
#right side of window
outletModelFiles=[]
rightList=[]
def addOutletFile():
entryModel = Entry(right)
modelLabel = Label(right, text="Outlet model "+str(len(outletModelFiles)))
modelFileButton = Button(right, text="...", command=lambda: getModelFile(entryModel))
#rearrange the grid to fit the new entry field
rightList.insert(len(outletModelFiles)+2,[modelLabel,entryModel,modelFileButton])
outletModelFiles.append(entryModel)
for row in range(0,len(rightList)):
for col in range(0,len(rightList[row])):
rightList[row][col].grid(row=row, column=col)
def getOutputFile(ent):
file= filedialog.askdirectory()
ent.delete(0,END)
ent.insert(0, file)
#empty row
emptyLabel= Label(right, text=" ")
#label for list of outlet
outletsLabel= Label(right, text="STL models of outlets")
#button to create more filepath entry fields
addOutletButton= Button(right, text="+", command=addOutletFile)
#output path for files
outputLabel = Label(right, text="Preprocessing output path: ")
outputEntry=Entry(right)
outputButton = Button(right, text="...", command=lambda: getOutputFile(outputEntry))
#save launch file
def savePreprocessingFile():
global windF
f=open("GADEN_preprocessing.launch","w")
f.write(
"<launch>\n"+
" <node pkg=\"gaden_preprocessing\" type=\"preprocessing\" name=\"preprocessing\" output=\"screen\">\n"+
" <param name=\"cell_size\" value=\""+cellSizeEntry.get()+"\"/>\n\n"+
" <param name=\"number_of_models\" value=\""+str(len(modelFiles))+"\"/>\n"
)
for ind in range(0,len(modelFiles)):
f.write(
" <param name=\"model_"+str(ind)+"\" value=\""+modelFiles[ind].get()+"\"/>\n")
f.write("\n"+
" <param name=\"number_of_outlet_models\" value=\""+str(len(outletModelFiles))+"\"/>\n")
for ind in range(0,len(outletModelFiles)):
f.write(
" <param name=\"outlets_model_"+str(ind)+"\" value=\""+outletModelFiles[ind].get()+"\"/>\n")
windF = re.sub("_0.csv","",windEntry.get())
f.write(
"\n"+
" <param name=\"empty_point_x\" value=\""+str(xEmptyPoint.get())+"\"/>\n"+
" <param name=\"empty_point_y\" value=\""+str(yEmptyPoint.get())+"\"/>\n"+
" <param name=\"empty_point_z\" value=\""+str(zEmptyPoint.get())+"\"/>\n\n"+
" <param name=\"uniformWind\" value=\""+str(homogeneous.get())+"\"/>\n"
" <param name=\"wind_files\" value=\""+windF+"\"/>\n\n"+
" <param name=\"output_path\" value=\""+outputEntry.get()+"\"/>\n"
" </node>\n"+
"</launch>"
    )
    f.close()
savedLabel= Label(right, text="Launch file saved!")
nextButton = Button(right, text="Continue", command=filamentSim)
rightList.append([savedLabel])
rightList.append([nextButton])
for row in range(0,len(rightList)):
for col in range(0,len(rightList[row])):
rightList[row][col].grid(row=row, column=col)
saveButton = Button(right, text="Save", command=savePreprocessingFile)
rightList.append([emptyLabel])
rightList.append([outletsLabel])
rightList.append([addOutletButton])
rightList.append([outputLabel, outputEntry, outputButton])
rightList.append([saveButton])
addOutletFile()
###################################################################################################################################
# GADEN.launch
###################################################################################################################################
def getColladaFile(ent):
file= filedialog.askopenfilename(filetypes=(("Collada files", "*.dae"), ("All files", "*.*") ) )
ent.delete(0,END)
ent.insert(0, file)
def getWindFile(ent):
file= filedialog.askopenfilename(filetypes=(("Wind files", "*_U"), ("All files", "*.*") ) )
ent.delete(0,END)
ent.insert(0, file)
def addColladaFile():
entryModel = Entry(left)
modelLabel = Label(left, text="Model "+str(len(modelFiles)))
modelFileButton = Button(left, text="...", command=lambda: getColladaFile(entryModel))
#rearrange the grid to fit the new entry field
leftList.insert(len(modelFiles)+2,[modelLabel,entryModel,modelFileButton])
modelFiles.append(entryModel)
for row in range(0,len(leftList)):
for col in range(0,len(leftList[row])):
leftList[row][col].grid(row=row, column=col)
startWind = Entry(left)
endWind = Entry(left)
def loopWind(*args):
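    # Toggling "Allow looping" shows the From/To wind-step rows;
    # unchecking removes the last two rows from the grid.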
if looping.get():
startLabel = Label(left,text="From:")
endLabel = Label(left,text="To:")
leftList.append([startLabel, startWind])
leftList.append([endLabel, endWind])
else:
for col in range(0,len(leftList[len(leftList)-1])):
leftList[len(leftList)-1][col].grid_forget()
leftList.pop(len(leftList)-1)
for col in range(0,len(leftList[len(leftList)-1])):
leftList[len(leftList)-1][col].grid_forget()
leftList.pop(len(leftList)-1)
for row in range(0,len(leftList)):
for col in range(0,len(leftList[row])):
leftList[row][col].grid(row=row, column=col)
def filamentSim():
global leftList, rightList, modelFiles, looping
for row in range(0,len(leftList)):
for col in range(0,len(leftList[row])):
leftList[row][col].grid_forget()
for row in range(0,len(rightList)):
for col in range(0,len(rightList[row])):
rightList[row][col].grid_forget()
leftList=[]
rightList=[]
modelFiles=[]
windEntryFilament.insert(END, windF+"_0.csv_U")
occupancy3DEntry.insert(END, outputEntry.get()+"/OccupancyGrid3D.csv")
leftList.append([occupancyLabel,occupancy3DEntry,occupancy3DButton])
leftList.append([modelsLabel])
leftList.append([addFileButton])
leftList.append([SourcePosLabel])
leftList.append([xSourcePos])
leftList.append([ySourcePos])
leftList.append([zSourcePos])
leftList.append([windLabelFilament,windEntryFilament,windButtonFilament])
leftList.append([windDurationLabel, windDurationEntry])
leftList.append([checkLooping])
addColladaFile()
rightList.append([checkVerbose])
    rightList.append([simTimeLabel,simTimeEntry])
    rightList.append([timeStepLabel,timeStepEntry])
rightList.append([numFilamentsLabel,numFilamentsEntry])
rightList.append([checkVariableRate])
rightList.append([ppmLabel,ppmEntry])
rightList.append([initialStdLabel,initialStdEntry])
rightList.append([growthGammaLabel,growthGammaEntry])
rightList.append([noiseLabel,noiseEntry])
rightList.append([gasTypeLabel,gasTypeChoice])
rightList.append([temperatureLabel,temperatureEntry])
rightList.append([pressureLabel,pressureEntry])
rightList.append([emptyLabel])
rightList.append([resultsPathLabel,resultsPathEntry,resultsPathButton])
rightList.append([resultsMinLabel,resultsMinEntry])
rightList.append([resultsIntervalLabel,resultsIntervalEntry])
rightList.append([saveFilButton])
for row in range(0,len(rightList)):
for col in range(0,len(rightList[row])):
rightList[row][col].grid(row=row, column=col)
occupancyLabel = Label(left, text="Occupancy3D file: ")
occupancy3DEntry=Entry(left)
occupancy3DButton = Button(left, text="...", command=lambda: getCSVFile(occupancy3DEntry))
#label for list of models
modelsLabel= Label(left, text="Collada models (visualization)")
#button to create more filepath entry fields
addFileButton= Button(left, text="+", command=addColladaFile)
#source position
SourcePosLabel=Label(left, text="Coordinates of gas source")
xSourcePos = Entry(left)
ySourcePos = Entry(left)
zSourcePos = Entry(left)
xSourcePos.insert(END, "x")
ySourcePos.insert(END, "y")
zSourcePos.insert(END, "z")
#wind options
windLabelFilament = Label(left, text="Wind filepath: ")
windEntryFilament=Entry(left)
windButtonFilament = Button(left, text="...", command=lambda: getWindFile(windEntryFilament))
windDurationLabel = Label(left, text="Seconds per\nwind snapshot")
windDurationEntry = Entry(left)
looping = BooleanVar()
looping.set(False)
checkLooping = Checkbutton(left, text="Allow looping", variable=looping, onvalue=True, offvalue=False)
looping.trace("w",loopWind)
### right side
verbose = BooleanVar()
verbose.set(False)
checkVerbose = Checkbutton(right, text="Verbose", variable=verbose, onvalue=True, offvalue=False)
simTimeLabel= Label(right, text="Simulation length (s)")
simTimeEntry = Entry(right)
simTimeEntry.insert(END, "300")
timeStepLabel= Label(right, text="Time step (s)")
timeStepEntry = Entry(right)
timeStepEntry.insert(END, "0.1")
numFilamentsLabel= Label(right, text="Filaments/second")
numFilamentsEntry = Entry(right)
numFilamentsEntry.insert(END, "10")
variableRate = BooleanVar()
variableRate.set(False)
checkVariableRate = Checkbutton(right, text="Variable rate", variable=variableRate, onvalue=True, offvalue=False)
ppmLabel= Label(right, text="Concentration at\nfilament center(ppm)")
ppmEntry = Entry(right)
ppmEntry.insert(END, "10")
initialStdLabel= Label(right, text="Initial stdDev (cm)")
initialStdEntry = Entry(right)
initialStdEntry.insert(END, "5")
growthGammaLabel= Label(right, text="Filament growth (cm²/s)")
growthGammaEntry = Entry(right)
growthGammaEntry.insert(END, "5")
noiseLabel= Label(right, text="Movement noise (m)")
noiseEntry = Entry(right)
noiseEntry.insert(END, "0.01")
gasTypeLabel=Label(right, text="Gas Type:")
gasTypeList=["Ethanol","Methane","Hydrogen","Propanol","Chlorine","Fluorine","Acetone","Neon","Helium"]
gasType = StringVar(root)
gasType.set("Ethanol")
gasTypeChoice=OptionMenu(right, gasType, *gasTypeList)
temperatureLabel= Label(right, text="Temperature (K)")
temperatureEntry = Entry(right)
temperatureEntry.insert(END, "298")
pressureLabel= Label(right, text="Pressure (atm)")
pressureEntry = Entry(right)
pressureEntry.insert(END, "1")
#output path for files
resultsPathLabel = Label(right, text="Results directory path: ")
resultsPathEntry=Entry(right)
resultsPathButton = Button(right, text="...", command=lambda: getOutputFile(resultsPathEntry))
resultsMinLabel= Label(right, text="Results start (s)")
resultsMinEntry = Entry(right)
resultsMinEntry.insert(END, "0.0")
resultsIntervalLabel= Label(right, text="Results interval (s)")
resultsIntervalEntry = Entry(right)
resultsIntervalEntry.insert(END, "0.5")
#save launch file
def saveFilamentSimFile():
global windF
f=open("GADEN.launch","w")
f.write(
"<launch>\n"+
" <node pkg=\"gaden_environment\" type=\"environment\" name=\"environment\" output=\"screen\">\n"+
" <param name=\"verbose\" value=\"false\"/>\n"+
" <param name=\"wait_preprocessing\" value=\"false\"/>\n"+
" <param name=\"number_of_CAD\" value=\""+str(len(modelFiles))+"\"/>\n"+
" <rosparam subst_value=\"True\">\n"
)
for ind in range(0,len(modelFiles)):
f.write(
" CAD_"+str(ind)+": file:// "+modelFiles[ind].get()+"\n"+
" CAD_"+str(ind)+"_color: [0.5,0.5,0.5]\n")
f.write(" </rosparam>\n\n"+
" <param name=\"occupancy3D_data\" value=\""+occupancy3DEntry.get()+"\"/>\n\n"+
" <param name=\"number_of_sources\" value=\"1\"/>\n"+
" <param name=\"source_0_position_x\" value=\""+xSourcePos.get()+"\"/>\n"+
" <param name=\"source_0_position_y\" value=\""+ySourcePos.get()+"\"/>\n"+
" <param name=\"source_0_position_z\" value=\""+zSourcePos.get()+"\"/>\n"+
" <rosparam>\n"+
" source_0_scale: 0.2\n"
" source_0_color: [0.0, 1.0, 0.0]\n"
" </rosparam>\n"+
"</node>\n\n")
f.write(
" <node pkg=\"gaden_filament_simulator\" type=\"filament_simulator\" name=\"filament_simulator\" output=\"screen\">\n"+
" <param name=\"verbose\" value=\""+str(verbose.get())+"\"/>\n"+
" <param name=\"wait_preprocessing\" value=\"false\"/>\n"+
" <param name=\"sim_time\" value=\""+simTimeEntry.get()+"\"/>\n"+
" <param name=\"time_step\" value=\""+timeStepEntry.get()+"\"/>\n"+
" <param name=\"num_filaments_sec\" value=\""+numFilamentsEntry.get()+"\"/>\n"+
" <param name=\"variable_rate\" value=\""+str(variableRate.get())+"\"/>\n"+
" <param name=\"filaments_stop_steps\" value=\"0\"/>\n"+
" <param name=\"ppm_filament_center\" value=\""+ppmEntry.get()+"\"/>\n"+
" <param name=\"filament_initial_std\" value=\""+initialStdEntry.get()+"\"/>\n"+
" <param name=\"filament_growth_gamma\" value=\""+growthGammaEntry.get()+"\"/>\n"+
" <param name=\"filament_noise_std\" value=\""+noiseEntry.get()+"\"/>\n"+
" <param name=\"gas_type\" value=\""+str(gasTypeList.index(gasType.get()))+"\"/>\n"+
" <param name=\"temperature\" value=\""+temperatureEntry.get()+"\"/>\n"+
" <param name=\"pressure\" value=\""+pressureEntry.get()+"\"/>\n"+
" <param name=\"concentration_unit_choice\" value=\"1\"/>\n"+
" <param name=\"occupancy3D_data\" value=\""+occupancy3DEntry.get()+"\"/>\n"+
" <param name=\"fixed_frame\" value=\"map\"/>\n\n"+
" <param name=\"wind_data\" value=\""+re.sub("0.csv_U","",windEntryFilament.get())+"\"/>\n"+
" <param name=\"wind_time_step\" value=\""+windDurationEntry.get()+"\"/>\n"+
" <param name=\"allow_looping\" value=\""+str(looping.get())+"\"/>\n"+
" <param name=\"loop_from_step\" value=\""+startWind.get()+"\"/>\n"+
" <param name=\"loop_to_step\" value=\""+endWind.get()+"\"/>\n\n"+
" <param name=\"source_position_x\" value=\""+xSourcePos.get()+"\"/>\n"+
" <param name=\"source_position_y\" value=\""+ySourcePos.get()+"\"/>\n"+
" <param name=\"source_position_z\" value=\""+zSourcePos.get()+"\"/>\n\n"+
" <param name=\"save_results\" value=\"1\"/>\n"+
" <param name=\"results_min_time\" value=\""+resultsMinEntry.get()+"\"/>\n"+
" <param name=\"results_time_step\" value=\""+resultsIntervalEntry.get()+"\"/>\n"+
" <param name=\"writeConcentrations\" value=\"true\"/>\n"+
" <param name=\"results_location\" value=\""+resultsPathEntry.get()+"\"/>\n"+
" </node>\n\n"
)
f.write(
" <node name=\"rviz\" pkg=\"rviz\" type=\"rviz\" args=\"-d $(find test_env)/10x6_empty_room/launch/ros/gaden.rviz\"/>\n"+
"</launch>")
savedLabel= Label(right, text="Launch file saved!")
nextButton = Button(right, text="Continue",command=player)
rightList.append([savedLabel])
rightList.append([nextButton])
for row in range(0,len(rightList)):
for col in range(0,len(rightList[row])):
rightList[row][col].grid(row=row, column=col)
saveFilButton = Button(right, text="Save", command=saveFilamentSimFile)
###################################################################################################################################
# GADEN_player.launch
###################################################################################################################################
def parseF(st):
try:
return "{:.2f}".format(float(st))
except ValueError:
return st
def player():
global leftList, rightList, startWind, endWind
looping.set(False)
for row in range(0,len(leftList)):
for col in range(0,len(leftList[row])):
leftList[row][col].grid_forget()
for row in range(0,len(rightList)):
for col in range(0,len(rightList[row])):
rightList[row][col].grid_forget()
leftList = []
rightList = []
leftList.append([occupancyLabel,occupancy3DEntry,occupancy3DButton])
leftList.append([modelsLabel])
for index in range(0,len(modelFiles)):
entryModel = modelFiles[index]
modelLabel = Label(left, text="Model "+str(index))
        # Bind the current entry as a default argument; a bare lambda would
        # capture only the last entryModel from the loop.
        modelFileButton = Button(left, text="...", command=lambda ent=entryModel: getColladaFile(ent))
leftList.append([modelLabel, entryModel, modelFileButton])
leftList.append([addFileButton])
leftList.append([initialIterationLabel, initialIterationEntry])
startWind = Entry(left)
endWind = Entry(left)
leftList.append([checkLooping])
for row in range(0,len(leftList)):
for col in range(0,len(leftList[row])):
leftList[row][col].grid(row=row, column=col)
####
rightList.append([logFilesLabel])
rightList.append([logFilesEntry, logFilesButton])
logFilesEntry.insert(0,
resultsPathEntry.get()+"/FilamentSimulation_gasType_"+str(gasTypeList.index(gasType.get()))
+"_sourcePosition_"+parseF(xSourcePos.get())+"_"+parseF(ySourcePos.get())+"_"+parseF(zSourcePos.get())+"_iteration_"
)
rightList.append([frequencyLabel, frequencyEntry])
try:
frequencyEntry.insert(0, int(1.0/float(resultsIntervalEntry.get())))
    except (ValueError, ZeroDivisionError):
pass
xSourcePos_2 = Entry(right)
xSourcePos_2.insert(0, xSourcePos.get())
ySourcePos_2 = Entry(right)
ySourcePos_2.insert(0, ySourcePos.get())
zSourcePos_2 = Entry(right)
zSourcePos_2.insert(0, zSourcePos.get())
SourcePosLabel=Label(right, text="Coordinates of gas source")
rightList.append([SourcePosLabel])
rightList.append([xSourcePos_2])
rightList.append([ySourcePos_2])
rightList.append([zSourcePos_2])
rightList.append([savePlayerButton])
for row in range(0,len(rightList)):
for col in range(0,len(rightList[row])):
rightList[row][col].grid(row=row, column=col)
def savePlayer():
f=open("GADEN_player.launch","w")
f.write(
"<launch>\n"+
" <node pkg=\"gaden_environment\" type=\"environment\" name=\"environment\" output=\"screen\">\n"+
" <param name=\"verbose\" value=\"false\"/>\n"+
" <param name=\"wait_preprocessing\" value=\"false\"/>\n"+
" <param name=\"number_of_CAD\" value=\""+str(len(modelFiles))+"\"/>\n"+
" <rosparam subst_value=\"True\">\n"
)
for ind in range(0,len(modelFiles)):
f.write(
" CAD_"+str(ind)+": file:// "+modelFiles[ind].get()+"\n"+
" CAD_"+str(ind)+"_color: [0.5,0.5,0.5]\n")
f.write(" </rosparam>\n\n"+
" <param name=\"occupancy3D_data\" value=\""+occupancy3DEntry.get()+"\"/>\n\n"+
" <param name=\"number_of_sources\" value=\"1\"/>\n"+
" <param name=\"source_0_position_x\" value=\""+xSourcePos.get()+"\"/>\n"+
" <param name=\"source_0_position_y\" value=\""+ySourcePos.get()+"\"/>\n"+
" <param name=\"source_0_position_z\" value=\""+zSourcePos.get()+"\"/>\n"+
" <rosparam>\n"+
" source_0_scale: 0.2\n"
" source_0_color: [0.0, 1.0, 0.0]\n"
" </rosparam>\n"+
"</node>\n\n")
f.write(
" <node pkg=\"gaden_player\" type=\"gaden_player\" name=\"gaden_player\" output=\"screen\">\n"+
" <param name=\"verbose\" value=\"false\" />\n"+
" <param name=\"player_freq\" value=\""+frequencyEntry.get()+"\"/>\n"+
" <param name=\"initial_iteration\" value=\""+initialIterationEntry.get()+"\"/>\n"+
" <param name=\"num_simulators\" value=\"1\" />\n"+
" <param name=\"simulation_data_0\" value=\""+logFilesEntry.get()+"\" />\n"+
" <param name=\"allow_looping\" value=\""+str(looping.get())+"\" />\n"+
" <param name=\"loop_from_iteration\" value=\""+startWind.get()+"\" />\n"+
" <param name=\"loop_to_iteration\" value=\""+endWind.get()+"\" />\n"
" </node>\n\n"
)
f.write(
" <node name=\"rviz\" pkg=\"rviz\" type=\"rviz\" args=\"-d $(find test_env)/10x6_empty_room/launch/ros/gaden.rviz\"/>\n"+
"</launch>")
doneLabel = Label(right, text="Done!")
rightList.append([doneLabel])
for row in range(0,len(rightList)):
for col in range(0,len(rightList[row])):
rightList[row][col].grid(row=row, column=col)
def getLogs(ent):
file= filedialog.askopenfilename()
ent.delete(0,END)
ent.insert(0, re.sub("iteration_.*","iteration_",file))
initialIterationLabel = Label(left, text="Initial iteration:")
initialIterationEntry = Entry(left)
initialIterationEntry.insert(0, "0")
frequencyLabel = Label(right, text="Playback frequency (Hz):")
frequencyEntry = Entry(right)
logFilesLabel= Label(right, text="Simulation log files:")
logFilesEntry = Entry(right)
logFilesButton = Button(right, text="...", command=lambda: getLogs(logFilesEntry))
savePlayerButton = Button(right, text="Save", command=savePlayer)
root.mainloop()
| 23,348 | Python | 38.176174 | 131 | 0.620953 |
elsayedelsheikh/dishwaher/README.md
# DishWasher
Calculate the dimensions of an object (cm)
```python
import omni.isaac.core.utils.bounds as bounds_utils
cache = bounds_utils.create_bbox_cache()
bounds = bounds_utils.compute_aabb(cache, prim_path="/Root/bin/Visuals/FOF_Mesh_Magenta_Box")
length_x = bounds[3] - bounds[0]
length_y = bounds[4] - bounds[1]
length_z = bounds[5] - bounds[2]
print("length of x:",length_x)
print("length of y:",length_y)
print("length of z:",length_z)
```
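The lengths above are in stage units. If your stage is not authored in centimeters, a conversion sketch (assuming the standard `metersPerUnit` stage metadata) might look like this:
```python
import omni.usd
from pxr import UsdGeom

stage = omni.usd.get_context().get_stage()
meters_per_unit = UsdGeom.GetStageMetersPerUnit(stage)  # e.g. 0.01 for cm stages
length_x_cm = length_x * meters_per_unit * 100.0
```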
| 451 | Markdown | 22.789472 | 93 | 0.707317 |
elsayedelsheikh/dishwaher/omni.isaac.dish.washer.manipulator/docs/CHANGELOG.md
# Changelog
## [0.1.0] - 2024-05-28
### Added
- Initial version of omni.isaac.dish.washer.manipulator Extension
| 115 | Markdown | 13.499998 | 65 | 0.695652 |
NVIDIA-Omniverse/kit-extension-sample-scatter/README.md | # Scatter Kit Extension Sample
## [Scatter Tool (omni.example.ui_scatter_tool)](exts/omni.example.ui_scatter_tool)

### About
This Extension uses `Scatter Properties` to scatter a selected primitive on the X, Y, and Z Axis per object count, distance, and randomization set by the user.
### [README](exts/omni.example.ui_scatter_tool)
See the [README for this extension](exts/omni.example.ui_scatter_tool) to learn more about it, including how to use it.
### [Tutorial](exts/omni.example.ui_scatter_tool/Tutorial/Scatter_Tool_Guide.md)
This extension sample also includes a step-by-step tutorial to accelerate your growth as you learn to build your own Omniverse Kit extensions.
In the tutorial you will learn how to build upon modules provided to you to create the `Scatter Window UI` and the `Scatter Properties`.
[Get started with the tutorial.](exts/omni.example.ui_scatter_tool/Tutorial/Scatter_Tool_Guide.md)
## Adding This Extension
To add this extension to your Omniverse app:
1. Go into: Extension Manager -> Gear Icon -> Extension Search Path
2. Add this as a search path: `git://github.com/NVIDIA-Omniverse/kit-extension-sample-scatter?branch=main&dir=exts`
## Linking with an Omniverse app
For a better developer experience, it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. A convenience script to use is included.
Run:
```bash
> link_app.bat
```
There is also an analogous `link_app.sh` for Linux. If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```bash
> link_app.bat --app code
```
You can also just pass a path to create the link to:
```bash
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2022.1.3"
```
## Contributing
The source code for this repository is provided as-is and we are not accepting outside contributions.
| 2,109 | Markdown | 37.363636 | 193 | 0.760076 |
NVIDIA-Omniverse/kit-extension-sample-scatter/exts/omni.example.ui_scatter_tool/Tutorial/Scatter_Tool_Guide.md | 
# How to Create a Scatter Tool
In this tutorial, you learn how to create a scatter tool that can randomize prims around the world space. You create a tool that can scatter on X, Y, and Z axes by the amount of objects, their distance, and their random count. This tutorial is well suited for intermediate engineers.
## Learning Objectives
- Use the Omniverse UI framework
- Add an Extension from your local path
- Set scatter properties
- Analyze a random number generator
- Use the USD API to set up a PointInstancer
- Understand the `undo` function
## Prerequisites
Before you begin, install [Omniverse Code](https://docs.omniverse.nvidia.com/app_code/app_code/overview.html) version 2022.1.2 or higher.
We recommend that you understand the concepts in the following tutorials before proceeding:
- [Extension Environment Tutorial](https://github.com/NVIDIA-Omniverse/ExtensionEnvironmentTutorial)
- [Spawn Prims Extension Tutorial](https://github.com/NVIDIA-Omniverse/kit-extension-sample-spawn-prims)
## Step 1: Install the Starter Project Extension
In this section, you download our sample Extension project and install it in Omniverse Code.
### Step 1.1: Download the Scatter Project
Clone the `tutorial-start` branch of the `kit-extension-sample-scatter` [GitHub repository](https://github.com/NVIDIA-Omniverse/kit-extension-sample-scatter/tree/tutorial-start):
```bash
git clone -b tutorial-start https://github.com/NVIDIA-Omniverse/kit-extension-sample-scatter.git
```
This repository contains the assets you use in this tutorial.
### Step 1.2: Open the Extensions Tab
In Omniverse Code, click the _Extensions_ tab:

> **Note:** If you don't see the *Extensions* panel, enable **Window > Extensions**:
>
> 
### Step 1.3: Add the Extension From Your Local Path
In the *Extensions* tab, click the **gear** icon to open *Extension Search Paths*. Then, click the **green plus** icon to add a new path. Finally, copy and paste the local path of the `exts` folder from the `tutorial-start` branch:

Here, you imported the extension into the *Extension Manager* in Omniverse Code by adding the local path of the `tutorial-start` branch you cloned from our [GitHub repository](https://github.com/NVIDIA-Omniverse/kit-extension-sample-scatter/tree/tutorial-start).
### Step 1.4: Activate Your New Extension
Type "scatter" into the search box at the top of the *Extensions* list, and activate the `OMNI.UI WINDOW SCATTER` Extension:

Now that your Extension is imported and active, you can make changes to the code and see them in your Application.
## Step 2: Implement `_build_source()`
This tutorial starts with a blank *Scatter Window*. In the following steps, you learn to use [Omniverse UI Framework](https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.ui/docs/index.html) to build the window's user interface (UI).
### Step 2.1: Navigate to `window.py`
From the root directory of the project, navigate to `exts/omni.example.ui_scatter_tool/omni/example/ui_scatter_tool/window.py`.
### Step 2.2: Create a Collapsable Frame
Create a [`CollapsableFrame`](https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.ui/docs/index.html#omni.ui.CollapsableFrame) in `_build_source()` as the first component of your UI:
```python
def _build_source(self):
"""Build the widgets of the "Source" group"""
# Create frame
with ui.CollapsableFrame("Source", name="group"):
```
`_build_source()` creates a place to display the source path of the prim you want to scatter. Here, you added a `CollapsableFrame`, which is a frame widget from Omniverse UI Framework that can hide or show its content.
### Step 2.3: Lay Out Your Frame
Use a [`VStack`](https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.ui/docs/index.html#omni.ui.VStack) to create a column and an [`HStack`](https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.ui/docs/index.html#omni.ui.HStack) to create a row:
```python
def _build_source(self):
"""Build the widgets of the "Source" group"""
# Create frame
with ui.CollapsableFrame("Source", name="group"):
# Create column
with ui.VStack(height=0, spacing=SPACING):
# Create row
with ui.HStack():
```
The `VStack` is a vertical stack container that holds one `HStack`, a horizontal stack container.
### Step 2.4: Create and Name an Input Field
Create a [`Label`](https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.ui/docs/index.html#omni.ui.Label) and a [`StringField`](https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.ui/docs/index.html#omni.ui.StringField):
```python
def _build_source(self):
"""Build the widgets of the "Source" group"""
# Create frame
with ui.CollapsableFrame("Source", name="group"):
# Create column
with ui.VStack(height=0, spacing=SPACING):
# Create row
with ui.HStack():
# Give name of field
ui.Label("Prim", name="attribute_name", width=self.label_width)
ui.StringField(model=self._source_prim_model)
```
The `StringField` is an input field that accepts the prim. The `Label`, called "Prim", describes the field to your users.
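If you are curious what backs the field, `self._source_prim_model` is a value model the window creates ahead of time; a minimal stand-in (an assumption, not the sample's exact code) would be:
```python
import omni.ui as ui

source_prim_model = ui.SimpleStringModel()
ui.StringField(model=source_prim_model)
path = source_prim_model.as_string  # read the user's input later
```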
### Step 2.5: Populate Your Field
Add a [`Button`](https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.ui/docs/index.html#omni.ui.Button) that takes the user's current selection and populates the `StringField`:
```python
def _build_source(self):
"""Build the widgets of the "Source" group"""
# Create frame
with ui.CollapsableFrame("Source", name="group"):
with ui.VStack(height=0, spacing=SPACING):
with ui.HStack():
# Give name of field
ui.Label("Prim", name="attribute_name", width=self.label_width)
ui.StringField(model=self._source_prim_model)
# Button that puts the selection to the string field
ui.Button(
" S ",
width=0,
height=0,
style={"margin": 0},
clicked_fn=self._on_get_selection,
tooltip="Get From Selection",
)
```
This `Button`, labeled "S", places the prim selection in the `StringField`.
## Step 3: Implement `_build_scatter()`
Now that you've built functionality that selects a source prim to scatter, you need to implement `_build_scatter()`.
### Step 3.1: Create a Collapsable Frame
Start your `_build_scatter()` interface with a `CollapsableFrame`:
```python
def _build_scatter(self):
"""Build the widgets of the "Scatter" group"""
with ui.CollapsableFrame("Scatter", name="group"):
```
### Step 3.2: Lay Out Your Frame
Create one column with three rows, each with a `Label` and an input:
```python
def _build_scatter(self):
"""Build the widgets of the "Scatter" group"""
with ui.CollapsableFrame("Scatter", name="group"):
# Column
with ui.VStack(height=0, spacing=SPACING):
# Row
with ui.HStack():
ui.Label("Prim Path", name="attribute_name", width=self.label_width)
ui.StringField(model=self._scatter_prim_model)
# Row
with ui.HStack():
ui.Label("Prim Type", name="attribute_name", width=self.label_width)
ui.ComboBox(self._scatter_type_model)
# Row
with ui.HStack():
ui.Label("Seed", name="attribute_name", width=self.label_width)
ui.IntDrag(model=self._scatter_seed_model, min=0, max=10000)
```
Like before, you've created a layout with `VStack` and `HStack`, but this time, your column includes three rows. Each row has a `Label` and an input. The first row is labeled "Prim Path" and accepts a string. The second row is labeled "Prim Type" and accepts a [`ComboBox`](https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.ui/docs/index.html#omni.ui.ComboBox) selection. The third row is labeled "Seed" and accepts the result of an [integer drag widget](https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.ui/docs/index.html#omni.ui.IntDrag).
## Step 4: Implement `_build_axis()`
Implement the `_build_axis()` UI to set the scatter parameters on the X, Y, and Z axes.
### Step 4.1: Lay Out Your Frame
In `_build_axis()`, establish a structure similar to `_build_scatter()`, with a Collapsable Frame, one column, and three rows:
```python
def _build_axis(self, axis_id, axis_name):
"""Build the widgets of the "X" or "Y" or "Z" group"""
with ui.CollapsableFrame(axis_name, name="group"):
# Column
with ui.VStack(height=0, spacing=SPACING):
# Row
with ui.HStack():
ui.Label("Object Count", name="attribute_name", width=self.label_width)
ui.IntDrag(model=self._scatter_count_models[axis_id], min=1, max=100)
# Row
with ui.HStack():
ui.Label("Distance", name="attribute_name", width=self.label_width)
ui.FloatDrag(self._scatter_distance_models[axis_id], min=0, max=10000)
# Row
with ui.HStack():
ui.Label("Random", name="attribute_name", width=self.label_width)
ui.FloatDrag(self._scatter_random_models[axis_id], min=0, max=10000)
```
Like with `_build_scatter()`, each row has a `Label` and an input. This time, the first row is labeled "Object Count" and accepts the result of an integer drag widget. The second row is labeled "Distance" and accepts the results of a [`FloatDrag` widget](https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.ui/docs/index.html#omni.ui.FloatDrag). The third row is labeled "Random" and also accepts the results of a `FloatDrag` widget.
Even though there are three axes on which you want to scatter your prim, you only need one function, since you can reuse it for each axis.
## Step 5: Implement `_build_fn()`
Now that you've established the user interface for the *Scatter Window* in a collection of functions, you implement `_build_fn()`, which calls those functions and draws their UIs to the screen.
### Step 5.1: Lay Out Your Frame
In `_build_fn()`, lay out a [`ScrollingFrame`](https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.ui/docs/index.html#omni.ui.ScrollingFrame) with a single column composed of your previously-built UIs:
```python
def _build_fn(self):
"""
The method that is called to build all the UI once the window is
visible.
"""
# Frame
with ui.ScrollingFrame():
# Column
with ui.VStack(height=0):
# Build it
self._build_source()
self._build_scatter()
self._build_axis(0, "X Axis")
self._build_axis(1, "Y Axis")
self._build_axis(2, "Z Axis")
# The Go button
ui.Button("Scatter", clicked_fn=self._on_scatter)
```
Here, you used `ScrollingFrame` instead of `CollapsableFrame` so that the frame can scroll to accommodate all the UIs. In your `VStack`, you call all of your UI build functions in sequence to establish a visual hierarchy. You call `_build_axis()` three times with arguments that identify the axis it represents. Finally, you add a **Scatter** button that scatters the selected prim.
### Step 5.2: Review Your Work
In Omniverse Code, review your *Scatter Window*:

> **Note:** If you don't see your changes to the window, try deactivating and reactivating the Extension:
>
> 
## Step 6: Implement `_on_scatter()`
In this step, you implement`_on_scatter()`, which scatters the prim when the **Scatter** button is clicked.
### Step 6.1: Implement the Scatter Logic
Implement the logic to scatter the selected prims:
```python
def _on_scatter(self):
"""Called when the user presses the "Scatter" button"""
    prim_names = [i.strip() for i in self._source_prim_model.as_string.split(",") if i.strip()]
    if not prim_names:
        prim_names = get_selection()
    if not prim_names:
        return
transforms = scatter(
count=[m.as_int for m in self._scatter_count_models],
distance=[m.as_float for m in self._scatter_distance_models],
randomization=[m.as_float for m in self._scatter_random_models],
id_count=len(prim_names),
seed=self._scatter_seed_model.as_int,
)
duplicate_prims(
transforms=transforms,
prim_names=prim_names,
target_path=self._scatter_prim_model.as_string,
mode=self._scatter_type_model.get_current_item().as_string,
)
```
We defined `prim_names` in the sample code. You got the scatter properties from the models and passed them to `duplicate_prims()`, which scatters the prims for you.
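`duplicate_prims()` lives in the sample's utility module. As a rough sketch (an assumption, not the sample's exact code), its PointInstancer branch could simply dispatch to the undoable command that `command.py` registers, which is discussed in the Further Reading section below:
```python
import omni.kit.commands

def duplicate_prims(transforms, prim_names, target_path, mode):
    # Sketch only: hand the scatter off to the undoable Kit command.
    if mode == "PointInstancer":
        omni.kit.commands.execute(
            "ScatterCreatePointInstancer",
            path_to=target_path,
            transforms=list(transforms),  # materialize the generator
            prim_names=prim_names,
        )
```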
> **Optional Challenge:** While we provide some arrays and loops for the properties, we encourage you to experiment with your own.
### Step 6.2: Review Your Work
In your *Scatter Window*, click `Scatter`:

Your prim scatters using the properties set above.
## Congratulations
Great job completing this tutorial! Interested in learning more about the ins and outs of the code? Continue reading.
### Further Reading: Understanding `scatter.py`
This section introduces `scatter.py` and briefly showcases its function in the scatter tool.
Navigate to `scatter.py` in your `exts` folder hierarchy. This script is where `on_scatter()` in `window.py` pulls its information from. Notice the following arguments in `scatter.py`:
```python
def scatter(
count: List[int], distance: List[float], randomization: List[float], id_count: int = 1, seed: Optional[int] = None
):
...
```
These arguments match up with the properties in `transforms` from the previous step of `on_scatter`.
The docstring below provides a description for each parameter:
```python
"""
Returns generator with pairs containing transform matrices and ids to
arrange multiple objects.
### Arguments:
`count: List[int]`
            Number of matrices to generate per axis
`distance: List[float]`
The distance between objects per axis
`randomization: List[float]`
Random distance per axis
`id_count: int`
            Count of different ids
`seed: int`
If seed is omitted or None, the current system time is used. If seed
is an int, it is used directly.
""""
```
Below this comment, the loop randomly generates a set of points and creates a matrix with position randomization for each axis:
```python
# Initialize the random number generator.
random.seed(seed)
for i in range(count[0]):
x = (i - 0.5 * (count[0] - 1)) * distance[0]
for j in range(count[1]):
y = (j - 0.5 * (count[1] - 1)) * distance[1]
for k in range(count[2]):
z = (k - 0.5 * (count[2] - 1)) * distance[2]
# Create a matrix with position randomization
result = Gf.Matrix4d(1)
result.SetTranslate(
Gf.Vec3d(
x + random.random() * randomization[0],
y + random.random() * randomization[1],
z + random.random() * randomization[2],
)
)
id = int(random.random() * id_count)
yield (result, id)
```
`scatter.py` is where you can make adjustments to create different types of scatter, such as scattering across geometry or a scatter driven by a texture.
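For a quick sanity check of the generator, a sketch like this can be run in the Script Editor (the import path is assumed from the extension name; adjust it to your install):
```python
from omni.example.ui_scatter_tool.scatter import scatter  # assumed module path

pairs = list(scatter(count=[2, 2, 1], distance=[10.0, 10.0, 0.0],
                     randomization=[1.0, 1.0, 0.0], id_count=2, seed=42))
print(len(pairs))            # 4 pairs: 2 * 2 * 1
matrix, proto_id = pairs[0]  # a Gf.Matrix4d and an int prototype index
```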
### Further Reading: Understanding `command.py`
This section introduces `command.py` and briefly showcases its function in the scatter tool.
Navigate to `command.py` in the `exts` folder, and review what's inside. At the start of the `ScatterCreatePointInstancerCommand` class, the docstring provides descriptions for each of the parameters:
```python
"""
Create PointInstancer undoable **Command**.
### Arguments:
`path_to: str`
The path for the new prims
`transforms: List`
Pairs containing transform matrices and ids to apply to new objects
`prim_names: List[str]`
Prims to duplicate
"""
```
Below the comment, these arguments are initialized, the USD stage is set, and the list of tuples is unzipped:
```python
def __init__(
self,
path_to: str,
transforms: List[Tuple[Gf.Matrix4d, int]],
prim_names: List[str],
stage: Optional[Usd.Stage] = None,
context_name: Optional[str] = None,
):
omni.usd.commands.stage_helper.UsdStageHelper.__init__(self, stage, context_name)
self._path_to = path_to
# We have it like [(tr, id), (tr, id), ...]
        # It will be transformed to [[tr, tr, ...], [id, id, ...]]
unzipped = list(zip(*transforms))
self._positions = [m.ExtractTranslation() for m in unzipped[0]]
self._proto_indices = unzipped[1]
self._prim_names = prim_names.copy()
```
Following that, the `PointInstancer` command is set up. This is where the USD API is used to create the geometry and the points during the scatter.
```python
def do(self):
stage = self._get_stage()
# Set up PointInstancer
instancer = UsdGeom.PointInstancer.Define(stage, Sdf.Path(self._path_to))
attr = instancer.CreatePrototypesRel()
for name in self._prim_names:
attr.AddTarget(Sdf.Path(name))
instancer.CreatePositionsAttr().Set(self._positions)
instancer.CreateProtoIndicesAttr().Set(self._proto_indices)
```
Finally, the `undo()` function is defined. This is called when the user undoes the scatter to restore the prior state of the stage. In this case, the state is restored by simply deleting the PointInstancer. The reason `delete_cmd.do()` is used rather than calling `omni.kit.commands.execute()` is so that the "DeletePrimsCommand" doesn't show up in the Commands History.
```python
def undo(self):
delete_cmd = omni.usd.commands.DeletePrimsCommand([self._path_to])
delete_cmd.do()
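        # Illustrative note (not part of the sample): after running
        # omni.kit.commands.execute("ScatterCreatePointInstancer", ...),
        # calling omni.kit.undo.undo() invokes this method and removes
        # the PointInstancer from the stage.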
```
| 18,268 | Markdown | 38.888646 | 569 | 0.680315 |
NVIDIA-Omniverse/sample-ackermann-amr/pyproject.toml
[tool.poetry]
name = "sample-ackermann-amr"
version = "0.1.0"
description = ""
authors = ["Your Name <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.10"
torch = "^2.3.0"
torchvision = "^0.18.0"
pynput = "^1.7.6"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 332 | TOML | 17.499999 | 41 | 0.659639 |
NVIDIA-Omniverse/sample-ackermann-amr/README.md | # ackermann AMR Sample
This repository includes a working sample (source code and assets) that can be used to train an AI model to steer an ackermann AMR through a scene.
### README
See the README files for this to learn more about it, including how to use it:
* [ackermann AMR Controller](exts/omni.sample.ackermann_amr_controller/docs/README.md)
* [ackermann AMR Trainer](exts/omni.sample.ackermann_amr_trainer/docs/README.md)
* [ROS Packages](ros_packages/f1_tenth_trainer/docs/README.md)
## Tutorial
Follow the [step-by-step tutorial](/tutorial/tutorial.md) to learn how to train a self-driving car.
## Contributing
The source code for this repository is provided as-is and we are not accepting outside contributions.
| 728 | Markdown | 39.499998 | 147 | 0.777473 |
NVIDIA-Omniverse/sample-ackermann-amr/tutorial/tutorial.md
# Train an ackermann Autonomous Mobile Robot to Drive Itself with NVIDIA Omniverse and ROS
Follow along with this tutorial to learn how to use a combination of CAD templates, NVIDIA Omniverse Extensions, and Robot Operating System (ROS) to build and train your own virtual ackermann autonomous mobile robot (AMR).
## Learning Objectives
* Learn the entire workflow to take an Autonomous Mobile Robot (AMR) from concept to reality
* See a USD pipeline in action
* Create a USD scene to train an AMR
* Use Robot Operating System (ROS) 2 to manually control an AMR
* Write an Omniverse extension to train an AMR
* Create ROS 2 nodes to autonomously control the robot
## Prerequisites
* Ubuntu 22.04
* Isaac Sim 2023.1.1 or higher
* ROS 2 Humble
* Clone this Repository
* Install ROS dependencies. (This can be done by running `prerequisites.sh` in the repository's `ros2_f1_tenth_trainer` folder)
## 1 From Concept to Simulation
An ackermann AMR is an autonomous mobile robot that steers and drives like a car. It has four wheels, two or four-wheel drive, and the two front wheels steer. In this tutorial you will be shown an end-to-end workflow that can be used to train a 1/10 scale remote control car to drive itself in a virtual environment.
### 1.1 The Ackermann AMR Onshape Template
The first step to develop a real-world AMR is to model one in CAD.
#### 1.1.1 Open the OnShape ackermann AMR Template
Open the OnShape <a href="https://cad.onshape.com/documents/836767cd08a2800e8a9d4cb0/w/91eb0ff38b3ab8b03ea0db77/e/9015511e4d7c44d4d48e3cf2?renderMode=0&uiState=65bd4308b58f851a1d4b4096" target="_blank">ackermann AMR template</a>
<figure style="text-align: center;">
<img src="Images/OnShapeTemplate.png" alt="ackermann AMR Template" width="800"/>
<figcaption>Onshape Ackermann AMR Template</figcaption>
</figure>
The ackermann AMR template makes this easy; if you wanted to customize the template, you would just enter your car's measurements into the parameter table and then import the model into Omniverse. If you are working with a Traxxas Slash-based vehicle such as the F1Tenth platform, you do not need to change anything, because the template already matches that suspension geometry!
### 1.2 The ackermann AMR Template USD Pipeline
The Ackermann AMR template has been imported directly into Omniverse with the OnShape Importer extension. The resulting USD file has geometry and basic joint definitions, but is far from ready to simulate. This section demonstrates a powerful USD pipeline that automatically prepares an imported ackermann AMR template model for simulation.
#### 1.2.1 Open Isaac Sim
Open Isaac Sim, making sure that under `ROS Bridge Extension`, `omni.isaac.ros2_bridge` is selected.
<figure style="text-align: center;">
<img src="Images/Launch_Isaac.gif" alt="Launch Isaac Sim" width="800"/>
<figcaption>Launch Isaac Sim</figcaption>
</figure>
#### 1.2.2 Open the Fully Rigged Vehicle
Go to the content browser tab at the bottom of the screen and enter `/home/nvidia/source/sample-ackermann-amr/assets/` into its address bar, where *nvidia* should be replaced with your username. This will open a folder with a number of useful assets for this tutorial. Double click on the *F1Tenth.usd* file to open it.
<figure style="text-align: center;">
<img src="Images/Open_F1Tenth.gif" alt="Open F1Tenth.usd" width="800"/>
<figcaption>Open F1Tenth.usd</figcaption>
</figure>
#### 1.2.3 Simulate the Vehicle
Press the `play` button to see the car simulate. `Shift+click` on the car and then drag the mouse to interact with it during simulation. Press the `stop` button to reset the simulation. Take note that the simulation behaves well and that there are no warnings or errors.
<figure style="text-align: center;">
<img src="Images/F1Tenth_Simulation.gif" alt="Simulate F1Tenth.usd" width="800"/>
<figcaption>Simulate the Working Vehicle</figcaption>
</figure>
> **_NOTE_**: If at any time you edit this asset, it may not work properly. The `Root Layer` is intentionally left empty for this very reason: if your edits show up as deltas there in the Layer pane, you can delete them. If that doesn't work, just re-open `F1Tenth.usd` and do not save your changes. Accidentally saved your changes? Check out the appendix at the end to see how to reset your files.
#### 1.2.4 Mute All Layers
Open the Layer tab and mute each layer by clicking on the `eye` icon until the car disappears completely.
<figure style="text-align: center;">
<img src="Images/Mute_Layers.gif" alt="Mute All Layers in F1Tenth.usd" width="800"/>
<figcaption>Mute All Layers in F1Tenth.usd</figcaption>
</figure>
USD Layers do not simply serve for organization; they work more like macros! Layers have a list of changes, or deltas, that are made to the USD stage tree. In this case, we import the CAD template and then these layers automatically prepare it for simulation.
> **_NOTE_**: Roughly speaking, prim attribute values (opinions) in a layer take precedence over those in layers below it. For more detailed information on the prioritization of layer opinions (LIVRPS), please read the [USD Documentation](https://openusd.org/release/glossary.html#livrps-strength-ordering)
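If you prefer to script this muting workflow rather than clicking eye icons, a minimal sketch with the USD API looks like the following (depending on your setup, the layer identifiers may need to be full file paths):
```python
import omni.usd

stage = omni.usd.get_context().get_stage()
# Same effect as toggling the eye icon in the Layer pane:
stage.MuteLayer("Joint_Rigging.usd")
stage.UnmuteLayer("Joint_Rigging.usd")
```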
#### 1.2.5 Unmute Full_Car_base.usd
Unmute `Full_Car_base.usd` by clicking on its `eye` icon to make the car appear.
Select `Full_Car_base.usd` and press the `F` key to fit the robot to the screen.
<figure style="text-align: center;">
<img src="Images/Unmute_Full_Car_Base.gif" alt="Unmute Full_Car_Base.usd" width="800"/>
<figcaption>Unmute Full_Car_Base.usd</figcaption>
</figure>
This layer is created by the OnShape import and is the base of the asset. Press the `play` button to start a simulation; you will notice that the car simply falls, has incorrect joint rigging, and produces quite a few errors. Press the `stop` button to reset the simulation.
#### 1.2.6 Unmute Reparenting.usd, Mass_Properties.usd and Joint_Rigging.usd
Unmute `Reparenting.usd`, `Mass_Properties.usd`, and `Joint_Rigging.usd`.
Expand `Joint_Rigging.usd`, select `World` → `Full_Car` and press the `F` key to fit the ackermann robot to the screen.
<figure style="text-align: center;">
<img src="Images/Unmute_Three.gif" alt="Unmute the Next Three Layers" width="800"/>
<figcaption>Unmute the Next Three Layers</figcaption>
</figure>
These layers rearrange the stage tree, apply correct mass properties and fix a number of issues with the joints.
#### 1.2.7 Simulate Robot
Press `play` now; the car should once again simulate well and without errors.
<figure style="text-align: center;">
<img src="Images/Bounce.gif" alt="Simulate Vehicle with Rigged Joints" width="800"/>
<figcaption>Simulate the Vehicle with Rigged Joints</figcaption>
</figure>
#### 1.2.8 Unmute Remaining Layers
Unmute `Materials.usd`, `Cameras.usd`, and `Make_Asset.usd`.
<figure style="text-align: center;">
<img src="Images/Unmute_Remaining.gif" alt="Unmute Remaining Layers" width="800"/>
<figcaption>Unmute Remaining Layers</figcaption>
</figure>
If you replace `Full_Car_Base.usd` with a new CAD import, these layers will make the same changes to that version of the template, giving you an asset that is ready to drive around a scene.
### 1.3 Add a ROS Isaac Sim bridge Action Graph to the F1Tenth Car
Next we will add two Robot Operating System (ROS) action graphs to the `F1Tenth` asset so that it can send images to and be controlled by ROS.
#### 1.3.1 Add `ROS_Actiongraph.usd` to the Pipeline
Drag `ROS_Actiongraph.usd` from the content browser and drop it into the **Layer** window, making sure it is above all other sub-layers.
<figure style="text-align: center;">
<img src="Images/Add_ActionGraphs.gif" alt="Add ROS Action Graphs to the Scene" width="800"/>
<figcaption>Add ROS Action Graphs to the Scene</figcaption>
</figure>
#### 1.3.2 See the Changes to the Scene
Navigate to the stage tree to see the two action graph nodes that have been added to the scene.
<figure style="text-align: center;">
<img src="Images/Added_ActionGraphs.gif" alt="View Added Action Graphs in the Stage Tree" width="800"/>
<figcaption>View Added Action Graphs in the Stage Tree</figcaption>
</figure>
This layer behaves as a macro that adds the two action graphs to the stage tree under the *Full_Car* prim. The `ROS_Sensors` graph publishes the cameras and sensors from the template for other ROS nodes to subscribe to. The `ROS_Ackermann_Drive` graph listens for ROS nodes that send driving commands so that it can be controlled by other ROS nodes.
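Once the simulation is playing, you can sanity-check that the graphs are visible on the ROS side by listing topics from a throwaway `rclpy` node (the exact topic names depend on the graphs; the drive topic used later in this tutorial is `ackermann_cmd`):
```python
import time

import rclpy
from rclpy.node import Node

rclpy.init()
node = Node("topic_check")
time.sleep(1.0)  # give DDS discovery a moment
print(node.get_topic_names_and_types())
node.destroy_node()
rclpy.shutdown()
```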
> **_NOTE_**: You can open `F1Tenth_ROS.usd` to catch up to this point.
### 1.4 Create a USD Scene of a Race Track
Now that the car is ready for physics simulation, we will create a scene with a race track and add the car to it.
#### 1.4.1 Open `Racing_Grid_Start.usd`
Find `Racing_Grid_Start.usd` in the content browser and double click it to open it.
<figure style="text-align: center;">
<img src="Images/Open_Racing_Grid_Start.gif" alt="Open Racing_Grid_Start.usd" width="800"/>
<figcaption>Open Racing_Grid_Start.usd</figcaption>
</figure>
#### 1.4.2 Add the `F1Tenth_ROS.usd` to the scene
To add the car to the scene, drag the `F1Tenth_ROS.usd` asset from the content browser and drop it into the stage tree window.
<figure style="text-align: center;">
<img src="Images/Add_Car.gif" alt="Add the Car to the Scene" width="800"/>
<figcaption>Add the Car to the Scene</figcaption>
</figure>
#### 1.4.3 Rotate the Robot 90 degrees
Click on the `F1Tenth_ROS` prim in the stage tree and, in the properties pane, set its `Z` rotation to `-90`.
<figure style="text-align: center;">
<img src="Images/Rotate_Car.gif" alt="Rotate the Car -90 degrees about the Z-axis" width="800"/>
<figcaption>Rotate the Car -90 degrees about the Z-axis</figcaption>
</figure>
#### 1.4.4 Simulate the Robot
You can press `play` to start the physics simulation. Press `stop` to reset the simulation. Please keep in mind that the car will not roll during simulation because the wheels are locked until they receive a ROS message.
<figure style="text-align: center;">
<img src="Images/Car_Simulation.gif" alt="Simulate the Car in the Scene" width="800"/>
<figcaption>Simulate the Car in the Scene</figcaption>
</figure>
> **_NOTE_**: To catch up to this point, open `Racing_Grid.usd` in the content browser.
### 1.5 Write a ROS 2 Node to Manually Control the Robot
Next we will write a ROS 2 node in python that can manually control the robot.
> **_NOTE_**: If you are new to Python, the indentation of your code is critical! Incorrectly indented code will not behave correctly, because indentation defines scope in Python. The comment for each line of code you will insert is indented correctly, so if you copy-paste the code from this tutorial, align it with the matching comments in the source files, and double-check that your indentation matches this tutorial.
#### 1.5.1 Open the `sample-ackermann-amr` Folder in Visual Studio Code
In a terminal run the following commands to change to the sample-ackermann-amr directory and open it in Visual Studio Code:
```bash
cd ~/source/sample-ackermann-amr/
code .
```
<figure style="text-align: center;">
<img src="Images/Open_VSCode.gif" alt="Launch VS Code" width="800"/>
<figcaption>Launch VS Code</figcaption>
</figure>
#### 1.5.2 Open *teleop_ackermann_key_start*
Next, open *teleop_ackermann_key_start.py* found in *ros2_f1_tenth_trainer*.
<figure style="text-align: center;">
<img src="Images/Open_Teleop.gif" alt="Open teleop_ackermann_key_start.py" width="800"/>
<figcaption>Open <em>teleop_ackermann_key_start.py</em></figcaption>
</figure>
This ROS node uses `pynput` to access keyboard events. It detects when a key has been pressed and publishes that as a ROS message for other ROS nodes to listen and respond to.
#### 1.5.3 Respond to Key Presses
Second, in the `on_press` function, add the following code:
```python
# 1.5.3 Create ROS Messages based on keys pressed
if key.char == 'w':
self.drive_msg.drive.speed = 2.0 # Drive Forward
elif key.char == 's':
self.drive_msg.drive.speed = -2.0 # Drive Backward
elif key.char == 'a':
self.drive_msg.drive.steering_angle = 0.523599 # Turn left by 30 degrees
elif key.char == 'd':
self.drive_msg.drive.steering_angle = -0.523599 # Turn right by 30 degrees
```
This code responds to `wasd` key presses by steering and driving the car.
<details>
<summary>Completed Code</summary>
```Python
def __init__(self):
super().__init__('keyboard_teleop')
# 1. Create the publisher and message
self.publisher_ = self.create_publisher(AckermannDriveStamped, 'ackermann_cmd', 10)
self.drive_msg = AckermannDriveStamped()
self.listener = keyboard.Listener(on_press=self.on_press, on_release=self.on_release)
self.listener.start()
self.timer_period = 0.1 # seconds
self.timer = self.create_timer(self.timer_period, self.publish_cmd)
def on_press(self, key):
try:
# 2. Create ROS Messages based on keys pressed
if key.char == 'w':
self.drive_msg.drive.speed = 2.0 # Drive Forward
elif key.char == 's':
self.drive_msg.drive.speed = -2.0 # Drive Backward
elif key.char == 'a':
self.drive_msg.drive.steering_angle = 0.523599 # Turn left by 30 degrees
elif key.char == 'd':
self.drive_msg.drive.steering_angle = -0.523599 # Turn right by 30 degrees
except AttributeError:
pass
def on_release(self, key):
try:
# 3. If no keys are pressed, stop the car
if key.char in ['w', 's']:
self.drive_msg.drive.speed = 0.0 # stop driving
elif key.char in ['a', 'd']:
self.drive_msg.drive.steering_angle = 0.0 # stop turning
except AttributeError:
pass
def publish_cmd(self):
# 4. Publish the Message
self.drive_msg.header.frame_id = "f1_tenth"
self.drive_msg.header.stamp = self.get_clock().now().to_msg()
self.publisher_.publish(self.drive_msg)
```
</details>
#### 1.5.4 Start the ROS Node
To run the ROS2 node, open a terminal and run the following command:
```bash
cd ~/source/sample-ackermann-amr/ros2_f1_tenth_trainer
```
Then launch the ROS2 node with this command:
```bash
python3 teleop_ackermann_key_start.py
```
If you are having trouble, run the completed code instead:
```bash
python3 teleop_ackermann_key.py
```
#### 1.5.5 Change to the Chase Camera
Click on the camera icon on the upper left of the viewport and change to `Cameras` → `camera_chase`.
<figure style="text-align: center;">
<img src="Images/Chase_Camera.gif" alt="Change to the chase camera" width="800"/>
  <figcaption>Change to the chase camera</figcaption>
</figure>
#### 1.5.6 Start the Omniverse Simulation
Open Omniverse and click the `play` button or press the `spacebar` to start the physics simulation.
<figure style="text-align: center;">
<img src="Images/1_5_6_Start_Simulation.gif" alt="Start the simulation" width="800"/>
  <figcaption>Start the simulation</figcaption>
</figure>
#### 1.5.7 Drive the Car in the Scene
Drive the car in the scene with the `wasd` keys.
<figure style="text-align: center;">
<img src="Images/1_5_7_Drive_Car.gif" alt="Drive the car with the WASD keys" width="800"/>
  <figcaption>Drive the car with the WASD keys</figcaption>
</figure>
Be careful: if a text-entry window or element has focus, such as the terminal or a text box, it will capture your key presses.
> **_NOTE_**: Is the car slipping and sliding when you drive it? The physics material included with this asset should be applied to any ground planes when you add the asset to a scene, or else the friction will only be half what you expect!
#### 1.5.8 Stop the Simulation
Stop and reset the simulation by clicking the `stop` button or by pressing the `spacebar`.
<figure style="text-align: center;">
<img src="Images/1_5_8_Stop_Simulation.gif" alt="Stop the simulation" width="800"/>
  <figcaption>Stop the simulation</figcaption>
</figure>
#### 1.5.9 Stop the ROS Node
Stop the ROS node by selecting its terminal and pressing `ctrl + c`.
> **_CHALLENGE_**: Remember, `teleop_ackermann_key.py` will be used to drive the car as you collect annotated data for AI model training. With that in mind, how might you change how the robot is controlled to make it easier to collect data?
## 2 Write an Omniverse Extension to Train the Driving Model
Now that we can perform a physics simulation of the robot navigating an environment, the next step is to collect annotated data and train a model that can drive the car autonomously.
### 2.1 The Ackermann Trainer Extension User Interface
In this section we will create the Ackermann Trainer Extension's graphical user interface.
#### 2.1.1 Training Workflow Overview
In order to train a computer-vision based supervised-learning AI for an AMR we need to take the following steps:
* Capture images from the robot's camera.
* Annotate the images with data we would like the model to predict
* Train a model from the dataset
* Validate whether the model is trained well enough
* Save the model so we can use it to actually drive our autonomous robot!
Here is the user interface that guides a user through that workflow:
<figure style="text-align: center;">
<img src="Images/Trainer_UI.png" alt="The Ackermann Trainer Window User Interface" width="600"/>
<figcaption>The Ackermann Trainer Window User Interface</figcaption>
</figure>
The controls of the window are as follows.
1. The `Capture` button captures the current viewport, saves a copy and displays it in the user interface.
2. The captured viewport is displayed here.
3. Clicking on a captured image annotates the image with the coordinates you have clicked.
4. `Count` displays the number of annotated images in your currently loaded dataset.
5. The `Train` button trains the model for one epoch.
6. The `Evaluate` button starts repeatedly capturing the current viewport and evaluating it with your current model.
7. With each iteration of `Evaluate`, the predicted point appears as a green dot. Clicking the `Evaluate` button again will stop evaluation.
8. The `Model Path` attribute determines the file name your model will load from and save to.
9. The `Load Model` button loads the model.
10. The `Save Model` button saves the model.
#### 2.1.2 Capture the Viewport to File
Next we will write the code for most of these UI elements. In VS Code, navigate to *exts/omni.sample.ackermann_amr_trainer/omni/sample/ackermann_amr_trainer*, open `ackermann_trainer_window_start.py`, and find the `capture_image` function. Enter the following snippet which captures the viewport to file:
```python
# 2.1.2 Capture the Viewport to File
viewport = get_active_viewport()
capture_viewport_to_file(viewport, file_path=file_path)
```
When you click on the `Capture` button, it calls the `OnCapture` function. `OnCapture` calls `capture_image` asynchronously, which in turn calls `replace_image`.
#### 2.1.3 Update the UI Thumbnail Image
In the `replace_image` function, paste `self.file_path` into the `ui.Image()` constructor as shown below:
```python
# 2.1.3 Update the image from the capture
ui.Image(self.file_path)
```
The `replace_image` function redraws the thumbnail image and its annotations.
#### 2.1.4 Add Annotation to Dataset
The image displayed in the user interface is clickable and fires the `onMouseReleased` function when clicked. Add the following code to capture the click coordinates and add the image with its annotation to the model's dataset.
```python
# 2.1.4 Capture Click Position
self.click_x, self.click_y = canvas.screen_to_canvas(x, y)
# 2.1.4 Add Image to Dataset
self.file_path = self.model.add_item(self.click_y,
self.click_x,
self.file_path)
```
> **_NOTE_**: The `add_item` function is a member of `ackermann_amr_model`, which will be discussed in section 2.2.
#### 2.1.5 Update the Image Annotation
Next we return to `replace_image` where we set the `x` and `y` positions of the red annotation dot. This is done by adding `self.click_x` and `self.click_y` to the spacers above and to the left of the red dot:
```python
# 2.1.5 Set Annotation Y-Position
ui.Spacer(height=self.click_y)
with ui.HStack():
    # 2.1.5 Set Annotation X-Position
ui.Spacer(width=self.click_x)
style = {"Circle": {"background_color": cl("#cc0000"),
"border_color": cl("#cc0000"),
"border_width": 2}}
ui.Circle(width=10,
height=10,
alignment=ui.
Alignment.LEFT_TOP,
style=style)
```
This places the red dot where the user clicked.
#### 2.1.6 Train the Model
Next, inside the `train` function, add the following code to train the model:
```python
# 2.1.6 Train AI Model
self.model.train()
```
This function will train the model for one epoch each time it is run.
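Because each click performs exactly one epoch, you can simply press `Train` repeatedly to keep improving the model. If you would rather script this, a minimal sketch is shown below — the `epochs` count and the surrounding context are hypothetical, not part of the sample:
```python
# Hypothetical sketch: run several training epochs back to back.
epochs = 5
for _ in range(epochs):
    self.model.train()  # each call trains for one epoch
```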
> **_NOTE_**: The model and its training function will be discussed in section 2.2.
#### 2.1.7 Evaluating the Model
Add the viewport capture code to `on_update`:
```python
# 2.1.7 Capture the Viewport to Buffer
viewport_api = get_active_viewport()
capture_viewport_to_buffer(viewport_api, self.on_viewport_captured)
```
This captures the viewport to a buffer and then calls `on_viewport_captured`. Add the following to `on_viewport_captured` to evaluate the given viewport capture:
```python
# 2.1.7 Evaluate Viewport Image
self.prediction_y, self.prediction_x = self.model.Evaluate(buffer,
buffer_size,
width,
height,
self.thumbnail_width,
self.thumbnail_height)
```
> **_NOTE_**: The `Evaluate` function is in `ackermann_amr_model`, which will be discussed in section 2.2.
The evaluate model functionality in this extension can be toggled on and off with a toggle-button. While on, the extension will send the current viewport through the AI model every 30 frames and will annotate the window's thumbnail with the predicted coordinates continuously. While toggled off, no evaluations are made.
#### 2.1.8 Load and Save Model
The final step in the workflow is to load and save the model with the following functions, which are tied to the callbacks of the `load` and `save` buttons:
```python
# 2.1.8 Load the Model
def onLoadModel(self):
self.model.load(self.model_path_model.as_string)
# 2.1.8 Save the Model
def onSaveModel(self):
self.model.save(self.model_path_model.as_string)
```
<details>
<summary>Completed Code</summary>
```python
def __init__(self, *args, **kwargs):
self.file_path = ""
self.click_x = 0
self.click_y = 0
self.prediction_x = 0
self.prediction_y = 0
self.thumbnail_height = 300
self.thumbnail_width = 600
# Configure Directory Where Data Will Be Saved
self.save_dir = tempfile.gettempdir()
self.save_dir = os.path.join(self.save_dir, 'road_following')
if not os.path.exists(self.save_dir):
os.mkdir(self.save_dir)
self.save_dir = os.path.join(self.save_dir, 'apex')
if not os.path.exists(self.save_dir):
os.mkdir(self.save_dir)
# Initialize AI Model
self.model = ackermann_amr_model(self.thumbnail_height,
self.thumbnail_width,
self.save_dir)
self.update_stream = omni.kit.app.get_app().get_update_event_stream()
self.frame_update_count = 0
self.ov_update = None
self.build_ui()
# Capture Image
def onCapture(self):
# Get Filename
filename = '%s.png' % (str(uuid.uuid1()))
self.file_path = os.path.join(self.save_dir, filename)
# Request Image Capture Asynchronously
asyncio.ensure_future(self.capture_image(self.replace_image,
self.file_path))
# Capture the Viewport to File
async def capture_image(self, on_complete_fn: callable, file_path) -> str:
# 2.1.2 Capture the Viewport to File
viewport = get_active_viewport()
capture_viewport_to_file(viewport, file_path=file_path)
# Wait for the Capture to Complete
if on_complete_fn:
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
# Update the User Interface
on_complete_fn()
# Thumbnail Clicked Callback
def onMouseReleased(self, x, y, button, modifier, canvas):
# 2.1.4 Capture Click Position
self.click_x, self.click_y = canvas.screen_to_canvas(x, y)
# 2.1.4 Add Image to Dataset
self.file_path = self.model.add_item(self.click_y,
self.click_x,
self.file_path)
# Update Image Annotation
self.replace_image()
# Update Count
self.count_model.set_value(str(len(self.model.dataset)))
# Replace the Thumbnail with the latest image
def replace_image(self):
counter = 0
while not os.path.exists(self.file_path) and counter < 10:
time.sleep(0.1)
counter += 1
with self.ClickableCanvas:
with ui.ZStack():
# 2.1.3 Update the image from the capture
ui.Image(self.file_path)
with ui.VStack():
# 2.1.5 Set Annotation Y-Position
                ui.Spacer(height=self.click_y)
                with ui.HStack():
                    # 2.1.5 Set Annotation X-Position
ui.Spacer(width=self.click_x)
style = {"Circle": {"background_color": cl("#cc0000"),
"border_color": cl("#cc0000"),
"border_width": 2}}
ui.Circle(width=10,
height=10,
alignment=ui.
Alignment.LEFT_TOP,
style=style)
# Add Prediction Dot
with ui.VStack():
ui.Spacer(height=self.prediction_y)
with ui.HStack():
ui.Spacer(width=self.prediction_x)
style = {"Circle": {"background_color": cl("#00cc00"),
"border_color": cl("#00cc00"),
"border_width": 2}}
ui.Circle(width=10,
height=10,
alignment=ui.Alignment.LEFT_TOP,
style=style)
# Train the AI Model
def train(self):
self.train_button.enabled = False
# 2.1.6 Train AI Model
self.model.train()
self.train_button.enabled = True
carb.log_info("Training Complete")
# Turn Evaluation On and Off
def toggle_eval(self, model):
# Toggle Evaluation On and Off
if self.eval_model.get_value_as_bool():
self.ov_update = self.update_stream.create_subscription_to_pop(
self.on_update,
name="Eval_Subscription")
else:
self.ov_update.unsubscribe()
# Omniverse Update Callback
def on_update(self, e: carb.events.IEvent):
# Capture the Viewport Every 30 Frames
self.frame_update_count += 1
if self.frame_update_count % 30 == 0:
self.frame_update_count = 1
# 2.1.7 Capture the Viewport to Buffer
viewport_api = get_active_viewport()
capture_viewport_to_buffer(viewport_api, self.on_viewport_captured)
# Evaluate the Viewport with the AI Model
def on_viewport_captured(self, buffer, buffer_size, width, height, format):
# 2.1.7 Evaluate Viewport Image
self.prediction_y, self.prediction_x = self.model.Evaluate(buffer,
buffer_size,
width,
height,
self.thumbnail_width,
self.thumbnail_height)
self.replace_image()
# 2.1.8 Load Model
def onLoadModel(self):
self.model.load(self.model_path_model.as_string)
# 2.1.8 Save Model
def onSaveModel(self):
self.model.save(self.model_path_model.as_string)
# Build the UI
def build_ui(self):
# Build UI
self._window = ui.Window("Ackermann AMR Trainer", width=600, height=500)
with self._window.frame:
with ui.ScrollingFrame():
with ui.VStack():
ui.Spacer(height=40)
# Capture Image
with ui.HStack(height=self.thumbnail_height):
with ui.HStack():
ui.Spacer()
self.ClickableCanvas = ui.CanvasFrame(
draggable=False,
width=self.thumbnail_width,
height=self.thumbnail_height)
self.ClickableCanvas.set_mouse_released_fn(
lambda x, y, b, m, c=self.ClickableCanvas:
self.onMouseReleased(x, y, b, m, c))
ui.Spacer()
# Capture Button
with ui.HStack(height=40):
ui.Spacer()
ui.Button(
"Capture",
clicked_fn=lambda: self.onCapture(),
style={"margin": 5},
height=30,
width=self.thumbnail_width)
ui.Spacer()
# Count Widget
with ui.HStack(height=40):
ui.Spacer()
ui.Label(
'Count: ',
style={"margin": 5},
width=self.thumbnail_width/2,
alignment=ui.Alignment.RIGHT_CENTER)
self.count_model = ui.SimpleStringModel(
str(len(self.model.dataset)))
ui.StringField(
self.count_model,
style={"margin": 5},
height=30,
width=self.thumbnail_width/2)
ui.Spacer()
# Train and Eval Buttons
with ui.HStack(height=40):
ui.Spacer()
self.train_button = ui.Button(
"Train",
clicked_fn=lambda: self.train(),
style={"margin": 5},
height=30,
width=self.thumbnail_width/2)
self.eval_button = ui.ToolButton(
text="Evaluate",
style={"margin": 5},
height=30,
width=self.thumbnail_width/2)
self.eval_model = self.eval_button.model
self.eval_model.add_value_changed_fn(
lambda m: self.toggle_eval(m))
ui.Spacer()
# Model File Path Widget
with ui.HStack(height=40):
ui.Spacer()
ui.Label(
'model path: ',
style={"margin": 5},
height=30, width=self.thumbnail_width/2,
alignment=ui.Alignment.RIGHT_CENTER)
self.model_path_model = ui.SimpleStringModel(
'road_following_model.pth')
ui.StringField(
self.model_path_model,
style={"margin": 5},
height=30,
width=self.thumbnail_width/2)
ui.Spacer()
# Model Load and Save Buttons
with ui.HStack(height=40):
ui.Spacer()
ui.Button(
"load model",
clicked_fn=lambda: self.onLoadModel(),
style={"margin": 5},
height=30,
width=self.thumbnail_width/2)
ui.Button(
"save model",
clicked_fn=lambda: self.onSaveModel(),
style={"margin": 5},
height=30,
width=self.thumbnail_width/2)
ui.Spacer()
ui.Spacer()
```
</details>
### 2.2 The Pytorch Model used to Train and Drive the AMR
With a clear user interface that allows the user to navigate the training workflow, the next step in the extension development is to implement an AI model with its training and evaluation functions.
#### 2.2.1 Open `ackermann_amr_policy_start.py`
Open `ackermann_amr_policy_start.py` in the `exts/omni.sample.ackermann_amr_trainer/omni/sample/ackermann_amr_trainer` folder. This class defines the structure of the AI model. In this case we are using a very typical fully-connected model based on `resnet18`. If you would like to learn in more detail how neural networks work, I highly recommend the Deep Learning Institute's [*Getting Started With Deep Learning*](https://courses.nvidia.com/courses/course-v1:DLI+S-FX-01+V1/). Here are the main components of the AI policy:
1. Its base model
2. How the layers are connected
3. How the nodes are activated
4. How the images are processed
5. How forward propagation is performed (a shape-flow sketch follows this list)
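To make those components concrete, here is a minimal, self-contained sketch of how tensor shapes flow through a policy built this way. It assumes the layout described above — a `resnet18` backbone with its final fully-connected layer removed, a `Linear(512, 2)` head, and a `Tanh` activation; the variable names are illustrative only:
```python
import torch
from torch import nn
from torchvision.models import resnet18, ResNet18_Weights

# ResNet-18 backbone with its final fully-connected layer removed.
full_model = resnet18(weights=ResNet18_Weights.DEFAULT)
backbone = nn.Sequential(*list(full_model.children())[:-1])

x = torch.randn(4, 3, 224, 224)             # a batch of 4 RGB images
features = backbone(x)                      # -> (4, 512, 1, 1)
flat = features.view(features.size(0), -1)  # -> (4, 512)
head = nn.Linear(512, 2)
out = torch.tanh(head(flat))                # -> (4, 2), each value in [-1, 1]
print(out.shape)                            # torch.Size([4, 2])
```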
#### 2.2.2 Add Image Processing Transform
To preprocess the images, add the following code to `ackermann_amr_policy.__init__(self)`:
```python
# 2.2.2 Image Processing Transform
self.transform = transforms.Compose([transforms.ColorJitter(0.2,
0.2,
0.2,
0.2),
transforms.Resize((224,
224)),
transforms.Normalize([0.485,
0.456,
0.406],
[0.229,
0.224,
0.225])])
```
This code modifies the images' brightness, contrast, saturation and hue to improve training. It also resizes the images and normalizes the image data.
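If you want to sanity-check the transform, the sketch below feeds a dummy image tensor through the same `Compose` pipeline; the dummy dimensions are arbitrary and chosen only for illustration:
```python
import torch
import torchvision.transforms as transforms

transform = transforms.Compose([
    transforms.ColorJitter(0.2, 0.2, 0.2, 0.2),  # jitter brightness/contrast/saturation/hue
    transforms.Resize((224, 224)),               # resize to the network's input size
    transforms.Normalize([0.485, 0.456, 0.406],  # ImageNet channel means...
                         [0.229, 0.224, 0.225])  # ...and standard deviations
])

dummy = torch.rand(3, 300, 600)  # a CHW float image in [0, 1]
out = transform(dummy)
print(out.shape)                 # torch.Size([3, 224, 224])
```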
<details>
<summary>Completed Code</summary>
```python
def __init__(self):
super().__init__()
# Initialize Base Model
full_model = resnet18(weights=ResNet18_Weights.DEFAULT)
# Configure Layer Connections
self.model = nn.Sequential(*list(full_model.children())[:-1])
self.fc = nn.Linear(512, 2)
# Node Activation Function
self.sigma = nn.Tanh()
    # 2.2.2 Image Processing Transform
self.transform = transforms.Compose([transforms.ColorJitter(0.2,
0.2,
0.2,
0.2),
transforms.Resize((224,
224)),
transforms.Normalize([0.485,
0.456,
0.406],
[0.229,
0.224,
0.225])])
```
</details>
#### 2.2.3 Open `ackermann_amr_data_start.py`
Open `ackermann_amr_data_start.py`. This class stores the annotated images and makes them available to the model.
#### 2.2.4 Prepend the Annotation to the Image
Look in the `Add_Item` function and insert this code, which adds the click coordinates to the beginning of the image's file name as it is added to the dataset:
```python
# 2.2.4 Prepend file name with x and y coordinates for later loading
file_name = Path(file_path).stem
file_directory = os.path.dirname(file_path)
file_name = '%d_%d_%s.png' % (col, row, file_name)
full_path = os.path.join(file_directory, file_name)
os.rename(file_path, full_path)
file_path = full_path
return full_path
```
This is how the annotations are associated with the images, so that the dataset can be read back in when you reload the extension.
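For example, a click at column `412`, row `187` on a capture named `e3f1.png` would rename the file to `412_187_e3f1.png` (these names are made up). Recovering the annotation later is then just a matter of splitting the file name, roughly like this:
```python
from pathlib import Path

# Hypothetical annotated capture produced by Add_Item.
file_name = Path("/tmp/road_following/apex/412_187_e3f1.png").stem
col, row, uid = file_name.split("_")
print(int(col), int(row))  # 412 187
```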
<details>
<summary>Completed Code</summary>
```python
# Adding an image to the dataset
def Add_Item(self, row, col, width, height, file_path):
# Read the image from file
image = torchvision.io.read_image(file_path)
# Save the image raw data
self._raw_images.append((image[:-1]/255).float().unsqueeze(0))
# Convert from `click` coordinates to image coordinates
x, y = self.rowcol2xy(row, col, height, width)
# Process and save click annotation
col_ratio = float(col/width)
row_ratio = float(row/height)
self._raw_click.append([row, col])
self._raw_ratio.append([row_ratio, col_ratio])
self._raw_coords.append([x, y])
# Convert data to pytorch-compatible values
self._click_tensor = torch.from_numpy(
np.array(self._raw_click)).float().to(self.device)
self._ratio_tensor = torch.from_numpy(
np.array(self._raw_ratio)).float().to(self.device)
self._coords_tensor = torch.from_numpy(
np.array(self._raw_coords)).float().to(self.device)
self._image_tensor = torch.cat(self._raw_images, dim=0).to(self.device)
self._total_length = self._image_tensor.shape[0]
# 2.2.4 Prepend file name with x and y coordinates for later loading
file_name = Path(file_path).stem
file_directory = os.path.dirname(file_path)
file_name = '%d_%d_%s.png' % (col, row, file_name)
full_path = os.path.join(file_directory, file_name)
os.rename(file_path, full_path)
file_path = full_path
return full_path
```
</details>
#### 2.2.5 Open `ackermann_amr_model_start.py`
Finally, open `ackermann_amr_model_start.py`. The `ackermann_amr_model` class trains and evaluates an AI model that can drive an AMR.
#### 2.2.6 Load Training Data in the `train` function
Add the following code to the `train` function to load the annotated data set:
```python
# 2.2.6 Load Training Data
loader = DataLoader(self.dataset, batch_size=10, shuffle=True)
```
Now that the data is loaded, the training loop will train the model correctly.
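For reference, each iteration of the loader yields one shuffled mini-batch of images and annotation coordinates. Here is a quick sketch of what the training loop receives, assuming the dataset returns `(image, coords)` pairs as described in the previous section:
```python
# images: (batch, 3, height, width); coords: (batch, 2) with values in [-1, 1]
for images, coords in loader:
    print(images.shape, coords.shape)
    break
```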
#### 2.2.7 Load Viewport Capture to GPU
Add the following code to the `Evaluate` function to convert the captured viewport buffer to a PyTorch-compatible image and move it to the GPU:
```python
# 2.2.7 Load the viewport capture from buffer and add it to the GPU
img = PIL.Image.frombuffer('RGBA',
(source_width, source_height),
content.contents)
tensor_image = transforms.functional.to_tensor(img)
image = tensor_image[:3, ...].to(self.device)
```
Your model is now complete.
<details>
<summary>Completed Code</summary>
```python
# Train Model
def train(self):
time.sleep(0.1)
mse = torch.nn.MSELoss(reduction='mean')
optimizer = optim.Adam(self.model.parameters(),
lr=1E-4,
weight_decay=1E-5)
# 2.2.6 Load Training Data
loader = DataLoader(self.dataset, batch_size=10, shuffle=True)
self.model.train()
temp_losses = []
for images, coords in loader:
prediction = self.model(images)
optimizer.zero_grad()
loss = mse(prediction, coords)
loss.backward()
optimizer.step()
temp_losses.append(loss.detach().item())
print(np.mean(temp_losses))
# Evaluate Model
def Evaluate(self,
buffer,
buffer_size,
source_width,
source_height,
dest_width,
dest_height):
try:
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.POINTER(
ctypes.c_byte * buffer_size)
ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object,
ctypes.c_char_p]
content = ctypes.pythonapi.PyCapsule_GetPointer(buffer, None)
except Exception as e:
carb.log_error(f"Failed to capture viewport: {e}")
return
# Evaluate Model
self.model.eval()
# 2.2.7 Load the viewport capture from buffer and add it to the GPU
img = PIL.Image.frombuffer('RGBA',
(source_width, source_height),
content.contents)
tensor_image = transforms.functional.to_tensor(img)
image = tensor_image[:3, ...].to(self.device)
mean = torch.Tensor([0.485, 0.456, 0.406]).cuda()
std = torch.Tensor([0.229, 0.224, 0.225]).cuda()
image.sub_(mean[:, None, None]).div_(std[:, None, None])
img = image[None, ...]
prediction = self.model(img)
rows = dest_height
cols = dest_width
self.prediction_y, self.prediction_x = self.dataset.xy2rowcol(
*prediction.squeeze().cpu(),
num_rows=rows, num_cols=cols)
carb.log_info(
"prediction made: %.1f %.1f" % (
self.prediction_x, self.prediction_y))
return [self.prediction_y, self.prediction_x]
```
</details>
The extension is now complete.
### 2.3 Collect Data
Now that the extension is complete, you can use it to collect a dataset of annotated images.
#### 2.3.1 Start the ROS Node
To enable control of the car, open a terminal and run the following commands:
```bash
cd ~/source/sample-ackermann-amr/ros2_f1_tenth_trainer
python3 teleop_ackermann_key.py
```
#### 2.3.2 Change to the Left Camera
Open Omniverse, click on the camera icon on the upper left of the viewport, and change to `Cameras` → `Camera_Left`.
<figure style="text-align: center;">
<img src="Images/Left_Camera.gif" alt="Change to the chase camera" width="600"/>
<figcaption>Change to the chase camera</figcaption>
</figure>
#### 2.3.3 Start the Omniverse Simulation
Click the `play` button or press the `spacebar` to start the physics simulation.
<figure style="text-align: center;">
<img src="Images/1_5_6_Start_Simulation.gif" alt="Start the simulation" width="600"/>
<figcaption>Start the simulation</figcaption>
</figure>
#### 2.3.4 Drive the Car in the Scene
Drive the car in the scene with the `WASD` keys.
<figure style="text-align: center;">
<img src="Images/1_5_7_Drive_Car.gif" alt="Drive the car around the track" width="600"/>
<figcaption>Drive the car around the track</figcaption>
</figure>
#### 2.3.5 Capture One Image
Capture an image by clicking the `Capture` button in the `Ackermann AMR Trainer`.
<figure style="text-align: center;">
<img src="Images/Capture.gif" alt="Capture one image" width="600"/>
<figcaption>Capture one image</figcaption>
</figure>
This will capture the viewport and save it to a temporary directory.
#### 2.3.6 Annotate One Image
Click on the point on the image the car should drive towards.
<figure style="text-align: center;">
<img src="Images/Annotate_Image.gif" alt="Annotate one image" width="600"/>
<figcaption>Annotate one image</figcaption>
</figure>
This will add the coordinates you clicked to the image as an annotation.
#### 2.3.7 Train the Model
Click on the `Train` button to train the model for one epoch.
<figure style="text-align: center;">
<img src="Images/Train_Model.gif" alt="Train the model for one epoch per click" width="600"/>
<figcaption>Train the model for one epoch per click</figcaption>
</figure>
The current model loss is printed to the console at the `information` level.
#### 2.3.8 Save the Model
Save the model by clicking the `save model` button in the `Ackermann AMR Trainer` extension.
#### 2.3.9 Toggle Evaluation On and Off
Click the `Evaluate` button to turn on model evaluation; click it again to turn off model evaluation.
<figure style="text-align: center;">
<img src="Images/Evaluate.gif" alt="Click the evaluate button to turn evaluation on and off" width="600"/>
<figcaption>Click the evaluate button to turn evaluation on and off</figcaption>
</figure>
> **_WARNING_**: This sends the viewport, not the extension thumbnail, through evaluation. If you drive the car during evaluation, the green dot will show the model prediction for the **Current** position of the car, not the position shown in the extension's thumbnail.
> **_Challenge_**: See if you can train a model that could control the robot. It does not have to be perfect; it just needs to generally predict further right or further left correctly depending on the car's position. An imperfect model can work well with the right controller code, as will be demonstrated in the next section. Expect to build a dataset of about 400 annotated images and to train on it for 5-10 epochs to get a workable model. While collecting data, it is important to drive along both ideal and non-ideal paths, so the robot learns to follow the ideal line as well as to get back on track if it drifts off course.
## 3 Create a ROS 2 Node to Automatically Control the Car
With a trained model that, in theory, can control the car, the next step is to deploy the model to a ROS node that actually controls the robot.
### 3.1 Open `model_evaluate_start.py`
Open `ros2_f1_tenth_trainer/model_evaluate_start.py`. When this class is initialized, it loads the model the user trained and saved. It then subscribes to viewport images published by Isaac Sim and uses them to predict where it thinks a user would click on each image. In this section we will respond to that prediction.
### 3.2 Compute Steering Angle
First, in `image_callback`, compute the raw steering angle from the predicted `x` and `y` values:
```python
# 3.2. Compute Steering Angle
forward = np.array([0.0, -1.0])
offset = np.array([0.0, 1.0])
traj = np.array([prediction_x.item(), prediction_y.item()]) - offset
unit_traj = traj / np.linalg.norm(traj)
unit_forward = forward / np.linalg.norm(forward)
steering_angle = np.arccos(np.dot(unit_traj, unit_forward)) / 2.0 - np.deg2rad(15.0)
```
Here we use the dot product of the forward vector and the vector from the bottom-center of the image to the annotated point to find the steering angle (a worked example follows the figure).
<figure style="text-align: center;">
<img src="Images/Steering_Angle.png" alt="Steering Angle Schematic" width="600"/>
<figcaption>Steering Angle Schematic</figcaption>
</figure>
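To make the geometry concrete, here is a small worked example of the same math outside of ROS. The predicted point is made up; note how the last step halves the raw angle and subtracts a fifteen-degree bias:
```python
import numpy as np

forward = np.array([0.0, -1.0])    # straight ahead in annotation space
offset = np.array([0.0, 1.0])      # bottom-center of the image
prediction = np.array([0.5, 0.0])  # hypothetical model output (x, y)

traj = prediction - offset         # vector from bottom-center to the predicted point
unit_traj = traj / np.linalg.norm(traj)
unit_forward = forward / np.linalg.norm(forward)

raw_angle = np.arccos(np.dot(unit_traj, unit_forward))  # ~26.6 degrees
steering_angle = raw_angle / 2.0 - np.deg2rad(15.0)     # ~-1.7 degrees
print(np.rad2deg(raw_angle), np.rad2deg(steering_angle))
```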
> **_Tip_**: Notice that in the last line a few adjustments are made to the steering angle: the value is halved and biased by fifteen degrees. These correction factors will be key to your success in making this car drive faster! Because the track we trained on only has left-hand turns, the model is biased towards the left; we introduced a reverse bias to help the vehicle go straight and turn right when needed. You might be able to find a better bias value! We also found the car made large turns it sometimes could not recover from, so we divided all angles by a scaling factor to help the car drive more smoothly. Once again, you may be able to find a better value for this correction factor.
### 3.3 Create and Send ROS Message
Second, we create and send a message through ROS with the following snippet:
```python
# 3.3. Create and Send Message
msg = AckermannDriveStamped()
msg.header.frame_id = "f1_tenth"
msg.header.stamp = self.get_clock().now().to_msg()
msg.drive.steering_angle = steering_angle
# Challenge: Increase the drive speed as much as you can!
msg.drive.speed = 2.0
self.publisher.publish(msg)
```
This code is very similar to the code used to steer the car with the keyboard. Rather than take keyboard input, however, we use model predictions to determine the steering and throttle values.
<details>
<summary>Completed Code</summary>
```Python
def image_callback(self, msg):
width = msg.width
height = msg.height
# Evaluate Model
self.model.eval()
img = PIL.Image.frombuffer('RGB', (width, height), msg.data.tobytes())
tensor_image = transforms.functional.to_tensor(img)
image = tensor_image[:3, ...].to(self.device)
mean = torch.Tensor([0.485, 0.456, 0.406]).cuda()
std = torch.Tensor([0.229, 0.224, 0.225]).cuda()
image.sub_(mean[:, None, None]).div_(std[:, None, None])
img = image[None, ...]
prediction = self.model(img)
prediction_x, prediction_y = self.xy2rowcol(*prediction.squeeze().cpu())
    # 3.2. Compute Steering Angle
forward = np.array([0.0, -1.0])
offset = np.array([0.0, 1.0])
traj = np.array([prediction_x.item(), prediction_y.item()]) - offset
unit_traj = traj / np.linalg.norm(traj)
unit_forward = forward / np.linalg.norm(forward)
steering_angle = np.arccos(np.dot(unit_traj, unit_forward)) / 2.0 - np.deg2rad(15.0)
    # 3.3. Create and Send Message
msg = AckermannDriveStamped()
msg.header.frame_id = "f1_tenth"
msg.header.stamp = self.get_clock().now().to_msg()
msg.drive.steering_angle = steering_angle
# Challenge: Increase the drive speed as much as you can!
msg.drive.speed = 2.0
self.publisher.publish(msg)
```
</details>
### 3.4 Run the Model
To run the model, replace `road_following_model.pth` in the `ros2_f1_tenth_trainer` directory with a model you have trained, then enter the following command from that directory and start the Omniverse simulation:
```bash
python3 model_evaluate_start.py
```
> **_NOTE_**: Use the completed code, `model_evaluate.py`, if needed.
### 3.5 Start the Simulation
Open Omniverse and click the `play` icon or press the `spacebar` to start the simulation.
<figure style="text-align: center;">
<img src="Images/1_5_6_Start_Simulation.gif" alt="Start the simulation and watch the robot drive itself!" width="600"/>
<figcaption>Start the simulation and watch the robot drive itself!</figcaption>
</figure>
> **_Challenge_**: Increasing the drive speed will have a dramatic effect on your lap time. How much can you increase it? Could you make the drive speed variable instead of constant?
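One purely illustrative direction for this challenge is to scale speed with steering magnitude — slow down in sharp turns, speed up on straights. None of these constants come from the sample; they are starting points to tune:
```python
# Hypothetical variable-speed rule: drive faster when the wheel is nearly straight.
max_speed = 5.0
min_speed = 1.5
turn_factor = min(1.0, abs(steering_angle) / np.deg2rad(30.0))  # 0 = straight, 1 = full lock
msg.drive.speed = max(min_speed, max_speed * (1.0 - turn_factor))
```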
## Conclusions
Take a moment to reflect on what you have just done: you have taken an Ackermann AMR from mere concept to driving around a virtual track under its own intelligence. And you have done it with ROS, so you can deploy this synthetically trained model to a real robot. Great job!
## Appendix: How to Revert Changes You Have Made to this Repository
If you have made a change to a file and now the project isn't working and you are not sure what to do, feel free to simply revert to the original state of this repository.
Open your terminal and enter this command to navigate to the git repository:
```bash
cd ~/source/sample-ackermann-amr
```
To revert the files that came with the repository while keeping any new files you have added:
```bash
git reset --hard
```
If you want to completely reset the repository to its original state, including removing untracked files, use these commands:
```bash
git reset --hard
git clean -fd
``` | 51,968 | Markdown | 39.888277 | 714 | 0.646436 |
NVIDIA-Omniverse/sample-ackermann-amr/exts/omni.sample.ackermann_amr_trainer/omni/sample/ackermann_amr_trainer/ackermann_amr_model.py | # Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from .ackermann_amr_policy import ackermann_amr_policy
from .ackermann_amr_data import ackermann_amr_data
import time
import carb
import numpy as np
import torch
import PIL.Image
import ctypes
import os
from torch import optim
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
class ackermann_amr_model():
def __init__(self, rows, columns, image_directory):
self.device = torch.device('cuda')
self.model = ackermann_amr_policy()
self.model = self.model.to(self.device)
self.save_directory = image_directory
self.rows = rows
self.columns = columns
# Initialize Dataset
self.dataset = ackermann_amr_data([image_directory],
self.device,
self.columns,
self.rows)
# Add Item to Dataset
def add_item(self, click_y, click_x, file_path):
return self.dataset.Add_Item(click_y,
click_x,
self.columns,
self.rows,
file_path)
# Train Model
def train(self):
time.sleep(0.1)
mse = torch.nn.MSELoss(reduction='mean')
optimizer = optim.Adam(self.model.parameters(),
lr=1E-4,
weight_decay=1E-5)
# 2.2.6 Load Training Data
loader = DataLoader(self.dataset, batch_size=10, shuffle=True)
self.model.train()
temp_losses = []
for images, coords in loader:
prediction = self.model(images)
optimizer.zero_grad()
loss = mse(prediction, coords)
loss.backward()
optimizer.step()
temp_losses.append(loss.detach().item())
print(np.mean(temp_losses))
# Evaluate Model
def Evaluate(self,
buffer,
buffer_size,
source_width,
source_height,
dest_width,
dest_height):
try:
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.POINTER(
ctypes.c_byte * buffer_size)
ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object,
ctypes.c_char_p]
content = ctypes.pythonapi.PyCapsule_GetPointer(buffer, None)
except Exception as e:
carb.log_error(f"Failed to capture viewport: {e}")
return
# Evaluate Model
self.model.eval()
# 2.2.7 Load the viewport capture from buffer and add it to the GPU
img = PIL.Image.frombuffer('RGBA',
(source_width, source_height),
content.contents)
tensor_image = transforms.functional.to_tensor(img)
image = tensor_image[:3, ...].to(self.device)
mean = torch.Tensor([0.485, 0.456, 0.406]).cuda()
std = torch.Tensor([0.229, 0.224, 0.225]).cuda()
image.sub_(mean[:, None, None]).div_(std[:, None, None])
img = image[None, ...]
prediction = self.model(img)
rows = dest_height
cols = dest_width
self.prediction_y, self.prediction_x = self.dataset.xy2rowcol(
*prediction.squeeze().cpu(),
num_rows=rows, num_cols=cols)
carb.log_info(
"prediction made: %.1f %.1f" % (
self.prediction_x, self.prediction_y))
return [self.prediction_y, self.prediction_x]
# Load Model
def load(self, model_name):
model_path = os.path.join(self.save_directory,
model_name)
self.model.load_state_dict(torch.load(model_path))
carb.log_warn("Model Loaded: {0}".format(model_path))
# Save Model
def save(self, model_name):
model_path = os.path.join(self.save_directory,
model_name)
torch.save(self.model.state_dict(), model_path)
carb.log_warn("Model Saved: {0}".format(model_path))
| 4,662 | Python | 34.325757 | 79 | 0.554269 |
NVIDIA-Omniverse/sample-ackermann-amr/exts/omni.sample.ackermann_amr_trainer/omni/sample/ackermann_amr_trainer/ackermann_trainer_window.py |
# Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from .ackermann_amr_model import ackermann_amr_model
import omni.ext
import omni.ui as ui
import carb.events
import os
import tempfile
import time
import omni.usd
from omni.kit.viewport.utility import get_active_viewport
from omni.kit.viewport.utility import capture_viewport_to_file
from omni.kit.viewport.utility import capture_viewport_to_buffer
import omni.kit.app
import uuid
from omni.ui import color as cl
import asyncio
class ackermann_trainer_window(ui.Window):
def __init__(self, *args, **kwargs):
super(ackermann_trainer_window, self).__init__(*args, **kwargs)
self.file_path = ""
self.click_x = 0
self.click_y = 0
self.prediction_x = 0
self.prediction_y = 0
self.thumbnail_height = 300
self.thumbnail_width = 600
# Configure Directory Where Data Will Be Saved
self.save_dir = tempfile.gettempdir()
self.save_dir = os.path.join(self.save_dir, 'road_following')
if not os.path.exists(self.save_dir):
os.mkdir(self.save_dir)
self.save_dir = os.path.join(self.save_dir, 'apex')
if not os.path.exists(self.save_dir):
os.mkdir(self.save_dir)
# Initialize AI Model
self.model = ackermann_amr_model(self.thumbnail_height,
self.thumbnail_width,
self.save_dir)
self.update_stream = omni.kit.app.get_app().get_update_event_stream()
self.frame_update_count = 0
self.ov_update = None
self.build_ui()
# Capture Image
def onCapture(self):
# Get Filename
filename = '%s.png' % (str(uuid.uuid1()))
self.file_path = os.path.join(self.save_dir, filename)
# Request Image Capture Asynchronously
asyncio.ensure_future(self.capture_image(self.replace_image,
self.file_path))
# Capture the Viewport to File
async def capture_image(self, on_complete_fn: callable, file_path) -> str:
# 2.1.2 Capture the Viewport to File
viewport = get_active_viewport()
capture_viewport_to_file(viewport, file_path=file_path)
carb.log_warn("Image Captured: {file_path}")
# Wait for the Capture to Complete
if on_complete_fn:
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
# Update the User Interface
on_complete_fn()
# Thumbnail Clicked Callback
def onMouseReleased(self, x, y, button, modifier, canvas):
# 2.1.4 Capture Click Position
self.click_x, self.click_y = canvas.screen_to_canvas(x, y)
# 2.1.4 Add Image to Dataset
self.file_path = self.model.add_item(self.click_y,
self.click_x,
self.file_path)
carb.log_warn("Image Annotated: {self.file_path}")
# Update Image Annotation
self.replace_image()
# Update Count
self.count_model.set_value(str(len(self.model.dataset)))
# Replace the Thumbnail with the latest image
def replace_image(self):
counter = 0
while not os.path.exists(self.file_path) and counter < 10:
time.sleep(0.1)
counter += 1
with self.ClickableCanvas:
with ui.ZStack():
# 2.1.3 Update the image from the capture
ui.Image(self.file_path)
with ui.VStack():
# 2.1.5 Set Annotation Y-Position
ui.Spacer(height=self.click_y)
with ui.HStack():
                        # 2.1.5 Set Annotation X-Position
ui.Spacer(width=self.click_x)
style = {"Circle": {"background_color": cl("#cc0000"),
"border_color": cl("#cc0000"),
"border_width": 2}}
ui.Circle(width=10,
height=10,
alignment=ui.
Alignment.LEFT_TOP,
style=style)
# Add Prediction Dot
with ui.VStack():
ui.Spacer(height=self.prediction_y)
with ui.HStack():
ui.Spacer(width=self.prediction_x)
style = {"Circle": {"background_color": cl("#00cc00"),
"border_color": cl("#00cc00"),
"border_width": 2}}
ui.Circle(width=10,
height=10,
alignment=ui.Alignment.LEFT_TOP,
style=style)
# Train the AI Model
def train(self):
self.train_button.enabled = False
# 2.1.6 Train AI Model
self.model.train()
self.train_button.enabled = True
carb.log_info("Training Complete")
# Turn Evaluation On and Off
def toggle_eval(self, model):
# Toggle Evaluation On and Off
if self.eval_model.get_value_as_bool():
self.ov_update = self.update_stream.create_subscription_to_pop(
self.on_update,
name="Eval_Subscription")
else:
self.ov_update.unsubscribe()
# Omniverse Update Callback
def on_update(self, e: carb.events.IEvent):
# Capture the Viewport Every 30 Frames
self.frame_update_count += 1
if self.frame_update_count % 30 == 0:
self.frame_update_count = 1
# 2.1.7 Capture the Viewport to Buffer
viewport_api = get_active_viewport()
capture_viewport_to_buffer(viewport_api, self.on_viewport_captured)
# Evaluate the Viewport with the AI Model
def on_viewport_captured(self, buffer, buffer_size, width, height, format):
# 2.1.7 Evaluate Viewport Image
self.prediction_y, self.prediction_x = self.model.Evaluate(buffer,
buffer_size,
width,
height,
self.thumbnail_width,
self.thumbnail_height)
self.replace_image()
# 2.1.8 Load Model
def onLoadModel(self):
self.model.load(self.model_path_model.as_string)
# 2.1.8 Save Model
def onSaveModel(self):
self.model.save(self.model_path_model.as_string)
# Build the UI
def build_ui(self):
# Build UI
with self.frame:
with ui.ScrollingFrame():
with ui.VStack():
ui.Spacer(height=40)
# Capture Image
with ui.HStack(height=self.thumbnail_height):
with ui.HStack():
ui.Spacer()
self.ClickableCanvas = ui.CanvasFrame(
draggable=False,
width=self.thumbnail_width,
height=self.thumbnail_height)
self.ClickableCanvas.set_mouse_released_fn(
lambda x, y, b, m, c=self.ClickableCanvas:
self.onMouseReleased(x, y, b, m, c))
ui.Spacer()
# Capture Button
with ui.HStack(height=40):
ui.Spacer()
ui.Button(
"Capture",
clicked_fn=lambda: self.onCapture(),
style={"margin": 5},
height=30,
width=self.thumbnail_width)
ui.Spacer()
# Count Widget
with ui.HStack(height=40):
ui.Spacer()
ui.Label(
'Count: ',
style={"margin": 5},
width=self.thumbnail_width/2,
alignment=ui.Alignment.RIGHT_CENTER)
self.count_model = ui.SimpleStringModel(
str(len(self.model.dataset)))
ui.StringField(
self.count_model,
style={"margin": 5},
height=30,
width=self.thumbnail_width/2)
ui.Spacer()
# Train and Eval Buttons
with ui.HStack(height=40):
ui.Spacer()
self.train_button = ui.Button(
"Train",
clicked_fn=lambda: self.train(),
style={"margin": 5},
height=30,
width=self.thumbnail_width/2)
self.eval_button = ui.ToolButton(
text="Evaluate",
style={"margin": 5},
height=30,
width=self.thumbnail_width/2)
self.eval_model = self.eval_button.model
self.eval_model.add_value_changed_fn(
lambda m: self.toggle_eval(m))
ui.Spacer()
# Model File Path Widget
with ui.HStack(height=40):
ui.Spacer()
ui.Label(
'model path: ',
style={"margin": 5},
height=30, width=self.thumbnail_width/2,
alignment=ui.Alignment.RIGHT_CENTER)
self.model_path_model = ui.SimpleStringModel(
'road_following_model.pth')
ui.StringField(
self.model_path_model,
style={"margin": 5},
height=30,
width=self.thumbnail_width/2)
ui.Spacer()
# Model Load and Save Buttons
with ui.HStack(height=40):
ui.Spacer()
ui.Button(
"load model",
clicked_fn=lambda: self.onLoadModel(),
style={"margin": 5},
height=30,
width=self.thumbnail_width/2)
ui.Button(
"save model",
clicked_fn=lambda: self.onSaveModel(),
style={"margin": 5},
height=30,
width=self.thumbnail_width/2)
ui.Spacer()
ui.Spacer()
def destroy(self):
super().destroy()
if self.ov_update:
            self.ov_update.unsubscribe()
self.ov_update = None
| 12,045 | Python | 38.887417 | 89 | 0.464757 |
NVIDIA-Omniverse/sample-ackermann-amr/exts/omni.sample.ackermann_amr_trainer/omni/sample/ackermann_amr_trainer/ackermann_amr_data.py | # Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
import torch
import torchvision
from torch.utils.data import Dataset
import os
from pathlib import Path
class ackermann_amr_data(Dataset):
# Initializing the dataset
def __init__(self, root_paths, device, thumbnail_width, thumbnail_height):
super().__init__()
# Initialize variables
self.device = device
self._THUMB_ROWS = thumbnail_height
self._THUMB_COLS = thumbnail_width
self._raw_click = []
self._raw_ratio = []
self._raw_coords = []
self._raw_images = []
self._root_paths = root_paths
# Iterate through all pre-existing images
for i, path in enumerate(root_paths):
files = os.listdir(path)
for fname in files:
# Only process .png files
if fname.endswith(('.png')):
# Split out the x coord, y coord and UID of the image
stokens = fname.split("_")
# Only process images that have an x, y, and UID
if len(stokens) == 3:
# Read image from file
image = torchvision.io.read_image(
os.path.join(path, fname))
# Store raw image data for future use
self._raw_images.append(
(image[:-1]/255).float().unsqueeze(0))
# Get x coord
col = int(stokens[0])
# Get y coord
row = int(stokens[1])
# Convert from "click" coords to image coords
x, y = self.rowcol2xy(row,
col,
self._THUMB_ROWS,
self._THUMB_COLS)
# Determine Aspect Ratio
col_ratio = float(col/self._THUMB_COLS)
row_ratio = float(row/self._THUMB_ROWS)
# Save Raw Annotation for later use
self._raw_click.append([row, col])
self._raw_ratio.append([row_ratio, col_ratio])
self._raw_coords.append([x, y])
else:
# Delete any images that are not properly annotated
fpath = os.path.join(path, fname)
if os.path.isfile(fpath):
os.remove(fpath)
# Convert raw data to pytorch-compatible data
self._click_tensor = torch.from_numpy(
np.array(self._raw_click)).float().to(device)
self._ratio_tensor = torch.from_numpy(
np.array(self._raw_ratio)).float().to(device)
self._coords_tensor = torch.from_numpy(
np.array(self._raw_coords)).float().to(device)
# Compute dataset length
if len(self._raw_images) > 0:
self._image_tensor = torch.cat(self._raw_images, dim=0).to(device)
self._total_length = self._image_tensor.shape[0]
else:
self._total_length = 0
# Adding an image to the dataset
def Add_Item(self, row, col, width, height, file_path):
# Read the image from file
image = torchvision.io.read_image(file_path)
# Save the image raw data
self._raw_images.append((image[:-1]/255).float().unsqueeze(0))
# Convert from `click` coordinates to image coordinates
x, y = self.rowcol2xy(row, col, height, width)
# Process and save click annotation
col_ratio = float(col/width)
row_ratio = float(row/height)
self._raw_click.append([row, col])
self._raw_ratio.append([row_ratio, col_ratio])
self._raw_coords.append([x, y])
# Convert data to pytorch-compatible values
self._click_tensor = torch.from_numpy(
np.array(self._raw_click)).float().to(self.device)
self._ratio_tensor = torch.from_numpy(
np.array(self._raw_ratio)).float().to(self.device)
self._coords_tensor = torch.from_numpy(
np.array(self._raw_coords)).float().to(self.device)
self._image_tensor = torch.cat(self._raw_images, dim=0).to(self.device)
self._total_length = self._image_tensor.shape[0]
# 2.2.4 Prepend file name with x and y coordinates for later loading
file_name = Path(file_path).stem
file_directory = os.path.dirname(file_path)
file_name = '%d_%d_%s.png' % (col, row, file_name)
full_path = os.path.join(file_directory, file_name)
os.rename(file_path, full_path)
file_path = full_path
return full_path
# Get a single image from the dataset
def __getitem__(self, index):
img = self._image_tensor[index]
coords = self._coords_tensor[index]
return img, coords
# Find the number of images in the dataset
def __len__(self):
return self._total_length
# Convert from image to annotation coordinates
def rowcol2xy(self, row, col, num_rows=224, num_cols=224):
x = 2*col/num_cols - 1
y = 1 - 2*row/num_rows
return float(x), float(y)
# Convert from annotation to image coordinates
def xy2rowcol(self, x, y, num_rows=224, num_cols=224):
col = 0.5*num_cols*(1 + x)
row = 0.5*num_rows*(1 - y)
return int(row), int(col)
| 5,938 | Python | 38.85906 | 79 | 0.550185 |
NVIDIA-Omniverse/sample-ackermann-amr/exts/omni.sample.ackermann_amr_trainer/omni/sample/ackermann_amr_trainer/ackermann_amr_policy_start.py | # Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from torch import nn
from torchvision import transforms
from torchvision.models import resnet18, ResNet18_Weights
class ackermann_amr_policy(nn.Module):
def __init__(self):
super().__init__()
# Initialize Base Model
full_model = resnet18(weights=ResNet18_Weights.DEFAULT)
# Configure Layer Connections
self.model = nn.Sequential(*list(full_model.children())[:-1])
self.fc = nn.Linear(512, 2)
# Node Activation Function
self.sigma = nn.Tanh()
        # 2.2.2 Image Processing Transform
    # Forward Propagation
def forward(self, x):
x = self.transform(x)
x = self.model(x)
x = x.view(x.size(0),-1)
x = self.sigma(self.fc(x))
return x
| 1,185 | Python | 31.054053 | 76 | 0.683544 |
NVIDIA-Omniverse/sample-ackermann-amr/exts/omni.sample.ackermann_amr_trainer/omni/sample/ackermann_amr_trainer/extension.py | # Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from .ackermann_trainer_window import ackermann_trainer_window
import omni.ext
# omni.kit.pipapi extension is required
import omni.kit.pipapi
omni.kit.pipapi.install(
package="torch"
)
omni.kit.pipapi.install(
package='torchvision'
)
class AckermanAMRTrainerExtension(omni.ext.IExt):
def on_startup(self, ext_id):
print("Ackerman AMR Trainer Startup")
# Build UI
self._window = ackermann_trainer_window("Ackerman AMR Trainer",
width=600,
height=500)
def on_shutdown(self):
print("Ackerman AMR Trainer Shutdown")
if self._window:
self._window.destroy()
self._window = None
| 1,197 | Python | 28.949999 | 76 | 0.665831 |
NVIDIA-Omniverse/sample-ackermann-amr/exts/omni.sample.ackermann_amr_trainer/omni/sample/ackermann_amr_trainer/utils.py | # Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import torchvision.transforms as transforms
import torch.nn.functional as F
import cv2
import PIL.Image
import numpy as np
mean = torch.Tensor([0.485, 0.456, 0.406]).cuda()
std = torch.Tensor([0.229, 0.224, 0.225]).cuda()
def preprocess(image_path):
device = torch.device('cuda')
image = PIL.Image.open(image_path)
tensor_image = transforms.functional.to_tensor(image)
image = tensor_image[:3,...].to(device)
image.sub_(mean[:, None, None]).div_(std[:, None, None])
return image[None, ...]
| 961 | Python | 35.999999 | 76 | 0.740895 |
NVIDIA-Omniverse/sample-ackermann-amr/exts/omni.sample.ackermann_amr_trainer/docs/README.md | # ackermann AMR Trainer [omni.sample.ackermann_AMR_Trainer]
<div style="text-align: center;">
<img src="Images/ackermann_AMR_Trainer.png" alt="ackermann_AMR_Trainer" width="400"/>
</div>
This is a sample extension that helps train an AI model to drive an ackermann-based autonomous mobile robot (AMR) in NVIDIA Isaac Sim. The full source code along with assets can be found [here](https://github.com/NVIDIA-Omniverse/kit-extension-sample-ackermann-amr-trainer).
## [Tutorial](../tutorial/tutorial.md)
This extension sample includes a step-by-step tutorial to demonstrate portions of how it was written. [Learn how with the tutorial.](../tutorial/tutorial.md)
## Usage
In order to use this sample you must install *Omniverse*, clone this repository, and configure *Omniverse* to include the provided extension. Then the extension can be used as described below.
### Install Omniverse Launcher and an Omniverse App
1. Install *Omniverse Launcher*: [download](https://www.nvidia.com/en-us/Omniverse/download)
2. Install and launch an *Omniverse* app through the launcher such as *Omniverse Code*.
## Adding This Extension to your *Omniverse App*
### Directly from Github
To add a this extension to your Omniverse app directly from github:
1. Go into: Extension Manager → Hamburger Menu → Settings → Extension Search Paths
<div style="text-align: center;">
<img src="Images/add-ext-search-path.gif" alt="extension search path" width="400"/>
</div>
2. Add this as a search path: `git://github.com/NVIDIA-Omniverse/kit-extension-sample-ackermann-amr-trainer.git?branch=main&dir=exts`
### With Source
#### Adding the Source to Omniverse
1. Fork and clone this repo, for example in `C:\projects\kit-extension-sample-ackermann-amr-trainer`
2. In the *Omniverse App* open extension manager: *Window* → *Extensions*.
3. In the *Extension Manager Window* click on the *hamburger menu* → *settings*.
4. In the settings page there is a list of *Extension Search Paths*. Click on the *plus* button and add the cloned repo's `exts` subfolder. If you used the example clone path, the `exts` folder would be here: `C:\projects\kit-extension-sample-ackermann-amr-trainer\exts`
<div style="text-align: center;">
<img src="Images/add-ext-search-path.gif" alt="extension search path" width="400"/>
</div>
5. Now you can find the `omni.sample.ackermann_amr_trainer` extension in the top left search bar. Select and enable it.
6. The "ackermann AMR Trainer" window will pop up. *Extension Manager* watches for any file changes. You can try changing some code in this extension and see them applied immediately with a hotreload.
#### Linking with an Omniverse app
For a better developer experience, it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. A convenience script to use is included.
Run:
```bash
> link_app.bat
```
If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```bash
> link_app.bat --app code
```
You can also just pass a path to create the link to:
```bash
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2022.1.3"
```
## Using the Extension
### Open the Scene
Use the *Omniverse* app installed previously to open *Racing_Grid.usd* included in the *Assets* folder of the repository.
### Activate the ackermann_AMR_Controller
Activate the *ackermann_AMR_Controller* Extension by clicking on the `+` button.
### Start a Physics Simulation
Start the Isaac Sim simulation by pressing the `play` button or by pressing the `spacebar`. Press the forward arrow on your keyboard or the `a` button on your game pad to start the car driving forward. Steer the car within the cones as it drives forward.
### Collect Annotated Data
Press the `Capture` button to save the current viewport to file. Then click the point in the distance on the image where you would steer the car. This will rename the image file by prepending the `x` and `y` coordinates you have clicked.
<div style="text-align: center;">
<img src="Images/Annotate.gif" alt="Excel connected" width="400"/>
</div>
You can see how many images you have collected in the `Count` field.
> **_Note_**: Remember that you are collecting this data to teach an AI to drive! Do not just collect data along a perfect driving line; collect data off of the ideal line in order to teach the AI how to get back on course if it gets a little lost.
### Train the Model
To train your model set the number of epochs you would like to train in the `epochs` field. If you are not sure what to use, five epochs is a good start.
Next, click on the `train` button. You can see the loss after each epoch printed in the Isaac Sim command prompt as it trains, or all printed at once in the Isaac Sim console after training has completed.
If you are unhappy with the loss value, simply click on the train button again and the model will continue to train for as many epochs as are indicated in the `epochs` field.
### Load and Save the Model
The model can be loaded and saved from the path indicated in the `model path` field by clicking on the `load model` and `save model` fields respectively.
A saved model can be used in future sessions or deployed to a ROS node so that the car can drive itself! | 5,397 | Markdown | 46.769911 | 274 | 0.753196 |
NVIDIA-Omniverse/sample-ackermann-amr/ros2_f1_tenth_trainer/model_evaluate_start.py | #!/usr/bin/env python3
# Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import rclpy
from rclpy.node import Node
from ackermann_msgs.msg import AckermannDriveStamped
from sensor_msgs.msg import Image
import numpy as np
from ackermann_amr_policy import ackermann_amr_policy
import PIL.Image
import torch
import torchvision
import torchvision.transforms as transforms
import os
class MinimalSubscriber(Node):
def __init__(self):
super().__init__('minimal_subscriber')
self.subscription = self.create_subscription(
Image,
'rgb',
self.image_callback,
1)
self.subscription # prevent unused variable warning
self.publisher = self.create_publisher(AckermannDriveStamped, 'ackermann_cmd', 10)
# Initialize AI Model
self.device = torch.device('cuda')
self.model = ackermann_amr_policy()
self.model = self.model.to(self.device)
self.save_dir = os.path.dirname(os.path.realpath(__file__))
self.model_file = "road_following_model.pth"
model_path = os.path.join(self.save_dir, self.model_file)
self.model.load_state_dict(torch.load(model_path))
def image_callback(self, msg):
width = msg.width
height = msg.height
# Evaluate Model
self.model.eval()
img = PIL.Image.frombuffer('RGB', (width, height), msg.data.tobytes())
tensor_image = transforms.functional.to_tensor(img)
image = tensor_image[:3, ...].to(self.device)
mean = torch.Tensor([0.485, 0.456, 0.406]).cuda()
std = torch.Tensor([0.229, 0.224, 0.225]).cuda()
image.sub_(mean[:, None, None]).div_(std[:, None, None])
img = image[None, ...]
prediction = self.model(img)
prediction_x, prediction_y = self.xy2rowcol(*prediction.squeeze().cpu())
# 3.2. Compute Steering Angle
# 3.3. Create and Send Message
def xy2rowcol(self, x, y):
return x, y
def main(args=None):
rclpy.init(args=args)
evaluate_node = MinimalSubscriber()
try:
rclpy.spin(evaluate_node)
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
except KeyboardInterrupt:
evaluate_node.destroy_node()
rclpy.try_shutdown()
if __name__ == '__main__':
main()
| 2,809 | Python | 27.673469 | 90 | 0.656105 |
NVIDIA-Omniverse/sample-ackermann-amr/ros2_f1_tenth_trainer/teleop_ackermann_key.py | #!/usr/bin/env python3
# Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import rclpy
from rclpy.node import Node
from pynput import keyboard
from ackermann_msgs.msg import AckermannDriveStamped
class KeyboardTeleopNode(Node):
def __init__(self):
super().__init__('keyboard_teleop')
# 1. Create the publisher and message
self.publisher_ = self.create_publisher(AckermannDriveStamped, 'ackermann_cmd', 10)
self.drive_msg = AckermannDriveStamped()
self.listener = keyboard.Listener(on_press=self.on_press, on_release=self.on_release)
self.listener.start()
self.timer_period = 0.1 # seconds
self.timer = self.create_timer(self.timer_period, self.publish_cmd)
def on_press(self, key):
try:
# 2. Create ROS Messages based on keys pressed
if key.char == 'w':
self.drive_msg.drive.speed = 2.0
elif key.char == 's':
self.drive_msg.drive.speed = -2.0
elif key.char == 'a':
self.drive_msg.drive.steering_angle = 0.523599 # Turn left by 30 degrees
elif key.char == 'd':
self.drive_msg.drive.steering_angle = -0.523599 # Turn right by 30 degrees
except AttributeError:
pass
def on_release(self, key):
try:
# 3. If no keys are pressed, stop the car
if key.char in ['w', 's']:
self.drive_msg.drive.speed = 0.0
elif key.char in ['a', 'd']:
self.drive_msg.drive.steering_angle = 0.0
except AttributeError:
pass
def publish_cmd(self):
# 4. Publish the Message
self.drive_msg.header.frame_id = "f1_tenth"
self.drive_msg.header.stamp = self.get_clock().now().to_msg()
self.publisher_.publish(self.drive_msg)
def main(args=None):
rclpy.init(args=args)
keyboard_teleop_node = KeyboardTeleopNode()
keyboard_teleop_node.get_logger().info('Listening to keyboard. Use WASD to drive!')
try:
rclpy.spin(keyboard_teleop_node)
except KeyboardInterrupt:
keyboard_teleop_node.destroy_node()
rclpy.try_shutdown()
if __name__ == '__main__':
main()
| 2,616 | Python | 36.927536 | 93 | 0.632645 |
NVIDIA-Omniverse/sample-ackermann-amr/ros2_f1_tenth_trainer/model_evaluate.py | #!/usr/bin/env python3
# Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import rclpy
from rclpy.node import Node
from ackermann_msgs.msg import AckermannDriveStamped
from sensor_msgs.msg import Image
import numpy as np
from ackermann_amr_policy import ackermann_amr_policy
import PIL.Image
import torch
import torchvision
import torchvision.transforms as transforms
import os
import math
class MinimalSubscriber(Node):
def __init__(self):
super().__init__('minimal_subscriber')
self.subscription = self.create_subscription(
Image,
'rgb',
self.image_callback,
1)
self.subscription # prevent unused variable warning
self.publisher = self.create_publisher(AckermannDriveStamped, 'ackermann_cmd', 10)
# Initialize AI Model
self.device = torch.device('cuda')
self.model = ackermann_amr_policy()
self.model = self.model.to(self.device)
self.save_dir = os.path.dirname(os.path.realpath(__file__))
self.model_file = "road_following_model.pth"
model_path = os.path.join(self.save_dir, self.model_file)
self.model.load_state_dict(torch.load(model_path))
def image_callback(self, msg):
width = msg.width
height = msg.height
# Evaluate Model
self.model.eval()
img = PIL.Image.frombuffer('RGB', (width, height), msg.data.tobytes())
tensor_image = transforms.functional.to_tensor(img)
image = tensor_image[:3, ...].to(self.device)
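        # Normalize with the standard ImageNet mean/std used by torchvision pretrained models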
mean = torch.Tensor([0.485, 0.456, 0.406]).cuda()
std = torch.Tensor([0.229, 0.224, 0.225]).cuda()
image.sub_(mean[:, None, None]).div_(std[:, None, None])
img = image[None, ...]
prediction = self.model(img)
prediction_x, prediction_y = self.xy2rowcol(*prediction.squeeze().cpu())
# 3.2. Compute Steering Angle
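        # The angle between the predicted trajectory vector and a fixed
        # forward direction, halved and offset by 15 degrees, becomes the
        # steering command.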
forward = np.array([0.0, -1.0])
offset = np.array([0.0, 1.0])
traj = np.array([prediction_x.item(), prediction_y.item()]) - offset
unit_traj = traj / np.linalg.norm(traj)
unit_forward = forward / np.linalg.norm(forward)
steering_angle = np.arccos(np.dot(unit_traj, unit_forward)) / 2.0 - np.deg2rad(15.0)
# 3.3. Create and Send Message
msg = AckermannDriveStamped()
msg.header.frame_id = "f1_tenth"
msg.header.stamp = self.get_clock().now().to_msg()
msg.drive.steering_angle = steering_angle
# Challenge: Increase the drive speed as much as you can!
msg.drive.speed = 2.0
self.publisher.publish(msg)
def xy2rowcol(self, x, y):
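        # NOTE: despite the name, this is an identity passthrough; (x, y) is returned unchanged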
return x, y
def main(args=None):
rclpy.init(args=args)
evaluate_node = MinimalSubscriber()
try:
rclpy.spin(evaluate_node)
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
except KeyboardInterrupt:
evaluate_node.destroy_node()
rclpy.try_shutdown()
if __name__ == '__main__':
main()
| 3,509 | Python | 28.745762 | 92 | 0.642918 |
NVIDIA-Omniverse/kit-extension-sample-gestures/README.md | # UI Gestures Extension Sample

## [UI Gestures for omni.ui (omni.example.gesture_window)](exts/omni.example.gesture_window)

### About
This extension shows how to create gestures using omni.ui. The focus of this sample extension is to show how to register and create a scene view within a Window.
### [README](exts/omni.example.gesture_window)
See the [README for this extension](exts/omni.example.gesture_window) to learn more about it including how to use it.
## [UI Gestures for Viewport (omni.example.gesture_viewport)](exts/omni.example.gesture_viewport)

### About
This extension shows how to create a simple manipulator with gestures using omni.ui.scene. The focus of this sample extension is to show how to create a simple manipulator and register gestures in the viewport.
### [README](exts/omni.example.gesture_viewport)
See the [README for this extension](exts/omni.example.gesture_viewport) to learn more about it including how to use it.
## [Tutorial](docs/tutorial.md)
Follow a [step-by-step tutorial](docs/tutorial.md) that walks you through how to use omni.ui.scene to build this extension.
## Adding This Extension
To add this extension to your Omniverse app:
1. Go into: Extension Manager -> Hamburger Icon -> Settings -> Extension Search Path
2. Add this as a search path: `git://github.com/NVIDIA-Omniverse/kit-extension-sample-gestures.git?branch=main&dir=exts`
Alternatively:
1. Download or Clone the extension, unzip the file if downloaded
2. Copy the `exts` folder path within the extension folder
- i.e. home/.../kit-extension-sample-gestures/exts (Linux) or C:/.../kit-extension-sample-gestures/exts (Windows)
3. Go into: Extension Manager -> Hamburger Icon -> Settings -> Extension Search Path
4. Add the `exts` folder path as a search path
## Linking with an Omniverse app
For a better developer experience, it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. A convenience script is included.
Run:
```bash
# Windows
> link_app.bat
```
```shell
# Linux
~$ ./link_app.sh
```
If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. You can also explicitly pass an app:
```bash
# Windows
> link_app.bat --app code
```
```shell
# Linux
~$ ./link_app.sh --app code
```
You can also pass a path that leads to the Omniverse package folder to create the link:
```bash
# Windows
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2022.1.3"
```
```shell
# Linux
~$ ./link_app.sh --path "home/bob/.local/share/ov/pkg/create-2022.1.3"
```
## Contributing
The source code for this repository is provided as-is and we are not accepting outside contributions. | 2,921 | Markdown | 32.976744 | 210 | 0.748374 |
NVIDIA-Omniverse/kit-extension-sample-gestures/exts/omni.example.gesture_viewport/omni/example/gesture_viewport/extension.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.ext
from omni.kit.viewport.registry import RegisterScene
from .line import LineManipulator
# Functions and vars are available to other extensions as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
print("[omni.example.gesture] some_public_function was called with x: ", x)
return x ** x
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class OmniExampleGestureExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.example.gesture] omni example gesture startup")
self._line = RegisterScene(LineManipulator, "Line Gesture")
def on_shutdown(self):
print("[omni.example.gesture] omni example gesture shutdown")
self._line = None | 1,562 | Python | 47.843749 | 119 | 0.757362 |
NVIDIA-Omniverse/kit-extension-sample-gestures/exts/omni.example.gesture_viewport/omni/example/gesture_viewport/line.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.ui as ui
from omni.ui import scene as sc
from omni.ui_scene._scene import AbstractGesture
def setcolor(sender, color):
"""
Sets the color of the sender
Args:
`sender : omni.ui.scene.Manipulator`
The shape driving the gesture
`color : omni.ui.color`
The color that will be assigned to the shape
"""
sender.color = color
class Manager(sc.GestureManager):
"""
The object that controls batch processing and preventing of gestures.
See more here: https://docs.omniverse.nvidia.com/kit/docs/omni.ui.scene/latest/omni.ui.scene/omni.ui.scene.GestureManager.html
"""
def should_prevent(self, gesture: AbstractGesture, preventer: AbstractGesture) -> bool:
"""
Called per gesture. Determines if the gesture should be prevented with another gesture.
Useful to resolve intersections.
Args:
`gesture : AbstractGesture`
Gesture that is occurring
`preventer : AbstractGesture`
Gesture preventing `gesture`
Returns:
bool: Whether or not the gesture should be prevented.
            If True, the gesture will be prevented; otherwise it will overtake the last gesture used.
"""
if gesture.name == "SelectionDrag" and preventer.state == sc.GestureState.BEGAN:
return True
if gesture.name == "SelectionClick" and preventer.name == "color_change":
return True
class Move(sc.DragGesture):
"""
    Inherits from `DragGesture`, the gesture that provides a way to capture the click-and-drag mouse event.
See more here: https://docs.omniverse.nvidia.com/kit/docs/omni.ui.scene/latest/omni.ui.scene/omni.ui.scene.DragGesture.html
"""
def __init__(self, transform: sc.Transform, **kwargs):
"""
Construct the gesture to track mouse drags
Args:
`transform : sc.Transform` The transform parent of the shape.
`kwargs : dict`
See below
### Keyword Arguments:
`mouse_button : `
Mouse button that should be active to start the gesture.
`modifiers : `
                The keyboard modifier that should be active to start the gesture.
`check_mouse_moved : `
The check_mouse_moved property is a boolean flag that determines whether the DragGesture should verify if the 2D screen position of the mouse has changed before invoking the on_changed method. This property is essential in a 3D environment, as changes in the camera position can result in the mouse pointing to different locations in the 3D world even when the 2D screen position remains unchanged.
Usage
When check_mouse_moved is set to True, the DragGesture will only call the on_changed method if the actual 2D screen position of the mouse has changed. This can be useful when you want to ensure that the on_changed method is only triggered when there is a genuine change in the mouse's 2D screen position.
If check_mouse_moved is set to False, the DragGesture will not check for changes in the mouse's 2D screen position before calling the on_changed method. This can be useful when you want the on_changed method to be invoked even if the mouse's 2D screen position hasn't changed, such as when the camera position is altered, and the mouse now points to a different location in the 3D world.
`on_began_fn : `
Called if the callback is not set when the user clicks the mouse button.
`on_changed_fn : `
Called if the callback is not set when the user moves the clicked button.
`on_ended_fn : `
Called if the callback is not set when the user releases the mouse button.
`name : `
The name of the object. It's used for debugging.
`manager : `
                The Manager that controls this gesture.
"""
super().__init__(**kwargs)
self.__transform = transform
def on_changed(self):
"""
Called when the user moves the clicked button. Moves the sender in the direction the mouse was moved.
"""
translate = self.sender.gesture_payload.moved
# Move transform to the direction mouse moved
current = sc.Matrix44.get_translation_matrix(*translate)
self.__transform.transform *= current
manager = Manager()
class LineManipulator(sc.Manipulator):
"""
Class that holds a custom Manipulator. Inherits from omni.ui.scene.Manipulator class.
See more here: https://docs.omniverse.nvidia.com/kit/docs/omni.ui.scene/latest/omni.ui.scene/omni.ui.scene.Manipulator.html
"""
def __init__(self, desc: dict, **kwargs) -> None:
"""
### Arguments:
`desc : dict`
Description of the manipulator
`kwargs : dict`
See below
### Keyword Arguments:
`gestures : `
All the gestures assigned to this shape.
`model : `
The model of the class.
"""
super().__init__(**kwargs)
def on_build(self) -> None:
"""
Builds the Scene UI.
        Consists of a beige line that stretches along the X-axis.
        Called when the Manipulator is dirty and needs to rebuild its content. It's another way to build the manipulator's content in case the user doesn't want to reimplement the class.
"""
transform = sc.Transform()
with transform:
sc.Line(
[-50, 0, 0],
[50, 0, 0],
color=ui.color.beige,
thickness=10,
gestures=[
sc.ClickGesture(
lambda s: setcolor(s, ui.color.green), mouse_button=0, name="color_change", manager=manager
),
sc.DoubleClickGesture(
lambda s: setcolor(s, ui.color.beige), mouse_button=0, name="color_change", manager=manager
),
Move(transform, manager=manager),
],
)
with sc.Transform(transform=sc.Matrix44.get_translation_matrix(0, 20, 0)):
sc.Label(
"Click and Drag the Line to Move me\nClick or Double Click to Change color",
size=18,
alignment=ui.Alignment.CENTER,
color=ui.color.blue,
)
| 6,980 | Python | 39.824561 | 414 | 0.621633 |
NVIDIA-Omniverse/kit-extension-sample-gestures/exts/omni.example.gesture_viewport/omni/example/gesture_viewport/tests/test_hello_world.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import the extension's python module we are testing with an absolute import path, as if we were an external user (another extension)
import omni.example.gesture_viewport
# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
pass
# After running each test
async def tearDown(self):
pass
# Actual test, notice it is "async" function, so "await" can be used if needed
async def test_hello_public_function(self):
        result = omni.example.gesture_viewport.some_public_function(4)
self.assertEqual(result, 256)
async def test_window_button(self):
# Find a label in our window
label = ui_test.find("My Window//Frame/**/Label[*]")
# Find buttons in our window
add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
# Click reset button
await reset_button.click()
self.assertEqual(label.widget.text, "empty")
await add_button.click()
self.assertEqual(label.widget.text, "count: 1")
await add_button.click()
self.assertEqual(label.widget.text, "count: 2")
| 2,103 | Python | 37.962962 | 142 | 0.708512 |
NVIDIA-Omniverse/kit-extension-sample-gestures/exts/omni.example.gesture_viewport/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2023-10-11
### Added
- Initial version of extension
| 163 | Markdown | 15.399998 | 80 | 0.668712 |
NVIDIA-Omniverse/kit-extension-sample-gestures/exts/omni.example.gesture_viewport/docs/README.md | # Gestures Viewport (omni.example.gesture_viewport)

## Overview
This extension displays a simple manipulator in the Viewport. The manipulator can be dragged and clicked on.
See [Adding the Extension](../../../README.md#adding-this-extension) on how to add the extension to your project.
## [Tutorial](../../../docs/tutorial.md)
This extension sample also includes a step-by-step tutorial to accelerate your growth as you learn to build your own Omniverse Kit extensions.
Learn how to create a simple scene manipulator and how to apply gestures to the manipulator.
[Get started with the tutorial here.](../../../docs/tutorial.md)
## Usage
Once the extension is enabled in the *Extension Manager*, you should see a line in the viewport similar to the image shown in the [Overview section](#overview).
A beige line starts at the origin (0,0,0) in the Viewport. It can be dragged left and right along the X-axis. Clicking on the line changes its color to green, and double clicking changes it back to beige.
NVIDIA-Omniverse/kit-extension-sample-gestures/exts/omni.example.gesture_window/omni/example/gesture_window/extension.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.ext
from .window import GestureWindowExample
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class OmniExampleGestureExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.example.gesture] omni example gesture startup")
self._window = GestureWindowExample("Gesture Example", width=500, height=500)
def on_shutdown(self):
print("[omni.example.gesture] omni example gesture shutdown")
if self._window:
self._window.destroy()
self._window = None
| 1,345 | Python | 45.413792 | 119 | 0.747955 |
NVIDIA-Omniverse/kit-extension-sample-gestures/exts/omni.example.gesture_window/omni/example/gesture_window/window.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.ui as ui
from omni.ui import scene as sc
from omni.ui_scene._scene import AbstractGesture
proj = [0.5, 0, 0, 0, 0, 0.5, 0, 0, 0, 0, 2e-7, 0, 0, 0, 1, 1]
def setcolor(sender, color):
"""
Sets the color of the sender
Args:
`sender : omni.ui.scene.Manipulator`
The shape driving the gesture
`color : omni.ui.color`
The color that will be assigned to the shape
"""
sender.color = color
class Manager(sc.GestureManager):
"""
The object that controls batch processing and preventing of gestures.
See more here: https://docs.omniverse.nvidia.com/kit/docs/omni.ui.scene/latest/omni.ui.scene/omni.ui.scene.GestureManager.html
"""
def should_prevent(self, gesture: AbstractGesture, preventer: AbstractGesture) -> bool:
"""
Called per gesture. Determines if the gesture should be prevented with another gesture.
Useful to resolve intersections.
Args:
`gesture : AbstractGesture`
Gesture that is occurring
`preventer : AbstractGesture`
Gesture preventing `gesture`
Returns:
bool: Whether or not the gesture should be prevented.
            If True, the gesture will be prevented; otherwise it will overtake the last gesture used.
"""
if gesture.name != "gesture_name" and preventer.state == sc.GestureState.BEGAN:
return True
manager = Manager()
class Move(sc.DragGesture):
"""
    Inherits from `DragGesture`, the gesture that provides a way to capture the click-and-drag mouse event.
See more here: https://docs.omniverse.nvidia.com/kit/docs/omni.ui.scene/latest/omni.ui.scene/omni.ui.scene.DragGesture.html
"""
def __init__(self, transform: sc.Transform, **kwargs):
"""
Construct the gesture to track mouse drags
Args:
`transform : sc.Transform` The transform parent of the shape.
`kwargs : dict`
See below
### Keyword Arguments:
`mouse_button : `
Mouse button that should be active to start the gesture.
`modifiers : `
                The keyboard modifier that should be active to start the gesture.
`check_mouse_moved : `
The check_mouse_moved property is a boolean flag that determines whether the DragGesture should verify if the 2D screen position of the mouse has changed before invoking the on_changed method. This property is essential in a 3D environment, as changes in the camera position can result in the mouse pointing to different locations in the 3D world even when the 2D screen position remains unchanged.
Usage
When check_mouse_moved is set to True, the DragGesture will only call the on_changed method if the actual 2D screen position of the mouse has changed. This can be useful when you want to ensure that the on_changed method is only triggered when there is a genuine change in the mouse's 2D screen position.
If check_mouse_moved is set to False, the DragGesture will not check for changes in the mouse's 2D screen position before calling the on_changed method. This can be useful when you want the on_changed method to be invoked even if the mouse's 2D screen position hasn't changed, such as when the camera position is altered, and the mouse now points to a different location in the 3D world.
`on_began_fn : `
Called if the callback is not set when the user clicks the mouse button.
`on_changed_fn : `
Called if the callback is not set when the user moves the clicked button.
`on_ended_fn : `
Called if the callback is not set when the user releases the mouse button.
`name : `
The name of the object. It's used for debugging.
`manager : `
                The Manager that controls this gesture.
"""
super().__init__(**kwargs)
self.__transform = transform
def on_changed(self):
"""
Called when the user moves the clicked button. Moves the sender in the direction the mouse was moved.
"""
translate = self.sender.gesture_payload.moved
# Move transform to the direction mouse moved
current = sc.Matrix44.get_translation_matrix(*translate)
self.__transform.transform *= current
class GestureWindowExample(ui.Window):
"""
    omni.ui.Window that holds two Rectangles.
    Both Rectangles can be hovered, clicked, and dragged.
    As each gesture is used, the label in the middle of the window updates with the current gesture.
See more here: https://docs.omniverse.nvidia.com/kit/docs/omni.ui/latest/omni.ui/omni.ui.Window.html
"""
def __init__(self, title: str, **kwargs) -> None:
"""
        Construct the window, add it to the underlying windowing system, and make it appear.
### Arguments:
`title :`
The window title. It's also used as an internal window ID.
`kwargs : dict`
See below
### Keyword Arguments:
`flags : `
This property set the Flags for the Window.
`visible : `
This property holds whether the window is visible.
`title : `
This property holds the window's title.
`padding_x : `
This property set the padding to the frame on the X axis.
`padding_y : `
This property set the padding to the frame on the Y axis.
`width : `
This property holds the window Width.
`height : `
This property holds the window Height.
`position_x : `
This property set/get the position of the window in the X Axis. The default is kWindowFloatInvalid because we send the window position to the underlying system only if the position is explicitly set by the user. Otherwise the underlying system decides the position.
`position_y : `
This property set/get the position of the window in the Y Axis. The default is kWindowFloatInvalid because we send the window position to the underlying system only if the position is explicitly set by the user. Otherwise the underlying system decides the position.
`auto_resize : `
                sets up the window to resize automatically based on its content
            `noTabBar : `
                sets the visibility of the TabBar Handle, the small triangle at the corner of the view. If it is not shown, it is not possible to undock that window and it needs to be closed/moved programmatically
`raster_policy : `
Determine how the content of the window should be rastered.
`width_changed_fn : `
This property holds the window Width.
`height_changed_fn : `
This property holds the window Height.
`visibility_changed_fn : `
This property holds whether the window is visible.
"""
super().__init__(title, **kwargs)
self.label = None
self.frame.set_build_fn(self._build_fn)
def _build_fn(self):
"""
        The callback that will be called once the frame is visible and the content of the callback will override the frame child. It's useful for lazy loading.
"""
with self.frame:
with ui.VStack():
self.label = ui.Label("Sender: None\nAction: None", alignment=ui.Alignment.CENTER, size=16)
scene_view = sc.SceneView(
sc.CameraModel(proj, 1), aspect_ratio_policy=sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT
)
with scene_view.scene:
transform = sc.Transform()
with transform:
sc.Rectangle(
2,
2,
color=ui.color.beige,
thickness=5,
gestures=[
sc.ClickGesture(
lambda s: setcolor(s, ui.color.blue), manager=manager, name="gesture_name"
),
sc.DoubleClickGesture(
lambda s: setcolor(s, ui.color.beige), manager=manager, name="gesture_name"
),
Move(transform, manager=manager, name="gesture_name"),
sc.HoverGesture(
on_began_fn=lambda s: setcolor(s, ui.color.black),
on_changed_fn=lambda s: self.print_action(s, "Hover Changed"),
on_ended_fn=lambda s: self.print_action(s, "Hover End"),
),
],
)
transform = sc.Transform(transform=sc.Matrix44.get_translation_matrix(0, 0, -1))
with transform:
sc.Rectangle(
2,
2,
color=ui.color.olive,
thickness=5,
gestures=[
sc.ClickGesture(lambda s: setcolor(s, ui.color.red)),
sc.DoubleClickGesture(lambda s: setcolor(s, ui.color.olive)),
Move(transform),
sc.HoverGesture(
on_began_fn=lambda s: setcolor(s, ui.color.black),
on_changed_fn=lambda s: self.print_action(s, "Hover Changed"),
on_ended_fn=lambda s: self.print_action(s, "Hover End"),
),
],
)
def print_action(self, sender, action):
"""
Prints the action / gesture to the label in the middle of the window
Args:
sender : Where the gesture is coming from
action : The type of gesture being used
"""
self.label.text = f"Sender: {sender}\nAction: {action}"
| 10,881 | Python | 43.05668 | 414 | 0.577796 |
NVIDIA-Omniverse/kit-extension-sample-gestures/exts/omni.example.gesture_window/docs/README.md | # Gestures Window (omni.example.gesture_window)

## Overview
This extension displays a Window with two Rectangles that can be interacted with by hovering, clicking, and dragging.
See [Adding the Extension](../../../README.md#adding-this-extension) on how to add the extension to your project.
## [Tutorial](../../docs/tutorial.md)
This extension sample also includes a step-by-step tutorial to accelerate your growth as you learn to build your own Omniverse Kit extensions.
Learn how to create a scene view within an omni.ui.Window element and how to apply gestures to items in the scene view.
[Get started with the tutorial here.](../../docs/tutorial.md)
## Usage
Once the extension is enabled in the *Extension Manager*, you should see a window similar to the image shown in the [Overview section](#overview).
Text in the window shows which object is receiving the gestures as well as the action. The displayed actions are `Hover Changed` and `Hover Ended`: `Hover Changed` refers to the mouse moving while over the object, and `Hover Ended` refers to the mouse leaving the object's bounds. When the mouse first enters an object (`Hover Began`), the Rectangles turn black.
Both Rectangles can be dragged in the scene by clicking and dragging them. This utilizes the `omni.ui.scene.DragGesture` functionality.
You can click on each Rectangle to change the color (red and blue) or double click to change to their original colors (beige and olive).
| 1,562 | Markdown | 54.821427 | 410 | 0.765685 |
NVIDIA-Omniverse/kit-extension-sample-gestures/docs/tutorial.md | # How to make an extension with UI Gestures in a UI Window and the Viewport
The Gestures extensions show the basics of how gestures work in Omniverse and how users can start creating their own scene manipulators. This guide is great for extension builders who want to start creating their own scene manipulator tools in Omniverse.
> NOTE: Visual Studio Code is the preferred IDE; henceforth we will refer to it throughout this guide.
> NOTE: Omniverse Code is the preferred platform; henceforth we will refer to it throughout this guide.
# Learning Objectives
In this tutorial you learn how to:
- How to create/register a scene view
- Use the omni.ui.scene API
- How to use Gestures
- What Gesture States are
- Prioritizing Gestures
- Creating a Gesture Manager
# Prerequisites
We recommend that you complete these tutorials before moving forward:
- [Extension Environment Tutorial](https://github.com/NVIDIA-Omniverse/ExtensionEnvironmentTutorial)
- [How to make an extension by spawning prims](https://github.com/NVIDIA-Omniverse/kit-extension-sample-spawn-prims)
# Step 1: Create an Extension
> **Note:** This is a review, if you know how to create an extension, feel free to skip this step.
For this guide, we will briefly go over how to create an extension. If you have not completed [How to make an extension by spawning prims](https://github.com/NVIDIA-Omniverse/kit-extension-sample-spawn-prims/blob/main/exts/omni.example.spawn_prims/tutorial/tutorial.md) we recommend you pause here and complete that before moving forward.
## Step 1.1: Create the extension template
In Omniverse Code navigate to the `Extensions` tab and create a new extension by clicking the ➕ icon in the upper left corner and select `New Extension Template Project`.
Name the project `kit-ext-gestures` and the extension `my.gestures.window`.

> **Note:** If you don't see the *Extensions* Window, enable **Window > Extensions**:
>
> 
<icon> | <new template>
:-------------------------:|:-------------------------:
 | 
A new extension template window and Visual Studio Code will open after you have selected the folder location, folder name, and extension ID.
## Step 1.2: Naming your extension
Before beginning to code, navigate into `VS Code` and change how the extension is viewed in the **Extension Manager**. It's important to give your extension a title and description for the end user to understand the extension's purpose.
Inside of the `config` folder, locate the `extension.toml` file.
> **Note:** `extension.toml` is located inside of the `exts` folder you created for your extension.

Inside of this file, there is a title and description for how the extension will look in the **Extension Manager**. Change the title and description for the extension.
```toml
title = "Gesture Window"
description = "Example on how Gestures work in omni.ui"
```
# Step 2: Create `omni.ui` Window
## Step 2.1: Create `window.py`
In VS Code, **create** a file called `window.py` in `exts/my.gestures.window/my/gestures/window/`.

This file will hold our logic for creating an `omni.ui` Window
## Step 2.2: Set Up UI Window Class
With `window.py` created, **add** the following code block inside:
``` python
import omni.ui as ui
# Inherits from omni.ui.Window
class GestureWindowExample(ui.Window):
def __init__(self, title: str, **kwargs) -> None:
super().__init__(title, **kwargs)
# Sets the build function so when the frame refreshes / rebuilds it will run the corresponding function
self.frame.set_build_fn(self._build_fn)
# Build function for our Window
def _build_fn(self):
with self.frame:
with ui.VStack():
ui.Label("Hello World")
```
**Save** `window.py`.
This only sets up our Window class. To have the window created we will head into `extension.py`.
## Step 2.3: Create the UI Window
Open `extension.py` and **replace** all of the code inside with the following:
```python
import omni.ext
from .window import GestureWindowExample
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class OmniExampleGestureExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.example.gesture] omni example gesture startup")
self._window = GestureWindowExample("Gesture Example", width=500, height=500)
def on_shutdown(self):
print("[omni.example.gesture] omni example gesture shutdown")
if self._window:
self._window.destroy()
self._window = None
```
**Save** `extension.py` and **go back** to Omniverse and your window should look like the following:

So far we have created a UI Window that can be docked or dragged within Omniverse.
# Step 3: Create a SceneView
[SceneUI](https://docs.omniverse.nvidia.com/kit/docs/omni.ui.scene/latest/Scene.html) helps build 3D manipulators and 3D helpers. It provides shapes and controls for declaring the UI in 3D Space.
## Step 3.1: Import scene tools
**Go back** to VS Code and **open** `window.py`.
In order to use the Scene API **add** the following line under `import omni.ui as ui`:
```python
from omni.ui import scene as sc
```
After adding your code will read:
```python
import omni.ui as ui
from omni.ui import scene as sc
```
## Step 3.2: Create SceneView and Projection in the Window
1. **Create** the projection matrix; this defines the camera for our scene in the UI (see the sketch after this list). **Add** the following code under `from omni.ui import scene as sc` but before `class GestureWindowExample(ui.Window)`:
```python
proj = [0.5,0,0,0,0,0.5,0,0,0,0,2e-7,0,0,0,1,1]
```
2. **Remove** `ui.Label("Hello World")` and **replace it** with the following:
```python
scene_view = sc.SceneView(sc.CameraModel(proj, 1), aspect_ratio_policy=sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT)
```
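As a rough sketch of how to read the `proj` values (our interpretation of the flattened 4x4 matrix, not an official description of the scene API):

```python
# Sketch only: the flat list viewed as four rows of a 4x4 matrix
proj = [
    0.5, 0.0, 0.0,  0.0,   # scales X by 0.5 (horizontal zoom)
    0.0, 0.5, 0.0,  0.0,   # scales Y by 0.5 (vertical zoom)
    0.0, 0.0, 2e-7, 0.0,   # near-zero Z scale flattens depth
    0.0, 0.0, 1.0,  1.0,
]
```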
Saving `window.py` now will result in an empty UI window. With the scene established, other UI elements can now be populated inside.
## Step 3.3: Add a Rectangle
To add a rectangle to the scene, add the following under where `scene_view` was declared:
```python
with scene_view.scene:
transform = sc.Transform()
with transform:
sc.Rectangle(
2, # width
2, # height
color = ui.color.beige,
thickness=5
)
```
After editing `window.py` should look like the following:
```python
import omni.ui as ui
from omni.ui import scene as sc
proj = [0.5,0,0,0,0,0.5,0,0,0,0,2e-7,0,0,0,1,1]
class GestureWindowExample(ui.Window):
def __init__(self, title: str, **kwargs) -> None:
super().__init__(title, **kwargs)
self.frame.set_build_fn(self._build_fn)
def _build_fn(self):
with self.frame:
with ui.VStack():
scene_view = sc.SceneView(sc.CameraModel(proj, 1), aspect_ratio_policy=sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT)
with scene_view.scene:
transform = sc.Transform()
with transform:
sc.Rectangle(
2,
2,
color=ui.color.beige,
thickness=5
)
```
**Save** `window.py` and go back to Omniverse. You will see that our window now has a beige square.

# Step 4: Add a Gesture
Currently, the square does nothing. No actions will occur when hovering, dragging, or clicking on it. To add actions we will be adding [Gestures](https://docs.omniverse.nvidia.com/kit/docs/omni.ui.scene/latest/Gestures.html).
## Step 4.1: Create `setcolor()`
In `window.py`, **add** the following lines after `proj = [...]`:
```python
def setcolor(sender, color):
sender.color = color
```
This function will be used to change the color of the shape.
## Step 4.2: Add Gesture Callback
**Add** a comma after `thickness=5` then **add** the following line after:
```python
gesture=sc.ClickGesture(lambda s: setcolor(s, ui.color.blue))
```
After editing `window.py` should look like the following:
```python
import omni.ui as ui
from omni.ui import scene as sc
proj = [0.5,0,0,0,0,0.5,0,0,0,0,2e-7,0,0,0,1,1]
def setcolor(sender, color):
sender.color = color
class GestureWindowExample(ui.Window):
def __init__(self, title: str, **kwargs) -> None:
super().__init__(title, **kwargs)
self.frame.set_build_fn(self._build_fn)
def _build_fn(self):
with self.frame:
with ui.VStack():
scene_view = sc.SceneView(sc.CameraModel(proj, 1), aspect_ratio_policy=sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT)
with scene_view.scene:
transform = sc.Transform()
with transform:
sc.Rectangle(
2,
2,
color=ui.color.beige,
thickness=5,
gesture=sc.ClickGesture(lambda s: setcolor(s, ui.color.blue))
)
```
**Save** `window.py` and go back to Omniverse. When you click on the Rectangle now it should change from beige to blue.

## Step 4.3: Gesture States
Some Gestures have different states. To show how the different states work **change** the gesture for the Rectangle to the following:
```python
sc.Rectangle(
2,
2,
color=ui.color.beige,
thickness=5,
gesture=
sc.DragGesture(
on_began_fn=lambda s: setcolor(s, ui.color.indigo),
on_changed_fn=lambda s: setcolor(s, ui.color.lightblue),
on_ended_fn=lambda s: setcolor(s, ui.color.beige)
)
)
```
After editing `window.py` should look like the following:
```python
import omni.ui as ui
from omni.ui import scene as sc
proj = [0.5,0,0,0,0,0.5,0,0,0,0,2e-7,0,0,0,1,1]
def setcolor(sender, color):
sender.color = color
class GestureWindowExample(ui.Window):
def __init__(self, title: str, **kwargs) -> None:
super().__init__(title, **kwargs)
self.frame.set_build_fn(self._build_fn)
def _build_fn(self):
with self.frame:
with ui.VStack():
scene_view = sc.SceneView(sc.CameraModel(proj, 1), aspect_ratio_policy=sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT)
with scene_view.scene:
transform = sc.Transform()
with transform:
sc.Rectangle(
2,
2,
color=ui.color.beige,
thickness=5,
gesture=
sc.DragGesture(
on_began_fn=lambda s: setcolor(s, ui.color.indigo),
on_changed_fn=lambda s: setcolor(s, ui.color.lightblue),
on_ended_fn=lambda s: setcolor(s, ui.color.beige)
)
)
```
**Save** `window.py` and go back to Omniverse. Click on the Rectangle and Click and Drag. Notice how it changes colors based on what was set for each function.
- `on_began_fn` occurs on the initial click, before any mouse movement.
- `on_changed_fn` occurs when the gesture starts and the mouse is dragged.
- `on_ended_fn` occurs when the mouse is released.

## Step 4.4: Moving by Dragging
Next we will look into how the Rectangle can move using DragGesture.
1. Back in `window.py` **add** the following function before `setcolor()`:
```python
def move(transform, shape):
translate = shape.gesture_payload.moved
# Move transform to the direction mouse moved
current = sc.Matrix44.get_translation_matrix(*translate)
transform.transform *= current
```
2. **Add** the following import at the top of `window.py`:
    - `from functools import partial`
    - `partial` creates a new callable with some arguments already filled in, so the remaining parameters match what the callback expects (see the sketch after this list).
3. **Update** DragGesture's `on_changed_fn` to look like the following:
```python
sc.DragGesture(
on_began_fn=lambda s: setcolor(s, ui.color.indigo),
on_changed_fn=partial(move, transform),
on_ended_fn=lambda s: setcolor(s, ui.color.beige)
)
```
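As a minimal sketch of what `partial` does here (standard library only, using plain strings in place of real scene objects):

```python
from functools import partial

def move(transform, shape):
    print(transform, shape)

# Pre-fill the first argument; the resulting callable takes only the
# remaining one, matching the single-argument callback signature.
on_changed = partial(move, "my_transform")
on_changed("my_shape")  # equivalent to move("my_transform", "my_shape")
```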
After editing `window.py` should look like the following:
```python
import omni.ui as ui
from omni.ui import scene as sc
from functools import partial
proj = [0.5,0,0,0,0,0.5,0,0,0,0,2e-7,0,0,0,1,1]
def move(transform, shape):
translate = shape.gesture_payload.moved
# Move transform to the direction mouse moved
current = sc.Matrix44.get_translation_matrix(*translate)
transform.transform *= current
def setcolor(sender, color):
sender.color = color
class GestureWindowExample(ui.Window):
def __init__(self, title: str, **kwargs) -> None:
super().__init__(title, **kwargs)
self.frame.set_build_fn(self._build_fn)
def _build_fn(self):
with self.frame:
with ui.VStack():
scene_view = sc.SceneView(sc.CameraModel(proj, 1), aspect_ratio_policy=sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT)
with scene_view.scene:
transform = sc.Transform()
with transform:
sc.Rectangle(
2,
2,
color=ui.color.beige,
                        thickness=5,
gesture=
sc.DragGesture(
on_began_fn=lambda s: setcolor(s, ui.color.indigo),
on_changed_fn=partial(move, transform),
on_ended_fn=lambda s: setcolor(s, ui.color.beige)
)
)
```
**Save** `window.py` and go back to Omniverse. Notice that now when the Rectangle is dragged it will now move based on the mouse's position.

## Step 4.5: Extending `sc.DragGesture`
We can extend any gesture by reimplementing its class.
1. In `window.py` after `proj`, **create** a new class called `Move`:
- `class Move(sc.DragGesture):`
2. Inside of `Move` **add** the following code:
- ```python
def __init__(self, transform: sc.Transform, **kwargs):
super().__init__(**kwargs)
self.__transform = transform
```
3. After `__init__()` **add** the following code:
- ```python
def on_changed(self):
translate = self.sender.gesture_payload.moved
# Move transform to the direction mouse moved
current = sc.Matrix44.get_translation_matrix(*translate)
self.__transform.transform *= current
```
> **Note:** The above code mirrors the `move()` function. In the code listings from here on, the standalone `move()` function is **removed**.
4. **Change** the value passed to `gesture` when creating the Rectangle to `Move(transform)`
- ```python
sc.Rectangle(
2,
2,
color=ui.color.beige,
thickness=5,
gesture=
Move(transform)
)
```
5. **Save** `window.py`.
After editing `window.py` should look like the following:
```python
import omni.ui as ui
from omni.ui import scene as sc
from functools import partial
proj = [0.5,0,0,0,0,0.5,0,0,0,0,2e-7,0,0,0,1,1]
class Move(sc.DragGesture):
def __init__(self, transform: sc.Transform, **kwargs):
super().__init__(**kwargs)
self.__transform = transform
def on_changed(self):
translate = self.sender.gesture_payload.moved
# Move transform to the direction mouse moved
current = sc.Matrix44.get_translation_matrix(*translate)
self.__transform.transform *= current
def setcolor(sender, color):
sender.color = color
class GestureWindowExample(ui.Window):
def __init__(self, title: str, **kwargs) -> None:
super().__init__(title, **kwargs)
self.frame.set_build_fn(self._build_fn)
def _build_fn(self):
with self.frame:
with ui.VStack():
scene_view = sc.SceneView(sc.CameraModel(proj, 1), aspect_ratio_policy=sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT)
with scene_view.scene:
transform = sc.Transform()
with transform:
sc.Rectangle(
2,
2,
color=ui.color.beige,
thickness=5,
gesture=
Move(transform)
)
```
There are multiple ways that a Gesture Callback can be added, summarized in the sketch after this list:
1. Adding the Gesture Callback with a single function
2. Adding a function for each state in the Gesture Callback
3. Reimplementing Gesture
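The three styles can be compared side by side. This sketch assumes the `setcolor`, `move`, `Move`, and `transform` names defined in the steps above, plus the `sc`, `ui`, and `partial` imports:

```python
gestures = [
    # 1. A single callback for the whole gesture
    sc.ClickGesture(lambda s: setcolor(s, ui.color.red)),
    # 2. One callback per gesture state
    sc.DragGesture(
        on_began_fn=lambda s: setcolor(s, ui.color.indigo),
        on_changed_fn=partial(move, transform),
        on_ended_fn=lambda s: setcolor(s, ui.color.beige),
    ),
    # 3. A reimplemented gesture class
    Move(transform),
]
```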
# Step 5: Multiple Gestures and Multiple Shapes
Shapes are not limited to one Gesture; they can have several. There is also no limit on the number of shapes.
## Step 5.1: Add Multiple Gestures
In `window.py`, **update** the parameters inside Rectangle to the following:
```python
sc.Rectangle(
2,
2,
color=ui.color.beige,
thickness=5,
gestures=[
Move(transform),
sc.ClickGesture(lambda s: setcolor(s, ui.color.red)),
sc.DoubleClickGesture(lambda s: setcolor(s, ui.color.beige))
]
)
```
> **Note:** Instead of using `gesture`, use `gestures` and pass in a list of gestures.
After editing `window.py` should look like the following:
```python
import omni.ui as ui
from omni.ui import scene as sc
from functools import partial
proj = [0.5,0,0,0,0,0.5,0,0,0,0,2e-7,0,0,0,1,1]
class Move(sc.DragGesture):
def __init__(self, transform: sc.Transform, **kwargs):
super().__init__(**kwargs)
self.__transform = transform
def on_changed(self):
translate = self.sender.gesture_payload.moved
# Move transform to the direction mouse moved
current = sc.Matrix44.get_translation_matrix(*translate)
self.__transform.transform *= current
def setcolor(sender, color):
sender.color = color
class GestureWindowExample(ui.Window):
def __init__(self, title: str, **kwargs) -> None:
super().__init__(title, **kwargs)
self.frame.set_build_fn(self._build_fn)
def _build_fn(self):
with self.frame:
with ui.VStack():
scene_view = sc.SceneView(sc.CameraModel(proj, 1), aspect_ratio_policy=sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT)
with scene_view.scene:
transform = sc.Transform()
with transform:
sc.Rectangle(
2,
2,
color=ui.color.beige,
thickness=5,
gestures=[
Move(transform),
sc.ClickGesture(lambda s: setcolor(s, ui.color.red)),
sc.DoubleClickGesture(lambda s: setcolor(s, ui.color.beige))
]
)
```
**Save** `window.py` and go back to Omniverse. We are now able to Click, Double Click, and Drag the rectangle in the window.
The default behavior is that only one gesture will occur at a time.

## Step 5.2: Handling Multiple Shapes
Knowing that only one gesture can occur at a time, we will see what happens when adding a second shape on top.
In `window.py`, **add** the following code below where we created the first Rectangle:
```python
transform = sc.Transform(transform=sc.Matrix44.get_translation_matrix(0,0,-1))
with transform:
sc.Rectangle(
2,
2,
color=ui.color.olive,
thickness=5,
gestures=[
Move(transform),
sc.ClickGesture(lambda s: setcolor(s, ui.color.blue)),
sc.DoubleClickGesture(lambda s: setcolor(s, ui.color.olive))
]
)
```
After editing `window.py` should look like the following:
```python
import omni.ui as ui
from omni.ui import scene as sc
from functools import partial
proj = [0.5,0,0,0,0,0.5,0,0,0,0,2e-7,0,0,0,1,1]
class Move(sc.DragGesture):
def __init__(self, transform: sc.Transform, **kwargs):
super().__init__(**kwargs)
self.__transform = transform
def on_changed(self):
translate = self.sender.gesture_payload.moved
# Move transform to the direction mouse moved
current = sc.Matrix44.get_translation_matrix(*translate)
self.__transform.transform *= current
def setcolor(sender, color):
sender.color = color
class GestureWindowExample(ui.Window):
def __init__(self, title: str, **kwargs) -> None:
super().__init__(title, **kwargs)
self.frame.set_build_fn(self._build_fn)
def _build_fn(self):
with self.frame:
with ui.VStack():
scene_view = sc.SceneView(sc.CameraModel(proj, 1), aspect_ratio_policy=sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT)
with scene_view.scene:
transform = sc.Transform()
with transform:
sc.Rectangle(
2,
2,
color=ui.color.beige,
thickness=5,
gestures=[
Move(transform),
sc.ClickGesture(lambda s: setcolor(s, ui.color.red)),
sc.DoubleClickGesture(lambda s: setcolor(s, ui.color.beige))
]
)
transform = sc.Transform(transform=sc.Matrix44.get_translation_matrix(0,0,-1))
with transform:
sc.Rectangle(
2,
2,
color=ui.color.olive,
thickness=5,
gestures=[
Move(transform),
sc.ClickGesture(lambda s: setcolor(s, ui.color.blue)),
sc.DoubleClickGesture(lambda s: setcolor(s, ui.color.olive))
]
)
```
The above code creates an olive Rectangle that is in front of the beige rectangle.
**Save** `window.py` and go back to Omniverse. Try clicking, double clicking, or dragging the rectangles.

This demonstrates that the Gesture closest to the camera fires. If all the shapes were under the same transform, dragging would move all of them; however, since each rectangle is under its own transform, they move separately.
# Step 6: Gesture Manager
Gestures track incoming input events separately, but it is normally necessary to let only one gesture execute at a time, so that user input does not trigger more than one action.
GestureManager controls the priority of gestures if they are processed at the same time. It prioritizes the desired gestures and prevents unintended gestures from being executed.
## Step 6.1: Create the Gesture Manager
To demonstrate the Gesture Manager, we will give the beige rectangle priority.
**Add** the following code block after `setcolor()`:
```python
class Manager(sc.GestureManager):
def should_prevent(self, gesture: sc.AbstractGesture, preventer: sc.AbstractGesture) -> bool:
if gesture.name != 'gesture_name' and preventer.state == sc.GestureState.BEGAN:
return True
manager = Manager()
```
A Gesture Manager inherits from `sc.GestureManager` and implements `should_prevent()`, which decides whether a gesture should be blocked. If it returns `True`, the gesture is prevented; otherwise the gesture overtakes the last gesture triggered.
## Step 6.2: Add the Gesture Manager to the Gestures
With a Gesture Manager defined, we need to pass it to the Gestures. To keep the olive rectangle's gestures from always being prevented, we also assign a name to the beige rectangle's gestures.
**Update** the Gestures for the beige rectangle by passing in the following parameters:
```python
Move(transform, manager=manager, name="gesture_name"),
sc.ClickGesture(lambda s: setcolor(s, ui.color.red), manager=manager, name="gesture_name"),
sc.DoubleClickGesture(lambda s: setcolor(s, ui.color.beige), manager=manager, name="gesture_name")
```
After editing `window.py` should look like the following:
```python
import omni.ui as ui
from omni.ui import scene as sc
from functools import partial
proj = [0.5,0,0,0,0,0.5,0,0,0,0,2e-7,0,0,0,1,1]
class Move(sc.DragGesture):
def __init__(self, transform: sc.Transform, **kwargs):
super().__init__(**kwargs)
self.__transform = transform
def on_changed(self):
translate = self.sender.gesture_payload.moved
# Move transform to the direction mouse moved
current = sc.Matrix44.get_translation_matrix(*translate)
self.__transform.transform *= current
def setcolor(sender, color):
sender.color = color
class Manager(sc.GestureManager):
def should_prevent(self, gesture: sc.AbstractGesture, preventer: sc.AbstractGesture) -> bool:
if gesture.name != 'gesture_name' and preventer.state == sc.GestureState.BEGAN:
return True
manager = Manager()
class GestureWindowExample(ui.Window):
def __init__(self, title: str, **kwargs) -> None:
super().__init__(title, **kwargs)
self.frame.set_build_fn(self._build_fn)
def _build_fn(self):
with self.frame:
with ui.VStack():
scene_view = sc.SceneView(sc.CameraModel(proj, 1), aspect_ratio_policy=sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT)
with scene_view.scene:
transform = sc.Transform()
with transform:
sc.Rectangle(
2,
2,
color=ui.color.beige,
thickness=5,
gestures=[
Move(transform, manager=manager, name="gesture_name"),
sc.ClickGesture(lambda s: setcolor(s, ui.color.red), manager=manager, name="gesture_name"),
sc.DoubleClickGesture(lambda s: setcolor(s, ui.color.beige), manager=manager, name="gesture_name")
]
)
transform = sc.Transform(transform=sc.Matrix44.get_translation_matrix(0,0,-1))
with transform:
sc.Rectangle(
2,
2,
color=ui.color.olive,
thickness=5,
gestures=[
Move(transform),
sc.ClickGesture(lambda s: setcolor(s, ui.color.blue)),
sc.DoubleClickGesture(lambda s: setcolor(s, ui.color.olive))
]
)
```
**Save** `window.py` and go back to Omniverse. Now when you click and drag, the beige rectangle takes priority over the olive rectangle. Even though the beige rectangle is behind the olive one, the Gesture Manager prevents the olive rectangle's gestures and prioritizes the beige rectangle's.

# Step 7: Simple Scene Manipulator
Knowing how gestures work, we can apply them to create a simple scene manipulator.
## Step 7.1: Create a new Extension
1. **Follow** the same steps from [Step 1](#step-1-create-an-extension)
2. Similar to [Step 2](#step-2-create-omniui-window), **create** a new file called `line.py`.

## Step 7.2: Create the Line Manipulator
In `line.py` **add** the following code and **save** the file:
```python
# line.py
import omni.ui as ui
from omni.ui import scene as sc
class LineManipulator(sc.Manipulator):
def __init__(self, desc: dict, **kwargs) -> None:
super().__init__(**kwargs)
def on_build(self) -> None:
transform = sc.Transform()
with transform:
sc.Line(
[-50, 0, 0],
[50, 0, 0],
color = ui.color.beige,
thickness=10
)
```
**Open** `extension.py` and **replace** the code with the following:
```python
# extension.py
import omni.ext
from omni.kit.viewport.registry import RegisterScene
from .line import LineManipulator
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class OmniExampleGestureExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.example.gesture] omni example gesture startup")
self._line = RegisterScene(LineManipulator, "Line Gesture")
def on_shutdown(self):
print("[omni.example.gesture] omni example gesture shutdown")
self._line = None
```
**Save** `extension.py` and go back to Omniverse. You should see a beige line in the scene at the origin (0,0,0).

Notice that adding the Scene Manipulator is similar to how we created Rectangles in a Window frame. The difference is that we registered it to the Viewport using `RegisterScene`.
## Step 7.3: Add a Gesture to the Line
1. Similar to `window.py`, **add** the following code to `line.py`:
```python
class Move(sc.DragGesture):
def __init__(self, transform: sc.Transform, **kwargs):
super().__init__(**kwargs)
self.__transform = transform
def on_changed(self):
translate = self.sender.gesture_payload.moved
# Move transform to the direction mouse moved
current = sc.Matrix44.get_translation_matrix(*translate)
self.__transform.transform *= current
```
2. **Pass** the parameter `gesture=Move(transform)` into `sc.Line()`.
After editing `line.py` should look like the following:
```python
# line.py
import omni.ui as ui
from omni.ui import scene as sc
class Move(sc.DragGesture):
def __init__(self, transform: sc.Transform, **kwargs):
super().__init__(**kwargs)
self.__transform = transform
def on_changed(self):
translate = self.sender.gesture_payload.moved
# Move transform to the direction mouse moved
current = sc.Matrix44.get_translation_matrix(*translate)
self.__transform.transform *= current
class LineManipulator(sc.Manipulator):
def __init__(self, desc: dict, **kwargs) -> None:
super().__init__(**kwargs)
def on_build(self) -> None:
transform = sc.Transform()
with transform:
sc.Line(
[-50, 0, 0],
[50, 0, 0],
color = ui.color.beige,
thickness=10,
gesture=Move(transform)
)
```
**Save** `line.py` and go back to Omniverse. When you try to drag the Line, the Selection Gesture occurs instead. We can prevent this gesture using a Gesture Manager.

## Step 7.4: Add a Gesture Manager
The Gesture Manager works the same way when added in `line.py`. However, this time we will specify which gesture to prevent.
1. **Add** the following code after the imports:
```python
class Manager(sc.GestureManager):
def should_prevent(self, gesture: sc.AbstractGesture, preventer: sc.AbstractGesture) -> bool:
if gesture.name == "SelectionDrag" and preventer.state == sc.GestureState.BEGAN:
return True
manager=Manager()
```
Since we want to prevent the `SelectionDrag` gesture, we check whether the gesture name is "SelectionDrag".
2. Inside `Line()`, **locate** `Move()` and **add** the argument `manager=manager` into `Move()`.
After editing `line.py` should look like the following:
```python
# line.py
import omni.ui as ui
from omni.ui import scene as sc
class Manager(sc.GestureManager):
def should_prevent(self, gesture: sc.AbstractGesture, preventer: sc.AbstractGesture) -> bool:
if gesture.name == "SelectionDrag" and preventer.state == sc.GestureState.BEGAN:
return True
manager=Manager()
class Move(sc.DragGesture):
def __init__(self, transform: sc.Transform, **kwargs):
super().__init__(**kwargs)
self.__transform = transform
def on_changed(self):
translate = self.sender.gesture_payload.moved
# Move transform to the direction mouse moved
current = sc.Matrix44.get_translation_matrix(*translate)
self.__transform.transform *= current
class LineManipulator(sc.Manipulator):
def __init__(self, desc: dict, **kwargs) -> None:
super().__init__(**kwargs)
def on_build(self) -> None:
transform = sc.Transform()
with transform:
sc.Line(
[-50, 0, 0],
[50, 0, 0],
color = ui.color.beige,
thickness=10,
gesture=Move(transform, manager=manager)
)
```
**Save** `line.py` and go back to Omniverse. Now we can freely drag the line along the X-Axis in the viewport.

Scene manipulators are essential tools in NVIDIA Omniverse for controlling, editing, and arranging 3D scenes. Explore [advanced scene manipulator](https://github.com/NVIDIA-Omniverse/kit-extension-sample-ui-scene) options to deepen your understanding of creating custom scene manipulation tools. | 34,817 | Markdown | 34.857878 | 338 | 0.620473 |
ArtecGroup/omni-artec-asset-browser/README.md | # Artec Cloud asset browser
Provides access to Artec Cloud models for Omniverse
 | 144 | Markdown | 27.999994 | 62 | 0.784722 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/config/extension.toml | [package]
authors = ["NVIDIA"]
category = "services"
changelog = "docs/CHANGELOG.md"
description = "Artec asset browser service"
icon = "data/icon.png"
keywords = ["kit", "service", "browsers", "assets"]
preview_image = "data/preview.png"
readme = "docs/README.md"
repository = ""
title = "Omniverse Artec Asset Service"
toggleable = false
version = "1.0.5"
[dependencies]
"omni.services.core" = {}
"omni.services.facilities.base" = {}
"omni.client" = {}
[[python.module]]
name = "artec.services.browser.asset"
[settings]
exts."artec.services.browser.asset".api_version = "v2"
[[test]]
dependencies = ["omni.services.client", "omni.client"]
| 645 | TOML | 22.071428 | 54 | 0.697674 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/config/extension.gen.toml | [package]
[package.publish]
date = 1661864119
kitVersion = "104.0+release.91154.beb7c507.tc"
buildNumber = "101.1.0+master.848.3bbd8284.tc"
repoName = "kit-browsers"
| 166 | TOML | 22.85714 | 46 | 0.753012 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/extension.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetServiceExtension from omni.services.browser.asset
import importlib
from typing import Optional
import carb
import carb.settings
import carb.tokens
import omni.ext
from omni.services.core import main
from .store.base import AssetStoreGroupFacility, BaseAssetStore
from .services.asset import router
from pathlib import Path
CURRENT_PATH = Path(__file__).parent
ASSETS_DATA_PATH = CURRENT_PATH.parent.parent.parent.parent.joinpath("data").joinpath("assets")
_extension_instance = None
class AssetServiceExtension(omni.ext.IExt):
""" Asset service extension.
"""
def on_startup(self, ext_id):
settings = carb.settings.get_settings()
ext_name = ext_id.split("-")[0]
api_version = settings.get(f"exts/{ext_name}/api_version")
self._base_url = f"/{api_version}/artec-assets"
self._asset_store_group = AssetStoreGroupFacility()
router.register_facility("asset_store", self._asset_store_group)
main.register_router(router, prefix=self._base_url, tags=["assets"])
global _extension_instance
_extension_instance = self
def on_shutdown(self):
global _extension_instance
_extension_instance = None
main.deregister_router(router, prefix=self._base_url)
def resolve_cls(self, import_path):
cls_name = import_path.split(".")[-1]
import_path = import_path.replace(f".{cls_name}", "")
module = importlib.import_module(import_path)
return getattr(module, cls_name)
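    # Usage sketch (the dotted path is hypothetical): resolve_cls("my_pkg.stores.MyStore")
    # imports "my_pkg.stores" and returns its "MyStore" attribute, so store classes
    # can be referenced from settings or config files as plain strings.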
def register_store(self, asset_provider: BaseAssetStore) -> None:
self._asset_store_group.register_store(asset_provider.id(), asset_provider)
def unregister_store(self, asset_store: BaseAssetStore) -> None:
self._asset_store_group.unregister_store(asset_store)
def get_store(self, store_name: str) -> BaseAssetStore:
if store_name:
return self._asset_store_group.get_store(store_name)
return None
def get_instance() -> Optional[AssetServiceExtension]:
return _extension_instance
| 2,504 | Python | 31.115384 | 95 | 0.709665 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/models.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from ProviderModel, ConfigModel, AssetModel, _Page, _Filter, SearchCriteria from omni.services.browser.asset
from typing import List, Dict, Optional, Tuple
import pydantic
class ProviderModel(pydantic.BaseModel):
name: str = pydantic.Field(..., title="Provider name", description="Name of the provider")
icon: str = pydantic.Field("", title="Provider icon", description="Icon of the provider")
enable_common_categories: bool = pydantic.Field(
True, title="Use common categories", description="Use common categories for this provider"
)
private: bool = pydantic.Field(
False,
title="If private provider",
description="Search in private provider's category will not search other providers",
)
configurable: bool = pydantic.Field(
False, title="If provider could be configed", description="True to call /config to config the provider"
)
refresh_setting: str = pydantic.Field(
"", title="Provider refresh setting path", description="Setting path to notify refresh provider"
)
enable_setting: str = pydantic.Field(
"", title="Provider enable setting path", description="Setting path to notify provider enable"
)
class ConfigModel(pydantic.BaseModel):
vendor: str = pydantic.Field(..., title="Vendor", description="Vendor providing the assets")
class AssetModel(pydantic.BaseModel):
identifier: str = pydantic.Field(
..., title="Asset identifier", description="Unique ID code, used for downloading and caching"
)
name: str = pydantic.Field(..., title="Asset name", description="Name of the asset")
version: str = pydantic.Field("", title="Asset version", description="Version of the asset")
published_at: str = pydantic.Field(
..., title="Publication date", description="Date the asset was published (in ISO-8601 format)."
)
categories: List[str] = pydantic.Field(
..., title="Asset categories", description="List of categories this asset is a part of"
)
tags: List[str] = pydantic.Field(list(), title="Asset tags", description="Tags describing the asset")
vendor: str = pydantic.Field(..., title="Vendor", description="Vendor providing the assets")
download_url: Optional[str] = pydantic.Field(
"", title="Download location", description="Location from where to download the asset"
)
product_url: str = pydantic.Field(
"", title="Product url", description="Product url for assets that might not be available to download directly"
)
price: float = pydantic.Field(0.0, title="Price", description="Price of the asset in US Dollars")
thumbnail: str = pydantic.Field(..., title="Thumbnail path", description="Public endpoint for the thumbnail")
    user: str = pydantic.Field(..., title="Asset user name", description="Name of the user of the asset")
fusions: List[dict] = pydantic.Field(..., title="Fusions", description="Dict of name and download url")
class Config:
schema_extra = {
"example": {
"identifier": "1c54053d-49dd-4e18-ba46-abbe49a905b0",
"name": "Astronaut",
"version": "1.0.1-beta",
"published_at": "2020-12-15T17:49:22+00:00",
"categories": ["/characters/clothing/work"],
"tags": ["space", "astronaut", "human"],
"vendor": "NVIDIA",
"download_url": "https://acme.org/downloads/character/astronaut.zip",
"product_url": "https://acme.org/products/purchase/astronaut",
"price": 10.99,
"thumbnail": "https://images.com/thumbnails/256x256/astronaut.png",
"fusions": [{"name": "Test", "download_url": "https://images.com/thumbnails/256x256/astronaut.png"}],
}
}
def to_dict(self) -> Dict:
return self.__dict__
class _Page(pydantic.BaseModel):
number: int = pydantic.Field(0, title="Page number", description="Page number to return from paginated search")
size: int = pydantic.Field(50, title="Number of results to return per page", ge=1, le=100)
class _Filter(pydantic.BaseModel):
categories: List[str] = pydantic.Field(
None, title="Filter by Category", description="List of categories to filter search results by"
)
class SearchCriteria(pydantic.BaseModel):
keywords: List[str] = pydantic.Field(None, title="Search terms", description="List of keywords for searching")
page: _Page = pydantic.Field(_Page(), title="Pagination options")
    sort: Tuple = pydantic.Field(None, title="Sort order", description="Tuple sort order, e.g. (price, desc) or (price, asc)")
filter: _Filter = pydantic.Field(_Filter(), title="Filter Options")
vendors: List[str] = pydantic.Field(
None, title="List of vendors", description="Query a subset of available vendors"
)
search_timeout: int = pydantic.Field(
60, title="Search timeout", description="Stop searches after timeout has been reached"
)
class Config:
schema_extra = {
"example": {
"keywords": ["GPU", "RTX"],
"page": {"number": 5, "size": 75},
"sort": ["price", "desc"],
"filter": {"categories": ["hardware", "electronics"]},
"vendors": ["Vendor1", "Vendor2"],
"search_timeout": 60,
}
}
| 5,849 | Python | 46.560975 | 119 | 0.652932 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/collector/abstract_collector.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AbstractCollector from omni.services.browser.asset
import abc
from typing import List
from ..models import AssetModel
class AbstractCollector(abc.ABC):
def __init__(self):
pass
@abc.abstractmethod
async def collect(self) -> List[AssetModel]:
"""
Collect assets
"""
return []
| 773 | Python | 28.76923 | 76 | 0.734799 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/collector/__init__.py | from .s3_collector import S3Collector
| 38 | Python | 18.499991 | 37 | 0.842105 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/collector/s3_collector.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AbstractCollector from omni.services.browser.asset
from typing import List, Optional, Tuple, Callable
import carb
import omni.client
from ..models import AssetModel
from .abstract_collector import AbstractCollector
THUMBNAIL_PATH = ".thumbs"
THUMBNAIL_SIZE = 256
THUMBNAIL_FULL_PATH = f"{THUMBNAIL_PATH}/{THUMBNAIL_SIZE}x{THUMBNAIL_SIZE}/"
class S3Collector(AbstractCollector):
def __init__(
self, url: str, vendor: str, filter_file_suffixes: Optional[List[str]] = [".usd", ".usda", ".usdc", ".usdz"]
) -> None:
self._url = url
if self._url.endswith("/"):
self._url = self._url[:-1]
self._filter_file_suffixes = filter_file_suffixes
self._vendor = vendor
self._asset_models = []
super().__init__()
async def collect(
self, default_thumbnail=None, on_folder_done_fn: Callable[[str, List[AssetModel]], None] = None
) -> List[AssetModel]:
await self._traverse_folder_async(
self._url, default_thumbnail=default_thumbnail, on_folder_done_fn=on_folder_done_fn
)
self._asset_models = [asset for asset in self._asset_models if asset.thumbnail != ""]
return self._asset_models
async def _traverse_folder_async(
self,
url: str,
recurse: bool = True,
default_thumbnail=None,
on_folder_done_fn: Callable[[str, List[AssetModel]], None] = None,
):
"""Traverse folder to retreive assets and thumbnails"""
if not url.endswith("/"):
url += "/"
entries = await self._list_folder_async(url)
if entries:
thumbnail_path = None
folder_asset_models = []
for entry in entries:
path = omni.client.combine_urls(url, entry.relative_path)
# "\" used in local path, convert to "/"
path = path.replace("\\", "/")
if entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN:
if recurse:
dirs = path.split("/")
sub_folder_name = dirs[-1]
if sub_folder_name == THUMBNAIL_PATH:
thumbnail_path = omni.client.combine_urls(url, THUMBNAIL_FULL_PATH)
else:
await self._traverse_folder_async(
path,
recurse=recurse,
default_thumbnail=default_thumbnail,
on_folder_done_fn=on_folder_done_fn,
)
else:
asset_model = self._add_asset_model(url, entry, default_thumbnail=default_thumbnail)
if asset_model is not None:
folder_asset_models.append(asset_model)
if thumbnail_path is not None:
# Only verify assets in same folder
await self._list_thumbnails(thumbnail_path, folder_asset_models)
if folder_asset_models and on_folder_done_fn:
on_folder_done_fn(url, folder_asset_models)
async def _list_folder_async(self, url: str) -> Optional[Tuple[omni.client.ListEntry]]:
"""List files on a s3 server folder"""
try:
(result, entries) = await omni.client.list_async(url)
if result == omni.client.Result.OK:
return entries
else:
carb.log_warn(f"Failed to access {url}")
return None
except Exception as e:
carb.log_error(str(e))
return None
def _add_asset_model(self, url: str, entry: omni.client.ListEntry, default_thumbnail=None) -> Optional[AssetModel]:
file_name = entry.relative_path
if self._filter_file_suffixes is not None:
pos = file_name.rfind(".")
file_suffix = file_name[pos:].lower()
if file_suffix not in self._filter_file_suffixes:
return None
        # Use the last path segment of the url as the first segment of the category url
pos = self._url.rfind("/")
if pos <= 0:
pos = 0
category = url[pos:]
if category[0] == "/":
category = category[1:]
if category and category[-1] == "/":
category = category[:-1]
# To match search by category, ignore unnecessary sub folders in category url
sub_categories = category.split("/")[0:3]
category = "/".join(sub_categories)
        # TODO: identifier/version/tags need to be confirmed
asset_model = AssetModel(
identifier=entry.hash or hash(url + entry.relative_path),
name=file_name,
version=entry.version or "",
published_at=entry.modified_time.timestamp(),
categories=[category],
tags=[],
vendor=self._vendor,
download_url=url + entry.relative_path,
product_url="",
price=0,
thumbnail=default_thumbnail or "", # Fill it later
user=entry.created_by or "",
fusions=[]
)
self._asset_models.append(asset_model)
return asset_model
    async def _list_thumbnails(self, url: str, folder_asset_models: List[AssetModel]) -> None:
        if len(folder_asset_models) == 0:
return
entries = await self._list_folder_async(url)
if entries:
for entry in entries:
thumbnail_name = entry.relative_path[:-4]
                for asset_model in folder_asset_models:
if thumbnail_name == asset_model.name or thumbnail_name == asset_model.name + ".auto":
asset_model.thumbnail = omni.client.combine_urls(url, entry.relative_path)
break
| 6,287 | Python | 39.050955 | 119 | 0.566248 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/services/asset.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from SortOrder from omni.services.browser.asset
import asyncio
from enum import Enum
from typing import Dict, List, Tuple
from fastapi import Depends
from omni.services.core import routers
from .dependencies import get_app_header, get_app_version
from ..store.base import AssetStoreGroupFacility
from ..models import AssetModel, ProviderModel, SearchCriteria, ConfigModel
router = routers.ServiceAPIRouter()
router.dependencies = [Depends(get_app_header), Depends(get_app_version)]
class SortOrder(str, Enum):
date = "date"
name = "name"
@router.get("/categories", response_model=Dict[str, Dict])
async def list_categories(
asset_store: AssetStoreGroupFacility = router.get_facility("asset_store"),
):
await asyncio.sleep(0)
return asset_store.get_categories()
@router.post("/search", response_model=Dict[str, Tuple[List[AssetModel], bool]])
async def search(search: SearchCriteria, asset_store: AssetStoreGroupFacility = router.get_facility("asset_store")):
return await asset_store.search(search, stores=search.vendors, search_timeout=60)
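# Example request body for the search endpoint (values are hypothetical):
#   {"keywords": ["chair"], "page": {"number": 1, "size": 25}, "vendors": ["ArtecCloud"]}
# The response maps each store name to an (assets, more-results-available) tuple.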
@router.get("/providers", response_model=Dict[str, ProviderModel])
async def list_vendors(
asset_store: AssetStoreGroupFacility = router.get_facility("asset_store"),
):
await asyncio.sleep(0)
return asset_store.get_providers()
@router.post("/config", response_model=None)
async def config(config_params: ConfigModel, asset_store: AssetStoreGroupFacility = router.get_facility("asset_store")):
await asyncio.sleep(0)
return asset_store.config(config_params.vendor)
| 2,002 | Python | 33.534482 | 120 | 0.77023 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/store/base.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from BaseAssetStore, AssetStoreGroupFacility from omni.services.browser.asset
import abc
import asyncio
import traceback
import omni.client
import zipfile
from typing import Dict, List, Tuple, Callable
import carb
from omni.services.facilities.base import Facility
from ..models import AssetModel, ProviderModel, SearchCriteria
class BaseAssetStore(Facility, abc.ABC):
def __init__(self, store_id: str) -> None:
super().__init__()
self._store_id = store_id
self._categories = {}
self._download_progress: Dict[str, float] = {}
def authorized(self) -> bool:
"""Override this method to force authentication flow."""
return True
async def authenticate(self, username: str, password: str):
"""Override this method to implement authentication method."""
pass
@abc.abstractmethod
async def _search(self, search_criteria: SearchCriteria) -> Tuple[List[AssetModel], bool]:
"""Searches the asset store.
This function needs to be implemented as part of an implementation of the BaseAssetStore.
This function is called by the public `search` function that will wrap this function in a timeout.
"""
pass
async def search(self, search_criteria: SearchCriteria, search_timeout: int) -> Tuple[List[AssetModel], bool]:
"""Search the asset store
        Will raise an error and stop the search if ``search_timeout`` is exceeded.
Args:
            search_criteria (SearchCriteria): Dictionary with supported search fields.
search_timeout (int): Timeout a search after `search_timeout` seconds. (default: 60 seconds)
Returns:
List of asset models and if more.
Raises:
asyncio.TimeoutError
"""
return await asyncio.wait_for(self._search(search_criteria), timeout=search_timeout)
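    # Usage sketch (the keyword is hypothetical):
    #   try:
    #       assets, more = await store.search(SearchCriteria(keywords=["chair"]), search_timeout=60)
    #   except asyncio.TimeoutError:
    #       ...  # surface the timeout to the caller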
async def _download(self, asset: AssetModel, dest_url: str, on_progress_fn: Callable[[float], None] = None) -> Dict:
"""Default Download handler using omni.client.
This function needs to be implemented as part of an implementation of the BaseAssetStore.
This function is called by the public `download` function that will wrap this function in a timeout.
"""
ret_value = {"url": None}
if asset and asset.download_url:
file_name = asset.download_url.split("/")[-1]
dest_url = f"{dest_url}/{file_name}"
carb.log_info(f"Download {asset.download_url} to {dest_url}")
result = await omni.client.copy_async(
asset.download_url, dest_url, behavior=omni.client.CopyBehavior.OVERWRITE
)
ret_value["status"] = result
if result != omni.client.Result.OK:
carb.log_error(f"Failed to download {asset.download_url} to {dest_url}")
return ret_value
if asset.download_url.lower().endswith(".zip"):
# unzip
output_url = dest_url[:-4]
await omni.client.create_folder_async(output_url)
carb.log_info(f"Unzip {dest_url} to {output_url}")
with zipfile.ZipFile(dest_url, "r") as zip_ref:
zip_ref.extractall(output_url)
dest_url = output_url
ret_value["url"] = dest_url
return ret_value
async def download(
self, asset: AssetModel, dest_url: str, on_progress_fn: Callable[[float], None] = None, timeout: int = 600
) -> Dict:
"""Downloads an asset from the asset store.
Args:
asset (AssetModel): The asset descriptor.
dest_url (str): Url of the destination file.
Kwargs:
timeout (int): Timeout a download after this amount of time. (default: 10 mins.)
Returns:
Response Dict.
Raises:
asyncio.TimeoutError
"""
self._download_progress[asset.identifier] = 0
def __on_download_progress(progress):
self._download_progress[asset.identifier] = progress
if on_progress_fn:
on_progress_fn(progress)
download_future = asyncio.Task(self._download(asset, dest_url, on_progress_fn=__on_download_progress))
while True:
last_progress = self._download_progress[asset.identifier]
done, pending = await asyncio.wait([download_future], timeout=timeout)
if done:
return download_future.result()
else:
                # Download not completed yet.
                # If progress changed, keep waiting for completion;
                # otherwise, treat it as a timeout.
if self._download_progress[asset.identifier] == last_progress:
carb.log_warn(f"[{asset.name}]: download timeout")
download_future.cancel()
return {"status": omni.client.Result.ERROR_ACCESS_LOST}
def categories(self) -> Dict[str, List[str]]:
"""Return the list of predefined categories."""
return self._categories
def id(self) -> str:
"""Return store id."""
return self._store_id
def provider(self) -> ProviderModel:
"""Return provider info"""
return ProviderModel(name=self._store_id)
def config(self) -> None:
"""Entry point to config the provider"""
pass
class AssetStoreGroupFacility(Facility):
def __init__(self):
self._stores: Dict[str, BaseAssetStore] = {}
self._updated = True
super().__init__()
def register_store(self, name: str, store: BaseAssetStore) -> None:
self._stores[name] = store
self._updated = True
def unregister_store(self, store: BaseAssetStore) -> None:
self._stores.pop(store.id())
self._updated = True
def clear_stores(self) -> None:
self._stores = {}
def get_registered_stores(self) -> List[str]:
"""Return list of all registered stores."""
return list(self._stores.keys())
def get_store(self, store_name: str) -> BaseAssetStore:
return self._stores.get(store_name)
def get_providers(self) -> Dict[str, ProviderModel]:
providers = {}
for name, store in self._stores.items():
providers[store.id()] = store.provider()
return providers
def get_categories(self):
categories = {}
for store_name, store in self._stores.items():
categories[store_name] = store.categories()
return categories
def config(self, name: str):
if name in self._stores:
self._stores[name].config()
async def search(
self, search_criteria: SearchCriteria, stores: List[str] = None, search_timeout: int = 60
) -> Dict[str, Tuple[List[AssetModel], bool]]:
stores = stores or self.get_registered_stores()
queries: Dict[str, asyncio.Future] = {}
for store_name in stores:
queries[store_name] = asyncio.ensure_future(
                # Use a deep copy of the ``search_criteria`` model in order to prevent an ``AssetStore``'s
                # ``search()`` operation from mutating the object in the body of the function, which would end up
                # affecting the search criteria of downstream ``AssetStore``s:
self._stores[store_name].search(
search_criteria=search_criteria.copy(deep=True), search_timeout=search_timeout
)
)
await asyncio.gather(*queries.values(), return_exceptions=True)
results = {}
for store, query in queries.items():
try:
results[store] = query.result()
except Exception:
carb.log_warn(f"Failed to fetch results for store {store}. Reason:")
carb.log_warn(traceback.format_exc())
return results
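    # Result shape sketch: {"StoreName": ([AssetModel, ...], more_flag), ...};
    # stores whose query raised an exception are omitted after the warnings above.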
| 8,364 | Python | 36.34375 | 120 | 0.612984 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/store/local/local.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from omni.kit.browser.asset_provider.local class LocalFolderAssetProvider
import asyncio
import carb
import carb.settings
import json
from typing import Dict, List, Optional
from .static import StaticAssetStore
from ...models import AssetModel, ProviderModel
from ...collector import S3Collector
from pathlib import Path
import omni.kit.app
from .my_assets_paths import MyAssetsPathsWindow
from .constants import SETTING_ROOT, SETTING_STORE_ENABLE
CURRENT_PATH = Path(__file__).parent
DATA_PATH = CURRENT_PATH.parent.parent.parent.parent.parent.parent.joinpath("data")
PROVIDER_ID = "My Assets"
SETTING_STORE_RERESH = SETTING_ROOT + "refresh"
SETTING_STORE_SEARCH_SUB_FOLDERS = SETTING_ROOT + "searchSubFolders"
SETTING_STORE_FOLDER = SETTING_ROOT + "folders"
SETTING_PERSISTENT_STORE_FOLDER = "/persistent" + SETTING_STORE_FOLDER
SETTING_STORE_FOLDER_CHANGED = SETTING_ROOT + "folderChanged"
DEFAULT_THUMBNAIL = f"{DATA_PATH}/usd_stage_256.png"
CACHE_FILE = "${shared_documents}/my_assets_2.json"
class LocalFolderAssetProvider(StaticAssetStore):
""" Local file system asset provider
"""
def __init__(self):
super().__init__(PROVIDER_ID, [])
self._settings = carb.settings.get_settings()
self._my_assets_window: Optional[MyAssetsPathsWindow] = None
self._folders = self._get_local_folders()
self._assets: Dict[str, Dict[str, List[AssetModel]]] = {}
self._json_file = carb.tokens.get_tokens_interface().resolve(CACHE_FILE)
# First load assets from saved file
self._load_assets()
# Refresh assets in background
asyncio.ensure_future(self._collect_async(self._folders))
self._refresh_folders_sub = omni.kit.app.SettingChangeSubscription(
SETTING_PERSISTENT_STORE_FOLDER,
lambda item, event_type: self._on_path_changed(),
)
self._folder_changed_sub = omni.kit.app.SettingChangeSubscription(
SETTING_STORE_FOLDER_CHANGED,
lambda item, event_type: self._on_folder_changed(event_type),
)
def destroy(self):
self._refresh_folders_sub = None
self._folder_changed_sub = None
if self._my_assets_window:
self._my_assets_window.destroy()
self._my_assets_window = None
async def _collect_async(self, folders) -> List[AssetModel]:
        # Collect assets from folders into the json file
for url in folders:
await self._collect_folder_async(url)
self._save_assets()
async def _collect_folder_async(self, folder):
carb.log_info(f"Starting collecting {folder}...")
if folder not in self._assets:
self._assets[folder] = {}
self._scanned_categories = []
scanner = S3Collector(folder, self._store_id)
self.__refresh = False
await scanner.collect(default_thumbnail=DEFAULT_THUMBNAIL, on_folder_done_fn=self._on_folder_collected)
        # OM-77818: Only refresh when the whole folder is collected instead of refreshing for every sub folder
if self.__refresh:
self._refresh_categories()
# Remove assets not found during collection
remove_categories = [category for category in self._assets[folder] if category not in self._scanned_categories]
if remove_categories:
carb.log_info(f" Remove {remove_categories} from {folder}")
for category in remove_categories:
self._assets[folder].pop(category)
self._refresh_categories()
def _filter_by_category(self, categories: List[str]) -> List[AssetModel]:
search_sub_folders = self._settings.get(SETTING_STORE_SEARCH_SUB_FOLDERS)
filtered: List[AssetModel] = []
for _, folder in self._assets.items():
for _, assets in folder.items():
if categories:
for item in assets:
for item_category in item.categories:
if search_sub_folders:
if any(item_category.lower().startswith(category.lower()) for category in categories):
filtered.append(item)
break
else:
                            # Here make sure the category is an exact match
if any(category.lower() == item_category.lower() for category in categories):
break
else:
filtered.extend(assets)
return filtered
def provider(self) -> ProviderModel:
"""Return provider info"""
return ProviderModel(
name=self._store_id,
icon=f"{DATA_PATH}/folder.svg",
private=True,
configurable=True,
refresh_setting=SETTING_STORE_RERESH,
enable_setting=SETTING_STORE_ENABLE,
)
def config(self) -> None:
"""Entry point to config the provider"""
if self._my_assets_window:
            # Always destroy the old window to make sure the latest settings are loaded when showing the config window
self._my_assets_window.destroy()
self._my_assets_window = MyAssetsPathsWindow()
def _get_local_folders(self) -> List[str]:
folders = self._settings.get(SETTING_PERSISTENT_STORE_FOLDER)
if not folders:
folders = self._settings.get(SETTING_STORE_FOLDER)
return folders
def _on_path_changed(self):
folders = self._get_local_folders()
if folders != self._folders:
# Refresh assets
append_folders = [folder for folder in folders if folder not in self._folders]
remove_folders = [folder for folder in self._folders if folder not in folders]
self._folders = folders
if remove_folders:
for folder in remove_folders:
if folder in self._assets:
self._assets.pop(folder)
self._refresh_categories()
if append_folders:
asyncio.ensure_future(self._collect_async(append_folders))
def _on_folder_changed(self, event_type):
if event_type != carb.settings.ChangeEventType.CHANGED:
return
folder = self._settings.get(SETTING_STORE_FOLDER_CHANGED)
if folder:
asyncio.ensure_future(self._collect_async([folder]))
def _on_folder_collected(self, url: str, asset_models: List[AssetModel]) -> None:
carb.log_info(f"{url} collected with {len(asset_models)} assets")
self._scanned_categories.append(url)
for folder in self._folders:
if url.startswith(folder):
break
else:
return
        # Append assets; a new category or changed assets both trigger a refresh
        if url in self._assets[folder]:
            if self._assets[folder][url] == asset_models:
                # Do nothing since the assets have not changed
                return
        self._assets[folder][url] = asset_models
        self.__refresh = True
def _refresh_categories(self) -> None:
self._load_categories()
# Notify to refresh
self._settings.set(SETTING_STORE_RERESH, True)
def _load_categories(self) -> None:
# Update categories
categories = set()
for _, folder in self._assets.items():
for _, assets in folder.items():
for asset in assets:
categories.update(asset.categories)
# Generate category list
self._categories = {}
for category in categories:
folders = category.split("/")
root = folders[0]
if root not in self._categories:
self._categories[root] = []
if len(folders) > 1:
sub = "/".join(folders[1:])
self._categories[root].append(sub)
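        # Example (hypothetical data): categories {"vehicles/cars/suv", "vehicles/vans"}
        # become {"vehicles": ["cars/suv", "vans"]}: the first path segment is the root
        # and the remainder is kept as a sub-category path.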
def _save_assets(self):
result = {}
for folder in self._assets:
result[folder] = {}
for category in self._assets[folder]:
result[folder][category] = []
for data in self._assets[folder][category]:
result[folder][category].append(data.to_dict())
        try:
            # The context manager closes the file, including on error paths
            with open(self._json_file, "w") as json_file:
                json.dump(result, json_file, indent=4)
        except FileNotFoundError:
            carb.log_warn(f"Failed to open {self._json_file}!")
        except PermissionError:
            carb.log_warn(f"Cannot write to {self._json_file}: permission denied!")
        except Exception as e:
            carb.log_warn(f"Unknown failure to write to {self._json_file}: {e}")
def _load_assets(self):
asset_json = None
try:
with open(self._json_file, "r") as json_file:
asset_json = json.load(json_file)
except FileNotFoundError:
carb.log_info(f"Failed to open {self._json_file}!")
except PermissionError:
carb.log_error(f"Cannot read {self._json_file}: permission denied!")
except Exception as exc:
carb.log_error(f"Unknown failure to read {self._json_file}: {exc}")
self._assets = {}
self._categories = {}
if asset_json is None:
return
for folder in asset_json:
if folder not in self._folders:
continue
self._assets[folder] = {}
for category in asset_json[folder]:
self._assets[folder][category] = []
for asset in asset_json[folder][category]:
asset_model = AssetModel(**asset)
self._assets[folder][category].append(asset_model)
self._load_categories()
| 10,379 | Python | 37.731343 | 119 | 0.596493 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/store/local/style.py | # Forked from omni.kit.browser.asset_provider.local
from pathlib import Path
CURRENT_PATH = Path(__file__).parent
ICON_PATH = CURRENT_PATH.parent.parent.parent.parent.parent.parent.joinpath("icons")
MY_ASSETS_STYLE = {
"TreeView": {"background_color": 0xFF23211F, "background_selected_color": 0xFF444444},
"TreeView.Item": {"margin": 14, "color": 0xFF000055},
# "Field": {"background_color": 0xFF333322},
"Label::header": {"margin": 4},
"Label": {"margin": 5},
"Label::builtin": {"color": 0xFF909090},
"Label::config": {"color": 0xFFDDDDDD},
"ItemButton": {"padding": 2, "background_color": 0xFF444444, "border_radius": 4},
"ItemButton.Image::add": {"image_url": f"{ICON_PATH}/plus.svg", "color": 0xFF06C66B},
"ItemButton.Image::remove": {"image_url": f"{ICON_PATH}/trash.svg", "color": 0xFF1010C6},
"ItemButton.Image::clean": {"image_url": f"{ICON_PATH}/broom.svg", "color": 0xFF5EDAFA},
"ItemButton.Image::update": {"image_url": f"{ICON_PATH}/refresh.svg", "color": 0xFF5EDAFA},
"ItemButton:hovered": {"background_color": 0xFF333333},
"ItemButton:pressed": {"background_color": 0xFF222222},
}
| 1,155 | Python | 45.239998 | 95 | 0.661472 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/store/local/constants.py | # Forked from omni.kit.browser.asset_provider.local
SETTING_ROOT = "/exts/omni.kit.browser.asset_provider.local/"
SETTING_STORE_ENABLE = SETTING_ROOT + "enable"
| 162 | Python | 31.599994 | 61 | 0.765432 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/store/local/__init__.py | from .local import LocalFolderAssetProvider
| 44 | Python | 21.499989 | 43 | 0.886364 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/store/local/my_assets_paths.py | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from omni.kit.browser.asset_provider.local
from functools import lru_cache
import asyncio
from typing import Optional, List, Callable
import carb
import carb.dictionary
import carb.settings
import omni.ui as ui
import omni.kit.app
from omni.kit.window.filepicker import FilePickerDialog
from .style import MY_ASSETS_STYLE
SETTING_MY_ASSET_FOLDERS = "/persistent/exts/omni.kit.browser.asset_provider.local/folders"
class PathItem(ui.AbstractItem):
def __init__(self, path, add_dummy=False):
super().__init__()
self.path_model = ui.SimpleStringModel(path)
self.add_dummy = add_dummy
def __repr__(self):
return f"[PathItem]: {self.path_model.as_string}"
class MyAssetPathsModel(ui.AbstractItemModel):
def __init__(self, on_path_changed_fn: Callable[[None], None]):
super().__init__()
self._on_path_changed_fn = on_path_changed_fn
self._settings = carb.settings.get_settings()
self._load()
self._add_dummy = PathItem("", add_dummy=True)
def destroy(self):
self._children = []
def get_item_children(self, item: Optional[ui.AbstractItem] = None) -> List[ui.AbstractItem]:
"""Returns all the children when the widget asks it."""
if item is not None:
return []
return self._children + [self._add_dummy]
def get_item_value_model_count(self, item: PathItem):
"""The number of columns"""
return 3
def get_item_value_model(self, item: PathItem, column_id: int):
if column_id == 1:
return item.path_model
return None
def _load(self):
self._children = []
folders = self._settings.get(SETTING_MY_ASSET_FOLDERS)
if folders:
for folder in folders:
item = PathItem(folder)
self._children.append(item)
self._item_changed(None)
def add_empty(self):
self._children.append(PathItem(""))
self._item_changed(None)
def add_item(self, item: PathItem):
self._children.append(item)
self.save()
self._item_changed(None)
def remove_item(self, item: PathItem):
self._children.remove(item)
self.save()
self._item_changed(None)
def save(self):
paths = [c.path_model.as_string for c in self._children]
self._settings.set(SETTING_MY_ASSET_FOLDERS, paths)
if self._on_path_changed_fn:
self._on_path_changed_fn()
class MyAssetPathDelegate(ui.AbstractItemDelegate):
def __init__(self):
super().__init__()
self._pick_folder_dialog: Optional[FilePickerDialog] = None
def destroy(self):
self._pick_folder_dialog = None
def build_widget(self, model: MyAssetPathsModel, item: PathItem, column_id: int, level, expanded):
"""Create a widget per column per item"""
if column_id == 0 and not item.add_dummy:
def open(item_=item):
# Import it here instead of on the file root because it has long import time.
path = item_.path_model.as_string
if path:
import webbrowser
webbrowser.open(path)
ui.Button("open", width=20, clicked_fn=open, tooltip="Open path using OS file explorer.")
elif column_id == 1 and not item.add_dummy:
value_model = model.get_item_value_model(item, column_id)
ui.StringField(value_model)
elif column_id == 2:
def on_click(item_=item):
if item.add_dummy:
if self._pick_folder_dialog is None:
self._pick_folder_dialog = self._create_filepicker(
"Select Directory for My Assets",
click_apply_fn=lambda url, m=model: self._on_folder_picked(url, m),
dir_only=True,
)
self._pick_folder_dialog.show()
else:
model.remove_item(item_)
with ui.HStack(width=60):
ui.Spacer(width=10)
ui.Button(
name=("add" if item.add_dummy else "remove"),
style_type_name_override="ItemButton",
width=20,
height=20,
clicked_fn=on_click,
)
ui.Spacer(width=4)
ui.Spacer()
def build_header(self, column_id: int):
COLUMNS = ["", "folder", "edit"]
with ui.HStack(height=24):
ui.Spacer(width=10)
ui.Label(COLUMNS[column_id], name="header")
def _create_filepicker(
self,
title: str,
filters: list = ["All Files (*)"],
click_apply_fn: Callable = None,
error_fn: Callable = None,
dir_only: bool = False,
) -> FilePickerDialog:
async def on_click_handler(
filename: str, dirname: str, dialog: FilePickerDialog, click_fn: Callable, dir_only: bool
):
fullpath = None
if dir_only:
fullpath = dirname
else:
if dirname:
fullpath = f"{dirname}/{filename}"
elif filename:
fullpath = filename
if click_fn:
click_fn(fullpath)
dialog.hide()
dialog = FilePickerDialog(
title,
allow_multi_selection=False,
apply_button_label="Select",
click_apply_handler=lambda filename, dirname: asyncio.ensure_future(
on_click_handler(filename, dirname, dialog, click_apply_fn, dir_only)
),
click_cancel_handler=lambda filename, dirname: dialog.hide(),
item_filter_options=filters,
error_handler=error_fn,
)
dialog.hide()
return dialog
def _on_folder_picked(self, url: Optional[str], model: ui.AbstractItemModel) -> None:
item = PathItem(url)
model.add_item(item)
class MyAssetsPathsWidget(object):
def __init__(self, on_path_changed_fn: Callable[[None], None]):
self._model = MyAssetPathsModel(on_path_changed_fn)
self._delegate = MyAssetPathDelegate()
with ui.ScrollingFrame(
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
style_type_name_override="TreeView",
):
tree_view = ui.TreeView(self._model, delegate=self._delegate, root_visible=False, header_visible=True)
tree_view.column_widths = [ui.Pixel(46), ui.Fraction(1), ui.Pixel(60)]
def destroy(self):
self._model.destroy()
self._model = None
self._delegate.destroy()
self._delegate = None
class MyAssetsPathsWindow(ui.Window):
def __init__(self, on_path_changed_fn: Callable[[None], None] = None):
super().__init__("My Assets Folders", width=500, height=600)
self._on_path_changed_fn = on_path_changed_fn
self._widget: Optional[MyAssetsPathsWidget] = None
self.frame.set_build_fn(self._build_ui)
self.frame.set_style(MY_ASSETS_STYLE)
def destroy(self):
if self._widget is not None:
self._widget.destroy()
self._widget = None
self.visible = False
def _build_ui(self):
with self.frame:
self._widget = MyAssetsPathsWidget(self._on_path_changed_fn)
| 8,010 | Python | 32.659664 | 114 | 0.584644 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/store/local/static.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from StaticAssetStore from omni.services.browser.asset
from typing import List, Tuple
from ...models import AssetModel, SearchCriteria
from ..base import BaseAssetStore
class StaticAssetStore(BaseAssetStore):
def __init__(self, store_id, data=List[AssetModel]) -> None:
super().__init__(store_id=store_id)
self._data: List[AssetModel] = data
async def _search(self, search_criteria: SearchCriteria) -> Tuple[List[AssetModel], bool]:
keywords = search_criteria.keywords or []
categories = search_criteria.filter.categories or []
page = search_criteria.page
sort = search_criteria.sort
filtered = self._filter_by_category(categories)
selected: List[AssetModel] = []
if keywords:
for item in filtered:
if any(keyword in item.name for keyword in keywords) or any(
keyword in item.tags for keyword in keywords
):
selected.append(item)
else:
selected = filtered
if sort:
key, order = sort
reverse = True if order == "desc" else False
if key == "created_at":
key = "published_at"
selected = sorted(selected, key=lambda item: getattr(item, key), reverse=reverse)
start_index = 0
end_index = page.size
# For consistency with external vendors, page count starts at 1, not 0.
if page.number > 1:
start_index = page.size * (page.number - 1)
end_index = start_index + page.size
assets = selected[start_index:end_index]
return (assets, len(assets) == page.size)
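        # Worked example (numbers are hypothetical): page.size=10, page.number=2
        # -> start_index=10, end_index=20, so items 10..19 are returned; the boolean
        # reports "possibly more" only when a full page came back.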
def _filter_by_category(self, categories: List[str]) -> List[AssetModel]:
filtered: List[AssetModel] = []
if categories:
for item in self._data:
for item_category in item.categories:
if any(category.lower() in item_category.lower() for category in categories):
filtered.append(item)
break
else:
filtered = self._data
return filtered
| 2,602 | Python | 36.724637 | 97 | 0.621445 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/tests/dummy.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from ..models import AssetModel
from ..store.local import StaticAssetStore
class DummyAssetStore(StaticAssetStore):
""" Hard coded data set of assets.
This is a hardcoded list of assets used as a reference implementation.
This store gets instantiated as part of the extension and passed through as a facility to the various endpoints.
    More advanced implementations can be added, and as long as the API of the facility is followed they can be swapped in without further changes.
"""
def __init__(self) -> None:
data = [
AssetModel(
identifier="1c54053d-49dd-4e18-ba46-abbe49a905b0",
name="car-suv-1",
version="1.0.1-beta",
published_at="2020-12-15T17:49:22+00:00",
categories=["/vehicles/cars/suv"],
tags=["vehicle", "cars", "suv"],
vendor="NVIDIA",
download_url="https://acme.org/downloads/vehicles/cars/suv/car-suv-1.zip",
product_url="https://acme.org/products/purchase/car-suv-1",
price=10.99,
thumbnail="https://images.com/thumbnails/256x256/car-suv-1.png",
),
AssetModel(
identifier="3708fe73-6b82-449a-8e6f-96c6f443a93c",
name="car-suv-2",
version="1.0.1-beta",
published_at="2020-12-15T17:49:22+00:00",
categories=["/vehicles/cars/suv"],
tags=["vehicle", "cars", "suv"],
vendor="NVIDIA",
download_url="https://acme.org/downloads/vehicles/cars/suv/car-suv-2.zip",
product_url="https://acme.org/products/purchase/car-suv-2",
price=12.99,
thumbnail="https://images.com/thumbnails/256x256/car-suv-2.png",
),
AssetModel(
identifier="9dcf54e8-76f5-49e0-8155-c4529b5ed059",
name="car-sedan-1",
version="1.0.1-beta",
published_at="2020-12-15T17:49:22+00:00",
categories=["/vehicles/cars/sedan"],
tags=["vehicle", "cars", "sedan"],
vendor="NVIDIA",
download_url="https://acme.org/downloads/vehicles/cars/suv/car-sedan-1.zip",
product_url="https://acme.org/products/purchase/car-sedan-1",
price=13.99,
thumbnail="https://images.com/thumbnails/256x256/car-sedan-1.png",
),
AssetModel(
identifier="fc6d47b9-8243-4694-8c44-3b66cbbd7d24",
name="car-sedan-2",
version="1.0.1-beta",
published_at="2020-12-15T17:49:22+00:00",
categories=["/vehicles/cars/sedan"],
tags=["vehicle", "cars", "sedan"],
vendor="NVIDIA",
download_url="https://acme.org/downloads/vehicles/cars/suv/car-sedan-2.zip",
product_url="https://acme.org/products/purchase/car-sedan-2",
price=14.99,
thumbnail="https://images.com/thumbnails/256x256/car-sedan-2.png",
),
AssetModel(
identifier="fc6d47b9-8243-4694-8c44-3b66cbbd7d24",
name="car-sedan-3",
version="1.0.1-beta",
published_at="2020-12-15T17:49:22+00:00",
categories=["/vehicles/cars/sedan"],
tags=["vehicle", "cars", "sedan"],
vendor="NVIDIA",
download_url="https://acme.org/downloads/vehicles/cars/suv/car-sedan-3.zip",
product_url="https://acme.org/products/purchase/car-sedan-3",
price=15.99,
thumbnail="https://images.com/thumbnails/256x256/car-sedan-3.png",
),
]
super().__init__("DUMMY", data)
| 4,315 | Python | 45.408602 | 152 | 0.562225 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/artec/services/browser/asset/tests/test_service.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import carb.settings
import omni.kit.test
from omni.services.client import AsyncClient
from omni.services.core import main
from ..models import SearchCriteria, _Filter
from .dummy import DummyAssetStore
from ..store.base import AssetStoreGroupFacility
from ..services.asset import router
from pathlib import Path
CURRENT_PATH = Path(__file__).parent
ASSETS_DATA_PATH = CURRENT_PATH.parent.parent.parent.parent.parent.joinpath("data").joinpath("assets")
class TestAssetGroupFacility(omni.kit.test.AsyncTestCaseFailOnLogError):
async def setUp(self):
self._asset_store_group = AssetStoreGroupFacility()
router.register_facility("asset_store", self._asset_store_group)
api_version = carb.settings.get_settings_interface().get("exts/artec.services.browser.asset/api_version")
self._client = AsyncClient(f"local:///{api_version}", app=main.get_app())
async def tearDown(self):
await self._client.stop_async()
self._client = None
async def test_search_multiple_stores(self):
self._asset_store_group.clear_stores()
self._asset_store_group.register_store("DUMMY", DummyAssetStore())
res = await self._client.assets.search.post(filter={"categories": ["/Vegetation/Plant_Tropical"]})
self.assertIn("NVIDIA", res)
self.assertIn("DUMMY", res)
self.assertEqual(len(res["NVIDIA"][0]), 17)
self.assertEqual(len(res["DUMMY"][0]), 0)
async def test_search_specific_store(self):
self._asset_store_group.clear_stores()
self._asset_store_group.register_store("DUMMY", DummyAssetStore())
res = await self._client.assets.search.post(
filter={"categories": ["/Vegetation/Plant_Tropical"]}, vendors=["NVIDIA"]
)
self.assertIn("NVIDIA", res)
self.assertNotIn("DUMMY", res)
self.assertEqual(len(res["NVIDIA"][0]), 17)
async def test_page_items(self):
self._asset_store_group.clear_stores()
res = await self._client.assets.search.post(
filter={"categories": ["/Vegetation/Plant_Tropical"]}, page={"size": 10}
)
self.assertEqual(len(res["NVIDIA"][0]), 10)
async def test_page_items_second_page_larger_size(self):
self._asset_store_group.clear_stores()
res = await self._client.assets.search.post(
filter={"categories": ["/Vegetation/Plant_Tropical"]}, page={"size": 10, "number": 2}
)
self.assertEqual(len(res["NVIDIA"][0]), 7)
async def test_item_order_by_price_ascending(self):
self._asset_store_group.clear_stores()
self._asset_store_group.register_store("DUMMY", DummyAssetStore())
res = await self._client.assets.search.post(keywords=["cars"], sort=["price", "asc"])
retrieved_prices = []
for item in res["DUMMY"][0]:
retrieved_prices.append(item["price"])
self.assertEqual(retrieved_prices, [10.99, 12.99, 13.99, 14.99, 15.99])
async def test_item_order_by_price_descending(self):
self._asset_store_group.clear_stores()
self._asset_store_group.register_store("DUMMY", DummyAssetStore())
res = await self._client.assets.search.post(keywords=["cars"], sort=["price", "desc"])
retrieved_prices = []
for item in res["DUMMY"][0]:
retrieved_prices.append(item["price"])
self.assertEqual(retrieved_prices, list(reversed([10.99, 12.99, 13.99, 14.99, 15.99])))
class TestDummyAssetStore(omni.kit.test.AsyncTestCaseFailOnLogError):
async def test_search_no_criteria(self):
store = DummyAssetStore()
(result, *_) = await store.search(search_criteria=SearchCriteria(), search_timeout=60)
self.assertEqual(len(result), 5)
async def test_search_category(self):
store = DummyAssetStore()
search = SearchCriteria(filter=_Filter(categories=["/vehicles/cars/sedan"]))
(result, *_) = await store.search(search_criteria=search, search_timeout=60)
self.assertEqual(len(result), 3)
async def test_search_order_by_name(self):
store = DummyAssetStore()
search = SearchCriteria(keywords=["sedan"], sort=["name", "desc"])
(result, *_) = await store.search(search_criteria=search, search_timeout=60)
retrieved_names = []
for item in result:
retrieved_names.append(item.name)
self.assertEqual(retrieved_names, ["car-sedan-3", "car-sedan-2", "car-sedan-1"])
| 4,909 | Python | 37.661417 | 113 | 0.667142 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2023-05-15
- Initial version of extension | 150 | Markdown | 20.571426 | 80 | 0.693333 |
ArtecGroup/omni-artec-asset-browser/exts/artec.services.browser.asset/docs/README.md | # Artec asset browser service [artec.services.browsers.asset]
## About
Complementary extension for the Artec Cloud asset browser
Forked from omni.services.browsers.asset
| 168 | Markdown | 23.142854 | 61 | 0.815476 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.5"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["Artec"]
# The title and description fields are primarily for displaying extension info in UI
title = "Browser of Models on Artec Cloud"
description="omni kit browser artec cloud models extension."
# URL of the extension source repository.
repository = ""
# Keywords for the extension
keywords = ['browser', 'asset', 'model', 'artec', 'cloud']
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
category = "services"
readme = "docs/README.md"
preview_image = "data/preview.png"
icon = "data/artec_cloud.png"
[dependencies]
"omni.kit.browser.core" = {version="2.1.5"}
"artec.services.browser.asset" = {version="1.0.5"}
"omni.usd" = {}
"omni.services.client" = {version="0.4.0"}
"omni.kit.window.content_browser" = { optional=true }
"omni.kit.window.filepicker" = {}
# Main python module this extension provides, it will be publicly available as "import artec.asset.browser".
[[python.module]]
name = "artec.asset.browser"
[settings]
exts."artec.asset.browser".provider = "local://"
exts."artec.asset.browser".showCategory = ""
exts."artec.asset.browser".pageSize = 25
exts."artec.asset.browser".hoverWindow = false
exts."artec.asset.browser".singleProvider = true
exts."artec.asset.browser".appSettings = "/persistent/app/artec_asset_browser"
exts."artec.asset.browser".autoScroll = true
exts."artec.asset.browser".enable = true
exts."artec.asset.browser".providerId = "ArtecCloud"
exts."artec.asset.browser".maxCountPerPage = 20
exts."artec.asset.browser".modelsUrl = "https://cloud.artec3d.com/api/omni/1.0/projects"
exts."artec.asset.browser".cloudSearchUrl = "https://cloud.artec3d.com/api/omni/1.0/projects.json"
exts."artec.asset.browser".authorizeUrl = "https://cloud.artec3d.com/api/omni/1.0/sessions"
[[test]]
dependencies = [
"omni.kit.renderer.core",
"omni.kit.renderer.capture",
"artec.services.browser.asset",
"omni.kit.browser.asset_provider.s3"
]
args = [
"--no-window",
"--/app/window/dpiScaleOverride=1.0",
"--/app/window/scaleToMonitor=false",
]
stdoutFailPatterns.exclude = [
"*gpu.foundation.plugin'>: Failed to load texture*", # Leak
"*[gpu.foundation.plugin] Failed to load texture*",
"*[gpu.foundation.plugin] Failed to read texture file*",
]
| 2,532 | TOML | 33.229729 | 108 | 0.723539 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/popup_menu.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore class AbstractPopupMenu, FilterMenu
import abc
from typing import Optional, List
from omni import ui
from .style import POPUP_MENU_STYLE
class AbstractPopupMenu(ui.Window):
"""
Represent a popup window to show popup menu with a title
"""
def __init__(self, title: str):
self._title = title
window_flags = (
ui.WINDOW_FLAGS_NO_RESIZE
| ui.WINDOW_FLAGS_POPUP
| ui.WINDOW_FLAGS_NO_TITLE_BAR
| ui.WINDOW_FLAGS_NO_SCROLLBAR
)
super().__init__(title, width=0, height=0, padding_x=0, padding_y=0, flags=window_flags)
self.frame.set_build_fn(self._build_ui)
self.frame.set_style(POPUP_MENU_STYLE)
def destroy(self):
self.visible = False
def _build_ui(self) -> None:
with self.frame:
with ui.VStack(height=0):
with ui.ZStack(height=0):
ui.Rectangle(height=40, style_type_name_override="Title.Background")
with ui.HStack():
ui.Spacer(width=10)
ui.Label(self._title, style_type_name_override="Title.Label")
self._build_menu()
@abc.abstractmethod
def _build_menu(self) -> None:
"""Build menu items"""
pass
class MenuRadioButton(ui.RadioButton):
"""
Represent a menu radio button.
"""
def __init__(self, **kwargs):
super().__init__(
width=120,
height=30,
image_width=24,
image_height=24,
spacing=5,
style_type_name_override="MenuButton",
**kwargs,
)
class SortMenu(AbstractPopupMenu):
"""
Represent the sort by menu.
Args:
        on_sort_changed_fn: Callback when the sort field or sort order changes. Function signature:
void on_sort_changed_fn(sort_field: str, sort_order: str)
"""
SORT_BY_FIELDS = ["Name", "Date"]
SORT_BY_ORDERS = ["Ascending", "Descending"]
def __init__(self, on_sort_changed_fn: callable):
self._on_sort_changed_fn = on_sort_changed_fn
super().__init__("SORT BY")
self._sort_field = self.SORT_BY_FIELDS[0]
self._sort_order = self.SORT_BY_ORDERS[0]
def _build_menu(self) -> None:
field_collection = ui.RadioCollection()
with ui.VStack(height=0):
for field in self.SORT_BY_FIELDS:
MenuRadioButton(text=field, radio_collection=field_collection)
ui.Line(alignment=ui.Alignment.BOTTOM, style_type_name_override="MenuSeparator")
order_collection = ui.RadioCollection()
with ui.VStack(height=0):
for order in self.SORT_BY_ORDERS:
MenuRadioButton(text=order, radio_collection=order_collection)
field_collection.model.add_value_changed_fn(self._on_sort_field_changed)
order_collection.model.add_value_changed_fn(self._on_sort_order_changed)
def _on_sort_field_changed(self, model: ui.AbstractValueModel) -> None:
self._sort_field = self.SORT_BY_FIELDS[model.as_int]
self.visible = False
if self._on_sort_changed_fn is not None:
self._on_sort_changed_fn(self._sort_field, self._sort_order)
def _on_sort_order_changed(self, model: ui.AbstractValueModel) -> None:
self._sort_order = self.SORT_BY_ORDERS[model.as_int]
self.visible = False
if self._on_sort_changed_fn is not None:
self._on_sort_changed_fn(self._sort_field, self._sort_order)
class FilterMenu(AbstractPopupMenu):
"""
Represent the filter menu.
Args:
providers (List[str]): Provider list.
        on_filter_changed_fn: Callback when filter changed. Function signature:
void on_filter_changed_fn(filter: str)
"""
def __init__(self, providers: List[str], on_filter_changed_fn: callable):
self._on_filter_changed_fn = on_filter_changed_fn
self._filter_provider = ""
self._container: Optional[ui.VStack] = None
super().__init__("Filter")
self.refresh(providers)
def refresh(self, providers: List[str]):
        self._providers = list(providers)
        self._providers.insert(0, "All")
        if self._filter_provider not in self._providers:
            self._filter_provider = self._providers[0]
if self._container is not None:
self._container.clear()
self._build_menu_internal()
def _build_menu(self) -> None:
self._container = ui.VStack(height=0)
self._build_menu_internal()
def _build_menu_internal(self) -> None:
vendor_collection = ui.RadioCollection()
with self._container:
for field in self._providers:
MenuRadioButton(text=field, radio_collection=vendor_collection)
vendor_collection.model.add_value_changed_fn(self._on_filter_changed)
def _on_filter_changed(self, model: ui.AbstractValueModel) -> None:
self._filter_provider = self._providers[model.as_int]
self.visible = False
if self._on_filter_changed_fn is not None:
self._on_filter_changed_fn(self._filter_provider)
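# --- Hedged usage sketch (added for illustration; not part of the forked code) ---
# Shows the expected wiring for SortMenu: create it lazily, then anchor it just
# below the button that opened it. `sort_button` is an assumed ui.Button.
def _example_show_sort_menu(sort_button: ui.Button) -> SortMenu:
    def on_sort_changed(sort_field: str, sort_order: str) -> None:
        print(f"sort by {sort_field} ({sort_order})")
    menu = SortMenu(on_sort_changed)
    # WINDOW_FLAGS_POPUP windows close on an outside click, so only positioning is needed.
    menu.position_x = sort_button.screen_position_x
    menu.position_y = sort_button.screen_position_y + sort_button.computed_height
    return menu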
| 5,614 | Python | 34.314465 | 96 | 0.620948 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/search_notification.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore class SearchNotification
from typing import Callable
from omni import ui
from .style import EMPTY_NOTIFICATION_STYLE, ICON_PATH
class SearchNotification:
"""
    Notification shown when a search returns no results.
"""
    def __init__(self, clear_search_fn: Callable[[], None]):
self._clear_search_fn = clear_search_fn
self._build_ui()
def _build_ui(self):
self._container = ui.ZStack(style=EMPTY_NOTIFICATION_STYLE)
with self._container:
ui.Rectangle(style_type_name_override="EmptyNotification.Frame")
with ui.VStack(spacing=10):
ui.Spacer()
with ui.HStack(height=0):
ui.Spacer()
ui.ImageWithProvider(
f"{ICON_PATH}/search.png",
width=90,
height=60,
fill_policy=ui.IwpFillPolicy.IWP_PRESERVE_ASPECT_FIT,
style_type_name_override="EmptyNotification.Image",
)
ui.Spacer()
self._message_label = ui.Label(
"not found.",
height=0,
alignment=ui.Alignment.CENTER,
style_type_name_override="EmptyNotification.Label",
)
self._clear_container = ui.HStack(height=24)
with self._clear_container:
ui.Spacer()
ui.Button(
"Click to clear search",
width=192,
mouse_pressed_fn=lambda x, y, btn, a: self._clear_search_fn(),
style_type_name_override="EmptyNotification.Button",
)
ui.Spacer()
ui.Spacer()
@property
def visible(self) -> bool:
return self._container.visible
@visible.setter
def visible(self, value) -> None:
self._container.visible = value
def set_message(self, message: str, show_clear: bool = True) -> None:
self._message_label.text = message
self._clear_container.visible = show_clear
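# --- Hedged usage sketch (added for illustration; not part of the forked code) ---
# Typical lifecycle inside a browser widget: build once over the results area,
# flip to a non-clearable "Searching..." state, then report empty results.
# `search_bar` is an assumed object exposing clear_search().
def _example_search_flow(search_bar) -> SearchNotification:
    notification = SearchNotification(lambda: search_bar.clear_search())
    notification.set_message("Searching...", show_clear=False)
    notification.visible = True
    # ... after the query returns no hits:
    notification.set_message('"chair" not found')  # show_clear defaults to True
    return notification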
| 2,633 | Python | 36.098591 | 86 | 0.562856 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/style.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore class Colors
from omni import ui
from pathlib import Path
CURRENT_PATH = Path(__file__).parent
ICON_PATH = CURRENT_PATH.parent.parent.parent.joinpath("icons")
class Colors:
Window = ui.color.shade(0xFF353535)
Selected = ui.color.shade(0xFFDDDDDD)
Text = ui.color.shade(0xFF929292)
Hint = ui.color.shade(0xFF6A6A6A)
Warn = ui.color.shade(0xCC2222FF)
Image = ui.color.shade(0xFFA8A8A8)
Background = ui.color.shade(0xFF23211F, light=0xFF535354)
ARTEC_CLOUD_BROWSER_STYLE = {
"Window": {"background_color": Colors.Window},
"CollectionList": {"background_color": 0, "selected_color": 0, "color": 0, "border_radius": 1},
"TreeView.Frame": {"background_color": 0, "padding": 10},
"TreeView.Item.Image": {"color": Colors.Text},
"TreeView.Item.Image:hovered": {"color": 0xFF131313},
"TreeView.Item.Button": {"background_color": 0, "padding": 0, "margin": 0, "spacing": 0},
"TreeView.Item.Button:hovered": {"background_color": 0xFF3A3A3A},
"TreeView.Item.Button.Image": {"image_url": f"{ICON_PATH}/options.svg", "color": Colors.Text},
"TreeView.Item.Name": {"background_color": 0, "color": Colors.Text},
"TreeView.Item.Name:selected": {"color": 0xFFE39724},
"GridView.Frame": {"background_color": Colors.Background},
"GridView.Item": {"background_color": 0, "color": Colors.Selected, "font_size": 16},
"GridView.Item.Selection": {"background_color": 0, "border_width": 0},
"GridView.Item.Selection:selected": {"border_width": 2, "border_color": 0xFFFFC734, "border_radius": 3.0},
"GridView.Image:selected": {"border_width": 2, "border_color": 0, "border_radius": 3.0},
"SearchBar.Button.Image::sort": {"image_url": f"{ICON_PATH}/sort_by_dark.svg", "color": Colors.Image},
"SearchBar.Button.Image::filter": {"image_url": f"{ICON_PATH}/filter.svg", "color": Colors.Image},
"GridView.Item.Vendor.Background": {"background_color": 0xFF151515},
"GridView.Item.Hover.Background": {"background_color": 0xFF131313},
"GridView.Item.Hover.BackgroundAll:hovered": {"background_color": 0xFF131313},
"GridView.Item.Hover.BackgroundAll": {"background_color": 0},
"GridView.Item.Tips.Background": {"background_color": 0xFF363636},
"GridView.Item.Tips.Text": {"background_color": 0xFFDADADA, "font_size": 14, "margin": 2, "color": 0xFFCCCCCC},
"GridView.Item.Tips.Text::Download": {"color": 0xFF00B976},
"GridView.Item.Tips.Text::ExternalLink": {"color": 0xFFF6A66B},
"GridView.Item.Tips.Text::Normal": {"color": 0xFFCCCCCC},
"GridView.Item.Price": {"color": Colors.Selected, "font_size": 12},
"GridView.Item.User": {"color": 0xFF328C6C, "font_size": 12},
"GridView.Item.Frame": {"color": 0xFFFF0000},
"GridView.Item.Download": {
"background_color": 0xFF2A2825,
"color": 0xFFE39724,
"secondary_color": 0,
"border_radius": 0,
"font_size": 10,
"padding": 0,
},
}
POPUP_MENU_STYLE = {
# "Window": {"background_color": 0xFFFF0000, "padding": 0, "margin": 0},
"Title.Background": {"background_color": Colors.Window},
"Title.Label": {"color": Colors.Selected, "font_size": 18},
"MenuButton": {"background_color": 0xFF4A4A4A, "stack_direction": ui.Direction.LEFT_TO_RIGHT, "spacing": 20},
"MenuButton.Image": {"image_url": f"{ICON_PATH}/none.svg"},
"MenuButton.Image:checked": {"image_url": f"{ICON_PATH}/toggleCheck_dark.svg"},
"MenuButton.Label": {"color": 0xFFD4D4D4, "alignment": ui.Alignment.LEFT_CENTER},
"MenuSeparator": {"color": Colors.Window, "border_width": 4},
}
EMPTY_NOTIFICATION_STYLE = {
"EmptyNotification.Frame": {"background_color": Colors.Window},
"EmptyNotification.Label": {"background_color": Colors.Window, "color": 0xFF7C7C7C, "font_size": 20},
"EmptyNotification.Image": {"background_color": Colors.Window, "color": 0xFF7C7C7C},
"EmptyNotification.Button": {"background_color": 0xFF6C6C6C, "color": 0xFF9E9E9E},
}
HOVER_WINDOW_STYLE = {
**ARTEC_CLOUD_BROWSER_STYLE,
"Window": {"background_color": Colors.Window, "border_width": 0, "padding": 0},
}
AUTH_DIALOG_STYLE = {
"Window": {"background_color": Colors.Window, "border_radius": 2, "border_width": 0.5, "border_color": 0x55ADAC9F},
"Dialog": {"background_color": 0x0, "color": Colors.Text, "margin": 10},
"Message": {"background_color": 0x0, "color": Colors.Text, "margin": 0, "alignment": ui.Alignment.LEFT_CENTER},
"Label": {"background_color": 0x0, "color": Colors.Text, "margin": 0, "alignment": ui.Alignment.LEFT_CENTER},
"Field": {
"background_color": Colors.Background,
"color": Colors.Text,
"alignment": ui.Alignment.LEFT_CENTER,
"margin_height": 0,
},
"Field::overlay": {"background_color": 0x0, "border_color": Colors.Warn, "border_width": 1},
"Field::warn": {
"background_color": 0x0,
"margin_width": 8,
"color": Colors.Warn,
"alignment": ui.Alignment.RIGHT_CENTER,
},
"CheckBox": {
"background_color": Colors.Background,
"color": Colors.Text,
"margin": 4,
"alignment": ui.Alignment.LEFT_CENTER,
},
"Button": {
"background_color": Colors.Background,
"color": Colors.Text,
"margin": 4,
"alignment": ui.Alignment.CENTER,
},
}
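# --- Hedged usage sketch (added for illustration; not part of the forked code) ---
# omni.ui resolves style entries by "<style_type_name_override>::<name>", so a
# label built as below picks up the "GridView.Item.Tips.Text::Download" color.
def _example_styled_download_label() -> ui.Label:
    return ui.Label(
        "DOWNLOAD",
        name="Download",  # selects the ::Download variant defined above
        alignment=ui.Alignment.CENTER,
        style_type_name_override="GridView.Item.Tips.Text",
    )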
| 5,783 | Python | 45.645161 | 119 | 0.658655 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/overview_delegate.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore class OverviewDelegate
from typing import Optional
import carb.settings
from omni.kit.browser.core import DetailDelegate, CategoryItem
class OverviewDelegate(DetailDelegate):
def get_label(self, item: CategoryItem) -> Optional[str]:
return item.name.upper()
def on_double_click(self, item: CategoryItem) -> None:
# Show selected category
settings = carb.settings.get_settings()
settings.set("/exts/artec.asset.browser/showCategory", item.url)
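# --- Hedged sketch (added for illustration; not part of the forked code) ---
# The setting written above is consumed elsewhere through a subscription,
# e.g. the browser widget watches it to select and expand the category tree.
def _example_watch_show_category(on_changed_fn) -> "omni.kit.app.SettingChangeSubscription":
    import omni.kit.app
    # on_changed_fn(item, event_type) fires whenever a category is double-clicked.
    return omni.kit.app.SettingChangeSubscription("/exts/artec.asset.browser/showCategory", on_changed_fn)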
| 940 | Python | 38.208332 | 76 | 0.767021 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/artec_cloud.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from SketchFabAssetProvider for asset store
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Callable, Optional, Tuple, Dict, List
import tempfile
from time import time
import zipfile
import aiohttp
import aiofiles
import carb
import carb.settings
import omni.client
import omni.kit.asset_converter as converter
from urllib.parse import urlparse, urlencode
from artec.services.browser.asset import BaseAssetStore, AssetModel, SearchCriteria, ProviderModel
from .models.asset_fusion import AssetFusion
SETTING_ROOT = "/exts/artec.asset.browser/"
SETTING_STORE_ENABLE = SETTING_ROOT + "enable"
CURRENT_PATH = Path(__file__).parent
DATA_PATH = CURRENT_PATH.parent.parent.parent.joinpath("data")
class ConversionTaskStatus(Enum):
ENQUEUED = 1
IN_PROGRESS = 2
PROCESSED = 3
FAILED = -1
@dataclass
class ConversionResult:
status: ConversionTaskStatus
download_url: str
class ArtecCloudAssetProvider(BaseAssetStore):
def __init__(self) -> None:
settings = carb.settings.get_settings()
self._provider_id = settings.get_as_string(SETTING_ROOT + "providerId")
super().__init__(store_id=self._provider_id)
self._max_count_per_page = settings.get_as_int(SETTING_ROOT + "maxCountPerPage")
self._search_url = settings.get_as_string(SETTING_ROOT + "cloudSearchUrl")
self._auth_token = None
self._authorize_url = settings.get_as_string(SETTING_ROOT + "authorizeUrl")
self._auth_params: Dict = {}
def provider(self) -> ProviderModel:
return ProviderModel(
name=self._store_id, icon=f"{DATA_PATH}/artec_cloud.png", enable_setting=SETTING_STORE_ENABLE
)
def authorized(self) -> bool:
return self._auth_token is not None
async def authenticate(self, username: str, password: str):
params = {"user[email]": username, "user[password]": password}
async with aiohttp.ClientSession() as session:
async with session.post(self._authorize_url, params=params) as response:
self._auth_params = await response.json()
self._auth_token = self._auth_params.get("auth_token")
async def _search(self, search_criteria: SearchCriteria) -> Tuple[List[AssetModel], bool]:
assets: List[AssetModel] = []
params = {
"auth_token": self._auth_token,
"sort_field": "",
"sort_direction": "",
"term": "",
"slug": "",
"per_page": self._max_count_per_page,
"page": 0,
}
if search_criteria.sort:
params["sort_field"], params["sort_direction"] = search_criteria.sort
if search_criteria.keywords:
params["term"] = " ".join(search_criteria.keywords)
if search_criteria.filter.categories:
category = search_criteria.filter.categories[-1]
if category:
params["slug"] = category
to_continue = True
while to_continue:
params["page"] += 1
(page_assets, to_continue) = await self._search_one_page(params)
if page_assets:
assets.extend(page_assets)
if not to_continue:
break
else:
break
return (assets, to_continue)
async def _search_one_page(self, params: Dict) -> Tuple[List[AssetModel], bool]:
if not self.authorized():
return ([], False)
items = []
meta = {}
async with aiohttp.ClientSession() as session:
async with session.get(self._search_url, params=params) as response:
results = await response.json()
items = results.get("projects", [])
                meta = results.get("meta") or {}
assets: List[AssetModel] = []
for item in items:
item_categories = item.get("categories", [])
item_thumbnail = self.url_with_token(item.get("preview_presigned_url"))
assets.append(
AssetModel(
identifier=item.get("id"),
name=item.get("name"),
version="",
published_at=item.get("created_at"),
categories=item_categories,
tags=[],
vendor=self._provider_id,
download_url=item.get("download_url", ""),
product_url=item.get("viewer_url", ""),
thumbnail=item_thumbnail,
user=item.get("user"),
fusions=item.get("fusions", ""),
)
)
        to_continue = bool(meta) and meta["total_count"] > meta["current_page"] * meta["per_page"]
return (assets, to_continue)
def url_with_token(self, url: str) -> str:
params = {"auth_token": self._auth_token}
url += ('&' if urlparse(url).query else '?') + urlencode(params)
return url
def destroy(self):
self._auth_params = {}
async def download(self, fusion: AssetFusion, dest_path: str,
on_progress_fn: Optional[Callable[[float], None]] = None, timeout: int = 600,
on_prepared_fn: Optional[Callable[[float], None]] = None) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
zip_file_path = Path(tmp_dir) / f"{fusion.name}.zip"
snapshot_group_id, eta = await self._request_model(fusion)
conversion_start_time = time()
            while True:
                if time() - conversion_start_time > timeout:
                    # Give up once the (previously unused) timeout parameter is exceeded.
                    return {"url": None, "status": omni.client.Result.ERROR}
                if on_progress_fn:
                    # Guard against a zero eta reported by the server.
                    on_progress_fn(min((time() - conversion_start_time) / max(eta, 1e-6), 1))
                conversion_result = await self._check_status(fusion, snapshot_group_id)
if conversion_result.status is ConversionTaskStatus.PROCESSED:
if on_prepared_fn:
on_prepared_fn()
async with aiohttp.ClientSession() as session:
content = bytearray()
downloaded = 0
async with session.get(conversion_result.download_url) as response:
size = int(response.headers.get("content-length", 0))
if size > 0:
async for chunk in response.content.iter_chunked(1024 * 512):
content.extend(chunk)
downloaded += len(chunk)
if on_progress_fn:
on_progress_fn(float(downloaded) / size)
else:
if on_progress_fn:
on_progress_fn(0)
content = await response.read()
if on_progress_fn:
on_progress_fn(1)
async with aiofiles.open(zip_file_path, "wb") as file:
await file.write(content)
break
elif conversion_result.status is ConversionTaskStatus.FAILED:
return {"url": None, "status": omni.client.Result.ERROR}
# unzip
output_path = zip_file_path.parent / fusion.name
await self._extract_zip(zip_file_path, output_path)
# convert model
try:
obj_path = next(output_path.glob("**/*.obj"))
except StopIteration:
return {"url": None, "status": omni.client.Result.ERROR}
converted_project_path = zip_file_path.parent / f"{obj_path.parent.name}-converted"
usd_path = converted_project_path / f"{obj_path.stem}.usd"
await omni.client.create_folder_async(str(converted_project_path))
if not await self.convert(obj_path, usd_path):
return {"url": None, "status": omni.client.Result.ERROR}
# prepare usdz
usdz_path = Path(dest_path) / f"{usd_path.name}z"
with zipfile.ZipFile(usdz_path, "w") as archive:
# usd file should be first in the USDZ package
archive.write(usd_path, arcname=usd_path.name)
for file_path in usd_path.parent.glob("**/*"):
if file_path != usd_path:
archive.write(file_path, arcname=file_path.relative_to(usd_path.parent))
await self._download_thumbnail(usdz_path, fusion.thumbnail_url)
return {"url": str(usdz_path), "status": omni.client.Result.OK}
async def _download_thumbnail(self, usd_path: Path, thumbnail_url: str):
thumbnail_out_dir_path = usd_path.parent / ".thumbs" / "256x256"
await omni.client.create_folder_async(str(thumbnail_out_dir_path))
thumbnail_out_path = thumbnail_out_dir_path / f"{Path(usd_path).name}.png"
async with aiohttp.ClientSession() as session:
async with session.get(self.url_with_token(thumbnail_url)) as response:
async with aiofiles.open(thumbnail_out_path, "wb") as file:
await file.write(await response.read())
@staticmethod
async def convert(input_asset_path: Path, output_asset_path: Path) -> bool:
task_manager = converter.get_instance()
task = task_manager.create_converter_task(str(input_asset_path), str(output_asset_path), None)
success = await task.wait_until_finished()
if not success:
carb.log_error(f"Conversion failed. Reason: {task.get_error_message()}")
return False
return True
@staticmethod
async def _extract_zip(input_path, output_path):
await omni.client.create_folder_async(str(output_path))
with zipfile.ZipFile(input_path, "r") as zip_ref:
zip_ref.extractall(output_path)
async def _check_status(self, fusion: AssetFusion, snapshot_group_id):
params = {
"auth_token": self._auth_token,
"snapshot_group_id": snapshot_group_id
}
url = f"{'/'.join(fusion.url.split('/')[:-1])}/conversion_status"
async with aiohttp.ClientSession() as session:
async with session.get(url=url, params=params) as response:
decoded_response = await response.json()
if response.status != 200:
return ConversionResult(ConversionTaskStatus.FAILED, "")
status = ConversionTaskStatus(int(decoded_response["project"]["conversion_status"]))
return ConversionResult(status, decoded_response["project"]["download_url"])
async def _request_model(self, fusion: AssetFusion):
async with aiohttp.ClientSession() as session:
async with session.get(url=self.url_with_token(fusion.url)) as response:
results = await response.json()
return results["project"]["snapshot_group_id"], results["project"]["eta"]
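# --- Hedged usage sketch (added for illustration; not part of the forked code) ---
# Drives the authenticate/poll/convert pipeline above. The credentials, the
# `fusion` object and `dest_path` are assumptions made for the example.
async def _example_download(provider: ArtecCloudAssetProvider, fusion: AssetFusion, dest_path: str) -> None:
    await provider.authenticate("user@example.com", "secret")
    if not provider.authorized():
        return
    result = await provider.download(fusion, dest_path, on_progress_fn=lambda p: print(f"{p:.0%}"))
    if result["status"] == omni.client.Result.OK:
        print("usdz saved to", result["url"])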
| 11,433 | Python | 41.191882 | 105 | 0.579201 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/extension.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore class AssetStoreExtension
import omni.ext
import omni.kit.ui
from .window import ArtecCloudWindow, ARTEC_CLOUD_WINDOW_NAME
from .artec_cloud import ArtecCloudAssetProvider
from artec.services.browser.asset import get_instance as get_asset_services
from artec.services.browser.asset.store.local.local import LocalFolderAssetProvider
ARTEC_CLOUD_BROWSER_MENU_PATH = "Window/Browsers/" + ARTEC_CLOUD_WINDOW_NAME
_extension_instance = None
class ArtecAssetBrowserExtension(omni.ext.IExt):
def on_startup(self, ext_id):
self._window = None
self._menu = omni.kit.ui.get_editor_menu().add_item(
ARTEC_CLOUD_BROWSER_MENU_PATH, self._on_click, toggle=True, value=True
)
self._window = ArtecCloudWindow()
self._window.set_visibility_changed_fn(self._on_visibility_changed)
global _extension_instance
_extension_instance = self
self._asset_provider = ArtecCloudAssetProvider()
self._asset_provider_local = LocalFolderAssetProvider()
self._asset_service = get_asset_services()
self._asset_service.register_store(self._asset_provider)
self._asset_service.register_store(self._asset_provider_local)
def on_shutdown(self):
self._asset_service.unregister_store(self._asset_provider)
self._asset_service.unregister_store(self._asset_provider_local)
self._asset_provider = None
self._asset_provider_local = None
self._asset_service = None
if self._window is not None:
self._window.destroy()
self._window = None
global _extension_instance
_extension_instance = None
def _on_click(self, *args):
self._window.visible = not self._window.visible
def _on_visibility_changed(self, visible):
omni.kit.ui.get_editor_menu().set_value(ARTEC_CLOUD_BROWSER_MENU_PATH, visible)
| 2,369 | Python | 36.619047 | 87 | 0.712537 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/hover_window.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore class HoverWindow
from typing import Optional
from omni import ui
from .models import AssetDetailItem
from .style import HOVER_WINDOW_STYLE
class HoverWindow(ui.Window):
"""
    Window that shows hover details for an asset item.
"""
def __init__(self):
flags = (
ui.WINDOW_FLAGS_NO_COLLAPSE
| ui.WINDOW_FLAGS_NO_TITLE_BAR
| ui.WINDOW_FLAGS_NO_SCROLLBAR
| ui.WINDOW_FLAGS_NO_RESIZE
| ui.WINDOW_FLAGS_NO_CLOSE
| ui.WINDOW_FLAGS_NO_DOCKING
)
super().__init__(
"ASSET HOVER WINDOW",
width=250,
height=200,
flags=flags,
padding_x=0,
padding_y=0,
dockPreference=ui.DockPreference.DISABLED,
)
self.frame.set_style(HOVER_WINDOW_STYLE)
self.frame.set_build_fn(self._build_ui)
self.visible = False
self._item: Optional[AssetDetailItem] = None
self._image: Optional[ui.Image] = None
def _build_ui(self) -> None:
with self.frame:
self._container = ui.VStack()
with self._container:
self._image = ui.Image(
self._item.thumbnail,
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_CROP,
style_type_name_override="GridView.Image",
)
self._build_tips(self._item)
self._container.set_mouse_hovered_fn(self._on_hover)
    def show(self, item: AssetDetailItem, image_size: float, tips_height: float, x: float, y: float):
self._item = item
if self._image:
self._image.source_url = item.thumbnail
self._tips.text = item.tips
self._tips.name = item.asset_type
self.width = image_size
self.height = image_size + tips_height
self.position_x = x
self.position_y = y
self.visible = True
def _on_hover(self, hovered):
self.visible = hovered
def _build_tips(self, item: AssetDetailItem) -> None:
# Hover background and text
with ui.ZStack(height=self.height - self.width):
ui.Rectangle(style_type_name_override="GridView.Item.Hover.Background")
self._tips = ui.Label(
item.tips,
name=item.asset_type,
alignment=ui.Alignment.CENTER,
style_type_name_override="GridView.Item.Tips.Text",
)
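# --- Hedged usage sketch (added for illustration; not part of the forked code) ---
# The detail delegate positions this window over the hovered grid cell;
# `item`, `x` and `y` stand in for real hover data supplied by the delegate.
def _example_show_hover(window: HoverWindow, item: AssetDetailItem, x: float, y: float) -> None:
    image_size = 256  # assumed thumbnail size of the hovered cell
    tips_height = 48  # assumed extra space under the image for the tips label
    window.show(item, image_size, tips_height, x, y)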
| 2,915 | Python | 31.764045 | 94 | 0.594168 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/category_delegate.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore MainNavigationDelegate
from omni import ui
from omni.kit.browser.core import CategoryDelegate, CategoryItem
from .models import MainNavigationItem, AssetStoreModel
class MainNavigationDelegate(CategoryDelegate):
def __init__(self, model: AssetStoreModel, **kwargs):
self._model = model
        super().__init__(**kwargs)
def get_label(self, item: CategoryItem) -> str:
return item.name.upper()
def build_widget(
self,
model: ui.AbstractItemModel,
item: MainNavigationItem,
index: int = 0,
level: int = 0,
expanded: bool = False,
):
"""
        Create a widget per category item
        Args:
            model (AbstractItemModel): Category data model
            item (MainNavigationItem): Category item
            index (int): ignored
            level (int): ignored
            expanded (bool): ignored
"""
with ui.HStack():
if self._tree_mode:
ui.Label(" " * level, width=0)
ui.Label(
self.get_label(item),
width=0,
alignment=ui.Alignment.LEFT_CENTER,
style_type_name_override="TreeView.Item.Name",
)
ui.Spacer()
if item.configurable:
ui.Button(
"",
width=16,
height=16,
clicked_fn=lambda model=model, item=item: self._on_config(model, item),
style_type_name_override="TreeView.Item.Button",
)
def build_branch(
self,
model: ui.AbstractItemModel,
item: CategoryItem,
column_id: int = 0,
level: int = 0,
expanded: bool = False,
):
"""
        Create a branch widget that opens or closes a subtree
        Args:
            model (AbstractItemModel): Category data model
            item (CategoryItem): Category item
            column_id (int): ignored
            level (int): ignored
            expanded (bool): ignored
"""
if not self._tree_mode or len(item.children) == 0:
# In tree mode, if have children, show as branch
return
with ui.HStack(height=20, spacing=5):
ui.Label(" " * level, width=0)
if expanded:
ui.Label("- ", width=5)
else:
ui.Label("+ ", width=5)
def _on_config(self, model: AssetStoreModel, item: MainNavigationItem) -> None:
# Here item name is provider id
self._model.config_provider(item.name)
| 3,043 | Python | 32.450549 | 91 | 0.568189 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/options_menu.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore class AssetOptionsMenu
from omni.kit.browser.core import OptionMenuDescription, OptionsMenu
from .models import AssetStoreModel
class AssetOptionsMenu(OptionsMenu):
"""
    Represent the options menu used in the asset store.
"""
def __init__(self, model: AssetStoreModel):
super().__init__()
self._model = model
self._my_assets_window = None
self._menu_descs = []
def destroy(self) -> None:
if self._my_assets_window is not None:
self._my_assets_window.destroy()
self._my_assets_window = None
super().destroy()
def show(self) -> None:
if self._options_menu is None:
for provider, setting in self._model.providers.items():
if setting["configurable"]:
self._menu_descs.append(
OptionMenuDescription(
f"{provider} Setting", clicked_fn=lambda p=provider: self._model.config_provider(p)
)
)
super().show()
| 1,506 | Python | 33.249999 | 111 | 0.634794 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/download_helper.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore class DownloadHelper
from typing import Dict, Optional
import os
import carb
import carb.tokens
import json
import omni.client
from artec.services.browser.asset import AssetModel
DOWNLOAD_RESULT_FILE = "asset_store_downloads.json"
def Singleton(class_):
"""A singleton decorator"""
instances = {}
def getinstance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs)
return instances[class_]
return getinstance
@Singleton
class DownloadHelper:
"""
Helper to download assets.
"""
def __init__(self):
self._download_root = os.path.abspath(carb.tokens.get_tokens_interface().resolve("${shared_documents}"))
self._download_result_file = self._download_root + "/" + DOWNLOAD_RESULT_FILE
self._download_stats: Dict[str, Dict[str, Dict]] = {}
self._load_download_assets()
def destroy(self):
pass
def get_download_url(self, asset: AssetModel) -> Optional[str]:
"""
Query asset local downloaded url.
Args:
asset (AssetModel): Asset model to query
        Returns:
            Local url if found, else None.
"""
if asset["vendor"] not in self._download_stats:
return None
if asset["identifier"] in self._download_stats[asset["vendor"]]:
url = self._download_stats[asset["vendor"]][asset["identifier"]]
(result, entry) = omni.client.stat(url)
if result == omni.client.Result.OK:
return url
else:
# File not found, clean download stats
del self._download_stats[asset["vendor"]][asset["identifier"]]
self._save_download_assets()
return None
def save_download_asset(self, asset: AssetModel, url: str) -> None:
"""
Save asset local downloaded url
Args:
asset (AssetModel): Asset model to save.
url (str): Local url of downloaded asset model.
"""
if asset["vendor"] not in self._download_stats:
self._download_stats[asset["vendor"]] = {}
self._download_stats[asset["vendor"]][asset["identifier"]] = url
self._save_download_assets()
    def _save_download_assets(self):
        try:
            # The context manager closes the file; no explicit close() is needed.
            with open(self._download_result_file, "w") as json_file:
                json.dump(self._download_stats, json_file, indent=4)
        except FileNotFoundError:
            carb.log_warn(f"Failed to open {self._download_result_file}!")
        except PermissionError:
            carb.log_warn(f"Cannot write to {self._download_result_file}: permission denied!")
        except Exception:
            carb.log_warn(f"Unknown failure to write to {self._download_result_file}")
def _load_download_assets(self):
result, entry = omni.client.stat(self._download_result_file)
if result != omni.client.Result.OK:
self._download_stats = {}
return
try:
with open(self._download_result_file, "r") as json_file:
self._download_stats = json.load(json_file)
except FileNotFoundError:
carb.log_error(f"Failed to open {self._download_result_file}!")
except PermissionError:
carb.log_error(f"Cannot read {self._download_result_file}: permission denied!")
except Exception as exc:
carb.log_error(f"Unknown failure to read {self._download_result_file}: {exc}")
| 4,117 | Python | 34.808695 | 112 | 0.615497 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/browser_widget.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore BrowserWidget
import asyncio
from typing import Optional, List, Callable
import carb.settings
import carb.dictionary
import omni.kit.app
from omni.kit.browser.core import BrowserSearchBar, BrowserWidget, CategoryItem
from omni import ui
from .search_notification import SearchNotification
from .popup_menu import SortMenu, FilterMenu
from .models import AssetDetailItem
from .style import ICON_PATH
from .auth_dialog import AuthDialog
from functools import partial
DEFAULT_THUMBNAIL_PADDING = 5
SETTING_ROOT = "/exts/artec.asset.browser/"
SETTING_AUTO_SCROLL = SETTING_ROOT + "autoScroll"
class ArtecCloudBrowserWidget(BrowserWidget):
def __init__(self, *args, **kwargs):
self._auth_dialog = None
super().__init__(*args, **kwargs)
self._sort_menu = None
self._filter_menu = None
self._filter_vendor = None
self._update_setting = omni.kit.app.SettingChangeSubscription(
"/exts/artec.asset.browser/showCategory", self._on_show_category_changed
)
self._load_future = None
self._more_details = True
self._thumbnail_sub_id = None
self._detail_kwargs["delegate"].set_request_more_fn(self._request_more_assets)
# Get categories from model
self._load_categories()
self._browser_model.on_refresh_provider_fn = self._on_refresh_provider
self._browser_model.on_enable_provider_fn = self._on_enable_provider
def destroy(self):
if self._load_future is not None:
if not self._load_future.done():
self._load_future.cancel()
self._update_setting = None
if self._sort_menu is not None:
self._sort_menu.destroy()
if self._thumbnail_sub_id:
self.remove_thumbnail_size_changed_fn(self._thumbnail_sub_id)
super().destroy()
def authorized(self) -> bool: # WIP working
provider = self._browser_model.artec_cloud_provider()
return provider.authorized()
def _build_results(self): # FIXME use other method
self.filter_details(None)
def trigger_authenticate(self):
if not self.authorized():
def on_authenticate(self, dialog: AuthDialog):
def check_authorized(self, dialog: AuthDialog):
if self.authorized():
self._build_results()
dialog.hide()
else:
dialog.warn_password()
asyncio.ensure_future(
self._browser_model.authenticate_async(
self._browser_model.artec_cloud_provider_id,
dialog.username,
dialog.password,
lambda: check_authorized(self, dialog)
)
)
def on_cancel(dialog: AuthDialog):
dialog.hide()
if not self._auth_dialog:
self._auth_dialog = AuthDialog()
self._auth_dialog.show(
self._browser_model.artec_cloud_provider_id,
click_okay_handler=partial(on_authenticate, self),
click_cancel_handler=partial(on_cancel),
)
def _build_right_panel(self):
self.trigger_authenticate()
with ui.ZStack():
self._build_detail_panel()
if self._zoom_bar:
self._zoom_bar.set_on_hovered_fn(self._on_zoombar_hovered)
auto_scroll = carb.settings.get_settings().get(SETTING_AUTO_SCROLL)
if auto_scroll:
self._detail_scrolling_frame.set_scroll_y_changed_fn(self._on_detail_scroll_y_changed)
def _build_detail_panel(self):
# Add search bar
with ui.VStack(spacing=5):
with ui.HStack(spacing=4, height=26):
self._search_bar = BrowserSearchBar(options_menu=None, subscribe_edit_changed=False)
with ui.VStack(width=26):
ui.Spacer()
self._sort_button = ui.Button(
image_width=20,
image_height=20,
width=26,
height=26,
name="sort",
clicked_fn=self._trigger_sort_menu,
style_type_name_override="SearchBar.Button",
)
ui.Spacer()
with ui.ZStack():
super()._build_right_panel()
def __clear_search():
self._search_bar.clear_search()
self._search_notification = SearchNotification(__clear_search)
self._search_bar.bind_browser_widget(self)
self._thumbnail_sub_id = self.add_thumbnail_size_changed_fn(self._on_thumbnail_size_changed)
self._search_notification.visible = False
def _build_detail_view_internal(self):
self._thumbnail_padding = self._get_thumbnail_padding(self._detail_kwargs["thumbnail_size"])
self._detail_kwargs["thumbnail_padding_width"] = self._thumbnail_padding
self._detail_kwargs["thumbnail_padding_height"] = self._thumbnail_padding
super()._build_detail_view_internal()
self._detail_view.set_extra_filter_fn(self._on_extra_filter)
def _on_category_selected(self, category_item: Optional[CategoryItem]) -> None:
if category_item is None:
            # Always show "ALL" if nothing is selected
            self.category_selection = [self._browser_model._category_items[0]]
            return
        # The early return above means category_item is not None here.
        super()._on_category_selected(category_item)
        self._load_assets(category_item, lambda: self._detail_view.model._item_changed(None))
def show_widgets(
self,
collection: Optional[bool] = None,
category: Optional[bool] = None,
detail: Optional[bool] = None,
expand_root: Optional[bool] = None,
) -> None:
# Show collection control but disable it and make it transparent
super().show_widgets(collection=collection, category=category, detail=detail)
self._collection_combobox.enabled = False
# if expand_root:
# self._category_view.set_expanded(self._category_model.get_item_children()[0], True, False)
def filter_details(self, filter_words: Optional[List[str]]):
self._begin_search()
self._browser_model.search_words = filter_words
# Clean cache detail items in browser model
if self.category_selection:
for category_item in self.category_selection:
self._browser_model._item_changed(category_item)
def __show_filter_results():
self._detail_view.model._item_changed(None)
self._end_search()
if self.category_selection:
self._load_assets(self.category_selection[0], __show_filter_results)
else:
# Force to refresh detail view for new filter words
self._detail_view.model._item_changed(None)
def _trigger_sort_menu(self) -> None:
if self._sort_menu is None:
self._sort_menu = SortMenu(self._on_sort_changed)
else:
self._sort_menu.visible = True
self._sort_menu.position_x = self._sort_button.screen_position_x
self._sort_menu.position_y = self._sort_button.screen_position_y + self._sort_button.computed_height
def _on_sort_changed(self, sort_field: str, sort_order: str) -> None:
self._browser_model.change_sort_args(sort_field, sort_order)
if self.category_selection:
self._load_assets(self.category_selection[0], lambda: self._detail_view.model._item_changed(None))
def _on_filter_changed(self, filter_vendor: str) -> None:
self._browser_model.search_provider = None if filter_vendor == "All" else filter_vendor
if self.category_selection:
self._load_assets(self.category_selection[0], lambda: self._detail_view.model._item_changed(None))
def _on_show_category_changed(self, item: carb.dictionary.Item, event_type) -> None:
# Show and expand category
if event_type == carb.settings.ChangeEventType.CHANGED:
url = str(item)
if url:
full_chain = []
category_item = self._find_category_item(url, None, full_chain)
if category_item:
self.category_selection = [category_item]
# Expand to show selected category
for item in full_chain:
self._category_view.set_expanded(item, True, False)
def _on_refresh_provider(self, provider: str, item: carb.dictionary.Item, event_type) -> None:
# Refresh category
if event_type != carb.settings.ChangeEventType.CHANGED:
return
async def __refresh_categories_async():
await omni.kit.app.get_app().next_update_async()
await self._browser_model.list_categories_async()
# Refresh categories list
self._browser_model._item_changed(self.collection_selection)
self._category_view.model._item_changed(None)
# Default select "ALL"
self.category_selection = [self._browser_model._category_items[0]]
asyncio.ensure_future(__refresh_categories_async())
def _on_enable_provider(self, provider: str, item: carb.dictionary.Item, event_type) -> None:
if event_type != carb.settings.ChangeEventType.CHANGED:
return
async def __refresh_providers_async():
await self._browser_model.list_providers_async()
await self._browser_model.list_categories_async()
# Refresh provider filter menu
if self._filter_menu:
self._filter_menu.refresh(list(self._browser_model.providers.keys()))
# Refresh categories list
self._browser_model._item_changed(self.collection_selection)
await omni.kit.app.get_app().next_update_async()
self._category_view.model._item_changed(None)
self._on_category_selected(None)
asyncio.ensure_future(__refresh_providers_async())
def _find_category_item(
self, url: str, category_item: Optional[CategoryItem] = None, full_chain: Optional[List[CategoryItem]] = None
) -> Optional[CategoryItem]:
if category_item is None:
# Find in root
for child in self._category_model.get_item_children():
found_item = self._find_category_item(url, child, full_chain)
if found_item:
return found_item
else:
return None
else:
if category_item.url == url:
return category_item
else:
if full_chain is not None:
full_chain.append(category_item)
for child in category_item.children:
# Find in children
found_item = self._find_category_item(url, child, full_chain)
if found_item is not None:
return found_item
else:
if full_chain is not None:
full_chain.pop()
return None
def _on_extra_filter(self, item: AssetDetailItem) -> bool:
if self._filter_vendor is None:
return True
else:
if isinstance(item, AssetDetailItem):
return item.asset_model["vendor"] == self._filter_vendor
else:
return True
def _load_assets(
        self, category_item: CategoryItem, callback: Optional[Callable[[], None]] = None, reset: bool = True
) -> None:
if reset:
self._begin_search()
self._browser_model.reset_assets()
self._detail_view.model._item_changed(None)
else:
self._detail_kwargs["delegate"].more_item_image.source_url = f"{ICON_PATH}/search.png"
self._detail_kwargs["delegate"].more_item_label.text = "Searching..."
self._detail_kwargs["delegate"].more_item_center_tips.text = "Searching..."
self._detail_kwargs["delegate"].more_item_right_tips.text = ""
if self._load_future is not None:
if not self._load_future.done():
self._load_future.cancel()
if reset:
self._more_details = True
def __assets_loaded():
self._detail_view.model._item_changed(None)
self._end_search()
self._load_future = asyncio.ensure_future(self._load_asset_async(category_item, __assets_loaded, reset=reset))
async def _load_asset_async(
        self, category_item: CategoryItem, callback: Optional[Callable[[], None]] = None, reset: bool = True
):
self._more_details = await self._browser_model.list_assets_async(category_item, callback, reset=reset)
self._end_search()
def _on_thumbnail_size_changed(self, thumbnail_size: int) -> None:
self._detail_kwargs["delegate"].on_thumbnail_size_changed(thumbnail_size)
thumbnail_padding = self._get_thumbnail_padding(thumbnail_size)
if thumbnail_padding != self._thumbnail_padding:
self._thumbnail_padding = thumbnail_padding
self._detail_view.thumbnail_padding_height = thumbnail_padding
self._detail_view.thumbnail_padding_width = thumbnail_padding
def _load_categories(self):
async def __load_categories_async():
await self._browser_model.list_categories_async()
# Show categories list
self.collection_index = 0
self.category_selection = [self._browser_model._category_items[0]]
asyncio.ensure_future(__load_categories_async())
def _request_more_assets(self):
# Require more assets
if self.category_selection:
self._load_assets(
self.category_selection[0], lambda: self._detail_view.model._item_changed(None), reset=False
)
def _on_zoombar_hovered(self, hovered: bool) -> None:
# When zoombar hovered, disable hover window.
# Otherwise zoombar will lost focus and cannot change thumbnail size anymore.
self._detail_kwargs["delegate"].enable_hovered(not hovered)
def _get_thumbnail_padding(self, thumbnail_size):
if thumbnail_size > 384:
return 3 * DEFAULT_THUMBNAIL_PADDING
elif thumbnail_size > 192:
return 2 * DEFAULT_THUMBNAIL_PADDING
else:
return DEFAULT_THUMBNAIL_PADDING
def _begin_search(self) -> None:
self._search_notification.set_message("Searching...", show_clear=False)
self._search_notification.visible = True
def _end_search(self) -> None:
if len(self._browser_model._assets) == 0:
if self._browser_model.search_words is not None:
message = " ".join(self._browser_model.search_words)
message = f'"{message}" not found'
self._search_notification.set_message(message)
self._search_notification.visible = True
else:
if self._browser_model.search_provider:
message = f"No asset found ({self._browser_model.search_provider} only)!"
else:
message = "No assets found!"
self._search_notification.set_message(message, show_clear=False)
self._search_notification.visible = True
else:
self._search_notification.visible = False
def _on_detail_scroll_y_changed(self, y: float) -> None:
try:
if self._more_details and y >= self._detail_scrolling_frame.scroll_y_max and self.category_selection:
# Require more assets
self._request_more_assets()
except AttributeError:
            # scroll_y_max requires a newer Kit version
# carb.log_error("Update kit to enable scrolling event!")
pass
| 16,634 | Python | 39.474452 | 118 | 0.598593 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/download_progress_bar.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore class DownloadProgressBar
import carb.events
from omni import ui
import omni.kit.app
class DownloadProgressBar(ui.ProgressBar):
"""
Represent the asset download progress bar.
Args:
        real_progress (bool): True to display real download progress; False to animate indeterminate progress over time.
"""
def __init__(self, real_progress: bool = True):
self._real_progress = real_progress
self._build_ui()
def destroy(self) -> None:
self._stop()
@property
def visible(self) -> bool:
return self._container.visible
@visible.setter
    def visible(self, value: bool) -> None:
self._container.visible = value
if value:
if self._real_progress:
self.progress = 0
else:
self._start()
else:
if self._real_progress:
self.progress = 1
else:
self._stop()
@property
def progress(self) -> float:
return self._progress_bar.model.as_float
@progress.setter
def progress(self, value: float) -> None:
self._progress_bar.model.set_value(value)
def _start(self) -> None:
self._progress_bar.model.set_value(0)
self._action_time = 0.0
self._current_time = 0.0
self._step = 0.01
self._threshold = self._get_threshold()
self._update_sub = (
omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._update_progress)
)
def _stop(self) -> None:
self._progress_bar.model.set_value(1)
self._update_sub = None
def _build_ui(self) -> None:
self._container = ui.VStack(visible=False)
with self._container:
ui.Spacer()
self._progress_bar = ui.ProgressBar(height=0, style_type_name_override="GridView.Item.Download")
def _update_progress(self, event: carb.events.IEvent):
self._current_time += event.payload["dt"]
if self._current_time - self._action_time >= 0.1:
value = self._progress_bar.model.as_float
value += self._step
if value > 1.0:
value = 0
self._progress_bar.model.set_value(value)
if value >= self._threshold:
self._step /= 10
self._threshold = self._get_threshold()
self._action_time = self._current_time
def _get_threshold(self):
value = self._progress_bar.model.as_float
return value + (1 - value) * 0.75
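# --- Hedged usage sketch (added for illustration; not part of the forked code) ---
# With real_progress=False the bar animates on app update events, dividing its
# step by ten each time it crosses 75% of the remaining range, so it approaches
# but never reaches completion until hidden. With real progress, drive it directly:
def _example_real_progress() -> DownloadProgressBar:
    bar = DownloadProgressBar(real_progress=True)
    bar.visible = True   # resets progress to 0
    bar.progress = 0.5   # e.g. from a download callback
    return bar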
| 2,999 | Python | 31.258064 | 110 | 0.606202 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/window.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore class AssetStoreWindow
import omni.ui as ui
from .browser_widget import ArtecCloudBrowserWidget
from .models import AssetStoreModel
from .category_delegate import MainNavigationDelegate
from .detail_delegate import AssetDetailDelegate
from .overview_delegate import OverviewDelegate
from .style import ARTEC_CLOUD_BROWSER_STYLE
ARTEC_CLOUD_WINDOW_NAME = "Artec Cloud models"
class ArtecCloudWindow(ui.Window):
"""
Represent a window to show Artec Cloud Models
"""
def __init__(self):
super().__init__(ARTEC_CLOUD_WINDOW_NAME, width=500, height=600)
self._widget = None
self.frame.set_build_fn(self._build_ui)
self.frame.set_style(ARTEC_CLOUD_BROWSER_STYLE)
# Dock it to the same space where Stage is docked, make it active.
self.deferred_dock_in("Content", ui.DockPolicy.CURRENT_WINDOW_IS_ACTIVE)
def destroy(self):
if self._widget is not None:
self._widget.destroy()
super().destroy()
def _build_ui(self):
self._browser_model = AssetStoreModel()
with self.frame:
with ui.VStack(spacing=5):
self._widget = ArtecCloudBrowserWidget(
self._browser_model,
min_thumbnail_size=128,
category_delegate=MainNavigationDelegate(self._browser_model, tree_mode=True),
category_tree_mode=True,
detail_delegate=AssetDetailDelegate(self._browser_model),
overview_delegate=OverviewDelegate(model=self._browser_model),
style=ARTEC_CLOUD_BROWSER_STYLE,
always_select_category=False,
show_category_splitter=True,
category_width=180,
)
self._widget.show_widgets(collection=True)
| 2,291 | Python | 35.967741 | 98 | 0.666085 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/auth_dialog.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore AuthDialog
import omni.ui as ui
import carb.settings
from typing import Callable
from .style import AUTH_DIALOG_STYLE
class AuthDialog:
def __init__(self, **kwargs):
self._window = None
self._message = None
self._username = None
self._password = None
self._password_overlay = None
self._remember = None
        self._okay_button = None
self._cancel_button = None
self._sub_begin_edit = None
self._width = kwargs.get("width", 400)
self._build_ui()
def _build_ui(self):
window_flags = (
ui.WINDOW_FLAGS_NO_RESIZE
| ui.WINDOW_FLAGS_POPUP
| ui.WINDOW_FLAGS_NO_TITLE_BAR
| ui.WINDOW_FLAGS_NO_SCROLLBAR
| ui.WINDOW_FLAGS_NO_BACKGROUND
| ui.WINDOW_FLAGS_MODAL
)
self._window = ui.Window("Authentication", width=self._width, height=0, flags=window_flags)
with self._window.frame:
with ui.ZStack(style=AUTH_DIALOG_STYLE):
ui.Rectangle(style_type_name_override="Window")
with ui.VStack(style_type_name_override="Dialog", spacing=6):
self._message = ui.Label(
f"Please login to your account.", height=20, style_type_name_override="Message"
)
with ui.HStack(height=0):
ui.Label("Email: ", style_type_name_override="Label")
self._username = ui.StringField(
width=ui.Percent(75), height=20, style_type_name_override="Field"
)
with ui.HStack(height=0):
ui.Label("Password: ", style_type_name_override="Label")
with ui.ZStack(width=ui.Percent(75)):
self._password = ui.StringField(
height=20, password_mode=True, style_type_name_override="Field"
)
self._password_overlay = ui.ZStack()
with self._password_overlay:
ui.Rectangle(style_type_name_override="Field", name="overlay")
ui.Label("Invalid credentials.", style_type_name_override="Field", name="warn")
with ui.HStack(height=0):
ui.Spacer()
ui.Label("Remember My Password ", width=0, height=20, style_type_name_override="Label")
self._remember = ui.CheckBox(enabled=True, width=0, style_type_name_override="CheckBox")
with ui.HStack(height=20, spacing=4):
ui.Spacer()
self._okay_button = ui.Button("Okay", width=100, style_type_name_override="Button")
self._cancel_button = ui.Button("Cancel", width=100, style_type_name_override="Button")
ui.Spacer(height=2)
def on_begin_edit(_):
self._password_overlay.visible = False
self._sub_begin_edit = self._password.model.subscribe_begin_edit_fn(on_begin_edit)
@property
def username(self) -> str:
if self._username:
return self._username.model.get_value_as_string()
return ""
@property
def password(self) -> str:
if self._password:
return self._password.model.get_value_as_string()
return ""
def show(self, provider: str, **kwargs):
def on_okay(dialog, provider, callback: Callable):
self._save_default_settings(provider)
if callback:
callback(dialog)
else:
dialog.hide()
def on_cancel(dialog, provider, callback: Callable):
if callback:
callback(dialog)
else:
dialog.hide()
click_okay_handler = kwargs.get("click_okay_handler")
self._okay_button.set_clicked_fn(lambda: on_okay(self, provider, click_okay_handler))
click_cancel_handler = kwargs.get("click_cancel_handler")
self._cancel_button.set_clicked_fn(lambda: on_cancel(self, provider, click_cancel_handler))
self._message.text = f"Please login to your {provider} account."
self._load_default_settings(provider)
self._password_overlay.visible = False
self._window.visible = True
def _load_default_settings(self, provider: str):
settings = carb.settings.get_settings()
default_settings = settings.get_as_string("/exts/artec.asset.browser/appSettings")
username = settings.get_as_string(f"{default_settings}/providers/{provider}/username")
password = settings.get_as_string(f"{default_settings}/providers/{provider}/password")
remember = settings.get_as_bool(f"{default_settings}/providers/remember_password")
self._username.model.set_value(username)
self._password.model.set_value(password)
self._remember.model.set_value(remember)
def _save_default_settings(self, provider: str):
settings = carb.settings.get_settings()
default_settings = settings.get_as_string("/exts/artec.asset.browser/appSettings")
remember = self._remember.model.get_value_as_bool()
username = self._username.model.get_value_as_string()
password = self._password.model.get_value_as_string() if remember else ""
settings.set_string(f"{default_settings}/providers/{provider}/username", username)
settings.set_string(f"{default_settings}/providers/{provider}/password", password)
settings.set_bool(f"{default_settings}/providers/remember_password", remember)
def warn_password(self):
self._password_overlay.visible = True
def hide(self):
self._window.visible = False
def destroy(self):
self._message = None
self._username = None
self._password = None
self._password_overlay = None
self._remember = None
        self._okay_button = None
self._cancel_button = None
self._sub_begin_edit = None
self._window = None
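# --- Hedged usage sketch (added for illustration; not part of the forked code) ---
# Shows the two-callback contract the browser widget relies on: the okay
# handler verifies credentials, then either hide()s or warn_password()s.
def _example_show_auth_dialog(verify_fn) -> AuthDialog:
    dialog = AuthDialog()
    def on_okay(d: AuthDialog) -> None:
        # verify_fn is an assumed credential check returning bool
        if verify_fn(d.username, d.password):
            d.hide()
        else:
            d.warn_password()
    dialog.show("ArtecCloud", click_okay_handler=on_okay, click_cancel_handler=lambda d: d.hide())
    return dialog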
| 6,629 | Python | 41.5 | 112 | 0.590889 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/detail_delegate.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore class AssetDetailDelegate
import carb
import carb.settings
import omni.ui as ui
import omni.client
import omni.kit.app
from omni.kit.browser.core import DetailDelegate, DetailItem, create_drop_helper
from omni.kit.window.filepicker import FilePickerDialog
from .models import AssetStoreModel, AssetDetailItem, AssetType, MoreDetailItem, SearchingDetailItem, AssetFusion
from .models.asset_detail_item import ASSET_TIPS
from .hover_window import HoverWindow
from .auth_dialog import AuthDialog
from .download_progress_bar import DownloadProgressBar
from .download_helper import DownloadHelper
import os
import asyncio
from pathlib import Path
from typing import Optional, Dict, Tuple, Callable
from functools import partial
import webbrowser
CURRENT_PATH = Path(__file__).parent
ICON_PATH = CURRENT_PATH.parent.parent.parent.joinpath("icons")
SETTING_HOVER_WINDOW = "/exts/artec.asset.browser/hoverWindow"
SETTING_MY_ASSET_FOLDERS = "/persistent/exts/omni.kit.browser.asset_provider.local/folders"
SETTING_MY_ASSET_FOLDER_CHANGED = "/exts/omni.kit.browser.asset_provider.local/folderChanged"
MIN_THUMBNAIL_SIZE_HOVER_WINDOW = 192
LABEL_HEIGHT = 32
ASSET_PROVIDER_ICON_SIZE = 32
class AssetDetailDelegate(DetailDelegate):
"""
Delegate to show asset item in detail view
Args:
model (AssetBrowserModel): Asset browser model
"""
def __init__(self, model: AssetStoreModel):
super().__init__(model=model)
self._dragging_url = None
self._settings = carb.settings.get_settings()
self._context_menu: Optional[ui.Menu] = None
self._action_item: Optional[AssetDetailItem] = None
self._action_fusion: Optional[AssetFusion] = None
self._vendor_container: Dict[AssetDetailItem, ui.ZStack] = {}
self._hover_center_container: Dict[AssetDetailItem, ui.VStack] = {}
self._hover_center_label: Dict[AssetDetailItem, ui.Label] = {}
self._hover_container: Dict[AssetDetailItem, ui.VStack] = {}
self._hover_label: Dict[AssetDetailItem, ui.Label] = {}
self._hover_background: Dict[AssetDetailItem, ui.Widget] = {}
self._asset_type_container: Dict[AssetDetailItem, ui.Widget] = {}
self._asset_type_image: Dict[AssetDetailItem, ui.Image] = {}
self._download_progress_bar: Dict[AssetFusion, DownloadProgressBar] = {}
self._draggable_urls: Dict[str, str] = {}
self._auth_dialog: Optional[AuthDialog] = None
self._pick_folder_dialog: Optional[FilePickerDialog] = None
self._on_request_more_fn: Callable[[None], None] = None
self.more_item_image: Optional[ui.Image] = None
self.more_item_label: Optional[ui.Label] = None
self.more_item_center_tips: Optional[ui.Label] = None
self.more_item_right_tips: Optional[ui.Label] = None
self._enable_hovered = True
self._asset_type_image_multiple = self._get_asset_type_image_multiple(self.thumbnail_size)
self._show_hover_window = carb.settings.get_settings().get(SETTING_HOVER_WINDOW)
if self._show_hover_window:
self._hover_window = HoverWindow()
else:
self._hover_window = None
self._instanceable_categories = self._settings.get("/exts/omni.kit.browser.asset/instanceable")
if self._instanceable_categories:
self._drop_helper = create_drop_helper(
pickable=True,
add_outline=True,
on_drop_accepted_fn=self._on_drop_accepted,
on_drop_fn=self._on_drop,
)
self._download_helper = DownloadHelper()
def destroy(self):
self._drop_helper = None
if self._pick_folder_dialog is not None:
self._pick_folder_dialog.destroy()
self._pick_folder_dialog = None
if self._hover_window:
self._hover_window.visible = False
self._hover_window = None
for item in self._download_progress_bar:
self._download_progress_bar[item].destroy()
super().destroy()
def set_request_more_fn(self, request_more_fn: Callable[[None], None]) -> None:
self._on_request_more_fn = request_more_fn
def enable_hovered(self, enable: bool) -> None:
self._enable_hovered = enable
def get_thumbnail(self, item) -> str:
"""Set default sky thumbnail if thumbnail is None"""
if item.thumbnail is None:
return f"{ICON_PATH}/usd_stage_256.png"
else:
return item.thumbnail
def get_label_height(self) -> int:
        # Return 0 if self.hide_label, else two lines for small thumbnails and one line for large thumbnails
return LABEL_HEIGHT
def on_drag(self, item: AssetDetailItem) -> str:
"""Could be dragged to viewport window"""
if item.asset_type != AssetType.NORMAL:
# Cannot drag if item to be downloaded or external link
return ""
thumbnail = self.get_thumbnail(item)
icon_size = 128
with ui.VStack(width=icon_size):
if thumbnail:
ui.Spacer(height=2)
with ui.HStack():
ui.Spacer()
ui.ImageWithProvider(thumbnail, width=icon_size, height=icon_size)
ui.Spacer()
ui.Label(
item.name,
word_wrap=False,
elided_text=True,
skip_draw_when_clipped=True,
alignment=ui.Alignment.TOP,
style_type_name_override="GridView.Item",
)
self._dragging_url = None
if self._instanceable_categories:
# For required categories, need to set instanceable after dropped
url = item.url
pos = url.rfind("/")
if pos > 0:
url = url[:pos]
for category in self._instanceable_categories:
if category in url:
self._dragging_url = item.url
break
return item.url
def _on_drop_accepted(self, url):
        # Only handle dragging from the asset browser
return url == self._dragging_url
def _on_drop(self, url, target, viewport_name, context_name):
saved_instanceable = self._settings.get("/persistent/app/stage/instanceableOnCreatingReference")
if not saved_instanceable and url == self._dragging_url:
# Enable instanceable for viewport asset drop handler
self._settings.set_bool("/persistent/app/stage/instanceableOnCreatingReference", True)
async def __restore_instanceable_flag():
# Waiting for viewport asset dropper handler completed
await omni.kit.app.get_app().next_update_async()
self._settings.set("/persistent/app/stage/instanceableOnCreatingReference", saved_instanceable)
asyncio.ensure_future(__restore_instanceable_flag())
self._dragging_url = None
# Let viewport do asset dropping
return None
def _single_item_changed(self, item: AssetDetailItem):
if self._cached_label_widgets[item] is not None:
label_height = self._cached_label_widgets[item].computed_height
super()._single_item_changed(item)
if self._cached_label_widgets[item] is not None:
self._cached_label_widgets[item].height = ui.Pixel(label_height)
def on_double_click(self, item: AssetDetailItem) -> None:
if isinstance(item, AssetDetailItem):
if item.asset_type == AssetType.EXTERNAL_LINK:
webbrowser.open(item.asset_model["product_url"])
elif item.asset_type == AssetType.DOWNLOAD:
fusion = AssetFusion(item, item.asset_model['name'],
item.asset_model['download_url'],
item.asset_model["thumbnail"])
self.download_fusion(fusion)
elif item.asset_type == AssetType.NORMAL:
return super().on_double_click(item)
else:
if self._on_request_more_fn:
self._on_request_more_fn()
def download_fusion(self, fusion: AssetFusion) -> None:
if fusion in self._download_progress_bar and self._download_progress_bar[fusion].visible:
# Already downloading, do nothing
return
self._download_fusion_asset(fusion)
def on_right_click(self, item: DetailItem) -> None:
"""Show context menu"""
self._action_item = item
if isinstance(item, AssetDetailItem):
show_web = item.asset_model.get("product_url", "") != ""
if show_web:
self._context_menu = ui.Menu("Asset browser context menu")
with self._context_menu:
if show_web:
ui.MenuItem(
"Open in Web Browser",
triggered_fn=partial(webbrowser.open, item.asset_model["product_url"]),
)
self._context_menu.show()
def build_thumbnail(self, item: AssetDetailItem, container: ui.Widget = None) -> Optional[ui.Image]:
if not container:
container = ui.ZStack()
if hasattr(item, "uid"):
if item.uid in self._draggable_urls:
item.url = self._draggable_urls[item.uid]
item.asset_type = AssetType.NORMAL
with container:
thumbnail = self.get_thumbnail(item)
image = ui.Image(
thumbnail or "",
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT
if isinstance(item, SearchingDetailItem)
else ui.FillPolicy.PRESERVE_ASPECT_CROP,
style_type_name_override="GridView.Image",
)
if isinstance(item, MoreDetailItem):
self.more_item_image = image
# Vendor image
self._build_vendor_image(item)
# Asset type background and image
self._asset_type_container[item] = ui.VStack()
self._build_asset_type(item)
# For displaying download progress over the thumbnail
self._download_progress_bar[item] = DownloadProgressBar()
# Selection rectangle
ui.Rectangle(style_type_name_override="GridView.Item.Selection")
return image
def on_hover(self, item: DetailItem, hovered: bool) -> None:
if not self._enable_hovered:
return
if self.thumbnail_size < MIN_THUMBNAIL_SIZE_HOVER_WINDOW:
if self._show_hover_window:
image_size = self.thumbnail_size * 1.15
offset = self.thumbnail_size * 0.15 / 2
window_position_x = self._cached_thumbnail_widgets[item].screen_position_x - offset
window_position_y = self._cached_thumbnail_widgets[item].screen_position_y - offset
self._hover_window.show(item, image_size, LABEL_HEIGHT, window_position_x, window_position_y)
else:
if item in self._hover_center_container:
self._hover_center_container[item].visible = hovered
else:
if item in self._hover_container:
self._hover_container[item].visible = hovered
self._hover_background[item].visible = hovered
def on_thumbnail_size_changed(self, thumbnail_size: int) -> None:
new_multiple = self._get_asset_type_image_multiple(thumbnail_size)
if new_multiple != self._asset_type_image_multiple:
self._asset_type_image_multiple = new_multiple
for item in self._asset_type_container:
self._build_asset_type(item)
for item in self._vendor_container:
self._vendor_container[item].width = ui.Pixel(
ASSET_PROVIDER_ICON_SIZE * self._asset_type_image_multiple
)
self._vendor_container[item].height = ui.Pixel(
ASSET_PROVIDER_ICON_SIZE * self._asset_type_image_multiple
)
def _build_label(self, item: AssetDetailItem, container: ui.Widget = None) -> ui.Widget:
"""
Display label per detail item
Args:
item (AssetDetailItem): detail item to display
"""
if not container:
container = ui.ZStack(height=LABEL_HEIGHT)
with container:
ui.Rectangle(height=0, style_type_name_override="GridView.Item.Frame")
with ui.ZStack():
# TODO: fix hover
self._hover_background[item] = ui.Rectangle(
visible=False, style_type_name_override="GridView.Item.Hover.Background"
)
with ui.HStack(height=LABEL_HEIGHT):
label = self._build_name_and_owner(item)
self._build_tips_at_right(item)
if not self._show_hover_window:
self._build_tips_at_center(item)
return label
def _build_tips_at_right(self, item: AssetDetailItem) -> None:
self._hover_container[item] = ui.ZStack(width=0, visible=False)
with self._hover_container[item]:
self._hover_label[item] = ui.Label(
item.tips,
name=item.asset_type,
width=0,
alignment=ui.Alignment.RIGHT,
style_type_name_override="GridView.Item.Tips.Text",
)
if isinstance(item, MoreDetailItem):
self.more_item_right_tips = self._hover_label[item]
def _build_tips_at_center(self, item: AssetDetailItem) -> None:
# Hover background and text
self._hover_center_container[item] = ui.ZStack(visible=False)
with self._hover_center_container[item]:
ui.Rectangle(style_type_name_override="GridView.Item.Hover.Background")
self._hover_center_label[item] = ui.Label(
# TODO: use download link in tips ?
item.tips,
name=item.asset_type,
alignment=ui.Alignment.CENTER,
style_type_name_override="GridView.Item.Tips.Text",
)
if isinstance(item, MoreDetailItem):
self.more_item_center_tips = self._hover_center_label[item]
def _build_name_and_owner(self, item: AssetDetailItem) -> ui.Label:
text = self.get_label(item)
with ui.VStack(height=LABEL_HEIGHT):
label = ui.Label(
text or "",
word_wrap=True,
elided_text=True,
skip_draw_when_clipped=True,
alignment=ui.Alignment.LEFT,
style_type_name_override="GridView.Item",
)
if isinstance(item, AssetDetailItem):
with ui.HStack():
ui.Label(
"by " + item.asset_model["user"],
elided_text=True,
style_type_name_override="GridView.Item.User",
)
ui.Spacer()
else:
ui.Label("")
self.more_item_label = label
return label
def _build_vendor_image(self, item: AssetDetailItem):
vendor_image = self._get_vendor_image(item)
if not vendor_image:
return
self._vendor_container[item] = ui.Image(
vendor_image,
width=ASSET_PROVIDER_ICON_SIZE,
height=ASSET_PROVIDER_ICON_SIZE,
fill_policy=ui.FillPolicy.STRETCH,
style_type_name_override="GridView.Item.Vendor.Image",
)
def _build_asset_type(self, item: AssetDetailItem):
(type_image_url, type_image_size) = self._get_asset_type_image(item)
tips_size = 32 * self._asset_type_image_multiple
type_image_size *= self._asset_type_image_multiple
self._asset_type_container[item].clear()
with self._asset_type_container[item]:
ui.Spacer()
with ui.HStack(height=tips_size):
ui.Spacer()
with ui.ZStack(width=tips_size):
ui.Triangle(
alignment=ui.Alignment.RIGHT_TOP, style_type_name_override="GridView.Item.Tips.Background"
)
with ui.VStack():
ui.Spacer()
with ui.HStack(height=0):
ui.Spacer()
self._asset_type_image[item] = ui.Image(
type_image_url,
width=type_image_size,
height=type_image_size,
fill_policy=ui.FillPolicy.STRETCH,
mouse_pressed_fn=lambda x, y, btn, flag, item=item: self._on_type_image_pressed(item),
style_type_name_override="GridView.Item.Tips.Image",
)
def _get_asset_type_image(self, item: AssetDetailItem) -> Tuple[str, int]:
"""Get item tips image url and text"""
if isinstance(item, AssetDetailItem):
if item.asset_type == AssetType.EXTERNAL_LINK:
return (f"{ICON_PATH}/External_link_green.svg", 16)
elif item.asset_type == AssetType.DOWNLOAD:
return (f"{ICON_PATH}/Download_dark.svg", 20)
elif item.asset_type == AssetType.NORMAL:
return (f"{ICON_PATH}/finger_drag_dark.svg", 24)
else:
return ("", 0)
else:
return ("", 0)
def _get_vendor_image(self, item: AssetDetailItem) -> str:
"""Get item vendor image url"""
if isinstance(item, AssetDetailItem):
vendor_name = item.asset_model["vendor"]
return self._model.providers[vendor_name]["icon"]
else:
return ""
def _download_fusion_asset(self, fusion: AssetFusion) -> None:
self.select_fusion_download_folder(fusion)
def select_fusion_download_folder(self, fusion: AssetFusion):
self._action_item = fusion.asset
self._action_fusion = fusion
if self._pick_folder_dialog is None:
self._pick_folder_dialog = self._create_filepicker(
"Select Directory to Download Asset", click_apply_fn=self._on_fusion_folder_picked, dir_only=True
)
self._pick_folder_dialog.show()
def _create_filepicker(
self,
title: str,
filters: list = ["All Files (*)"],
click_apply_fn: Callable = None,
error_fn: Callable = None,
dir_only: bool = False,
) -> FilePickerDialog:
async def on_click_handler(
filename: str, dirname: str, dialog: FilePickerDialog, click_fn: Callable, dir_only: bool
):
fullpath = None
if dir_only:
fullpath = dirname
else:
if dirname:
fullpath = f"{dirname}/{filename}"
elif filename:
fullpath = filename
if click_fn:
click_fn(fullpath)
dialog.hide()
dialog = FilePickerDialog(
title,
allow_multi_selection=False,
apply_button_label="Select",
click_apply_handler=lambda filename, dirname: asyncio.ensure_future(
on_click_handler(filename, dirname, dialog, click_apply_fn, dir_only)
),
click_cancel_handler=lambda filename, dirname: dialog.hide(),
item_filter_options=filters,
error_handler=error_fn,
)
dialog.hide()
return dialog
def _on_fusion_folder_picked(self, url: Optional[str]) -> None:
fusion = self._action_fusion
if url is not None:
self._pick_folder_dialog.set_current_directory(url)
asyncio.ensure_future(
self._model.download_fusion_async(
fusion,
fusion.asset.asset_model,
url,
callback=partial(self._on_fusion_asset_downloaded, fusion.asset),
on_progress_fn=partial(self._on_fusion_download_progress, fusion.asset),
on_prepared_fn=lambda: self._on_fusion_asset_prepared(fusion.asset)
)
)
if self._download_progress_bar.get(fusion.asset):
self._download_progress_bar[fusion.asset].visible = True
            if fusion.asset in self._hover_label:
                self._hover_label[fusion.asset].text = "Preparing"
            if fusion.asset in self._hover_center_label:
                self._hover_center_label[fusion.asset].text = "Preparing"
def _on_fusion_asset_prepared(self, item: AssetDetailItem):
        if item in self._hover_label:
            self._hover_label[item].text = "Downloading"
        if item in self._hover_center_label:
            self._hover_center_label[item].text = "Downloading"
def _on_fusion_download_progress(self, item: AssetDetailItem, progress: float) -> None:
if item in self._download_progress_bar:
self._download_progress_bar[item].progress = progress
def _on_fusion_asset_downloaded(self, item: AssetDetailItem, results: Dict):
self._add_to_my_assets(results["url"])
if self._download_progress_bar.get(item):
self._download_progress_bar[item].visible = False
if results.get("status") != omni.client.Result.OK:
return
async def delayed_item_changed(model: AssetStoreModel, item: AssetDetailItem):
for _ in range(20):
await omni.kit.app.get_app().next_update_async()
# TODO Do I need item changed
self.item_changed(model, item)
url = results.get("url")
if url:
asyncio.ensure_future(delayed_item_changed(self._model, item))
        if item in self._hover_label:
            self._hover_label[item].text = ASSET_TIPS[AssetType.DOWNLOAD]
        if item in self._hover_center_label:
            self._hover_center_label[item].text = ASSET_TIPS[AssetType.DOWNLOAD]
async def _download_thumbnail(self, item: AssetDetailItem, dest_url: str):
"""Copies the thumbnail for the given asset to the .thumbs subdir."""
if not (item and dest_url):
return
thumbnail = item.asset_model["thumbnail"]
thumbnail_ext = os.path.splitext(thumbnail)[-1]
if not thumbnail_ext:
return
filename = os.path.basename(dest_url) + thumbnail_ext
thumbnail_url = f"{os.path.dirname(dest_url)}/.thumbs/256x256/{filename}"
thumbnail_url = thumbnail_url.replace(".jpeg", ".png")
await omni.client.copy_async(thumbnail, thumbnail_url, behavior=omni.client.CopyBehavior.OVERWRITE)
        # Add the download to My Assets after the thumbnail is downloaded so it displays well
self._add_to_my_assets(dest_url)
def _navigate(self, url: str):
try:
import omni.kit.window.content_browser
content_window = omni.kit.window.content_browser.get_content_window()
content_window.navigate_to(url)
content_window._window._window.focus()
except ImportError:
pass
def _on_type_image_pressed(self, item: AssetDetailItem) -> None:
if item.asset_type == AssetType.EXTERNAL_LINK:
webbrowser.open(item.asset_model["product_url"])
def _get_asset_type_image_multiple(self, thumbnail_size):
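        # Scale the type/vendor overlay icons with the thumbnail size: 1x up to 192 px, 2x up to 384 px, 3x beyond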
if thumbnail_size > 384:
return 3
elif thumbnail_size > 192:
return 2
else:
return 1
def _add_to_my_assets(self, url: str) -> None:
# Add download folder to My Assets
for provider, setting in self._model.providers.items():
if provider == "My Assets":
url = url.replace("\\", "/")
downloaded_folder = url[: url.rfind("/")]
my_assets_folders = self._settings.get(SETTING_MY_ASSET_FOLDERS)
# Check if download folder already in My Assets
if my_assets_folders:
for folder in my_assets_folders:
if downloaded_folder.startswith(folder):
                            # Folder already in My Assets; trigger a refresh of that folder
self._settings.set(SETTING_MY_ASSET_FOLDER_CHANGED, folder)
return
my_assets_folders.append(downloaded_folder)
else:
my_assets_folders = [downloaded_folder]
# Add download folder to My Assets
self._settings.set(SETTING_MY_ASSET_FOLDERS, my_assets_folders)
break
| 25,416 | Python | 41.01157 | 118 | 0.584828 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/models/asset_store_client.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore AssetStoreClient
import asyncio
from typing import List, Optional, Dict, Tuple
import carb.settings
from omni.services.client import AsyncClient
from omni.services.core import main
from artec.services.browser.asset import AssetModel
class AssetStoreClient:
"""
    Represents a client to the asset store service.
Args:
url: Asset service url
"""
def __init__(self, url: str):
self._url = url
self._assets: List[AssetModel] = []
api_version = carb.settings.get_settings_interface().get("exts/artec.services.browser.asset/api_version")
self._client = AsyncClient(f"{self._url}/{api_version}", app=main.get_app())
def destroy(self):
asyncio.ensure_future(self._stop())
def list(self, category: str, search_words: Optional[List[str]] = None) -> List[AssetModel]:
return asyncio.get_event_loop().run_until_complete(self._list_async(category, search_words=search_words))
async def list_categories_async(self):
categories = await self._client.artec_assets.categories.get()
return categories
async def list_providers_async(self) -> Dict[str, str]:
return await self._client.artec_assets.providers.get()
async def config_provider_async(self, provider: str) -> None:
return await self._client.artec_assets.config.post(vendor=provider)
async def _list_async(
self,
category: Optional[str],
search_words: Optional[List[str]] = None,
sort=["name", "asc"],
page_size=100,
page_number=1,
providers=None,
) -> Tuple[List[AssetModel], bool]:
assets = []
search_args = {
"page": {"size": page_size, "number": page_number},
"keywords": search_words,
"sort": sort,
"vendors": providers,
}
if category:
search_args["filter"] = {"categories": [category]}
to_continue = False
result = await self._client.artec_assets.search.post(**search_args)
for store in result:
assets.extend(result[store][0])
if result[store][1]:
to_continue = True
return (assets, to_continue)
async def _stop(self):
await self._client.stop_async()
self._client = None
| 2,753 | Python | 32.180723 | 113 | 0.648747 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/models/asset_fusion.py | from dataclasses import dataclass
from .asset_detail_item import AssetDetailItem
@dataclass(frozen=True)
class AssetFusion:
asset: AssetDetailItem
name: str
url: str
thumbnail_url: str
| 203 | Python | 17.545453 | 46 | 0.753695 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/models/__init__.py | from .asset_store_model import AssetStoreModel
from .asset_detail_item import AssetDetailItem, AssetType, MoreDetailItem, SearchingDetailItem
from .main_navigation_item import MainNavigationItem
from .asset_fusion import AssetFusion | 232 | Python | 57.249986 | 94 | 0.862069 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/models/common_categories.py | COMMON_CATEGORIES = {
}
| 24 | Python | 7.333331 | 21 | 0.666667 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/models/asset_detail_item.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore AssetType, AssetDetailItem, MoreDetailItem, SearchingDetailItem
import re
import omni.client
from artec.services.browser.asset import AssetModel
from omni.kit.browser.core import DetailItem
from artec.services.browser.asset import get_instance as get_asset_services
from ..style import ICON_PATH
from ..download_helper import DownloadHelper
class AssetType:
# External link in product url
EXTERNAL_LINK = "ExternalLink"
    # Asset in usdz or zip, to be downloaded (and unzipped if zip)
DOWNLOAD = "Download"
# Normal, user could drag it into viewport
NORMAL = "Normal"
UNKNOWN = "Unknown"
ASSET_TIPS = {
AssetType.EXTERNAL_LINK: "DOUBLE CLICK FOR\nEXTERNAL LINK", # Artec Cloud provides external links
AssetType.DOWNLOAD: "DOUBLE CLICK TO\nDOWNLOAD", # Default action for download type is to open
AssetType.NORMAL: "DRAG INTO\nVIEWPORT",
AssetType.UNKNOWN: "",
}
class AssetDetailItem(DetailItem):
def __init__(self, asset_model: AssetModel):
self._local_url = DownloadHelper().get_download_url(asset_model)
super().__init__(
asset_model["name"],
self._local_url if self._local_url else asset_model["download_url"],
thumbnail=asset_model["thumbnail"]
)
self.uid = asset_model["identifier"]
self.user = asset_model["user"]
self.asset_model = asset_model
self._get_type()
@property
def tips(self) -> str:
return ASSET_TIPS[self.asset_type]
def _get_type(self):
download_url = self.asset_model["download_url"].split("?")[0]
if self._local_url:
self.asset_type = AssetType.NORMAL
elif download_url:
if self._is_local_path(download_url):
# For local assets, drag and drop into viewport
self.asset_type = AssetType.NORMAL
elif (
download_url.lower().endswith("usdz")
or download_url.lower().endswith("zip")
or download_url.lower().endswith("download")
):
self.asset_type = AssetType.DOWNLOAD
else:
self.asset_type = AssetType.NORMAL
elif self.asset_model["product_url"]:
self.asset_type = AssetType.EXTERNAL_LINK
else:
self.asset_type = AssetType.UNKNOWN
def authorized(self) -> bool:
asset_services = get_asset_services()
if asset_services:
asset_store = asset_services.get_store(self.asset_model.get("vendor"))
if asset_store:
return asset_store.authorized()
return False
def _is_local_path(self, path: str) -> bool:
"""Returns True if given path is a local path"""
broken_url = omni.client.break_url(path)
if broken_url.scheme == "file":
return True
elif broken_url.scheme in ["omniverse", "http", "https"]:
return False
        # Return True if the root directory looks like the start of a Linux or Windows
        # path, e.g. "/home/user" or "C:/data"; otherwise assume the path is remote
root_name = broken_url.path.split("/")[0]
return not root_name or re.match(r"[A-Za-z]:", root_name) is not None
class MoreDetailItem(DetailItem):
def __init__(self):
super().__init__("More", "", f"{ICON_PATH}/load_more.png")
# used to show tips
self.tips = "DOUBLE CLICK FOR\nMORE ASSETS"
self.asset_type = AssetType.NORMAL
class SearchingDetailItem(DetailItem):
def __init__(self):
super().__init__("Searching", "", f"{ICON_PATH}/search.png")
# used to show tips
self.tips = "Searching"
self.asset_type = AssetType.NORMAL
| 4,110 | Python | 34.136752 | 102 | 0.637713 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/models/asset_store_model.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore AssetStoreModel
import traceback
import asyncio
import copy
import carb
import carb.dictionary
import carb.settings
import omni.usd
import omni.kit.app
import omni.client
from omni.kit.browser.core import AbstractBrowserModel, CollectionItem, CategoryItem, DetailItem
from typing import Dict, List, Optional, Union, Callable
from artec.services.browser.asset import AssetModel, ProviderModel, BaseAssetStore
from artec.services.browser.asset import get_instance as get_asset_services
from pxr import Tf
from .asset_store_client import AssetStoreClient
from .asset_detail_item import AssetDetailItem, MoreDetailItem, SearchingDetailItem
from .main_navigation_item import MainNavigationItem
from .asset_fusion import AssetFusion
from .common_categories import COMMON_CATEGORIES
SETTING_ROOT = "/exts/artec.asset.browser/"
SETTING_PROVIDER_ROOT = SETTING_ROOT + "provider"
SETTING_PAGE_SIZE = SETTING_ROOT + "pageSize"
SETTING_SINGLE_PROVIDER = SETTING_ROOT + "singleProvider"
CATEGORY_ANY = "All projects"
class AssetStoreModel(AbstractBrowserModel):
"""
Represents the browser model for asset store services.
"""
def __init__(self):
self.artec_cloud_provider_id = 'ArtecCloud'
super().__init__(always_realod_detail_items=True)
# Dummy collection item. Not displayed, but required for browser model
self._collection_item = CollectionItem("store", "")
# For browser UI, category items in category treeview
self._category_items: List[MainNavigationItem] = []
# Category url <=> Category item
        self._cached_category_items: Dict[str, MainNavigationItem] = {}
# Category item <=> Detail items of this category item (without children category item)
self._cached_detail_items: Dict[MainNavigationItem, List[DetailItem]] = {}
# Store
store_url = carb.settings.get_settings().get(SETTING_PROVIDER_ROOT)
self._store_client = AssetStoreClient(store_url)
        # Sorting of detail items, defaults to name ascending
self._sort_args = {"key": lambda item: item["name"], "reverse": False}
self.search_words: Optional[List[str]] = None
self.search_provider: Optional[str] = None
self._search_sort_args = ["name", "asc"]
# Searched asset models
self._assets: Optional[List[AssetModel]] = None
self._categories: Optional[Dict] = None
self.providers: Dict[str, ProviderModel] = {}
self._refresh_provider_sub: Dict[str, omni.kit.app.SettingChangeSubscription] = {}
self._enable_provider_sub: Dict[str, omni.kit.app.SettingChangeSubscription] = {}
self.on_refresh_provider_fn: Callable[[str], None] = None
self.on_enable_provider_fn: Callable[[str], None] = None
self._page_number = 1
self._more_assets = False
self._searching = False
asyncio.ensure_future(self.list_providers_async())
def destroy(self):
        for provider in self._refresh_provider_sub:
            self._refresh_provider_sub[provider] = None
        for provider in self._enable_provider_sub:
            self._enable_provider_sub[provider] = None
self._store_client.destroy()
def get_store(self, vendor: str) -> BaseAssetStore:
asset_services = get_asset_services()
if asset_services:
return asset_services.get_store(vendor)
return None
def artec_cloud_provider(self) -> BaseAssetStore:
return self.get_store(self.artec_cloud_provider_id)
def get_collection_items(self) -> List[CollectionItem]:
"""Override to get list of collection items"""
return [self._collection_item]
def get_category_items(self, item: CollectionItem) -> List[CategoryItem]:
"""Override to get list of category items"""
self._category_items = []
        self._cached_category_items = {}
# Public categories
full_categories = copy.deepcopy(COMMON_CATEGORIES)
for provider, categories in self._categories.items():
if provider in self.providers:
if self.providers[provider]["private"]:
continue
for name in categories:
if name in full_categories:
full_categories[name].extend(categories[name])
full_categories[name] = list(set(full_categories[name]))
else:
full_categories[name] = categories[name]
def __create_provider_category_items(category_list, provider=None, root=None):
for name in category_list:
self._create_category_chain(name, provider, root=root)
if category_list[name]:
category_list[name].sort()
for sub in category_list[name]:
url = name + "/" + sub
# Create category item
self._create_category_chain(url, provider, root=root)
__create_provider_category_items(full_categories)
self._category_items.sort(key=lambda item: item.name)
# Private categories
for provider, categories in self._categories.items():
if provider in self.providers:
if self.providers[provider]["private"]:
root_category_item = self._create_category_item(provider, None, provider)
if self.providers[provider]["configurable"]:
root_category_item.configurable = True
self._category_items.insert(0, root_category_item)
__create_provider_category_items(categories, provider=provider, root=root_category_item)
# All
self._category_items.insert(0, self._create_category_item(CATEGORY_ANY, None, list(self.providers.keys())))
# Cloud projects
self.cloud_projects_category_item = self._create_category_item("cloud projects", None, self.artec_cloud_provider_id)
self._category_items.insert(2, self.cloud_projects_category_item)
# Add dummy category to cloud projects to make it expandable
self._dummy_category_item = CategoryItem("")
self.cloud_projects_category_item.children.append(self._dummy_category_item)
return self._category_items
def get_detail_items(self, item: CategoryItem) -> List[DetailItem]:
"""Override to get list of detail items"""
detail_items = []
if self._assets:
for asset in self._assets:
detail_items.append(self._create_detail_item(asset))
if self._more_assets:
detail_items.append(MoreDetailItem())
if self._searching:
detail_items.append(SearchingDetailItem())
return detail_items
def execute(self, item: Union[AssetDetailItem, CategoryItem]) -> None:
if isinstance(item, CategoryItem):
# TODO: Jump to selected category item in category tree view
pass
elif isinstance(item, AssetDetailItem):
# Create a Reference of the Props in the stage
stage = omni.usd.get_context().get_stage()
if not stage:
return
name = item.name_model.as_string.split(".")[0]
prim_path = omni.usd.get_stage_next_free_path(stage, "/" + Tf.MakeValidIdentifier(name), True)
omni.kit.commands.execute(
"CreateReferenceCommand", path_to=prim_path, asset_path=item.url, usd_context=omni.usd.get_context()
)
def change_sort_args(self, sort_field: str, sort_order: str) -> None:
"""Change sort args with new field and order"""
sort_key = "name"
if sort_field == "Date":
sort_key = "created_at"
self._search_sort_args = [sort_key]
if sort_order == "Descending":
self._search_sort_args.append("desc")
else:
self._search_sort_args.append("asc")
if sort_field == "Date":
sort_fn = lambda item: item["published_at"]
else:
# Default, always sort by name
sort_fn = lambda item: item["name"]
self._sort_args = {"key": sort_fn, "reverse": sort_order == "Descending"}
def get_sort_args(self) -> Dict:
"""
Get sort args to sort detail items.
"""
return self._sort_args
def config_provider(self, provider: str) -> None:
asyncio.ensure_future(self._store_client.config_provider_async(provider))
def _on_client_prepared(self, client: AssetStoreClient) -> None:
# Client prepared, notify to model updated and regenerate items
self._category_items = []
        self._cached_category_items = {}
self._cached_detail_items = {}
self._item_changed(self._collection_item)
def _create_category_chain(
self, category_url: str, provider_name: Optional[str], root: MainNavigationItem = None
) -> MainNavigationItem:
"""Create catetory chain by url."""
if category_url in self._cached_catetory_items:
category_item = self._cached_catetory_items[category_url]
if provider_name:
category_item.add_provider(provider_name)
return category_item
pos = category_url.rfind("/")
# Create new category item
category_item = self._create_category_item(category_url[pos + 1:], category_url, provider_name)
if pos < 0:
# Root category
if root:
root.children.append(category_item)
else:
self._category_items.append(category_item)
        else:
parent_category_item = self._create_category_chain(category_url[:pos], provider_name, root=root)
parent_category_item.children.append(category_item)
return category_item
def _create_category_item(self, category_name: str, category_url: Optional[str],
provider_name: str) -> MainNavigationItem:
category_item = MainNavigationItem(category_name, category_url, provider_name)
        self._cached_category_items[category_url] = category_item
return category_item
def _create_detail_item(self, asset_model: AssetModel) -> DetailItem:
return AssetDetailItem(asset_model)
def reset_assets(self):
self._assets = []
self._page_number = 1
self._more_assets = False
async def list_assets_async(
self, category_item: MainNavigationItem, callback: Callable[[None], None] = None, reset: bool = True
) -> bool:
if reset:
self.reset_assets()
self._more_assets = False
page_size = carb.settings.get_settings().get(SETTING_PAGE_SIZE)
single_provider = carb.settings.get_settings().get(SETTING_SINGLE_PROVIDER)
if category_item.providers:
            # If the category is private, always search the matched providers regardless of the provider filter
if self.search_provider:
if self.search_provider not in category_item.providers:
                    carb.log_warn(
                        f"'{category_item.name}' is restricted to {category_item.providers}; ignoring filter '{self.search_provider}'!"
                    )
providers = category_item.providers
else:
providers = [self.search_provider]
else:
providers = category_item.providers
elif self.search_provider:
providers = [self.search_provider]
else:
providers = list(self.providers.keys())
if single_provider:
self._searching = True
queries: Dict[str, asyncio.Future] = {}
for provider in providers:
queries[provider] = asyncio.ensure_future(
self._list_assets_by_vendor_async(
category_item.url, page_size, [provider], callback, single_step=True
)
)
await asyncio.gather(*queries.values(), return_exceptions=True)
for provider, query in queries.items():
try:
if query.result():
self._more_assets = True
except Exception:
carb.log_warn(f"Failed to fetch results for provider {provider}. Reason:")
carb.log_warn(traceback.format_exc())
self._searching = False
if callback:
callback()
else:
self._more_assets = await self._list_assets_by_vendor_async(
category_item.url, page_size, providers, callback
)
self._page_number += 1
return self._more_assets
async def _list_assets_by_vendor_async(self, category_url, page_size, providers, callback, single_step=False):
carb.log_info(
f"Searching providers: {providers} with category: {category_url}, keywords: {self.search_words}, page: {self._page_number}"
)
(assets, more_assets) = await self._store_client._list_async(
category_url,
search_words=self.search_words,
sort=self._search_sort_args,
page_size=page_size,
page_number=self._page_number,
providers=providers,
)
if assets:
# Filter duplicated results
new_assets = []
for asset in assets:
if asset not in self._assets + new_assets:
new_assets.append(asset)
# Sort new results
new_assets.sort(**self._sort_args)
            # Add as sub-categories to the cloud projects category
self._add_assets_to_cloud_projects_category(new_assets)
# Unpack cloud projects
assets_to_add = []
for asset in new_assets:
if asset.get("vendor") == self.artec_cloud_provider_id:
assets_to_add.extend(self._extract_fusions_from_artec_cloud_project(asset))
else:
assets_to_add.append(asset)
self._assets.extend(assets_to_add)
carb.log_info(f" {len(assets)} projects returned, {len(assets_to_add)} assets added, total {len(self._assets)}")
if not single_step and more_assets:
self._more_assets = True
if callback:
callback()
elif not single_step:
if callback:
callback()
return more_assets
def _add_assets_to_cloud_projects_category(self, assets):
for asset in assets:
if asset.get("vendor") != self.artec_cloud_provider_id:
continue
if any(child.name == asset.get("name") for child in self.cloud_projects_category_item.children):
continue
category_url = asset.get("product_url").split("/")[-1]
item = self._create_category_item(asset.get("name"), category_url,
self.artec_cloud_provider_id)
self.cloud_projects_category_item.children.append(item)
if (self._dummy_category_item in self.cloud_projects_category_item.children
and len(self.cloud_projects_category_item.children) > 1):
self.cloud_projects_category_item.children.remove(self._dummy_category_item)
def _extract_fusions_from_artec_cloud_project(self, project_asset):
asset_store = self.get_store(project_asset["vendor"])
fusion_assets = []
for fusion_data in project_asset.get("fusions", []):
thumbnail_url = fusion_data["preview_url"]
if asset_store is not None:
thumbnail_url = asset_store.url_with_token(thumbnail_url)
categories = project_asset.get("categories", []).copy()
slug = project_asset.get("product_url").split("/")[-1]
categories.append(slug)
fusion_assets.append(
{
"identifier": fusion_data["fusion_id"],
"name": fusion_data["name"],
"download_url": fusion_data["download_url"],
"thumbnail": thumbnail_url,
"vendor": project_asset["vendor"],
"user": project_asset["user"],
"categories": categories,
"fusions": [],
"product_url": project_asset["product_url"]
}
)
return fusion_assets
async def list_categories_async(self):
self._categories = await self._store_client.list_categories_async()
async def list_providers_async(self):
self.providers = await self._store_client.list_providers_async()
for provider, setting in self.providers.items():
if provider in self._refresh_provider_sub:
self._refresh_provider_sub[provider] = None
if provider in self._enable_provider_sub:
self._enable_provider_sub[provider] = None
if setting["refresh_setting"]:
self._refresh_provider_sub[provider] = omni.kit.app.SettingChangeSubscription(
setting["refresh_setting"],
lambda item, event_type, p=provider: self._on_refresh_provider(p, item, event_type),
)
if setting["enable_setting"]:
self._enable_provider_sub[provider] = omni.kit.app.SettingChangeSubscription(
setting["enable_setting"],
lambda item, event_type, p=provider: self._on_enable_provider(p, item, event_type),
)
def _on_refresh_provider(self, provider: str, item: carb.dictionary.Item, event_type) -> None:
if self.on_refresh_provider_fn:
self.on_refresh_provider_fn(provider, item, event_type)
def _on_enable_provider(self, provider: str, item: carb.dictionary.Item, event_type) -> None:
if self.on_enable_provider_fn:
self.on_enable_provider_fn(provider, item, event_type)
async def authenticate_async(self, vendor: str, username: str, password: str, callback: Callable[[], None] = None):
asset_store = self.get_store(vendor)
if not asset_store:
return False
await asset_store.authenticate(username, password)
if callback:
callback()
async def download_fusion_async(
self,
fusion: AssetFusion,
asset: Dict,
dest_url: str,
callback: Callable[[Dict], None] = None,
on_progress_fn: Callable[[float], None] = None,
on_prepared_fn: Optional[Callable[[float], None]] = None
):
asset_store = self.get_store(asset.get("vendor"))
if not asset_store:
return
asset_model = AssetModel(
identifier=asset.get("identifier", ""),
name=asset.get("name", ""),
version=asset.get("version", ""),
published_at=asset.get("publishedAt", ""),
categories=asset.get("categories", []),
tags=asset.get("tags", []),
vendor=asset.get("vendor", ""),
download_url=asset.get("download_url", ""),
product_url=asset.get("product_url", ""),
price=asset.get("price", 0.0),
thumbnail=asset.get("thumbnail", ""),
user=asset.get("user", ""),
fusions=asset.get("fusions", ""),
)
results = await asset_store.download(fusion, dest_url, on_progress_fn=on_progress_fn,
timeout=600, on_prepared_fn=on_prepared_fn)
if results.get("status") != omni.client.Result.OK:
carb.log_warn(f"Failed to download asset from {asset.get('vendor')}.")
return
if callback:
callback(results)
| 20,375 | Python | 40.080645 | 135 | 0.598086 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/artec/asset/browser/models/main_navigation_item.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Forked from AssetStore MainNavigationItem
from typing import List, Union
from omni.kit.browser.core import CategoryItem
class MainNavigationItem(CategoryItem):
def __init__(self, name: str, url: str, provider: Union[str, List[str], None]):
super().__init__(name)
self.url = url
self.thumbnail = None
self.configurable = False
self.providers: List[str] = []
if provider is None:
self.providers = []
elif isinstance(provider, str):
self.providers = [provider]
else:
self.providers = provider
def add_provider(self, provider):
if provider not in self.providers:
self.providers.append(provider)
| 1,153 | Python | 35.062499 | 83 | 0.69124 |
ArtecGroup/omni-artec-asset-browser/exts/artec.asset.browser/docs/README.md | # Artec Cloud asset browser
Provides access to Artec Cloud models for Omniverse

| 123 | Markdown | 19.666663 | 51 | 0.747967 |
superboySB/SBDrone_deprecated/README.md | From model-in-the-loop (MITL) and software-in-the-loop (SITL) through hardware-in-the-loop (HITL) to real-world testing (REAL): building perception-aware velocity control for a UAV. Let's get started.
# Model-in-the-loop (MITL) simulation training
## Install
Find a powerful server, install the latest NVIDIA driver and NVIDIA Docker, and install all dependencies through the following NGC container:
```sh
docker login nvcr.io
docker build --pull --network host -t sbdrone_image:v1 -f docker/simulation.dockerfile docker
docker run --name sbdrone --entrypoint /bin/bash \
-itd --privileged --gpus all -e "ACCEPT_EULA=Y" --network=host \
-v /tmp/.X11-unix:/tmp/.X11-unix:ro -v $HOME/.Xauthority:/root/.Xauthority -e DISPLAY=$DISPLAY \
-v /usr/share/vulkan/icd.d/nvidia_icd.json:/etc/vulkan/icd.d/nvidia_icd.json \
-v /usr/share/vulkan/implicit_layer.d/nvidia_layers.json:/etc/vulkan/implicit_layer.d/nvidia_layers.json \
-v ~/docker/isaac-sim/cache/ov:/root/.cache/ov:rw \
-v ~/docker/isaac-sim/cache/pip:/root/.cache/pip:rw \
-v ~/docker/isaac-sim/cache/glcache:/root/.cache/nvidia/GLCache:rw \
-v ~/docker/isaac-sim/cache/computecache:/root/.nv/ComputeCache:rw \
-v ~/docker/isaac-sim/logs:/root/.nvidia-omniverse/logs:rw \
-v ~/docker/isaac-sim/config:/root/.nvidia-omniverse/config:rw \
-v ~/docker/isaac-sim/data:/root/.local/share/ov/data:rw \
-v ~/docker/isaac-sim/documents:/root/Documents:rw \
-v ~/docker/isaac-sim/cache/kit:/isaac-sim/kit/cache/Kit:rw \
sbdrone_image:v1
docker exec -it sbdrone /bin/bash
```
Launch as a headless app and try connecting with the Omniverse Streaming Client:
```sh
git clone https://github.com/superboySB/SBDrone && cd SBDrone
ln -s /isaac-sim _isaac_sim && dos2unix ./dzp_is_sb.sh && bash ./dzp_is_sb.sh --install
# note: execute the command from where the `dzp_is_sb.sh` executable exists (e.g., for bash users)
echo -e "alias sbpy=$(pwd)/dzp_is_sb.sh" >> ${HOME}/.bashrc && source ~/.bashrc
```
# Software-in-the-loop (SITL) simulation training
```sh
sbpy --python -c "print('HelloWorld.')"
sbpy --python /isaac-sim/standalone_examples/api/omni.isaac.core/add_cubes.py
ssh -L 8211:localhost:8211 -L 48010:localhost:48010 -p 17003 [email protected]
source /opt/ros/foxy/setup.bash
```
# Hardware-in-the-loop (HITL) debugging
Since the [official tutorial](https://www.youtube.com/watch?v=e3HUKGAWdx0) hits too many WSL2 limitations, for ease of deployment we recommend running both PX4 and the RL code inside Docker on a remote server (172.16.15.188), while AirSim runs on the local Windows 11 development machine (172.16.13.104).
## Install
```sh
docker build --network host -t sbdrone_image:sitl-v1 .
docker run -itd --privileged -v /tmp/.X11-unix:/tmp/.X11-unix:ro -e DISPLAY=$DISPLAY --gpus all --user=user --env=PX4_SIM_HOST_ADDR=172.23.53.8 --network=host --name=sitl sbdrone_image:sitl-v1 /bin/bash
docker exec -it --user=user sbdrone /bin/bash
git clone https://github.com/superboySB/SBDrone && cd SBDrone && pip install -r requirements.txt && pip install -e .
```
To test the ROS2 offboard functionality later on, you can treat the Docker container I built as a virtual machine; the subsequent verification workflow can follow this [tutorial](https://github.com/Jaeyoung-Lim/px4-offboard/blob/master/doc/ROS2_PX4_Offboard_Tutorial.md). If you prefer a single machine instead of two, fill in the github-token in the Dockerfile, uncomment the UE and AirSim build steps, and run `docker build -t mypx4_image:full .`; expect the resulting image to be around 300 GB, so reserve enough disk space.
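As a quick sanity check, the following hedged sketch lists the PX4 topics visible to ROS2. It assumes the container has ROS2 Foxy installed and that a PX4-to-ROS2 bridge (e.g. the uXRCE-DDS agent) is already running; topic names vary across PX4 versions:

```sh
source /opt/ros/foxy/setup.bash
ros2 topic list | grep -i fmu   # expect topics such as /fmu/out/... if the bridge is up
```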
## Removing collision bodies (tutorial for non-interactive Unreal Engine custom environments)
We provide the environment presented in the paper so that others can validate our approach. However, to create a custom environment, we recommend the following steps to prevent agent interaction.
### Remove ego-perspective rendering of other quadrotors
To make the quadrotor invisible in the scene, set 'Hidden in Scene Capture' to True. This makes it invisible to the other drones while the spectator actor can still see it. Go to Details, then Rendering, which exposes the setting 'Actor Hidden In Game'.


### Remove Collision boxes from all agents within the environment
We need to remove agent-agent interaction specifically while still enabling interaction with the environment. Hence we need to define all components of the quadrotor blueprint 'BP_FlyingPawn' as 'Pawn' and ignore any overlaps that occur within this group. To do this, we modify the collision response within the agent blueprint.
There are five components to change within the 'BP_FlyingPawn' blueprint: BodyMesh, Prop3, Prop2, Prop1 and Prop0. For all of these, go to Collisions, then change the collision presets to Custom. Change the Object Type to 'Pawn' and then, in 'Object Responses', change Pawn to Ignore as shown below.

Now to remove collisions between 'Pawns', we need to ignore the event 'ActorBeginOverlap' which we can do using a Blueprint Event Graph. Add the following event graph to 'BP_FlyingPawn'.

Agents will interact with the environment without interacting with each other.
## Testing manual control of a PX4 drone in AirSim
### Option 1: with QGC
If you need to control the drone manually (remote control), the communication link must be set up by hand in QGroundControl: QGC's auto-connect feature does not work across multiple machines (on a single machine it sometimes works fine). Concretely, add a UDP listener on 14550 and, in the optional server field, add `172.16.13.104:18570`, then click connect; for multiple vehicles you need to connect multiple times, incrementing the port each time. Correspondingly, you need to launch multiple PX4 instances, also with incrementing ports; other parameter settings can follow the [official tutorial](https://microsoft.github.io/AirSim/px4_sitl/).
### Option 2: without QGC
In `settings.json`, add a remote-control ID for the drone you want to control (a fuller hedged example follows the snippet below):
```json
"RC": {
"RemoteControlID": 1
}
```
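For reference, here is a minimal sketch of a complete `settings.json` with one PX4 vehicle. This is an illustrative assumption, not the project's actual config: the vehicle name is arbitrary, and the ports follow AirSim's documented PX4 defaults, so they must match your own PX4 instance:

```json
{
  "SettingsVersion": 1.2,
  "SimMode": "Multirotor",
  "Vehicles": {
    "PX4_1": {
      "VehicleType": "PX4Multirotor",
      "UseSerial": false,
      "UseTcp": true,
      "TcpPort": 4560,
      "ControlPortLocal": 14540,
      "ControlPortRemote": 14580,
      "RC": {
        "RemoteControlID": 1
      }
    }
  }
}
```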
Open an AirSim UE instance, then start a PX4 instance:
```sh
bash /home/user/PX4-Autopilot/Tools/simulation/sitl_multiple_run.sh 1
```
## Testing reinforcement learning
First open the UE instance, then run, in order:
```sh
bash /home/user/PX4-Autopilot/Tools/simulation/sitl_multiple_run.sh 5
python sbrl/Storage.py
python sbrl/
```
## Troubleshooting
### 1. If Docker pulls fail, pull a copy of the image on a machine with a good connection
```sh
docker save > <image-name>.tar sbdrone_image:v1
docker load < <image-name>.tar
```
If you want to share it, you can set up https://transfer.sh/ and store the image in chunks:
```sh
docker save sbdrone_image:v1 | split -b 5G -d - "sbdrone_image.tar.part."
cat sbdrone_image.tar.part.* | docker load
```
### 2. Connection issues with "PX4 in local WSL2 + AirSim on Windows + QGC on Windows"
If you skip Docker and instead build PX4 locally in WSL with cmake for debugging, connectivity is also painful. First, to connect PX4 with AirSim, find the machine's WSL IPv4 address with `ipconfig` in a Windows PowerShell; this address must be set in the `LocalHostIp` property of AirSim's `settings.json` as well as in every `PX4_SIM_HOST_ADDR` mentioned above. After that, before every PX4 run you even have to point an environment variable at the Windows host, for example:
```sh
export PX4_SIM_HOST_ADDR=172.18.240.1
```
Second, to connect PX4 with QGC, add a UDP listener on 14550 in QGroundControl and, in the optional server field, add `<wsl-ip>:18570`, where `wsl-ip` is the external IP address reported by `ifconfig` inside WSL (the eth0 facing the Windows network). This IP changes on every WSL2 restart and looks like `172.18.243.55:18570`; the trailing port is not necessarily `18570` either, so mind your PX4 version (see `udp_gcs_port_local` in `PX4-Autopilot/ROMFS/px4fmu_common/init.d-posix/px4-rc.mavlink`).
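To avoid looking the address up by hand after every restart, here is a small hedged helper. It assumes the default WSL2 NAT networking, where the Windows host is the DNS nameserver that WSL auto-generates in `/etc/resolv.conf`:

```sh
# Windows host IP as seen from WSL2 = the nameserver in /etc/resolv.conf
export PX4_SIM_HOST_ADDR=$(grep -m1 nameserver /etc/resolv.conf | awk '{print $2}')
echo "PX4_SIM_HOST_ADDR=$PX4_SIM_HOST_ADDR"
```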
### 3. Automatic dependency installation during the PX4 build
If you hit a warning while running `make px4_sitl_default none_iris`, you can hit 'u' to avoid resolving it manually, which proved less troublesome in practice. If it hangs on `Building for code coverage` for a long time, check whether your network is simply too slow.
### 4. I want to change window settings of the packaged UE game
https://blog.csdn.net/qq_33727884/article/details/89487292
# Real-world (REAL) deployment testing | 6,558 | Markdown | 43.924657 | 320 | 0.764867 |
superboySB/SBDrone_deprecated/examples/9_custom.py | #!/usr/bin/env python
"""
| File: 9_custom.py (adapted from 8_camera_vehicle.py)
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: This files serves as an example on how to build an app that makes use of the Pegasus API to run a simulation
with a single vehicle equipped with a camera, producing rgb and camera info ROS2 Humble topics.
"""
# Imports to start Isaac Sim from this script
import carb
from omni.isaac.kit import SimulationApp
# Start Isaac Sim's simulation environment
# Note: this simulation app must be instantiated right after the SimulationApp import, otherwise the simulator will crash
# as this is the object that will load all the extensions and load the actual simulator.
simulation_app = SimulationApp({"headless": False, "width": 640, "height": 480})
# -----------------------------------
# The actual script should start here
# -----------------------------------
import omni.timeline
from omni.isaac.core.world import World
from omni.isaac.core.utils.extensions import disable_extension, enable_extension
# Enable/disable ROS bridge extensions to keep only ROS2 Humble Bridge
disable_extension("omni.isaac.ros_bridge")
disable_extension("omni.isaac.ros2_bridge")
enable_extension("omni.isaac.ros2_bridge-humble")
# Import the Pegasus API for simulating drones
from pegasus.simulator.params import ROBOTS, SIMULATION_ENVIRONMENTS
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.backends.mavlink_backend import MavlinkBackend, MavlinkBackendConfig
from pegasus.simulator.logic.vehicles.multirotor import Multirotor, MultirotorConfig
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
from pegasus.simulator.logic.graphs import ROS2Camera, ROS2Tf, ROS2Odometry, ROS2Lidar
from pegasus.simulator.logic.sensors import Magnetometer, IMU, Barometer, Vision, Camera, Lidar
# Auxiliary scipy module for converting Euler angles to quaternions
from scipy.spatial.transform import Rotation
class PegasusApp:
"""
A Template class that serves as an example on how to build a simple Isaac Sim standalone App.
"""
def __init__(self):
"""
Method that initializes the PegasusApp and is used to setup the simulation environment.
"""
# Acquire the timeline that will be used to start/stop the simulation
self.timeline = omni.timeline.get_timeline_interface()
# Start the Pegasus Interface
self.pg = PegasusInterface()
        # Acquire the World, i.e., the singleton that controls the simulation and is a one-stop shop
        # for setting up physics, spawning asset primitives, etc.
self.pg._world = World(**self.pg._world_settings)
self.world = self.pg.world
# Launch one of the worlds provided by NVIDIA
self.pg.load_environment(SIMULATION_ENVIRONMENTS["Curved Gridroom"])
# Create the vehicle
# Try to spawn the selected robot in the world to the specified namespace
config_multirotor = MultirotorConfig()
# Create the multirotor configuration
mavlink_config = MavlinkBackendConfig({
"vehicle_id": 0,
"px4_autolaunch": True,
"px4_dir": "/home/fstec/Projects/PX4-Autopilot",
"px4_vehicle_model": 'iris_vision'
})
config_multirotor.backends = [MavlinkBackend(mavlink_config)]
# Sensors
camera_prim_path = "body/camera"
camera_config = {
"position": [0.1, 0.0, 0.0],
"orientation": Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
"focal_length": 16.0,
"overwrite_params": True
}
lidar_prim_path = "body/lidar"
lidar_config = {
"position": [-0.1, 0.0, 0.0],
"yaw_offset": 180.0,
"horizontal_fov": 27.0,
"vertical_fov": 27.0,
"min_range": 0.01,
"max_range": 5.0,
"draw_lines": True
}
config_multirotor.sensors = [
Magnetometer(), IMU(), Barometer(), Vision(),
Camera(camera_prim_path, camera_config),
Lidar(lidar_prim_path, lidar_config)]
# Graphs
config_multirotor.graphs = [
ROS2Tf(), ROS2Odometry(),
ROS2Camera(camera_prim_path, config={"types": ['rgb', 'camera_info', 'depth']}),
ROS2Lidar(lidar_prim_path)
]
Multirotor(
"/World/quadrotor",
ROBOTS['Iris'],
0,
[0.0, 0.0, 0.07],
Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
config=config_multirotor,
)
# Reset the simulation environment so that all articulations (aka robots) are initialized
self.world.reset()
        # Auxiliary variable for the timeline callback example
self.stop_sim = False
def run(self):
"""
Method that implements the application main loop, where the physics steps are executed.
"""
# Start the simulation
self.timeline.play()
# The "infinite" loop
while simulation_app.is_running() and not self.stop_sim:
# Update the UI of the app and perform the physics step
self.world.step(render=True)
# Cleanup and stop
carb.log_warn("PegasusApp Simulation App is closing.")
self.timeline.stop()
simulation_app.close()
def main():
# Instantiate the template app
pg_app = PegasusApp()
# Run the application loop
pg_app.run()
if __name__ == "__main__":
main()
| 5,584 | Python | 35.743421 | 123 | 0.643087 |
superboySB/SBDrone_deprecated/src/HITL/drone_env.py | # import setup_path
import airsim
import numpy as np
import math
from PIL import Image
import gym
from gym import spaces
class AirSimDroneEnv(gym.Env):
def __init__(self, ip_address, step_length, image_shape):
        super().__init__()  # gym.Env takes no constructor arguments
self.step_length = step_length
self.image_shape = image_shape
self.state = {
"position": np.zeros(3),
"collision": False,
"prev_position": np.zeros(3),
}
self.drone = airsim.MultirotorClient(ip=ip_address)
self.action_space = spaces.Discrete(7)
self.observation_space = spaces.Box(0, 255, shape=image_shape, dtype=np.uint8)
self._setup_flight()
self.image_request = airsim.ImageRequest(
3, airsim.ImageType.DepthPerspective, True, False
)
def __del__(self):
self.drone.reset()
def _setup_flight(self):
self.drone.reset()
self.drone.enableApiControl(True)
self.drone.armDisarm(True)
# Set home position and velocity
self.drone.moveToPositionAsync(-0.55265, -31.9786, -19.0225, 10).join()
self.drone.moveByVelocityAsync(1, -0.67, -0.8, 5).join()
def transform_obs(self, responses):
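        # Convert the float depth image to an inverse-depth grayscale image:
        # near pixels become bright, far pixels dark (values clamped to at most 255)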
img1d = np.array(responses[0].image_data_float, dtype=float)
img1d = 255 / np.maximum(np.ones(img1d.size), img1d)
img2d = np.reshape(img1d, (responses[0].height, responses[0].width))
        image = Image.fromarray(img2d)
im_final = np.array(image.resize((84, 84)).convert("L"))
return im_final.reshape([84, 84, 1])
def _get_obs(self):
responses = self.drone.simGetImages([self.image_request])
image = self.transform_obs(responses)
self.drone_state = self.drone.getMultirotorState()
self.state["prev_position"] = self.state["position"]
self.state["position"] = self.drone_state.kinematics_estimated.position
self.state["velocity"] = self.drone_state.kinematics_estimated.linear_velocity
collision = self.drone.simGetCollisionInfo().has_collided
self.state["collision"] = collision
return image
def _do_action(self, action):
quad_offset = self.interpret_action(action)
quad_vel = self.drone.getMultirotorState().kinematics_estimated.linear_velocity
self.drone.moveByVelocityAsync(
quad_vel.x_val + quad_offset[0],
quad_vel.y_val + quad_offset[1],
quad_vel.z_val + quad_offset[2],
5,
).join()
def _compute_reward(self):
thresh_dist = 7
beta = 1
z = -10
pts = [
np.array([-0.55265, -31.9786, -19.0225]),
np.array([48.59735, -63.3286, -60.07256]),
np.array([193.5974, -55.0786, -46.32256]),
np.array([369.2474, 35.32137, -62.5725]),
np.array([541.3474, 143.6714, -32.07256]),
]
quad_pt = np.array(
list(
(
self.state["position"].x_val,
self.state["position"].y_val,
self.state["position"].z_val,
)
)
)
if self.state["collision"]:
reward = -100
else:
dist = 10000000
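            # Distance from the drone to the track: for each segment (pts[i], pts[i+1]), the
            # point-to-line distance is |(p - a) x (p - b)| / |b - a|; keep the minimum over segments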
for i in range(0, len(pts) - 1):
dist = min(
dist,
np.linalg.norm(np.cross((quad_pt - pts[i]), (quad_pt - pts[i + 1])))
/ np.linalg.norm(pts[i] - pts[i + 1]),
)
if dist > thresh_dist:
reward = -10
else:
reward_dist = math.exp(-beta * dist) - 0.5
reward_speed = (
np.linalg.norm(
[
self.state["velocity"].x_val,
self.state["velocity"].y_val,
self.state["velocity"].z_val,
]
)
- 0.5
)
reward = reward_dist + reward_speed
done = 0
if reward <= -10:
done = 1
return reward, done
def step(self, action):
self._do_action(action)
obs = self._get_obs()
reward, done = self._compute_reward()
return obs, reward, done, self.state
def reset(self):
self._setup_flight()
return self._get_obs()
def interpret_action(self, action):
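        # Map the discrete action to a velocity offset: 0-2 step along +x/+y/+z,
        # 3-5 along -x/-y/-z, and any other action leaves the velocity unchanged.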
if action == 0:
quad_offset = (self.step_length, 0, 0)
elif action == 1:
quad_offset = (0, self.step_length, 0)
elif action == 2:
quad_offset = (0, 0, self.step_length)
elif action == 3:
quad_offset = (-self.step_length, 0, 0)
elif action == 4:
quad_offset = (0, -self.step_length, 0)
elif action == 5:
quad_offset = (0, 0, -self.step_length)
else:
quad_offset = (0, 0, 0)
return quad_offset
| 5,105 | Python | 29.945454 | 88 | 0.518511 |
superboySB/SBDrone_deprecated/src/HITL/run_ppo.py | # import setup_path
import gym
import time
from stable_baselines3 import DQN
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv, VecTransposeImage
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.callbacks import EvalCallback
from drone_env import AirSimDroneEnv
# Create the AirSim gym environment and wrap it in a DummyVecEnv
base_env = AirSimDroneEnv(ip_address="172.16.13.104",
                          step_length=0.25,
                          image_shape=(84, 84, 1))
# DummyVecEnv expects a list of callables that each return an env instance
env = DummyVecEnv([lambda: base_env])
# DummyVecEnv(
# [
# lambda: Monitor(
# gym.make(
# "airsim-drone-sample-v0",
# ip_address="172.16.13.104",
# step_length=0.25,
# image_shape=(84, 84, 1),
# )
# )
# ]
# )
# Wrap env as VecTransposeImage to allow SB to handle frame observations
env = VecTransposeImage(env)
# Initialize the RL algorithm and its hyperparameters (this script trains DQN despite its file name)
model = DQN(
"CnnPolicy",
env,
learning_rate=0.00025,
verbose=1,
batch_size=32,
train_freq=4,
target_update_interval=10000,
learning_starts=10000,
buffer_size=500000,
max_grad_norm=10,
exploration_fraction=0.1,
exploration_final_eps=0.01,
device="cuda",
tensorboard_log="./tb_logs/",
)
# Create an evaluation callback with the same env, called every 10000 iterations
callbacks = []
eval_callback = EvalCallback(
env,
callback_on_new_best=None,
n_eval_episodes=5,
best_model_save_path=".",
log_path=".",
eval_freq=10000,
)
callbacks.append(eval_callback)
kwargs = {}
kwargs["callback"] = callbacks
# Train for a certain number of timesteps
model.learn(
    total_timesteps=int(5e5),
tb_log_name="dqn_airsim_drone_run_" + str(time.time()),
**kwargs
)
# Save policy weights
model.save("dqn_airsim_drone_policy")
| 1,908 | Python | 23.792207 | 80 | 0.658281 |
superboySB/SBDrone_deprecated/src/HITL/airsim/pfm.py | import numpy as np
import re
import sys
def read_pfm(file):
""" Read a pfm file """
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
header = str(bytes.decode(header, encoding='utf-8'))
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
pattern = r'^(\d+)\s(\d+)\s$'
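    # The dimensions line holds "<width> <height>"; some writers split it across
    # two lines, so retry with the next line appended if the first match fails.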
temp_str = str(bytes.decode(file.readline(), encoding='utf-8'))
dim_match = re.match(pattern, temp_str)
if dim_match:
width, height = map(int, dim_match.groups())
else:
temp_str += str(bytes.decode(file.readline(), encoding='utf-8'))
dim_match = re.match(pattern, temp_str)
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header: width, height cannot be found')
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
    # Note: PFM stores scanlines bottom-to-top; flip with np.flipud if a
    # top-to-bottom array is needed.
file.close()
return data, scale
def write_pfm(file, image, scale=1):
""" Write a pfm file """
file = open(file, 'wb')
color = None
if image.dtype.name != 'float32':
raise Exception('Image dtype must be float32.')
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
color = False
else:
raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
file.write(bytes('PF\n', 'UTF-8') if color else bytes('Pf\n', 'UTF-8'))
temp_str = '%d %d\n' % (image.shape[1], image.shape[0])
file.write(bytes(temp_str, 'UTF-8'))
endian = image.dtype.byteorder
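    # Per the PFM convention, a negative scale marks the data as little-endian.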
if endian == '<' or endian == '=' and sys.byteorder == 'little':
scale = -scale
temp_str = '%f\n' % scale
file.write(bytes(temp_str, 'UTF-8'))
    image.tofile(file)
    file.close()
| 2,323 | Python | 26.023256 | 92 | 0.575118 |
superboySB/SBDrone_deprecated/src/HITL/airsim/__init__.py | from .client import *
from .utils import *
from .types import *
__version__ = "1.8.1"
| 87 | Python | 13.666664 | 21 | 0.643678 |
superboySB/SBDrone_deprecated/src/HITL/airsim/utils.py | import numpy as np #pip install numpy
import math
import sys
import os
import inspect
import re
import logging
from .types import *
def string_to_uint8_array(bstr):
    return np.frombuffer(bstr, np.uint8)
def string_to_float_array(bstr):
    return np.frombuffer(bstr, np.float32)
def list_to_2d_float_array(flst, width, height):
return np.reshape(np.asarray(flst, np.float32), (height, width))
def get_pfm_array(response):
return list_to_2d_float_array(response.image_data_float, response.width, response.height)
def get_public_fields(obj):
return [attr for attr in dir(obj)
if not (attr.startswith("_")
or inspect.isbuiltin(attr)
or inspect.isfunction(attr)
or inspect.ismethod(attr))]
def to_dict(obj):
return dict([attr, getattr(obj, attr)] for attr in get_public_fields(obj))
def to_str(obj):
return str(to_dict(obj))
def write_file(filename, bstr):
"""
Write binary data to file.
Used for writing compressed PNG images
"""
with open(filename, 'wb') as afile:
afile.write(bstr)
# helper method for converting getOrientation to roll/pitch/yaw
# https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
def to_eularian_angles(q):
z = q.z_val
y = q.y_val
x = q.x_val
w = q.w_val
ysqr = y * y
# roll (x-axis rotation)
t0 = +2.0 * (w*x + y*z)
t1 = +1.0 - 2.0*(x*x + ysqr)
roll = math.atan2(t0, t1)
# pitch (y-axis rotation)
t2 = +2.0 * (w*y - z*x)
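    # Clamp so floating-point error cannot push asin outside its [-1, 1] domain.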
if (t2 > 1.0):
        t2 = 1.0
if (t2 < -1.0):
t2 = -1.0
pitch = math.asin(t2)
# yaw (z-axis rotation)
t3 = +2.0 * (w*z + x*y)
t4 = +1.0 - 2.0 * (ysqr + z*z)
yaw = math.atan2(t3, t4)
return (pitch, roll, yaw)
def to_quaternion(pitch, roll, yaw):
t0 = math.cos(yaw * 0.5)
t1 = math.sin(yaw * 0.5)
t2 = math.cos(roll * 0.5)
t3 = math.sin(roll * 0.5)
t4 = math.cos(pitch * 0.5)
t5 = math.sin(pitch * 0.5)
q = Quaternionr()
q.w_val = t0 * t2 * t4 + t1 * t3 * t5 #w
q.x_val = t0 * t3 * t4 - t1 * t2 * t5 #x
q.y_val = t0 * t2 * t5 + t1 * t3 * t4 #y
q.z_val = t1 * t2 * t4 - t0 * t3 * t5 #z
return q
def wait_key(message = ''):
''' Wait for a key press on the console and return it. '''
if message != '':
print (message)
result = None
if os.name == 'nt':
import msvcrt
result = msvcrt.getch()
else:
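        # POSIX: temporarily switch the terminal to unbuffered, no-echo mode so
        # a single keypress can be read without waiting for Enter.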
import termios
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
try:
result = sys.stdin.read(1)
except IOError:
pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
return result
def read_pfm(file):
""" Read a pfm file """
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
header = str(bytes.decode(header, encoding='utf-8'))
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
temp_str = str(bytes.decode(file.readline(), encoding='utf-8'))
dim_match = re.match(r'^(\d+)\s(\d+)\s$', temp_str)
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
    # Note: PFM stores scanlines bottom-to-top; flip with np.flipud if a
    # top-to-bottom array is needed.
file.close()
return data, scale
def write_pfm(file, image, scale=1):
""" Write a pfm file """
file = open(file, 'wb')
color = None
if image.dtype.name != 'float32':
raise Exception('Image dtype must be float32.')
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # grayscale
color = False
else:
raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
file.write('PF\n'.encode('utf-8') if color else 'Pf\n'.encode('utf-8'))
temp_str = '%d %d\n' % (image.shape[1], image.shape[0])
file.write(temp_str.encode('utf-8'))
endian = image.dtype.byteorder
if endian == '<' or endian == '=' and sys.byteorder == 'little':
scale = -scale
temp_str = '%f\n' % scale
file.write(temp_str.encode('utf-8'))
    image.tofile(file)
    file.close()
def write_png(filename, image):
""" image must be numpy array H X W X channels
"""
import cv2 # pip install opencv-python
ret = cv2.imwrite(filename, image)
if not ret:
logging.error(f"Writing PNG file {filename} failed")
| 5,280 | Python | 24.267942 | 93 | 0.565341 |