file_path (string, 21-207 chars) | content (string, 5-1.02M chars) | size (int64, 5-1.02M) | lang (string, 9 classes) | avg_line_length (float64, 1.33-100) | max_line_length (int64, 4-993) | alphanum_fraction (float64, 0.27-0.93) |
---|---|---|---|---|---|---|
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/smoothing_incremental_interpolator.cpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cortex/math/interpolation/smoothing_incremental_interpolator.h"
| 521 | C++ | 39.153843 | 77 | 0.792706 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/quintic_interpolator.cpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cortex/math/interpolation/quintic_interpolator.h"
#include <fstream>
#include <vector>
#include <Eigen/Dense>
namespace cortex {
namespace math {
// Returns true iff t \in [0,1].
inline bool InZeroOne(double t) { return 0 <= t && t <= 1; }
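// Builds the 6x6 constraint matrix for quintic interpolation (despite the
// "Cubic" name). With coefficients ordered (a5, a4, a3, a2, a1, a0), the rows
// encode p(0), p'(0), p''(0), p(1), p'(1), p''(1), so solving A * coeffs = b
// with b = (x0, xd0, xdd0, x1, xd1, xdd1) yields the interpolating quintic.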
inline Eigen::Matrix<double, 6, 6> CubicInterpolationMatrix() {
return (Eigen::MatrixXd(6, 6) << 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 1, 0,
0, 0, 0, 2, 0, 0,
1, 1, 1, 1, 1, 1,
5, 4, 3, 2, 1, 0,
20, 12, 6, 2, 0, 0).finished();
}
QuinticInterpolator1d::QuinticInterpolator1d(const PosVelAcc1d& p0,
const PosVelAcc1d& p1,
bool validate_interpolation_evals)
: validate_interpolation_evals_(validate_interpolation_evals),
A_(CubicInterpolationMatrix()),
b_((Eigen::VectorXd(6) << p0.x, p0.xd, p0.xdd, p1.x, p1.xd, p1.xdd).finished()),
coeffs_(A_.colPivHouseholderQr().solve(b_)) {}
bool QuinticInterpolator1d::Eval(double t, PosVelAcc1d& ret, std::string* error_str) const {
if (validate_interpolation_evals_ && !InZeroOne(t)) {
std::stringstream ss;
ss << "t not in [0,1] (t = " << t << "). ";
if (error_str) {
*error_str += ss.str();
}
return false;
}
auto a5 = coeffs_[0];
auto a4 = coeffs_[1];
auto a3 = coeffs_[2];
auto a2 = coeffs_[3];
auto a1 = coeffs_[4];
auto a0 = coeffs_[5];
std::vector<double> t_powers(6, 1);
for (size_t i = 1; i < t_powers.size(); ++i) {
t_powers[i] = t * t_powers[i - 1];
}
auto x = a5 * t_powers[5] + a4 * t_powers[4] + a3 * t_powers[3] + a2 * t_powers[2] +
a1 * t_powers[1] + a0;
auto xd = 5. * a5 * t_powers[4] + 4. * a4 * t_powers[3] + 3. * a3 * t_powers[2] +
2. * a2 * t_powers[1] + a1;
auto xdd = 20. * a5 * t_powers[3] + 12. * a4 * t_powers[2] + 6. * a3 * t_powers[1] + 2. * a2;
ret = PosVelAcc1d(x, xd, xdd);
return true;
}
} // namespace math
} // namespace cortex
| 2,577 | C++ | 33.837837 | 95 | 0.556849 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/incremental_interpolator.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <iostream>
#include <list>
#include <sstream>
#include <string>
#include <Eigen/Core>
#include "cortex/math/interpolation/interpolator.h"
#include "cortex/math/interpolation/pos_vel_acc.h"
#include "cortex/math/interpolation/quintic_interpolator.h"
// Note: an incremental interpolator is one that leverages monotonicity
// assumptions on the evaluation times to continually grow the interpolator head
// while removing stale segments from the tail.
namespace cortex {
namespace math {
// Enables the interpolation of a sequence of (x, xd, xdd) way-points using
// quintic polynomials for each region between points. Evaluations and adding
// of new way points can be interleaved, although evaluations are expected to
// be with monotonically increasing time. There's a notion of a "delay_buffer"
// which enables points to be received and added at wall-clock time
// simultaneously with wall-clock evaluations by evaluating at a fixed time
// interval in the past. The delay buffer is the number of intervals in the past
// to set that fixed time offset to.
//
// When interpolating between (x, xd, xdd) way points at a non-unity dt
// (i.e. each way point is dt seconds apart), we need to scale the xd and
// xdd by dt and dt^2, respectively, when adding them and undo that scaling
// when evaluating. Intuition: if dt is small, it's moving fast from one
// point to the next. If we then interpolate pretending that it takes a
// full second to get from one to the next, it's moving and accelerating
// much much slower, so we need to scale by dt and dt^2.
//
// This can be more rigorously derived by looking at how time dilation scalars
// propagate through the derivative expressions.
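//
// For example (illustrative numbers): with dt = 0.01 s between way points, a
// velocity of xd = 1.0 units/s is stored as dt * xd = 0.01 per unit interval,
// and an acceleration of xdd = 2.0 units/s^2 as dt^2 * xdd = 0.0002;
// Unscale() reverses this at evaluation time so the returned derivatives are
// again per second.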
class IncrementalInterpolator : public Interpolator<Eigen::VectorXd> {
public:
explicit IncrementalInterpolator(bool prune_history = true,
bool validate_interpolation_evals = true);
// Add a new waypoint; the time should be the current cycle time. Evals will
// be offset into the past by delay_buffer number of intervals so that
// incoming points can be added with the same time stamp as active
// evaluations.
bool AddPt(double t, const PosVelAccXd& p,
std::string* error_str = nullptr) override;
// Evaluates the interpolator at the given time. It uses a delay buffer to
// offset the evaluations into the past so that points can be added at the
// same time as evaluations and evaluations can be made after the latest
// point safely as long as they're within the delay buffer (see description
// above).
//
// This delay buffer functionality can also be implemented manually simply by
// setting the delay_buffer to zero on construction and manually offsetting
// evaluation points into the past.
//
// It's assumed the eval points are monotonically increasing. Fails if not.
// The evaluation point is returned as ret. Returns true if successful and
// false otherwise.
bool Eval(double t, PosVelAccXd& ret,
std::string* error_str = nullptr) const override;
using Interpolator<Eigen::VectorXd>::Eval;
int num_intervals() const { return segment_interpolators_.size(); }
bool IsReady(double t) const;
protected:
mutable std::list<TimeScaledInterpolatorXd> segment_interpolators_;
bool is_first_;
double prev_add_t_;
PosVelAccXd prev_add_p_;
bool validate_interpolation_evals_;
bool prune_history_;
};
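// Minimal usage sketch (hypothetical values; error handling omitted; q0, qd0,
// etc. are Eigen::VectorXd of the same dimension):
//
//   IncrementalInterpolator interp;
//   interp.AddPt(0.00, PosVelAccXd(q0, qd0, qdd0));
//   interp.AddPt(0.01, PosVelAccXd(q1, qd1, qdd1));
//   PosVelAccXd cmd;
//   if (interp.IsReady(0.005) && interp.Eval(0.005, cmd)) {
//     // cmd.x, cmd.xd, cmd.xdd hold the interpolated state at t = 0.005.
//   }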
typedef IncrementalInterpolator SequentialQuinticInterpolator;
} // namespace math
} // namespace cortex
| 4,004 | C | 39.867347 | 80 | 0.747502 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/quintic_interpolator.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <iostream>
#include <list>
#include <sstream>
#include <string>
#include <Eigen/Core>
#include <ros/assert.h>
#include "cortex/math/interpolation/pos_vel_acc.h"
#include "cortex/math/interpolation/time_scaled_interpolator.h"
#include "cortex/math/interpolation/trajectories.h"
namespace cortex {
namespace math {
// One-dimensional quintic interpolating polynomial.
class QuinticInterpolator1d {
public:
typedef double VectorXx;
QuinticInterpolator1d() {}
// Creates a quintic spline that interpolates between p0 and p1 at t = 0 and
// 1, respectively.
QuinticInterpolator1d(const PosVelAcc1d& p0,
const PosVelAcc1d& p1,
bool validate_interpolation_evals = false);
// Evaluate the polynomial at t. If validate_interpolation_evals is set to
// true, enforces that the evaluations are only interpolating, i.e. t is in
// [0, 1]; fails if not. The interpolated value is returned in the ret return
// parameter. On failure, returns false and sets the error string if it's
// provided.
bool Eval(double t, PosVelAcc1d& ret, std::string* error_str = nullptr) const;
// This version asserts on error.
PosVelAcc1d Eval(double t) const {
PosVelAcc1d ret;
std::string error_str;
ROS_ASSERT_MSG(Eval(t, ret, &error_str), "%s", error_str.c_str());
return ret;
}
double operator()(double t) const {
auto p = Eval(t);
return p.x;
}
// Accessor.
const Eigen::VectorXd& coeffs() const { return coeffs_; }
protected:
bool validate_interpolation_evals_;
const Eigen::MatrixXd A_;
const Eigen::VectorXd b_;
const Eigen::VectorXd coeffs_;
};
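// Usage sketch (hypothetical end points):
//
//   PosVelAcc1d p0(0.0, 0.0, 0.0);
//   PosVelAcc1d p1(1.0, 0.0, 0.0);
//   QuinticInterpolator1d interp(p0, p1);
//   PosVelAcc1d mid = interp.Eval(0.5);  // state halfway along the segment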
template <class vec_t>
MultiDimInterp<QuinticInterpolator1d, vec_t> QuinticInterpolator(
const PosVelAcc<vec_t>& p0,
const PosVelAcc<vec_t>& p1,
bool validate_interpolation_evals = false) {
return MultiDimInterp<QuinticInterpolator1d, vec_t>(p0, p1, validate_interpolation_evals);
}
typedef MultiDimInterp<QuinticInterpolator1d, Eigen::VectorXd> QuinticInterpolatorXd;
typedef TimeScaledInterpolator<QuinticInterpolatorXd> TimeScaledInterpolatorXd;
} // namespace math
} // namespace cortex
| 2,629 | C | 30.309523 | 92 | 0.728033 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/incremental_interpolator.cpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cortex/math/interpolation/incremental_interpolator.h"
#include <fstream>
#include <string>
#include "cortex/math/interpolation/quintic_interpolator.h"
namespace cortex {
namespace math {
IncrementalInterpolator::IncrementalInterpolator(bool prune_history,
bool validate_interpolation_evals)
: is_first_(true),
validate_interpolation_evals_(validate_interpolation_evals),
prune_history_(prune_history) {}
bool IncrementalInterpolator::AddPt(double t, const PosVelAccXd& p, std::string* error_str) {
if (is_first_) {
prev_add_t_ = t;
prev_add_p_ = p;
is_first_ = false;
return true;
}
if (t <= prev_add_t_) {
if (error_str) {
std::stringstream ss;
ss << "Add times nonmonotonic -- t = " << t << " vs prev t = " << prev_add_t_;
*error_str += ss.str();
}
return false;
}
segment_interpolators_.push_back(
TimeScaledInterpolatorXd(prev_add_t_, prev_add_p_, t, p, validate_interpolation_evals_));
prev_add_t_ = t;
prev_add_p_ = p;
return true;
}
bool IncrementalInterpolator::Eval(double t, PosVelAccXd& ret, std::string* error_str) const {
if (segment_interpolators_.size() == 0) {
if (error_str) {
*error_str += "No interpolators found.";
}
return false;
}
auto earliest_time = segment_interpolators_.front().t0();
auto latest_time = segment_interpolators_.back().t1();
if (validate_interpolation_evals_ && t < earliest_time) {
if (error_str) {
std::stringstream ss;
ss << "Nonmonotonic evals -- t = " << t
<< ", earliest time segment starts with t0 = " << earliest_time;
*error_str += ss.str();
}
return false;
}
if (validate_interpolation_evals_ && t > latest_time) {
if (error_str) {
std::stringstream ss;
ss << "Future eval (overflow) -- t = " << t
<< ", latest time segment ends with t1 = " << latest_time;
*error_str += ss.str();
}
return false;
}
// Find the first segment whose upper time bound is greater than the current
// time. Since the segments are contiguous and monotonically increasing, we're
// guaranteed that t \in [t0, t1] of this segment.
TimeScaledInterpolatorXd* active_interpolator = nullptr;
for (auto it = segment_interpolators_.begin(); it != segment_interpolators_.end();) {
if (t <= it->t1()) {
active_interpolator = &(*it);
break;
} else {
if (prune_history_) {
it = segment_interpolators_.erase(it);
} else {
++it;
}
}
}
if (!active_interpolator && !validate_interpolation_evals_) {
active_interpolator = &segment_interpolators_.back();
}
if (active_interpolator) {
return active_interpolator->Eval(t, ret, error_str);
} else {
if (error_str) {
std::stringstream ss;
ss << "Eval time in the future -- t = " << t << " vs latest segment time = " << latest_time;
*error_str += ss.str();
}
return false;
}
}
bool IncrementalInterpolator::IsReady(double t) const {
return (segment_interpolators_.size() > 0) && (t >= segment_interpolators_.front().t0());
}
} // namespace math
} // namespace cortex
| 3,657 | C++ | 30 | 98 | 0.631939 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/pos_vel_acc.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <iostream>
#include <vector>
#include <Eigen/Core>
#include <ros/assert.h>
namespace cortex {
namespace math {
struct PosVelAcc1d;
// Represents a triple of simultaneous position, velocity, and acceleration.
template <class vec_t>
struct PosVelAcc {
vec_t x;
vec_t xd;
vec_t xdd;
int dim() const { return x.size(); }
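// Rescale the derivatives for a change of time units: if way points are dt
// seconds apart but the interpolator treats them as one unit apart,
// velocities scale by dt and accelerations by dt^2; Unscale() reverses this.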
PosVelAcc<vec_t> Scale(double dt) const { return PosVelAcc<vec_t>(x, dt * xd, (dt * dt) * xdd); }
PosVelAcc<vec_t> Unscale(double dt) const {
return PosVelAcc<vec_t>(x, xd / dt, xdd / (dt * dt));
}
PosVelAcc() {}
// Initialize to all zeros with a particular dimensionality.
explicit PosVelAcc(int d) {
x = vec_t::Zero(d);
xd = vec_t::Zero(d);
xdd = vec_t::Zero(d);
}
// Initialize to specific (x, xd, xdd). Each vector must be the same
// dimension, otherwise assert.
PosVelAcc(const vec_t& x, const vec_t& xd, const vec_t& xdd);
// Join a collection of one-dimensional PosVelAcc1d's into a single object of
// this type. Aggregates the individual dimensions into vectors, x, xd, xdd.
static PosVelAcc Join(const std::vector<PosVelAcc1d>& dims);
};
// One dimensional variant of pos, vel, acc.
struct PosVelAcc1d {
double x;
double xd;
double xdd;
PosVelAcc1d() {}
PosVelAcc1d(double x, double xd, double xdd) : x(x), xd(xd), xdd(xdd) {}
// Slice a multi-dimensional pos, vel, acc into a one-dimensional variant
// containing only the specified dimension.
template <class vec_t>
static PosVelAcc1d Slice(const PosVelAcc<vec_t>& p, int dim) {
return PosVelAcc1d(p.x[dim], p.xd[dim], p.xdd[dim]);
}
};
//==============================================================================
// Template implementations
//==============================================================================
template <class vec_t>
PosVelAcc<vec_t>::PosVelAcc(const vec_t& x, const vec_t& xd, const vec_t& xdd)
: x(x), xd(xd), xdd(xdd) {
ROS_ASSERT(x.size() == xd.size());
ROS_ASSERT(x.size() == xdd.size());
}
template <class vec_t>
PosVelAcc<vec_t> PosVelAcc<vec_t>::Join(const std::vector<PosVelAcc1d>& dims) {
PosVelAcc<vec_t> p(dims.size());
for (size_t i = 0; i < dims.size(); ++i) {
p.x[i] = dims[i].x;
p.xd[i] = dims[i].xd;
p.xdd[i] = dims[i].xdd;
}
return p;
}
// Convenience typedef for the Eigen::VectorXd instantiation.
typedef PosVelAcc<Eigen::VectorXd> PosVelAccXd;
} // namespace math
} // namespace cortex
inline std::ostream& operator<<(std::ostream& os, const cortex::math::PosVelAcc1d& p) {
os << " x = " << p.x << ", xd = " << p.xd << ", xdd = " << p.xdd;
return os;
}
template <class vec_t>
std::ostream& operator<<(std::ostream& os, const cortex::math::PosVelAcc<vec_t>& p) {
os << "x = " << p.x.transpose() << "\n";
os << "xd = " << p.xd.transpose() << "\n";
os << "xdd = " << p.xdd.transpose() << "\n";
return os;
}
| 3,333 | C | 28.504425 | 99 | 0.624662 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/quartic_interpolator.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <iostream>
#include <list>
#include <sstream>
#include <string>
#include <Eigen/Core>
#include <ros/assert.h>
#include "cortex/math/interpolation/pos_vel_acc.h"
#include "cortex/math/interpolation/time_scaled_interpolator.h"
#include "cortex/math/interpolation/trajectories.h"
namespace cortex {
namespace math {
// One-dimensional quartic interpolating polynomial. Interpolates between
// (x0, xd0, xdd0) and (x1, xd1).
class QuarticInterpolator1d {
public:
typedef double VectorXx;
QuarticInterpolator1d() {}
// Creates a quartic spline that interpolates between p0 and p1 at t = 0 and
// 1, respectively.
QuarticInterpolator1d(const PosVelAcc1d& p0,
const PosVelAcc1d& p1,
bool validate_interpolation_evals = false);
// Evaluate the polynomial at t. If validate_interpolation_evals is set to
// true, enforces that the evaluations are only interpolating, i.e. t is in
// [0, 1]; fails if not. The interpolated value is returned in the ret return
// parameter. On failure, returns false and sets the error string if it's
// provided.
bool Eval(double t, PosVelAcc1d& ret, std::string* error_str = nullptr) const;
// This version asserts on error.
PosVelAcc1d Eval(double t) const {
PosVelAcc1d ret;
std::string error_str;
ROS_ASSERT_MSG(Eval(t, ret, &error_str), "%s", error_str.c_str());
return ret;
}
double operator()(double t) const {
auto p = Eval(t);
return p.x;
}
// Accessor.
const Eigen::VectorXd& coeffs() const { return coeffs_; }
protected:
bool validate_interpolation_evals_;
const Eigen::MatrixXd A_;
const Eigen::VectorXd b_;
const Eigen::VectorXd coeffs_;
};
template <class vec_t>
MultiDimInterp<QuarticInterpolator1d, vec_t> QuarticInterpolator(
const PosVelAcc<vec_t>& p0,
const PosVelAcc<vec_t>& p1,
bool validate_interpolation_evals = false) {
return MultiDimInterp<QuarticInterpolator1d, vec_t>(p0, p1, validate_interpolation_evals);
}
typedef MultiDimInterp<QuarticInterpolator1d, Eigen::VectorXd> QuarticInterpolatorXd;
// typedef TimeScaledInterpolator<QuarticInterpolatorXd>
// TimeScaledInterpolatorXd;
} // namespace math
} // namespace cortex
| 2,690 | C | 30.290697 | 92 | 0.724907 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/quartic_interpolator.cpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cortex/math/interpolation/quartic_interpolator.h"
#include <fstream>
#include <string>
#include <vector>
#include <Eigen/Dense>
namespace cortex {
namespace math {
// Returns true iff t \in [0,1].
inline bool InZeroOne(double t) { return 0 <= t && t <= 1; }
// clang-format off
#define QUARTIC_INTERP_MATRIX \
0, 0, 0, 0, 1, \
0, 0, 0, 1, 0, \
0, 0, 2, 0, 0, \
1, 1, 1, 1, 1, \
4, 3, 2, 1, 0
// clang-format on
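// With coefficients ordered (a4, a3, a2, a1, a0), the rows of
// QUARTIC_INTERP_MATRIX encode p(0), p'(0), p''(0), p(1), p'(1), so solving
// A * coeffs = b with b = (x0, xd0, xdd0, x1, xd1) yields the quartic.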
QuarticInterpolator1d::QuarticInterpolator1d(const PosVelAcc1d& p0,
const PosVelAcc1d& p1,
bool validate_interpolation_evals)
: validate_interpolation_evals_(validate_interpolation_evals),
A_((Eigen::MatrixXd(5, 5) << QUARTIC_INTERP_MATRIX).finished()),
b_((Eigen::VectorXd(5) << p0.x, p0.xd, p0.xdd, p1.x, p1.xd).finished()),
coeffs_(A_.colPivHouseholderQr().solve(b_)) {}
bool QuarticInterpolator1d::Eval(double t, PosVelAcc1d& ret, std::string* error_str) const {
if (validate_interpolation_evals_ && !InZeroOne(t)) {
std::stringstream ss;
ss << "t not in [0,1] (t = " << t << "). ";
if (error_str) {
*error_str += ss.str();
}
return false;
}
auto a4 = coeffs_[0];
auto a3 = coeffs_[1];
auto a2 = coeffs_[2];
auto a1 = coeffs_[3];
auto a0 = coeffs_[4];
std::vector<double> t_powers(5, 1);
for (size_t i = 1; i < t_powers.size(); ++i) {
t_powers[i] = t * t_powers[i - 1];
}
auto x = a4 * t_powers[4] + a3 * t_powers[3] + a2 * t_powers[2] + a1 * t_powers[1] + a0;
auto xd = 4. * a4 * t_powers[3] + 3. * a3 * t_powers[2] + 2. * a2 * t_powers[1] + a1;
auto xdd = 12. * a4 * t_powers[2] + 6. * a3 * t_powers[1] + 2. * a2;
ret = PosVelAcc1d(x, xd, xdd);
return true;
}
} // namespace math
} // namespace cortex
| 2,280 | C++ | 30.680555 | 92 | 0.603509 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/time_scaled_interpolator.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <iostream>
#include <list>
#include <sstream>
#include <string>
#include <Eigen/Core>
#include <ros/assert.h>
#include "cortex/math/interpolation/pos_vel_acc.h"
#include "cortex/math/interpolation/trajectories.h"
namespace cortex {
namespace math {
// Represents an interpolator (e.g. quintic) interpolating between two end points
// at specific times. If validate_interpolation_evals is true, valid evals
// are only those within the time range of the two end points.
template <class traj_t>
class TimeScaledInterpolator {
public:
typedef typename TimeScaledTraj<traj_t>::VectorXx VectorXx;
TimeScaledInterpolator() {}
TimeScaledInterpolator(double t0,
const PosVelAcc<VectorXx>& p0,
double t1,
const PosVelAcc<VectorXx>& p1,
bool validate_interpolation_evals = false)
: t0_(t0),
p0_(p0),
t1_(t1),
p1_(p1),
time_range_(t1 - t0),
scaled_traj_(
traj_t(p0.Scale(time_range_), p1.Scale(time_range_), validate_interpolation_evals),
time_range_),
validate_interpolation_evals_(validate_interpolation_evals) {}
bool Eval(double t, PosVelAcc<VectorXx>& ret, std::string* error_str = nullptr) const {
if (validate_interpolation_evals_ && !(t0_ <= t && t <= t1_)) {
if (error_str) {
std::stringstream ss;
ss << "t = " << t << " outside valid range [" << t0_ << ", " << t1_ << "]";
*error_str += ss.str();
}
return false;
}
return scaled_traj_.Eval((t - t0_) / time_range_, ret, error_str);
}
PosVelAcc<VectorXx> Eval(double t) const {
std::string error_str;
PosVelAcc<VectorXx> ret;
ROS_ASSERT_MSG(scaled_traj_.Eval((t - t0_) / time_range_, ret, &error_str), "%s",
error_str.c_str());
return ret;
}
// Performs a time shifted eval since the underlying trajectory starts at t0_
VectorXx operator()(double t) const { return Eval(t).x; }
double t0() const { return t0_; }
const PosVelAcc<VectorXx>& p0() const { return p0_; }
double t1() const { return t1_; }
const PosVelAcc<VectorXx>& p1() const { return p1_; }
protected:
double t0_;
PosVelAcc<VectorXx> p0_;
double t1_;
PosVelAcc<VectorXx> p1_;
double time_range_;
TimeScaledTraj<traj_t> scaled_traj_;
bool validate_interpolation_evals_;
};
} // namespace math
} // namespace cortex
| 2,908 | C | 30.967033 | 95 | 0.644085 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/trajectories.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <iostream>
#include <list>
#include <sstream>
#include <string>
#include <vector>
#include <Eigen/Core>
#include <ros/assert.h>
#include "cortex/math/interpolation/pos_vel_acc.h"
namespace cortex {
namespace math {
// Represents a multidimensional trajectory as a collection of 1D trajectories.
template <class traj1d_t, class vec_t>
class MultiDimTraj {
public:
typedef vec_t VectorXx;
MultiDimTraj() {}
explicit MultiDimTraj(const std::vector<traj1d_t>& trajectories) : trajectories_(trajectories) {}
bool Eval(double t, PosVelAcc<vec_t>& ret, std::string* error_str) const;
// This version asserts on error.
PosVelAcc<vec_t> Eval(double t) const {
PosVelAcc<vec_t> ret;
std::string error_str;
ROS_ASSERT_MSG(Eval(t, ret, &error_str), "%s", error_str.c_str());
return ret;
}
int dim() const { return trajectories_.size(); }
protected:
std::vector<traj1d_t> trajectories_;
};
// Creates a vector of 1D interpolators for each dimension of the given
// PosVelAcc end-point objects. If validate_interpolation_evals is true, the
// resulting interpolators will validate that the query points are between 0
// and 1.
template <class interp1d_t, class vec_t>
std::vector<interp1d_t> MakeDimInterps(const PosVelAcc<vec_t>& p0,
const PosVelAcc<vec_t>& p1,
bool validate_interpolation_evals) {
ROS_ASSERT(p0.dim() == p1.dim());
std::vector<interp1d_t> trajectories;
for (int i = 0; i < p0.dim(); ++i) {
trajectories.push_back(interp1d_t(
PosVelAcc1d::Slice(p0, i), PosVelAcc1d::Slice(p1, i), validate_interpolation_evals));
}
return trajectories;
}
// Represents a multi-dimensional interpolator interpolating between a pair of
// PosVelAcc points.
template <class interp1d_t, class vec_t>
class MultiDimInterp : public MultiDimTraj<interp1d_t, vec_t> {
public:
typedef vec_t VectorXx;
MultiDimInterp() {}
MultiDimInterp(const PosVelAcc<vec_t>& p0,
const PosVelAcc<vec_t>& p1,
bool validate_interpolation_evals = false)
: MultiDimTraj<interp1d_t, vec_t>(
MakeDimInterps<interp1d_t, vec_t>(p0, p1, validate_interpolation_evals)) {}
protected:
};
// Represents a trajectory whose time is scaled by some scaling factor. The
// semantics of scaling is that if the original time interval were [0,1] the
// new time interval would be [0, scalar], i.e. the original trajectory on
// [0,1] would be stretched to fit across the entire interval [0, scalar].
template <class traj_t>
class TimeScaledTraj {
public:
typedef typename traj_t::VectorXx VectorXx;
TimeScaledTraj() {}
TimeScaledTraj(const traj_t& traj, double scalar) : traj_(traj), scalar_(scalar) {}
VectorXx operator()(double t) const { return Eval(t).x; }
PosVelAcc<typename traj_t::VectorXx> Eval(double t) const {
return traj_.Eval(t).Unscale(scalar_);
}
bool Eval(double t, PosVelAcc<VectorXx>& ret, std::string* error_str = nullptr) const {
PosVelAcc<VectorXx> scaled_ret;
if (!traj_.Eval(t, scaled_ret, error_str)) {
return false;
}
ret = scaled_ret.Unscale(scalar_);
return true;
}
double scalar() const { return scalar_; }
protected:
traj_t traj_;
double scalar_;
};
template <class traj_t>
TimeScaledTraj<traj_t> TimeScaleTraj(const traj_t& traj, double scalar) {
return TimeScaledTraj<traj_t>(traj, scalar);
}
// traj_t should have an evaluation operator:
//
//   VectorXx operator()(double t) const
//
// This function performs central finite-differencing to find the velocity.
// traj_t should also define the vector type:
//
//   typename traj_t::VectorXx
//
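// Central-difference velocity: f'(t) ~ (f(t + dt/2) - f(t - dt/2)) / dt.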
template <class traj_t>
typename traj_t::VectorXx CentralFdVel(const traj_t& traj, double t, double dt = 1e-5) {
auto x_up = traj(t + dt / 2);
auto x_down = traj(t - dt / 2);
return (x_up - x_down) / dt;
}
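// Second-order central-difference acceleration with half-steps h = dt/2:
//   f''(t) ~ (f(t + h) - 2 f(t) + f(t - h)) / h^2.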
template <class traj_t>
typename traj_t::VectorXx FdAcc(const traj_t& traj, double t, double dt = 1e-5) {
auto x = traj(t);
auto x_up = traj(t + dt / 2);
auto x_down = traj(t - dt / 2);
return (x_up + x_down - 2 * x) / (dt * dt / 4);
}
// Converts a trajectory into a velocity trajectory using finite-differencing.
template <class traj_t>
class FdVelTraj {
public:
typedef typename traj_t::VectorXx VectorXx;
explicit FdVelTraj(const traj_t& traj, double dt = 1e-5) : traj_(traj), dt_(dt) {}
VectorXx operator()(double t) const { return CentralFdVel(traj_, t, dt_); }
protected:
traj_t traj_;
double dt_;
};
template <class traj_t>
FdVelTraj<traj_t> ToFdVelTraj(const traj_t& traj) {
return FdVelTraj<traj_t>(traj);
}
// Converts a trajectory into an acceleration trajectory using
// finite-differencing.
template <class traj_t>
class FdAccTraj {
public:
typedef typename traj_t::VectorXx VectorXx;
explicit FdAccTraj(const traj_t& traj, double dt = 1e-5) : traj_(traj), dt_(dt) {}
VectorXx operator()(double t) const { return FdAcc(traj_, t, dt_); }
protected:
traj_t traj_;
double dt_;
};
template <class traj_t>
FdAccTraj<traj_t> ToFdAccTraj(const traj_t& traj) {
return FdAccTraj<traj_t>(traj);
}
// Represents f(t) = c1 * sin(c2 * (t - t0)) + c3
//
// Derivatives:
// f' = c1 * c2 * cos(c2 * (t - t0))
// f'' = -c1 * c2^2 * sin(c2 * (t - t0))
class SinusoidalTraj {
public:
typedef double VectorXx;
SinusoidalTraj(double c1, double c2, double c3, double t0) : c1_(c1), c2_(c2), c3_(c3), t0_(t0) {}
PosVelAcc1d Eval(double t) const {
std::string error_str;
PosVelAcc1d ret;
ROS_ASSERT_MSG(Eval(t, ret, &error_str), "%s", error_str.c_str());
return ret;
}
bool Eval(double t, PosVelAcc1d& ret, std::string* error_str = nullptr) const {
// Suppress warnings that "error_str" is never written to.
(void)error_str;
auto t_affine = c2_ * (t - t0_);
auto x = c1_ * sin(t_affine) + c3_;
auto xd = c1_ * c2_ * cos(t_affine);
auto xdd = -c1_ * c2_ * c2_ * sin(t_affine);
ret = PosVelAcc1d(x, xd, xdd);
return true;
}
double operator()(double t) const { return Eval(t).x; }
protected:
double c1_, c2_, c3_, t0_;
};
//==============================================================================
// Template implementations
//==============================================================================
template <class traj1d_t, class vec_t>
bool MultiDimTraj<traj1d_t, vec_t>::Eval(double t,
PosVelAcc<vec_t>& ret,
std::string* error_str) const {
std::vector<PosVelAcc1d> dim_evals(dim());
for (size_t i = 0; i < trajectories_.size(); ++i) {
if (!trajectories_[i].Eval(t, dim_evals[i], error_str)) {
return false;
}
}
ret = PosVelAcc<vec_t>::Join(dim_evals);
return true;
}
} // namespace math
} // namespace cortex
| 7,290 | C | 28.518219 | 100 | 0.64513 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/smoothing_incremental_interpolator.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
// Simple and generic smoothing incremental interpolator that creates each new
// polynomial segment between the latest evaluated point (the point sent to
// control) and the incoming point. This adds a level of robustness to noise
// governed by the size of the eval shift window.
#pragma once
#include <iostream>
#include <list>
#include <sstream>
#include <string>
#include "cortex/math/interpolation/interpolator.h"
#include "cortex/math/interpolation/pos_vel_acc.h"
#include "cortex/math/interpolation/time_scaled_interpolator.h"
namespace cortex {
namespace math {
template <class interp_t>
class SmoothingIncrementalInterpolator : public Interpolator<typename interp_t::VectorXx> {
public:
SmoothingIncrementalInterpolator() : is_first_(true), is_ready_(false) {}
bool AddPt(double t,
const PosVelAcc<typename interp_t::VectorXx>& p,
std::string* error_str = nullptr) override {
if (is_first_) {
prev_eval_t_ = t;
prev_eval_p_ = p;
is_first_ = false;
return true;
}
is_ready_ = true;
if (t <= prev_eval_t_) {
if (error_str) {
std::stringstream ss;
ss << "Add time must be beyond the last eval time = " << t
<< " vs last eval t = " << prev_eval_t_;
*error_str += ss.str();
}
return false;
}
interpolator_ = TimeScaledInterpolator<interp_t>(prev_eval_t_, prev_eval_p_, t, p);
return true;
}
// Note: only adds to the error string if there's an error. Typically string
// operations aren't real time safe, but in this case we'd be bailing out.
bool Eval(double t,
PosVelAcc<typename interp_t::VectorXx>& ret,
std::string* error_str) const override {
if (!IsReady(t)) {
if (error_str) {
*error_str +=
"Smoothing increment interpolator not ready. Must see at least two "
"points before evaluating.";
}
return false;
}
if (t < interpolator_.t0()) {
if (error_str) {
std::stringstream ss;
ss << "Nonmonotonic evals -- t = " << t << ", last eval was at " << interpolator_.t0();
*error_str += ss.str();
}
return false;
}
if (t > interpolator_.t1()) {
// TODO(roflaherty): Convert this over to a version that extrapolates with zero
// acceleration. Include a jitter buffer (only extrapolate so far).
//
// For now, though, this is unsupported and it just errors.
if (error_str) {
std::stringstream ss;
ss << "Future eval requested. Currently unsupported. Expects eval "
<< "monotonicity -- t = " << t << ", last eval time = " << interpolator_.t1();
*error_str += ss.str();
}
return false;
}
if (!interpolator_.Eval(t, ret, error_str)) {
return false;
}
prev_eval_t_ = t;
prev_eval_p_ = ret;
return true;
}
using Interpolator<typename interp_t::VectorXx>::Eval;
// Returns true iff the interpolator was created at least enough time in the
// past so the shifted evaluation time falls within the valid range of the
// interpolator.
//
// Note that once the interpolator is ready (has returned ready once), since
// new interpolators are always created to be lower bounded at the shifted
// interpolation eval time, and eval times are always monotonically
// increasing, it will always be ready (always return true).
bool IsReady(double t) const { return is_ready_ && (t >= interpolator_.t0()); }
protected:
TimeScaledInterpolator<interp_t> interpolator_;
bool is_first_;
bool is_ready_;
mutable double prev_eval_t_;
mutable PosVelAccXd prev_eval_p_;
};
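// Usage sketch (hypothetical times and points; assumes quintic_interpolator.h
// is included so QuinticInterpolatorXd is available):
//
//   SmoothingIncrementalInterpolator<QuinticInterpolatorXd> interp;
//   interp.AddPt(t0, p0);  // first point only primes the internal state
//   interp.AddPt(t1, p1);  // now a segment toward p1 exists
//   PosVelAccXd out;
//   std::string err;
//   if (interp.IsReady(t) && interp.Eval(t, out, &err)) { /* send out */ }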
} // namespace math
} // namespace cortex
| 4,172 | C | 31.858267 | 95 | 0.649089 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/rmpflow_commanded_joints_listener.cpp | /* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary rights in
* and to this software, related documentation and any modifications thereto. Any use,
* reproduction, disclosure or distribution of this software and related documentation without an
* express license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cortex/control/rmpflow_commanded_joints_listener.h"
namespace cortex {
namespace control {
RmpflowCommandedJointsListener::RmpflowCommandedJointsListener(
const std::string& rmpflow_commands_topic, const std::string& joint_state_topic)
: rmpflow_commands_listener_(rmpflow_commands_topic, 1),
joint_state_listener_(std::make_shared<util::JointStateListener>()) {
joint_state_listener_->Init(joint_state_topic);
rmpflow_commands_listener_.RegisterCallback([&](const auto& msg) {
std::lock_guard<std::mutex> guard(mutex_);
joint_state_listener_->SetRequiredJoints(msg.names);
});
}
bool RmpflowCommandedJointsListener::IsAvailable() const {
std::lock_guard<std::mutex> guard(mutex_);
return is_set_ && joint_state_listener_->is_available();
}
void RmpflowCommandedJointsListener::WaitUntilAvailable(double poll_rate) const {
ros::Rate rate(poll_rate);
while (ros::ok() && !IsAvailable()) {
rate.sleep();
}
}
const std::shared_ptr<util::JointStateListener>
RmpflowCommandedJointsListener::joint_state_listener() const {
return joint_state_listener_;
}
} // namespace control
} // namespace cortex
| 1,597 | C++ | 34.51111 | 98 | 0.751409 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/command_stream_interpolator.h | /**
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary rights in
* and to this software, related documentation and any modifications thereto. Any use,
* reproduction, disclosure or distribution of this software and related documentation without an
* express license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <atomic>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>
#include <cortex_control/JointPosVelAccCommand.h>
#include <ros/ros.h>
#include <ros/time.h>
#include "cortex/math/interpolation/interpolator.h"
namespace cortex {
namespace control {
enum class ControllerState {
// The controller hasn't started yet so we should ignore any incoming commands so we don't start
// processing state changes prematurely.
StartingController = 0,
// The controller's eval calls have started. We need to wait on the backend to start. (The backend
// may already be started, in which case it'll immediately transition once the next incoming
// command is received).
WaitingOnBackend,
// We need to sync the backend with the current state of the robot by suppressing it briefly.
// Suppression automatically sets the backend to latest measured state from the robot. We remain
// in this state until we've detected we're no longer receiving messages from the backend.
SyncingBackend,
// After we've detected the backend suppression has been successful, we stop suppressing and
// transition to initializing the interpolator. The next incoming command will be used to
// initialize the interpolator. The NextCommand() interface can be used to blend between the
// measured state of the robot and the interpolated command for a blending duration specified on
// initialization.
InitializingInterpolator,
// Once the interpolator is initialized, we're operational and running as expected.
Operational
};
} // namespace control
} // namespace cortex
namespace cortex {
namespace control {
// Enables usage of the following form:
//
// auto suppressor = std::make_shared<cortex::control::CommandSuppressor>(topic, rate_hz);
// suppressor->StartSuppression();
// ros::Duration(2.).sleep();
// suppressor->StopSuppression();
//
// Internally, it constantly sends suppression messages at the specified rate and switches from
// sending std_msgs::String("1") for suppression to std_msgs::String("0") when not suppressing.
class CommandSuppressor {
public:
static std::string default_topic;
static double default_rate_hz;
// Defaults topic to default_topic and the publication rate to default_rate_hz.
CommandSuppressor() : CommandSuppressor(default_topic, default_rate_hz) {}
// Initialize to publish on the specified topic at the specified rate. Constantly publishes in a
// separate thread.
CommandSuppressor(const std::string& topic, double rate_hz);
~CommandSuppressor();
void StartSuppressing() { is_suppressing_ = true; }
void StopSuppressing() { is_suppressing_ = false; }
protected:
void Run();
std::atomic_bool is_suppressing_; // True when suppressing.
std::atomic_bool is_running_; // Set to false to stop the thread.
std::string topic_; // Topic it'll publish on.
double rate_hz_; // Rate at which it'll publish.
ros::Publisher suppression_pub_; // The publisher itself.
std::thread run_thread_; // Thread running the constant publication stream.
};
// Interpolator receiving a stream of cortex commands and reconstructing the integral curve they
// describe using a quintic interpolator. It's assumed that Eval() is called at a regular control
// rate; the eval times are used as a clock for the system.
class CommandStreamInterpolator {
public:
static const double default_blending_duration;
static const double default_backend_timeout;
static const double default_time_between_interp_pubs;
// A command is a commanded position plus return information on the availability from the Eval()
// method. This enables the following syntax
//
// auto command = stream_interpolator->Eval(...);
// if (command) {
// Send(command);
// }
//
// There's a Command::Unavailable() static convenience method for retrieving a generic unavailable
// command.
struct Command {
bool is_available;
Eigen::VectorXd commanded_position;
Command(const Eigen::VectorXd& commanded_position)
: is_available(true), commanded_position(commanded_position) {}
Command() : is_available(false) {}
// Enables checking boolean truth value of the command to see whether
// or not it's available.
operator bool() { return is_available; }
static Command Unavailable() { return Command(); }
};
// By default doesn't use the smoothing interpolator.
bool Init(const ros::Duration& interpolator_lookup_delay_buffer,
const std::string& cortex_command_topic,
ros::Duration blending_duration = ros::Duration(default_blending_duration)) {
return Init(interpolator_lookup_delay_buffer, false, cortex_command_topic, blending_duration);
}
// interpolator_lookup_delay_buffer is how far in the past to look up interpolated values to
// accommodate possible jitter.
//
// use_smoothing_interpolator: if true, uses a smoothing interpolator. Otherwise, uses a basic
// quintic interpolator.
//
// cortex_command_topic: topic on which cortex_control::JointPosVelAccCommand messages are broadcast.
//
// blending_duration: how long to blend for during start up when using NextCommand().
bool Init(const ros::Duration& interpolator_lookup_delay_buffer,
bool use_smoothing_interpolator,
const std::string& cortex_command_topic,
ros::Duration blending_duration = ros::Duration(default_blending_duration),
double backend_timeout = default_backend_timeout);
bool Init(const ros::Duration& interpolator_lookup_delay_buffer,
bool use_smoothing_interpolator,
const std::string& cortex_command_topic,
const std::string& cortex_command_ack_topic,
const std::string& cortex_command_suppress_topic,
const std::string& cortex_command_interpolated_topic,
ros::Duration blending_duration = ros::Duration(default_blending_duration),
double backend_timeout = default_backend_timeout);
void Start();
// Returns true if enough time has passed since the last cortex command callback to designate the
// backend as having been stopped or successfully suppressed.
bool IsBackendTimedOut(const ros::Time& time) const;
// Evaluate the interpolator at the specified time index. Time indices should be monotonically
// increasing, and calling this method steps the protocol. The Command is flagged as not available
// until the protocol is in the Operational state.
Command EvalAndStep(const ros::Time& time);
// Internally calls EvalAndStep(time), but handles unavailable commands cleanly and smoothly
// interpolates as needed to create a smooth transition to interpolation on startup.
//
// Automatically switches between returning q_measured when the interpolator isn't ready, blending
// between q_measured and the interpolated values for a predefined duration (blend_duration, set
// at initialization), and fully returning the interpolated values once blending is complete. It
// is recommended that this method be used for smooth transitioning to interpolated command stream
// control.
//
// q_measured can be smaller in length than the internal interpolated commands. In that case,
// just the first q_measured.size() joint commands are used, and the returned command vector is of
// length q_measured.size().
Eigen::VectorXd NextCommand(const ros::Time& time,
const Eigen::VectorXd& q_measured,
bool* is_interpolator_active = nullptr);
private:
void CommandCallback(const cortex_control::JointPosVelAccCommand& msg);
// Add the command in the given command_msg to the interpolator. The command messages were meant
// to describe waypoints along an integral curve, so their command_msg.t time stamp is a rectified
// (jitter free) time stamp that can be used for interpolation.
void AddPointToInterpolator(const cortex_control::JointPosVelAccCommand& command_msg);
// Add the given interpolation point to the interpolator at the given time.
void AddPointToInterpolator(const ros::Time& time, const cortex::math::PosVelAccXd& point);
// This method error checks on the state of the controller and shifts the time point to index the
// interpolator correctly. The time input should be controller time.
//
// Returns the result in eval_point.
//
// If there's an error, returns false (and if the optional error_str is available, sets the error
// string). Otherwise, return true on success.
bool EvalInterpolator(const ros::Time& time,
cortex::math::PosVelAccXd& eval_point,
std::string* error_str = nullptr) const;
// Publish the given interpolated point as a
//
// cortex_control::JointPosVelAccCommand
//
// on <joint_command_topic>/interpolated.
void PublishInterpolatedPoint(const ros::Time& time, const cortex::math::PosVelAccXd& point) const;
// Resets the interpolator to the initial state. One should always call this method for any event
// that transitions the system back to the WaitingOnBackend state.
void ResetInterpolator();
// Protects all members between calls to Eval() and CommandCallback().
std::mutex mutex_;
// Time at the most recent eval. This enables syncing the clocks between Eval() and Callback().
ros::Time last_eval_time_;
// Number of seconds in the past to evaluate the interpolator. The interpolator is effectively
// evaluated as interpolator->Eval(<now> - <delay_buffer>, ...). There are some details about
// syncing the clocks between incoming commands and controllers Eval() time, but the gist of it is
// that new incoming points are added at time <now> and we evaluate at <now> - <delay_buffer>.
ros::Duration interpolator_lookup_delay_buffer_;
// Time of the incoming command when the interpolator was initialized (this is actually the second
// point in the interpolator -- we actually step that back by buffer delay and interpolate from
// the current position to this initial incoming command).
ros::Time eval_time_at_interpolator_start_;
ros::Time command_time_at_interpolator_start_;
ros::Duration control_time_offset_from_now_;
// The underlying quintic interpolator.
std::shared_ptr<cortex::math::Interpolator<Eigen::VectorXd>> interp_;
// Current state of the stream interpolator. This orchestrates the sync protocol with the cortex
// commander.
ControllerState state_;
// The time stamp of the Eval() call when the latest incoming command was received.
ros::Time eval_time_at_last_callback_;
// ROS publishers and subscribers.
ros::Subscriber cortex_command_sub_;
ros::Publisher interpolated_command_pub_;
ros::Publisher cortex_command_time_pub_;
cortex_control::JointPosVelAccCommand latest_command_msg_;
// A command suppressor used during the backend sync to sync the backend with the measured
// joint states.
std::shared_ptr<CommandSuppressor> command_suppressor_;
// If true, uses an auto smoothing interpolator when ResetInterpolator() is called.
bool use_smoothing_interpolator_;
// These three members are used to coordinate blending during startup.
ros::Time blending_start_time_;
bool start_blending_;
ros::Duration blending_duration_;
ros::Time next_print_time_;
ros::Duration print_period_;
double time_offset_;
double momentum_;
double time_between_interp_pubs_;
mutable ros::Time time_at_last_pub_;
double backend_timeout_;
};
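// Typical wiring (hypothetical topic name and delay; error handling omitted):
//
//   cortex::control::CommandStreamInterpolator stream;
//   stream.Init(ros::Duration(0.033), "/cortex/arm/command");
//   stream.Start();
//   // Then, in the control loop at the fixed control rate:
//   Eigen::VectorXd q_cmd = stream.NextCommand(ros::Time::now(), q_measured);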
} // namespace control
} // namespace cortex
| 12,048 | C | 41.575972 | 103 | 0.729914 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/joint_pos_vel_acc_command_publisher.cpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cortex/control/joint_pos_vel_acc_command_publisher.h"
#include <vector>
#include <cortex_control/JointPosVelAccCommand.h>
namespace cortex {
namespace control {
JointPosVelAccCommandPublisher::JointPosVelAccCommandPublisher(
const std::string& topic, bool stamp_header_with_controller_time)
: stamp_header_with_controller_time_(stamp_header_with_controller_time),
is_first_(true),
next_id_(0) {
topic_ = topic;
ros::NodeHandle nh;
joint_command_publisher_ = nh.advertise<cortex_control::JointPosVelAccCommand>(topic_, 10);
}
JointPosVelAccCommandPublisher::~JointPosVelAccCommandPublisher() {}
void JointPosVelAccCommandPublisher::Publish(uint64_t id,
const ros::Time& t,
const std::vector<std::string>& joint_names,
const Eigen::VectorXd& q,
const Eigen::VectorXd& qd,
const Eigen::VectorXd& qdd) {
cortex_control::JointPosVelAccCommand joint_command;
if (stamp_header_with_controller_time_) {
joint_command.header.stamp = t;
} else {
if (is_first_) {
controller_time_offset_ = ros::Time::now() - t;
}
// We want to report the current time, but with the steadiness of the
// controller time.
joint_command.header.stamp = t + controller_time_offset_;
}
joint_command.id = id;
if (is_first_) {
// Usually this first message is missed by the interpolator (or it's
// dropped because of syncing protocols), but even if it's used, the
// interpolator won't use the period field because that's only used for
// knowing the period between the previous point (there isn't one) and this
// one.
joint_command.period = ros::Duration(0.);
is_first_ = false;
} else {
joint_command.period = (t - prev_t_);
}
joint_command.t = t;
joint_command.names = joint_names;
joint_command.q = std::vector<double>(q.data(), q.data() + q.size());
joint_command.qd = std::vector<double>(qd.data(), qd.data() + qd.size());
joint_command.qdd = std::vector<double>(qdd.data(), qdd.data() + qdd.size());
joint_command_publisher_.publish(joint_command);
// Updating the next_id_ member here means we can always set an ID once with
// a call explicitly to this Publish(...) method and then use the ID-less
// Publish(...) method to continue publishing sequential IDs from there.
next_id_ = id + 1;
prev_t_ = t;
}
void JointPosVelAccCommandPublisher::Publish(const ros::Time& t,
const std::vector<std::string>& joint_names,
const Eigen::VectorXd& q,
const Eigen::VectorXd& qd,
const Eigen::VectorXd& qdd) {
// Note that this call automatically increments next_id.
Publish(next_id_, t, joint_names, q, qd, qdd);
}
} // namespace control
} // namespace cortex
| 3,532 | C++ | 38.255555 | 93 | 0.62769 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/joint_pos_vel_acc_command_publisher.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <string>
#include <vector>
#include <Eigen/Core>
#include <ros/ros.h>
namespace cortex {
namespace control {
/*!\brief Base class for a joint position, velocity, and acceleration command
* publisher. Commands are published on the topic given to the constructor.
*/
class JointPosVelAccCommandPublisher {
public:
/*!\brief Creates a JointPosVelAccCommandPublisher that publishes commands on
* the given topic.
*
* There are two time stamps in each JointPosVelAccCommand message, one in
* the header and another as an explicit field t. The explicit field is
* always set to be the controller time (with each message exactly a period
* duration between), but by default (if stamp_header_with_controller_time is
* false) the header contains the wall clock time so we can see the jitter in
* the calculation using tools like rqt_plot. If
* stamp_header_with_controller_time is true, that header stamp is also set
* to the controller time so that becomes observable in plotters.
*/
JointPosVelAccCommandPublisher(const std::string& topic,
bool stamp_header_with_controller_time = false);
/*!\brief Default virtual destructor
*/
virtual ~JointPosVelAccCommandPublisher();
/*!\brief Publishes the position, velocity, and acceleration command. Each
* call to this method sets the id counter to the provided value, so
* subsequent calls to the id-less API will increment from this id.
*
* \param id The sequence id of this command.
* \param t The time stamp of this command.
* \param joint_names Joint names vector. This vector must have the same
* order as q qd, and qdd, i.e. the i-th name must correspond to the i-th q,
* qd, qdd values.
* \param q Joint position values
* \param qd Joint velocity values
* \param qdd Joint acceleration values
*/
virtual void Publish(uint64_t id,
const ros::Time& t,
const std::vector<std::string>& joint_names,
const Eigen::VectorXd& q,
const Eigen::VectorXd& qd,
const Eigen::VectorXd& qdd);
/*!\brief This version automatically creates the sequence id, starting from
* zero and incrementing once for each call.
*/
void Publish(const ros::Time& t,
const std::vector<std::string>& joint_names,
const Eigen::VectorXd& q,
const Eigen::VectorXd& qd,
const Eigen::VectorXd& qdd);
const std::string& topic() const { return topic_; }
protected:
bool stamp_header_with_controller_time_;
ros::Publisher joint_command_publisher_;
ros::Duration controller_time_offset_;
bool is_first_;
ros::Time prev_t_;
uint64_t next_id_;
std::string topic_;
};
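// Usage sketch (hypothetical topic and values; joint_names, q, qd, qdd are
// caller-provided):
//
//   cortex::control::JointPosVelAccCommandPublisher pub("/robot/joint_command");
//   pub.Publish(ros::Time::now(), joint_names, q, qd, qdd);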
} // namespace control
} // namespace cortex
| 3,403 | C | 35.60215 | 81 | 0.684396 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/builders.cpp | /**
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cortex/control/builders.h"
#include "cortex/util/yaml.h"
#include <ros/time.h>
namespace cortex {
namespace control {
std::shared_ptr<CommandStreamInterpolator> LoadCommandStreamInterpolatorFromYaml(
const YAML::Node& command_stream_interpolator_config, bool verbose) {
// Extract params from yaml config.
auto params = util::GetFieldOrDie(command_stream_interpolator_config, "params");
auto interpolation_delay = util::GetOrDie<double>(params, "interpolation_delay");
auto use_smoothing_interpolator = util::GetOrDie<bool>(params, "use_smoothing_interpolator");
auto blending_duration = util::GetOrDie<double>(params, "blending_duration");
auto backend_timeout = util::GetOrDie<double>(params, "backend_timeout");
// Extract ROS topics from yaml config.
auto ros_topics = util::GetFieldOrDie(command_stream_interpolator_config, "ros_topics");
auto command_topics = util::GetFieldOrDie(ros_topics, "rmpflow_commands");
auto rmpflow_command_topic = util::GetOrDie<std::string>(command_topics, "command");
auto rmpflow_command_ack_topic = util::GetOrDie<std::string>(command_topics, "ack");
auto rmpflow_command_suppress_topic = util::GetOrDie<std::string>(command_topics, "suppress");
auto rmpflow_command_interpolated_topic = util::GetOrDie<std::string>(command_topics,
"interpolated");
if (verbose) {
std::cout << "RMPflow backend config:" << std::endl;
std::cout << " params:" << std::endl;
std::cout << " interpolation delay: " << interpolation_delay << std::endl;
std::cout << " use smoothing interpolator: " << use_smoothing_interpolator << std::endl;
std::cout << " blending duration: " << blending_duration << std::endl;
std::cout << " backend timeout: " << backend_timeout << std::endl;
std::cout << " ros_topics:" << std::endl;
std::cout << " rmpflow_commands:" << std::endl;
std::cout << " command: " << rmpflow_command_topic << std::endl;
std::cout << " ack: " << rmpflow_command_ack_topic << std::endl;
std::cout << " suppress: " << rmpflow_command_suppress_topic << std::endl;
std::cout << " interpolated: " << rmpflow_command_interpolated_topic << std::endl;
}
auto stream_interpolator = std::make_shared<cortex::control::CommandStreamInterpolator>();
stream_interpolator->Init(ros::Duration(interpolation_delay),
use_smoothing_interpolator,
rmpflow_command_topic,
rmpflow_command_ack_topic,
rmpflow_command_suppress_topic,
rmpflow_command_interpolated_topic,
ros::Duration(blending_duration),
backend_timeout);
return stream_interpolator;
}
} // namespace control
} // namespace cortex
| 3,336 | C++ | 49.560605 | 96 | 0.660372 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/builders.h | /**
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <memory>
#include <yaml-cpp/node/node.h>
#include "cortex/control/command_stream_interpolator.h"
namespace cortex {
namespace control {
//! Makes and initializes a command stream interpolator from the specified YAML config. One still needs
//! to call Start() on the returned object to start the streaming interpolation.
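//!
//! Example usage (a minimal sketch mirroring command_stream_interpolator_main.cpp; the config
//! path is illustrative and assumes cortex/util/ros_util.h is also included):
//!
//!   auto config = YAML::LoadFile(cortex::util::ExpandRosPkgRelPath(
//!       "package://cortex_control/config/command_stream_interpolator.yaml"));
//!   auto interpolator = cortex::control::LoadCommandStreamInterpolatorFromYaml(config);
//!   interpolator->Start();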
std::shared_ptr<CommandStreamInterpolator> LoadCommandStreamInterpolatorFromYaml(
const YAML::Node& command_stream_interpolator_config, bool verbose = false);
} // namespace control
} // namespace cortex
| 1,005 | C | 34.92857 | 103 | 0.7801 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/command_stream_interpolator_main.cpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
// Runs a generic CommandStreamInterpolator without sending the commands to
// a physical robot. This enables visualizing the underlying interpolated
// commands to analyze interpolation techniques for specific problems. Note it
// doesn't use the NextCommand() interface, but directly jumps to the
// interpolations, so blending doesn't pollute early signals.
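//
// Example invocation (a sketch; the executable name is assumed from this file's name and may
// differ in your build, while the flags themselves are defined below):
//
//   rosrun cortex_control command_stream_interpolator_main \
//       --command_stream_interpolator_config=package://cortex_control/config/command_stream_interpolator.yaml \
//       --interpolated_control_rate_hz=500 --analysis_mode --verbose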
#include <iostream>
#include "cortex/control/builders.h"
#include "cortex/control/command_stream_interpolator.h"
#include "cortex/control/rmpflow_commanded_joints_listener.h"
#include "cortex/util/joint_state_listener.h"
#include "cortex/util/ros_util.h"
#include "cortex/util/yaml.h"
#include <gflags/gflags.h>
#include <ros/ros.h>
DEFINE_string(command_stream_interpolator_config,
"package://cortex_control/config/command_stream_interpolator.yaml",
"");
DEFINE_double(interpolated_control_rate_hz,
500.,
"Rate in Hz at which the low-level control will be sending "
"commands. In this program, those commands are published on a "
"new topic <command_topic>/interpolated.");
DEFINE_bool(use_rectified_cycles,
false,
"If true, rectifies the time stamp so they're always exactly a period "
"apart. Otherwise (default), sets the time stamp to the current wall-clock "
"time.");
DEFINE_bool(analysis_mode,
false,
"If true, runs in analysis mode. Doesn't use NextCommand() for interpolation "
"between interpolated and desired when starting up. In general, you'll want to "
"use NextCommand() in real controllers.");
DEFINE_bool(verbose, false, "Print extra messages.");
class MockControllerInterface {
public:
bool is_interpolator_active;
MockControllerInterface(
const std::shared_ptr<cortex::util::JointStateListener>& joint_state_listener)
: is_interpolator_active(false), joint_state_listener_(joint_state_listener) {}
Eigen::VectorXd GetMeasuredPositions() {
if (is_interpolator_active) {
      // The interpolator is active, so, as part of the protocol, the joint state listener has
      // been set to listen to the same joints found in the commands, and the interpolator has
      // made sure those joints are available in the joint state listener. Therefore, we can
      // return the measured states from the listener.
return joint_state_listener_->CurrentState().q;
} else {
      // Otherwise, return a zero-length vector. That causes the NextCommand() calls to return a
      // zero-length vector as well.
return Eigen::VectorXd(0);
}
}
protected:
std::shared_ptr<cortex::util::JointStateListener> joint_state_listener_;
};
int main(int argc, char** argv) {
try {
gflags::ParseCommandLineFlags(&argc, &argv, true);
ros::init(argc, argv, "cortex_command_stream_interpolator");
ros::NodeHandle node_handle;
ros::AsyncSpinner spinner(4);
spinner.start();
auto command_stream_interpolator_config = YAML::LoadFile(
cortex::util::ExpandRosPkgRelPath(FLAGS_command_stream_interpolator_config));
auto command_stream_interpolator = cortex::control::LoadCommandStreamInterpolatorFromYaml(
command_stream_interpolator_config);
command_stream_interpolator->Start();
auto ros_topics = cortex::util::GetFieldOrDie(command_stream_interpolator_config, "ros_topics");
auto joint_state_topic = cortex::util::GetOrDie<std::string>(ros_topics, "joint_state");
auto command_topics = cortex::util::GetFieldOrDie(ros_topics, "rmpflow_commands");
auto rmpflow_command_topic = cortex::util::GetOrDie<std::string>(command_topics, "command");
cortex::control::RmpflowCommandedJointsListener rmpflow_commanded_joints_listener(
rmpflow_command_topic, joint_state_topic);
std::cout << "Waiting until joint states are available..." << std::endl;
rmpflow_commanded_joints_listener.WaitUntilAvailable(30.);
std::cout << "<done>" << std::endl;
auto controller_interface =
MockControllerInterface(rmpflow_commanded_joints_listener.joint_state_listener());
auto rate_hz = FLAGS_interpolated_control_rate_hz;
auto period = ros::Duration(1. / rate_hz);
auto time = ros::Time::now();
auto time_at_next_print = time;
Eigen::VectorXd q_des;
ros::Rate rate(rate_hz);
bool is_interpolator_active = false;
while (ros::ok()) {
if (FLAGS_use_rectified_cycles) {
time += period;
} else {
time = ros::Time::now();
}
if (FLAGS_analysis_mode) {
// Analysis mode. Allows us to see the interpolated commands without the blending introduced
// by NextCommand(). Controllers will typically want to use NextCommand().
auto command = command_stream_interpolator->EvalAndStep(time);
if (command) {
q_des = command.commanded_position;
}
} else {
// Standard mode. Usually you would send this next_command to the controller. Here, we just
// use the internal functionality of the command stream interpolator to publish the command
// on the specified interpolated commands topic.
auto q_measured = controller_interface.GetMeasuredPositions();
q_des = command_stream_interpolator->NextCommand(
time, q_measured, &controller_interface.is_interpolator_active);
}
if (FLAGS_verbose && time >= time_at_next_print) {
std::cout << "time = " << time << ", q_des = " << q_des.transpose() << std::endl;
time_at_next_print += ros::Duration(.2);
}
rate.sleep();
}
std::cout << "<done>" << std::endl;
} catch (const std::exception& ex) {
std::cout << "Exception caught: " << ex.what() << std::endl;
}
return 0;
}
| 6,247 | C++ | 40.377483 | 100 | 0.683368 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/rmpflow_commanded_joints_listener.h | /* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary rights in
* and to this software, related documentation and any modifications thereto. Any use,
* reproduction, disclosure or distribution of this software and related documentation without an
* express license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <mutex>
#include <cortex_control/JointPosVelAccCommand.h>
#include "cortex/util/joint_state_listener.h"
#include "cortex/util/ros_message_listener.h"
namespace cortex {
namespace control {
// A wrapper around the joint state listener ensuring that we listen to the same joints we're
// controlling with the RMPflow commander's commands.
//
// Listens to the RMPflow commander's commands as well as the joint state topic. Once we receive
// the first command, we register the joint names with the joint state listener as required joints.
// The IsAvailable() method (or WaitUntilAvailable()) can then be used to check whether the joint
// state listener is ready and has measured values for each of those named joints.
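//
// Example usage (a minimal sketch; the topic names below are placeholders taken from the default
// config and may differ in your setup):
//
//   cortex::control::RmpflowCommandedJointsListener listener(
//       "/cortex/arm/command", "/robot/joint_state");
//   listener.WaitUntilAvailable(/*poll_rate=*/30.);
//   auto q = listener.joint_state_listener()->CurrentState().q;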
class RmpflowCommandedJointsListener {
public:
RmpflowCommandedJointsListener(const std::string& rmpflow_commands_topic,
const std::string& joint_state_topic);
bool IsAvailable() const;
void WaitUntilAvailable(double poll_rate) const;
const std::shared_ptr<util::JointStateListener> joint_state_listener() const;
protected:
mutable std::mutex mutex_;
util::RosMessageListener<cortex_control::JointPosVelAccCommand> rmpflow_commands_listener_;
bool is_set_;
std::shared_ptr<util::JointStateListener> joint_state_listener_;
};
} // namespace control
} // namespace cortex
| 1,799 | C | 38.130434 | 99 | 0.764869 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/command_stream_interpolator.cpp | /**
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary rights in
* and to this software, related documentation and any modifications thereto. Any use,
* reproduction, disclosure or distribution of this software and related documentation without an
* express license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cortex/control/command_stream_interpolator.h"
#include <algorithm>
#include <sstream>
#include <vector>
#include <Eigen/Core>
#include <std_msgs/Bool.h>
#include <std_msgs/Time.h>
#include "cortex/math/interpolation/cubic_position_interpolator.h"
#include "cortex/math/interpolation/incremental_interpolator.h"
#include "cortex/math/interpolation/pos_vel_acc.h"
#include "cortex/math/interpolation/quartic_interpolator.h"
#include "cortex/math/interpolation/smoothing_incremental_interpolator.h"
#include "cortex_control/CortexCommandAck.h"
namespace cortex {
namespace control {
inline std::ostream& operator<<(std::ostream& os, ControllerState state) {
using namespace cortex::control;
switch (state) {
case ControllerState::StartingController:
os << "ControllerState::StartingController";
break;
case ControllerState::WaitingOnBackend:
os << "ControllerState::WaitingOnBackend";
break;
case ControllerState::SyncingBackend:
os << "ControllerState::SyncingBackend";
break;
case ControllerState::InitializingInterpolator:
os << "ControllerState::InitializingInterpolator";
break;
case ControllerState::Operational:
os << "ControllerState::Operational";
break;
default:
os << "ControllerState::<unknown>";
}
return os;
}
inline std::ostream& operator<<(std::ostream& os, CommandStreamInterpolator::Command& command) {
if (command) {
os << "[" << command.commanded_position.transpose() << "]";
} else {
os << "<unavailable>";
}
return os;
}
std::string CommandSuppressor::default_topic = "/robot/command_suppression/right";
double CommandSuppressor::default_rate_hz = 30.;
CommandSuppressor::CommandSuppressor(const std::string& topic, double rate_hz)
: topic_(topic), rate_hz_(rate_hz) {
is_running_ = true;
is_suppressing_ = false;
ros::NodeHandle node_handle;
suppression_pub_ = node_handle.advertise<std_msgs::Bool>(topic_, 10);
run_thread_ = std::thread(&CommandSuppressor::Run, this);
}
CommandSuppressor::~CommandSuppressor() {
is_running_ = false;
run_thread_.join();
}
void CommandSuppressor::Run() {
ros::Rate rate(rate_hz_);
while (ros::ok() && is_running_) {
std_msgs::Bool msg;
if (is_suppressing_) {
msg.data = true;
} else {
msg.data = false;
}
suppression_pub_.publish(msg);
rate.sleep();
}
}
const double CommandStreamInterpolator::default_blending_duration = 2.;
const double CommandStreamInterpolator::default_backend_timeout = .5;
const double CommandStreamInterpolator::default_time_between_interp_pubs = 1. / 60; // 60 hz
bool CommandStreamInterpolator::Init(const ros::Duration& interpolator_lookup_delay_buffer,
bool use_smoothing_interpolator,
const std::string& cortex_command_topic,
ros::Duration blending_duration,
double backend_timeout) {
return Init(interpolator_lookup_delay_buffer,
use_smoothing_interpolator,
cortex_command_topic,
cortex_command_topic + "/ack",
cortex_command_topic + "/suppress",
cortex_command_topic + "/interpolated",
blending_duration,
backend_timeout);
}
bool CommandStreamInterpolator::Init(const ros::Duration& interpolator_lookup_delay_buffer,
bool use_smoothing_interpolator,
const std::string& cortex_command_topic,
const std::string& cortex_command_ack_topic,
const std::string& cortex_command_suppress_topic,
const std::string& cortex_command_interpolated_topic,
ros::Duration blending_duration,
double backend_timeout) {
interpolator_lookup_delay_buffer_ = interpolator_lookup_delay_buffer;
use_smoothing_interpolator_ = use_smoothing_interpolator;
blending_duration_ = blending_duration;
backend_timeout_ = backend_timeout;
time_between_interp_pubs_ = default_time_between_interp_pubs;
ros::NodeHandle node_handle;
// Create pub-subs.
cortex_command_sub_ = node_handle.subscribe(
cortex_command_topic, 1, &CommandStreamInterpolator::CommandCallback, this);
interpolated_command_pub_ =
node_handle.advertise<cortex_control::JointPosVelAccCommand>(cortex_command_interpolated_topic, 10);
cortex_command_time_pub_ =
node_handle.advertise<cortex_control::CortexCommandAck>(cortex_command_ack_topic, 10);
// Create the suppressor with defaults.
command_suppressor_ = std::make_shared<CommandSuppressor>(
cortex_command_suppress_topic, CommandSuppressor::default_rate_hz);
return true;
}
void CommandStreamInterpolator::Start() {
std::lock_guard<std::mutex> lock(mutex_);
std::cout << "<starting_controller>" << std::endl;
state_ = ControllerState::StartingController;
}
bool CommandStreamInterpolator::IsBackendTimedOut(const ros::Time& time) const {
auto delta = (time - eval_time_at_last_callback_).toSec();
return delta >= backend_timeout_;
}
CommandStreamInterpolator::Command CommandStreamInterpolator::EvalAndStep(
const ros::Time& time) {
std::lock_guard<std::mutex> lock(mutex_);
last_eval_time_ = time;
// Check state transitions.
if (state_ == ControllerState::StartingController) {
control_time_offset_from_now_ = ros::Time::now() - time;
ResetInterpolator();
std::cout << "<starting> --> <waiting_on_backend>" << std::endl;
state_ = ControllerState::WaitingOnBackend;
} else if (state_ == ControllerState::WaitingOnBackend) {
// The callback switches us out of this one.
} else if (state_ == ControllerState::SyncingBackend) {
// If we've stopped receiving messages from the backend, stop suppressing and transition to
// initializing the interpolator.
if (IsBackendTimedOut(time)) {
std::cout << "<syncing_backend> --> <initializing_interpolator>" << std::endl;
state_ = ControllerState::InitializingInterpolator;
command_suppressor_->StopSuppressing();
}
} else if (state_ == ControllerState::InitializingInterpolator) {
// The callback switches us out of this one.
} else if (state_ == ControllerState::Operational) {
// We're good to go. We'll just execute until it looks like we've lost communication with the
// backend.
if (IsBackendTimedOut(time)) {
ResetInterpolator();
std::cout << "<operational> --> <waiting_on_backend>" << std::endl;
state_ = ControllerState::WaitingOnBackend;
}
}
// Process states.
if (state_ == ControllerState::StartingController) {
    // We should have immediately transitioned to WaitingOnBackend, so this branch is unreachable.
    std::cerr << "There's something wrong. We should never get here. Diagnose "
              << "immediately." << std::endl;
throw std::runtime_error("Bad state in CommandStreamInterpolator");
return Command::Unavailable();
} else if (state_ == ControllerState::WaitingOnBackend) {
// Just wait until we start receiving messages.
return Command::Unavailable();
} else if (state_ == ControllerState::SyncingBackend) {
// We're currently suppressing in a separate thread using the command_suppressor_.
// Otherwise, do nothing.
return Command::Unavailable();
} else if (state_ == ControllerState::InitializingInterpolator) {
time_at_last_pub_ = time;
// This is handled by the callback.
return Command::Unavailable();
} else if (state_ == ControllerState::Operational) {
auto lookup_time = time - interpolator_lookup_delay_buffer_;
if (lookup_time < eval_time_at_interpolator_start_) {
return Command::Unavailable();
}
// Get interpolated command.
cortex::math::PosVelAccXd eval_point;
std::string error_str;
if (!EvalInterpolator(lookup_time, eval_point, &error_str)) {
ROS_WARN_STREAM("[cortex] " << error_str);
return Command::Unavailable();
}
PublishInterpolatedPoint(time, eval_point);
return Command(eval_point.x);
} else {
std::cerr << "Unrecognized state: " << state_;
throw std::runtime_error("Bad state in CommandStreamInterpolator");
}
}
Eigen::VectorXd CommandStreamInterpolator::NextCommand(const ros::Time& time,
const Eigen::VectorXd& q_measured,
bool* is_interpolator_active) {
auto command = EvalAndStep(time);
if (is_interpolator_active) {
*is_interpolator_active = static_cast<bool>(command);
}
if (command) {
if (start_blending_) {
blending_start_time_ = time;
start_blending_ = false;
}
auto elapse = (time - blending_start_time_).toSec();
auto blend_duration = blending_duration_.toSec();
Eigen::VectorXd q_des = command.commanded_position.head(q_measured.size());
if (elapse < blend_duration) {
auto alpha = elapse / blend_duration; // Goes linearly from zero to one.
alpha *= alpha; // Quadratic increase.
q_des = alpha * q_des + (1. - alpha) * q_measured;
}
return q_des;
} else {
start_blending_ = true;
return q_measured;
}
}
void CommandStreamInterpolator::AddPointToInterpolator(
const cortex_control::JointPosVelAccCommand& command_msg) {
cortex::math::PosVelAccXd point;
point.x = Eigen::Map<const Eigen::VectorXd>(command_msg.q.data(), command_msg.q.size());
point.xd = Eigen::Map<const Eigen::VectorXd>(command_msg.qd.data(), command_msg.qd.size());
point.xdd = Eigen::VectorXd::Zero(point.x.size()); // Accelerations not used by interpolator.
AddPointToInterpolator(command_msg.t, point);
}
void CommandStreamInterpolator::AddPointToInterpolator(const ros::Time& time,
const cortex::math::PosVelAccXd& point) {
std::string error_str;
if (!interp_->AddPt( // We add the first point slightly in the past.
(time - command_time_at_interpolator_start_).toSec(),
point,
&error_str)) {
ROS_ERROR_STREAM("[monolithic]: " << error_str);
}
}
bool CommandStreamInterpolator::EvalInterpolator(const ros::Time& time,
cortex::math::PosVelAccXd& eval_point,
std::string* error_str) const {
if (state_ != ControllerState::Operational) {
if (error_str) {
std::stringstream ss;
ss << "Attempting to evaluate interpolator before reaching "
"ControllerState::Operational. Current state: "
         << state_;
*error_str = ss.str();
}
return false;
}
return interp_->Eval(
(time - eval_time_at_interpolator_start_).toSec() + time_offset_, eval_point, error_str);
}
void CommandStreamInterpolator::PublishInterpolatedPoint(
const ros::Time& time, const cortex::math::PosVelAccXd& point) const {
if ((time - time_at_last_pub_).toSec() >= time_between_interp_pubs_) {
cortex_control::JointPosVelAccCommand command_msg;
command_msg.header.stamp = time + control_time_offset_from_now_;
command_msg.names = latest_command_msg_.names;
command_msg.q = std::vector<double>(point.x.data(), point.x.data() + point.x.size());
command_msg.qd = std::vector<double>(point.xd.data(), point.xd.data() + point.xd.size());
command_msg.qdd = std::vector<double>(point.xdd.data(), point.xdd.data() + point.xdd.size());
interpolated_command_pub_.publish(command_msg);
time_at_last_pub_ = time;
}
}
void CommandStreamInterpolator::CommandCallback(
const cortex_control::JointPosVelAccCommand& command_msg) {
if (command_msg.period == ros::Duration(0.)) {
std::cout << "<rejecting first message sent by backend>" << std::endl;
return;
}
std::lock_guard<std::mutex> lock(mutex_);
latest_command_msg_ = command_msg;
// While syncing the backend (state ControllerState::SyncingBackend) we suppress commands so
// callbacks stop. We need to check how much time's elapsed since the last callback (and it needs
// to be comparable to eval times, hence we set it to last_eval_time_). Note it's important that
// we check time since the last callback and not time since the state transition because
// transitioning to that state causes suppression commands to be sent to the backend. We want to
// measure how much time has elapsed since the commands actually start being suppressed, not since
// we started *trying* to suppress commands.
eval_time_at_last_callback_ = last_eval_time_;
if (state_ == ControllerState::StartingController) {
return; // Don't do anything until Update has been called once.
} else if (state_ == ControllerState::WaitingOnBackend) {
// The fact we're in the callback means we're up and running. Transition to syncing the backend.
std::cout << "<waiting_on_backend> --> <syncing_backend>" << std::endl;
state_ = ControllerState::SyncingBackend;
command_suppressor_->StartSuppressing();
// Until the backend's synced, we don't want to be interpolating points.
return;
} else if (state_ == ControllerState::SyncingBackend) {
return; // Still syncing.
} else if (state_ == ControllerState::InitializingInterpolator) {
// This aligns the interpolator's start time (command_msg.t) with the last controller time at
// the last Eval.
eval_time_at_interpolator_start_ = last_eval_time_;
command_time_at_interpolator_start_ = command_msg.t;
time_offset_ = 0.;
momentum_ = 0.;
// Now add the current commanded target at the current time and we're ready to start
// interpolating.
AddPointToInterpolator(command_msg);
std::cout << "<initializing_interpolator> --> <operational>" << std::endl;
state_ = ControllerState::Operational;
next_print_time_ = eval_time_at_last_callback_;
print_period_ = ros::Duration(1.);
} else if (state_ == ControllerState::Operational) {
AddPointToInterpolator(command_msg);
auto interp_time = (eval_time_at_last_callback_ - eval_time_at_interpolator_start_).toSec();
auto command_time = (command_msg.t - command_time_at_interpolator_start_).toSec();
auto time_error = command_time - (interp_time + time_offset_);
auto now = (ros::Time::now() - eval_time_at_interpolator_start_).toSec();
cortex_control::CortexCommandAck command_ack;
command_ack.cortex_command_time = command_msg.t;
command_ack.cortex_command_id = command_msg.id;
command_ack.time_offset = ros::Duration(-time_error);
cortex_command_time_pub_.publish(command_ack);
if (eval_time_at_last_callback_ >= next_print_time_) {
std::cout << std::setprecision(10) << "[stream interpolator (" << time_offset_ << ")] "
<< "interp time: " << interp_time << ", now: " << now
<< ", command time: " << command_time << ", interp - command diff: " << -time_error
<< std::endl;
next_print_time_ += print_period_;
}
}
}
void CommandStreamInterpolator::ResetInterpolator() {
start_blending_ = true;
if (use_smoothing_interpolator_) {
    // Auto-smoothing cubic position interpolation. This version always interpolates between the
    // latest evaluated (q, qd, qdd) and the incoming (q_target, qd_target).
interp_ = std::make_shared<
cortex::math::SmoothingIncrementalInterpolator<cortex::math::CubicPositionInterpolatorXd>>();
} else {
// Basic quintic interpolation.
interp_ = std::make_shared<cortex::math::IncrementalInterpolator>();
}
}
} // namespace control
} // namespace cortex
| 16,265 | C++ | 39.064039 | 106 | 0.662588 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/joint_state_listener.h | /*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
//! @file
//! @brief A simple and general joint state listener to collect the latest information
//! about the robot's state.
#pragma once
#include "cortex/util/state_listener.h"
#include <atomic>
#include <mutex>
#include <unordered_map>
#include <Eigen/Core>
#include <ros/ros.h>
#include <sensor_msgs/JointState.h>
namespace cortex {
namespace util {
/*!\brief Contains information about the state of a single joint. Includes
* the time stamp of the message that last updated the joint.
*/
struct SingleJointState {
double position;
double velocity;
double effort;
ros::Time stamp;
SingleJointState() {}
SingleJointState(double pos, double vel, double eff, const ros::Time &stamp)
: position(pos), velocity(vel), effort(eff), stamp(stamp) {}
};
typedef std::unordered_map<std::string, SingleJointState> JointStateMap;
/*!\brief A very simple joint state listener that records the latest joint
* state information in an unordered map mapping the joint name to the most
* recent SingleJointState information.
*
* It's necessary to process the information this way rather than simply
* recording the joint state messages because there's no guarantee that each
* joint state message contains information about all of the joints. (This, for
* instance, is an issue with Baxter.)
*
* This class is thread safe.
*/
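// Example usage (a minimal sketch; the topic and joint names below are placeholders):
//
//   cortex::util::JointStateListener listener;
//   listener.Init("/robot/joint_state", {"joint1", "joint2"}, /*poll_rate=*/30);  // Blocks until both joints are seen.
//   auto state = listener.CurrentState();  // Stamped with the oldest stamp among those joints.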
class JointStateListener : public StateListener {
public:
JointStateListener() = default;
/*!\brief Initialize to listen on the specified topic for the given required joints. Blocks
   * waiting for the joints to be available before returning, polling at the given poll rate.
*/
void Init(const std::string &topic,
const std::vector<std::string> &required_joints,
int poll_rate);
/*!\brief Initialize to listen on the specified topic for the given required joints. This version
* does not block. Users must check explicitly is_available() before accessing.
*/
void Init(const std::string &topic, const std::vector<std::string> &required_joints);
void Init(const std::string &topic, int poll_rate);
void Init(const std::string &topic);
  /*!\brief Initializes the listener with 0.0 as the default joint state
* values. The listener becomes immediately available.
*/
void InitWithZero(const std::string &topic, const std::vector<std::string> &required_joints);
/*!\brief Set the required joints (often used in conjunction with
   * Init(topic, poll_rate)). Use WaitUntilAvailable() to block until
   * they're available.
*/
void SetRequiredJoints(const std::vector<std::string> &required_joints);
  /*!\brief Wait until information for at least the specified
* required_joints is available.
*/
void WaitUntilAvailable(int poll_rate) const;
/*!\brief Returns true if the required joints are available.
*/
bool is_available() const { return is_available_; }
/*!\brief This variant of the accessor is not atomic. It performs no
* locking.
*/
const JointStateMap ¤t_state_map() const;
/*!\brief This variant is atomic. The only way to ensure no race condition
* is to fully copy the internal state out.
*/
JointStateMap current_state_map_atomic() const;
/*!\brief Returns a vector of position values for the given named joints
* retaining the specified joint order.
*/
std::vector<double> CurrentPositions(const std::vector<std::string> &names) const;
/*!\brief Returns the state of the system stamped with the minimum time
   * stamp (oldest) of all the required joints. The state is the positions,
   * velocities, and efforts of the required joints.
*/
StampedState CurrentState() const;
/*!\brief Accessors implementing the StateListener API.
*/
StampedState State() const override { return CurrentState(); }
bool IsReady() const override;
/*!\brief Accessor for the vector of required joints.
*/
const std::vector<std::string> &required_joints() const { return required_joints_; }
protected:
/*!\brief Initialize to listen on the specified topic for the given required joints. If
   * wait_until_available is true, blocks waiting for the joints to be available before
* returning, polling at the given poll rate.
*/
void Init(const std::string &topic,
const std::vector<std::string> &required_joints,
bool wait_until_available,
int poll_rate);
// Calls to this method should be externally protected through a msg_mutex_
// lock.
bool HasRequiredJoints() const;
/*!\brief Callback consuming sensor_msgs::JointState messages. Writes the
* information into the internal current_state_map_.
*/
void Callback(const sensor_msgs::JointState &joint_states);
mutable std::mutex msg_mutex_;
std::vector<std::string> required_joints_;
std::unordered_map<std::string, SingleJointState> current_state_map_;
ros::NodeHandle node_handle_;
ros::Subscriber subscriber_;
std::atomic_bool is_available_;
};
//------------------------------------------------------------------------------
// Helper methods
//------------------------------------------------------------------------------
std::unordered_map<std::string, SingleJointState> ToMap(
const sensor_msgs::JointState &joint_states);
std::vector<double> ExtractNamedPositions(
const std::unordered_map<std::string, SingleJointState> &jstates,
const std::vector<std::string> &names);
std::vector<double> ExtractNamedPositions(const sensor_msgs::JointState &joint_states,
const std::vector<std::string> &names);
} // namespace util
} // namespace cortex
| 6,110 | C | 34.736842 | 99 | 0.701964 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/stamped_state.h | /*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <cstdint>
#include <Eigen/Core>
namespace cortex {
namespace util {
struct StampedState {
double time;
Eigen::VectorXd q;
Eigen::VectorXd qd;
Eigen::VectorXd u;
int dim() const { return q.size(); }
StampedState() = default;
StampedState(uint32_t num_dim);
StampedState(double time, const Eigen::VectorXd &q, const Eigen::VectorXd &qd);
virtual ~StampedState() = default;
bool HasU() const;
};
} // namespace util
} // namespace cortex
| 933 | C | 23.578947 | 81 | 0.732047 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/state_listener.h | /*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <atomic>
#include <Eigen/Core>
#include "cortex/util/stamped_state.h"
namespace cortex {
namespace util {
/**
* \brief Abstract state listener.
*/
class StateListener {
public:
/**
* \brief Creates a StateListener.
*/
StateListener();
/**
* \brief Default virtual destructor.
*/
virtual ~StateListener() = default;
/**
* \brief Returns the latest state.
*/
virtual StampedState State() const = 0;
/**
* \brief Returns true if the state is available.
*/
virtual bool IsReady() const = 0;
/**
* \brief Blocking call to wait until the state is available.
*/
virtual void WaitForReady(double poll_hz = 100) const;
private:
// This is an alternative and ros free implementation of the thread SIGINT
// signal handling
// static void signal_handler(int signal);
// static std::atomic_bool interruped_;
};
} // namespace util
} // namespace cortex
| 1,384 | C | 21.704918 | 77 | 0.700145 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/joint_state_publisher.cpp | /**
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "joint_state_publisher.h"
namespace cortex {
namespace util {
} // namespace util
} // namespace cortex
| 564 | C++ | 30.388887 | 77 | 0.76773 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/ros_util.cpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cortex/util/ros_util.h"
#include <vector>
#include <ros/package.h>
#include <ros/ros.h>
#include "cortex/util/string.h"
namespace cortex {
namespace util {
void WaitForConnections(const ros::Publisher& pub, double stable_time, double rate_hz) {
std::cout << "Waiting for connections" << std::flush;
auto rate = ros::Rate(rate_hz);
auto last_change_time = ros::Time::now();
auto num_con = pub.getNumSubscribers();
while (ros::ok()) {
std::cout << '.' << std::flush;
auto curr_time = ros::Time::now();
auto latest_num_con = pub.getNumSubscribers();
auto elapse_sec = (curr_time - last_change_time).toSec();
if (latest_num_con != num_con) {
num_con = latest_num_con;
std::cout << num_con << std::flush;
last_change_time = curr_time;
} else if (latest_num_con > 0 && latest_num_con == num_con && elapse_sec >= stable_time) {
std::cout << "<stable>" << std::endl;
break;
}
rate.sleep();
}
}
std::string ExpandRosPkgRelPathRaw(const std::string& pkg_relative_path) {
  // Split the package-relative path into its components.
char delim = '/';
std::vector<std::string> tokens = Split(pkg_relative_path, delim);
if (tokens.size() == 0) {
return "";
} else if (tokens.size() < 2) {
return tokens.front();
}
auto pkg_name = tokens.front();
auto rel_path = Join(tokens, delim, 1); // Join all but first.
auto package_path = ros::package::getPath(pkg_name);
auto full_path = package_path + delim + rel_path;
return full_path;
}
std::string ExpandRosPkgRelPath(const std::string& pkg_relative_path) {
std::string expected_prefix = "package://";
if (pkg_relative_path.find(expected_prefix) == 0) {
return ExpandRosPkgRelPathRaw(pkg_relative_path.substr(expected_prefix.size()));
} else {
// The string doesn't start with the expected prefix, but we're still
// supporting that for the time being. WARNING -- this functionality
// is DEPRECATED; we'll require the package:// prefix soon.
ROS_WARN_STREAM(
"Package expansion without the 'package://' prefix is DEPRECATED: " << pkg_relative_path);
return ExpandRosPkgRelPathRaw(pkg_relative_path);
}
}
} // namespace util
} // namespace cortex
| 2,670 | C++ | 31.57317 | 98 | 0.671161 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/joint_state_publisher.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
//! @file
//! @brief A simple and general joint state publisher for broadcasting the latest information
//! about the robot's state.
#pragma once
#include <vector>
#include <ros/ros.h>
#include <sensor_msgs/JointState.h>
#include "cortex/math/state.h"
namespace cortex {
namespace util {
class JointStatePublisher {
public:
JointStatePublisher(const std::vector<std::string>& joint_names,
const std::string& topic,
int queue_size)
: joint_names_(joint_names), seq_(0) {
ros::NodeHandle node_handle;
pub_ = node_handle.advertise<sensor_msgs::JointState>(topic, queue_size);
}
void Publish(const math::State& state) {
sensor_msgs::JointState msg;
msg.header.seq = seq_++;
msg.header.stamp = ros::Time::now();
msg.name = joint_names_;
msg.position = std::vector<double>(state.pos().data(), state.pos().data() + state.pos().size());
msg.velocity = std::vector<double>(state.vel().data(), state.vel().data() + state.vel().size());
pub_.publish(msg);
}
protected:
ros::Publisher pub_;
std::vector<std::string> joint_names_;
int32_t seq_;
};
} // namespace util
} // namespace cortex
| 1,631 | C | 27.631578 | 100 | 0.679951 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/string.cpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cortex/util/string.h"
#include <sstream>
namespace cortex {
namespace util {
std::vector<std::string> Split(const std::string& str, char delimiter) {
std::vector<std::string> tokens;
std::string token;
std::istringstream token_stream(str);
while (std::getline(token_stream, token, delimiter)) {
if (token.size() > 0) {
tokens.push_back(token);
}
}
return tokens;
}
std::string Join(const std::vector<std::string>& tokens, char delimiter, size_t pos) {
std::stringstream ss;
for (auto i = pos; i < tokens.size(); ++i) {
if (i > pos) ss << delimiter;
ss << tokens[i];
}
return ss.str();
}
} // namespace util
} // namespace cortex
| 1,133 | C++ | 26.658536 | 86 | 0.695499 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/yaml.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <ros/assert.h>
#include <yaml-cpp/yaml.h>
namespace cortex {
namespace util {
//! Extract the named YAML field or assert if the field doesn't exist.
inline YAML::Node GetFieldOrDie(const YAML::Node& node, const std::string& name) {
auto field = node[name];
ROS_ASSERT_MSG(field, "YAML field not found: %s", name.c_str());
return field;
}
//! Extract a field of the specified type from the YAML node or assert if the field doesn't exist.
template <class T>
T GetOrDie(const YAML::Node& node, const std::string& name) {
auto field = node[name];
ROS_ASSERT_MSG(field, "Could not extract YAML field: %s", name.c_str());
return field.as<T>();
}
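// Example usage (a sketch; the file name and field names below are illustrative only):
//
//   YAML::Node config = YAML::LoadFile("config.yaml");
//   auto params = GetFieldOrDie(config, "params");
//   auto backend_timeout = GetOrDie<double>(params, "backend_timeout");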
} // namespace util
} // namespace cortex
| 1,162 | C | 31.305555 | 98 | 0.728055 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/joint_state_listener.cpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cortex/util/joint_state_listener.h"
#include <atomic>
#include <mutex>
#include <ros/assert.h>
namespace cortex {
namespace util {
//------------------------------------------------------------------------------
// JointStateListener implementation
//------------------------------------------------------------------------------
void JointStateListener::Init(const std::string &topic,
const std::vector<std::string> &required_joints,
int poll_rate) {
Init(topic, required_joints, true, poll_rate);
}
void JointStateListener::Init(const std::string &topic) { Init(topic, std::vector<std::string>()); }
void JointStateListener::Init(const std::string &topic, int poll_rate) {
Init(topic, std::vector<std::string>(), poll_rate);
}
void JointStateListener::Init(const std::string &topic,
const std::vector<std::string> &required_joints) {
Init(topic, required_joints, false, 0);
}
// This version is protected (internal).
void JointStateListener::Init(const std::string &topic,
const std::vector<std::string> &required_joints,
bool wait_until_available,
int poll_rate) {
is_available_ = false;
required_joints_ = required_joints;
subscriber_ = node_handle_.subscribe(topic,
10, // Queue size.
&JointStateListener::Callback,
this);
if (wait_until_available) {
WaitUntilAvailable(poll_rate);
}
}
void JointStateListener::InitWithZero(const std::string &topic,
const std::vector<std::string> &required_joints) {
required_joints_ = required_joints;
subscriber_ = node_handle_.subscribe(topic,
10, // Queue size.
&JointStateListener::Callback,
this);
for (uint32_t i = 0; i < required_joints_.size(); ++i) {
current_state_map_[required_joints_[i]] = SingleJointState(0.0, 0.0, 0., ros::Time::now());
}
is_available_ = true;
}
void JointStateListener::SetRequiredJoints(const std::vector<std::string> &required_joints) {
required_joints_ = required_joints;
}
void JointStateListener::WaitUntilAvailable(int poll_rate) const {
ros::Rate rate(poll_rate);
while (ros::ok() && !is_available()) {
rate.sleep();
}
}
void JointStateListener::Callback(const sensor_msgs::JointState &joint_states) {
std::lock_guard<std::mutex> guard(msg_mutex_);
auto n = joint_states.name.size();
ROS_ASSERT(joint_states.position.size() == n);
ROS_ASSERT(joint_states.velocity.size() == n);
ROS_ASSERT(joint_states.effort.size() == 0 || joint_states.effort.size() == n);
bool has_efforts = (joint_states.effort.size() > 0);
for (uint32_t i = 0; i < n; ++i) {
current_state_map_[joint_states.name[i]] =
SingleJointState(joint_states.position[i],
joint_states.velocity[i],
has_efforts ? joint_states.effort[i] : 0.,
joint_states.header.stamp);
}
if (!is_available_) {
// The method HasRequiredJoints(), which requires looping through the
// required joints to see if they're ready, is only called during the
// period of time when we're waiting for the first full set of
// information to be available.
is_available_ = HasRequiredJoints();
}
}
const std::unordered_map<std::string, SingleJointState> &JointStateListener::current_state_map()
const {
return current_state_map_;
}
std::unordered_map<std::string, SingleJointState> JointStateListener::current_state_map_atomic()
const {
std::lock_guard<std::mutex> guard(msg_mutex_);
return current_state_map_;
}
std::vector<double> JointStateListener::CurrentPositions(
const std::vector<std::string> &names) const {
std::lock_guard<std::mutex> guard(msg_mutex_);
return ExtractNamedPositions(current_state_map_, names);
}
StampedState JointStateListener::CurrentState() const {
std::lock_guard<std::mutex> guard(msg_mutex_);
StampedState state(required_joints_.size());
double min_time = 0.;
for (uint32_t i = 0; i < required_joints_.size(); ++i) {
const auto &name = required_joints_[i];
auto access_iter = current_state_map_.find(name);
ROS_ASSERT_MSG(access_iter != current_state_map_.end(),
"Required joint not found: %s", name.c_str());
const auto &single_joint_state = access_iter->second;
state.q(i) = single_joint_state.position;
state.qd(i) = single_joint_state.velocity;
state.u(i) = single_joint_state.effort;
double time = single_joint_state.stamp.toSec();
if (i == 0 || time < min_time) min_time = time;
}
state.time = min_time;
return state;
}
bool JointStateListener::IsReady() const { return is_available(); }
//------------------------------------------------------------------------------
// Helper methods implementation
//------------------------------------------------------------------------------
bool JointStateListener::HasRequiredJoints() const {
bool has_required_joints = true;
std::cout << "Checking required joints: ";
for (const auto &entry_name : required_joints_) {
std::cout << "[" << entry_name << "(";
if (current_state_map_.find(entry_name) == current_state_map_.end()) {
std::cout << "-";
has_required_joints = false;
} else {
std::cout << "+";
}
std::cout << ")]";
}
std::cout << "|" << std::endl;
return has_required_joints;
}
std::unordered_map<std::string, SingleJointState> ToMap(
const sensor_msgs::JointState &joint_states) {
auto n = joint_states.name.size();
ROS_ASSERT(joint_states.position.size() == n);
ROS_ASSERT(joint_states.velocity.size() == n);
ROS_ASSERT(joint_states.effort.size() == n);
std::unordered_map<std::string, SingleJointState> js_map;
for (uint32_t i = 0; i < n; ++i) {
js_map[joint_states.name[i]] = SingleJointState(joint_states.position[i],
joint_states.velocity[i],
joint_states.effort[i],
joint_states.header.stamp);
}
return js_map;
}
std::vector<double> ExtractNamedPositions(
const std::unordered_map<std::string, SingleJointState> &jstates,
const std::vector<std::string> &names) {
std::vector<double> positions;
for (const auto &name : names) {
auto access_iter = jstates.find(name);
ROS_ASSERT(access_iter != jstates.end());
positions.push_back(access_iter->second.position);
}
return positions;
}
std::vector<double> ExtractNamedPositions(const sensor_msgs::JointState &joint_states,
const std::vector<std::string> &names) {
return ExtractNamedPositions(ToMap(joint_states), names);
}
} // namespace util
} // namespace cortex
| 7,532 | C++ | 35.043062 | 100 | 0.595327 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/string.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <string>
#include <vector>
namespace cortex {
namespace util {
//! Split the specified string `str` into a set of strings delimited by the `delimiter` character.
//! If the delimiter is not found, the entire string is returned as a single token. The returned
//! vector always contains, in union, the set of all characters in the string that aren't
//! delimiters.
std::vector<std::string> Split(const std::string& str, char delimiter);
//! Join the tokens together separated by the specified `delimiter` character. Start with token
//! `pos`. By default, `pos` is zero, so all tokens are included.
std::string Join(const std::vector<std::string>& tokens, char delimiter, size_t pos = 0);
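//
// Example usage (a sketch of the expected behavior; the path is illustrative):
//
//   auto tokens = Split("cortex_control/config/interp.yaml", '/');
//   // tokens == {"cortex_control", "config", "interp.yaml"}
//   auto rel_path = Join(tokens, '/', /*pos=*/1);  // "config/interp.yaml"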
} // namespace util
} // namespace cortex
| 1,209 | C | 38.032257 | 98 | 0.748553 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/set_state_listener.h | /*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <Eigen/Core>
#include "cortex/util/state_listener.h"
namespace cortex {
namespace util {
/**
* \brief This is a very simple state listener that just reports its set state.
*/
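//
// Example usage (a minimal sketch):
//
//   cortex::util::SetStateListener listener;
//   listener.set_stamped_state(cortex::util::StampedState(7));  // Zero-initialized 7-dof state.
//   auto state = listener.State();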
class SetStateListener : public StateListener {
public:
SetStateListener() : is_set_(false) {}
StampedState State() const override { return state_; }
bool IsReady() const override { return is_set_; }
void set_stamped_state(const StampedState &state) { state_ = state; }
protected:
bool is_set_;
StampedState state_;
};
} // namespace util
} // namespace cortex
| 1,028 | C | 26.81081 | 79 | 0.737354 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/stamped_state.cpp | /*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include <ros/ros.h>
#include "cortex/util/state_listener.h"
namespace cortex {
namespace util {
bool StampedState::HasU() const { return u.size() > 0; }
StampedState::StampedState(uint32_t num_dim)
: time(0.),
q(Eigen::VectorXd::Zero(num_dim)),
qd(Eigen::VectorXd::Zero(num_dim)),
u(Eigen::VectorXd::Zero(num_dim)) {}
StampedState::StampedState(double time, const Eigen::VectorXd &q, const Eigen::VectorXd &qd)
: time(time), q(q), qd(qd) {}
} // namespace util
} // namespace cortex
| 964 | C++ | 30.129031 | 92 | 0.715768 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/ros_util.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <iostream>
#include <sstream>
#include <Eigen/Core>
#include <ros/ros.h>
#include <ros/serialization.h>
#include <yaml-cpp/yaml.h>
namespace cortex {
namespace util {
//------------------------------------------------------------------------------
// Parameter helpers
//------------------------------------------------------------------------------
/*!\brief Generic, more convenient ROS parameter retrieval method that explicitly
* returns the parameter value.
*
* Call as:
*
* auto value = GetParam("/robot/step_size", .5);
* auto str_value = GetParam("/robot/controller_name", "lqr_controller");
*
* Infers the type by the type of the default value passed in.
*
* TODO: Figure out a way to get this to work with passing in const char*
* string literals.
*/
template <class value_t>
value_t GetParam(const std::string& param_name, const value_t& default_value) {
value_t param_value;
ros::param::param(param_name, param_value, default_value);
return param_value;
}
/*!\brief Call as: auto value = GetParam<double>("/robot/step_size"); One must
 * explicitly supply the template argument for the parameter type.
*/
template <class value_t>
value_t GetParam(const std::string& param_name) {
value_t param_value;
ros::param::get(param_name, param_value);
return param_value;
}
/*!\brief Get all parameters under a particular namespace.
*/
std::vector<std::string> GetNsParams(const std::string& ns);
/*!\brief Returns all of the names and corresponding tags under the given
* namespace.
*
* Returns a vector of pairs with the first element being a name and the second
* being a vector of strings for the tags:
*
* /ns/first/1
* /ns/first/2
* /ns/second
* /ns/third/1
* /ns/third/2
* /ns/third/3
*
* Corresponding return structure:
*
* { "first", {"1", "2"},
* "second", {},
* "third", {"1", "2", "3"} }
*
*/
void GetNsElements(const std::string& ns, std::map<std::string, std::set<std::string>>& elements);
//------------------------------------------------------------------------------
// Subscription helpers
//------------------------------------------------------------------------------
/*!\brief Wait until connections to the publisher stabilize.
*
* Checks at a rate of rate_hz, and requires that the number of subscribers
* doesn't change for stable_time seconds before considering the connection to
* be stable and returning.
*/
void WaitForConnections(const ros::Publisher& pub, double stable_time = .2, double rate_hz = 30.);
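//
// Example usage (a sketch; the topic name is illustrative and assumes std_msgs/Bool.h is
// included):
//
//   ros::NodeHandle nh;
//   auto pub = nh.advertise<std_msgs::Bool>("/cortex/arm/command/suppress", 10);
//   cortex::util::WaitForConnections(pub);  // Returns once the subscriber count is nonzero and stable.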
//------------------------------------------------------------------------------
// Package helpers
//------------------------------------------------------------------------------
/*!\brief Converts a ROS package relative path into a full path.
*
* The ROS package relative path should take the form:
* package://<pkg_name>/<rel_path>
*
* Returns <global_path_to_pkg>/<rel_path>
*
* For legacy reasons, currently the package:// prefix can be left off, but
* that functionality is nonstandard with ROS and now deprecated. In the near
* future, we'll require these strings to be prefixed with package://.
*/
std::string ExpandRosPkgRelPath(const std::string& pkg_relative_path);
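//
// Example usage (a sketch; the package-relative path is illustrative):
//
//   auto path = cortex::util::ExpandRosPkgRelPath(
//       "package://cortex_control/config/command_stream_interpolator.yaml");
//   // Yields <path_to_cortex_control_pkg>/config/command_stream_interpolator.yaml.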
} // namespace util
} // namespace cortex
| 3,724 | C | 30.837607 | 98 | 0.61493 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/state_listener.cpp | /*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cortex/util/state_listener.h"
#include <chrono>
#include <csignal>
#include <thread>
#include <ros/ros.h>
namespace cortex {
namespace util {
// std::atomic_bool StateListener::interruped_(false);
StateListener::StateListener() {
// std::signal(SIGINT, &StateListener::signal_handler);
}
void StateListener::WaitForReady(double poll_hz) const {
// This is an alternative and ros free implementation of the thread SIGINT
// signal handling
// auto sleep_duration = std::chrono::duration<double>(1. / poll_hz);
// while (!interruped_.load() && !IsReady()) {
// std::this_thread::sleep_for(sleep_duration);
// }
ros::Rate rate(poll_hz);
while (ros::ok() && !IsReady()) {
rate.sleep();
}
}
// void StateListener::signal_handler(int signal) { interruped_.store(true); }
} // namespace util
} // namespace cortex
| 1,301 | C++ | 26.702127 | 78 | 0.712529 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/ros_message_listener.h | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <atomic>
#include <functional>
#include <mutex>
#include <vector>
#include <ros/ros.h>
namespace cortex {
namespace util {
// Generic message listener that saves off the latest message and makes it available atomically.
//
// Includes flag accessor is_available() saying whether the first message has been received.
// Thereafter, it always reports the last received message through GetLatestMessage(). There is no
// timeout mechanism on these messages, so once is_available() returns true for the first time, it
// will be true for every call after that.
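//
// Example usage (a minimal sketch; the topic name is a placeholder and assumes
// sensor_msgs/JointState.h is included):
//
//   cortex::util::RosMessageListener<sensor_msgs::JointState> listener("/robot/joint_state", 10);
//   listener.RegisterCallback([](const sensor_msgs::JointState& msg) {
//     // React to each incoming message here.
//   });
//   if (listener.is_available()) {
//     auto latest = listener.GetLatestMessage();
//   }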
template <class msg_t>
class RosMessageListener {
public:
RosMessageListener(const std::string& topic, int queue_size) {
is_available_ = false;
ros::NodeHandle node_handle;
sub_ = node_handle.subscribe(topic, queue_size, &RosMessageListener<msg_t>::Callback, this);
}
void Callback(const msg_t& msg) {
std::lock_guard<std::mutex> guard(mutex_);
msg_ = msg;
is_available_ = true;
for (auto& f : callbacks_) {
f(msg_);
}
}
bool is_available() const { return is_available_; }
msg_t GetLatestMessage() const {
std::lock_guard<std::mutex> guard(mutex_);
return msg_;
}
void RegisterCallback(const std::function<void(const msg_t&)>& f) { callbacks_.push_back(f); }
protected:
mutable std::mutex mutex_;
ros::Subscriber sub_;
std::atomic_bool is_available_;
msg_t msg_;
std::vector<std::function<void(const msg_t&)>> callbacks_;
};
} // namespace util
} // namespace cortex
| 1,980 | C | 28.132353 | 98 | 0.712626 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/config/command_stream_interpolator.yaml | params:
interpolation_delay: .1
use_smoothing_interpolator: true
blending_duration: 2.
backend_timeout: .1 # 6 backend cycles at dt = 1/60
ros_topics:
joint_state: /robot/joint_state # Only used by main().
rmpflow_commands:
command: /cortex/arm/command
ack: /cortex/arm/command/ack
suppress: /cortex/arm/command/suppress
interpolated: /cortex/arm/command/interpolated
| 426 | YAML | 31.846151 | 58 | 0.671362 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/isaac_tutorials/scripts/ros_publisher.py | #!/usr/bin/env python
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import rospy
from sensor_msgs.msg import JointState
import numpy as np
import time
rospy.init_node("test_rosbridge", anonymous=True)
pub = rospy.Publisher("/joint_command", JointState, queue_size=10)
joint_state = JointState()
joint_state.name = [
"panda_joint1",
"panda_joint2",
"panda_joint3",
"panda_joint4",
"panda_joint5",
"panda_joint6",
"panda_joint7",
"panda_finger_joint1",
"panda_finger_joint2",
]
num_joints = len(joint_state.name)
# make sure kit's editor is playing for receiving messages
joint_state.position = np.array([0.0] * num_joints)
default_joints = [0.0, -1.16, -0.0, -2.3, -0.0, 1.6, 1.1, 0.4, 0.4]
# limiting the movements to a smaller range (this is not the range of the robot, just the range of the movement)
max_joints = np.array(default_joints) + 0.5
min_joints = np.array(default_joints) - 0.5
# position control the robot to wiggle around each joint
time_start = time.time()
rate = rospy.Rate(20)
while not rospy.is_shutdown():
joint_state.position = np.sin(time.time() - time_start) * (max_joints - min_joints) * 0.5 + default_joints
pub.publish(joint_state)
rate.sleep()
| 1,618 | Python | 29.547169 | 111 | 0.717553 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/isaac_tutorials/scripts/ros_service_client.py | #!/usr/bin/env python
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import rospy
import numpy as np
from isaac_ros_messages.srv import IsaacPose
from isaac_ros_messages.srv import IsaacPoseRequest
from geometry_msgs.msg import Pose
def teleport_client(msg):
rospy.wait_for_service("teleport")
try:
teleport = rospy.ServiceProxy("teleport", IsaacPose)
teleport(msg)
return
except rospy.ServiceException as e:
print("Service call failed: %s" % e)
# compose teleport messages
cube_pose = Pose()
cube_pose.position.x = np.random.uniform(-2, 2)
cube_pose.position.y = 0
cube_pose.position.z = 0
cube_pose.orientation.w = 1
cube_pose.orientation.x = 0
cube_pose.orientation.y = 0
cube_pose.orientation.z = 0
cone_pose = Pose()
cone_pose.position.x = 0
cone_pose.position.y = np.random.uniform(-2, 2)
cone_pose.position.z = 0
cone_pose.orientation.w = 1
cone_pose.orientation.x = 0
cone_pose.orientation.y = 0
cone_pose.orientation.z = 0
teleport_msg = IsaacPoseRequest()
teleport_msg.names = ["/World/Cube", "/World/Cone"]
teleport_msg.poses = [cube_pose, cone_pose]
teleport_client(teleport_msg)
| 1,530 | Python | 27.886792 | 76 | 0.744444 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/isaac_moveit/scripts/panda_combined_joints_publisher.py | #!/usr/bin/env python
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import rospy
from sensor_msgs.msg import JointState
joints_dict = {}
def joint_states_callback(message):
joint_commands = JointState()
joint_commands.header = message.header
for i, name in enumerate(message.name):
# Storing arm joint names and positions
joints_dict[name] = message.position[i]
if name == "panda_finger_joint1":
# Adding additional panda_finger_joint2 state info (extra joint used in isaac sim)
# panda_finger_joint2 mirrors panda_finger_joint1
joints_dict["panda_finger_joint2"] = message.position[i]
    joint_commands.name = list(joints_dict.keys())
    joint_commands.position = list(joints_dict.values())
# Publishing combined message containing all arm and finger joints
pub.publish(joint_commands)
return
if __name__ == "__main__":
rospy.init_node("panda_combined_joints_publisher")
pub = rospy.Publisher("/joint_command", JointState, queue_size=1)
rospy.Subscriber("/joint_command_desired", JointState, joint_states_callback, queue_size=1)
rospy.spin()
| 1,535 | Python | 30.346938 | 95 | 0.718567 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/plugin.xml | <library path="lib/libcortex_control_franka">
<class name="cortex_control_franka/InterpolatedCommandStreamController" type="cortex::control::franka::InterpolatedCommandStreamController" base_class_type="controller_interface::ControllerBase">
<description>
Receives a stream of commands from a reactive motion generator, interpolates them, and
streams realtime position commands to Franka Panda's joint-space position controller.
</description>
</class>
</library>
| 512 | XML | 55.999994 | 199 | 0.746094 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/src/interpolated_command_stream_controller.cpp | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
#include <iomanip>
#include <iostream>
#include <sstream>
#include <controller_interface/controller_base.h>
#include <cortex/math/interpolation/pos_vel_acc.h>
#include <hardware_interface/hardware_interface.h>
#include <pluginlib/class_list_macros.h>
#include <ros/ros.h>
#include <std_msgs/String.h>
#include "cortex/control/builders.h"
#include "cortex/util/ros_util.h" // TODO: verify has ExpandRosPkgRelPath()
#include "cortex/control/franka/interpolated_command_stream_controller.h"
namespace cortex {
namespace control {
namespace franka {
bool InterpolatedCommandStreamController::init(hardware_interface::RobotHW *robot_hardware,
ros::NodeHandle &node_handle) {
// Initialize connection to the robot and obtain joint handles
joint_interface_ = robot_hardware->get<hardware_interface::PositionJointInterface>();
if (joint_interface_ == nullptr) {
ROS_ERROR("InterpolatedCommandStreamController: Error getting position joint "
"interface from hardware!");
return false;
}
std::vector<std::string> joint_names;
if (!node_handle.getParam("joint_names", joint_names)) {
ROS_ERROR("InterpolatedCommandStreamController: Could not parse joint names");
}
if (joint_names.size() != 7) {
ROS_ERROR_STREAM("InterpolatedCommandStreamController: Wrong number of joint names, got"
<< joint_names.size() << " instead of 7 names!");
return false;
}
joint_handles_.resize(joint_names.size());
for (size_t i = 0; i < joint_names.size(); ++i) {
try {
joint_handles_[i] = joint_interface_->getHandle(joint_names[i]);
} catch (hardware_interface::HardwareInterfaceException const &e) {
ROS_ERROR_STREAM(
"InterpolatedCommandStreamController: Exception getting joint handles: " << e.what());
return false;
}
}
auto command_stream_interpolator_config = YAML::LoadFile(
cortex::util::ExpandRosPkgRelPath("package://cortex_control_franka/config/command_stream_interpolator.yaml"));
command_stream_interpolator_ = cortex::control::LoadCommandStreamInterpolatorFromYaml(
command_stream_interpolator_config);
return true;
}
void InterpolatedCommandStreamController::starting(ros::Time const &time) {
initialize_blending_ = true;
print_period_ = ros::Duration(1.);
start_time_ = time;
controller_time_ = time;
next_print_time_ = time;
command_stream_interpolator_->Start();
}
Eigen::VectorXd InterpolatedCommandStreamController::current_position() const {
Eigen::VectorXd q(joint_handles_.size());
for (size_t i = 0; i < joint_handles_.size(); ++i) {
q[i] = joint_handles_[i].getPosition();
}
return q;
}
void InterpolatedCommandStreamController::send_current_position() {
send_position_command(current_position());
}
void InterpolatedCommandStreamController::send_position_command(Eigen::VectorXd const &q) {
for (size_t i = 0; i < joint_handles_.size(); ++i) {
joint_handles_[i].setCommand(q[i]);
}
}
void InterpolatedCommandStreamController::update(ros::Time const &time,
ros::Duration const &period) {
// Update time information.
//
  // WARNING: Accumulating the period provided to this method into a duration
  //          is the only way of handling time that works here; all other
  //          options result in the robot producing audible motor noise during
  //          motion.
controller_time_ += period;
bool is_interpolator_active;
send_position_command(command_stream_interpolator_->NextCommand(
controller_time_, current_position(), &is_interpolator_active));
if (time >= next_print_time_) {
std::cout << std::setprecision(10) << "[franka] time: " << (time - start_time_).toSec()
<< ", control_time: " << (controller_time_ - start_time_).toSec()
<< ", now: " << (ros::Time::now() - start_time_).toSec()
<< ", period: " << period.toSec() << std::endl;
next_print_time_ += print_period_;
}
}
} // namespace franka
} // namespace control
} // namespace cortex
PLUGINLIB_EXPORT_CLASS(cortex::control::franka::InterpolatedCommandStreamController,
controller_interface::ControllerBase)
| 4,712 | C++ | 37.317073 | 116 | 0.686121 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/src/python/franka_gripper_commander.py | # Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# Simple action client interface to the gripper action server.
from __future__ import absolute_import, division, print_function, unicode_literals
from franka_gripper.msg import GraspAction, GraspGoal, GraspEpsilon, MoveAction, MoveGoal
import numpy as np
import rospy
import actionlib
import argparse
# A gripper opening width of 0.08 m appears fully open, but Franka claims it will cause issues.
# The nominal maximum opening width is 0.07 m. Here we compromise between the two.
open_pos = 0.075
class FrankaGripperCommander(object):
def __init__(self, verbose=False):
self.verbose = verbose
self.grasp_client = actionlib.SimpleActionClient("/franka_gripper/grasp", GraspAction)
self.move_client = actionlib.SimpleActionClient("/franka_gripper/move", MoveAction)
if self.verbose:
print("Waiting for grasp client...")
self.grasp_client.wait_for_server()
if self.verbose:
print("Waiting for move client...")
self.move_client.wait_for_server()
def close(self, width=0.0, speed=0.03, force=40.0, grasp_eps=(0.2, 0.2), wait=True):
grasp_goal = GraspGoal()
grasp_goal.width = width
grasp_goal.speed = speed
grasp_goal.force = force
grasp_goal.epsilon = GraspEpsilon(inner=grasp_eps[0], outer=grasp_eps[1])
self.grasp_client.send_goal(grasp_goal)
if wait:
self.grasp_client.wait_for_result()
if self.verbose:
print("result:", self.grasp_client.get_result())
def move(self, width, speed=0.03, wait=True):
move_goal = MoveGoal()
move_goal.width = width
move_goal.speed = speed
print("sending goal")
self.move_client.send_goal(move_goal)
if wait:
print("waiting for finish")
self.move_client.wait_for_result()
if self.verbose:
print("result:", self.move_client.get_result())
print("move complete")
def open(self, speed=0.03, wait=True):
self.move(open_pos, speed=speed, wait=wait)
if __name__ == "__main__":
def Grasp(args):
print("Grasping...")
client = actionlib.SimpleActionClient("/franka_gripper/grasp", GraspAction)
# Waits until the action server has started up and started
# listening for goals.
client.wait_for_server()
# Creates a goal to send to the action server.
grasp_goal = GraspGoal()
grasp_goal.width = args.grasp_width
grasp_goal.speed = args.speed
grasp_goal.force = args.force
grasp_goal.epsilon = GraspEpsilon(inner=args.eps_inner, outer=args.eps_outer)
# Sends the goal to the action server.
print(">>>>", grasp_goal)
client.send_goal(grasp_goal)
# Waits for the server to finish performing the action.
client.wait_for_result()
# Prints out the result of executing the action
print("result:", client.get_result())
def Move(args):
print("Moving...")
client = actionlib.SimpleActionClient("/franka_gripper/move", MoveAction)
# Waits until the action server has started up and started
# listening for goals.
client.wait_for_server()
# Creates a goal to send to the action server.
        move_goal = MoveGoal()
move_goal.width = args.width
move_goal.speed = args.speed
# Sends the goal to the action server.
client.send_goal(move_goal)
# Waits for the server to finish performing the action.
client.wait_for_result()
# Prints out the result of executing the action
print("result:", client.get_result())
def FrankaGripperCommanderTest(args):
print("Creating gripper commander...")
gripper_commander = FrankaGripperCommander()
print("Closing...")
gripper_commander.close()
print("Opening to all the way...")
gripper_commander.move(0.08)
print("Opening to .2...")
gripper_commander.move(0.02)
print("Opening to .5...")
gripper_commander.move(0.05)
print("Closing...")
gripper_commander.close()
print("Opening to all the way...")
gripper_commander.move(0.08)
def RobustnessTest(args):
commander = FrankaGripperCommander()
mode = "open"
while not rospy.is_shutdown():
if mode == "open":
commander.open(speed=0.2, wait=False)
print("opening...")
mode = "close"
elif mode == "close":
commander.close(speed=0.2, wait=False)
print("closing...")
mode = "open"
else:
raise RuntimeError("Invalid mode:", mode)
wait_time = abs(np.random.normal(loc=0.5, scale=0.75))
print(" wait:", wait_time)
rospy.sleep(wait_time)
parser = argparse.ArgumentParser("gripper_test")
parser.add_argument(
"--mode", type=str, required=True, help="Which mode: close, move, gripper_commander_test, robustness_test."
)
parser.add_argument(
"--width",
type=float,
default=None,
help="How wide in meters. Note that the gripper can open to about .8m wide.",
)
parser.add_argument("--speed", type=float, default=0.03, help="How fast to go in meter per second.")
parser.add_argument("--force", type=float, default=0.03, help="How strongly to grip.")
parser.add_argument(
"--grasp_width",
type=float,
default=0.0,
help="Width of the grasp. Defaults to closing all the way. "
"In conjunction with the default error (set wide) the default "
"behavior is to just close until it feels something.",
)
parser.add_argument(
"--eps_inner", type=float, default=0.2, help="Inner epsilon threshold. Defaults to enabling any error."
)
parser.add_argument(
"--eps_outer", type=float, default=0.2, help="Outer epsilon threshold. Defaults to enabling any error."
)
args = parser.parse_args()
rospy.init_node("gripper_test")
if args.mode == "move":
Move(args)
elif args.mode == "close":
Grasp(args)
elif args.mode == "gripper_commander_test":
FrankaGripperCommanderTest(args)
elif args.mode == "robustness_test":
RobustnessTest(args)
else:
print("ERROR -- unrecognized mode:", args.mode)
| 6,912 | Python | 33.914141 | 115 | 0.624421 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/src/python/franka_gripper_command_relay.py | #!/usr/bin/python
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# Simple action client interface to the gripper action server.
from __future__ import print_function
import argparse
import json
import threading
import rospy
from sensor_msgs.msg import JointState
from std_msgs.msg import String
from franka_gripper_commander import FrankaGripperCommander
pinch_width = 0.0265
speed = 0.2
class SimGripperCommander(object):
def __init__(self):
pass
def move(self, width, speed, wait=True):
print("[move] width: %.4f, speed %.2f" % (width, speed))
def close(self, width=0.0, speed=0.03, force=40.0, grasp_eps=(0.2, 0.2), wait=True):
print("[close] width: %.4f, speed: %.2f, force: %.2f" % (width, speed, force))
class FrankaGripperCommandRelay(object):
def __init__(self, is_sim=False):
print("Setting up gripper commander")
self.is_sim = is_sim
if self.is_sim:
print("<is sim>")
self.gripper_commander = SimGripperCommander()
else:
print("<is real>")
self.gripper_commander = FrankaGripperCommander(verbose=True)
self.start_time = rospy.Time.now()
self.last_tick_time = self.start_time
self.seconds_between_tick_prints = 0.1
self.command_queue = []
self.command_queue_lock = threading.Lock()
print("Starting subscriber...")
self.command_sub = rospy.Subscriber("/cortex/gripper/command", String, self.command_callback)
print("<ready and listening>")
def command_callback(self, msg):
try:
command = json.loads(msg.data)
try:
self.command_queue_lock.acquire()
self.command_queue.append(command)
finally:
self.command_queue_lock.release()
except ValueError as ve:
print("Jsg parse error -- could not parse command:\n", msg.data)
except Exception as e:
print("Exception in processing command:", e)
print("message data:\n", msg.data)
def process_latest_commands(self):
now = rospy.Time.now()
if (now - self.last_tick_time).to_sec() >= self.seconds_between_tick_prints:
self.last_tick_time = now
try:
self.command_queue_lock.acquire()
command_queue = self.command_queue
self.command_queue = []
finally:
self.command_queue_lock.release()
for command in command_queue:
self.process_latest_command(command)
def process_latest_command(self, cmd):
try:
print("\nprocessing command:", cmd["command"])
if cmd["command"] == "move_to":
print("moving to:", cmd["width"])
self.gripper_commander.move(cmd["width"], speed=speed, wait=True)
elif cmd["command"] == "close_to_grasp":
print("closing to grasp")
self.gripper_commander.close(speed=speed)
else:
print("WARNING -- unrecognized gripper command:", cmd["command"])
except Exception as e:
print("ERROR processing command:\n", cmd)
print("exception:", e)
def run(self):
rate = rospy.Rate(60.0)
while not rospy.is_shutdown():
self.process_latest_commands()
rate.sleep()
if __name__ == "__main__":
node_name = "franka_gripper_commander_relay"
rospy.init_node(node_name)
parser = argparse.ArgumentParser(node_name)
parser.add_argument("--is_sim", action="store_true", help="Set to start in simulated env.")
parser.add_argument("--open", action="store_true", help="Open the gripper then exit.")
parser.add_argument("--close", action="store_true", help="Close the gripper then exit.")
parser.add_argument("--close_pinch", action="store_true", help="Close the gripper then exit.")
args = parser.parse_args()
if args.open:
gripper_commander = FrankaGripperCommander(verbose=True)
gripper_commander.open(speed=speed)
elif args.close:
gripper_commander = FrankaGripperCommander(verbose=True)
gripper_commander.close(speed=speed)
elif args.close_pinch:
gripper_commander = FrankaGripperCommander(verbose=True)
gripper_commander.move(pinch_width, speed=speed, wait=True)
else:
listener = FrankaGripperCommandRelay(args.is_sim)
listener.run()
| 4,852 | Python | 34.166666 | 101 | 0.628607 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/src/python/set_high_collision_thresholds.py | #!/usr/bin/env python
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# Simple action client interface to the gripper action server.
import rospy
from franka_control.srv import SetJointImpedance
from franka_control.srv import SetJointImpedanceRequest
from franka_control.srv import SetForceTorqueCollisionBehavior
from franka_control.srv import SetForceTorqueCollisionBehaviorRequest
rospy.init_node("set_control_parameters")
force_torque_srv = "/franka_control/set_force_torque_collision_behavior"
lower_torque_thresholds_nominal = [1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0]
upper_torque_thresholds_nominal = [1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0]
lower_force_thresholds_nominal = [1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0]
upper_force_thresholds_nominal = [1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0]
ft_req = SetForceTorqueCollisionBehaviorRequest()
ft_req.lower_torque_thresholds_nominal = lower_torque_thresholds_nominal
ft_req.upper_torque_thresholds_nominal = upper_torque_thresholds_nominal
ft_req.lower_force_thresholds_nominal = lower_force_thresholds_nominal
ft_req.upper_force_thresholds_nominal = upper_force_thresholds_nominal
print(ft_req)
rospy.loginfo("Waiting for services...")
rospy.wait_for_service(force_torque_srv)
rospy.loginfo("Services ready.")
ft_srv = rospy.ServiceProxy(force_torque_srv, SetForceTorqueCollisionBehavior)
resp = ft_srv(ft_req)
failed = False
if not resp.success:
rospy.logerr("Could not set force torque collision behavior!")
failed = True
else:
rospy.loginfo("Set force torque collision behavior!")
if failed:
raise RuntimeError("Failed to set control parameters")
| 2,062 | Python | 39.45098 | 90 | 0.78613 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/include/cortex/control/franka/interpolated_command_stream_controller.h | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
#pragma once
#include <memory>
#include <mutex>
#include <vector>
#include <controller_interface/multi_interface_controller.h>
#include <cortex/control/command_stream_interpolator.h>
#include <cortex_control/JointPosVelAccCommand.h>
#include <hardware_interface/joint_command_interface.h>
#include <hardware_interface/robot_hw.h>
#include <ros/node_handle.h>
#include <ros/time.h>
namespace cortex {
namespace control {
namespace franka {
/**
* \brief Joint position controller using cortex rmp control commands.
*
* This controller forwards with interpolation the received cortex
* control commands to the robot's joint interface.
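 *
 * Example ros_control configuration for loading this controller (a sketch mirroring
 * config/controller.yaml in this package; the seven Franka Panda arm joints go under
 * joint_names):
 *
 *   joint_position_controller:
 *     type: cortex_control_franka/InterpolatedCommandStreamController
 *     joint_names: [panda_joint1, panda_joint2, ..., panda_joint7]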
*/
class InterpolatedCommandStreamController
: public controller_interface::MultiInterfaceController<
hardware_interface::PositionJointInterface> {
public:
/**
* \brief Initializes the controller.
*
* \param robot_hardware handle to the robot's hardware abstraction
* \param node_handle node handle instance
*/
bool init(hardware_interface::RobotHW *robot_hardware, ros::NodeHandle &node_handle) override;
/**
* \brief Initialization of the controller upon activation.
*
* \param time time at which the controller was activated
*/
void starting(ros::Time const &time) override;
/**
* \brief Control update loop execution.
*
* \param time current time
* \param period time elapsed since last call
*/
void update(ros::Time const &time, ros::Duration const &period) override;
private:
/**
* \brief Retrieves the current position from the joint handles.
*/
Eigen::VectorXd current_position() const;
/**
* \brief Sends the robot's current pose to the robot.
*/
void send_current_position();
/**
* \brief Sends the defined position to the robot's joints.
*
* \param q joint position to be sent to the robot
*/
void send_position_command(const Eigen::VectorXd &q);
private:
std::shared_ptr<cortex::control::CommandStreamInterpolator> command_stream_interpolator_;
bool initialize_blending_;
ros::Time controller_time_;
ros::Time start_time_;
hardware_interface::PositionJointInterface *joint_interface_;
std::vector<hardware_interface::JointHandle> joint_handles_;
ros::Duration print_period_;
ros::Time next_print_time_;
};
} // namespace franka
} // namespace control
} // namespace cortex
| 2,800 | C | 28.797872 | 96 | 0.734286 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/config/controller.yaml | joint_position_controller:
type: cortex_control_franka/InterpolatedCommandStreamController
joint_names:
- panda_joint1
- panda_joint2
- panda_joint3
- panda_joint4
- panda_joint5
- panda_joint6
- panda_joint7
| 273 | YAML | 23.909089 | 67 | 0.615385 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/config/command_stream_interpolator.yaml | params:
interpolation_delay: .2
use_smoothing_interpolator: true
blending_duration: 2.
backend_timeout: .5
ros_topics:
joint_state: /robot/joint_state
rmpflow_commands:
command: /cortex/arm/command
ack: /cortex/arm/command/ack
suppress: /cortex/arm/command/suppress
interpolated: /cortex/arm/command/interpolated
| 370 | YAML | 27.538459 | 54 | 0.678378 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/isaac_vins/config/isaac_a1/vins_fusion_isaac_a1.yaml | %YAML:1.0
#common parameters
#support: 1 imu 1 cam; 1 imu 2 cam: 2 cam;
imu: 0
num_of_cam: 2
imu_topic: "/isaac_a1/imu_data"
image0_topic: "/isaac_a1/camera_forward/camera_left/rgb"
image1_topic: "/isaac_a1/camera_forward/camera_right/rgb"
output_path: "~/output"
cam0_calib: "isaac_left.yaml"
cam1_calib: "isaac_right.yaml"
image_width: 640
image_height: 480
# Extrinsic parameter between IMU and Camera.
estimate_extrinsic: 1 # 0 Have accurate extrinsic parameters. We will trust the following imu^R_cam, imu^T_cam; don't change it.
# 1 Have an initial guess about extrinsic parameters. We will optimize around your initial guess.
body_T_cam0: !!opencv-matrix
rows: 4
cols: 4
dt: d
data: [ 0, 0, 1, 0.2693,
-1, 0, 0, 0.025,
0, -1, 0, 0.067,
0., 0., 0., 1. ]
body_T_cam1: !!opencv-matrix
rows: 4
cols: 4
dt: d
data: [ 0, 0, 1, 0.2693,
-1, 0, 0, -0.025,
0, -1, 0, 0.067,
0., 0., 0., 1. ]
#Multiple thread support
multiple_thread: 0
#feature tracker parameters
max_cnt: 150 # max feature number in feature tracking
min_dist: 10 # min distance between two features
freq: 15 # frequency (Hz) of publishing the tracking result. At least 10 Hz for good estimation. If set to 0, the frequency will be the same as the raw image
F_threshold: 1.0 # ransac threshold (pixel)
show_track: 1 # publish tracking image as topic
flow_back: 1 # perform forward and backward optical flow to improve feature tracking accuracy
#optimization parameters
max_solver_time: 0.04 # max solver iteration time (s), to guarantee real time
max_num_iterations: 8 # max solver iterations, to guarantee real time
keyframe_parallax: 10.0 # keyframe selection threshold (pixel)
#imu parameters The more accurate parameters you provide, the better performance
acc_n: 0.5 # accelerometer measurement noise standard deviation. #0.2 0.04
gyr_n: 0.1 # gyroscope measurement noise standard deviation. #0.05 0.004
acc_w: 0.001 # accelerometer bias random walk noise standard deviation. #0.002
gyr_w: 0.0001 # gyroscope bias random walk noise standard deviation. #4.0e-5
g_norm: 9.805 # gravity magnitude
#unsynchronization parameters
estimate_td: 0 # online estimate time offset between camera and imu
td: 0.0 # initial value of time offset. unit: s. read image clock + td = real image clock (IMU clock)
#loop closure parameters
load_previous_pose_graph: 0 # load and reuse previous pose graph; load from 'pose_graph_save_path'
pose_graph_save_path: "~/output/pose_graph/" # save and load path
save_image: 0 # save image in pose graph for visualization purpose; you can disable this function by setting 0
| 2,909 | YAML | 39.416666 | 154 | 0.649708 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/isaac_vins/config/isaac_a1/isaac_left.yaml | %YAML:1.0
---
model_type: PINHOLE
camera_name: camera
image_width: 640
image_height: 480
distortion_parameters:
k1: 0.0
k2: 0.0
p1: 0.0
p2: 0.0
projection_parameters:
fx: 732.999267578125
fy: 732.9993286132812
cx: 320
cy: 240
| 250 | YAML | 13.764705 | 24 | 0.664 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/isaac_vins/config/isaac_a1/isaac_right.yaml | %YAML:1.0
---
model_type: PINHOLE
camera_name: camera
image_width: 640
image_height: 480
distortion_parameters:
k1: 0.0
k2: 0.0
p1: 0.0
p2: 0.0
projection_parameters:
fx: 732.999267578125
fy: 732.9993286132812
cx: 320
cy: 240
| 250 | YAML | 13.764705 | 24 | 0.664 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/carter_2dnav/map/carter_office_navigation.yaml | image: carter_office_navigation.png
resolution: 0.05
origin: [-29.975, -39.975, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 139 | YAML | 18.999997 | 35 | 0.733813 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/carter_2dnav/map/carter_hospital_navigation.yaml | image: carter_hospital_navigation.png
resolution: 0.05
origin: [-49.625, -4.675, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 140 | YAML | 19.142854 | 37 | 0.735714 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/carter_2dnav/map/carter_warehouse_navigation.yaml | image: carter_warehouse_navigation.png
resolution: 0.05
origin: [-11.975, -17.975, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 142 | YAML | 19.428569 | 38 | 0.739437 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/carter_2dnav/params/base_local_planner_params.yaml | TrajectoryPlannerROS:
holonomic_robot: false
max_vel_x: 1.2
min_vel_x: 0.1
max_vel_y: 0.0
min_vel_y: 0.0
max_vel_theta: 0.8
min_vel_theta: -0.8
min_in_place_vel_theta: 0.3
acc_lim_theta: 3.2
acc_lim_x: 2.5
acc_lim_y: 0.0
xy_goal_tolerance: 0.25
yaw_goal_tolerance: 0.05
occdist_scale: 0.7
escape_vel: -0.1
meter_scoring: true
path_distance_bias: 0.8
| 386 | YAML | 19.36842 | 29 | 0.642487 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/carter_2dnav/params/costmap_common_params.yaml | obstacle_range: 25
raytrace_range: 3
robot_radius: 0.36
cost_scaling_factor: 3.0
observation_sources: laser_scan_sensor
laser_scan_sensor: {sensor_frame: carter_lidar, data_type: LaserScan, topic: scan, marking: true, clearing: true}
| 234 | YAML | 32.571424 | 113 | 0.773504 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/carter_2dnav/params/global_costmap_params.yaml | global_costmap:
global_frame: map
robot_base_frame: base_link
update_frequency: 1.0
publish_frequency: 0.5
static_map: true
transform_tolerance: 1.25
inflation_radius: 0.85
| 187 | YAML | 19.888887 | 29 | 0.727273 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/carter_2dnav/params/local_costmap_params.yaml | local_costmap:
global_frame: odom
robot_base_frame: base_link
update_frequency: 5.0
publish_frequency: 2.0
static_map: false
rolling_window: true
width: 7.0
height: 7.0
resolution: 0.1
transform_tolerance: 1.25
inflation_radius: 0.32
| 257 | YAML | 17.42857 | 29 | 0.70428 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/isaac_ros_navigation_goal/setup.py | from setuptools import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
packages=["goal_generators", "obstacle_map"], package_dir={"": "isaac_ros_navigation_goal"}
)
setup(**d)
| 230 | Python | 27.874997 | 95 | 0.743478 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/obstacle_map.py | from __future__ import absolute_import
import numpy as np
import yaml
import os
import math
from PIL import Image
class GridMap:
def __init__(self, yaml_file_path):
self.__get_meta_from_yaml(yaml_file_path)
self.__get_raw_map()
self.__add_max_range_to_meta()
def __get_meta_from_yaml(self, yaml_file_path):
"""
Reads map meta from the yaml file.
Parameters
----------
yaml_file_path: path of the yaml file.
"""
with open(yaml_file_path, "r") as f:
file_content = f.read()
self.__map_meta = yaml.safe_load(file_content)
self.__map_meta["image"] = os.path.join(os.path.dirname(yaml_file_path), self.__map_meta["image"])
def __get_raw_map(self):
"""
Reads the map image and generates the grid map.\n
Grid map is a 2D boolean matrix where True=>occupied space & False=>Free space.
"""
img = Image.open(self.__map_meta.get("image"))
img = np.array(img)
# Anything greater than free_thresh is considered as occupied
if self.__map_meta["negate"]:
res = np.where((img / 255)[:, :, 0] > self.__map_meta["free_thresh"])
else:
res = np.where(((255 - img) / 255)[:, :, 0] > self.__map_meta["free_thresh"])
self.__grid_map = np.zeros(shape=(img.shape[:2]), dtype=bool)
for i in range(res[0].shape[0]):
self.__grid_map[res[0][i], res[1][i]] = 1
def __add_max_range_to_meta(self):
"""
Calculates and adds the max value of pose in x & y direction to the meta.
"""
max_x = self.__grid_map.shape[1] * self.__map_meta["resolution"] + self.__map_meta["origin"][0]
max_y = self.__grid_map.shape[0] * self.__map_meta["resolution"] + self.__map_meta["origin"][1]
self.__map_meta["max_x"] = round(max_x, 2)
self.__map_meta["max_y"] = round(max_y, 2)
def __pad_obstacles(self, distance):
pass
def get_range(self):
"""
Returns the bounds of pose values in x & y direction.\n
Returns
-------
[List]:\n
Where list[0][0]: min value in x direction
list[0][1]: max value in x direction
list[1][0]: min value in y direction
list[1][1]: max value in y direction
"""
return [
[self.__map_meta["origin"][0], self.__map_meta["max_x"]],
[self.__map_meta["origin"][1], self.__map_meta["max_y"]],
]
def __transform_to_image_coordinates(self, point):
"""
Transforms a pose in meters to image pixel coordinates.
Parameters
----------
Point: A point as list. where list[0]=>pose.x and list[1]=pose.y
Returns
-------
        [Tuple]: tuple[0]=>pixel value in x direction, i.e. column index.
                 tuple[1]=>pixel value in y direction, i.e. row index.
"""
p_x, p_y = point
i_x = math.floor((p_x - self.__map_meta["origin"][0]) / self.__map_meta["resolution"])
i_y = math.floor((p_y - self.__map_meta["origin"][1]) / self.__map_meta["resolution"])
# because origin in yaml is at bottom left of image
i_y = self.__grid_map.shape[0] - i_y
return i_x, i_y
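    # Worked example (illustrative numbers matching the carter_warehouse_navigation.yaml map
    # meta elsewhere in this workspace, i.e. resolution 0.05 and origin [-11.975, -17.975]):
    # the pose (0.0, 0.0) maps to i_x = floor(11.975 / 0.05) = 239 and, for a grid with H rows,
    # i_y = H - floor(17.975 / 0.05) = H - 359.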
def __transform_distance_to_pixels(self, distance):
"""
Converts the distance in meters to number of pixels based on the resolution.
Parameters
----------
distance: value in meters
Returns
-------
        [Integer]: number of pixels that represent the same distance.
"""
return math.ceil(distance / self.__map_meta["resolution"])
def __is_obstacle_in_distance(self, img_point, distance):
"""
Checks if any obstacle is in vicinity of the given image point.
Parameters
----------
img_point: pixel values of the point
        distance: distance in pixels within which there shouldn't be any obstacle.
Returns
-------
[Bool]: True if any obstacle found else False.
"""
# need to make sure that patch xmin & ymin are >=0,
# because of python's negative indexing capability
row_start_idx = 0 if img_point[1] - distance < 0 else img_point[1] - distance
col_start_idx = 0 if img_point[0] - distance < 0 else img_point[0] - distance
# image point acts as the center of the square, where each side of square is of size
# 2xdistance. using int() because in python2.x math.floor() returns float.
patch = self.__grid_map[
int(row_start_idx) : int(img_point[1] + distance), int(col_start_idx) : int(img_point[0] + distance)
]
obstacles = np.where(patch == True)
return len(obstacles[0]) > 0
def is_valid_pose(self, point, distance=0.2):
"""
        Checks if a given pose is at least "distance" away from any obstacle.
Parameters
----------
point: pose in 2D space. where point[0]=pose.x and point[1]=pose.y
distance: distance in meters.
Returns
-------
[Bool]: True if pose is valid else False
"""
assert len(point) == 2
img_point = self.__transform_to_image_coordinates(point)
img_pixel_distance = self.__transform_distance_to_pixels(distance)
return not self.__is_obstacle_in_distance(img_point, img_pixel_distance)
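# Minimal usage sketch (assumes one of the map yaml files shipped with carter_2dnav; the
# path is illustrative):
#   grid_map = GridMap("/path/to/carter_warehouse_navigation.yaml")
#   print(grid_map.get_range())
#   print(grid_map.is_valid_pose([0.0, 0.0], distance=0.2))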
| 5,482 | Python | 34.836601 | 112 | 0.55436 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/set_goal.py | #!/usr/bin/env python
from __future__ import absolute_import
import rospy
import actionlib
import sys
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from obstacle_map import GridMap
from goal_generators import RandomGoalGenerator, GoalReader
from geometry_msgs.msg import PoseWithCovarianceStamped
class SetNavigationGoal:
def __init__(self):
self.__goal_generator = self.__create_goal_generator()
action_server_name = rospy.get_param("action_server_name", "move_base")
self._action_client = actionlib.SimpleActionClient(action_server_name, MoveBaseAction)
self.MAX_ITERATION_COUNT = rospy.get_param("iteration_count", 1)
assert self.MAX_ITERATION_COUNT > 0
self.curr_iteration_count = 1
self.__initial_goal_publisher = rospy.Publisher("initialpose", PoseWithCovarianceStamped, queue_size=1)
self.__initial_pose = rospy.get_param("initial_pose", None)
self.__is_initial_pose_sent = True if self.__initial_pose is None else False
def __send_initial_pose(self):
"""
Publishes the initial pose.
        This function is called only once, before any goal pose is sent
        to the mission server.
"""
goal = PoseWithCovarianceStamped()
goal.header.frame_id = rospy.get_param("frame_id", "map")
goal.header.stamp = rospy.get_rostime()
goal.pose.pose.position.x = self.__initial_pose[0]
goal.pose.pose.position.y = self.__initial_pose[1]
goal.pose.pose.position.z = self.__initial_pose[2]
goal.pose.pose.orientation.x = self.__initial_pose[3]
goal.pose.pose.orientation.y = self.__initial_pose[4]
goal.pose.pose.orientation.z = self.__initial_pose[5]
goal.pose.pose.orientation.w = self.__initial_pose[6]
rospy.sleep(1)
self.__initial_goal_publisher.publish(goal)
def send_goal(self):
"""
Sends the goal to the action server.
"""
if not self.__is_initial_pose_sent:
rospy.loginfo("Sending initial pose")
self.__send_initial_pose()
self.__is_initial_pose_sent = True
            # The assumption is that the initial pose gets set within this duration after the
            # first publish. This could be made more robust, e.g. the /particlecloud topic has
            # no messages until the initial pose is set.
rospy.sleep(10)
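            # A sketch of that more robust alternative (not used here): block until AMCL
            # publishes its particle cloud, which only happens once an initial pose has
            # been accepted.
            #   from geometry_msgs.msg import PoseArray
            #   rospy.wait_for_message("/particlecloud", PoseArray, timeout=30.0)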
rospy.loginfo("Sending first goal")
self._action_client.wait_for_server()
goal_msg = self.__get_goal()
if goal_msg is None:
rospy.signal_shutdown("Goal message not generated.")
sys.exit(1)
self._action_client.send_goal(goal_msg, feedback_cb=self.__goal_response_callback)
def __goal_response_callback(self, feedback):
"""
        Callback function to check the response (goal accepted/rejected) from the server.\n
        If the goal is rejected, execution stops for now. (We could change this to resample the pose if rejected.)
"""
if self.verify_goal_state():
rospy.loginfo("Waiting to reach goal")
wait = self._action_client.wait_for_result()
if self.verify_goal_state():
self.__get_result_callback(True)
def verify_goal_state(self):
print("Action Client State:", self._action_client.get_state(), self._action_client.get_goal_status_text())
if self._action_client.get_state() not in [0, 1, 3]:
rospy.signal_shutdown("Goal Rejected :(")
return False
return True
def __get_goal(self):
goal_msg = MoveBaseGoal()
goal_msg.target_pose.header.frame_id = rospy.get_param("frame_id", "map")
goal_msg.target_pose.header.stamp = rospy.get_rostime()
pose = self.__goal_generator.generate_goal()
# couldn't sample a pose which is not close to obstacles. Rare but might happen in dense maps.
if pose is None:
rospy.logerr("Could not generate next goal. Returning. Possible reasons for this error could be:")
rospy.logerr(
"1. If you are using GoalReader then please make sure iteration count <= no of goals avaiable in file."
)
rospy.logerr(
"2. If RandomGoalGenerator is being used then it was not able to sample a pose which is given distance away from the obstacles."
)
return
rospy.loginfo("Generated goal pose: {0}".format(pose))
goal_msg.target_pose.pose.position.x = pose[0]
goal_msg.target_pose.pose.position.y = pose[1]
goal_msg.target_pose.pose.orientation.x = pose[2]
goal_msg.target_pose.pose.orientation.y = pose[3]
goal_msg.target_pose.pose.orientation.z = pose[4]
goal_msg.target_pose.pose.orientation.w = pose[5]
return goal_msg
def __get_result_callback(self, wait):
if wait and self.curr_iteration_count < self.MAX_ITERATION_COUNT:
self.curr_iteration_count += 1
self.send_goal()
else:
rospy.signal_shutdown("Iteration done or Goal not reached.")
# in this callback func we can compare/compute/log something while the robot is on its way to goal.
def __feedback_callback(self, feedback_msg):
pass
def __create_goal_generator(self):
goal_generator_type = rospy.get_param("goal_generator_type", "RandomGoalGenerator")
goal_generator = None
if goal_generator_type == "RandomGoalGenerator":
if rospy.get_param("map_yaml_path", None) is None:
rospy.loginfo("Yaml file path is not given. Returning..")
sys.exit(1)
yaml_file_path = rospy.get_param("map_yaml_path", None)
grid_map = GridMap(yaml_file_path)
obstacle_search_distance_in_meters = rospy.get_param("obstacle_search_distance_in_meters", 0.2)
assert obstacle_search_distance_in_meters > 0
goal_generator = RandomGoalGenerator(grid_map, obstacle_search_distance_in_meters)
elif goal_generator_type == "GoalReader":
if rospy.get_param("goal_text_file_path", None) is None:
rospy.loginfo("Goal text file path is not given. Returning..")
sys.exit(1)
file_path = rospy.get_param("goal_text_file_path", None)
goal_generator = GoalReader(file_path)
else:
rospy.loginfo("Invalid goal generator specified. Returning...")
sys.exit(1)
return goal_generator
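# Parameters read by this node (illustrative values; they are normally set on the
# parameter server, e.g. from a launch file, before the node starts):
#   goal_generator_type: RandomGoalGenerator    # or GoalReader
#   map_yaml_path: /path/to/carter_warehouse_navigation.yaml
#   obstacle_search_distance_in_meters: 0.2
#   iteration_count: 5
#   initial_pose: [x, y, z, qx, qy, qz, qw]
#   action_server_name: move_base
#   frame_id: map
#   goal_text_file_path: /path/to/goals.txt    # only used by GoalReader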
def main():
rospy.init_node("set_goal_py")
set_goal = SetNavigationGoal()
result = set_goal.send_goal()
rospy.spin()
if __name__ == "__main__":
main()
| 6,772 | Python | 40.552147 | 144 | 0.627732 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/goal_reader.py | from __future__ import absolute_import
from .goal_generator import GoalGenerator
class GoalReader(GoalGenerator):
def __init__(self, file_path):
self.__file_path = file_path
self.__generator = self.__get_goal()
def generate_goal(self, max_num_of_trials=1000):
try:
return next(self.__generator)
except StopIteration:
return
def __get_goal(self):
for row in open(self.__file_path, "r"):
yield list(map(float, row.strip().split(" ")))
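# Expected goal file format: one goal per line, space-separated as
# "x y orient_x orient_y orient_z orient_w", for example (illustrative values):
#   1.0 2.0 0.0 0.0 0.0 1.0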
| 525 | Python | 26.684209 | 58 | 0.598095 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/random_goal_generator.py | from __future__ import absolute_import
import numpy as np
from .goal_generator import GoalGenerator
class RandomGoalGenerator(GoalGenerator):
"""
Random goal generator.
parameters
----------
grid_map: GridMap Object
distance: distance in meters to check vicinity for obstacles.
"""
def __init__(self, grid_map, distance):
self.__grid_map = grid_map
self.__distance = distance
def generate_goal(self, max_num_of_trials=1000):
"""
Generate the goal.
Parameters
----------
        max_num_of_trials: maximum number of attempts at generating a pose before giving up without a valid pose.
Returns
-------
[List][Pose]: Pose in format [pose.x,pose.y,orientaion.x,orientaion.y,orientaion.z,orientaion.w]
"""
range_ = self.__grid_map.get_range()
trial_count = 0
while trial_count < max_num_of_trials:
x = np.random.uniform(range_[0][0], range_[0][1])
y = np.random.uniform(range_[1][0], range_[1][1])
orient_x = 0 # not needed because robot is in x,y plane
orient_y = 0 # not needed because robot is in x,y plane
orient_z = np.random.uniform(0, 1)
orient_w = np.random.uniform(0, 1)
if self.__grid_map.is_valid_pose([x, y], self.__distance):
goal = [x, y, orient_x, orient_y, orient_z, orient_w]
return goal
trial_count += 1
| 1,488 | Python | 32.088888 | 107 | 0.571237 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/__init__.py | from .random_goal_generator import RandomGoalGenerator
from .goal_reader import GoalReader
| 91 | Python | 29.666657 | 54 | 0.857143 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/goal_generator.py | from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
class GoalGenerator:
__metaclass__ = ABCMeta
"""
Parent class for the Goal generators
"""
def __init__(self):
pass
@abstractmethod
def generate_goal(self, max_num_of_trials=2000):
"""
Generate the goal.
Parameters
----------
        max_num_of_trials: maximum number of attempts at generating a pose before giving up without a valid pose.
Returns
-------
[List][Pose]: Pose in format [pose.x,pose.y,orientaion.x,orientaion.y,orientaion.z,orientaion.w]
"""
pass
| 648 | Python | 22.178571 | 107 | 0.594136 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/navigation/isaac_ros_navigation_goal/assets/carter_warehouse_navigation.yaml | image: carter_warehouse_navigation.png
resolution: 0.05
origin: [-11.975, -17.975, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 142 | YAML | 19.428569 | 38 | 0.739437 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/isaac_tutorials/scripts/ros2_publisher.py | #!/usr/bin/env python3
# Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import JointState
import numpy as np
import time
class TestROS2Bridge(Node):
def __init__(self):
super().__init__("test_ros2bridge")
# Create the publisher. This publisher will publish a JointState message to the /joint_command topic.
self.publisher_ = self.create_publisher(JointState, "joint_command", 10)
# Create a JointState message
self.joint_state = JointState()
self.joint_state.name = [
"panda_joint1",
"panda_joint2",
"panda_joint3",
"panda_joint4",
"panda_joint5",
"panda_joint6",
"panda_joint7",
"panda_finger_joint1",
"panda_finger_joint2",
]
num_joints = len(self.joint_state.name)
# make sure kit's editor is playing for receiving messages
self.joint_state.position = np.array([0.0] * num_joints, dtype=np.float64).tolist()
self.default_joints = [0.0, -1.16, -0.0, -2.3, -0.0, 1.6, 1.1, 0.4, 0.4]
        # limiting the movements to a smaller range (this is not the range of the robot, just the range of the movement)
self.max_joints = np.array(self.default_joints) + 0.5
self.min_joints = np.array(self.default_joints) - 0.5
# position control the robot to wiggle around each joint
self.time_start = time.time()
timer_period = 0.05 # seconds
self.timer = self.create_timer(timer_period, self.timer_callback)
def timer_callback(self):
self.joint_state.header.stamp = self.get_clock().now().to_msg()
joint_position = (
np.sin(time.time() - self.time_start) * (self.max_joints - self.min_joints) * 0.5 + self.default_joints
)
self.joint_state.position = joint_position.tolist()
# Publish the message to the topic
self.publisher_.publish(self.joint_state)
def main(args=None):
rclpy.init(args=args)
ros2_publisher = TestROS2Bridge()
rclpy.spin(ros2_publisher)
# Destroy the node explicitly
ros2_publisher.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
| 2,662 | Python | 31.084337 | 119 | 0.644252 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/carter_navigation/launch/carter_navigation.launch.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
use_sim_time = LaunchConfiguration("use_sim_time", default="True")
map_dir = LaunchConfiguration(
"map",
default=os.path.join(
get_package_share_directory("carter_navigation"), "maps", "carter_warehouse_navigation.yaml"
),
)
param_dir = LaunchConfiguration(
"params_file",
default=os.path.join(
get_package_share_directory("carter_navigation"), "params", "carter_navigation_params.yaml"
),
)
nav2_bringup_launch_dir = os.path.join(get_package_share_directory("nav2_bringup"), "launch")
rviz_config_dir = os.path.join(get_package_share_directory("carter_navigation"), "rviz2", "carter_navigation.rviz")
return LaunchDescription(
[
DeclareLaunchArgument("map", default_value=map_dir, description="Full path to map file to load"),
DeclareLaunchArgument(
"params_file", default_value=param_dir, description="Full path to param file to load"
),
DeclareLaunchArgument(
"use_sim_time", default_value="true", description="Use simulation (Omniverse Isaac Sim) clock if true"
),
IncludeLaunchDescription(
PythonLaunchDescriptionSource(os.path.join(nav2_bringup_launch_dir, "rviz_launch.py")),
launch_arguments={"namespace": "", "use_namespace": "False", "rviz_config": rviz_config_dir}.items(),
),
IncludeLaunchDescription(
PythonLaunchDescriptionSource([nav2_bringup_launch_dir, "/bringup_launch.py"]),
launch_arguments={"map": map_dir, "use_sim_time": use_sim_time, "params_file": param_dir}.items(),
),
Node(
package='pointcloud_to_laserscan', executable='pointcloud_to_laserscan_node',
remappings=[('cloud_in', ['/front_3d_lidar/point_cloud']),
('scan', ['/scan'])],
parameters=[{
'target_frame': 'front_3d_lidar',
'transform_tolerance': 0.01,
'min_height': -0.4,
'max_height': 1.5,
'angle_min': -1.5708, # -M_PI/2
'angle_max': 1.5708, # M_PI/2
'angle_increment': 0.0087, # M_PI/360.0
'scan_time': 0.3333,
'range_min': 0.05,
'range_max': 100.0,
'use_inf': True,
'inf_epsilon': 1.0,
# 'concurrency_level': 1,
}],
name='pointcloud_to_laserscan'
)
]
)
| 3,522 | Python | 41.963414 | 119 | 0.601363 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/carter_navigation/launch/carter_navigation_individual.launch.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, ExecuteProcess, IncludeLaunchDescription
from launch.conditions import IfCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, PythonExpression, TextSubstitution
from launch_ros.actions import Node
def generate_launch_description():
# Get the launch directory
nav2_launch_dir = os.path.join(get_package_share_directory("nav2_bringup"), "launch")
# Create the launch configuration variables
slam = LaunchConfiguration("slam")
namespace = LaunchConfiguration("namespace")
use_namespace = LaunchConfiguration("use_namespace")
map_yaml_file = LaunchConfiguration("map")
use_sim_time = LaunchConfiguration("use_sim_time")
params_file = LaunchConfiguration("params_file")
default_bt_xml_filename = LaunchConfiguration("default_bt_xml_filename")
autostart = LaunchConfiguration("autostart")
# Declare the launch arguments
declare_namespace_cmd = DeclareLaunchArgument("namespace", default_value="", description="Top-level namespace")
declare_use_namespace_cmd = DeclareLaunchArgument(
"use_namespace", default_value="false", description="Whether to apply a namespace to the navigation stack"
)
declare_slam_cmd = DeclareLaunchArgument("slam", default_value="False", description="Whether run a SLAM")
declare_map_yaml_cmd = DeclareLaunchArgument(
"map",
default_value=os.path.join(nav2_launch_dir, "maps", "carter_warehouse_navigation.yaml"),
description="Full path to map file to load",
)
declare_use_sim_time_cmd = DeclareLaunchArgument(
"use_sim_time", default_value="True", description="Use simulation (Isaac Sim) clock if true"
)
declare_params_file_cmd = DeclareLaunchArgument(
"params_file",
default_value=os.path.join(nav2_launch_dir, "params", "nav2_params.yaml"),
description="Full path to the ROS2 parameters file to use for all launched nodes",
)
declare_bt_xml_cmd = DeclareLaunchArgument(
"default_bt_xml_filename",
default_value=os.path.join(
get_package_share_directory("nav2_bt_navigator"), "behavior_trees", "navigate_w_replanning_and_recovery.xml"
),
description="Full path to the behavior tree xml file to use",
)
declare_autostart_cmd = DeclareLaunchArgument(
"autostart", default_value="true", description="Automatically startup the nav2 stack"
)
bringup_cmd = IncludeLaunchDescription(
PythonLaunchDescriptionSource(os.path.join(nav2_launch_dir, "bringup_launch.py")),
launch_arguments={
"namespace": namespace,
"use_namespace": use_namespace,
"slam": slam,
"map": map_yaml_file,
"use_sim_time": use_sim_time,
"params_file": params_file,
"default_bt_xml_filename": default_bt_xml_filename,
"autostart": autostart,
}.items(),
)
# Create the launch description and populate
ld = LaunchDescription()
# Declare the launch options
ld.add_action(declare_namespace_cmd)
ld.add_action(declare_use_namespace_cmd)
ld.add_action(declare_slam_cmd)
ld.add_action(declare_map_yaml_cmd)
ld.add_action(declare_use_sim_time_cmd)
ld.add_action(declare_params_file_cmd)
ld.add_action(declare_bt_xml_cmd)
ld.add_action(declare_autostart_cmd)
ld.add_action(bringup_cmd)
return ld
| 4,076 | Python | 39.366336 | 120 | 0.711237 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/carter_navigation/launch/multiple_robot_carter_navigation_hospital.launch.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
"""
Example for spawning multiple robots in Gazebo.
This is an example of how to create a launch file for spawning multiple robots into Gazebo
and launching multiple instances of the navigation stack, each controlling one robot.
The robots co-exist in a shared environment and are controlled by independent nav stacks.
"""
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, ExecuteProcess, GroupAction, IncludeLaunchDescription, LogInfo
from launch.conditions import IfCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, TextSubstitution
from launch_ros.actions import Node
def generate_launch_description():
# Get the launch and rviz directories
carter_nav2_bringup_dir = get_package_share_directory("carter_navigation")
nav2_bringup_dir = get_package_share_directory("nav2_bringup")
nav2_bringup_launch_dir = os.path.join(nav2_bringup_dir, "launch")
rviz_config_dir = os.path.join(carter_nav2_bringup_dir, "rviz2", "carter_navigation_namespaced.rviz")
# Names and poses of the robots
robots = [{"name": "carter1"}, {"name": "carter2"}, {"name": "carter3"}]
# Common settings
ENV_MAP_FILE = "carter_hospital_navigation.yaml"
use_sim_time = LaunchConfiguration("use_sim_time", default="True")
map_yaml_file = LaunchConfiguration("map")
default_bt_xml_filename = LaunchConfiguration("default_bt_xml_filename")
autostart = LaunchConfiguration("autostart")
rviz_config_file = LaunchConfiguration("rviz_config")
use_rviz = LaunchConfiguration("use_rviz")
log_settings = LaunchConfiguration("log_settings", default="true")
# Declare the launch arguments
declare_map_yaml_cmd = DeclareLaunchArgument(
"map",
default_value=os.path.join(carter_nav2_bringup_dir, "maps", ENV_MAP_FILE),
description="Full path to map file to load",
)
declare_robot1_params_file_cmd = DeclareLaunchArgument(
"carter1_params_file",
default_value=os.path.join(
carter_nav2_bringup_dir, "params", "hospital", "multi_robot_carter_navigation_params_1.yaml"
),
description="Full path to the ROS2 parameters file to use for robot1 launched nodes",
)
declare_robot2_params_file_cmd = DeclareLaunchArgument(
"carter2_params_file",
default_value=os.path.join(
carter_nav2_bringup_dir, "params", "hospital", "multi_robot_carter_navigation_params_2.yaml"
),
description="Full path to the ROS2 parameters file to use for robot2 launched nodes",
)
declare_robot3_params_file_cmd = DeclareLaunchArgument(
"carter3_params_file",
default_value=os.path.join(
carter_nav2_bringup_dir, "params", "hospital", "multi_robot_carter_navigation_params_3.yaml"
),
description="Full path to the ROS2 parameters file to use for robot3 launched nodes",
)
declare_bt_xml_cmd = DeclareLaunchArgument(
"default_bt_xml_filename",
default_value=os.path.join(
get_package_share_directory("nav2_bt_navigator"), "behavior_trees", "navigate_w_replanning_and_recovery.xml"
),
description="Full path to the behavior tree xml file to use",
)
declare_autostart_cmd = DeclareLaunchArgument(
"autostart", default_value="True", description="Automatically startup the stacks"
)
declare_rviz_config_file_cmd = DeclareLaunchArgument(
"rviz_config", default_value=rviz_config_dir, description="Full path to the RVIZ config file to use."
)
declare_use_rviz_cmd = DeclareLaunchArgument("use_rviz", default_value="True", description="Whether to start RVIZ")
# Define commands for launching the navigation instances
nav_instances_cmds = []
for robot in robots:
params_file = LaunchConfiguration(robot["name"] + "_params_file")
group = GroupAction(
[
IncludeLaunchDescription(
PythonLaunchDescriptionSource(os.path.join(nav2_bringup_launch_dir, "rviz_launch.py")),
condition=IfCondition(use_rviz),
launch_arguments={
"namespace": TextSubstitution(text=robot["name"]),
"use_namespace": "True",
"rviz_config": rviz_config_file,
}.items(),
),
IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(carter_nav2_bringup_dir, "launch", "carter_navigation_individual.launch.py")
),
launch_arguments={
"namespace": robot["name"],
"use_namespace": "True",
"map": map_yaml_file,
"use_sim_time": use_sim_time,
"params_file": params_file,
"default_bt_xml_filename": default_bt_xml_filename,
"autostart": autostart,
"use_rviz": "False",
"use_simulator": "False",
"headless": "False",
}.items(),
),
Node(
package='pointcloud_to_laserscan', executable='pointcloud_to_laserscan_node',
remappings=[('cloud_in', ['front_3d_lidar/point_cloud']),
('scan', ['scan'])],
parameters=[{
'target_frame': 'front_3d_lidar',
'transform_tolerance': 0.01,
'min_height': -0.4,
'max_height': 1.5,
'angle_min': -1.5708, # -M_PI/2
'angle_max': 1.5708, # M_PI/2
'angle_increment': 0.0087, # M_PI/360.0
'scan_time': 0.3333,
'range_min': 0.05,
'range_max': 100.0,
'use_inf': True,
'inf_epsilon': 1.0,
# 'concurrency_level': 1,
}],
name='pointcloud_to_laserscan',
namespace = robot["name"]
),
LogInfo(condition=IfCondition(log_settings), msg=["Launching ", robot["name"]]),
LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " map yaml: ", map_yaml_file]),
LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " params yaml: ", params_file]),
LogInfo(
condition=IfCondition(log_settings),
msg=[robot["name"], " behavior tree xml: ", default_bt_xml_filename],
),
LogInfo(
condition=IfCondition(log_settings), msg=[robot["name"], " rviz config file: ", rviz_config_file]
),
LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " autostart: ", autostart]),
]
)
nav_instances_cmds.append(group)
# Create the launch description and populate
ld = LaunchDescription()
# Declare the launch options
ld.add_action(declare_map_yaml_cmd)
ld.add_action(declare_robot1_params_file_cmd)
ld.add_action(declare_robot2_params_file_cmd)
ld.add_action(declare_robot3_params_file_cmd)
ld.add_action(declare_bt_xml_cmd)
ld.add_action(declare_use_rviz_cmd)
ld.add_action(declare_autostart_cmd)
ld.add_action(declare_rviz_config_file_cmd)
for simulation_instance_cmd in nav_instances_cmds:
ld.add_action(simulation_instance_cmd)
return ld
| 8,338 | Python | 42.432291 | 120 | 0.601823 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/carter_navigation/launch/multiple_robot_carter_navigation_office.launch.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
"""
Example for spawning multiple robots in Gazebo.
This is an example of how to create a launch file for spawning multiple robots into Gazebo
and launching multiple instances of the navigation stack, each controlling one robot.
The robots co-exist in a shared environment and are controlled by independent nav stacks.
"""
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, ExecuteProcess, GroupAction, IncludeLaunchDescription, LogInfo
from launch.conditions import IfCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, TextSubstitution
from launch_ros.actions import Node
def generate_launch_description():
# Get the launch and rviz directories
carter_nav2_bringup_dir = get_package_share_directory("carter_navigation")
nav2_bringup_dir = get_package_share_directory("nav2_bringup")
nav2_bringup_launch_dir = os.path.join(nav2_bringup_dir, "launch")
rviz_config_dir = os.path.join(carter_nav2_bringup_dir, "rviz2", "carter_navigation_namespaced.rviz")
# Names and poses of the robots
robots = [{"name": "carter1"}, {"name": "carter2"}, {"name": "carter3"}]
# Common settings
ENV_MAP_FILE = "carter_office_navigation.yaml"
use_sim_time = LaunchConfiguration("use_sim_time", default="True")
map_yaml_file = LaunchConfiguration("map")
default_bt_xml_filename = LaunchConfiguration("default_bt_xml_filename")
autostart = LaunchConfiguration("autostart")
rviz_config_file = LaunchConfiguration("rviz_config")
use_rviz = LaunchConfiguration("use_rviz")
log_settings = LaunchConfiguration("log_settings", default="true")
# Declare the launch arguments
declare_map_yaml_cmd = DeclareLaunchArgument(
"map",
default_value=os.path.join(carter_nav2_bringup_dir, "maps", ENV_MAP_FILE),
description="Full path to map file to load",
)
declare_robot1_params_file_cmd = DeclareLaunchArgument(
"carter1_params_file",
default_value=os.path.join(
carter_nav2_bringup_dir, "params", "office", "multi_robot_carter_navigation_params_1.yaml"
),
description="Full path to the ROS2 parameters file to use for robot1 launched nodes",
)
declare_robot2_params_file_cmd = DeclareLaunchArgument(
"carter2_params_file",
default_value=os.path.join(
carter_nav2_bringup_dir, "params", "office", "multi_robot_carter_navigation_params_2.yaml"
),
description="Full path to the ROS2 parameters file to use for robot2 launched nodes",
)
declare_robot3_params_file_cmd = DeclareLaunchArgument(
"carter3_params_file",
default_value=os.path.join(
carter_nav2_bringup_dir, "params", "office", "multi_robot_carter_navigation_params_3.yaml"
),
description="Full path to the ROS2 parameters file to use for robot3 launched nodes",
)
declare_bt_xml_cmd = DeclareLaunchArgument(
"default_bt_xml_filename",
default_value=os.path.join(
get_package_share_directory("nav2_bt_navigator"), "behavior_trees", "navigate_w_replanning_and_recovery.xml"
),
description="Full path to the behavior tree xml file to use",
)
declare_autostart_cmd = DeclareLaunchArgument(
"autostart", default_value="True", description="Automatically startup the stacks"
)
declare_rviz_config_file_cmd = DeclareLaunchArgument(
"rviz_config", default_value=rviz_config_dir, description="Full path to the RVIZ config file to use."
)
declare_use_rviz_cmd = DeclareLaunchArgument("use_rviz", default_value="True", description="Whether to start RVIZ")
# Define commands for launching the navigation instances
nav_instances_cmds = []
for robot in robots:
params_file = LaunchConfiguration(robot["name"] + "_params_file")
group = GroupAction(
[
IncludeLaunchDescription(
PythonLaunchDescriptionSource(os.path.join(nav2_bringup_launch_dir, "rviz_launch.py")),
condition=IfCondition(use_rviz),
launch_arguments={
"namespace": TextSubstitution(text=robot["name"]),
"use_namespace": "True",
"rviz_config": rviz_config_file,
}.items(),
),
IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(carter_nav2_bringup_dir, "launch", "carter_navigation_individual.launch.py")
),
launch_arguments={
"namespace": robot["name"],
"use_namespace": "True",
"map": map_yaml_file,
"use_sim_time": use_sim_time,
"params_file": params_file,
"default_bt_xml_filename": default_bt_xml_filename,
"autostart": autostart,
"use_rviz": "False",
"use_simulator": "False",
"headless": "False",
}.items(),
),
Node(
package='pointcloud_to_laserscan', executable='pointcloud_to_laserscan_node',
remappings=[('cloud_in', ['front_3d_lidar/point_cloud']),
('scan', ['scan'])],
parameters=[{
'target_frame': 'front_3d_lidar',
'transform_tolerance': 0.01,
'min_height': -0.4,
'max_height': 1.5,
'angle_min': -1.5708, # -M_PI/2
'angle_max': 1.5708, # M_PI/2
'angle_increment': 0.0087, # M_PI/360.0
'scan_time': 0.3333,
'range_min': 0.05,
'range_max': 100.0,
'use_inf': True,
'inf_epsilon': 1.0,
# 'concurrency_level': 1,
}],
name='pointcloud_to_laserscan',
namespace = robot["name"]
),
LogInfo(condition=IfCondition(log_settings), msg=["Launching ", robot["name"]]),
LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " map yaml: ", map_yaml_file]),
LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " params yaml: ", params_file]),
LogInfo(
condition=IfCondition(log_settings),
msg=[robot["name"], " behavior tree xml: ", default_bt_xml_filename],
),
LogInfo(
condition=IfCondition(log_settings), msg=[robot["name"], " rviz config file: ", rviz_config_file]
),
LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " autostart: ", autostart]),
]
)
nav_instances_cmds.append(group)
# Create the launch description and populate
ld = LaunchDescription()
# Declare the launch options
ld.add_action(declare_map_yaml_cmd)
ld.add_action(declare_robot1_params_file_cmd)
ld.add_action(declare_robot2_params_file_cmd)
ld.add_action(declare_robot3_params_file_cmd)
ld.add_action(declare_bt_xml_cmd)
ld.add_action(declare_use_rviz_cmd)
ld.add_action(declare_autostart_cmd)
ld.add_action(declare_rviz_config_file_cmd)
for simulation_instance_cmd in nav_instances_cmds:
ld.add_action(simulation_instance_cmd)
return ld
| 8,330 | Python | 42.390625 | 120 | 0.601441 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/carter_navigation/maps/carter_office_navigation.yaml | image: carter_office_navigation.png
mode: trinary
resolution: 0.05
origin: [-29.975, -39.975, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 153 | YAML | 18.249998 | 35 | 0.738562 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/carter_navigation/maps/carter_hospital_navigation.yaml | image: carter_hospital_navigation.png
mode: trinary
resolution: 0.05
origin: [-49.625, -4.675, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 154 | YAML | 18.374998 | 37 | 0.74026 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/carter_navigation/maps/carter_warehouse_navigation.yaml | image: carter_warehouse_navigation.png
mode: trinary
resolution: 0.05
origin: [-11.975, -17.975, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 156 | YAML | 18.624998 | 38 | 0.74359 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/isaac_ros_navigation_goal/setup.py | from setuptools import setup
from glob import glob
import os
package_name = "isaac_ros_navigation_goal"
setup(
name=package_name,
version="0.0.1",
packages=[package_name, package_name + "/goal_generators"],
data_files=[
("share/ament_index/resource_index/packages", ["resource/" + package_name]),
("share/" + package_name, ["package.xml"]),
(os.path.join("share", package_name, "launch"), glob("launch/*.launch.py")),
("share/" + package_name + "/assets", glob("assets/*")),
],
install_requires=["setuptools"],
zip_safe=True,
maintainer="isaac sim",
maintainer_email="[email protected]",
description="Package to set goals for navigation stack.",
license="NVIDIA Isaac ROS Software License",
tests_require=["pytest"],
entry_points={"console_scripts": ["SetNavigationGoal = isaac_ros_navigation_goal.set_goal:main"]},
)
| 906 | Python | 33.884614 | 102 | 0.651214 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/isaac_ros_navigation_goal/test/test_flake8.py | # Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_flake8.main import main_with_errors
import pytest
@pytest.mark.flake8
@pytest.mark.linter
def test_flake8():
rc, errors = main_with_errors(argv=[])
assert rc == 0, "Found %d code style errors / warnings:\n" % len(errors) + "\n".join(errors)
| 864 | Python | 35.041665 | 96 | 0.741898 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/isaac_ros_navigation_goal/test/test_pep257.py | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_pep257.main import main
import pytest
@pytest.mark.linter
@pytest.mark.pep257
def test_pep257():
rc = main(argv=[".", "test"])
assert rc == 0, "Found code style errors / warnings"
| 803 | Python | 32.499999 | 74 | 0.743462 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/isaac_ros_navigation_goal/launch/isaac_ros_navigation_goal.launch.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
map_yaml_file = LaunchConfiguration(
"map_yaml_path",
default=os.path.join(
get_package_share_directory("isaac_ros_navigation_goal"), "assets", "carter_warehouse_navigation.yaml"
),
)
goal_text_file = LaunchConfiguration(
"goal_text_file_path",
default=os.path.join(get_package_share_directory("isaac_ros_navigation_goal"), "assets", "goals.txt"),
)
navigation_goal_node = Node(
name="set_navigation_goal",
package="isaac_ros_navigation_goal",
executable="SetNavigationGoal",
parameters=[
{
"map_yaml_path": map_yaml_file,
"iteration_count": 3,
"goal_generator_type": "RandomGoalGenerator",
"action_server_name": "navigate_to_pose",
"obstacle_search_distance_in_meters": 0.2,
"goal_text_file_path": goal_text_file,
"initial_pose": [-6.4, -1.04, 0.0, 0.0, 0.0, 0.99, 0.02],
}
],
output="screen",
)
return LaunchDescription([navigation_goal_node])
| 1,782 | Python | 35.387754 | 114 | 0.654882 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/obstacle_map.py | import numpy as np
import yaml
import os
import math
from PIL import Image
class GridMap:
def __init__(self, yaml_file_path):
self.__get_meta_from_yaml(yaml_file_path)
self.__get_raw_map()
self.__add_max_range_to_meta()
# print(self.__map_meta)
def __get_meta_from_yaml(self, yaml_file_path):
"""
Reads map meta from the yaml file.
Parameters
----------
yaml_file_path: path of the yaml file.
"""
with open(yaml_file_path, "r") as f:
file_content = f.read()
self.__map_meta = yaml.safe_load(file_content)
self.__map_meta["image"] = os.path.join(os.path.dirname(yaml_file_path), self.__map_meta["image"])
def __get_raw_map(self):
"""
Reads the map image and generates the grid map.\n
Grid map is a 2D boolean matrix where True=>occupied space & False=>Free space.
"""
img = Image.open(self.__map_meta.get("image"))
img = np.array(img)
# Anything greater than free_thresh is considered as occupied
if self.__map_meta["negate"]:
res = np.where((img / 255)[:, :, 0] > self.__map_meta["free_thresh"])
else:
res = np.where(((255 - img) / 255)[:, :, 0] > self.__map_meta["free_thresh"])
self.__grid_map = np.zeros(shape=(img.shape[:2]), dtype=bool)
for i in range(res[0].shape[0]):
self.__grid_map[res[0][i], res[1][i]] = 1
def __add_max_range_to_meta(self):
"""
Calculates and adds the max value of pose in x & y direction to the meta.
"""
max_x = self.__grid_map.shape[1] * self.__map_meta["resolution"] + self.__map_meta["origin"][0]
max_y = self.__grid_map.shape[0] * self.__map_meta["resolution"] + self.__map_meta["origin"][1]
self.__map_meta["max_x"] = round(max_x, 2)
self.__map_meta["max_y"] = round(max_y, 2)
def __pad_obstacles(self, distance):
pass
def get_range(self):
"""
Returns the bounds of pose values in x & y direction.\n
Returns
-------
[List]:\n
Where list[0][0]: min value in x direction
list[0][1]: max value in x direction
list[1][0]: min value in y direction
list[1][1]: max value in y direction
"""
return [
[self.__map_meta["origin"][0], self.__map_meta["max_x"]],
[self.__map_meta["origin"][1], self.__map_meta["max_y"]],
]
def __transform_to_image_coordinates(self, point):
"""
Transforms a pose in meters to image pixel coordinates.
Parameters
----------
        Point: A point as a list, where list[0]=>pose.x and list[1]=>pose.y
        Returns
        -------
        [Tuple]: tuple[0]=>pixel value in x direction, i.e. column index.
                 tuple[1]=>pixel value in y direction, i.e. row index.
"""
p_x, p_y = point
i_x = math.floor((p_x - self.__map_meta["origin"][0]) / self.__map_meta["resolution"])
i_y = math.floor((p_y - self.__map_meta["origin"][1]) / self.__map_meta["resolution"])
# because origin in yaml is at bottom left of image
i_y = self.__grid_map.shape[0] - i_y
return i_x, i_y
def __transform_distance_to_pixels(self, distance):
"""
Converts the distance in meters to number of pixels based on the resolution.
Parameters
----------
distance: value in meters
Returns
-------
        [Integer]: number of pixels which represent the same distance.
"""
return math.ceil(distance / self.__map_meta["resolution"])
def __is_obstacle_in_distance(self, img_point, distance):
"""
        Checks if any obstacle is in the vicinity of the given image point.
        Parameters
        ----------
        img_point: pixel values of the point
        distance: distance in pixels within which there shouldn't be any obstacle.
        Returns
        -------
        [Bool]: True if any obstacle is found, else False.
"""
# need to make sure that patch xmin & ymin are >=0,
# because of python's negative indexing capability
row_start_idx = 0 if img_point[1] - distance < 0 else img_point[1] - distance
col_start_idx = 0 if img_point[0] - distance < 0 else img_point[0] - distance
# image point acts as the center of the square, where each side of square is of size
# 2xdistance
patch = self.__grid_map[row_start_idx : img_point[1] + distance, col_start_idx : img_point[0] + distance]
obstacles = np.where(patch == True)
return len(obstacles[0]) > 0
def is_valid_pose(self, point, distance=0.2):
"""
        Checks if a given pose is at least "distance" away from any obstacle.
        Parameters
        ----------
        point: pose in 2D space, where point[0]=pose.x and point[1]=pose.y
distance: distance in meters.
Returns
-------
[Bool]: True if pose is valid else False
"""
assert len(point) == 2
img_point = self.__transform_to_image_coordinates(point)
img_pixel_distance = self.__transform_distance_to_pixels(distance)
# Pose is not valid if there is obstacle in the vicinity
return not self.__is_obstacle_in_distance(img_point, img_pixel_distance)
| 5,443 | Python | 33.455696 | 113 | 0.553188 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/set_goal.py | import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
from nav2_msgs.action import NavigateToPose
from .obstacle_map import GridMap
from .goal_generators import RandomGoalGenerator, GoalReader
import sys
from geometry_msgs.msg import PoseWithCovarianceStamped
import time
class SetNavigationGoal(Node):
def __init__(self):
super().__init__("set_navigation_goal")
self.declare_parameters(
namespace="",
parameters=[
("iteration_count", 1),
("goal_generator_type", "RandomGoalGenerator"),
("action_server_name", "navigate_to_pose"),
("obstacle_search_distance_in_meters", 0.2),
("frame_id", "map"),
("map_yaml_path", rclpy.Parameter.Type.STRING),
("goal_text_file_path", rclpy.Parameter.Type.STRING),
("initial_pose", rclpy.Parameter.Type.DOUBLE_ARRAY),
],
)
self.__goal_generator = self.__create_goal_generator()
action_server_name = self.get_parameter("action_server_name").value
self._action_client = ActionClient(self, NavigateToPose, action_server_name)
self.MAX_ITERATION_COUNT = self.get_parameter("iteration_count").value
assert self.MAX_ITERATION_COUNT > 0
self.curr_iteration_count = 1
self.__initial_goal_publisher = self.create_publisher(PoseWithCovarianceStamped, "/initialpose", 1)
self.__initial_pose = self.get_parameter("initial_pose").value
self.__is_initial_pose_sent = True if self.__initial_pose is None else False
def __send_initial_pose(self):
"""
Publishes the initial pose.
This function is only called once that too before sending any goal pose
to the mission server.
"""
goal = PoseWithCovarianceStamped()
goal.header.frame_id = self.get_parameter("frame_id").value
goal.header.stamp = self.get_clock().now().to_msg()
goal.pose.pose.position.x = self.__initial_pose[0]
goal.pose.pose.position.y = self.__initial_pose[1]
goal.pose.pose.position.z = self.__initial_pose[2]
goal.pose.pose.orientation.x = self.__initial_pose[3]
goal.pose.pose.orientation.y = self.__initial_pose[4]
goal.pose.pose.orientation.z = self.__initial_pose[5]
goal.pose.pose.orientation.w = self.__initial_pose[6]
self.__initial_goal_publisher.publish(goal)
def send_goal(self):
"""
Sends the goal to the action server.
"""
if not self.__is_initial_pose_sent:
self.get_logger().info("Sending initial pose")
self.__send_initial_pose()
self.__is_initial_pose_sent = True
            # Assumption: the initial pose takes effect within this duration after being published once.
            # This could be replaced with a more sophisticated check, e.g. the /particlecloud topic
            # publishes no messages until the initial pose is set.
time.sleep(10)
self.get_logger().info("Sending first goal")
self._action_client.wait_for_server()
goal_msg = self.__get_goal()
if goal_msg is None:
rclpy.shutdown()
sys.exit(1)
self._send_goal_future = self._action_client.send_goal_async(
goal_msg, feedback_callback=self.__feedback_callback
)
self._send_goal_future.add_done_callback(self.__goal_response_callback)
def __goal_response_callback(self, future):
"""
        Callback function to check the response (goal accepted/rejected) from the server.\n
        If the goal is rejected, execution stops for now. (This could be changed to resample the pose if rejected.)
"""
goal_handle = future.result()
if not goal_handle.accepted:
self.get_logger().info("Goal rejected :(")
rclpy.shutdown()
return
self.get_logger().info("Goal accepted :)")
self._get_result_future = goal_handle.get_result_async()
self._get_result_future.add_done_callback(self.__get_result_callback)
def __get_goal(self):
"""
Get the next goal from the goal generator.
Returns
-------
[NavigateToPose][goal] or None if the next goal couldn't be generated.
"""
goal_msg = NavigateToPose.Goal()
goal_msg.pose.header.frame_id = self.get_parameter("frame_id").value
goal_msg.pose.header.stamp = self.get_clock().now().to_msg()
pose = self.__goal_generator.generate_goal()
# couldn't sample a pose which is not close to obstacles. Rare but might happen in dense maps.
if pose is None:
self.get_logger().error(
"Could not generate next goal. Returning. Possible reasons for this error could be:"
)
self.get_logger().error(
"1. If you are using GoalReader then please make sure iteration count <= number of goals avaiable in file."
)
self.get_logger().error(
"2. If RandomGoalGenerator is being used then it was not able to sample a pose which is given distance away from the obstacles."
)
return
self.get_logger().info("Generated goal pose: {0}".format(pose))
goal_msg.pose.pose.position.x = pose[0]
goal_msg.pose.pose.position.y = pose[1]
goal_msg.pose.pose.orientation.x = pose[2]
goal_msg.pose.pose.orientation.y = pose[3]
goal_msg.pose.pose.orientation.z = pose[4]
goal_msg.pose.pose.orientation.w = pose[5]
return goal_msg
def __get_result_callback(self, future):
"""
        Callback to check the result.\n
        It calls send_goal() again if the number of goals sent so far is less than the required goal count.
"""
# Nav2 is sending empty message for success as well as for failure.
result = future.result().result
self.get_logger().info("Result: {0}".format(result.result))
if self.curr_iteration_count < self.MAX_ITERATION_COUNT:
self.curr_iteration_count += 1
self.send_goal()
else:
rclpy.shutdown()
def __feedback_callback(self, feedback_msg):
"""
        This is the feedback callback. We can compare/compute/log while the robot is on its way to the goal.
"""
# self.get_logger().info('FEEDBACK: {}\n'.format(feedback_msg))
pass
def __create_goal_generator(self):
"""
Creates the GoalGenerator object based on the specified ros param value.
"""
goal_generator_type = self.get_parameter("goal_generator_type").value
goal_generator = None
if goal_generator_type == "RandomGoalGenerator":
if self.get_parameter("map_yaml_path").value is None:
self.get_logger().info("Yaml file path is not given. Returning..")
sys.exit(1)
yaml_file_path = self.get_parameter("map_yaml_path").value
grid_map = GridMap(yaml_file_path)
obstacle_search_distance_in_meters = self.get_parameter("obstacle_search_distance_in_meters").value
assert obstacle_search_distance_in_meters > 0
goal_generator = RandomGoalGenerator(grid_map, obstacle_search_distance_in_meters)
elif goal_generator_type == "GoalReader":
if self.get_parameter("goal_text_file_path").value is None:
self.get_logger().info("Goal text file path is not given. Returning..")
sys.exit(1)
file_path = self.get_parameter("goal_text_file_path").value
goal_generator = GoalReader(file_path)
else:
self.get_logger().info("Invalid goal generator specified. Returning...")
sys.exit(1)
return goal_generator
def main():
rclpy.init()
set_goal = SetNavigationGoal()
result = set_goal.send_goal()
rclpy.spin(set_goal)
if __name__ == "__main__":
main()
| 8,046 | Python | 38.253658 | 144 | 0.608128 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/goal_reader.py | from .goal_generator import GoalGenerator
class GoalReader(GoalGenerator):
def __init__(self, file_path):
self.__file_path = file_path
self.__generator = self.__get_goal()
def generate_goal(self, max_num_of_trials=1000):
try:
return next(self.__generator)
except StopIteration:
return
def __get_goal(self):
for row in open(self.__file_path, "r"):
yield list(map(float, row.strip().split(" ")))
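# Note on the goals file format: each line is parsed as space-separated floats
# and consumed by set_goal.py as [pose.x, pose.y, orientation.x, orientation.y,
# orientation.z, orientation.w], so the assumed layout of one line is, e.g.
# (illustrative values only):
#   1.5 2.0 0.0 0.0 0.0 1.0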
| 486 | Python | 26.055554 | 58 | 0.584362 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/random_goal_generator.py | import numpy as np
from .goal_generator import GoalGenerator
class RandomGoalGenerator(GoalGenerator):
"""
Random goal generator.
parameters
----------
grid_map: GridMap Object
distance: distance in meters to check vicinity for obstacles.
"""
def __init__(self, grid_map, distance):
self.__grid_map = grid_map
self.__distance = distance
def generate_goal(self, max_num_of_trials=1000):
"""
Generate the goal.
Parameters
----------
        max_num_of_trials: maximum number of pose generation attempts in case a generated pose is not a valid pose.
        Returns
        -------
        [List][Pose]: Pose in format [pose.x,pose.y,orientation.x,orientation.y,orientation.z,orientation.w]
"""
range_ = self.__grid_map.get_range()
trial_count = 0
while trial_count < max_num_of_trials:
x = np.random.uniform(range_[0][0], range_[0][1])
y = np.random.uniform(range_[1][0], range_[1][1])
orient_x = np.random.uniform(0, 1)
orient_y = np.random.uniform(0, 1)
orient_z = np.random.uniform(0, 1)
orient_w = np.random.uniform(0, 1)
if self.__grid_map.is_valid_pose([x, y], self.__distance):
goal = [x, y, orient_x, orient_y, orient_z, orient_w]
return goal
trial_count += 1
| 1,405 | Python | 30.954545 | 107 | 0.560854 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/__init__.py | from .random_goal_generator import RandomGoalGenerator
from .goal_reader import GoalReader
| 91 | Python | 29.666657 | 54 | 0.857143 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/goal_generator.py | from abc import ABC, abstractmethod
class GoalGenerator(ABC):
"""
Parent class for the Goal generators
"""
def __init__(self):
pass
@abstractmethod
def generate_goal(self, max_num_of_trials=2000):
"""
Generate the goal.
Parameters
----------
        max_num_of_trials: maximum number of pose generation attempts in case a generated pose is not a valid pose.
        Returns
        -------
        [List][Pose]: Pose in format [pose.x,pose.y,orientation.x,orientation.y,orientation.z,orientation.w]
"""
pass
| 582 | Python | 21.423076 | 107 | 0.580756 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/humble_ws/src/navigation/isaac_ros_navigation_goal/assets/carter_warehouse_navigation.yaml | image: carter_warehouse_navigation.png
resolution: 0.05
origin: [-11.975, -17.975, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 142 | YAML | 19.428569 | 38 | 0.739437 |
NVIDIA-Omniverse/RC-Car-CAD/Readme.md | # RC CAR CAD 1.0

This repository contains engineering data for a remote control car design. This data includes CAD, CAE, BOM and any other data used to design the vehicle. Each release in the repo represents a milestone in the design process.
Release 1.0 is the milestone where the car can be exported to NVIDIA Omniverse and the vehicle suspension and steering can be rigged using physics joints.
The purpose of this data set is to give anyone working with NVIDIA Omniverse production-quality CAD data to work with as they develop Omniverse applications, extensions, and/or microservices. This data may also be used for demonstrations, tutorials, engineering design process research, or however else it is needed.
The data is intentionally being released before it is complete so that it represents not just a finished product, but also an in-process product throughout its design. In this way the data can be used to facilitate in-process design workflows.
The assembly is modeled using NX. To open the full assembly, open "_Class1RC.prt". Subassemblies are in corresponding subfolders. Not all of the assemblies and parts are organized correctly, which is common at the early design phase of a product. As the design matures, older data will become better organized and newly introduced data will be disorganized, as is the way of these things. | 1,390 | Markdown | 98.357136 | 382 | 0.805036 |
NVIDIA-Omniverse/usd_scene_construction_utils/setup.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
setup(
name="usd_scene_construction_utils",
version="0.0.1",
description="",
py_modules=["usd_scene_construction_utils"]
) | 864 | Python | 35.041665 | 98 | 0.752315 |
NVIDIA-Omniverse/usd_scene_construction_utils/DCO.md | Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved. | 1,365 | Markdown | 39.176469 | 68 | 0.76044 |
NVIDIA-Omniverse/usd_scene_construction_utils/README.md | # USD Scene Construction Utilities
USD Scene Construction Utilities is an open-source collection of utilities
built on top of the USD Python API that makes it easy for beginners to create
and modify USD scenes.
<img src="examples/hand_truck_w_boxes/landing_graphic.jpg" height="320"/>
If you find that USD Scene Construction Utilities is too limited for your use case, you may still
find the open-source code a useful reference for working with the USD
Python API.
> Please note, USD Scene Construction Utilities **is not** a comprehensive USD Python API wrapper. That
> said, it may help you with your project, or you might find the open-source code helpful
> as a reference for learning USD. See the full [disclaimer](#disclaimer)
> below for more information. If you run into any issues or have any questions, please [let us know](../../issues)!
## Usage
USD Scene Construction Utilities exposes a variety of utility functions that operate on the USD stage
like this:
```python
from usd_scene_construction_utils import (
    new_in_memory_stage, add_plane, add_box, stack_prims, export_stage
)
stage = new_in_memory_stage()
floor = add_plane(stage, "/scene/floor", size=(500, 500))
box = add_box(stage, "/scene/box", size=(100, 100, 100))
stack_prims([floor, box], axis=2)
export_stage(stage, "hello_box.usda", default_prim="/scene")
```
If you don't want to use the higher level functions, you can read the [usd_scene_construction_utils.py](usd_scene_construction_utils.py)
file to learn some ways to use the USD Python API directly.
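For instance, a helper like `add_plane` is built from a handful of direct `pxr` calls. The sketch below is a simplified version of what the utilities do under the hood (it omits the normals, UV, and extent setup the real helper performs):
```python
from pxr import Usd, UsdGeom

stage = Usd.Stage.CreateInMemory()
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)

# the helpers create an Xform prim with a child mesh
stage.DefinePrim("/scene/floor", "Xform")
mesh = UsdGeom.Mesh.Define(stage, "/scene/floor/mesh")
mesh.CreatePointsAttr().Set([(-250, -250, 0), (250, -250, 0), (-250, 250, 0), (250, 250, 0)])
mesh.CreateFaceVertexCountsAttr().Set([4])
mesh.CreateFaceVertexIndicesAttr().Set([0, 1, 3, 2])

stage.Export("floor.usda")
```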
After building a scene with USD Scene Construction Utilities, we recommend using Omniverse Replicator
for generating synthetic data, while performing additional randomizations that
retain the structure of the scene, like camera position, lighting, and materials.
To get started, you may find the [using replicator with a fully developed scene](https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/apis_with_fully_developed_scene.html)
example helpful.
## Installation
### Step 1 - Clone the repo
```bash
git clone https://github.com/NVIDIA-Omniverse/usd_scene_construction_utils
```
### Step 2 - Make it discoverable
If you're outside of omniverse:
```bash
python3 setup.py develop
```
If you're inside omniverse:
```python3
import sys
sys.path.append("/path/to/usd_scene_construction_utils/")
```
## Examples
| Graphic | Example | Description | Omniverse Only |
|---|---|---|---|
| <img src="examples/hello_box/landing_graphic.jpg" width="128"/> | [hello_box](examples/hello_box/) | First example to run. Adds a grid of boxes. Doesn't use any assets. | |
| <img src="examples/bind_mdl_material/landing_graphic.jpg" width="128"/> | [bind_mdl_material](examples/bind_mdl_material/) | Shows how to bind a material to an object. Needs omniverse to access material assets on nucleus server. | :heavy_check_mark: |
| <img src="examples/hand_truck_w_boxes/landing_graphic.jpg" width="128"/>| [hand_truck_w_boxes](examples/hand_truck_w_boxes/) | Create a grid of hand trucks with randomly stacked boxes. Needs omniverse to access cardboard box and hand truck assets. | :heavy_check_mark: |
## Disclaimer
This project **is not** a comprehensive USD Python API wrapper. It currently only exposes
a very limited subset of what USD is capable of and is subject
to change and breakage. The goal of this project is to make it easy to generate
structured scenes using USD and to give you an introduction to USD through
both examples and by reading the usd_scene_construction_utils source code.
If you're developing a larger project using usd_scene_construction_utils as a dependency, you may
want to fork the project, or simply reference the source code you're interested in. We're providing this project because we think the community will
benefit from more open-source code and examples that use USD.
That said, you may still find usd_scene_construction_utils helpful as-is, and you're welcome to let
us know if you run into any issues, have any questions, or would like to contribute.
## Contributing
- Ask a question, request a feature, file a bug by creating an [issue](#).
- Add new functionality, or fix a bug, by filing a [pull request](#).
## See also
Here are other USD resources we've found helpful.
1. [NVIDIA USD Snippets](https://docs.omniverse.nvidia.com/prod_usd/prod_kit/programmer_ref/usd.html) Super helpful collection of documented USD snippets for getting familiar with directly working with USD Python API.
2. [USD C++ API Docs](https://openusd.org/release/api/index.html). Helpful for learning the full set of USD API functions. Most functions share very similar naming to the Python counterpart.
3. [NVIDIA Omniverse Replicator](https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator.html) - Helpful for taking USD scenes and efficiently generating synthetic data, like segmentation masks, 3D bounding boxes, depth images and more. Also
includes a variety of utilities for domain randomization.
4. [NVIDIA Omniverse](https://www.nvidia.com/en-us/omniverse/) - Large ecosystem of
tools for creating 3D worlds. Omniverse create is needed for executing many of the
examples here. Assets on the Omniverse nucleus servers make it easy to create
high quality scenes with rich geometries and materials. | 5,356 | Markdown | 47.261261 | 274 | 0.762883 |
NVIDIA-Omniverse/usd_scene_construction_utils/usd_scene_construction_utils.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import math
from typing import Optional, Sequence, Tuple
from typing_extensions import Literal
from pxr import Gf, Sdf, Usd, UsdGeom, UsdLux, UsdShade
def new_in_memory_stage() -> Usd.Stage:
"""Creates a new in memory USD stage.
Returns:
Usd.Stage: The USD stage.
"""
stage = Usd.Stage.CreateInMemory()
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
return stage
def new_omniverse_stage() -> Usd.Stage:
"""Creates a new Omniverse USD stage.
This method creates a new Omniverse USD stage. This will clear the active
omniverse stage, replacing it with a new one.
Returns:
Usd.Stage: The Omniverse USD stage.
"""
try:
import omni.usd
except ImportError:
raise ImportError("Omniverse not found. This method is unavailable.")
omni.usd.get_context().new_stage()
stage = omni.usd.get_context().get_stage()
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
return stage
def get_omniverse_stage() -> Usd.Stage:
"""Returns the current Omniverse USD stage.
Returns:
Usd.Stage: The currently active Omniverse USD stage.
"""
try:
import omni.usd
except ImportError:
raise ImportError("Omniverse not found. This method is unavailable.")
stage = omni.usd.get_context().get_stage()
return stage
def add_usd_ref(stage: Usd.Stage, path: str, usd_path: str) -> Usd.Prim:
"""Adds an external USD reference to a USD stage.
Args:
stage (:class:`Usd.Stage`): The USD stage to modify.
path (str): The path to add the USD reference.
usd_path (str): The filepath or URL of the USD reference (ie: a Nucleus
server URL).
Returns:
Usd.Prim: The created USD prim.
"""
stage.DefinePrim(path, "Xform")
prim_ref = stage.DefinePrim(os.path.join(path, "ref"))
prim_ref.GetReferences().AddReference(usd_path)
return get_prim(stage, path)
def _make_box_mesh(size: Tuple[float, float, float]):
# private utility function used by make_box
numFaces = 6
numVertexPerFace = 4
# Generate vertices on box
vertices = []
for i in [-1, 1]:
for j in [-1, 1]:
for k in [-1, 1]:
vertices.append((i * size[0], j * size[1], k * size[2]))
# Make faces for box (ccw convention)
faceVertexCounts = [numVertexPerFace] * numFaces
faceVertexIndices = [
2, 0, 1, 3,
4, 6, 7, 5,
0, 4, 5, 1,
6, 2, 3, 7,
0, 2, 6, 4,
5, 7, 3, 1,
]
# Make normals for face vertices
_faceVertexNormals = [
(-1, 0, 0),
(1, 0, 0),
(0, -1, 0),
(0, 1, 0),
(0, 0, -1),
(0, 0, 1),
]
faceVertexNormals = []
for n in _faceVertexNormals:
for i in range(numVertexPerFace):
faceVertexNormals.append(n)
# Assign uv-mapping for face vertices
_faceUvMaps = [
(0, 0), (1, 0), (1, 1), (0, 1)
]
faceUvMaps = []
for i in range(numFaces):
for uvm in _faceUvMaps:
faceUvMaps.append(uvm)
return (vertices, faceVertexCounts, faceVertexIndices, faceVertexNormals,
faceUvMaps)
def add_box(stage: Usd.Stage, path: str, size: Tuple[float, float, float]) -> Usd.Prim:
"""Adds a 3D box to a USD stage.
This adds a 3D box to the USD stage. The box is created with it's center
at (x, y, z) = (0, 0, 0).
Args:
stage (:class:`Usd.Stage`): The USD stage to modify.
path (str): The path to add the USD prim.
size (Tuple[float, float, float]): The size of the box (x, y, z sizes).
Returns:
Usd.Prim: The created USD prim.
"""
half_size = (size[0] / 2, size[1] / 2, size[2] / 2)
stage.DefinePrim(path, "Xform")
(vertices, faceVertexCounts, faceVertexIndices, faceVertexNormals,
faceUvMaps) = _make_box_mesh(half_size)
# create mesh at {path}/mesh, but return prim at {path}
prim: UsdGeom.Mesh = UsdGeom.Mesh.Define(stage, os.path.join(path, "mesh"))
prim.CreateExtentAttr().Set([
(-half_size[0], -half_size[1], -half_size[2]),
(half_size[0], half_size[1], half_size[2])
])
prim.CreateFaceVertexCountsAttr().Set(faceVertexCounts)
prim.CreateFaceVertexIndicesAttr().Set(faceVertexIndices)
var = UsdGeom.Primvar(prim.CreateNormalsAttr())
var.Set(faceVertexNormals)
var.SetInterpolation(UsdGeom.Tokens.faceVarying)
var = UsdGeom.PrimvarsAPI(prim).CreatePrimvar("primvars:st", Sdf.ValueTypeNames.Float2Array)
var.Set(faceUvMaps)
var.SetInterpolation(UsdGeom.Tokens.faceVarying)
prim.CreatePointsAttr().Set(vertices)
prim.CreateSubdivisionSchemeAttr().Set(UsdGeom.Tokens.none)
return get_prim(stage, path)
def add_xform(stage: Usd.Stage, path: str):
"""Adds a USD transform (Xform) to a USD stage.
This method adds a USD Xform to the USD stage at a given path. This is
helpful when you want to add hierarchy to a scene. After you create a
transform, any USD prims located under the transform path will be children
of the transform and can be moved as a group.
Args:
stage (:class:`Usd.Stage`): The USD stage to modify.
path (str): The path to add the USD prim.
Returns:
Usd.Prim: The created USD prim.
"""
stage.DefinePrim(path, "Xform")
return get_prim(stage, path)
def add_plane(
stage: Usd.Stage,
path: str,
size: Tuple[float, float],
uv: Tuple[float, float]=(1, 1)):
"""Adds a 2D plane to a USD stage.
Args:
stage (Usd.Stage): The USD stage to modify.
path (str): The path to add the USD prim.
size (Tuple[float, float]): The size of the 2D plane (x, y).
uv (Tuple[float, float]): The UV mapping for textures applied to the
plane. For example, uv=(1, 1), means the texture will be spread
to fit the full size of the plane. uv=(10, 10) means the texture
will repeat 10 times along each dimension. uv=(5, 10) means the
texture will be scaled to repeat 5 times along the x dimension and
10 times along the y direction.
Returns:
Usd.Prim: The created USD prim.
"""
stage.DefinePrim(path, "Xform")
# create mesh at {path}/mesh, but return prim at {path}
prim: UsdGeom.Mesh = UsdGeom.Mesh.Define(stage, os.path.join(path, "mesh"))
prim.CreateExtentAttr().Set([
(-size[0], -size[1], 0),
(size[0], size[1], 0)
])
prim.CreateFaceVertexCountsAttr().Set([4])
prim.CreateFaceVertexIndicesAttr().Set([0, 1, 3, 2])
var = UsdGeom.Primvar(prim.CreateNormalsAttr())
var.Set([(0, 0, 1)] * 4)
var.SetInterpolation(UsdGeom.Tokens.faceVarying)
var = UsdGeom.PrimvarsAPI(prim).CreatePrimvar("primvars:st",
Sdf.ValueTypeNames.Float2Array)
var.Set(
[(0, 0), (uv[0], 0), (uv[0], uv[1]), (0, uv[1])]
)
var.SetInterpolation(UsdGeom.Tokens.faceVarying)
prim.CreatePointsAttr().Set([
(-size[0], -size[1], 0),
(size[0], -size[1], 0),
(-size[0], size[1], 0),
(size[0], size[1], 0),
])
prim.CreateSubdivisionSchemeAttr().Set(UsdGeom.Tokens.none)
return get_prim(stage, path)
def add_dome_light(stage: Usd.Stage, path: str, intensity: float = 1000,
angle: float = 180, exposure: float=0.) -> UsdLux.DomeLight:
"""Adds a dome light to a USD stage.
Args:
stage (Usd.Stage): The USD stage to modify.
path (str): The path to add the USD prim.
intensity (float): The intensity of the dome light (default 1000).
angle (float): The angle of the dome light (default 180)
        exposure (float): The exposure of the dome light (default 0)
Returns:
UsdLux.DomeLight: The created Dome light.
"""
light = UsdLux.DomeLight.Define(stage, path)
# intensity
light.CreateIntensityAttr().Set(intensity)
light.CreateTextureFormatAttr().Set(UsdLux.Tokens.latlong)
light.CreateExposureAttr().Set(exposure)
# cone angle
shaping = UsdLux.ShapingAPI(light)
shaping.Apply(light.GetPrim())
shaping.CreateShapingConeAngleAttr().Set(angle)
shaping.CreateShapingConeSoftnessAttr()
shaping.CreateShapingFocusAttr()
shaping.CreateShapingFocusTintAttr()
shaping.CreateShapingIesFileAttr()
return light
def add_sphere_light(stage: Usd.Stage, path: str, intensity=30000,
radius=50, angle=180, exposure=0.):
"""Adds a sphere light to a USD stage.
Args:
stage (Usd.Stage): The USD stage to modify.
path (str): The path to add the USD prim.
        radius (float): The radius of the sphere light (default 50).
        intensity (float): The intensity of the sphere light (default 30000).
        angle (float): The angle of the sphere light (default 180)
        exposure (float): The exposure of the sphere light (default 0)
Returns:
UsdLux.SphereLight: The created sphere light.
"""
light = UsdLux.SphereLight.Define(stage, path)
# intensity
light.CreateIntensityAttr().Set(intensity)
light.CreateRadiusAttr().Set(radius)
light.CreateExposureAttr().Set(exposure)
# cone angle
shaping = UsdLux.ShapingAPI(light)
shaping.Apply(light.GetPrim())
shaping.CreateShapingConeAngleAttr().Set(angle)
shaping.CreateShapingConeSoftnessAttr()
shaping.CreateShapingFocusAttr()
shaping.CreateShapingFocusTintAttr()
shaping.CreateShapingIesFileAttr()
return light
def add_mdl_material(stage: Usd.Stage, path: str, material_url: str,
material_name: Optional[str] = None) -> UsdShade.Material:
"""Adds an Omniverse MDL material to a USD stage.
*Omniverse only*
Args:
stage (Usd.Stage): The USD stage to modify.
path (str): The path to add the USD prim.
        material_url (str): The URL of the material, such as on a Nucleus server.
material_name (Optional[str]): An optional name to give the material. If
one is not provided, it will default to the filename of the material
URL (excluding the extension).
returns:
UsdShade.Material: The created USD material.
"""
try:
import omni.usd
except ImportError:
raise ImportError("Omniverse not found. This method is unavailable.")
# Set default mtl_name
if material_name is None:
material_name = os.path.basename(material_url).split('.')[0]
# Create material using omniverse kit
if not stage.GetPrimAtPath(path):
success, result = omni.kit.commands.execute(
"CreateMdlMaterialPrimCommand",
mtl_url=material_url,
mtl_name=material_name,
mtl_path=path
)
# Get material from stage
material = UsdShade.Material(stage.GetPrimAtPath(path))
return material
def add_camera(
stage: Usd.Stage,
path: str,
focal_length: float = 35,
horizontal_aperature: float = 20.955,
vertical_aperature: float = 20.955,
clipping_range: Tuple[float, float] = (0.1, 100000)
) -> UsdGeom.Camera:
"""Adds a camera to a USD stage.
Args:
stage (Usd.Stage): The USD stage to modify.
path (str): The path to add the USD prim.
focal_length (float): The focal length of the camera (default 35).
        horizontal_aperature (float): The horizontal aperture of the camera
            (default 20.955).
        vertical_aperature (float): The vertical aperture of the camera
            (default 20.955).
clipping_range (Tuple[float, float]): The clipping range of the camera.
returns:
UsdGeom.Camera: The created USD camera.
"""
camera = UsdGeom.Camera.Define(stage, path)
camera.CreateFocalLengthAttr().Set(focal_length)
camera.CreateHorizontalApertureAttr().Set(horizontal_aperature)
camera.CreateVerticalApertureAttr().Set(vertical_aperature)
camera.CreateClippingRangeAttr().Set(clipping_range)
return camera
def get_prim(stage: Usd.Stage, path: str) -> Usd.Prim:
"""Returns a prim at the specified path in a USD stage.
Args:
stage (Usd.Stage): The USD stage to query.
path (str): The path of the prim.
Returns:
Usd.Prim: The USD prim at the specified path.
"""
return stage.GetPrimAtPath(path)
def get_material(stage: Usd.Stage, path: str) -> UsdShade.Material:
"""Returns a material at the specified path in a USD stage.
Args:
stage (Usd.Stage): The USD stage to query.
path (str): The path of the material.
Returns:
UsdShade.Material: The USD material at the specified path.
"""
prim = get_prim(stage, path)
return UsdShade.Material(prim)
def export_stage(stage: Usd.Stage, filepath: str, default_prim=None):
"""Exports a USD stage to a given filepath.
Args:
stage (Usd.Stage): The USD stage to export.
path (str): The filepath to export the USD stage to.
default_prim (Optional[str]): The path of the USD prim in the
stage to set as the default prim. This is useful when you
want to use the exported USD as a reference, or when you want
to place the USD in Omniverse.
"""
if default_prim is not None:
stage.SetDefaultPrim(get_prim(stage, default_prim))
stage.Export(filepath)
def add_semantics(prim: Usd.Prim, type: str, name: str):
"""Adds semantics to a USD prim.
This function adds semantics to a USD prim. This is useful for assigning
classes to objects when generating synthetic data with Omniverse Replicator.
For example:
add_semantics(dog_prim, "class", "dog")
add_semantics(cat_prim, "class", "cat")
Args:
prim (Usd.Prim): The USD prim to modify.
type (str): The semantics type. This depends on how the data is ingested.
Typically, when using Omniverse replicator you will set this to "class".
name (str): The value of the semantic type. Typically, this would
correspond to the class label.
Returns:
Usd.Prim: The USD prim with added semantics.
"""
prim.AddAppliedSchema(f"SemanticsAPI:{type}_{name}")
prim.CreateAttribute(f"semantic:{type}_{name}:params:semanticType",
Sdf.ValueTypeNames.String).Set(type)
prim.CreateAttribute(f"semantic:{type}_{name}:params:semanticData",
Sdf.ValueTypeNames.String).Set(name)
return prim
def bind_material(prim: Usd.Prim, material: UsdShade.Material):
"""Binds a USD material to a USD prim.
Args:
prim (Usd.Prim): The USD prim to modify.
material (UsdShade.Material): The USD material to bind to the USD prim.
Returns:
Usd.Prim: The modified USD prim with the specified material bound to it.
"""
prim.ApplyAPI(UsdShade.MaterialBindingAPI)
UsdShade.MaterialBindingAPI(prim).Bind(material,
UsdShade.Tokens.strongerThanDescendants)
return prim
def collapse_xform(prim: Usd.Prim):
"""Collapses all xforms on a given USD prim.
This method collapses all Xforms on a given prim. For example,
a series of rotations, translations would be combined into a single matrix
operation.
Args:
prim (Usd.Prim): The Usd.Prim to collapse the transforms of.
Returns:
Usd.Prim: The Usd.Prim.
"""
x = UsdGeom.Xformable(prim)
local = x.GetLocalTransformation()
prim.RemoveProperty("xformOp:translate")
prim.RemoveProperty("xformOp:transform")
prim.RemoveProperty("xformOp:rotateX")
prim.RemoveProperty("xformOp:rotateY")
prim.RemoveProperty("xformOp:rotateZ")
var = x.MakeMatrixXform()
var.Set(local)
return prim
def get_xform_op_order(prim: Usd.Prim):
"""Returns the order of Xform ops on a given prim."""
x = UsdGeom.Xformable(prim)
op_order = x.GetXformOpOrderAttr().Get()
if op_order is not None:
op_order = list(op_order)
return op_order
else:
return []
def set_xform_op_order(prim: Usd.Prim, op_order: Sequence[str]):
"""Sets the order of Xform ops on a given prim"""
x = UsdGeom.Xformable(prim)
x.GetXformOpOrderAttr().Set(op_order)
return prim
def xform_op_move_end_to_front(prim: Usd.Prim):
"""Pops the last xform op on a given prim and adds it to the front."""
order = get_xform_op_order(prim)
end = order.pop(-1)
order.insert(0, end)
set_xform_op_order(prim, order)
return prim
def get_num_xform_ops(prim: Usd.Prim) -> int:
"""Returns the number of xform ops on a given prim."""
return len(get_xform_op_order(prim))
def apply_xform_matrix(prim: Usd.Prim, transform: np.ndarray):
"""Applies a homogeneous transformation matrix to the current prim's xform list.
Args:
prim (Usd.Prim): The USD prim to transform.
transform (np.ndarray): The 4x4 homogeneous transform matrix to apply.
Returns:
Usd.Prim: The modified USD prim with the provided transform applied after current transforms.
"""
x = UsdGeom.Xformable(prim)
x.AddTransformOp(opSuffix=f"num_{get_num_xform_ops(prim)}").Set(
Gf.Matrix4d(transform)
)
xform_op_move_end_to_front(prim)
return prim
def scale(prim: Usd.Prim, scale: Tuple[float, float, float]):
"""Scales a prim along the (x, y, z) dimensions.
Args:
prim (Usd.Prim): The USD prim to scale.
scale (Tuple[float, float, float]): The scaling factors for the (x, y, z) dimensions.
Returns:
Usd.Prim: The scaled prim.
"""
x = UsdGeom.Xformable(prim)
x.AddScaleOp(opSuffix=f"num_{get_num_xform_ops(prim)}").Set(scale)
xform_op_move_end_to_front(prim)
return prim
def translate(prim: Usd.Prim, offset: Tuple[float, float, float]):
"""Translates a prim along the (x, y, z) dimensions.
Args:
prim (Usd.Prim): The USD prim to translate.
offset (Tuple[float, float, float]): The offsets for the (x, y, z) dimensions.
Returns:
Usd.Prim: The translated prim.
"""
x = UsdGeom.Xformable(prim)
x.AddTranslateOp(opSuffix=f"num_{get_num_xform_ops(prim)}").Set(offset)
xform_op_move_end_to_front(prim)
return prim
def rotate_x(prim: Usd.Prim, angle: float):
"""Rotates a prim around the X axis.
Args:
prim (Usd.Prim): The USD prim to rotate.
angle (float): The rotation angle in degrees.
Returns:
Usd.Prim: The rotated prim.
"""
x = UsdGeom.Xformable(prim)
x.AddRotateXOp(opSuffix=f"num_{get_num_xform_ops(prim)}").Set(angle)
xform_op_move_end_to_front(prim)
return prim
def rotate_y(prim: Usd.Prim, angle: float):
"""Rotates a prim around the Y axis.
Args:
prim (Usd.Prim): The USD prim to rotate.
angle (float): The rotation angle in degrees.
Returns:
Usd.Prim: The rotated prim.
"""
x = UsdGeom.Xformable(prim)
x.AddRotateYOp(opSuffix=f"num_{get_num_xform_ops(prim)}").Set(angle)
xform_op_move_end_to_front(prim)
return prim
def rotate_z(prim: Usd.Prim, angle: float):
"""Rotates a prim around the Z axis.
Args:
prim (Usd.Prim): The USD prim to rotate.
angle (float): The rotation angle in degrees.
Returns:
Usd.Prim: The rotated prim.
"""
x = UsdGeom.Xformable(prim)
x.AddRotateZOp(opSuffix=f"num_{get_num_xform_ops(prim)}").Set(angle)
xform_op_move_end_to_front(prim)
return prim
def stack_prims(prims: Sequence[Usd.Prim], axis: int = 2, gap: float = 0, align_center=False):
"""Stacks prims on top of each other (or side-by-side).
This function stacks prims by placing them so their bounding boxes
are adjacent along a given axis.
Args:
        prims (Sequence[Usd.Prim]): The USD prims to stack.
        axis (int): The axis along which to stack the prims. x=0, y=1, z=2. Default 2.
        gap (float or Sequence[float]): The spacing to add between stacked elements,
            either a single value or one value per element.
        align_center (bool): Whether to align the bounding-box centers of
            consecutive prims along the non-stacking axes. Default False.
Returns:
Sequence[Usd.Prim]: The stacked prims.
"""
for i in range(1, len(prims)):
prev = prims[i - 1]
cur = prims[i]
bb_cur_min, bb_cur_max = compute_bbox(cur)
bb_prev_min, bb_prev_max = compute_bbox(prev)
if align_center:
offset = [
(bb_cur_max[0] + bb_cur_min[0]) / 2. - (bb_prev_max[0] + bb_prev_min[0]) / 2.,
(bb_cur_max[1] + bb_cur_min[1]) / 2. - (bb_prev_max[1] + bb_prev_min[1]) / 2.,
(bb_cur_max[2] + bb_cur_min[2]) / 2. - (bb_prev_max[2] + bb_prev_min[2]) / 2.
]
else:
offset = [0, 0, 0]
offset[axis] = bb_prev_max[axis] - bb_cur_min[axis]
if isinstance(gap, list):
offset[axis] = offset[axis] + gap[i]
else:
offset[axis] = offset[axis] + gap
translate(cur, tuple(offset))
return prims
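# Example usage (a minimal sketch; assumes "floor" and "box" prims were created
# beforehand, e.g. with add_plane and add_box):
#   stack_prims([floor, box], axis=2)
# places the box so its bounding box rests on top of the floor along the z axis.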
def compute_bbox(prim: Usd.Prim) -> \
Tuple[Tuple[float, float, float], Tuple[float, float, float]]:
"""Computes the axis-aligned bounding box for a USD prim.
Args:
prim (Usd.Prim): The USD prim to compute the bounding box of.
Returns:
        Tuple[Tuple[float, float, float], Tuple[float, float, float]]: The ((min_x, min_y, min_z), (max_x, max_y, max_z)) values of the bounding box.
"""
bbox_cache: UsdGeom.BBoxCache = UsdGeom.BBoxCache(
time=Usd.TimeCode.Default(),
includedPurposes=[UsdGeom.Tokens.default_],
useExtentsHint=True
)
total_bounds = Gf.BBox3d()
for p in Usd.PrimRange(prim):
total_bounds = Gf.BBox3d.Combine(
total_bounds, Gf.BBox3d(bbox_cache.ComputeWorldBound(p).ComputeAlignedRange())
)
box = total_bounds.ComputeAlignedBox()
return (box.GetMin(), box.GetMax())
def compute_bbox_size(prim: Usd.Prim) -> Tuple[float, float, float]:
"""Computes the (x, y, z) size of the axis-aligned bounding box for a prim."""
bbox_min, bbox_max = compute_bbox(prim)
size = (
bbox_max[0] - bbox_min[0],
bbox_max[1] - bbox_min[1],
bbox_max[2] - bbox_min[2]
)
return size
def compute_bbox_center(prim: Usd.Prim) -> Tuple[float, float, float]:
"""Computes the (x, y, z) center of the axis-aligned bounding box for a prim."""
bbox_min, bbox_max = compute_bbox(prim)
center = (
(bbox_max[0] + bbox_min[0]) / 2,
(bbox_max[1] + bbox_min[1]) / 2,
(bbox_max[2] + bbox_min[2]) / 2
)
return center
def set_visibility(prim: Usd.Prim,
visibility: Literal["inherited", "invisible"] = "inherited"):
"""Sets the visibility of a prim.
Args:
prim (Usd.Prim): The prim to control the visibility of.
        visibility (str): The visibility of the prim. "inherited" means the
            prim is visible as long as its parent is visible and invisible
            when its parent is invisible. "invisible" means the prim is
            invisible regardless of its parent's visibility.
Returns:
Usd.Prim: The USD prim.
"""
    attr = prim.GetAttribute("visibility")
    if not attr.IsValid():
        attr = prim.CreateAttribute("visibility", Sdf.ValueTypeNames.Token)
attr.Set(visibility)
return prim
def get_visibility(prim: Usd.Prim):
"""Returns the visibility of a given prim.
See set_visibility for details.
"""
return prim.GetAttribute("visibility").Get()
def rad2deg(x):
"""Convert radians to degrees."""
return 180. * x / math.pi
def deg2rad(x):
"""Convert degrees to radians."""
return math.pi * x / 180.
def compute_sphere_point(
elevation: float,
azimuth: float,
distance: float
) -> Tuple[float, float, float]:
"""Compute a sphere point given an elevation, azimuth and distance.
Args:
elevation (float): The elevation in degrees.
azimuth (float): The azimuth in degrees.
distance (float): The distance.
Returns:
Tuple[float, float, float]: The sphere coordinate.
"""
    elevation = deg2rad(elevation)
    azimuth = deg2rad(azimuth)
camera_xy_distance = math.cos(elevation) * distance
camera_x = math.cos(azimuth) * camera_xy_distance
camera_y = math.sin(azimuth) * camera_xy_distance
camera_z = math.sin(elevation) * distance
eye = (
float(camera_x),
float(camera_y),
float(camera_z)
)
return eye
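# Example usage (a minimal sketch; pairing compute_sphere_point with
# compute_look_at_matrix below is an assumed, typical camera-placement pattern;
# "camera_prim" is a hypothetical prim created elsewhere, e.g. with add_camera):
#   eye = compute_sphere_point(elevation=30, azimuth=45, distance=500)
#   matrix = compute_look_at_matrix(at=(0, 0, 0), up=(0, 0, 1), eye=eye)
#   apply_xform_matrix(camera_prim, matrix)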
def compute_look_at_matrix(
at: Tuple[float, float, float],
up: Tuple[float, float, float],
eye: Tuple[float, float, float]
) -> np.ndarray:
"""Computes a 4x4 homogeneous "look at" transformation matrix.
Args:
at (Tuple[float, float, float]): The (x, y, z) location that the transform
should be facing. For example (0, 0, 0) if the transformation should
face the origin.
        up (Tuple[float, float, float]): The up axis for the transform, e.g.
            (0, 0, 1) for the up-axis to correspond to the z-axis.
        eye (Tuple[float, float, float]): The (x, y, z) location of the transform.
            For example, (100, 100, 100) if we want to place a camera at
            (x=100, y=100, z=100).
Returns:
np.ndarray: The 4x4 homogeneous transformation matrix.
"""
at = np.array(at)
up = np.array(up)
up = up / np.linalg.norm(up)
eye = np.array(eye)
# forward axis (z)
z_axis = np.array(eye) - np.array(at)
z_axis = z_axis / np.linalg.norm(z_axis)
# right axis (x)
x_axis = np.cross(up, z_axis)
x_axis = x_axis / np.linalg.norm(x_axis)
# up axis
y_axis = np.cross(z_axis, x_axis)
y_axis = y_axis / np.linalg.norm(y_axis)
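    # Translation occupies the last row: Gf.Matrix4d and USD xform ops use the
    # row-vector convention, so this matrix can be passed to apply_xform_matrix.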
matrix = np.array([
[x_axis[0], x_axis[1], x_axis[2], 0.0],
[y_axis[0], y_axis[1], y_axis[2], 0.0],
[z_axis[0], z_axis[1], z_axis[2], 0.0],
[eye[0], eye[1], eye[2], 1.0]
])
return matrix
| 26,895 | Python | 29.844037 | 148 | 0.625469 |
NVIDIA-Omniverse/usd_scene_construction_utils/examples/bind_mdl_material/main.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from pathlib import Path
sys.path.append(f"{Path.home()}/usd_scene_construction_utils") # use your install path
from usd_scene_construction_utils import (
add_mdl_material,
new_omniverse_stage,
add_plane,
add_box,
stack_prims,
bind_material,
add_dome_light
)
stage = new_omniverse_stage()
# Add cardboard material
cardboard = add_mdl_material(
stage,
"/scene/cardboard",
"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Materials/Base/Wall_Board/Cardboard.mdl"
)
# Add concrete material
concrete = add_mdl_material(
stage,
"/scene/concrete",
"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Materials/Base/Masonry/Concrete_Smooth.mdl"
)
# Add floor plane
floor = add_plane(stage, "/scene/floor", size=(500, 500))
# Add box
box = add_box(stage, "/scene/box", size=(100, 100, 100))
# Stack box on floor
stack_prims([floor, box], axis=2)
# Bind materials to objects
bind_material(floor, concrete)
bind_material(box, cardboard)
# Add dome light
light = add_dome_light(stage, "/scene/dome_light") | 1,784 | Python | 28.262295 | 112 | 0.732063 |
NVIDIA-Omniverse/usd_scene_construction_utils/examples/bind_mdl_material/README.md | # Example - Bind MDL Material
This example demonstrates binding materials to objects. It must be run inside
Omniverse to pull from the rich set of available MDL materials.
<img src="landing_graphic.jpg" height="320"/>
The example should display a box with a cardboard texture and a floor with
a concrete texture.
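The core of the example is two calls per object: create an MDL material, then
bind it to a prim. A minimal sketch (the path and MDL URL are taken from
``main.py`` above; the ``floor`` prim is assumed to already exist):
```python
concrete = add_mdl_material(
    stage,
    "/scene/concrete",
    "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Materials/Base/Masonry/Concrete_Smooth.mdl"
)
bind_material(floor, concrete)
```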
## Instructions
1. Modify the path on line 3 of ``main.py`` to the path you cloned usd_scene_construction_utils
2. Launch [Omniverse Code](https://developer.nvidia.com/omniverse/code-app)
3. Open the script editor
4. Copy the code from ``main.py`` into the script editor
5. Run the script editor.
| 617 | Markdown | 31.526314 | 95 | 0.763371 |
NVIDIA-Omniverse/usd_scene_construction_utils/examples/hand_truck_w_boxes/main.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from pathlib import Path
sys.path.append(f"{Path.home()}/usd_scene_construction_utils") # use your install path
from usd_scene_construction_utils import (
add_usd_ref,
rotate_x,
rotate_y,
rotate_z,
scale,
compute_bbox,
add_xform,
compute_bbox_center,
translate,
set_visibility,
new_omniverse_stage,
add_dome_light,
add_plane,
add_mdl_material,
bind_material
)
import random
from typing import Tuple
box_asset_url = "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Shipping/Cardboard_Boxes/Flat_A/FlatBox_A02_15x21x8cm_PR_NVD_01.usd"
hand_truck_asset_url = "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Equipment/Hand_Trucks/Convertible_Aluminum_A/ConvertableAlumHandTruck_A02_PR_NVD_01.usd"
def add_box_of_size(
stage,
path: str,
size: Tuple[float, float, float]
):
"""Adds a box and re-scales it to match the specified dimensions
"""
# Add USD box
prim = add_usd_ref(stage, path, usd_path=box_asset_url)
rotate_x(prim, random.choice([-90, 0, 90, 180]))
rotate_y(prim, random.choice([-90, 0, 90, 180]))
# Scale USD box to fit dimensions
usd_min, usd_max = compute_bbox(prim)
usd_size = (
usd_max[0] - usd_min[0],
usd_max[1] - usd_min[1],
usd_max[2] - usd_min[2]
)
required_scale = (
size[0] / usd_size[0],
size[1] / usd_size[1],
size[2] / usd_size[2]
)
scale(prim, required_scale)
return prim
def add_random_box_stack(
stage,
path: str,
count_range=(1, 5),
size_range=((30, 30, 10), (50, 50, 25)),
angle_range=(-5, 5),
jitter_range=(-3,3)
):
container = add_xform(stage, path)
count = random.randint(*count_range)
# get sizes and sort
sizes = [
(
random.uniform(size_range[0][0], size_range[1][0]),
random.uniform(size_range[0][1], size_range[1][1]),
random.uniform(size_range[0][2], size_range[1][2])
)
for i in range(count)
]
sizes = sorted(sizes, key=lambda x: x[0]**2 + x[1]**2, reverse=True)
boxes = []
for i in range(count):
box_i = add_box_of_size(stage, os.path.join(path, f"box_{i}"), sizes[i])
boxes.append(box_i)
if count > 0:
center = compute_bbox_center(boxes[0])
for i in range(1, count):
prev_box, cur_box = boxes[i - 1], boxes[i]
cur_bbox = compute_bbox(cur_box)
cur_center = compute_bbox_center(cur_box)
prev_bbox = compute_bbox(prev_box)
offset = (
center[0] - cur_center[0],
center[1] - cur_center[1],
prev_bbox[1][2] - cur_bbox[0][2]
)
translate(cur_box, offset)
# add some noise
for i in range(count):
rotate_z(boxes[i], random.uniform(*angle_range))
translate(boxes[i], (
random.uniform(*jitter_range),
random.uniform(*jitter_range),
0
))
return container, boxes
def add_random_box_stacks(
stage,
path: str,
count_range=(0, 3),
):
container = add_xform(stage, path)
stacks = []
count = random.randint(*count_range)
for i in range(count):
stack, items = add_random_box_stack(stage, os.path.join(path, f"stack_{i}"))
stacks.append(stack)
for i in range(count):
cur_stack = stacks[i]
cur_bbox = compute_bbox(cur_stack)
cur_center = compute_bbox_center(cur_stack)
translate(cur_stack, (0, -cur_center[1], -cur_bbox[0][2]))
if i > 0:
prev_bbox = compute_bbox(stacks[i - 1])
translate(cur_stack, (prev_bbox[1][0] - cur_bbox[0][0], 0, 0))
return container, stacks
def add_hand_truck_with_boxes(stage, path: str):
container = add_xform(stage, path)
hand_truck_path = f"{path}/truck"
box_stacks_path = f"{path}/box_stacks"
add_usd_ref(
stage,
hand_truck_path,
hand_truck_asset_url
)
box_stacks_container, box_stacks = add_random_box_stacks(stage, box_stacks_path, count_range=(1,4))
rotate_z(box_stacks_container, 90)
translate(
box_stacks_container,
offset=(0, random.uniform(8, 12), 28)
)
# remove out of bounds stacks
last_visible = box_stacks[0]
for i in range(len(box_stacks)):
_, stack_bbox_max = compute_bbox(box_stacks[i])
print(stack_bbox_max)
if stack_bbox_max[1] > 74:
set_visibility(box_stacks[i], "invisible")
else:
last_visible = box_stacks[i]
    # wiggle inside the remaining bounds
boxes_bbox = compute_bbox(last_visible)
wiggle = (82 - boxes_bbox[1][1])
translate(box_stacks_container, (0, random.uniform(0, wiggle), 1))
return container
stage = new_omniverse_stage()
light = add_dome_light(stage, "/scene/dome_light")
floor = add_plane(stage, "/scene/floor", size=(1000, 1000))
concrete = add_mdl_material(
stage,
"/scene/materials/concrete",
"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Materials/Base/Masonry/Concrete_Polished.mdl"
)
bind_material(floor, concrete)
all_objects_container = add_xform(stage, "/scene/objects")
for i in range(5):
for j in range(5):
path = f"/scene/objects/hand_truck_{i}_{j}"
current_object = add_hand_truck_with_boxes(stage, path)
rotate_z(current_object, random.uniform(-15, 15))
translate(current_object, (100*i, 150*j, 0))
objects_center = compute_bbox_center(all_objects_container)
translate(all_objects_container, (-objects_center[0], -objects_center[1], 0)) | 6,545 | Python | 30.171428 | 211 | 0.609778 |
NVIDIA-Omniverse/usd_scene_construction_utils/examples/hand_truck_w_boxes/README.md | # Example - Hand Truck with Boxes
This example demonstrates creating a scene with structured randomization.
It creates a grid of hand trucks with boxes scattered on top.
<img src="landing_graphic.jpg" height="320"/>
## Instructions
1. Modify the path on line 3 of ``main.py`` to the path you cloned usd_scene_construction_utils
2. Launch [Omniverse Code](https://developer.nvidia.com/omniverse/code-app)
3. Open the script editor
4. Copy the code from ``main.py`` into the script editor
5. Run the script editor.
## Notes
This example defines a few functions. Here are their descriptions; a short usage sketch follows the table.
| Function | Description |
|---|---|
| add_box_of_size | Adds a cardboard box of a given size, with randomly oriented labeling and tape. |
| add_random_box_stack | Adds a stack of cardboard boxes, sorted by cross-section size. Also adds some translation and angle jitter |
| add_random_box_stacks | Adds multiple random box stacks, aligned and stack on x-axis |
| add_hand_truck_with_boxes | Adds a hand truck, places the box stack at an offset so it appears as placed on the truck. Makes any out-of-bounds boxes invisible. Wiggles the visible boxes in the area remaining on the hand truck. |
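For instance, ``add_random_box_stack`` can be called on its own to drop a jittered
stack of boxes at a path. A minimal sketch (the argument values here are
illustrative assumptions; the signature comes from ``main.py`` above):
```python
stage = new_omniverse_stage()
stack, boxes = add_random_box_stack(
    stage,
    "/scene/stack_0",
    count_range=(2, 4),                        # 2 to 4 boxes
    size_range=((30, 30, 10), (50, 50, 25)),   # min/max (x, y, z) box sizes
    angle_range=(-5, 5),                       # z-rotation jitter in degrees
    jitter_range=(-3, 3),                      # xy translation jitter
)
```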
When developing this example, we started with just a simple function, and added complexity
iteratively by rendering, viewing the result, tweaking, and repeating.
| 1,346 | Markdown | 42.451612 | 231 | 0.759287 |
NVIDIA-Omniverse/usd_scene_construction_utils/examples/pallet_with_boxes/main.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import random
from pathlib import Path
sys.path.append(f"{Path.home()}/usd_scene_construction_utils") # use your install path
sys.path.append(f"{Path.home()}/usd_scene_construction_utils/examples/pallet_with_boxes") # use your install path
from usd_scene_construction_utils import *
PALLET_URIS = [
"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Shipping/Pallets/Wood/Block_A/BlockPallet_A01_PR_NVD_01.usd",
"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Shipping/Pallets/Wood/Block_B/BlockPallet_B01_PR_NVD_01.usd",
"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Shipping/Pallets/Wood/Wing_A/WingPallet_A01_PR_NVD_01.usd"
]
CARDBOARD_BOX_URIS = [
"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Shipping/Cardboard_Boxes/Cube_A/CubeBox_A02_16cm_PR_NVD_01.usd",
"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Shipping/Cardboard_Boxes/Flat_A/FlatBox_A05_26x26x11cm_PR_NVD_01.usd",
"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Shipping/Cardboard_Boxes/Printer_A/PrintersBox_A05_23x28x25cm_PR_NVD_01.usd"
]
def add_pallet(stage, path: str):
prim = add_usd_ref(stage, path, random.choice(PALLET_URIS))
add_semantics(prim, "class", "pallet")
return prim
def add_cardboard_box(stage, path: str):
prim = add_usd_ref(stage, path, random.choice(CARDBOARD_BOX_URIS))
add_semantics(prim, "class", "box")
return prim
def add_pallet_with_box(stage, path: str):
container = add_xform(stage, path)
pallet = add_pallet(stage, os.path.join(path, "pallet"))
box = add_cardboard_box(stage, os.path.join(path, "box"))
pallet_bbox = compute_bbox(pallet)
box_bbox = compute_bbox(box)
translate(box,(0, 0, pallet_bbox[1][2] - box_bbox[0][2]))
rotate_z(pallet, random.uniform(-25, 25))
return container
def add_tree(stage, path: str):
url = "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Vegetation/Trees/American_Beech.usd"
return add_usd_ref(stage, path, url)
stage = new_omniverse_stage()
brick = add_mdl_material(stage, "/scene/brick", "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Materials/Base/Masonry/Brick_Pavers.mdl")
pallet_box = add_pallet_with_box(stage, "/scene/pallet")
floor = add_plane(stage, "/scene/floor", size=(1000, 1000), uv=(20., 20.))
tree = add_tree(stage, "/scene/tree")
translate(tree, (100, -150, 0))
bind_material(floor, brick)
light = add_dome_light(stage, "/scene/dome_light") | 3,446 | Python | 46.219177 | 180 | 0.74231 |
NVIDIA-Omniverse/usd_scene_construction_utils/examples/add_camera/main.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from pathlib import Path
sys.path.append(f"{Path.home()}/usd_scene_construction_utils") # use your install path
from usd_scene_construction_utils import (
new_in_memory_stage,
add_box,
add_camera,
compute_look_at_matrix,
apply_xform_matrix,
export_stage
)
stage = new_in_memory_stage()
box = add_box(stage, "/scene/box", size=(100, 100, 100))
camera = add_camera(stage, "/scene/camera")
matrix = compute_look_at_matrix(
at=(0, 0, 0),
up=(0, 0, 1),
eye=(500, 500, 500)
)
apply_xform_matrix(camera, matrix)
export_stage(stage, "add_camera.usda", default_prim="/scene")
| 1,302 | Python | 27.955555 | 98 | 0.72043 |
NVIDIA-Omniverse/usd_scene_construction_utils/examples/add_camera/README.md | # Example - Add Camera
> !UNDER CONSTRUCTION | 45 | Markdown | 14.333329 | 22 | 0.733333 |
NVIDIA-Omniverse/usd_scene_construction_utils/examples/render_with_replicator/main.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import random
from pathlib import Path
sys.path.append(f"{Path.home()}/usd_scene_construction_utils") # use your install path
from usd_scene_construction_utils import *
# set to your output dir
OUTPUT_DIR = f"{Path.home()}/usd_scene_construction_utils/examples/render_with_replicator/output"
def add_box_stack(stage, path: str, box_material):
container = add_xform(stage, path)
boxes = []
for i in range(3):
box_path = f"{path}/box_{i}"
box = add_box(stage, box_path, (random.uniform(20, 30), random.uniform(20, 30), 10))
add_semantics(box, "class", "box_stack")
bind_material(box, box_material)
rotate_z(box, random.uniform(-10, 10))
boxes.append(box)
stack_prims(boxes, axis=2)
return container
def build_scene(stage):
# Add cardboard material
cardboard = add_mdl_material(
stage,
"/scene/cardboard",
"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Materials/Base/Wall_Board/Cardboard.mdl"
)
# Add concrete material
concrete = add_mdl_material(
stage,
"/scene/concrete",
"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Materials/Base/Masonry/Concrete_Smooth.mdl"
)
# Add floor plane
floor = add_plane(stage, "/scene/floor", size=(500, 500))
bind_material(floor, concrete)
# Add box
box_stack = add_box_stack(stage, "/scene/box_stack", box_material=cardboard)
# Stack box on floor
stack_prims([floor, box_stack], axis=2)
# Add dome light
add_dome_light(stage, "/scene/dome_light")
import omni.replicator.core as rep
with rep.new_layer():
stage = new_omniverse_stage()
build_scene(stage)
camera = rep.create.camera()
render_product = rep.create.render_product(camera, (1024, 1024))
box_stack = rep.get.prims(path_pattern="^/scene/box_stack$")
# Setup randomization
with rep.trigger.on_frame(num_frames=100):
with box_stack:
rep.modify.pose(position=rep.distribution.uniform((-100, -100, 0), (100, 100, 0)))
with camera:
rep.modify.pose(position=rep.distribution.uniform((0, 0, 0), (400, 400, 400)), look_at=(0, 0, 0))
writer = rep.WriterRegistry.get("BasicWriter")
writer.initialize(
output_dir=OUTPUT_DIR,
rgb=True,
bounding_box_2d_tight=True,
distance_to_camera=True,
bounding_box_3d=True,
camera_params=True,
instance_id_segmentation=True,
colorize_instance_id_segmentation=False
)
writer.attach([render_product]) | 3,298 | Python | 31.029126 | 115 | 0.671922 |
NVIDIA-Omniverse/usd_scene_construction_utils/examples/render_with_replicator/README.md | # Example - Render with Omniverse Replicator
This example demonstrates how to render a scene constructed with usd_scene_construction_utils
using Omniverse replicator. | 167 | Markdown | 40.99999 | 93 | 0.838323 |
NVIDIA-Omniverse/usd_scene_construction_utils/examples/hello_box/main.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from pathlib import Path
sys.path.append(f"{Path.home()}/usd_scene_construction_utils") # use your install path
import random
from usd_scene_construction_utils import *
stage = new_omniverse_stage()
# Create floor
floor = add_plane(stage, "/scene/floor", (1000, 1000))
# Add a dome light
light = add_dome_light(stage, "/scene/dome_light")
# Create a grid of boxes
all_boxes = add_xform(stage, "/scene/boxes")
for i in range(5):
for j in range(5):
path = f"/scene/boxes/box_{i}_{j}"
# Add box of random size
size = (
random.uniform(20, 50),
random.uniform(20, 50),
random.uniform(20, 50),
)
box = add_box(stage, path, size=size)
# Set position in xy grid
translate(box, (100*i, 100*j, 0))
# Align z with floor
box_min, _ = compute_bbox(box)
translate(box, (0, 0, -box_min[2]))
# Translate all boxes to have xy center at (0, 0)
boxes_center = compute_bbox_center(all_boxes)
translate("/scene/boxes", (-boxes_center[0], -boxes_center[1], 0))
| 1,760 | Python | 31.018181 | 98 | 0.674432 |
NVIDIA-Omniverse/usd_scene_construction_utils/examples/hello_box/README.md | # Example - Hello Box
This example demonstrates creating a simple box shape and adding a dome light
to a scene.
<img src="landing_graphic.jpg" height="320"/>
It doesn't include any assets, so should load very quickly. This is simply
so you can get quick results and make sure usd_scene_construction_utils is working. Once you're
set up and working, you'll want to use Omniverse with a Nucleus server so you
can pull from a rich set of assets, like in the [hand truck example](../hand_truck_w_boxes/).
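The core calls are small. A minimal sketch (the paths and the fixed box size are
illustrative; the full randomized grid version is in ``main.py``):
```python
stage = new_omniverse_stage()
floor = add_plane(stage, "/scene/floor", (1000, 1000))
light = add_dome_light(stage, "/scene/dome_light")
box = add_box(stage, "/scene/boxes/box_0_0", size=(30, 30, 30))
```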
## Instructions
1. Modify the path on line 3 of ``main.py`` to the path you cloned usd_scene_construction_utils
2. Launch [Omniverse Code](https://developer.nvidia.com/omniverse/code-app)
3. Open the script editor
4. Copy the code from ``main.py`` into the script editor
5. Run the script editor.
| 809 | Markdown | 37.571427 | 96 | 0.750309 |