// File: AICP-main/veins/src/veins/modules/phy/Decider80211p.cc
//
// Copyright (C) 2011 David Eckhoff <[email protected]>
// Copyright (C) 2012 Bastian Bloessl, Stefan Joerer, Michele Segata <{bloessl,joerer,segata}@ccs-labs.org>
// Copyright (C) 2018 Fabian Bronner <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
/*
* Based on Decider80211.cc from Karl Wessel
* and modifications by Christopher Saloman
*/
#include "veins/modules/phy/Decider80211p.h"
#include "veins/modules/phy/DeciderResult80211.h"
#include "veins/modules/messages/Mac80211Pkt_m.h"
#include "veins/base/toolbox/Signal.h"
#include "veins/modules/messages/AirFrame11p_m.h"
#include "veins/modules/phy/NistErrorRate.h"
#include "veins/modules/utility/ConstsPhy.h"
#include "veins/base/toolbox/SignalUtils.h"
using namespace veins;
simtime_t Decider80211p::processNewSignal(AirFrame* msg)
{
AirFrame11p* frame = check_and_cast<AirFrame11p*>(msg);
// get the receiving power of the Signal at start-time and center frequency
Signal& signal = frame->getSignal();
signalStates[frame] = EXPECT_END;
if (signal.smallerAtCenterFrequency(minPowerLevel)) {
// annotate the frame, so that we won't try decoding it at its end
frame->setUnderMinPowerLevel(true);
// check channel busy status: a superposition of low-power frames might still turn the channel status to busy
if (cca(simTime(), nullptr) == false) {
setChannelIdleStatus(false);
}
return signal.getReceptionEnd();
}
else {
// This value might be just an intermediate result (due to short circuiting)
double recvPower = signal.getAtCenterFrequency();
setChannelIdleStatus(false);
if (phy11p->getRadioState() == Radio::TX) {
frame->setBitError(true);
frame->setWasTransmitting(true);
EV_TRACE << "AirFrame: " << frame->getId() << " (" << recvPower << ") received, while already sending. Setting BitErrors to true" << std::endl;
}
else {
if (!currentSignal.first) {
// NIC is not yet synced to any frame, so lock and try to decode this frame
currentSignal.first = frame;
EV_TRACE << "AirFrame: " << frame->getId() << " with (" << recvPower << " > " << minPowerLevel << ") -> Trying to receive AirFrame." << std::endl;
if (notifyRxStart) {
phy->sendControlMsgToMac(new cMessage("RxStartStatus", MacToPhyInterface::PHY_RX_START));
}
}
else {
// NIC is currently trying to decode another frame. this frame will be simply treated as interference
EV_TRACE << "AirFrame: " << frame->getId() << " with (" << recvPower << " > " << minPowerLevel << ") -> Already synced to another AirFrame. Treating AirFrame as interference." << std::endl;
}
// channel turned busy
// measure communication density
myBusyTime += signal.getDuration().dbl();
}
return signal.getReceptionEnd();
}
}
int Decider80211p::getSignalState(AirFrame* frame)
{
if (signalStates.find(frame) == signalStates.end()) {
return NEW;
}
else {
return signalStates[frame];
}
}
DeciderResult* Decider80211p::checkIfSignalOk(AirFrame* frame)
{
auto frame11p = check_and_cast<AirFrame11p*>(frame);
Signal& s = frame->getSignal();
simtime_t start = s.getReceptionStart();
simtime_t end = s.getReceptionEnd();
// compute receive power
double recvPower_dBm = 10 * log10(s.getAtCenterFrequency());
start = start + PHY_HDR_PREAMBLE_DURATION; // it's OK if something in the training phase is broken
AirFrameVector airFrames;
getChannelInfo(start, end, airFrames);
double noise = phy->getNoiseFloorValue();
// Make sure to use the adjusted starting-point (which ignores the preamble)
double sinrMin = SignalUtils::getMinSINR(start, end, frame, airFrames, noise);
double snrMin;
if (collectCollisionStats) {
// snrMin = SignalUtils::getMinSNR(start, end, frame, noise);
snrMin = s.getDataMin() / noise;
}
else {
// just set it to any value; if collectCollisionStats != true,
// it will be ignored by packetOk
snrMin = 1e200;
}
double payloadBitrate = getOfdmDatarate(static_cast<MCS>(frame11p->getMcs()), BANDWIDTH_11P);
DeciderResult80211* result = nullptr;
switch (packetOk(sinrMin, snrMin, frame->getBitLength(), payloadBitrate)) {
case DECODED:
EV_TRACE << "Packet is fine! We can decode it" << std::endl;
result = new DeciderResult80211(true, payloadBitrate, sinrMin, recvPower_dBm, false);
break;
case NOT_DECODED:
if (!collectCollisionStats) {
EV_TRACE << "Packet has bit Errors. Lost " << std::endl;
}
else {
EV_TRACE << "Packet has bit Errors due to low power. Lost " << std::endl;
}
result = new DeciderResult80211(false, payloadBitrate, sinrMin, recvPower_dBm, false);
break;
case COLLISION:
EV_TRACE << "Packet has bit Errors due to collision. Lost " << std::endl;
collisions++;
result = new DeciderResult80211(false, payloadBitrate, sinrMin, recvPower_dBm, true);
break;
default:
ASSERT2(false, "Impossible packet result returned by packetOk(). Check the code.");
break;
}
return result;
}
enum Decider80211p::PACKET_OK_RESULT Decider80211p::packetOk(double sinrMin, double snrMin, int lengthMPDU, double bitrate)
{
double packetOkSinr;
double packetOkSnr;
// compute success rate depending on mcs and bw
packetOkSinr = NistErrorRate::getChunkSuccessRate(bitrate, BANDWIDTH_11P, sinrMin, PHY_HDR_SERVICE_LENGTH + lengthMPDU + PHY_TAIL_LENGTH);
// check if header is broken
double headerNoError = NistErrorRate::getChunkSuccessRate(PHY_HDR_BITRATE, BANDWIDTH_11P, sinrMin, PHY_HDR_PLCPSIGNAL_LENGTH);
double headerNoErrorSnr;
// compute PER also for SNR only
if (collectCollisionStats) {
packetOkSnr = NistErrorRate::getChunkSuccessRate(bitrate, BANDWIDTH_11P, snrMin, PHY_HDR_SERVICE_LENGTH + lengthMPDU + PHY_TAIL_LENGTH);
headerNoErrorSnr = NistErrorRate::getChunkSuccessRate(PHY_HDR_BITRATE, BANDWIDTH_11P, snrMin, PHY_HDR_PLCPSIGNAL_LENGTH);
// the probability of correct reception without considering the interference
// MUST be greater or equal than when consider it
ASSERT(packetOkSnr >= packetOkSinr);
ASSERT(headerNoErrorSnr >= headerNoError);
}
// probability of no bit error in the PLCP header
double rand = RNGCONTEXT dblrand();
if (!collectCollisionStats) {
if (rand > headerNoError) return NOT_DECODED;
}
else {
if (rand > headerNoError) {
// oops, we have a header error. Is it due to interference?
if (rand > headerNoErrorSnr) {
// no. we would not have been able to receive it even
// without interference
return NOT_DECODED;
}
else {
// yes. we would have decoded that without interference
return COLLISION;
}
}
}
// probability of no bit error in the rest of the packet
rand = RNGCONTEXT dblrand();
if (!collectCollisionStats) {
if (rand > packetOkSinr) {
return NOT_DECODED;
}
else {
return DECODED;
}
}
else {
if (rand > packetOkSinr) {
// oops, we have an error in the payload. Is it due to interference?
if (rand > packetOkSnr) {
// no. we would not have been able to receive it even
// without interference
return NOT_DECODED;
}
else {
// yes. we would have decoded that without interference
return COLLISION;
}
}
else {
return DECODED;
}
}
}
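// Hedged sketch (not part of the original sources): the attribution logic
// that packetOk() above applies to each of its two uniform draws (one for
// the PLCP header, one for the payload), isolated as a free function. With
// success probabilities pOkNoInterference >= pOkWithInterference (asserted
// above) and a draw r: r <= pOkWithInterference means the stage is decoded;
// r > pOkNoInterference means the stage would have failed even on a clean
// channel (NOT_DECODED); anything in between means interference made the
// difference (COLLISION).
namespace {
inline Decider80211p::PACKET_OK_RESULT classifyDrawExample(double r, double pOkWithInterference, double pOkNoInterference)
{
if (r <= pOkWithInterference) return Decider80211p::DECODED;
if (r > pOkNoInterference) return Decider80211p::NOT_DECODED;
return Decider80211p::COLLISION;
}
} // namespace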
bool Decider80211p::cca(simtime_t_cref time, AirFrame* exclude)
{
AirFrameVector airFrames;
// collect all AirFrames that intersect with [start, end]
getChannelInfo(time, time, airFrames);
// In the reference implementation, only centerFrequency - 5e6 (half the bandwidth) is checked!
// Although this is wrong, the same is done here to reproduce the original results
double minPower = phy->getNoiseFloorValue();
bool isChannelIdle = minPower < ccaThreshold;
if (airFrames.size() > 0) {
size_t usedFreqIndex = airFrames.front()->getSignal().getSpectrum().indexOf(centerFrequency - 5e6);
isChannelIdle = SignalUtils::isChannelPowerBelowThreshold(time, airFrames, usedFreqIndex, ccaThreshold - minPower, exclude);
}
return isChannelIdle;
}
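// Note on the threshold used above (explanatory comment, not in the original
// sources): the noise floor already contributes minPower to the total channel
// power, so the budget that the summed frame powers are checked against is
// ccaThreshold - minPower.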
simtime_t Decider80211p::processSignalEnd(AirFrame* msg)
{
AirFrame11p* frame = check_and_cast<AirFrame11p*>(msg);
// here the Signal is finally processed
Signal& signal = frame->getSignal();
double recvPower_dBm = 10 * log10(signal.getMax());
bool whileSending = false;
// remove this frame from our current signals
signalStates.erase(frame);
DeciderResult* result;
if (frame->getUnderMinPowerLevel()) {
// this frame was not even detected by the radio card
result = new DeciderResult80211(false, 0, 0, recvPower_dBm);
}
else if (frame->getWasTransmitting() || phy11p->getRadioState() == Radio::TX) {
// this frame was received while sending
whileSending = true;
result = new DeciderResult80211(false, 0, 0, recvPower_dBm);
}
else {
// first check whether this is the frame NIC is currently synced on
if (frame == currentSignal.first) {
// check if the snr is above the Decider's specific threshold,
// i.e. the Decider has received it correctly
result = checkIfSignalOk(frame);
// after having tried to decode the frame, the NIC is no longer synced to it
// and is ready to sync on a new one
currentSignal.first = 0;
}
else {
// if this is not the frame we are synced on, we cannot receive it
result = new DeciderResult80211(false, 0, 0, recvPower_dBm);
}
}
if (result->isSignalCorrect()) {
EV_TRACE << "packet was received correctly, it is now handed to upper layer...\n";
// go on with processing this AirFrame, send it to the Mac-Layer
if (notifyRxStart) {
phy->sendControlMsgToMac(new cMessage("RxStartStatus", MacToPhyInterface::PHY_RX_END_WITH_SUCCESS));
}
phy->sendUp(frame, result);
}
else {
if (frame->getUnderMinPowerLevel()) {
EV_TRACE << "packet was not detected by the card. power was under minPowerLevel threshold\n";
}
else if (whileSending) {
EV_TRACE << "packet was received while sending, sending it as control message to upper layer\n";
phy->sendControlMsgToMac(new cMessage("Error", RECWHILESEND));
}
else {
EV_TRACE << "packet was not received correctly, sending it as control message to upper layer\n";
if (notifyRxStart) {
phy->sendControlMsgToMac(new cMessage("RxStartStatus", MacToPhyInterface::PHY_RX_END_WITH_FAILURE));
}
if (((DeciderResult80211*) result)->isCollision()) {
phy->sendControlMsgToMac(new cMessage("Error", Decider80211p::COLLISION));
}
else {
phy->sendControlMsgToMac(new cMessage("Error", BITERROR));
}
}
delete result;
}
if (phy11p->getRadioState() == Radio::TX) {
EV_TRACE << "I'm currently sending\n";
}
// check if channel is idle now
// we declare channel busy if CCA tells us so, or if we are currently
// decoding a frame
else if (cca(simTime(), frame) == false || currentSignal.first != 0) {
EV_TRACE << "Channel not yet idle!\n";
}
else {
// might have been idle before (when the packet's rx power was below the sensitivity threshold)
if (isChannelIdle != true) {
EV_TRACE << "Channel idle now!\n";
setChannelIdleStatus(true);
}
}
return notAgain;
}
void Decider80211p::setChannelIdleStatus(bool isIdle)
{
isChannelIdle = isIdle;
if (isIdle)
phy->sendControlMsgToMac(new cMessage("ChannelStatus", Mac80211pToPhy11pInterface::CHANNEL_IDLE));
else
phy->sendControlMsgToMac(new cMessage("ChannelStatus", Mac80211pToPhy11pInterface::CHANNEL_BUSY));
}
void Decider80211p::changeFrequency(double freq)
{
centerFrequency = freq;
}
double Decider80211p::getCCAThreshold()
{
return 10 * log10(ccaThreshold);
}
void Decider80211p::setCCAThreshold(double ccaThreshold_dBm)
{
ccaThreshold = pow(10, ccaThreshold_dBm / 10);
}
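// Worked example (illustration only): a CCA threshold of -65 dBm stored via
// setCCAThreshold(-65) becomes pow(10, -65.0 / 10) mW, roughly 3.16e-7 mW,
// and getCCAThreshold() recovers -65 dBm via 10 * log10(3.16e-7).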
void Decider80211p::setNotifyRxStart(bool enable)
{
notifyRxStart = enable;
}
void Decider80211p::switchToTx()
{
if (currentSignal.first != 0) {
// we are currently trying to receive a frame.
if (allowTxDuringRx) {
// if the above layer decides to transmit anyhow, we need to abort reception
AirFrame11p* currentFrame = dynamic_cast<AirFrame11p*>(currentSignal.first);
ASSERT(currentFrame);
// flag the frame as "while transmitting"
currentFrame->setWasTransmitting(true);
currentFrame->setBitError(true);
// forget about the signal
currentSignal.first = 0;
}
else {
throw cRuntimeError("Decider80211p: mac layer requested phy to transmit a frame while currently receiving another");
}
}
}
void Decider80211p::finish()
{
simtime_t totalTime = simTime() - myStartTime;
phy->recordScalar("busyTime", myBusyTime / totalTime.dbl());
if (collectCollisionStats) {
phy->recordScalar("ncollisions", collisions);
}
}
Decider80211p::~Decider80211p(){};
// File: AICP-main/veins/src/veins/modules/phy/Decider80211p.h
//
// Copyright (C) 2011 David Eckhoff <[email protected]>
// Copyright (C) 2012 Bastian Bloessl, Stefan Joerer, Michele Segata <{bloessl,joerer,segata}@ccs-labs.org>
// Copyright (C) 2018 Fabian Bronner <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
#include "veins/base/phyLayer/BaseDecider.h"
#include "veins/modules/utility/Consts80211p.h"
#include "veins/modules/mac/ieee80211p/Mac80211pToPhy11pInterface.h"
#include "veins/modules/phy/Decider80211pToPhy80211pInterface.h"
namespace veins {
using veins::AirFrame;
/**
* @brief
* Based on Decider80211.h from Karl Wessel
* and modifications by Christopher Saloman
*
* @author David Eckhoff
*
* @ingroup decider
*
* @see DemoBaseApplLayer
* @see Mac1609_4
* @see PhyLayer80211p
* @see Decider80211p
*/
class VEINS_API Decider80211p : public BaseDecider {
public:
enum Decider80211ControlKinds {
NOTHING = 22100,
BITERROR, // the phy has recognized a bit error in the packet
LAST_DECIDER_80211_CONTROL_KIND,
RECWHILESEND
};
/**
* @brief Tells the outcome of a packetOk() call: the packet might be
* correctly decoded, discarded due to low SNR, or discarded due
* to low SINR (i.e. a collision)
*/
enum PACKET_OK_RESULT {
DECODED,
NOT_DECODED,
COLLISION
};
protected:
// threshold value for checking a SNR-map (SNR-threshold)
double snrThreshold;
/** @brief Power level threshold used to declare channel busy if
* preamble portion is missed (802.11-2012 18.3.10.6
* CCA requirements). Notice that in 18.3.10.6, the mandatory CCA threshold
* for a 10 MHz channel is -65 dBm. However, there is another threshold
* called CCA-ED which requirements are defined in D.2.5. For a 10 MHz
* channel, CCA-ED threshold shall be -75 dBm. CCA-ED is required for
* certain operating classes that shall implement CCA-ED behavior.
* According to Table E-4 however, 802.11p channels should not implement
* it, so the correct threshold is -65 dBm.
* When considering ETSI ITS G5 (ETSI TS 102 687) things change again.
* Indeed, the DCC Sensitivity Control (DSC) part of the DCC algorithm
* changes the CCA threshold depending on the state. Values are listed
* in Table A.3: minimum value is -95 dBm, maximum value is -65 dBm,
* and default value is -85 dBm.
*/
double ccaThreshold;
/** @brief allows/disallows interruption of current reception for txing
*
* For a standard 802.11 MAC, starting a transmission while currently
* receiving a frame is forbidden, as the channel is in a BUSY state.
* For research purposes, however, one might use a custom MAC layer on
* top of an 802.11p OFDM PHY, and decide to interrupt an ongoing
* reception to start a transmission, whatever the reason. If the
* following variable is set to false, simulation will be terminated
* if this occurs. If not, simulation will continue, aborting current
* reception.
*/
bool allowTxDuringRx;
/** @brief The center frequency on which the decider listens for signals */
double centerFrequency;
double myBusyTime;
double myStartTime;
std::string myPath;
Decider80211pToPhy80211pInterface* phy11p;
std::map<AirFrame*, int> signalStates;
/** @brief enable/disable statistics collection for collisions
*
* For collecting statistics about collisions, we compute the Packet
* Error Rate for both SNR and SINR values. This might increase the
* simulation time, so if statistics about collisions are not needed,
* this variable should be set to false
*/
bool collectCollisionStats;
/** @brief count the number of collisions */
unsigned int collisions;
/** @brief notify PHY-RXSTART.indication */
bool notifyRxStart;
protected:
/**
* @brief Checks whether a received frame can be decoded correctly.
*
* Evaluates the minimum SINR during the reception (and, if collision
* statistics are collected, the minimum SNR as well) via packetOk().
*
* @return a DeciderResult80211 stating whether the frame was received correctly
*/
virtual DeciderResult* checkIfSignalOk(AirFrame* frame);
simtime_t processNewSignal(AirFrame* frame) override;
/**
* @brief Processes a received AirFrame.
*
* The SNR-mapping for the Signal is created and checked against the Deciders
* SNR-threshold. Depending on that the received AirFrame is either sent up
* to the MAC-Layer or dropped.
*
* @return usually notAgain, i.e. 'do not pass this AirFrame to the decider again'
*/
simtime_t processSignalEnd(AirFrame* frame) override;
/** @brief computes if packet is ok or has errors*/
enum PACKET_OK_RESULT packetOk(double sinrMin, double snrMin, int lengthMPDU, double bitrate);
public:
/**
* @brief Initializes the Decider with a pointer to its PhyLayer and
* specific values for threshold and minPowerLevel
*/
Decider80211p(cComponent* owner, DeciderToPhyInterface* phy, double minPowerLevel, double ccaThreshold, bool allowTxDuringRx, double centerFrequency, int myIndex = -1, bool collectCollisionStatistics = false)
: BaseDecider(owner, phy, minPowerLevel, myIndex)
, ccaThreshold(ccaThreshold)
, allowTxDuringRx(allowTxDuringRx)
, centerFrequency(centerFrequency)
, myBusyTime(0)
, myStartTime(simTime().dbl())
, collectCollisionStats(collectCollisionStatistics)
, collisions(0)
, notifyRxStart(false)
{
phy11p = dynamic_cast<Decider80211pToPhy80211pInterface*>(phy);
ASSERT(phy11p);
}
void setPath(std::string myPath)
{
this->myPath = myPath;
}
bool cca(simtime_t_cref, AirFrame*);
int getSignalState(AirFrame* frame) override;
~Decider80211p() override;
void changeFrequency(double freq);
/**
* @brief returns the CCA threshold in dBm
*/
double getCCAThreshold();
/**
* @brief sets the CCA threshold
*/
void setCCAThreshold(double ccaThreshold_dBm);
void setChannelIdleStatus(bool isIdle) override;
/**
* @brief invoke this method when the phy layer is also finalized,
* so that statistics recorded by the decider can be written to
* the output file
*/
void finish() override;
/**
* @brief Notifies the decider that phy layer is starting a transmission.
*
* This helps the decider interrupting a current reception. In a standard
* 802.11 MAC, this should never happen, but in other MAC layers you might
* decide to interrupt an ongoing reception and start transmitting. Thank
* to this method, the decider can flag the ongoing frame as non received
* because of the transmission.
*/
void switchToTx() override;
/**
* @brief notify PHY-RXSTART.indication
*/
void setNotifyRxStart(bool enable);
};
} // namespace veins
// File: AICP-main/veins/src/veins/modules/phy/Decider80211pToPhy80211pInterface.h
//
// Copyright (C) 2011 David Eckhoff <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
namespace veins {
/**
* @brief
* Interface of PhyLayer80211p exposed to Decider80211p.
*
* @author David Eckhoff
*
* @ingroup phyLayer
*/
class VEINS_API Decider80211pToPhy80211pInterface {
public:
virtual ~Decider80211pToPhy80211pInterface(){};
virtual int getRadioState() = 0;
};
} // namespace veins
// File: AICP-main/veins/src/veins/modules/phy/DeciderResult80211.h
//
// Copyright (C) 2007 Technische Universitaet Berlin (TUB), Germany, Telecommunication Networks Group
// Copyright (C) 2007 Technische Universiteit Delft (TUD), Netherlands
// Copyright (C) 2007 Universitaet Paderborn (UPB), Germany
// Copyright (C) 2014 Michele Segata <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
/*
* DeciderResult80211.h
*
* Created on: 04.02.2009
* Author: karl
*
* Modified by Michele Segata ([email protected])
*/
#pragma once
#include "veins/veins.h"
#include "veins/base/phyLayer/Decider.h"
namespace veins {
/**
* @brief Defines an extended DeciderResult for the 80211 protocol
* which stores the bit-rate of the transmission.
*
* @ingroup decider
* @ingroup ieee80211
*/
class VEINS_API DeciderResult80211 : public DeciderResult {
protected:
/** @brief Stores the bit-rate of the transmission of the packet */
double bitrate;
/** @brief Stores the signal to noise ratio of the transmission */
double snr;
/** @brief Stores the received power in dBm
* Please note that this is NOT the RSSI. The RSSI is an indicator
* of the quality of the signal which is not standardized, and
* different vendors can define different indicators. This value
* indicates the power that the frame had when received by the
* NIC card, WITHOUT noise floor and WITHOUT interference
*/
double recvPower_dBm;
/** @brief Stores whether the incorrect decoding was due to low power or to a collision */
bool collision;
public:
/**
* @brief Initialises with the passed values.
*
* "bitrate" defines the bit-rate of the transmission of the packet.
*/
DeciderResult80211(bool isCorrect, double bitrate, double snr, double recvPower_dBm = 0, bool collision = false)
: DeciderResult(isCorrect)
, bitrate(bitrate)
, snr(snr)
, recvPower_dBm(recvPower_dBm)
, collision(collision)
{
}
/**
* @brief Returns the bit-rate of the transmission of the packet.
*/
double getBitrate() const
{
return bitrate;
}
/**
* @brief Returns the signal to noise ratio of the transmission.
*/
double getSnr() const
{
return snr;
}
/**
* @brief Returns whether drop was due to collision, if isCorrect is false
*/
bool isCollision() const
{
return collision;
}
/**
* @brief Returns the signal power in dBm.
*/
double getRecvPower_dBm() const
{
return recvPower_dBm;
}
};
} // namespace veins
// File: AICP-main/veins/src/veins/modules/phy/NistErrorRate.cc
/*
* Copyright (c) 2010 The Boeing Company
* Copyright (c) 2014 Michele Segata <[email protected]>
*
* SPDX-License-Identifier: GPL-2.0-only
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Gary Pei <[email protected]>
*/
#include "veins/veins.h"
#include "veins/modules/phy/NistErrorRate.h"
using veins::NistErrorRate;
NistErrorRate::NistErrorRate()
{
}
double NistErrorRate::getBpskBer(double snr)
{
double z = std::sqrt(snr);
double ber = 0.5 * erfc(z);
return ber;
}
double NistErrorRate::getQpskBer(double snr)
{
double z = std::sqrt(snr / 2.0);
double ber = 0.5 * erfc(z);
return ber;
}
double NistErrorRate::get16QamBer(double snr)
{
double z = std::sqrt(snr / (5.0 * 2.0));
double ber = 0.75 * 0.5 * erfc(z);
return ber;
}
double NistErrorRate::get64QamBer(double snr)
{
double z = std::sqrt(snr / (21.0 * 2.0));
double ber = 7.0 / 12.0 * 0.5 * erfc(z);
return ber;
}
double NistErrorRate::getFecBpskBer(double snr, uint32_t nbits, uint32_t bValue)
{
double ber = getBpskBer(snr);
if (ber == 0.0) {
return 1.0;
}
double pe = calculatePe(ber, bValue);
pe = std::min(pe, 1.0);
double pms = std::pow(1 - pe, (double) nbits);
return pms;
}
double NistErrorRate::getFecQpskBer(double snr, uint32_t nbits, uint32_t bValue)
{
double ber = getQpskBer(snr);
if (ber == 0.0) {
return 1.0;
}
double pe = calculatePe(ber, bValue);
pe = std::min(pe, 1.0);
double pms = std::pow(1 - pe, (double) nbits);
return pms;
}
double NistErrorRate::calculatePe(double p, uint32_t bValue)
{
double D = std::sqrt(4.0 * p * (1.0 - p));
double pe = 1.0;
if (bValue == 1) {
// code rate 1/2, use table 3.1.1
pe = 0.5 * (36.0 * std::pow(D, 10) + 211.0 * std::pow(D, 12) + 1404.0 * std::pow(D, 14) + 11633.0 * std::pow(D, 16) + 77433.0 * std::pow(D, 18) + 502690.0 * std::pow(D, 20) + 3322763.0 * std::pow(D, 22) + 21292910.0 * std::pow(D, 24) + 134365911.0 * std::pow(D, 26));
}
else if (bValue == 2) {
// code rate 2/3, use table 3.1.2
pe = 1.0 / (2.0 * bValue) * (3.0 * std::pow(D, 6) + 70.0 * std::pow(D, 7) + 285.0 * std::pow(D, 8) + 1276.0 * std::pow(D, 9) + 6160.0 * std::pow(D, 10) + 27128.0 * std::pow(D, 11) + 117019.0 * std::pow(D, 12) + 498860.0 * std::pow(D, 13) + 2103891.0 * std::pow(D, 14) + 8784123.0 * std::pow(D, 15));
}
else if (bValue == 3) {
// code rate 3/4, use table 3.1.2
pe = 1.0 / (2.0 * bValue) * (42.0 * std::pow(D, 5) + 201.0 * std::pow(D, 6) + 1492.0 * std::pow(D, 7) + 10469.0 * std::pow(D, 8) + 62935.0 * std::pow(D, 9) + 379644.0 * std::pow(D, 10) + 2253373.0 * std::pow(D, 11) + 13073811.0 * std::pow(D, 12) + 75152755.0 * std::pow(D, 13) + 428005675.0 * std::pow(D, 14));
}
else {
ASSERT(false);
}
return pe;
}
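// Note (explanatory, not in the original sources): the polynomials above are
// union bounds on the first-event error probability of the standard K=7
// convolutional code, evaluated at the Chernoff parameter D = sqrt(4p(1-p));
// the coefficient tables are taken from the NIST error rate model that this
// file ports from ns-3.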
double NistErrorRate::getFec16QamBer(double snr, uint32_t nbits, uint32_t bValue)
{
double ber = get16QamBer(snr);
if (ber == 0.0) {
return 1.0;
}
double pe = calculatePe(ber, bValue);
pe = std::min(pe, 1.0);
double pms = std::pow(1 - pe, static_cast<double>(nbits));
return pms;
}
double NistErrorRate::getFec64QamBer(double snr, uint32_t nbits, uint32_t bValue)
{
double ber = get64QamBer(snr);
if (ber == 0.0) {
return 1.0;
}
double pe = calculatePe(ber, bValue);
pe = std::min(pe, 1.0);
double pms = std::pow(1 - pe, static_cast<double>(nbits));
return pms;
}
double NistErrorRate::getChunkSuccessRate(unsigned int datarate, enum Bandwidth bw, double snr_mW, uint32_t nbits)
{
// get mcs from datarate and bw
MCS mcs = getMCS(datarate, bw);
// compute success rate depending on mcs
switch (mcs) {
case MCS::ofdm_bpsk_r_1_2:
return getFecBpskBer(snr_mW, nbits, 1);
break;
case MCS::ofdm_bpsk_r_3_4:
return getFecBpskBer(snr_mW, nbits, 3);
break;
case MCS::ofdm_qpsk_r_1_2:
return getFecQpskBer(snr_mW, nbits, 1);
break;
case MCS::ofdm_qpsk_r_3_4:
return getFecQpskBer(snr_mW, nbits, 3);
break;
case MCS::ofdm_qam16_r_1_2:
return getFec16QamBer(snr_mW, nbits, 1);
break;
case MCS::ofdm_qam16_r_3_4:
return getFec16QamBer(snr_mW, nbits, 3);
break;
case MCS::ofdm_qam64_r_2_3:
return getFec64QamBer(snr_mW, nbits, 2);
break;
case MCS::ofdm_qam64_r_3_4:
return getFec64QamBer(snr_mW, nbits, 3);
break;
default:
ASSERT2(false, "Invalid MCS chosen");
break;
}
return 0;
}
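// Hedged usage sketch (not part of the original file): success probability of
// a 400-byte chunk sent at 6 Mbit/s on the 10 MHz 802.11p channel, assuming a
// linear SNR of 10 (i.e. 10 dB); getMCS() maps this rate/bandwidth pair to an
// MCS internally:
//
// double p = NistErrorRate::getChunkSuccessRate(6000000, BANDWIDTH_11P, 10.0, 400 * 8);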
// File: AICP-main/veins/src/veins/modules/phy/NistErrorRate.h
/*
* Copyright (c) 2010 The Boeing Company
* Copyright (c) 2014 Michele Segata <[email protected]>
*
* SPDX-License-Identifier: GPL-2.0-only
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Gary Pei <[email protected]>
*/
#pragma once
#include <stdint.h>
#include <cmath>
#include "veins/modules/utility/ConstsPhy.h"
namespace veins {
/**
* Model the error rate for different modulations and coding schemes.
* Taken from the nist wifi model of ns-3
*/
class VEINS_API NistErrorRate {
public:
NistErrorRate();
static double getChunkSuccessRate(unsigned int datarate, enum Bandwidth bw, double snr_mW, uint32_t nbits);
private:
/**
* Return the coded BER for the given p and b.
*
* \param p
* \param bValue
* \return BER
*/
static double calculatePe(double p, uint32_t bValue);
/**
* Return BER of BPSK at the given SNR.
*
* \param snr snr value
* \return BER of BPSK at the given SNR
*/
static double getBpskBer(double snr);
/**
* Return BER of QPSK at the given SNR.
*
* \param snr snr value
* \return BER of QPSK at the given SNR
*/
static double getQpskBer(double snr);
/**
* Return BER of QAM16 at the given SNR.
*
* \param snr snr value
* \return BER of QAM16 at the given SNR
*/
static double get16QamBer(double snr);
/**
* Return BER of QAM64 at the given SNR.
*
* \param snr snr value
* \return BER of QAM64 at the given SNR
*/
static double get64QamBer(double snr);
/**
* Return BER of BPSK at the given SNR after applying FEC.
*
* \param snr snr value
* \param nbits the number of bits in the chunk
* \param bValue
* \return BER of BPSK at the given SNR after applying FEC
*/
static double getFecBpskBer(double snr, uint32_t nbits, uint32_t bValue);
/**
* Return BER of QPSK at the given SNR after applying FEC.
*
* \param snr snr value
* \param nbits the number of bits in the chunk
* \param bValue
* \return BER of QPSK at the given SNR after applying FEC
*/
static double getFecQpskBer(double snr, uint32_t nbits, uint32_t bValue);
/**
* Return BER of QAM16 at the given SNR after applying FEC.
*
* \param snr snr value
* \param nbits the number of bits in the chunk
* \param bValue
* \return BER of QAM16 at the given SNR after applying FEC
*/
static double getFec16QamBer(double snr, uint32_t nbits, uint32_t bValue);
/**
* Return BER of QAM64 at the given SNR after applying FEC.
*
* \param snr snr value
* \param nbits the number of bits in the chunk
* \param bValue
* \return BER of QAM64 at the given SNR after applying FEC
*/
static double getFec64QamBer(double snr, uint32_t nbits, uint32_t bValue);
};
} // namespace veins
// File: AICP-main/veins/src/veins/modules/phy/PhyLayer80211p.cc
//
// Copyright (C) 2011 David Eckhoff <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
/*
* Based on PhyLayer.cc from Karl Wessel
* and modifications by Christopher Saloman
*/
#include "veins/modules/phy/PhyLayer80211p.h"
#include "veins/modules/phy/Decider80211p.h"
#include "veins/modules/analogueModel/SimplePathlossModel.h"
#include "veins/modules/analogueModel/BreakpointPathlossModel.h"
#include "veins/modules/analogueModel/PERModel.h"
#include "veins/modules/analogueModel/SimpleObstacleShadowing.h"
#include "veins/modules/analogueModel/VehicleObstacleShadowing.h"
#include "veins/modules/analogueModel/TwoRayInterferenceModel.h"
#include "veins/modules/analogueModel/NakagamiFading.h"
#include "veins/base/connectionManager/BaseConnectionManager.h"
#include "veins/modules/utility/Consts80211p.h"
#include "veins/modules/messages/AirFrame11p_m.h"
#include "veins/modules/utility/MacToPhyControlInfo11p.h"
using namespace veins;
using std::unique_ptr;
Define_Module(veins::PhyLayer80211p);
void PhyLayer80211p::initialize(int stage)
{
if (stage == 0) {
// get ccaThreshold before calling BasePhyLayer::initialize() which instantiates the deciders
ccaThreshold = pow(10, par("ccaThreshold").doubleValue() / 10);
allowTxDuringRx = par("allowTxDuringRx").boolValue();
collectCollisionStatistics = par("collectCollisionStatistics").boolValue();
// Create frequency mappings and initialize spectrum for signal representation
Spectrum::Frequencies freqs;
for (auto& channel : IEEE80211ChannelFrequencies) {
freqs.push_back(channel.second - 5e6);
freqs.push_back(channel.second);
freqs.push_back(channel.second + 5e6);
}
overallSpectrum = Spectrum(freqs);
}
BasePhyLayer::initialize(stage);
}
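// Illustration (explanatory comment, not in the original sources): for the
// control channel at 5.890 GHz, the loop in initialize() adds the bins
// 5.885e9, 5.890e9 and 5.895e9 Hz, i.e. every channel is represented by its
// center frequency plus the two adjacent 5 MHz samples later populated by
// attachSignal().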
unique_ptr<AnalogueModel> PhyLayer80211p::getAnalogueModelFromName(std::string name, ParameterMap& params)
{
if (name == "SimplePathlossModel") {
return initializeSimplePathlossModel(params);
}
else if (name == "BreakpointPathlossModel") {
return initializeBreakpointPathlossModel(params);
}
else if (name == "PERModel") {
return initializePERModel(params);
}
else if (name == "SimpleObstacleShadowing") {
return initializeSimpleObstacleShadowing(params);
}
else if (name == "VehicleObstacleShadowing") {
return initializeVehicleObstacleShadowing(params);
}
else if (name == "TwoRayInterferenceModel") {
if (world->use2D()) throw cRuntimeError("The TwoRayInterferenceModel uses nodes' z-position as the antenna height over ground. Refusing to work in a 2D world");
return initializeTwoRayInterferenceModel(params);
}
else if (name == "NakagamiFading") {
return initializeNakagamiFading(params);
}
return BasePhyLayer::getAnalogueModelFromName(name, params);
}
unique_ptr<AnalogueModel> PhyLayer80211p::initializeBreakpointPathlossModel(ParameterMap& params)
{
double alpha1 = -1, alpha2 = -1, breakpointDistance = -1;
double L01 = -1, L02 = -1;
bool useTorus = world->useTorus();
const Coord& playgroundSize = *(world->getPgs());
ParameterMap::iterator it;
it = params.find("alpha1");
if (it != params.end()) { // parameter alpha1 has been specified in config.xml
// set alpha1
alpha1 = it->second.doubleValue();
EV_TRACE << "createPathLossModel(): alpha1 set from config.xml to " << alpha1 << endl;
// check whether alpha is not smaller than specified in ConnectionManager
if (cc->hasPar("alpha") && alpha1 < cc->par("alpha").doubleValue()) {
// throw error
throw cRuntimeError("TestPhyLayer::createPathLossModel(): alpha can't be smaller than specified in \
ConnectionManager. Please adjust your config.xml file accordingly");
}
}
it = params.find("L01");
if (it != params.end()) {
L01 = it->second.doubleValue();
}
it = params.find("L02");
if (it != params.end()) {
L02 = it->second.doubleValue();
}
it = params.find("alpha2");
if (it != params.end()) { // parameter alpha2 has been specified in config.xml
// set alpha2
alpha2 = it->second.doubleValue();
EV_TRACE << "createPathLossModel(): alpha2 set from config.xml to " << alpha2 << endl;
// check whether alpha is not smaller than specified in ConnectionManager
if (cc->hasPar("alpha") && alpha2 < cc->par("alpha").doubleValue()) {
// throw error
throw cRuntimeError("TestPhyLayer::createPathLossModel(): alpha can't be smaller than specified in \
ConnectionManager. Please adjust your config.xml file accordingly");
}
}
it = params.find("breakpointDistance");
if (it != params.end()) { // parameter breakpointDistance has been specified in config.xml
breakpointDistance = it->second.doubleValue();
EV_TRACE << "createPathLossModel(): breakpointDistance set from config.xml to " << breakpointDistance << endl;
}
if (alpha1 == -1 || alpha2 == -1 || breakpointDistance == -1 || L01 == -1 || L02 == -1) {
throw cRuntimeError("Undefined parameters for breakpointPathlossModel. Please check your configuration.");
}
return make_unique<BreakpointPathlossModel>(this, L01, L02, alpha1, alpha2, breakpointDistance, useTorus, playgroundSize);
}
unique_ptr<AnalogueModel> PhyLayer80211p::initializeTwoRayInterferenceModel(ParameterMap& params)
{
ASSERT(params.count("DielectricConstant") == 1);
double dielectricConstant = params["DielectricConstant"].doubleValue();
return make_unique<TwoRayInterferenceModel>(this, dielectricConstant);
}
unique_ptr<AnalogueModel> PhyLayer80211p::initializeNakagamiFading(ParameterMap& params)
{
bool constM = params["constM"].boolValue();
double m = 0;
if (constM) {
m = params["m"].doubleValue();
}
return make_unique<NakagamiFading>(this, constM, m);
}
unique_ptr<AnalogueModel> PhyLayer80211p::initializeSimplePathlossModel(ParameterMap& params)
{
// init with default value
double alpha = 2.0;
bool useTorus = world->useTorus();
const Coord& playgroundSize = *(world->getPgs());
// get alpha-coefficient from config
ParameterMap::iterator it = params.find("alpha");
if (it != params.end()) { // parameter alpha has been specified in config.xml
// set alpha
alpha = it->second.doubleValue();
EV_TRACE << "createPathLossModel(): alpha set from config.xml to " << alpha << endl;
// check whether alpha is not smaller than specified in ConnectionManager
if (cc->hasPar("alpha") && alpha < cc->par("alpha").doubleValue()) {
// throw error
throw cRuntimeError("TestPhyLayer::createPathLossModel(): alpha can't be smaller than specified in \
ConnectionManager. Please adjust your config.xml file accordingly");
}
}
else // alpha has not been specified in config.xml
{
if (cc->hasPar("alpha")) { // parameter alpha has been specified in ConnectionManager
// set alpha according to ConnectionManager
alpha = cc->par("alpha").doubleValue();
EV_TRACE << "createPathLossModel(): alpha set from ConnectionManager to " << alpha << endl;
}
else // alpha has not been specified in ConnectionManager
{
// keep alpha at default value
EV_TRACE << "createPathLossModel(): alpha set from default value to " << alpha << endl;
}
}
return make_unique<SimplePathlossModel>(this, alpha, useTorus, playgroundSize);
}
unique_ptr<AnalogueModel> PhyLayer80211p::initializePERModel(ParameterMap& params)
{
double per = params["packetErrorRate"].doubleValue();
return make_unique<PERModel>(this, per);
}
unique_ptr<Decider> PhyLayer80211p::getDeciderFromName(std::string name, ParameterMap& params)
{
if (name == "Decider80211p") {
protocolId = IEEE_80211;
return initializeDecider80211p(params);
}
return BasePhyLayer::getDeciderFromName(name, params);
}
unique_ptr<AnalogueModel> PhyLayer80211p::initializeSimpleObstacleShadowing(ParameterMap& params)
{
// init with default value
bool useTorus = world->useTorus();
const Coord& playgroundSize = *(world->getPgs());
ParameterMap::iterator it;
ObstacleControl* obstacleControlP = ObstacleControlAccess().getIfExists();
if (!obstacleControlP) throw cRuntimeError("initializeSimpleObstacleShadowing(): cannot find ObstacleControl module");
return make_unique<SimpleObstacleShadowing>(this, *obstacleControlP, useTorus, playgroundSize);
}
unique_ptr<AnalogueModel> PhyLayer80211p::initializeVehicleObstacleShadowing(ParameterMap& params)
{
// init with default value
bool useTorus = world->useTorus();
const Coord& playgroundSize = *(world->getPgs());
ParameterMap::iterator it;
VehicleObstacleControl* vehicleObstacleControlP = VehicleObstacleControlAccess().getIfExists();
if (!vehicleObstacleControlP) throw cRuntimeError("initializeVehicleObstacleShadowing(): cannot find VehicleObstacleControl module");
return make_unique<VehicleObstacleShadowing>(this, *vehicleObstacleControlP, useTorus, playgroundSize);
}
unique_ptr<Decider> PhyLayer80211p::initializeDecider80211p(ParameterMap& params)
{
double centerFreq = params["centerFrequency"];
auto dec = make_unique<Decider80211p>(this, this, minPowerLevel, ccaThreshold, allowTxDuringRx, centerFreq, findHost()->getIndex(), collectCollisionStatistics);
dec->setPath(getParentModule()->getFullPath());
return unique_ptr<Decider>(std::move(dec));
}
void PhyLayer80211p::changeListeningChannel(Channel channel)
{
Decider80211p* dec = dynamic_cast<Decider80211p*>(decider.get());
ASSERT(dec);
double freq = IEEE80211ChannelFrequencies.at(channel);
dec->changeFrequency(freq);
}
void PhyLayer80211p::handleSelfMessage(cMessage* msg)
{
switch (msg->getKind()) {
// transmission over
case TX_OVER: {
ASSERT(msg == txOverTimer);
sendControlMsgToMac(new cMessage("Transmission over", TX_OVER));
// check if there is another packet on the chan, and change the chan-state to idle
Decider80211p* dec = dynamic_cast<Decider80211p*>(decider.get());
ASSERT(dec);
if (dec->cca(simTime(), nullptr)) {
// chan is idle
EV_TRACE << "Channel idle after transmit!\n";
dec->setChannelIdleStatus(true);
}
else {
EV_TRACE << "Channel not yet idle after transmit!\n";
}
break;
}
// radio switch over
case RADIO_SWITCHING_OVER:
ASSERT(msg == radioSwitchingOverTimer);
BasePhyLayer::finishRadioSwitching();
break;
// AirFrame
case AIR_FRAME:
BasePhyLayer::handleAirFrame(static_cast<AirFrame*>(msg));
break;
default:
break;
}
}
unique_ptr<AirFrame> PhyLayer80211p::createAirFrame(cPacket* macPkt)
{
return make_unique<AirFrame11p>(macPkt->getName(), AIR_FRAME);
}
void PhyLayer80211p::attachSignal(AirFrame* airFrame, cObject* ctrlInfo)
{
const auto ctrlInfo11p = check_and_cast<MacToPhyControlInfo11p*>(ctrlInfo);
const auto duration = getFrameDuration(airFrame->getEncapsulatedPacket()->getBitLength(), ctrlInfo11p->mcs);
ASSERT(duration > 0);
Signal signal(overallSpectrum, simTime(), duration);
auto freqIndex = overallSpectrum.indexOf(IEEE80211ChannelFrequencies.at(ctrlInfo11p->channelNr));
signal.at(freqIndex - 1) = ctrlInfo11p->txPower_mW;
signal.at(freqIndex) = ctrlInfo11p->txPower_mW;
signal.at(freqIndex + 1) = ctrlInfo11p->txPower_mW;
signal.setDataStart(freqIndex - 1);
signal.setDataEnd(freqIndex + 1);
signal.setCenterFrequencyIndex(freqIndex);
// copy the signal into the AirFrame
airFrame->setSignal(signal);
airFrame->setDuration(signal.getDuration());
airFrame->setMcs(static_cast<int>(ctrlInfo11p->mcs));
}
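// Hedged usage sketch (illustration only; it assumes MacToPhyControlInfo11p
// exposes a constructor taking exactly the three fields read above):
//
// auto* ctrl = new MacToPhyControlInfo11p(Channel::cch, MCS::ofdm_qpsk_r_1_2, 20.0 /* mW */);
// macPkt->setControlInfo(ctrl);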
int PhyLayer80211p::getRadioState()
{
return BasePhyLayer::getRadioState();
};
simtime_t PhyLayer80211p::setRadioState(int rs)
{
if (rs == Radio::TX) decider->switchToTx();
return BasePhyLayer::setRadioState(rs);
}
void PhyLayer80211p::setCCAThreshold(double ccaThreshold_dBm)
{
ccaThreshold = pow(10, ccaThreshold_dBm / 10);
Decider80211p* dec = dynamic_cast<Decider80211p*>(decider.get());
ASSERT(dec);
dec->setCCAThreshold(ccaThreshold_dBm);
}
double PhyLayer80211p::getCCAThreshold()
{
return 10 * log10(ccaThreshold);
}
void PhyLayer80211p::notifyMacAboutRxStart(bool enable)
{
Decider80211p* dec = dynamic_cast<Decider80211p*>(decider.get());
ASSERT(dec);
dec->setNotifyRxStart(enable);
}
void PhyLayer80211p::requestChannelStatusIfIdle()
{
Enter_Method_Silent();
Decider80211p* dec = dynamic_cast<Decider80211p*>(decider.get());
ASSERT(dec);
if (dec->cca(simTime(), nullptr)) {
// chan is idle
EV_TRACE << "Request channel status: channel idle!\n";
dec->setChannelIdleStatus(true);
}
}
simtime_t PhyLayer80211p::getFrameDuration(int payloadLengthBits, MCS mcs) const
{
Enter_Method_Silent();
ASSERT(mcs != MCS::undefined);
auto ndbps = getNDBPS(mcs);
// calculate frame duration according to Equation (17-29) of the IEEE 802.11-2007 standard
return PHY_HDR_PREAMBLE_DURATION + PHY_HDR_PLCPSIGNAL_DURATION + T_SYM_80211P * ceil(static_cast<double>(16 + payloadLengthBits + 6) / (ndbps));
}
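// Worked example (illustration only, assuming the usual 802.11p timing
// constants of 32 us preamble, 8 us PLCP SIGNAL and 8 us OFDM symbol): a
// 500-bit payload at MCS::ofdm_qpsk_r_1_2 (NDBPS = 48 data bits per symbol
// on the 10 MHz channel) yields
// 32us + 8us + 8us * ceil((16 + 500 + 6) / 48.0) = 40us + 8us * 11 = 128us.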
// File: AICP-main/veins/src/veins/modules/phy/PhyLayer80211p.h
//
// Copyright (C) 2011 David Eckhoff <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
#include "veins/base/phyLayer/BasePhyLayer.h"
#include "veins/base/toolbox/Spectrum.h"
#include "veins/modules/mac/ieee80211p/Mac80211pToPhy11pInterface.h"
#include "veins/modules/phy/Decider80211p.h"
#include "veins/modules/analogueModel/SimplePathlossModel.h"
#include "veins/base/connectionManager/BaseConnectionManager.h"
#include "veins/modules/phy/Decider80211pToPhy80211pInterface.h"
#include "veins/base/utils/Move.h"
namespace veins {
/**
* @brief
* Adaptation of the PhyLayer class for 802.11p.
*
* @ingroup phyLayer
*
* @see DemoBaseApplLayer
* @see Mac1609_4
* @see PhyLayer80211p
* @see Decider80211p
*/
class VEINS_API PhyLayer80211p : public BasePhyLayer, public Mac80211pToPhy11pInterface, public Decider80211pToPhy80211pInterface {
public:
void initialize(int stage) override;
/**
* @brief Set the carrier sense threshold
* @param ccaThreshold_dBm the cca threshold in dBm
*/
void setCCAThreshold(double ccaThreshold_dBm) override;
/**
* @brief Return the cca threshold in dBm
*/
double getCCAThreshold();
/**
* @brief Enable notifications about PHY-RXSTART.indication in MAC
* @param enable true if Mac needs to be notified about it
*/
void notifyMacAboutRxStart(bool enable) override;
/**
* @brief Explicit request to PHY for the channel status
*/
void requestChannelStatusIfIdle() override;
protected:
/** @brief CCA threshold. See Decider80211p for details */
double ccaThreshold;
/** @brief enable/disable detection of packet collisions */
bool collectCollisionStatistics;
/** @brief allows/disallows interruption of current reception for txing
*
* See detailed description in Decider80211p
*/
bool allowTxDuringRx;
enum ProtocolIds {
IEEE_80211 = 12123
};
/**
* @brief Creates and returns an instance of the AnalogueModel with the
* specified name.
*
* Is able to initialize the following AnalogueModels:
*/
virtual std::unique_ptr<AnalogueModel> getAnalogueModelFromName(std::string name, ParameterMap& params) override;
/**
* @brief Creates and initializes a SimplePathlossModel with the
* passed parameter values.
*/
std::unique_ptr<AnalogueModel> initializeSimplePathlossModel(ParameterMap& params);
/**
* @brief Creates and initializes an AntennaModel with the
* passed parameter values.
*/
std::unique_ptr<AnalogueModel> initializeAntennaModel(ParameterMap& params);
/**
* @brief Creates and initializes a BreakpointPathlossModel with the
* passed parameter values.
*/
virtual std::unique_ptr<AnalogueModel> initializeBreakpointPathlossModel(ParameterMap& params);
/**
* @brief Creates and initializes a SimpleObstacleShadowing with the
* passed parameter values.
*/
std::unique_ptr<AnalogueModel> initializeSimpleObstacleShadowing(ParameterMap& params);
/**
* @brief Creates and initializes a VehicleObstacleShadowing with the
* passed parameter values.
*/
std::unique_ptr<AnalogueModel> initializeVehicleObstacleShadowing(ParameterMap& params);
/**
* @brief Creates a simple Packet Error Rate model that attenuates a percentage
* of the packets to zero, and does not attenuate the other packets.
*
*/
virtual std::unique_ptr<AnalogueModel> initializePERModel(ParameterMap& params);
/**
* @brief Creates and initializes a TwoRayInterferenceModel with the
* passed parameter values.
*/
std::unique_ptr<AnalogueModel> initializeTwoRayInterferenceModel(ParameterMap& params);
/**
* @brief Creates and initializes a NakagamiFading with the
* passed parameter values.
*/
std::unique_ptr<AnalogueModel> initializeNakagamiFading(ParameterMap& params);
/**
* @brief Creates and returns an instance of the Decider with the specified
* name.
*
* Is able to initialize the following Deciders:
*
* - Decider80211p
*/
virtual std::unique_ptr<Decider> getDeciderFromName(std::string name, ParameterMap& params) override;
/**
* @brief Initializes a new Decider80211 from the passed parameter map.
*/
virtual std::unique_ptr<Decider> initializeDecider80211p(ParameterMap& params);
/**
* Create a protocol-specific AirFrame
* Overloaded to create a specialize AirFrame11p.
*/
std::unique_ptr<AirFrame> createAirFrame(cPacket* macPkt) override;
/**
* Attach a signal to the given AirFrame.
*
* The attached Signal corresponds to the IEEE 802.11p standard.
* Parameters for the signal are passed in the control info.
* The indicated power levels are set up on the specified center frequency, as well as the neighboring 5MHz.
*
* @note The control info must be of type MacToPhyControlInfo11p
*/
void attachSignal(AirFrame* airFrame, cObject* ctrlInfo) override;
void changeListeningChannel(Channel channel) override;
virtual simtime_t getFrameDuration(int payloadLengthBits, MCS mcs) const override;
void handleSelfMessage(cMessage* msg) override;
int getRadioState() override;
simtime_t setRadioState(int rs) override;
};
} // namespace veins
// File: AICP-main/veins/src/veins/modules/phy/SampledAntenna1D.cc
//
// Copyright (C) 2016 Alexander Brummer <[email protected]>
// Copyright (C) 2018 Fabian Bronner <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#include "veins/modules/phy/SampledAntenna1D.h"
#include "veins/base/utils/FWMath.h"
using namespace veins;
SampledAntenna1D::SampledAntenna1D(std::vector<double>& values, std::string offsetType, std::vector<double>& offsetParams, std::string rotationType, std::vector<double>& rotationParams, cRNG* rng)
: antennaGains(values.size() + 1)
{
distance = (2 * M_PI) / values.size();
// instantiate a random number generator for sample offsets if one is specified
cRandom* offsetGen = nullptr;
if (offsetType == "uniform") {
if (!math::almost_equal(offsetParams[0], -offsetParams[1])) {
throw cRuntimeError("SampledAntenna1D::SampledAntenna1D(): The mean of the random distribution for the samples' offsets has to be 0.");
}
offsetGen = new cUniform(rng, offsetParams[0], offsetParams[1]);
}
else if (offsetType == "normal") {
if (!math::almost_equal<double>(offsetParams[0], 0)) {
throw cRuntimeError("SampledAntenna1D::SampledAntenna1D(): The mean of the random distribution for the samples' offsets has to be 0.");
}
offsetGen = new cNormal(rng, offsetParams[0], offsetParams[1]);
}
else if (offsetType == "triang") {
if (!math::almost_equal<double>((offsetParams[0] + offsetParams[1] + offsetParams[2]) / 3, 0)) {
throw cRuntimeError("SampledAntenna1D::SampledAntenna1D(): The mean of the random distribution for the samples' offsets has to be 0.");
}
offsetGen = new cTriang(rng, offsetParams[0], offsetParams[1], offsetParams[2]);
}
// determine random rotation of the antenna if specified
cRandom* rotationGen = nullptr;
if (rotationType == "uniform") {
rotationGen = new cUniform(rng, rotationParams[0], rotationParams[1]);
}
else if (rotationType == "normal") {
rotationGen = new cNormal(rng, rotationParams[0], rotationParams[1]);
}
else if (rotationType == "triang") {
rotationGen = new cTriang(rng, rotationParams[0], rotationParams[1], rotationParams[2]);
}
rotation = (rotationGen == nullptr) ? 0 : rotationGen->draw();
if (rotationGen != nullptr) delete rotationGen;
// transform to rad
rotation *= (M_PI / 180);
// copy values and apply offset
for (unsigned int i = 0; i < values.size(); i++) {
double offset = 0;
if (offsetGen != nullptr) {
offset = offsetGen->draw();
// transform to rad
offset *= (M_PI / 180);
}
antennaGains[i] = values[i] + offset;
}
if (offsetGen != nullptr) delete offsetGen;
// assign the value of 0 degrees to 360 degrees as well to assure correct interpolation (size allocated already before)
antennaGains[values.size()] = antennaGains[0];
}
SampledAntenna1D::~SampledAntenna1D()
{
}
double SampledAntenna1D::getGain(Coord ownPos, Coord ownOrient, Coord otherPos)
{
// get the line of sight vector
Coord los = otherPos - ownPos;
// calculate angle using atan2
double angle = atan2(los.y, los.x) - atan2(ownOrient.y, ownOrient.x);
// apply possible rotation
angle -= rotation;
// make sure angle is within [0, 2*M_PI)
angle = fmod(angle, 2 * M_PI);
if (angle < 0) angle += 2 * M_PI;
// calculate antennaGain
size_t baseElement = angle / distance;
double offset = (angle - (baseElement * distance)) / distance;
// make sure to not address an element out of antennaGains (baseElement == lastElement implies that offset is zero)
ASSERT((baseElement < antennaGains.size()) && (baseElement != antennaGains.size() - 1 || offset == 0));
double gainValue = antennaGains[baseElement];
if (offset > 0) {
gainValue += offset * (antennaGains[baseElement + 1] - antennaGains[baseElement]);
}
return FWMath::dBm2mW(gainValue);
}
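// Worked example (illustration only): with the four samples {3, -3, 3, -3}
// dBi from the antenna.xml example in SampledAntenna1D.h, the sample spacing
// is 90 degrees. A line-of-sight angle of 45 degrees gives baseElement = 0
// and offset = 0.5, so the interpolated gain is 3 + 0.5 * (-3 - 3) = 0 dBi,
// which FWMath::dBm2mW() turns into the linear factor 1.0.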
double SampledAntenna1D::getLastAngle()
{
return lastAngle / M_PI * 180.0;
}
// File: AICP-main/veins/src/veins/modules/phy/SampledAntenna1D.h
//
// Copyright (C) 2016 Alexander Brummer <[email protected]>
// Copyright (C) 2018 Fabian Bronner <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
#include "veins/base/phyLayer/Antenna.h"
#include <vector>
namespace veins {
/**
* @brief
* This class represents an antenna whose gain is calculated from given samples in the horizontal plane.
* The respective gain is therefore dependent on the azimuth angle.
* The user has to provide the samples, which are assumed to be distributed equidistantly.
* As the power is assumed to be relative to an isotropic radiator, the values have to be given in dBi.
 * The values are stored in a vector, and linear interpolation is applied between adjacent samples.
* Optional randomness in terms of sample offsets and antenna rotation is supported.
*
 * An example antenna.xml for this Antenna can be the following:
* @verbatim
<?xml version="1.0" encoding="UTF-8"?>
<root>
<Antenna type="SampledAntenna1D" id="antenna1">
<!-- Write the samples in the value attribute, separated by spaces. The values will be -->
<!-- distributed equidistantly, e.g. 4 values will be placed at 0°, 90°, 180° and 270° -->
<parameter name="samples" type="string" value="3 -3 3 -3"/>
<!-- Options for random offsets are as follows. -->
<!-- The mean of the given distribution has to be 0 (so that the overall offset is close to 0dBi) -->
<!-- <parameter name="random-offsets" type="string" value="uniform a b"/> -->
<!-- <parameter name="random-offsets" type="string" value="normal mean stddev"/> -->
<!-- <parameter name="random-offsets" type="string" value="triang a b c"/> -->
<parameter name="random-offsets" type="string" value="uniform -0.01 0.01"/>
<!-- Options for random rotation of the antennas are the same, but mean doesn't have to be 0. -->
<parameter name="random-rotation" type="string" value="uniform -1 1"/>
</Antenna>
</root>
@endverbatim
*
*
* @author Alexander Brummer
*
*
* @see Antenna
* @see BasePhyLayer
*/
class VEINS_API SampledAntenna1D : public Antenna {
public:
/**
* @brief Constructor for the sampled antenna.
*
* @param values - contains the samples representing the antenna
* @param offsetType - name of random distribution to use for the random offset of the samples
* @param offsetParams - contains the parameters for the offset random distribution
* @param rotationType - name of random distribution to use for the random rotation of the whole antenna
* @param rotationParams - contains the parameters for the rotation random distribution
* @param rng - pointer to the random number generator to use
*/
SampledAntenna1D(std::vector<double>& values, std::string offsetType, std::vector<double>& offsetParams, std::string rotationType, std::vector<double>& rotationParams, cRNG* rng);
/**
* @brief Destructor of the sampled antenna.
*
     * The samples are stored by value, so there is nothing to clean up explicitly.
*/
~SampledAntenna1D() override;
/**
* @brief Calculates this antenna's gain based on the direction the signal is coming from/sent in.
*
* @param ownPos - coordinates of this antenna
* @param ownOrient - states the direction the antenna (i.e. the car) is pointing at
* @param otherPos - coordinates of the other antenna which this antenna is currently communicating with
* @return Returns the gain this antenna achieves depending on the computed direction.
* If the angle is within two samples, linear interpolation is applied.
*/
double getGain(Coord ownPos, Coord ownOrient, Coord otherPos) override;
double getLastAngle() override;
private:
/**
* @brief Used to store the antenna's samples.
*/
std::vector<double> antennaGains;
double distance;
/**
* @brief An optional random rotation of the antenna is stored in this field and applied every time
* the gain has to be calculated.
*/
double rotation;
double lastAngle;
};
} // namespace veins
// File: AICP-main/veins/src/veins/modules/utility/BBoxLookup.cc
//
// Copyright (C) 2019 Dominik S. Buse <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#include <cmath>
#include "veins/modules/utility/BBoxLookup.h"
namespace {
using Point = veins::BBoxLookup::Point;
using Box = veins::BBoxLookup::Box;
/**
* Helper structure representing a wireless ray from a sender to a receiver.
*
* Contains pre-computed values to speed up calls to intersect with the same ray but different boxes.
*/
struct Ray {
Point origin;
Point destination;
Point direction;
Point invDirection;
struct {
size_t x;
size_t y;
} sign;
double length;
};
/**
* Return a Ray struct for fast intersection tests from sender to receiver.
*/
Ray makeRay(const Point& sender, const Point& receiver)
{
const double dir_x = receiver.x - sender.x;
const double dir_y = receiver.y - sender.y;
Ray ray;
ray.origin = sender;
ray.destination = receiver;
ray.length = std::sqrt(dir_x * dir_x + dir_y * dir_y);
ray.direction.x = dir_x / ray.length;
ray.direction.y = dir_y / ray.length;
ray.invDirection.x = 1.0 / ray.direction.x;
ray.invDirection.y = 1.0 / ray.direction.y;
ray.sign.x = ray.invDirection.x < 0;
ray.sign.y = ray.invDirection.y < 0;
return ray;
}
/**
* Return whether ray intersects with box.
*
* Based on:
* Amy Williams, Steve Barrus, R. Keith Morley & Peter Shirley (2005) An Efficient and Robust Ray-Box Intersection Algorithm, Journal of Graphics Tools, 10:1, 49-54, DOI: 10.1080/2151237X.2005.10129188
*/
bool intersects(const Ray& ray, const Box& box)
{
const double x[2]{box.p1.x, box.p2.x};
const double y[2]{box.p1.y, box.p2.y};
double tmin = (x[ray.sign.x] - ray.origin.x) * ray.invDirection.x;
double tmax = (x[1 - ray.sign.x] - ray.origin.x) * ray.invDirection.x;
double tymin = (y[ray.sign.y] - ray.origin.y) * ray.invDirection.y;
double tymax = (y[1 - ray.sign.y] - ray.origin.y) * ray.invDirection.y;
if ((tmin > tymax) || (tymin > tmax)) return false;
if (tymin > tmin) tmin = tymin;
if (tymax < tmax) tmax = tymax;
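    // restrict the hit interval to the segment between origin and destination:
    // tmax > 0 rejects boxes entirely behind the sender,
    // tmin < ray.length rejects boxes entirely beyond the receiver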
return (tmin < ray.length) && (tmax > 0);
}
} // anonymous namespace
namespace veins {
BBoxLookup::BBoxLookup(const std::vector<Obstacle*>& obstacles, std::function<BBoxLookup::Box(Obstacle*)> makeBBox, double scenarioX, double scenarioY, int cellSize)
: bboxes()
, obstacleLookup()
, bboxCells()
, cellSize(cellSize)
, numCols(std::floor(scenarioX / cellSize) + 1)
, numRows(std::floor(scenarioY / cellSize) + 1)
{
// phase 1: build unordered collection of cells
    // initialize proto-cells (cells in non-contiguous memory)
ASSERT(scenarioX > 0);
ASSERT(scenarioY > 0);
ASSERT(numCols * cellSize >= scenarioX);
ASSERT(numRows * cellSize >= scenarioY);
const size_t numCells = numCols * numRows;
std::vector<std::vector<BBoxLookup::Box>> protoCells(numCells);
std::vector<std::vector<Obstacle*>> protoLookup(numCells);
// fill protoCells with boundingBoxes
size_t numEntries = 0;
for (const auto obstaclePtr : obstacles) {
auto bbox = makeBBox(obstaclePtr);
const size_t fromCol = std::max(0, int(bbox.p1.x / cellSize));
const size_t toCol = std::max(0, int(bbox.p2.x / cellSize));
const size_t fromRow = std::max(0, int(bbox.p1.y / cellSize));
const size_t toRow = std::max(0, int(bbox.p2.y / cellSize));
for (size_t row = fromRow; row <= toRow; ++row) {
for (size_t col = fromCol; col <= toCol; ++col) {
const size_t cellIndex = col + row * numCols;
protoCells[cellIndex].push_back(bbox);
protoLookup[cellIndex].push_back(obstaclePtr);
++numEntries;
ASSERT(protoCells[cellIndex].size() == protoLookup[cellIndex].size());
}
}
}
// phase 2: derive read-only data structure with fast lookup
bboxes.reserve(numEntries);
obstacleLookup.reserve(numEntries);
bboxCells.reserve(numCells);
size_t index = 0;
for (size_t row = 0; row < numRows; ++row) {
for (size_t col = 0; col < numCols; ++col) {
const size_t cellIndex = col + row * numCols;
auto& currentCell = protoCells.at(cellIndex);
auto& currentLookup = protoLookup.at(cellIndex);
ASSERT(currentCell.size() == currentLookup.size());
const size_t count = currentCell.size();
// copy over bboxes and obstacle lookups (in strict order)
for (size_t entryIndex = 0; entryIndex < count; ++entryIndex) {
bboxes.push_back(currentCell.at(entryIndex));
obstacleLookup.push_back(currentLookup.at(entryIndex));
}
// create lookup table for this cell
bboxCells.push_back({index, count});
            // forward index to the beginning of the next cell
index += count;
ASSERT(bboxes.size() == index);
}
}
ASSERT(bboxes.size() == numEntries);
ASSERT(bboxes.size() == obstacleLookup.size());
}
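// Worked example for the grid layout above (for orientation only): a 1000 m x 500 m
// scenario with cellSize = 250 yields numCols = floor(1000 / 250) + 1 = 5 and
// numRows = floor(500 / 250) + 1 = 3, i.e. 15 cells; an obstacle whose bbox spans
// x in [200, 300] and y in [0, 100] is registered in cells (col 0, row 0) and (col 1, row 0).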
std::vector<Obstacle*> BBoxLookup::findOverlapping(Point sender, Point receiver) const
{
std::vector<Obstacle*> overlappingObstacles;
const Box bbox{
{std::min(sender.x, receiver.x), std::min(sender.y, receiver.y)},
{std::max(sender.x, receiver.x), std::max(sender.y, receiver.y)},
};
// determine coordinates for all cells touched by bbox
const size_t firstCol = std::max(0, int(bbox.p1.x / cellSize));
const size_t lastCol = std::max(0, int(bbox.p2.x / cellSize));
const size_t firstRow = std::max(0, int(bbox.p1.y / cellSize));
const size_t lastRow = std::max(0, int(bbox.p2.y / cellSize));
ASSERT(lastCol < numCols && lastRow < numRows);
// precompute transmission ray properties
const Ray ray = makeRay(sender, receiver);
// iterate over cells
for (size_t row = firstRow; row <= lastRow; ++row) {
for (size_t col = firstCol; col <= lastCol; ++col) {
// skip cell if ray does not intersect with the cell.
const Box cellBox = {{static_cast<double>(col * cellSize), static_cast<double>(row * cellSize)}, {static_cast<double>((col + 1) * cellSize), static_cast<double>((row + 1) * cellSize)}};
if (!intersects(ray, cellBox)) continue;
// derive cell for current cell coordinates
const size_t cellIndex = col + row * numCols;
const BBoxCell& cell = bboxCells.at(cellIndex);
// iterate over bboxes in each cell
for (size_t bboxIndex = cell.index; bboxIndex < cell.index + cell.count; ++bboxIndex) {
const Box& current = bboxes.at(bboxIndex);
// check for overlap with bbox (fast rejection)
if (current.p2.x < bbox.p1.x) continue;
if (current.p1.x > bbox.p2.x) continue;
if (current.p2.y < bbox.p1.y) continue;
if (current.p1.y > bbox.p2.y) continue;
// derive corresponding obstacle
if (!intersects(ray, current)) continue;
overlappingObstacles.push_back(obstacleLookup.at(bboxIndex));
}
}
}
return overlappingObstacles;
}
} // namespace veins
// File: AICP-main/veins/src/veins/modules/utility/BBoxLookup.h
//
// Copyright (C) 2019 Dominik S. Buse <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
#include <algorithm>
#include <functional>
#include <vector>
#include "veins/veins.h"
namespace veins {
class Obstacle;
/**
 * Fast grid-based spatial data structure to find obstacles (geometric shapes) in a bounding box.
*
* Stores bounding boxes for a set of obstacles and allows searching for them via another bounding box.
*
* Only considers a 2-dimensional plane (x and y coordinates).
*
 * In principle, any kind (or implementation) of an obstacle/shape/polygon is possible.
* There only has to be a function to derive a bounding box for a given obstacle.
* Obstacle instances are stored as pointers, so the lifetime of the obstacle instances is not managed by this class.
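 *
 * A minimal usage sketch (the bounding-box helper and the obstacle collection are
 * illustrative assumptions, not part of this API):
 * @verbatim
   std::vector<Obstacle*> obstacles; // filled elsewhere
   std::function<BBoxLookup::Box(Obstacle*)> toBBox = [](Obstacle* o) {
       return BBoxLookup::Box{{0.0, 0.0}, {10.0, 10.0}}; // derive from the obstacle's actual shape in real code
   };
   BBoxLookup lookup(obstacles, toBBox, 2500.0, 2500.0, 250);
   std::vector<Obstacle*> candidates = lookup.findOverlapping({0.0, 0.0}, {1000.0, 500.0});
   @endverbatim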
*/
class VEINS_API BBoxLookup {
public:
struct Point {
double x;
double y;
};
    // array-of-structs approach
    // cache line size: 64 byte = 8 x 8-byte double = 2 bboxes per line
    // bbox coordinates are inherently local: checking one coordinate means the others are likely checked as well
struct Box {
Point p1;
Point p2;
};
struct BBoxCell {
size_t index; /**< index of the first element of this cell in bboxes */
size_t count; /**< number of elements in this cell; index + number = index of last element */
};
BBoxLookup() = default;
BBoxLookup(const std::vector<Obstacle*>& obstacles, std::function<BBoxLookup::Box(Obstacle*)> makeBBox, double scenarioX, double scenarioY, int cellSize = 250);
/**
* Return all obstacles which have their bounding box touched by the transmission from sender to receiver.
*
* The obstacles itself may not actually overlap with transmission (false positives are possible).
*/
std::vector<Obstacle*> findOverlapping(Point sender, Point receiver) const;
private:
// NOTE: obstacles may occur multiple times in bboxes/obstacleLookup (if they are in multiple cells)
    std::vector<Box> bboxes; /**< ALL bboxes in one chunk of contiguous memory, ordered by cells */
std::vector<Obstacle*> obstacleLookup; /**< bboxes[i] belongs to instance in obstacleLookup[i] */
std::vector<BBoxCell> bboxCells; /**< flattened matrix of X * Y BBoxCell instances */
int cellSize = 0;
size_t numCols = 0; /**< X BBoxCell instances in a row */
size_t numRows = 0; /**< Y BBoxCell instances in a column */
};
} // namespace veins
// File: AICP-main/veins/src/veins/modules/utility/Consts80211p.h
//
// Copyright (C) 2011 David Eckhoff <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
#include <stdint.h>
#include "veins/veins.h"
#include "veins/modules/utility/ConstsPhy.h"
using omnetpp::SimTime;
namespace veins {
/** @brief Bit rates for 802.11p
*
* as defined in Table 17-14 MIB attribute default values/ranges in the IEEE 802.11-2007 standard
*/
const uint64_t NUM_BITRATES_80211P = 8;
const uint64_t BITRATES_80211P[] = {3000000, 4500000, 6000000, 9000000, 12000000, 18000000, 24000000, 27000000};
/** @brief Number of Data Bits Per Symbol (N_NBPS) corresponding to bitrates in BITRATES_80211P
*
* as defined in Table 17-3 in the IEEE 802.11-2007 standard
*/
const uint32_t N_DBPS_80211P[] = {24, 36, 48, 72, 96, 144, 192, 216};
/** @brief Symbol interval
*
* as defined in Table 17-4 in the IEEE 802.11-2007 standard
*/
const double T_SYM_80211P = 8e-6;
/** @brief Length (in bits) of SERVICE field in PHY HEADER
*
* as defined in 17.3.2 PLCP frame format in the IEEE 802.11-2007 standard
*/
const int PHY_HDR_SERVICE_LENGTH = 16;
/** @brief Length (in bits) of Tail field in PHY PPDU
*
* as defined in 17.3.2 PLCP frame format in the IEEE 802.11-2007 standard
*/
const int PHY_TAIL_LENGTH = 6;
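// Worked example combining the constants above (per the TXTIME calculation in
// IEEE 802.11-2007, 17.4.3): a 100 byte PSDU sent at 3 Mbps (N_DBPS = 24) occupies
// ceil((PHY_HDR_SERVICE_LENGTH + 8 * 100 + PHY_TAIL_LENGTH) / 24.0) = ceil(822 / 24.0) = 35
// OFDM symbols, i.e. 35 * T_SYM_80211P = 280 us of payload airtime,
// in addition to the preamble and PLCP signal durations defined below.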
/** @brief Duration of the PLCP Preamble
*
* as defined in Table 17.4 Timing-related parameters in the IEEE 802.11-2007 standard
*/
const double PHY_HDR_PREAMBLE_DURATION = 32e-6;
/** @brief Duration of the PLCP Signal
*
* as defined in Table 17.4 Timing-related parameters in the IEEE 802.11-2007 standard
*/
const double PHY_HDR_PLCPSIGNAL_DURATION = 8e-6;
/** @brief Length of the PLCP Signal
*
* as defined in Figure 17.1 PPDU frame format in the IEEE 802.11-2007 standard
*/
const int PHY_HDR_PLCPSIGNAL_LENGTH = 24;
/** @brief Bitrate of the PLCP Signal
*
* as defined in Table 17.4 Timing-related parameters in the IEEE 802.11-2007 standard
* 24 bits in 8e-6 seconds
*/
const uint64_t PHY_HDR_BITRATE = 3000000;
/** @brief Slot Time for 10 MHz channel spacing
*
* as defined in Table 17-15 OFDM PHY characteristics in the IEEE 802.11-2007 standard
*/
const SimTime SLOTLENGTH_11P = SimTime().setRaw(13000000UL);
/** @brief Short interframe space
*
* as defined in Table 17-15 OFDM PHY characteristics in the IEEE 802.11-2007 standard
*/
const SimTime SIFS_11P = SimTime().setRaw(32000000UL);
/** @brief Time it takes to switch from Rx to Tx Mode
*
* as defined in Table 17-15 OFDM PHY characteristics in the IEEE 802.11-2007 standard
*/
const SimTime RADIODELAY_11P = SimTime().setRaw(1000000UL);
/** @brief Contention Window minimal size
*
* as defined in Table 17-15 OFDM PHY characteristics in the IEEE 802.11-2007 standard
*/
const unsigned CWMIN_11P = 15;
/** @brief Contention Window maximal size
*
* as defined in Table 17-15 OFDM PHY characteristics in the IEEE 802.11-2007 standard
*/
const unsigned CWMAX_11P = 1023;
/** @brief 1609.4 slot length
*
* as defined in Table H.1 in the IEEE 1609.4-2010 standard
*/
const SimTime SWITCHING_INTERVAL_11P = SimTime().setRaw(50000000000UL);
/** @brief 1609.4 guard interval
*
* as defined in Table H.1 in the IEEE 1609.4-2010 standard
* It is the sum of SyncTolerance and MaxChSwitchTime as defined in 6.2.5 in the IEEE 1609.4-2010 Standard
*/
const SimTime GUARD_INTERVAL_11P = SimTime().setRaw(4000000000UL);
const Bandwidth BANDWIDTH_11P = Bandwidth::ofdm_10_mhz;
/** @brief Channels as reserved by the FCC
*
*/
enum class Channel {
crit_sol = 172,
sch1 = 174,
sch2 = 176,
cch = 178,
sch3 = 180,
sch4 = 182,
hpps = 184
};
/**
* Maps channel identifier to the corresponding center frequency.
*
* @note Not all entries are defined.
*/
const std::map<Channel, double> IEEE80211ChannelFrequencies = {
{Channel::crit_sol, 5.86e9},
{Channel::sch1, 5.87e9},
{Channel::sch2, 5.88e9},
{Channel::cch, 5.89e9},
{Channel::sch3, 5.90e9},
{Channel::sch4, 5.91e9},
{Channel::hpps, 5.92e9},
};
enum class ChannelType {
control = 0,
service,
};
} // namespace veins
// File: AICP-main/veins/src/veins/modules/utility/ConstsPhy.h
//
// Copyright (C) 2014 Michele Segata <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
#include <cmath>
#include <stdint.h>
#include "veins/veins.h"
namespace veins {
/** @brief Modulation and coding scheme to be used for transmission */
enum class MCS {
// use the default MCS
undefined = -1,
ofdm_bpsk_r_1_2,
ofdm_bpsk_r_3_4,
ofdm_qpsk_r_1_2,
ofdm_qpsk_r_3_4,
ofdm_qam16_r_1_2,
ofdm_qam16_r_3_4,
ofdm_qam64_r_2_3,
ofdm_qam64_r_3_4
};
/** @brief Available bandwidths */
enum class Bandwidth {
ofdm_5_mhz,
ofdm_10_mhz,
ofdm_20_mhz
};
/** @brief Given bandwidth and MCS returns datarate in bits per second */
inline uint64_t getOfdmDatarate(MCS mcs, Bandwidth bw)
{
// divide datarate by div, depending on bandwidth
uint64_t div;
// datarate to be returned
uint64_t dr;
switch (bw) {
case Bandwidth::ofdm_5_mhz:
div = 4;
break;
case Bandwidth::ofdm_10_mhz:
div = 2;
break;
case Bandwidth::ofdm_20_mhz:
default:
div = 1;
break;
}
switch (mcs) {
case MCS::ofdm_bpsk_r_1_2:
dr = 6000000;
break;
case MCS::ofdm_bpsk_r_3_4:
dr = 9000000;
break;
case MCS::ofdm_qpsk_r_1_2:
dr = 12000000;
break;
case MCS::ofdm_qpsk_r_3_4:
dr = 18000000;
break;
case MCS::ofdm_qam16_r_1_2:
dr = 24000000;
break;
case MCS::ofdm_qam16_r_3_4:
dr = 36000000;
break;
case MCS::ofdm_qam64_r_2_3:
dr = 48000000;
break;
case MCS::ofdm_qam64_r_3_4:
dr = 54000000;
break;
default:
dr = 6000000;
break;
}
return (dr / div);
}
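// Example: getOfdmDatarate(MCS::ofdm_qpsk_r_1_2, Bandwidth::ofdm_10_mhz) yields
// 12000000 / 2 = 6000000 bit/s, i.e. the 6 Mbps rate used by 802.11p on 10 MHz channels.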
/** @brief returns the number of data bits per OFDM symbol */
inline uint32_t getNDBPS(MCS mcs)
{
uint32_t ndbps;
switch (mcs) {
case MCS::ofdm_bpsk_r_1_2:
ndbps = 24;
break;
case MCS::ofdm_bpsk_r_3_4:
ndbps = 36;
break;
case MCS::ofdm_qpsk_r_1_2:
ndbps = 48;
break;
case MCS::ofdm_qpsk_r_3_4:
ndbps = 72;
break;
case MCS::ofdm_qam16_r_1_2:
ndbps = 96;
break;
case MCS::ofdm_qam16_r_3_4:
ndbps = 144;
break;
case MCS::ofdm_qam64_r_2_3:
ndbps = 192;
break;
case MCS::ofdm_qam64_r_3_4:
ndbps = 216;
break;
default:
ndbps = 24;
break;
}
return ndbps;
}
/** @brief returns the bandwidth in Hz */
inline uint64_t getBandwidth(Bandwidth bw)
{
    switch (bw) {
    case Bandwidth::ofdm_5_mhz:
        return 5000000;
    case Bandwidth::ofdm_10_mhz:
        return 10000000;
    case Bandwidth::ofdm_20_mhz:
        return 20000000;
    default:
        ASSERT2(false, "Invalid bandwidth requested");
        return -1;
    }
}
/** @brief returns the MCS matching the given datarate and bandwidth */
inline MCS getMCS(uint64_t datarate, Bandwidth bw)
{
if (bw == Bandwidth::ofdm_10_mhz) {
if (datarate == 3000000) {
return MCS::ofdm_bpsk_r_1_2;
}
if (datarate == 4500000) {
return MCS::ofdm_bpsk_r_3_4;
}
if (datarate == 6000000) {
return MCS::ofdm_qpsk_r_1_2;
}
if (datarate == 9000000) {
return MCS::ofdm_qpsk_r_3_4;
}
if (datarate == 12000000) {
return MCS::ofdm_qam16_r_1_2;
}
if (datarate == 18000000) {
return MCS::ofdm_qam16_r_3_4;
}
if (datarate == 24000000) {
return MCS::ofdm_qam64_r_2_3;
}
if (datarate == 27000000) {
return MCS::ofdm_qam64_r_3_4;
}
}
if (bw == Bandwidth::ofdm_20_mhz) {
if (datarate == 6000000) {
return MCS::ofdm_bpsk_r_1_2;
}
if (datarate == 9000000) {
return MCS::ofdm_bpsk_r_3_4;
}
if (datarate == 12000000) {
return MCS::ofdm_qpsk_r_1_2;
}
if (datarate == 18000000) {
return MCS::ofdm_qpsk_r_3_4;
}
if (datarate == 24000000) {
return MCS::ofdm_qam16_r_1_2;
}
if (datarate == 36000000) {
return MCS::ofdm_qam16_r_3_4;
}
if (datarate == 48000000) {
return MCS::ofdm_qam64_r_2_3;
}
if (datarate == 54000000) {
return MCS::ofdm_qam64_r_3_4;
}
}
if (bw == Bandwidth::ofdm_5_mhz) {
if (datarate == 1500000) {
return MCS::ofdm_bpsk_r_1_2;
}
if (datarate == 2250000) {
return MCS::ofdm_bpsk_r_3_4;
}
if (datarate == 3000000) {
return MCS::ofdm_qpsk_r_1_2;
}
if (datarate == 4500000) {
return MCS::ofdm_qpsk_r_3_4;
}
if (datarate == 6000000) {
return MCS::ofdm_qam16_r_1_2;
}
if (datarate == 9000000) {
return MCS::ofdm_qam16_r_3_4;
}
if (datarate == 12000000) {
return MCS::ofdm_qam64_r_2_3;
}
if (datarate == 13500000) {
return MCS::ofdm_qam64_r_3_4;
}
}
ASSERT2(false, "Invalid datarate for required bandwidth");
return MCS::undefined;
}
} // namespace veins
// File: AICP-main/veins/src/veins/modules/utility/HasLogProxy.cc
//
// Copyright (C) 2018 Christoph Sommer <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#include "veins/modules/utility/HasLogProxy.h"
namespace veins {
HasLogProxy::HasLogProxy(cComponent* owner)
: owner(owner)
{
}
const cComponent* HasLogProxy::getThisPtr() const
{
return owner;
}
} // namespace veins
// File: AICP-main/veins/src/veins/modules/utility/HasLogProxy.h
//
// Copyright (C) 2018 Christoph Sommer <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
#include <string>
#include "veins/veins.h"
namespace veins {
/**
* Helper class for logging from classes not derived from cComponent
*/
class VEINS_API HasLogProxy {
public:
HasLogProxy(cComponent* owner);
const cComponent* getThisPtr() const;
protected:
cComponent* owner;
};
} // namespace veins
// File: AICP-main/veins/src/veins/modules/utility/MacToPhyControlInfo11p.h
//
// Copyright (C) 2018-2019 Dominik S. Buse <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
#include "veins/veins.h"
#include "veins/modules/utility/ConstsPhy.h"
#include "veins/modules/utility/Consts80211p.h"
namespace veins {
/**
* Stores information which is needed by the physical layer
* when sending a MacPkt.
*
* @ingroup phyLayer
* @ingroup macLayer
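 *
 * A typical use is to attach an instance to a MAC frame before handing it to the
 * physical layer (sketch; the surrounding packet handling is an illustrative assumption):
 * @verbatim
   macPkt->setControlInfo(new MacToPhyControlInfo11p(Channel::cch, MCS::ofdm_qpsk_r_1_2, 20.0));
   @endverbatim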
*/
struct VEINS_API MacToPhyControlInfo11p : public cObject {
Channel channelNr; ///< Channel number/index used to select frequency.
MCS mcs; ///< The modulation and coding scheme to employ for the associated frame.
double txPower_mW; ///< Transmission power in milliwatts.
MacToPhyControlInfo11p(Channel channelNr, MCS mcs, double txPower_mW)
: channelNr(channelNr)
, mcs(mcs)
, txPower_mW(txPower_mW)
{
}
};
} // namespace veins
// File: AICP-main/veins/src/veins/modules/utility/SignalManager.h
//
// Copyright (C) 2019-2019 Dominik S. Buse <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
#include "veins/veins.h"
#include <functional>
#include <memory>
namespace veins {
template <typename Payload>
struct VEINS_API SignalPayload {
cComponent* source;
simsignal_t signalID;
Payload p;
cObject* details;
};
template <typename Payload>
class VEINS_API SignalCallbackListener : public cListener {
public:
using Callback = std::function<void (SignalPayload<Payload>)>;
SignalCallbackListener(Callback callback, cModule* receptor, simsignal_t signal)
: callback(callback)
, receptor(receptor)
, signal(signal)
{
receptor->subscribe(signal, this);
}
~SignalCallbackListener()
{
if (getSubscribeCount() > 0) {
receptor->unsubscribe(signal, this);
}
}
void receiveSignal(cComponent* source, simsignal_t signalID, Payload p, cObject* details) override
{
ASSERT(signalID == signal);
callback({source, signalID, p, details});
}
private:
const Callback callback;
cModule* const receptor;
const simsignal_t signal;
};
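/**
 * Owns a set of signal subscriptions and forwards emitted signals to std::function callbacks.
 *
 * A minimal usage sketch from inside module code (the signal identifier is an
 * illustrative assumption):
 * @verbatim
   veins::SignalManager signalManager;
   signalManager.subscribeCallback(getSimulation()->getSystemModule(), someSignalId, [](veins::SignalPayload<cObject*> payload) {
       EV << "signal emitted by " << payload.source->getFullPath() << std::endl;
   });
   @endverbatim
 *
 * All subscriptions are released automatically when the SignalManager is destroyed.
 */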
class VEINS_API SignalManager {
public:
void subscribeCallback(cModule* receptor, simsignal_t signal, const std::function<void(SignalPayload<bool>)> callback)
{
auto callbackListener = make_unique<SignalCallbackListener<bool>>(callback, receptor, signal);
callbacks.emplace_back(std::move(callbackListener));
}
void subscribeCallback(cModule* receptor, simsignal_t signal, const std::function<void(SignalPayload<long>)> callback)
{
auto callbackListener = make_unique<SignalCallbackListener<long>>(callback, receptor, signal);
callbacks.emplace_back(std::move(callbackListener));
}
void subscribeCallback(cModule* receptor, simsignal_t signal, const std::function<void(SignalPayload<unsigned long>)> callback)
{
auto callbackListener = make_unique<SignalCallbackListener<unsigned long>>(callback, receptor, signal);
callbacks.emplace_back(std::move(callbackListener));
}
void subscribeCallback(cModule* receptor, simsignal_t signal, const std::function<void(SignalPayload<double>)> callback)
{
auto callbackListener = make_unique<SignalCallbackListener<double>>(callback, receptor, signal);
callbacks.emplace_back(std::move(callbackListener));
}
void subscribeCallback(cModule* receptor, simsignal_t signal, const std::function<void(SignalPayload<const SimTime&>)> callback)
{
auto callbackListener = make_unique<SignalCallbackListener<const SimTime&>>(callback, receptor, signal);
callbacks.emplace_back(std::move(callbackListener));
}
void subscribeCallback(cModule* receptor, simsignal_t signal, const std::function<void(SignalPayload<const char*>)> callback)
{
auto callbackListener = make_unique<SignalCallbackListener<const char*>>(callback, receptor, signal);
callbacks.emplace_back(std::move(callbackListener));
}
void subscribeCallback(cModule* receptor, simsignal_t signal, const std::function<void(SignalPayload<cObject*>)> callback)
{
auto callbackListener = make_unique<SignalCallbackListener<cObject*>>(callback, receptor, signal);
callbacks.emplace_back(std::move(callbackListener));
}
private:
std::vector<std::unique_ptr<cListener>> callbacks;
};
} // namespace veins
// File: AICP-main/veins/src/veins/modules/utility/TimerManager.cc
//
// Copyright (C) 2018-2018 Max Schettler <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#include "veins/modules/utility/TimerManager.h"
#include <algorithm>
using omnetpp::simTime;
using omnetpp::simtime_t;
using veins::TimerManager;
using veins::TimerMessage;
using veins::TimerSpecification;
struct veins::TimerMessage : public omnetpp::cMessage {
TimerMessage(const std::string& name)
: omnetpp::cMessage(name.c_str())
{
}
};
TimerSpecification::TimerSpecification(std::function<void()> callback)
: start_mode_(StartMode::immediate)
, end_mode_(EndMode::open)
, period_(-1)
, callback_(callback)
{
}
TimerSpecification& TimerSpecification::interval(simtime_t interval)
{
ASSERT(interval > 0);
period_ = interval;
return *this;
}
TimerSpecification& TimerSpecification::relativeStart(simtime_t start)
{
start_mode_ = StartMode::relative;
start_ = start;
return *this;
}
TimerSpecification& TimerSpecification::absoluteStart(simtime_t start)
{
start_mode_ = StartMode::absolute;
start_ = start;
return *this;
}
TimerSpecification& TimerSpecification::relativeEnd(simtime_t end)
{
end_mode_ = EndMode::relative;
end_time_ = end;
return *this;
}
TimerSpecification& TimerSpecification::absoluteEnd(simtime_t end)
{
end_mode_ = EndMode::absolute;
end_time_ = end;
return *this;
}
TimerSpecification& TimerSpecification::repetitions(size_t n)
{
end_mode_ = EndMode::repetition;
end_count_ = n;
return *this;
}
TimerSpecification& TimerSpecification::openEnd()
{
end_mode_ = EndMode::open;
return *this;
}
TimerSpecification& TimerSpecification::oneshotIn(omnetpp::simtime_t in)
{
return this->relativeStart(in).interval(1).repetitions(1);
}
TimerSpecification& TimerSpecification::oneshotAt(omnetpp::simtime_t at)
{
return this->absoluteStart(at).interval(1).repetitions(1);
}
void TimerSpecification::finalize()
{
switch (start_mode_) {
case StartMode::relative:
start_ += simTime();
start_mode_ = StartMode::absolute;
break;
case StartMode::absolute:
break;
case StartMode::immediate:
start_ = simTime() + period_;
break;
}
switch (end_mode_) {
case EndMode::relative:
end_time_ += simTime();
end_mode_ = EndMode::absolute;
break;
case EndMode::absolute:
break;
case EndMode::repetition:
end_time_ = start_ + ((end_count_ - 1) * period_);
end_mode_ = EndMode::absolute;
break;
case EndMode::open:
break;
}
}
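// Worked example of the rules above (for orientation only): a specification created at
// t = 2s with relativeStart(3), interval(1) and repetitions(4) finalizes to start_ = 5s
// and end_time_ = 5s + (4 - 1) * 1s = 8s, i.e. the timer fires at 5s, 6s, 7s and 8s.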
bool TimerSpecification::validOccurence(simtime_t time) const
{
const bool afterStart = time >= start_;
const bool beforeEnd = time <= end_time_;
const bool atPeriod = omnetpp::fmod(time - start_, period_) == 0;
return afterStart && (beforeEnd || end_mode_ == EndMode::open) && atPeriod;
}
TimerManager::TimerManager(omnetpp::cSimpleModule* parent)
: parent_(parent)
{
ASSERT(parent_);
}
TimerManager::~TimerManager()
{
for (const auto& timer : timers_) {
parent_->cancelAndDelete(timer.first);
}
}
bool TimerManager::handleMessage(omnetpp::cMessage* message)
{
auto* timerMessage = dynamic_cast<TimerMessage*>(message);
if (!timerMessage) {
return false;
}
ASSERT(timerMessage->isSelfMessage());
auto timer = timers_.find(timerMessage);
if (timer == timers_.end()) {
return false;
}
ASSERT(timer->second.valid() && timer->second.validOccurence(simTime()));
timer->second.callback_();
if (timers_.find(timerMessage) != timers_.end()) { // confirm that the timer has not been cancelled during the callback
const auto next_event = simTime() + timer->second.period_;
if (timer->second.validOccurence(next_event)) {
parent_->scheduleAt(next_event, timer->first);
}
else {
parent_->cancelAndDelete(timer->first);
timers_.erase(timer);
}
}
return true;
}
TimerManager::TimerHandle TimerManager::create(TimerSpecification timerSpecification, const std::string name)
{
ASSERT(timerSpecification.valid());
timerSpecification.finalize();
const auto ret = timers_.insert(std::make_pair(new TimerMessage(name), std::move(timerSpecification)));
ASSERT(ret.second);
parent_->scheduleAt(ret.first->second.start_, ret.first->first);
return ret.first->first->getId();
}
void TimerManager::cancel(TimerManager::TimerHandle handle)
{
const auto entryMatchesHandle = [handle](const std::pair<TimerMessage*, TimerSpecification>& entry) { return entry.first->getId() == handle; };
auto timer = std::find_if(timers_.begin(), timers_.end(), entryMatchesHandle);
if (timer != timers_.end()) {
parent_->cancelAndDelete(timer->first);
timers_.erase(timer);
}
}
// File: AICP-main/veins/src/veins/modules/utility/TimerManager.h
//
// Copyright (C) 2018-2018 Max Schettler <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
#include <functional>
#include <map>
#include <string>
#include "veins/veins.h"
namespace veins {
/**
* Abstraction for (recurring) Timers for cSimpleModule.
*
* This abstraction takes care of managing the required self-messages to (repeatedly) execute a piece of code after a certain time.
* To use, instantiate one TimerManager per cSimpleModule, then call its handleMessage method from that of the cSimpleModule.
*
* In order to schedule a timer, create a TimerSpecification object using the corresponding methods.
* After configuration, use the create function from the TimerManager to actually schedule the configured timer.
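 *
 * A minimal usage sketch (module layout is illustrative):
 * @verbatim
   class MyModule : public omnetpp::cSimpleModule {
       veins::TimerManager timerManager{this};
       void initialize() override
       {
           auto spec = veins::TimerSpecification([this]() { EV << "tick" << std::endl; }).interval(omnetpp::SimTime(1, omnetpp::SIMTIME_S));
           timerManager.create(spec, "tick timer");
       }
       void handleMessage(omnetpp::cMessage* msg) override
       {
           if (timerManager.handleMessage(msg)) return; // consumed by a timer
           // handle non-timer messages here
       }
   };
   @endverbatim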
*/
class TimerManager;
/**
* A message which is used for triggering Timers.
*
* Its implementation is empty as it is only used to differentiate from other
* messages.
*/
struct TimerMessage;
/**
* A class which specifies a Timer.
*
* This includes timing information as well as its callback.
*/
struct VEINS_API TimerSpecification {
public:
/**
* Create a new TimerSpecification.
*
     * The created timer is not yet valid; an interval still has to be set for it to be usable.
     * By default, the timer starts running immediately and fires for the first time after one interval has elapsed.
     * After that, it keeps running until the simulation ends, invoking the callback every time the interval has elapsed.
* In order to create a timer, this needs to be passed to TimerManager::create.
*
* @param callback The callback which is executed when the timer is triggered.
*
* @see TimerManager
*/
TimerSpecification(std::function<void()> callback);
/**
     * Set the period between two timer occurrences.
*/
TimerSpecification& interval(omnetpp::simtime_t interval);
/**
* Set the number of repetitions.
*
* @note You cannot use both this and absoluteEnd or relativeEnd.
*/
TimerSpecification& repetitions(size_t n);
/**
* Set the timer's start time.
*
* Any previously set start time will be overwritten.
*
* @param start Time of first execution relative to the current simulation time. E.g., passing simtime_t(1, SIMTIME_S) will execute the timer in one second.
*
     * @note You cannot use this in conjunction with repetitions().
*/
TimerSpecification& relativeStart(omnetpp::simtime_t start);
/**
* Set the timer's start time.
*
* Any previously set start time will be overwritten.
*
     * @param start The absolute start time. The first occurrence will be exactly at this time. Passing a value earlier than the current simtime will result in an error.
     *
     * @note You cannot use this in conjunction with repetitions().
*/
TimerSpecification& absoluteStart(omnetpp::simtime_t start);
/**
* Set the timer's end time.
*
* Any previously set end time will be overwritten.
*
* @param end Time after which this timer will no longer be executed, relative to the current simulation time. E.g., passing simtime_t(1, SIMTIME_S) will stop the execution of the time after one second has passed.
*/
TimerSpecification& relativeEnd(omnetpp::simtime_t end);
/**
* Set the timer's end time.
*
* Any previously set end time will be overwritten.
*
     * @param end The absolute end time. The latest possible occurrence is at this time. Values before the current start time will prevent any executions.
*/
TimerSpecification& absoluteEnd(omnetpp::simtime_t end);
/**
* Set the timer to be open ended.
*
* Any previously set end time will be overwritten.
*/
TimerSpecification& openEnd();
/**
* Set the timer to execute once in a given time.
*
* Any previously set start time, end time, and interval will be overwritten.
*/
TimerSpecification& oneshotIn(omnetpp::simtime_t in);
/**
* Set the timer to execute once at a given time.
*
* Any previously set start time, end time, and interval will be overwritten.
*/
TimerSpecification& oneshotAt(omnetpp::simtime_t at);
private:
friend TimerManager;
enum class StartMode {
relative,
absolute,
immediate
};
enum class EndMode {
relative,
absolute,
repetition,
open
};
/**
* Finalizes this instance such that its values are independent of current simulation time.
*
* After calling this function, start_mode_ is guaranteed to be StartMode::absolute and end_mode_ to be EndMode::absolute or EndMode::open.
*/
void finalize();
/**
* Checks validity of this specification, i.e., whether all necessary information is set.
*/
bool valid() const
{
return period_ != -1;
}
/**
     * Check that the given time is a valid occurrence for this timer.
*/
bool validOccurence(omnetpp::simtime_t time) const;
    StartMode start_mode_; ///< Interpretation of start_.
    omnetpp::simtime_t start_; ///< Time of the Timer's first occurrence. Interpretation depends on start_mode_.
    EndMode end_mode_; ///< Interpretation of end_time_ and end_count_.
    unsigned end_count_; ///< Number of repetitions of the timer. Only valid when end_mode_ == repetition.
    omnetpp::simtime_t end_time_; ///< Last possible occurrence of the timer. Only valid when end_mode_ != repetition.
omnetpp::simtime_t period_; ///< Time between events.
std::function<void()> callback_; ///< The function to be called when the Timer is triggered.
};
class VEINS_API TimerManager {
private:
public:
using TimerHandle = long;
using TimerList = std::map<TimerMessage*, const TimerSpecification>;
TimerManager(omnetpp::cSimpleModule* parent);
/**
* Destroy this module.
*
* All associated events will be cancelled and the corresponding messages deleted.
*/
~TimerManager();
/**
* Handle the given message and, if applicable, trigger the associated timer.
*
* @param message The received message.
* @return true, if the message was meant for this TimerManager. In this case, the passed message might be invalidated.
*/
bool handleMessage(omnetpp::cMessage* message);
/**
* Create a new timer.
*
* @param timerSpecification Parameters for the new timer
* @param name The timer's name
* @return A handle for the timer.
*
* @see cancel
* @see TimerSpecification
*/
TimerHandle create(TimerSpecification timerSpecification, std::string name = "");
/**
* Cancel a timer.
*
* Prevents any future executions of the given timer. Expired timers are silently ignored.
*
* @param handle A handle which identifies the timer.
*/
void cancel(TimerHandle handle);
private:
TimerList timers_; ///< List of all active Timers.
omnetpp::cSimpleModule* const parent_; ///< A pointer to the module which owns this TimerManager.
};
} // namespace veins
// File: AICP-main/veins/src/veins/modules/world/annotations/AnnotationDummy.cc
//
// Copyright (C) 2010 Christoph Sommer <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// AnnotationDummy - workaround to visualize annotations
#include "veins/modules/world/annotations/AnnotationDummy.h"
using veins::AnnotationDummy;
Define_Module(veins::AnnotationDummy);
AnnotationDummy::~AnnotationDummy()
{
}
// File: AICP-main/veins/src/veins/modules/world/annotations/AnnotationDummy.h
//
// Copyright (C) 2010 Christoph Sommer <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// AnnotationDummy - workaround to visualize annotations
#pragma once
#include "veins/veins.h"
namespace veins {
/**
* AnnotationDummy is just a workaround to visualize annotations
*
* @author Christoph Sommer
*/
class VEINS_API AnnotationDummy : public cSimpleModule {
public:
~AnnotationDummy() override;
protected:
};
} // namespace veins
// File: AICP-main/veins/src/veins/modules/world/annotations/AnnotationManager.cc
//
// Copyright (C) 2010 Christoph Sommer <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// AnnotationManager - manages annotations on the OMNeT++ canvas
#include <sstream>
#include <cmath>
#include "veins/modules/world/annotations/AnnotationManager.h"
#include "veins/modules/mobility/traci/TraCIScenarioManager.h"
#include "veins/modules/mobility/traci/TraCICommandInterface.h"
Define_Module(veins::AnnotationManager);
using veins::AnnotationManager;
using veins::TraCIScenarioManager;
using veins::TraCIScenarioManagerAccess;
namespace {
const short EVT_SCHEDULED_ERASE = 3;
}
void AnnotationManager::initialize()
{
scheduledEraseEvts.clear();
annotations.clear();
groups.clear();
annotationLayer = new cGroupFigure();
cCanvas* canvas = getParentModule()->getCanvas();
canvas->addFigure(annotationLayer, canvas->findFigure("submodules"));
annotationsXml = par("annotations");
addFromXml(annotationsXml);
}
void AnnotationManager::finish()
{
hideAll();
}
AnnotationManager::~AnnotationManager()
{
while (scheduledEraseEvts.begin() != scheduledEraseEvts.end()) {
cancelAndDelete(*scheduledEraseEvts.begin());
scheduledEraseEvts.erase(scheduledEraseEvts.begin());
}
scheduledEraseEvts.clear();
while (annotations.begin() != annotations.end()) {
delete *annotations.begin();
annotations.erase(annotations.begin());
}
annotations.clear();
while (groups.begin() != groups.end()) {
delete *groups.begin();
groups.erase(groups.begin());
}
groups.clear();
}
void AnnotationManager::handleMessage(cMessage* msg)
{
if (msg->isSelfMessage()) {
handleSelfMsg(msg);
return;
}
throw cRuntimeError("AnnotationManager doesn't handle messages from other modules");
}
void AnnotationManager::handleSelfMsg(cMessage* msg)
{
if (msg->getKind() == EVT_SCHEDULED_ERASE) {
Annotation* a = static_cast<Annotation*>(msg->getContextPointer());
ASSERT(a);
erase(a);
scheduledEraseEvts.remove(msg);
delete msg;
return;
}
throw cRuntimeError("unknown self message type");
}
void AnnotationManager::handleParameterChange(const char* parname)
{
if (parname && (std::string(parname) == "draw")) {
if (par("draw")) {
showAll();
}
else {
hideAll();
}
}
}
/**
* adds Annotations from an XML document; example below.
*
* ```
* <annotations>
* <line color="#F00" shape="16,0 8,13.8564" />
* <poly color="#0F0" shape="16,64 8,77.8564 -8,77.8564 -16,64 -8,50.1436 8,50.1436" />
* </annotations>
* ```
*/
void AnnotationManager::addFromXml(cXMLElement* xml)
{
std::string rootTag = xml->getTagName();
ASSERT(rootTag == "annotations");
cXMLElementList list = xml->getChildren();
for (cXMLElementList::const_iterator i = list.begin(); i != list.end(); ++i) {
cXMLElement* e = *i;
std::string tag = e->getTagName();
if (tag == "point") {
ASSERT(e->getAttribute("text"));
std::string text = e->getAttribute("text");
ASSERT(e->getAttribute("color"));
std::string color = e->getAttribute("color");
ASSERT(e->getAttribute("shape"));
std::string shape = e->getAttribute("shape");
std::vector<std::string> points = cStringTokenizer(shape.c_str(), " ").asVector();
ASSERT(points.size() == 2);
std::vector<double> p1a = cStringTokenizer(points[0].c_str(), ",").asDoubleVector();
ASSERT(p1a.size() == 2);
drawPoint(Coord(p1a[0], p1a[1]), color, text);
}
else if (tag == "line") {
ASSERT(e->getAttribute("color"));
std::string color = e->getAttribute("color");
ASSERT(e->getAttribute("shape"));
std::string shape = e->getAttribute("shape");
std::vector<std::string> points = cStringTokenizer(shape.c_str(), " ").asVector();
ASSERT(points.size() == 2);
std::vector<double> p1a = cStringTokenizer(points[0].c_str(), ",").asDoubleVector();
ASSERT(p1a.size() == 2);
std::vector<double> p2a = cStringTokenizer(points[1].c_str(), ",").asDoubleVector();
ASSERT(p2a.size() == 2);
drawLine(Coord(p1a[0], p1a[1]), Coord(p2a[0], p2a[1]), color);
}
else if (tag == "poly") {
ASSERT(e->getAttribute("color"));
std::string color = e->getAttribute("color");
ASSERT(e->getAttribute("shape"));
std::string shape = e->getAttribute("shape");
std::vector<std::string> points = cStringTokenizer(shape.c_str(), " ").asVector();
ASSERT(points.size() >= 2);
std::vector<Coord> coords;
for (std::vector<std::string>::const_iterator i = points.begin(); i != points.end(); ++i) {
std::vector<double> pa = cStringTokenizer(i->c_str(), ",").asDoubleVector();
ASSERT(pa.size() == 2);
coords.push_back(Coord(pa[0], pa[1]));
}
drawPolygon(coords, color);
}
else {
throw cRuntimeError("while reading annotations xml: expected 'line' or 'poly', but got '%s'", tag.c_str());
}
}
}
AnnotationManager::Group* AnnotationManager::createGroup(std::string title)
{
Group* group = new Group(title);
groups.push_back(group);
return group;
}
AnnotationManager::Point* AnnotationManager::drawPoint(Coord p, std::string color, std::string text, Group* group)
{
Point* o = new Point(p, color, text);
o->group = group;
annotations.push_back(o);
if (par("draw")) show(o);
return o;
}
AnnotationManager::Line* AnnotationManager::drawLine(Coord p1, Coord p2, std::string color, Group* group)
{
Line* l = new Line(p1, p2, color);
l->group = group;
annotations.push_back(l);
if (par("draw")) show(l);
return l;
}
AnnotationManager::Polygon* AnnotationManager::drawPolygon(std::list<Coord> coords, std::string color, Group* group)
{
Polygon* p = new Polygon(coords, color);
p->group = group;
annotations.push_back(p);
if (par("draw")) show(p);
return p;
}
AnnotationManager::Polygon* AnnotationManager::drawPolygon(std::vector<Coord> coords, std::string color, Group* group)
{
return drawPolygon(std::list<Coord>(coords.begin(), coords.end()), color, group);
}
void AnnotationManager::drawBubble(Coord p1, std::string text)
{
std::string pxOld = getDisplayString().getTagArg("p", 0);
std::string pyOld = getDisplayString().getTagArg("p", 1);
std::string px;
{
std::stringstream ss;
ss << p1.x;
px = ss.str();
}
std::string py;
{
std::stringstream ss;
        ss << p1.y;
py = ss.str();
}
getDisplayString().setTagArg("p", 0, px.c_str());
getDisplayString().setTagArg("p", 1, py.c_str());
bubble(text.c_str());
getDisplayString().setTagArg("p", 0, pxOld.c_str());
getDisplayString().setTagArg("p", 1, pyOld.c_str());
}
void AnnotationManager::erase(const Annotation* annotation)
{
hide(annotation);
annotations.remove(const_cast<Annotation*>(annotation));
delete annotation;
}
void AnnotationManager::eraseAll(Group* group)
{
for (Annotations::iterator i = annotations.begin(); i != annotations.end();) {
if ((!group) || ((*i)->group == group)) {
erase(*i++);
}
else {
i++;
}
}
}
void AnnotationManager::scheduleErase(simtime_t deltaT, Annotation* annotation)
{
Enter_Method_Silent();
cMessage* evt = new cMessage("erase", EVT_SCHEDULED_ERASE);
evt->setContextPointer(annotation);
scheduleAt(simTime() + deltaT, evt);
scheduledEraseEvts.push_back(evt);
}
void AnnotationManager::show(const Annotation* annotation)
{
if (annotation->figure) return;
if (const Point* o = dynamic_cast<const Point*>(annotation)) {
if (hasGUI()) {
// no corresponding TkEnv representation
}
TraCIScenarioManager* traci = TraCIScenarioManagerAccess().get();
if (traci && traci->isConnected()) {
std::stringstream nameBuilder;
nameBuilder << o->text << " " << getEnvir()->getUniqueNumber();
traci->getCommandInterface()->addPoi(nameBuilder.str(), "Annotation", TraCIColor::fromTkColor(o->color), 6, o->pos);
annotation->traciPoiIds.push_back(nameBuilder.str());
}
}
else if (const Line* l = dynamic_cast<const Line*>(annotation)) {
if (hasGUI()) {
cLineFigure* figure = new cLineFigure();
figure->setStart(cFigure::Point(l->p1.x, l->p1.y));
figure->setEnd(cFigure::Point(l->p2.x, l->p2.y));
figure->setLineColor(cFigure::Color(l->color.c_str()));
annotation->figure = figure;
annotationLayer->addFigure(annotation->figure);
}
TraCIScenarioManager* traci = TraCIScenarioManagerAccess().get();
if (traci && traci->isConnected()) {
std::list<Coord> coords;
coords.push_back(l->p1);
coords.push_back(l->p2);
std::stringstream nameBuilder;
nameBuilder << "Annotation" << getEnvir()->getUniqueNumber();
traci->getCommandInterface()->addPolygon(nameBuilder.str(), "Annotation", TraCIColor::fromTkColor(l->color), false, 5, coords);
annotation->traciLineIds.push_back(nameBuilder.str());
}
}
else if (const Polygon* p = dynamic_cast<const Polygon*>(annotation)) {
ASSERT(p->coords.size() >= 2);
if (hasGUI()) {
cPolygonFigure* figure = new cPolygonFigure();
std::vector<cFigure::Point> points;
for (std::list<Coord>::const_iterator i = p->coords.begin(); i != p->coords.end(); ++i) {
points.push_back(cFigure::Point(i->x, i->y));
}
figure->setPoints(points);
figure->setLineColor(cFigure::Color(p->color.c_str()));
figure->setFilled(false);
annotation->figure = figure;
annotationLayer->addFigure(annotation->figure);
}
TraCIScenarioManager* traci = TraCIScenarioManagerAccess().get();
if (traci && traci->isConnected()) {
std::stringstream nameBuilder;
nameBuilder << "Annotation" << getEnvir()->getUniqueNumber();
traci->getCommandInterface()->addPolygon(nameBuilder.str(), "Annotation", TraCIColor::fromTkColor(p->color), false, 4, p->coords);
annotation->traciPolygonsIds.push_back(nameBuilder.str());
}
}
else {
throw cRuntimeError("unknown Annotation type");
}
}
void AnnotationManager::hide(const Annotation* annotation)
{
if (annotation->figure) {
delete annotationLayer->removeFigure(annotation->figure);
annotation->figure = nullptr;
}
TraCIScenarioManager* traci = TraCIScenarioManagerAccess().get();
if (traci && traci->isConnected()) {
for (std::list<std::string>::const_iterator i = annotation->traciPolygonsIds.begin(); i != annotation->traciPolygonsIds.end(); ++i) {
std::string id = *i;
traci->getCommandInterface()->polygon(id).remove(3);
}
annotation->traciPolygonsIds.clear();
for (std::list<std::string>::const_iterator i = annotation->traciLineIds.begin(); i != annotation->traciLineIds.end(); ++i) {
std::string id = *i;
traci->getCommandInterface()->polygon(id).remove(4);
}
annotation->traciLineIds.clear();
for (std::list<std::string>::const_iterator i = annotation->traciPoiIds.begin(); i != annotation->traciPoiIds.end(); ++i) {
std::string id = *i;
traci->getCommandInterface()->poi(id).remove(5);
}
annotation->traciPoiIds.clear();
}
}
void AnnotationManager::showAll(Group* group)
{
for (Annotations::const_iterator i = annotations.begin(); i != annotations.end(); ++i) {
if ((!group) || ((*i)->group == group)) show(*i);
}
}
void AnnotationManager::hideAll(Group* group)
{
for (Annotations::const_iterator i = annotations.begin(); i != annotations.end(); ++i) {
if ((!group) || ((*i)->group == group)) hide(*i);
}
}
| 13,280 | 31.235437 | 142 | cc |
null | AICP-main/veins/src/veins/modules/world/annotations/AnnotationManager.h | //
// Copyright (C) 2010 Christoph Sommer <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// AnnotationManager - manages annotations on the OMNeT++ canvas
#pragma once
#include <list>
#include "veins/veins.h"
#include "veins/base/utils/FindModule.h"
#include "veins/base/utils/Coord.h"
namespace veins {
/**
* manages annotations on the OMNeT++ canvas.
*/
class VEINS_API AnnotationManager : public cSimpleModule {
public:
class VEINS_API Group;
class VEINS_API Annotation {
public:
Annotation()
: group(nullptr)
, figure(nullptr)
{
}
virtual ~Annotation()
{
}
protected:
friend class AnnotationManager;
Group* group;
mutable cFigure* figure;
mutable std::list<std::string> traciPoiIds;
mutable std::list<std::string> traciLineIds;
mutable std::list<std::string> traciPolygonsIds;
};
class VEINS_API Point : public Annotation {
public:
Point(Coord pos, std::string color, std::string text)
: pos(pos)
, color(color)
, text(text)
{
}
~Point() override
{
}
protected:
friend class AnnotationManager;
Coord pos;
std::string color;
std::string text;
};
class VEINS_API Line : public Annotation {
public:
Line(Coord p1, Coord p2, std::string color)
: p1(p1)
, p2(p2)
, color(color)
{
}
~Line() override
{
}
protected:
friend class AnnotationManager;
Coord p1;
Coord p2;
std::string color;
};
class VEINS_API Polygon : public Annotation {
public:
Polygon(std::list<Coord> coords, std::string color)
: coords(coords)
, color(color)
{
}
~Polygon() override
{
}
protected:
friend class AnnotationManager;
std::list<Coord> coords;
std::string color;
};
class VEINS_API Group {
public:
Group(std::string title)
: title(title)
{
}
virtual ~Group()
{
}
protected:
friend class AnnotationManager;
std::string title;
};
~AnnotationManager() override;
void initialize() override;
void finish() override;
void handleMessage(cMessage* msg) override;
void handleSelfMsg(cMessage* msg);
void handleParameterChange(const char* parname) override;
void addFromXml(cXMLElement* xml);
Group* createGroup(std::string title = "untitled");
Point* drawPoint(Coord p, std::string color, std::string text, Group* group = nullptr);
Line* drawLine(Coord p1, Coord p2, std::string color, Group* group = nullptr);
Polygon* drawPolygon(std::list<Coord> coords, std::string color, Group* group = nullptr);
Polygon* drawPolygon(std::vector<Coord> coords, std::string color, Group* group = nullptr);
void drawBubble(Coord p1, std::string text);
void erase(const Annotation* annotation);
void eraseAll(Group* group = nullptr);
void scheduleErase(simtime_t deltaT, Annotation* annotation);
void show(const Annotation* annotation);
void hide(const Annotation* annotation);
void showAll(Group* group = nullptr);
void hideAll(Group* group = nullptr);
protected:
using Annotations = std::list<Annotation*>;
using Groups = std::list<Group*>;
cXMLElement* annotationsXml; /**< annotations to add at startup */
std::list<cMessage*> scheduledEraseEvts;
Annotations annotations;
Groups groups;
cGroupFigure* annotationLayer;
};
class VEINS_API AnnotationManagerAccess {
public:
AnnotationManager* getIfExists()
{
return FindModule<AnnotationManager*>::findGlobalModule();
};
};
} // namespace veins
| 4,754 | 24.564516 | 95 | h |
null | AICP-main/veins/src/veins/modules/world/traci/trafficLight/TraCITrafficLightInterface.cc | //
// Copyright (C) 2015-2018 Dominik Buse <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#include <string>
#include "veins/modules/world/traci/trafficLight/TraCITrafficLightInterface.h"
#include "veins/modules/messages/TraCITrafficLightMessage_m.h"
using namespace veins;
using veins::TraCITrafficLightInterface;
using veins::TraCITrafficLightLink;
using veins::TraCITrafficLightProgram;
Define_Module(veins::TraCITrafficLightInterface);
TraCITrafficLightInterface::TraCITrafficLightInterface()
: cSimpleModule()
, isPreInitialized(false)
, updateInterval()
, manager(nullptr)
, commandInterface(nullptr)
, tlCommandInterface(nullptr)
, external_id("")
, position()
, programDefinition()
, currentLogicId("")
, currentPhaseNr(-1)
, nextSwitchTime()
, inOnlineSignalState(false)
{
}
TraCITrafficLightInterface::~TraCITrafficLightInterface()
{
delete tlCommandInterface;
}
void TraCITrafficLightInterface::preInitialize(const std::string& external_id, const Coord& position, const simtime_t& updateInterval)
{
isPreInitialized = true;
this->updateInterval = updateInterval;
setExternalId(external_id);
this->position = position;
}
Coord TraCITrafficLightInterface::getPosition() const
{
return this->position;
}
std::list<std::list<TraCITrafficLightLink>> TraCITrafficLightInterface::getControlledLinks()
{
return controlledLinks;
}
TraCITrafficLightProgram::Logic TraCITrafficLightInterface::getCurrentLogic() const
{
return programDefinition.getLogic(currentLogicId);
}
std::string TraCITrafficLightInterface::getCurrentLogicId() const
{
return currentLogicId;
}
int TraCITrafficLightInterface::getCurrentPhaseId() const
{
return currentPhaseNr;
}
TraCITrafficLightProgram::Phase TraCITrafficLightInterface::getCurrentPhase() const
{
return getCurrentLogic().phases[currentPhaseNr];
}
simtime_t TraCITrafficLightInterface::getAssumedNextSwitch() const
{
return nextSwitchTime;
}
simtime_t TraCITrafficLightInterface::getRemainingDuration() const
{
return nextSwitchTime - simTime();
}
std::string TraCITrafficLightInterface::getCurrentState() const
{
if (isInOnlineSignalState()) {
return currentSignalState;
}
else {
return getCurrentPhase().state;
}
}
bool TraCITrafficLightInterface::isInOnlineSignalState() const
{
return inOnlineSignalState;
}
void TraCITrafficLightInterface::setProgramDefinition(const TraCITrafficLightProgram& programDefinition)
{
this->programDefinition = programDefinition;
}
void TraCITrafficLightInterface::setControlledLinks(const std::list<std::list<TraCITrafficLightLink>>& controlledLinks)
{
this->controlledLinks = controlledLinks;
}
void TraCITrafficLightInterface::setCurrentLogicById(const std::string& logicId, bool setSumo)
{
if (setSumo) {
ASSERT(logicId != "online");
if (!programDefinition.hasLogic(logicId)) {
throw cRuntimeError("Logic '%s' not found in program of TraCITrafficLightInterface %s", logicId.c_str(), external_id.c_str());
}
tlCommandInterface->setProgram(logicId);
const std::string newValueInSumo = tlCommandInterface->getCurrentProgramID();
ASSERT(newValueInSumo == logicId);
}
if (currentLogicId != logicId || (isInOnlineSignalState() && logicId != "online")) {
sendChangeMsg(TrafficLightAtrributeType::LOGICID, logicId, currentLogicId);
}
if (logicId != "online") {
inOnlineSignalState = false;
this->currentLogicId = logicId;
}
}
void TraCITrafficLightInterface::setCurrentPhaseByNr(const unsigned int phaseNr, bool setSumo)
{
if (setSumo) {
if (phaseNr >= getCurrentLogic().phases.size()) {
throw cRuntimeError("Cannot set current phase to %d: current logic has only %d Phases (TraCITrafficLightInterface %s)", phaseNr, getCurrentLogic().phases.size(), external_id.c_str());
}
tlCommandInterface->setPhaseIndex(phaseNr);
const unsigned int newValueInSumo = tlCommandInterface->getCurrentPhaseIndex();
ASSERT(newValueInSumo == phaseNr);
}
if (currentPhaseNr != static_cast<int>(phaseNr) || isInOnlineSignalState()) {
sendChangeMsg(TrafficLightAtrributeType::PHASEID, std::to_string(phaseNr), std::to_string(currentPhaseNr));
}
inOnlineSignalState = false;
currentPhaseNr = phaseNr;
}
void TraCITrafficLightInterface::setCurrentState(const std::string& state, bool setSumo)
{
if (setSumo) {
tlCommandInterface->setState(state);
const std::string newValueInSumo = tlCommandInterface->getCurrentState();
ASSERT(newValueInSumo == state);
}
if (currentSignalState != state) {
sendChangeMsg(TrafficLightAtrributeType::STATE, state, currentSignalState);
}
inOnlineSignalState = true;
currentSignalState = state;
}
void TraCITrafficLightInterface::setNextSwitch(const simtime_t& newNextSwitch, bool setSumo)
{
// FIXME: not working reliably - use setRemainingDuration instead!
// round to be feasible for SUMO
simtime_t nextSwitch = ceil(newNextSwitch, updateInterval, 0);
if (setSumo) {
simtime_t remainingDuration = ceil(nextSwitch - simTime(), updateInterval, 0);
if (remainingDuration < 0) {
EV << "Warning: remaining duration for switch below 0: " << remainingDuration << std::endl;
            // maybe even issue an error if this occurs
remainingDuration = 0;
}
getTlCommandInterface()->setPhaseDuration(remainingDuration);
simtime_t newValueInSumo = tlCommandInterface->getAssumedNextSwitchTime();
ASSERT(newValueInSumo == nextSwitch);
}
if (nextSwitchTime != nextSwitch) {
sendChangeMsg(TrafficLightAtrributeType::SWITCHTIME, std::to_string(nextSwitch.inUnit(SIMTIME_MS)), std::to_string(nextSwitchTime.inUnit(SIMTIME_MS)));
}
nextSwitchTime = nextSwitch;
}
void TraCITrafficLightInterface::setRemainingDuration(const simtime_t& rawRemainingDuration, bool setSumo)
{
ASSERT(rawRemainingDuration >= 0);
// round (up) to match sumo time steps
simtime_t veinsTimeNow(simTime());
simtime_t sumoTimeNow(ceil(veinsTimeNow, updateInterval) - updateInterval);
simtime_t roundedRemainingDuration = ceil(rawRemainingDuration, updateInterval, 0);
// simtime_t nextSwitchInVeins = floor(simTime() + roundedRemainingDuration, updateInterval, 0) - updateInterval;
simtime_t nextSwitchInVeins = sumoTimeNow + roundedRemainingDuration;
if (setSumo) {
// set value to sumo
getTlCommandInterface()->setPhaseDuration(roundedRemainingDuration);
// check that everything is consistent
simtime_t nextSwitchInSumo = tlCommandInterface->getAssumedNextSwitchTime();
ASSERT(nextSwitchInSumo == nextSwitchInVeins);
}
if (nextSwitchTime != nextSwitchInVeins) {
sendChangeMsg(TrafficLightAtrributeType::SWITCHTIME, std::to_string(nextSwitchInVeins.inUnit(SIMTIME_MS)), std::to_string(nextSwitchTime.inUnit(SIMTIME_MS)));
}
nextSwitchTime = nextSwitchInVeins;
}
void TraCITrafficLightInterface::initialize()
{
ASSERT(isPreInitialized);
isPreInitialized = false;
setProgramDefinition(getTlCommandInterface()->getProgramDefinition());
setControlledLinks(getTlCommandInterface()->getControlledLinks());
currentLogicId = getTlCommandInterface()->getCurrentProgramID();
currentPhaseNr = getTlCommandInterface()->getCurrentPhaseIndex();
nextSwitchTime = getTlCommandInterface()->getAssumedNextSwitchTime();
currentSignalState = getTlCommandInterface()->getCurrentState();
}
void TraCITrafficLightInterface::handleMessage(cMessage* msg)
{
if (msg->isSelfMessage()) {
// not in use (yet)
}
else if (msg->arrivedOn("logic$i")) {
handleChangeCommandMessage(msg);
}
delete msg;
}
void TraCITrafficLightInterface::handleChangeCommandMessage(cMessage* msg)
{
TraCITrafficLightMessage* tmMsg = check_and_cast<TraCITrafficLightMessage*>(msg);
switch (tmMsg->getChangedAttribute()) {
case TrafficLightAtrributeType::LOGICID:
setCurrentLogicById(tmMsg->getNewValue(), true);
break;
case TrafficLightAtrributeType::PHASEID:
setCurrentPhaseByNr(std::stoi(tmMsg->getNewValue()), true);
break;
case TrafficLightAtrributeType::SWITCHTIME:
setNextSwitch(SimTime(std::stoi(tmMsg->getNewValue()), SIMTIME_MS), true);
break;
case TrafficLightAtrributeType::STATE:
setCurrentState(tmMsg->getNewValue(), true);
break;
}
}
void TraCITrafficLightInterface::sendChangeMsg(int changedAttribute, const std::string newValue, const std::string oldValue)
{
Enter_Method_Silent();
TraCITrafficLightMessage* pMsg = new TraCITrafficLightMessage("TrafficLightChangeMessage");
pMsg->setTlId(external_id.c_str());
pMsg->setChangedAttribute(changedAttribute);
pMsg->setChangeSource(TrafficLightChangeSource::SUMO);
pMsg->setOldValue(oldValue.c_str());
pMsg->setNewValue(newValue.c_str());
send(pMsg, "logic$o");
}
| 9,949 | 34.663082 | 195 | cc |
null | AICP-main/veins/src/veins/modules/world/traci/trafficLight/TraCITrafficLightInterface.h | //
// Copyright (C) 2015-2018 Dominik Buse <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
#include "veins/veins.h"
#include "veins/modules/mobility/traci/TraCIScenarioManager.h"
#include "veins/modules/mobility/traci/TraCICommandInterface.h"
#include "veins/modules/world/traci/trafficLight/TraCITrafficLightProgram.h"
namespace veins {
class VEINS_API TraCITrafficLightInterface : public cSimpleModule {
public:
TraCITrafficLightInterface();
~TraCITrafficLightInterface() override;
/** Set parameters for connection to TraCI */
virtual void preInitialize(const std::string& external_id, const Coord& position, const simtime_t& updateInterval);
virtual void setExternalId(const std::string& external_id)
{
this->external_id = external_id;
}
virtual std::string getExternalId() const
{
if (external_id == "") throw cRuntimeError("TraCITrafficLightInterface::getExternalId called with no external_id set yet");
return external_id;
}
virtual TraCIScenarioManager* getManager() const
{
if (!manager) {
manager = TraCIScenarioManagerAccess().get();
}
return manager;
}
virtual TraCICommandInterface* getCommandInterface() const
{
if (!commandInterface) {
commandInterface = getManager()->getCommandInterface();
}
return commandInterface;
}
virtual TraCICommandInterface::Trafficlight* getTlCommandInterface() const
{
if (!tlCommandInterface) {
tlCommandInterface = new TraCICommandInterface::Trafficlight(getCommandInterface(), external_id);
}
return tlCommandInterface;
}
virtual std::list<std::list<TraCITrafficLightLink>> getControlledLinks();
virtual Coord getPosition() const;
virtual TraCITrafficLightProgram::Logic getCurrentLogic() const;
virtual std::string getCurrentLogicId() const;
virtual int getCurrentPhaseId() const;
virtual TraCITrafficLightProgram::Phase getCurrentPhase() const;
virtual simtime_t getAssumedNextSwitch() const;
virtual simtime_t getRemainingDuration() const;
virtual std::string getCurrentState() const;
virtual bool isInOnlineSignalState() const;
virtual void setProgramDefinition(const TraCITrafficLightProgram& programDefinition);
virtual void setControlledLinks(const std::list<std::list<TraCITrafficLightLink>>& controlledLinks);
virtual void setCurrentLogicById(const std::string& logicId, bool setSumo = true);
virtual void setCurrentPhaseByNr(const unsigned int phaseNr, bool setSumo = true);
virtual void setCurrentState(const std::string& state, bool setSumo = true);
virtual void setNextSwitch(const simtime_t& newNextSwitch, bool setSumo = true);
virtual void setRemainingDuration(const simtime_t& timeTillSwitch, bool setSumo = true);
protected:
void initialize() override;
void handleMessage(cMessage* msg) override;
virtual void handleChangeCommandMessage(cMessage* msg);
virtual void sendChangeMsg(int changedAttribute, const std::string newValue, const std::string oldValue);
bool isPreInitialized; /**< true if preInitialize() has been called immediately before initialize() */
simtime_t updateInterval; /**< ScenarioManager's update interval */
/** reference to the simulations ScenarioManager */
mutable TraCIScenarioManager* manager;
/** reference to the simulations traffic light-specific TraCI command interface */
mutable TraCICommandInterface* commandInterface;
/** reference to the simulations traffic light-specific TraCI command interface */
mutable TraCICommandInterface::Trafficlight* tlCommandInterface;
std::string external_id; /**< id used on the other end of TraCI */
Coord position; /**< position of the traffic light */
TraCITrafficLightProgram programDefinition; /**< full definition of program (all logics) */
std::list<std::list<TraCITrafficLightLink>> controlledLinks; /**< controlledLinks[signal][link] */
// std::list< std::list<TraCITrafficLightLink> > controlledLanes; /**< controlledLanes[signal][link] */
std::string currentLogicId; /**< id of the currently active logic */
int currentPhaseNr; /**< current phase of the current program */
simtime_t nextSwitchTime; /**< predicted next phase switch time (absolute timestamp) */
std::string currentSignalState; /**< current state of the signals (rRgGyY-String) */
bool inOnlineSignalState; /**< whether the TLS is currently set to a manual (i.e. online) phase state */
};
} // namespace veins
namespace veins {
class VEINS_API TraCITrafficLightInterfaceAccess {
public:
TraCITrafficLightInterface* get(cModule* host)
{
TraCITrafficLightInterface* traci = FindModule<TraCITrafficLightInterface*>::findSubModule(host);
ASSERT(traci);
return traci;
};
};
} // namespace veins
| 5,743 | 42.515152 | 131 | h |
null | AICP-main/veins/src/veins/modules/world/traci/trafficLight/TraCITrafficLightProgram.cc | //
// Copyright (C) 2015-2018 Dominik Buse <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#include "veins/modules/world/traci/trafficLight/TraCITrafficLightProgram.h"
using veins::TraCITrafficLightProgram;
bool TraCITrafficLightProgram::Phase::isGreenPhase() const
{
// implementation taken from SUMO MSPhaseDefinition.cc
if (state.find_first_of("gG") == std::string::npos) {
return false;
}
if (state.find_first_of("yY") != std::string::npos) {
return false;
}
return true;
}
TraCITrafficLightProgram::TraCITrafficLightProgram(std::string id)
: id(id)
, logics()
{
}
void TraCITrafficLightProgram::addLogic(const Logic& logic)
{
logics[logic.id] = logic;
}
TraCITrafficLightProgram::Logic TraCITrafficLightProgram::getLogic(const std::string& lid) const
{
return logics.at(lid);
}
bool TraCITrafficLightProgram::hasLogic(const std::string& lid) const
{
return logics.find(lid) != logics.end();
}
| 1,782 | 29.220339 | 96 | cc |
null | AICP-main/veins/src/veins/modules/world/traci/trafficLight/TraCITrafficLightProgram.h | //
// Copyright (C) 2015-2018 Dominik Buse <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
#include <string>
#include <vector>
#include <map>
#include "veins/veins.h"
using omnetpp::simtime_t;
namespace veins {
class VEINS_API TraCITrafficLightProgram {
public:
struct Phase {
simtime_t duration;
std::string state;
simtime_t minDuration;
simtime_t maxDuration;
std::vector<int32_t> next;
std::string name;
bool isGreenPhase() const;
};
struct Logic {
std::string id;
int32_t currentPhase;
std::vector<Phase> phases;
int32_t type; // currently unused, just 0
int32_t parameter; // currently unused, just 0
};
TraCITrafficLightProgram(std::string id = "");
void addLogic(const Logic& logic);
TraCITrafficLightProgram::Logic getLogic(const std::string& lid) const;
bool hasLogic(const std::string& lid) const;
private:
std::string id;
std::map<std::string, TraCITrafficLightProgram::Logic> logics;
};
struct VEINS_API TraCITrafficLightLink {
std::string incoming;
std::string outgoing;
std::string internal;
};
} // namespace veins
| 2,019 | 27.055556 | 76 | h |
null | AICP-main/veins/src/veins/modules/world/traci/trafficLight/logics/TraCITrafficLightAbstractLogic.cc | //
// Copyright (C) 2015-2018 Dominik Buse <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#include "veins/modules/world/traci/trafficLight/logics/TraCITrafficLightAbstractLogic.h"
using veins::TraCITrafficLightAbstractLogic;
using namespace omnetpp;
TraCITrafficLightAbstractLogic::TraCITrafficLightAbstractLogic()
: cSimpleModule()
, switchTimer(nullptr)
{
}
TraCITrafficLightAbstractLogic::~TraCITrafficLightAbstractLogic()
{
cancelAndDelete(switchTimer);
}
void TraCITrafficLightAbstractLogic::initialize()
{
switchTimer = new cMessage("trySwitch");
}
void TraCITrafficLightAbstractLogic::handleMessage(cMessage* msg)
{
if (msg->isSelfMessage()) {
handleSelfMsg(msg);
}
else if (msg->arrivedOn("interface$i")) {
TraCITrafficLightMessage* tlMsg = check_and_cast<TraCITrafficLightMessage*>(msg);
// always check for changed switch time and (re-)schedule switch handler if so
if (tlMsg->getChangedAttribute() == TrafficLightAtrributeType::SWITCHTIME) {
// schedule handler right before the switch
cancelEvent(switchTimer);
            // make sure the message is not scheduled in the past
simtime_t nextTick = std::max(SimTime(std::stoi(tlMsg->getNewValue()), SIMTIME_MS), simTime());
scheduleAt(nextTick, switchTimer);
}
// defer further handling to subclass implementation
handleTlIfMsg(tlMsg);
}
else if (msg->arrivedOn("applLayer$i")) {
handleApplMsg(msg);
}
else {
throw cRuntimeError("Unknown message arrived on %s", msg->getArrivalGate()->getName());
}
}
void TraCITrafficLightAbstractLogic::handleSelfMsg(cMessage* msg)
{
if (msg == switchTimer) {
handlePossibleSwitch();
}
}
| 2,593 | 33.131579 | 107 | cc |
null | AICP-main/veins/src/veins/modules/world/traci/trafficLight/logics/TraCITrafficLightAbstractLogic.h | //
// Copyright (C) 2015-2018 Dominik Buse <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
#include "veins/veins.h"
#include "veins/modules/messages/TraCITrafficLightMessage_m.h"
namespace veins {
using omnetpp::cMessage;
using omnetpp::cSimpleModule;
/**
* Base class to simplify implementation of traffic light logics
*
* already provides multiplexing of different message types to message handlers and a
* special handler to be executed right before the TraCI server performs a phase switch
*/
class VEINS_API TraCITrafficLightAbstractLogic : public cSimpleModule {
public:
TraCITrafficLightAbstractLogic();
~TraCITrafficLightAbstractLogic() override;
protected:
cMessage* switchTimer;
void initialize() override;
void handleMessage(cMessage* msg) override;
virtual void handleSelfMsg(cMessage* msg);
virtual void handleApplMsg(cMessage* msg) = 0;
virtual void handleTlIfMsg(TraCITrafficLightMessage* tlMsg) = 0;
virtual void handlePossibleSwitch() = 0;
};
} // namespace veins
| 1,860 | 32.232143 | 87 | h |
null | AICP-main/veins/src/veins/modules/world/traci/trafficLight/logics/TraCITrafficLightSimpleLogic.cc | //
// Copyright (C) 2015-2018 Dominik Buse <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#include "veins/modules/world/traci/trafficLight/logics/TraCITrafficLightSimpleLogic.h"
using veins::TraCITrafficLightSimpleLogic;
Define_Module(veins::TraCITrafficLightSimpleLogic);
void TraCITrafficLightSimpleLogic::handleApplMsg(cMessage* msg)
{
delete msg; // just drop it
}
void TraCITrafficLightSimpleLogic::handleTlIfMsg(TraCITrafficLightMessage* tlMsg)
{
delete tlMsg; // just drop it
}
void TraCITrafficLightSimpleLogic::handlePossibleSwitch()
{
// do nothing - just let it happen
}
| 1,418 | 32 | 87 | cc |
null | AICP-main/veins/src/veins/modules/world/traci/trafficLight/logics/TraCITrafficLightSimpleLogic.h | //
// Copyright (C) 2015-2018 Dominik Buse <[email protected]>
//
// Documentation for these modules is at http://veins.car2x.org/
//
// SPDX-License-Identifier: GPL-2.0-or-later
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
#pragma once
#include "veins/veins.h"
#include "veins/base/utils/FindModule.h"
#include "veins/modules/world/traci/trafficLight/logics/TraCITrafficLightAbstractLogic.h"
#include "veins/modules/world/traci/trafficLight/TraCITrafficLightInterface.h"
namespace veins {
class VEINS_API TraCITrafficLightSimpleLogic : public TraCITrafficLightAbstractLogic {
public:
using signalScheme = std::string;
protected:
void handleApplMsg(cMessage* msg) override;
void handleTlIfMsg(TraCITrafficLightMessage* tlMsg) override;
void handlePossibleSwitch() override;
};
class VEINS_API TraCITrafficLightSimpleLogicAccess {
public:
TraCITrafficLightSimpleLogic* get(cModule* host)
{
TraCITrafficLightSimpleLogic* traci = FindModule<TraCITrafficLightSimpleLogic*>::findSubModule(host);
ASSERT(traci);
return traci;
};
};
} // namespace veins
| 1,791 | 32.185185 | 109 | h |
LiDAR2LiDAR | LiDAR2LiDAR-master/README.md | # LiDAR2LiDAR
For more calibration code, please refer to <a href="https://github.com/PJLab-ADG/SensorsCalibration" title="SensorsCalibration">SensorsCalibration</a>.
## Pipeline

## Network
### Installation
```shell
# Install packages and other dependencies
pip install -r requirements.txt
python setup.py build develop
```
### 3DMatch
```shell
python test.py --snapshot=../../output/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/snapshots/snapshot.pth.tar --benchmark=3DMatch
python eval.py --benchmark=3DMatch --method=svd   # or --method=ransac
```
### Kitti
```shell
python test.py --snapshot=../../output/geotransformer.Kitti.stage4.gse.k3.max.oacl.stage2.sinkhorn/snapshots/snapshot.pth.tar
python eval.py --method=svd   # or --method=ransac
```
### road_scene
```shell
python test.py --snapshot=../../output/geotransformer.CROON.stage4.gse.k3.max.oacl.stage2.sinkhorn/snapshots/snapshot.pth.tar
python eval.py --method=svd   # or --method=ransac
```
## Octree Optimization
### Prerequisites
- Cmake
- Opencv 2.4.13
- PCL 1.9
### Compile
Compile in their respective folders
```shell
# mkdir build
mkdir -p build && cd build
# build
cmake .. && make
```
### Dataset
Because the dataset is relatively large, only test samples are uploaded; the complete data can be downloaded from the link below.
```
Link (链接): https://pan.baidu.com/s/1EhiNVWAD1t96h0to7GTlIA
Extraction code (提取码): ctuk
```
### Usage
1. Two input files:
`point_cloud_path initial_extrinsic`
   - **point_cloud_path**: paths of the LiDAR point clouds (see the example below)
   - **initial_extrinsic**: initial extrinsic parameters
2. Run the test sample:
The executable file is under the bin folder.
```
./bin/run_lidar2lidar data/0001/lidar_cloud_path.txt data/0001/initial_extrinsic.txt
```
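3. Example input files (illustration only): the exact layout is defined by the files in the released data package, so treat this sketch as an assumption, not a specification.
   ```
   # lidar_cloud_path.txt: one point-cloud file per LiDAR, one path per line
   data/0001/top.pcd
   data/0001/front.pcd
   ...
   # initial_extrinsic.txt: one initial extrinsic (rotation + translation) per LiDAR
   ```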
| 1,831 | 25.171429 | 160 | md |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/setup.py | from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='geotransformer',
version='1.0.0',
ext_modules=[
CUDAExtension(
name='geotransformer.ext',
sources=[
'geotransformer/extensions/extra/cloud/cloud.cpp',
'geotransformer/extensions/cpu/grid_subsampling/grid_subsampling.cpp',
'geotransformer/extensions/cpu/grid_subsampling/grid_subsampling_cpu.cpp',
'geotransformer/extensions/cpu/radius_neighbors/radius_neighbors.cpp',
'geotransformer/extensions/cpu/radius_neighbors/radius_neighbors_cpu.cpp',
'geotransformer/extensions/pybind.cpp',
],
),
],
cmdclass={'build_ext': BuildExtension},
)
| 814 | 34.434783 | 90 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/data/Kitti/downsample_pcd.py | import os
import os.path as osp
import open3d as o3d
import numpy as np
import glob
from tqdm import tqdm
def main():
    # downsample the velodyne scans of the 11 KITTI odometry sequences (00-10)
    for i in range(11):
        seq_id = '{:02d}'.format(i)
        file_names = glob.glob(osp.join('sequences', seq_id, '*.bin'))
        print("file_names:", file_names)
        for file_name in tqdm(file_names):
            frame = file_name.split('/')[-1][:-4]
            new_file_name = osp.join('downsampled', seq_id, frame + '.npy')
            # each KITTI .bin stores float32 (x, y, z, intensity); keep xyz only
            points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4)
            points = points[:, :3]
            pcd = o3d.geometry.PointCloud()
            pcd.points = o3d.utility.Vector3dVector(points)
            pcd = pcd.voxel_down_sample(0.3)  # 30 cm voxel grid
            points = np.array(pcd.points).astype(np.float32)
            np.save(new_file_name, points)
if __name__ == '__main__':
main()
| 865 | 29.928571 | 76 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/data/road_scene/downsample_pcd.py | import os
import os.path as osp
import open3d as o3d
import numpy as np
import glob
from tqdm import tqdm
def main():
    dataset_path = './unreal_world'
    new_dataset_path = './downsampled/unreal_world'
    lidar_positions = ['top', 'front', 'back', 'left', 'right']
    all_data = os.listdir(dataset_path)
    print("all_data:", len(all_data))
    for file_name in tqdm(all_data):
        out_dir = osp.join(new_dataset_path, file_name)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        # downsample the point cloud of every LiDAR position in this sample
        for position in lidar_positions:
            pcd_name = position + '-' + file_name + '.pcd'
            pcd = o3d.io.read_point_cloud(osp.join(dataset_path, file_name, pcd_name))
            points = np.asarray(pcd.points)[:, :3]
            down_pcd = o3d.geometry.PointCloud()
            down_pcd.points = o3d.utility.Vector3dVector(points)
            down_pcd = down_pcd.voxel_down_sample(0.1)  # 10 cm voxel grid
            o3d.io.write_point_cloud(osp.join(out_dir, pcd_name), down_pcd)
if __name__ == '__main__':
main()
| 2,904 | 38.256757 | 86 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/data/road_scene/metadata/train_val_test_split.py | import os
import random
all_data = os.listdir('/Download/GeoTransformer/GeoTransformer/data/Lidar2Lidar/downsampled/unreal_world/unreal_world')
random.shuffle(all_data)
print("all_data", len(all_data))
# 60/20/20 train/val/test split (assumes 1900 samples in the downsampled set)
train_data = all_data[:1140]
val_data = all_data[1140:1520]
test_data = all_data[1520:]
train_txt = open('/Download/GeoTransformer/GeoTransformer/data/Lidar2Lidar/metadata/train.txt', 'w')
for i in range(len(train_data)):
train_txt.write(str(train_data[i])+'-top'+'-front'+'\n')
train_txt.write(str(train_data[i])+'-top'+'-back'+'\n')
train_txt.write(str(train_data[i])+'-top'+'-left'+'\n')
train_txt.write(str(train_data[i])+'-top'+'-right'+'\n')
val_txt = open('/Download/GeoTransformer/GeoTransformer/data/Lidar2Lidar/metadata/val.txt', 'w')
for i in range(len(val_data)):
val_txt.write(str(val_data[i])+'-top'+'-front'+'\n')
val_txt.write(str(val_data[i])+'-top'+'-back'+'\n')
val_txt.write(str(val_data[i])+'-top'+'-left'+'\n')
val_txt.write(str(val_data[i])+'-top'+'-right'+'\n')
test_txt = open('/Download/GeoTransformer/GeoTransformer/data/Lidar2Lidar/metadata/test.txt', 'w')
for i in range(len(test_data)):
test_txt.write(str(test_data[i])+'-top'+'-front'+'\n')
test_txt.write(str(test_data[i])+'-top'+'-back'+'\n')
test_txt.write(str(test_data[i])+'-top'+'-left'+'\n')
test_txt.write(str(test_data[i])+'-top'+'-right'+'\n') | 1,398 | 45.633333 | 119 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/__init__.py | 0 | 0 | 0 | py |
|
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/backbone.py | import torch
import torch.nn as nn
from geotransformer.modules.kpconv import ConvBlock, ResidualBlock, UnaryBlock, LastUnaryBlock, nearest_upsample
class KPConvFPN(nn.Module):
def __init__(self, input_dim, output_dim, init_dim, kernel_size, init_radius, init_sigma, group_norm):
super(KPConvFPN, self).__init__()
self.encoder1_1 = ConvBlock(input_dim, init_dim, kernel_size, init_radius, init_sigma, group_norm)
self.encoder1_2 = ResidualBlock(init_dim, init_dim * 2, kernel_size, init_radius, init_sigma, group_norm)
self.encoder2_1 = ResidualBlock(
init_dim * 2, init_dim * 2, kernel_size, init_radius, init_sigma, group_norm, strided=True
)
self.encoder2_2 = ResidualBlock(
init_dim * 2, init_dim * 4, kernel_size, init_radius * 2, init_sigma * 2, group_norm
)
self.encoder2_3 = ResidualBlock(
init_dim * 4, init_dim * 4, kernel_size, init_radius * 2, init_sigma * 2, group_norm
)
self.encoder3_1 = ResidualBlock(
init_dim * 4, init_dim * 4, kernel_size, init_radius * 2, init_sigma * 2, group_norm, strided=True
)
self.encoder3_2 = ResidualBlock(
init_dim * 4, init_dim * 8, kernel_size, init_radius * 4, init_sigma * 4, group_norm
)
self.encoder3_3 = ResidualBlock(
init_dim * 8, init_dim * 8, kernel_size, init_radius * 4, init_sigma * 4, group_norm
)
self.encoder4_1 = ResidualBlock(
init_dim * 8, init_dim * 8, kernel_size, init_radius * 4, init_sigma * 4, group_norm, strided=True
)
self.encoder4_2 = ResidualBlock(
init_dim * 8, init_dim * 16, kernel_size, init_radius * 8, init_sigma * 8, group_norm
)
self.encoder4_3 = ResidualBlock(
init_dim * 16, init_dim * 16, kernel_size, init_radius * 8, init_sigma * 8, group_norm
)
self.decoder3 = UnaryBlock(init_dim * 24, init_dim * 8, group_norm)
# self.decoder2 = UnaryBlock(init_dim * 12, init_dim * 4, group_norm)
self.decoder2 = LastUnaryBlock(init_dim * 12, output_dim)
def forward(self, feats, data_dict):
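        # Four-stage KPConv encoder followed by an FPN-style decoder: coarse
        # features are nearest-upsampled, concatenated with the skip features
        # and fused; returns [stage-2, stage-3, stage-4] features, fine to coarse.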
feats_list = []
points_list = data_dict['points']
neighbors_list = data_dict['neighbors']
subsampling_list = data_dict['subsampling']
upsampling_list = data_dict['upsampling']
feats_s1 = feats
feats_s1 = self.encoder1_1(feats_s1, points_list[0], points_list[0], neighbors_list[0])
feats_s1 = self.encoder1_2(feats_s1, points_list[0], points_list[0], neighbors_list[0])
feats_s2 = self.encoder2_1(feats_s1, points_list[1], points_list[0], subsampling_list[0])
feats_s2 = self.encoder2_2(feats_s2, points_list[1], points_list[1], neighbors_list[1])
feats_s2 = self.encoder2_3(feats_s2, points_list[1], points_list[1], neighbors_list[1])
feats_s3 = self.encoder3_1(feats_s2, points_list[2], points_list[1], subsampling_list[1])
feats_s3 = self.encoder3_2(feats_s3, points_list[2], points_list[2], neighbors_list[2])
feats_s3 = self.encoder3_3(feats_s3, points_list[2], points_list[2], neighbors_list[2])
feats_s4 = self.encoder4_1(feats_s3, points_list[3], points_list[2], subsampling_list[2])
feats_s4 = self.encoder4_2(feats_s4, points_list[3], points_list[3], neighbors_list[3])
feats_s4 = self.encoder4_3(feats_s4, points_list[3], points_list[3], neighbors_list[3])
latent_s4 = feats_s4
feats_list.append(feats_s4)
latent_s3 = nearest_upsample(latent_s4, upsampling_list[2])
latent_s3 = torch.cat([latent_s3, feats_s3], dim=1)
latent_s3 = self.decoder3(latent_s3)
feats_list.append(latent_s3)
latent_s2 = nearest_upsample(latent_s3, upsampling_list[1])
latent_s2 = torch.cat([latent_s2, feats_s2], dim=1)
latent_s2 = self.decoder2(latent_s2)
feats_list.append(latent_s2)
# latent_s1 = nearest_upsample(latent_s2, upsampling_list[0])
# latent_s1 = torch.cat([latent_s1, feats_s1], dim=1)
# latent_s1 = self.decoder1(latent_s1)
# feats_list.append(latent_s1)
feats_list.reverse()
return feats_list
| 4,299 | 44.744681 | 113 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/config.py | import os
import os.path as osp
import argparse
from easydict import EasyDict as edict
from geotransformer.utils.common import ensure_dir
_C = edict()
# common
_C.seed = 7351
# dirs
_C.working_dir = osp.dirname(osp.realpath(__file__))
_C.root_dir = osp.dirname(osp.dirname(_C.working_dir))
_C.exp_name = osp.basename(_C.working_dir)
_C.output_dir = osp.join(_C.root_dir, 'output', _C.exp_name)
_C.snapshot_dir = osp.join(_C.output_dir, 'snapshots')
_C.log_dir = osp.join(_C.output_dir, 'logs')
_C.event_dir = osp.join(_C.output_dir, 'events')
_C.feature_dir = osp.join(_C.output_dir, 'features')
_C.registration_dir = osp.join(_C.output_dir, 'registration')
ensure_dir(_C.output_dir)
ensure_dir(_C.snapshot_dir)
ensure_dir(_C.log_dir)
ensure_dir(_C.event_dir)
ensure_dir(_C.feature_dir)
ensure_dir(_C.registration_dir)
# data
_C.data = edict()
_C.data.dataset_root = osp.join(_C.root_dir, 'data', '3DMatch')
# train data
_C.train = edict()
_C.train.batch_size = 1
_C.train.num_workers = 15
_C.train.point_limit = 30000
_C.train.use_augmentation = True
_C.train.augmentation_noise = 0.005
_C.train.augmentation_rotation = 1.0
# test data
_C.test = edict()
_C.test.batch_size = 1
_C.test.num_workers = 8
_C.test.point_limit = None
# evaluation
_C.eval = edict()
_C.eval.acceptance_overlap = 0.0
_C.eval.acceptance_radius = 0.1
_C.eval.inlier_ratio_threshold = 0.05
_C.eval.rmse_threshold = 0.2
_C.eval.rre_threshold = 15.0
_C.eval.rte_threshold = 0.3
# ransac
_C.ransac = edict()
_C.ransac.distance_threshold = 0.05
_C.ransac.num_points = 3
_C.ransac.num_iterations = 50000
# optim
_C.optim = edict()
_C.optim.lr = 0.001
_C.optim.lr_decay = 0.95
_C.optim.lr_decay_steps = 1
_C.optim.weight_decay = 1e-6
_C.optim.max_epoch = 50
_C.optim.grad_acc_steps = 1
# model - backbone
_C.backbone = edict()
_C.backbone.num_stages = 4
_C.backbone.init_voxel_size = 0.025
_C.backbone.kernel_size = 15
_C.backbone.base_radius = 2.5
_C.backbone.base_sigma = 2.0
_C.backbone.init_radius = _C.backbone.base_radius * _C.backbone.init_voxel_size
_C.backbone.init_sigma = _C.backbone.base_sigma * _C.backbone.init_voxel_size
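# with 2.5 cm voxels: init_radius = 2.5 * 0.025 = 6.25 cm, init_sigma = 2.0 * 0.025 = 5 cm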
_C.backbone.group_norm = 32
_C.backbone.input_dim = 1
_C.backbone.init_dim = 64
_C.backbone.output_dim = 256
# model - Global
_C.model = edict()
_C.model.ground_truth_matching_radius = 0.05
_C.model.num_points_in_patch = 64
_C.model.num_sinkhorn_iterations = 100
# model - Coarse Matching
_C.coarse_matching = edict()
_C.coarse_matching.num_targets = 512
_C.coarse_matching.overlap_threshold = 0.1
_C.coarse_matching.num_correspondences = 512
_C.coarse_matching.dual_normalization = True
# model - GeoTransformer
_C.geotransformer = edict()
_C.geotransformer.input_dim = 1024
_C.geotransformer.hidden_dim = 256 #256
_C.geotransformer.output_dim = 256 #256
_C.geotransformer.num_heads = 4
_C.geotransformer.blocks = ['self', 'cross', 'self', 'cross', 'self', 'cross']
_C.geotransformer.sigma_d = 0.2
_C.geotransformer.sigma_a = 15
_C.geotransformer.angle_k = 3
_C.geotransformer.reduction_a = 'max'
# model - Fine Matching
_C.fine_matching = edict()
_C.fine_matching.topk = 10
_C.fine_matching.acceptance_radius = 0.1
_C.fine_matching.mutual = True
_C.fine_matching.confidence_threshold = 0.05
_C.fine_matching.use_dustbin = False
_C.fine_matching.use_global_score = False
_C.fine_matching.correspondence_threshold = 3
_C.fine_matching.correspondence_limit = None
_C.fine_matching.num_refinement_steps = 5
# loss - Coarse level
_C.coarse_loss = edict()
_C.coarse_loss.positive_margin = 0.1
_C.coarse_loss.negative_margin = 1.4
_C.coarse_loss.positive_optimal = 0.1
_C.coarse_loss.negative_optimal = 1.4
_C.coarse_loss.log_scale = 24
_C.coarse_loss.positive_overlap = 0.1
# loss - Fine level
_C.fine_loss = edict()
_C.fine_loss.positive_radius = 0.05
# loss - Overall
_C.loss = edict()
_C.loss.weight_coarse_loss = 1.0
_C.loss.weight_fine_loss = 1.0
# test args
_C.test_args = edict()
_C.test_args.snapshot = '/dssg/home/acct-eeyj/eeyj-user1/WPJ/GeoTransformer-nodecorr-classification/GeoTransformer/weights/geotransformer-3dmatch.pth.tar'
_C.test_args.benchmark = '3DMatch'
def make_cfg():
return _C
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--link_output', dest='link_output', action='store_true', help='link output dir')
args = parser.parse_args()
return args
def main():
cfg = make_cfg()
args = parse_args()
if args.link_output:
os.symlink(cfg.output_dir, 'output')
if __name__ == '__main__':
main()
| 4,511 | 25.385965 | 154 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/dataset.py | from geotransformer.datasets.registration.threedmatch.dataset import ThreeDMatchPairDataset
from geotransformer.utils.data import (
registration_collate_fn_stack_mode,
calibrate_neighbors_stack_mode,
build_dataloader_stack_mode,
)
def train_valid_data_loader(cfg, distributed):
train_dataset = ThreeDMatchPairDataset(
cfg.data.dataset_root,
'train',
point_limit=cfg.train.point_limit,
use_augmentation=cfg.train.use_augmentation,
augmentation_noise=cfg.train.augmentation_noise,
augmentation_rotation=cfg.train.augmentation_rotation,
)
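    # estimate per-stage neighbor caps on the training set so that KPConv
    # neighborhood lists can be truncated to a fixed width at collate time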
neighbor_limits = calibrate_neighbors_stack_mode(
train_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
)
train_loader = build_dataloader_stack_mode(
train_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
neighbor_limits,
batch_size=cfg.train.batch_size,
num_workers=cfg.train.num_workers,
shuffle=True,
distributed=distributed,
)
valid_dataset = ThreeDMatchPairDataset(
cfg.data.dataset_root,
'val',
point_limit=cfg.test.point_limit,
use_augmentation=False,
)
valid_loader = build_dataloader_stack_mode(
valid_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
neighbor_limits,
batch_size=cfg.test.batch_size,
num_workers=cfg.test.num_workers,
shuffle=False,
distributed=distributed,
)
return train_loader, valid_loader, neighbor_limits
def test_data_loader(cfg, benchmark):
train_dataset = ThreeDMatchPairDataset(
cfg.data.dataset_root,
'train',
point_limit=cfg.train.point_limit,
use_augmentation=cfg.train.use_augmentation,
augmentation_noise=cfg.train.augmentation_noise,
augmentation_rotation=cfg.train.augmentation_rotation,
)
neighbor_limits = calibrate_neighbors_stack_mode(
train_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
)
test_dataset = ThreeDMatchPairDataset(
cfg.data.dataset_root,
benchmark,
point_limit=cfg.test.point_limit,
use_augmentation=False,
feature_dir=cfg.feature_dir,
)
test_loader = build_dataloader_stack_mode(
test_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
neighbor_limits,
batch_size=cfg.test.batch_size,
num_workers=cfg.test.num_workers,
shuffle=False,
)
return test_loader, neighbor_limits
| 3,036 | 30.309278 | 91 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/eval copy.sh | # if [ "$3" = "test" ]; then
# python test.py --test_epoch=$1 --benchmark=$2
# fi
# python eval.py --test_epoch=$1 --benchmark=$2 --method=lgr
# for n in 250 500 1000 2500 5000; do
# python eval.py --num_corr=$n --benchmark=$1 --method=lgr
# done
# python test.py --snapshot=../../weights/geotransformer-3dmatch.pth.tar --benchmark=3DLoMatch
# python test.py --snapshot=../../weights/geotransformer-3dmatch.pth.tar --benchmark=3DMatch
# python eval.py --benchmark=3DLoMatch --method=lgr
# for n in 250 500 1000 2500 5000; do
# python eval.py --num_corr=$n --benchmark=3DLoMatch --method=lgr
# done
# python eval.py --benchmark=3DLoMatch --method=ransac
# for n in 250 500 1000 2500 5000; do
# python eval.py --num_corr=$n --benchmark=3DLoMatch --method=ransac
# done
# python eval.py --benchmark=3DLoMatch --method=svd
# for n in 250 500 1000 2500 5000; do
# python eval.py --num_corr=$n --benchmark=3DLoMatch --method=svd
# done
# python eval.py --benchmark=3DMatch --method=lgr
# for n in 250 500 1000 2500 5000; do
# python eval.py --num_corr=$n --benchmark=3DMatch --method=lgr
# done
# python eval.py --benchmark=3DMatch --method=ransac
# for n in 250 500 1000 2500 5000; do
# python eval.py --num_corr=$n --benchmark=3DMatch --method=ransac
# done
python eval.py --num_corr=5000 --benchmark=3DMatch --method=ransac
python eval.py --benchmark=3DMatch --method=svd
for n in 250 500 1000 2500 5000; do
python eval.py --num_corr=$n --benchmark=3DMatch --method=svd
done
| 1,533 | 35.52381 | 95 | sh |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/eval.py | import argparse
import os.path as osp
import time
import glob
import sys
import json
import torch
import numpy as np
from geotransformer.engine import Logger
from geotransformer.modules.registration import weighted_procrustes
from geotransformer.utils.summary_board import SummaryBoard
from geotransformer.utils.open3d import registration_with_ransac_from_correspondences
from geotransformer.utils.registration import (
evaluate_sparse_correspondences,
evaluate_correspondences,
compute_registration_error,
)
from geotransformer.datasets.registration.threedmatch.utils import (
get_num_fragments,
get_scene_abbr,
get_gt_logs_and_infos,
compute_transform_error,
write_log_file,
)
from config import make_cfg
from geotransformer.utils.torch import release_cuda
from geotransformer.utils.common import ensure_dir
import open3d as o3d
from geotransformer.utils.pointcloud import apply_transform
import os
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--test_epoch', default=None, type=int, help='test epoch')
parser.add_argument('--benchmark', choices=['3DMatch', '3DLoMatch'], required=True, help='test benchmark')
parser.add_argument('--method', choices=['lgr', 'ransac', 'svd'], required=True, help='registration method')
parser.add_argument('--num_corr', type=int, default=None, help='number of correspondences for registration')
parser.add_argument('--verbose', action='store_true', help='verbose mode')
return parser
def eval_one_epoch(args, cfg, logger):
features_root = osp.join(cfg.feature_dir, args.benchmark)
benchmark = args.benchmark
coarse_matching_meter = SummaryBoard()
coarse_matching_meter.register_meter('precision')
coarse_matching_meter.register_meter('PMR>0')
coarse_matching_meter.register_meter('PMR>=0.1')
coarse_matching_meter.register_meter('PMR>=0.3')
coarse_matching_meter.register_meter('PMR>=0.5')
coarse_matching_meter.register_meter('scene_precision')
coarse_matching_meter.register_meter('scene_PMR>0')
coarse_matching_meter.register_meter('scene_PMR>=0.1')
coarse_matching_meter.register_meter('scene_PMR>=0.3')
coarse_matching_meter.register_meter('scene_PMR>=0.5')
fine_matching_meter = SummaryBoard()
fine_matching_meter.register_meter('recall')
fine_matching_meter.register_meter('inlier_ratio')
fine_matching_meter.register_meter('overlap')
fine_matching_meter.register_meter('scene_recall')
fine_matching_meter.register_meter('scene_inlier_ratio')
fine_matching_meter.register_meter('scene_overlap')
registration_meter = SummaryBoard()
registration_meter.register_meter('recall')
registration_meter.register_meter('mean_rre')
registration_meter.register_meter('mean_rte')
registration_meter.register_meter('median_rre')
registration_meter.register_meter('median_rte')
registration_meter.register_meter('scene_recall')
registration_meter.register_meter('scene_rre')
registration_meter.register_meter('scene_rte')
scene_coarse_matching_result_dict = {}
scene_fine_matching_result_dict = {}
scene_registration_result_dict = {}
scene_roots = sorted(glob.glob(osp.join(features_root, '*')))
for scene_root in scene_roots:
coarse_matching_meter.reset_meter('scene_precision')
coarse_matching_meter.reset_meter('scene_PMR>0')
coarse_matching_meter.reset_meter('scene_PMR>=0.1')
coarse_matching_meter.reset_meter('scene_PMR>=0.3')
coarse_matching_meter.reset_meter('scene_PMR>=0.5')
fine_matching_meter.reset_meter('scene_recall')
fine_matching_meter.reset_meter('scene_inlier_ratio')
fine_matching_meter.reset_meter('scene_overlap')
registration_meter.reset_meter('scene_recall')
registration_meter.reset_meter('scene_rre')
registration_meter.reset_meter('scene_rte')
scene_name = osp.basename(scene_root)
scene_abbr = get_scene_abbr(scene_name)
num_fragments = get_num_fragments(scene_name)
gt_root = osp.join(cfg.data.dataset_root, 'metadata', 'benchmarks', benchmark, scene_name)
gt_indices, gt_logs, gt_infos = get_gt_logs_and_infos(gt_root, num_fragments)
estimated_transforms = []
file_names = sorted(
glob.glob(osp.join(scene_root, '*.npz')),
key=lambda x: [int(i) for i in osp.basename(x).split('.')[0].split('_')],
)
print("file_names:",len(file_names))
for file_name in file_names:
ref_frame, src_frame = [int(x) for x in osp.basename(file_name).split('.')[0].split('_')]
data_dict = np.load(file_name)
ref_points_c = data_dict['ref_points_c']
src_points_c = data_dict['src_points_c']
ref_node_corr_indices = data_dict['ref_node_corr_indices']
src_node_corr_indices = data_dict['src_node_corr_indices']
ref_corr_points = data_dict['ref_corr_points']
src_corr_points = data_dict['src_corr_points']
corr_scores = data_dict['corr_scores']
gt_node_corr_indices = data_dict['gt_node_corr_indices']
transform = data_dict['transform']
pcd_overlap = data_dict['overlap']
if args.num_corr is not None and corr_scores.shape[0] > args.num_corr:
sel_indices = np.argsort(-corr_scores)[: args.num_corr]
ref_corr_points = ref_corr_points[sel_indices]
src_corr_points = src_corr_points[sel_indices]
corr_scores = corr_scores[sel_indices]
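                # np.argsort on the negated scores sorts in descending order, so
                # only the args.num_corr most confident correspondences are kept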
message = '{}, id0: {}, id1: {}, OV: {:.3f}'.format(scene_abbr, ref_frame, src_frame, pcd_overlap)
# 1. evaluate correspondences
# 1.1 evaluate coarse correspondences
coarse_matching_result_dict = evaluate_sparse_correspondences(
ref_points_c, src_points_c, ref_node_corr_indices, src_node_corr_indices, gt_node_corr_indices
)
coarse_precision = coarse_matching_result_dict['precision']
coarse_matching_meter.update('scene_precision', coarse_precision)
coarse_matching_meter.update('scene_PMR>0', float(coarse_precision > 0))
coarse_matching_meter.update('scene_PMR>=0.1', float(coarse_precision >= 0.1))
coarse_matching_meter.update('scene_PMR>=0.3', float(coarse_precision >= 0.3))
coarse_matching_meter.update('scene_PMR>=0.5', float(coarse_precision >= 0.5))
# 1.2 evaluate fine correspondences
fine_matching_result_dict = evaluate_correspondences(
ref_corr_points, src_corr_points, transform, positive_radius=cfg.eval.acceptance_radius
)
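            # inlier ratio = fraction of correspondences whose residual under the
            # ground-truth transform falls below cfg.eval.acceptance_radius; a
            # pair counts towards fine-matching recall (FMR) when its inlier
            # ratio clears cfg.eval.inlier_ratio_threshold (see the update below)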
inlier_ratio = fine_matching_result_dict['inlier_ratio']
overlap = fine_matching_result_dict['overlap']
fine_matching_meter.update('scene_inlier_ratio', inlier_ratio)
fine_matching_meter.update('scene_overlap', overlap)
fine_matching_meter.update('scene_recall', float(inlier_ratio >= cfg.eval.inlier_ratio_threshold))
message += ', c_PIR: {:.3f}'.format(coarse_precision)
message += ', f_IR: {:.3f}'.format(inlier_ratio)
message += ', f_OV: {:.3f}'.format(overlap)
message += ', f_RS: {:.3f}'.format(fine_matching_result_dict['residual'])
message += ', f_NU: {}'.format(fine_matching_result_dict['num_corr'])
# 2. evaluate registration
if args.method == 'lgr':
estimated_transform = data_dict['estimated_transform']
            elif args.method == 'ransac':
                ref_points = data_dict['ref_points']
                src_points = data_dict['src_points']
                estimated_transform = registration_with_ransac_from_correspondences(
                    src_corr_points,
                    ref_corr_points,
                    distance_threshold=cfg.ransac.distance_threshold,
                    ransac_n=cfg.ransac.num_points,
                    num_iterations=cfg.ransac.num_iterations,
                )
                # dump the aligned point clouds for visual inspection; derive the
                # output directory from the benchmark name so this works for both
                # 3DMatch and 3DLoMatch
                src_points = apply_transform(src_points, estimated_transform)
                pcd_dir = file_name.replace(benchmark, benchmark + '_pcd')[:-4] + '/'
                if not osp.exists(pcd_dir):
                    os.makedirs(pcd_dir)
                ref_pcd = o3d.geometry.PointCloud()
                ref_pcd.points = o3d.utility.Vector3dVector(np.array(ref_points))
                o3d.io.write_point_cloud(osp.join(pcd_dir, 'ref_pcd.pcd'), ref_pcd)
                src_pcd = o3d.geometry.PointCloud()
                src_pcd.points = o3d.utility.Vector3dVector(np.array(src_points))
                o3d.io.write_point_cloud(osp.join(pcd_dir, 'src_pcd.pcd'), src_pcd)
                # save the RANSAC transform under its own key, without clobbering
                # `file_name`, which names the feature file
                transform_dir = file_name.replace(benchmark, benchmark + '_ransac_transform')[:-4] + '/'
                ensure_dir(transform_dir)
                np.savez_compressed(
                    osp.join(transform_dir, 'ransac_transform.npz'),
                    estimated_transform=release_cuda(estimated_transform),
                )
elif args.method == 'svd':
with torch.no_grad():
ref_corr_points = torch.from_numpy(ref_corr_points).cuda()
src_corr_points = torch.from_numpy(src_corr_points).cuda()
corr_scores = torch.from_numpy(corr_scores).cuda()
estimated_transform = weighted_procrustes(
src_corr_points, ref_corr_points, corr_scores, return_transform=True
)
estimated_transform = estimated_transform.detach().cpu().numpy()
else:
raise ValueError(f'Unsupported registration method: {args.method}.')
estimated_transforms.append(
dict(
test_pair=[ref_frame, src_frame],
num_fragments=num_fragments,
transform=estimated_transform,
)
)
if gt_indices[ref_frame, src_frame] != -1:
# evaluate transform (realignment error)
gt_index = gt_indices[ref_frame, src_frame]
transform = gt_logs[gt_index]['transform']
covariance = gt_infos[gt_index]['covariance']
error = compute_transform_error(transform, covariance, estimated_transform)
message += ', r_RMSE: {:.3f}'.format(np.sqrt(error))
accepted = error < cfg.eval.rmse_threshold ** 2
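                # compute_transform_error returns a squared, covariance-weighted
                # RMSE following the 3DMatch protocol, hence the sqrt for logging
                # above and the squared threshold here (rmse_threshold is
                # typically 0.2 m)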
registration_meter.update('scene_recall', float(accepted))
if accepted:
rre, rte = compute_registration_error(transform, estimated_transform)
registration_meter.update('scene_rre', rre)
registration_meter.update('scene_rte', rte)
message += ', r_RRE: {:.3f}'.format(rre)
message += ', r_RTE: {:.3f}'.format(rte)
# Evaluate re-alignment error
# if ref_frame + 1 < src_frame:
# evaluate transform (realignment error)
# src_points_f = data_dict['src_points_f']
# error = compute_realignment_error(src_points_f, transform, estimated_transform)
# message += ', r_RMSE: {:.3f}'.format(error)
# accepted = error < config.eval_rmse_threshold
# registration_meter.update('scene_recall', float(accepted))
# if accepted:
# rre, rte = compute_registration_error(transform, estimated_transform)
# registration_meter.update('scene_rre', rre)
# registration_meter.update('scene_rte', rte)
# message += ', r_RRE: {:.3f}, r_RTE: {:.3f}'.format(rre, rte)
if args.verbose:
logger.info(message)
est_log = osp.join(cfg.registration_dir, benchmark, scene_name, 'est.log')
write_log_file(est_log, estimated_transforms)
logger.info(f'Scene_name: {scene_name}')
# 1. print correspondence evaluation results (one scene)
# 1.1 coarse level statistics
coarse_precision = coarse_matching_meter.mean('scene_precision')
coarse_matching_recall_0 = coarse_matching_meter.mean('scene_PMR>0')
coarse_matching_recall_1 = coarse_matching_meter.mean('scene_PMR>=0.1')
coarse_matching_recall_3 = coarse_matching_meter.mean('scene_PMR>=0.3')
coarse_matching_recall_5 = coarse_matching_meter.mean('scene_PMR>=0.5')
coarse_matching_meter.update('precision', coarse_precision)
coarse_matching_meter.update('PMR>0', coarse_matching_recall_0)
coarse_matching_meter.update('PMR>=0.1', coarse_matching_recall_1)
coarse_matching_meter.update('PMR>=0.3', coarse_matching_recall_3)
coarse_matching_meter.update('PMR>=0.5', coarse_matching_recall_5)
scene_coarse_matching_result_dict[scene_abbr] = {
'precision': coarse_precision,
'PMR>0': coarse_matching_recall_0,
'PMR>=0.1': coarse_matching_recall_1,
'PMR>=0.3': coarse_matching_recall_3,
'PMR>=0.5': coarse_matching_recall_5,
}
# 1.2 fine level statistics
recall = fine_matching_meter.mean('scene_recall')
inlier_ratio = fine_matching_meter.mean('scene_inlier_ratio')
overlap = fine_matching_meter.mean('scene_overlap')
fine_matching_meter.update('recall', recall)
fine_matching_meter.update('inlier_ratio', inlier_ratio)
fine_matching_meter.update('overlap', overlap)
scene_fine_matching_result_dict[scene_abbr] = {'recall': recall, 'inlier_ratio': inlier_ratio}
        message = ' Correspondence'
message += ', c_PIR: {:.3f}'.format(coarse_precision)
message += ', c_PMR>0: {:.3f}'.format(coarse_matching_recall_0)
message += ', c_PMR>=0.1: {:.3f}'.format(coarse_matching_recall_1)
message += ', c_PMR>=0.3: {:.3f}'.format(coarse_matching_recall_3)
message += ', c_PMR>=0.5: {:.3f}'.format(coarse_matching_recall_5)
message += ', f_FMR: {:.3f}'.format(recall)
message += ', f_IR: {:.3f}'.format(inlier_ratio)
message += ', f_OV: {:.3f}'.format(overlap)
logger.info(message)
# 2. print registration evaluation results (one scene)
recall = registration_meter.mean('scene_recall')
mean_rre = registration_meter.mean('scene_rre')
mean_rte = registration_meter.mean('scene_rte')
median_rre = registration_meter.median('scene_rre')
median_rte = registration_meter.median('scene_rte')
registration_meter.update('recall', recall)
registration_meter.update('mean_rre', mean_rre)
registration_meter.update('mean_rte', mean_rte)
registration_meter.update('median_rre', median_rre)
registration_meter.update('median_rte', median_rte)
scene_registration_result_dict[scene_abbr] = {
'recall': recall,
'mean_rre': mean_rre,
'mean_rte': mean_rte,
'median_rre': median_rre,
'median_rte': median_rte,
}
message = ' Registration'
message += ', RR: {:.3f}'.format(recall)
message += ', mean_RRE: {:.3f}'.format(mean_rre)
message += ', mean_RTE: {:.3f}'.format(mean_rte)
message += ', median_RRE: {:.3f}'.format(median_rre)
message += ', median_RTE: {:.3f}'.format(median_rte)
logger.info(message)
if args.test_epoch is not None:
logger.critical('Epoch {}'.format(args.test_epoch))
# 1. print correspondence evaluation results
message = ' Coarse Matching'
message += ', PIR: {:.3f}'.format(coarse_matching_meter.mean('precision'))
message += ', PMR>0: {:.3f}'.format(coarse_matching_meter.mean('PMR>0'))
message += ', PMR>=0.1: {:.3f}'.format(coarse_matching_meter.mean('PMR>=0.1'))
message += ', PMR>=0.3: {:.3f}'.format(coarse_matching_meter.mean('PMR>=0.3'))
message += ', PMR>=0.5: {:.3f}'.format(coarse_matching_meter.mean('PMR>=0.5'))
logger.critical(message)
for scene_abbr, result_dict in scene_coarse_matching_result_dict.items():
message = ' {}'.format(scene_abbr)
message += ', PIR: {:.3f}'.format(result_dict['precision'])
message += ', PMR>0: {:.3f}'.format(result_dict['PMR>0'])
message += ', PMR>=0.1: {:.3f}'.format(result_dict['PMR>=0.1'])
message += ', PMR>=0.3: {:.3f}'.format(result_dict['PMR>=0.3'])
message += ', PMR>=0.5: {:.3f}'.format(result_dict['PMR>=0.5'])
logger.critical(message)
message = ' Fine Matching'
message += ', FMR: {:.3f}'.format(fine_matching_meter.mean('recall'))
message += ', IR: {:.3f}'.format(fine_matching_meter.mean('inlier_ratio'))
message += ', OV: {:.3f}'.format(fine_matching_meter.mean('overlap'))
message += ', std: {:.3f}'.format(fine_matching_meter.std('recall'))
logger.critical(message)
for scene_abbr, result_dict in scene_fine_matching_result_dict.items():
message = ' {}'.format(scene_abbr)
message += ', FMR: {:.3f}'.format(result_dict['recall'])
message += ', IR: {:.3f}'.format(result_dict['inlier_ratio'])
logger.critical(message)
# 2. print registration evaluation results
message = ' Registration'
message += ', RR: {:.3f}'.format(registration_meter.mean('recall'))
message += ', mean_RRE: {:.3f}'.format(registration_meter.mean('mean_rre'))
message += ', mean_RTE: {:.3f}'.format(registration_meter.mean('mean_rte'))
message += ', median_RRE: {:.3f}'.format(registration_meter.mean('median_rre'))
message += ', median_RTE: {:.3f}'.format(registration_meter.mean('median_rte'))
logger.critical(message)
for scene_abbr, result_dict in scene_registration_result_dict.items():
message = ' {}'.format(scene_abbr)
message += ', RR: {:.3f}'.format(result_dict['recall'])
message += ', mean_RRE: {:.3f}'.format(result_dict['mean_rre'])
message += ', mean_RTE: {:.3f}'.format(result_dict['mean_rte'])
message += ', median_RRE: {:.3f}'.format(result_dict['median_rre'])
message += ', median_RTE: {:.3f}'.format(result_dict['median_rte'])
logger.critical(message)
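# --- Illustrative sketch (defined but never called): a minimal NumPy version of
# the weighted Procrustes solve behind the 'svd' method above, assuming the
# usual Kabsch/Umeyama formulation. The CUDA `weighted_procrustes` used above is
# the authoritative implementation; this is only a CPU cross-check aid. ---
def weighted_procrustes_numpy_sketch(src_points, ref_points, weights):
    # normalized weights and weighted centroids
    w = weights / (weights.sum() + 1e-12)
    src_mean = (w[:, None] * src_points).sum(axis=0)
    ref_mean = (w[:, None] * ref_points).sum(axis=0)
    src_centered = src_points - src_mean
    ref_centered = ref_points - ref_mean
    # weighted cross-covariance H = sum_i w_i * src_i * ref_i^T
    H = src_centered.T @ (w[:, None] * ref_centered)
    U, _, Vt = np.linalg.svd(H)
    # reflection guard keeps det(R) = +1
    d = np.sign(np.linalg.det(Vt.T @ U.T))
    R = Vt.T @ np.diag([1.0, 1.0, d]) @ U.T
    t = ref_mean - R @ src_mean
    transform = np.eye(4)
    transform[:3, :3] = R
    transform[:3, 3] = t
    return transform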
def main():
parser = make_parser()
args = parser.parse_args()
cfg = make_cfg()
log_file = osp.join(cfg.log_dir, 'eval-{}.log'.format(time.strftime('%Y%m%d-%H%M%S')))
logger = Logger(log_file=log_file)
message = 'Command executed: ' + ' '.join(sys.argv)
logger.info(message)
message = 'Configs:\n' + json.dumps(cfg, indent=4)
logger.info(message)
eval_one_epoch(args, cfg, logger)
if __name__ == '__main__':
main()
| 19,612 | 47.189189 | 127 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/eval.sh | # if [ "$3" = "test" ]; then
# python test.py --test_epoch=$1 --benchmark=$2
# fi
# python eval.py --test_epoch=$1 --benchmark=$2 --method=lgr
# # for n in 250 500 1000 2500; do
# # python eval.py --test_epoch=$1 --num_corr=$n --run_matching --run_registration --benchmark=$2
# # done
python test1.py --test_epoch=50 --benchmark=3DLoMatch
python test1.py --test_epoch=50 --benchmark=3DMatch
for benchmark in 3DLoMatch 3DMatch; do
    for method in lgr ransac svd; do
        python eval.py --benchmark=$benchmark --method=$method
        for n in 250 500 1000 2500 5000; do
            python eval.py --num_corr=$n --benchmark=$benchmark --method=$method
        done
    done
done
| 1,380 | 32.682927 | 101 | sh |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/eval_all.sh | for n in $(seq 20 40); do
python test.py --test_epoch=$n --benchmark=$1 --verbose
python eval.py --test_epoch=$n --benchmark=$1 --method=lgr
done
# for n in 250 500 1000 2500; do
# python eval.py --test_epoch=$1 --num_corr=$n --run_matching --run_registration --benchmark=$2
# done
| 294 | 35.875 | 99 | sh |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/eval_dgr.py | import argparse
import os.path as osp
import time
import glob
import sys
import json
import torch
import numpy as np
from geotransformer.engine import Logger
from geotransformer.modules.registration.procrustes import weighted_procrustes
from geotransformer.utils.summary_board import SummaryBoard
from geotransformer.utils.open3d import registration_with_ransac_from_correspondences
from geotransformer.utils.registration import (
evaluate_sparse_correspondences,
evaluate_correspondences,
compute_registration_error,
)
from geotransformer.datasets.registration.threedmatch.utils import get_scene_abbr
from config import make_cfg
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--test_epoch', type=int, required=True, help='test epoch')
parser.add_argument('--benchmark', choices=['3DMatch', '3DLoMatch'], help='test benchmark')
parser.add_argument('--method', choices=['lgr', 'ransac', 'svd'], help='registration method')
parser.add_argument('--num_corr', type=int, default=None, help='number of correspondences for registration')
parser.add_argument('--verbose', action='store_true', help='verbose mode')
return parser
def eval_one_epoch(args, cfg, logger):
features_root = osp.join(cfg.feature_dir, args.benchmark, f'epoch-{args.test_epoch}')
coarse_matching_meter = SummaryBoard()
coarse_matching_meter.register_meter('precision')
coarse_matching_meter.register_meter('PMR>0')
coarse_matching_meter.register_meter('PMR>=0.1')
coarse_matching_meter.register_meter('PMR>=0.3')
coarse_matching_meter.register_meter('PMR>=0.5')
coarse_matching_meter.register_meter('scene_precision')
coarse_matching_meter.register_meter('scene_PMR>0')
coarse_matching_meter.register_meter('scene_PMR>=0.1')
coarse_matching_meter.register_meter('scene_PMR>=0.3')
coarse_matching_meter.register_meter('scene_PMR>=0.5')
fine_matching_meter = SummaryBoard()
fine_matching_meter.register_meter('recall')
fine_matching_meter.register_meter('inlier_ratio')
fine_matching_meter.register_meter('overlap')
fine_matching_meter.register_meter('scene_recall')
fine_matching_meter.register_meter('scene_inlier_ratio')
fine_matching_meter.register_meter('scene_overlap')
registration_meter = SummaryBoard()
registration_meter.register_meter('recall')
registration_meter.register_meter('rre')
registration_meter.register_meter('rte')
registration_meter.register_meter('scene_recall')
registration_meter.register_meter('scene_rre')
registration_meter.register_meter('scene_rte')
registration_meter.register_meter('overall_recall')
registration_meter.register_meter('overall_rre')
registration_meter.register_meter('overall_rte')
scene_coarse_matching_result_dict = {}
scene_fine_matching_result_dict = {}
scene_registration_result_dict = {}
scene_roots = sorted(glob.glob(osp.join(features_root, '*')))
for scene_root in scene_roots:
coarse_matching_meter.reset_meter('scene_precision')
coarse_matching_meter.reset_meter('scene_PMR>0')
coarse_matching_meter.reset_meter('scene_PMR>=0.1')
coarse_matching_meter.reset_meter('scene_PMR>=0.3')
coarse_matching_meter.reset_meter('scene_PMR>=0.5')
fine_matching_meter.reset_meter('scene_recall')
fine_matching_meter.reset_meter('scene_inlier_ratio')
fine_matching_meter.reset_meter('scene_overlap')
        registration_meter.reset_meter('scene_recall')
        registration_meter.reset_meter('scene_rre')
        registration_meter.reset_meter('scene_rte')
scene_name = osp.basename(scene_root)
scene_abbr = get_scene_abbr(scene_name)
file_names = sorted(
glob.glob(osp.join(scene_root, '*.npz')),
key=lambda x: [int(i) for i in osp.basename(x).split('.')[0].split('_')],
)
for file_name in file_names:
ref_frame, src_frame = [int(x) for x in osp.basename(file_name).split('.')[0].split('_')]
data_dict = np.load(file_name)
ref_points_c = data_dict['ref_points_c']
src_points_c = data_dict['src_points_c']
ref_node_corr_indices = data_dict['ref_node_corr_indices']
src_node_corr_indices = data_dict['src_node_corr_indices']
gt_node_corr_indices = data_dict['gt_node_corr_indices']
ref_corr_points = data_dict['ref_corr_points']
src_corr_points = data_dict['src_corr_points']
corr_scores = data_dict['corr_scores']
if args.num_corr is not None and corr_scores.shape[0] > args.num_corr:
sel_indices = np.argsort(-corr_scores)[: args.num_corr]
ref_corr_points = ref_corr_points[sel_indices]
src_corr_points = src_corr_points[sel_indices]
corr_scores = corr_scores[sel_indices]
transform = data_dict['transform']
pcd_overlap = data_dict['overlap']
message = '{}, id0: {}, id1: {}, OV: {:.3f}'.format(scene_abbr, ref_frame, src_frame, pcd_overlap)
# 1. evaluate correspondences
# 1.1 evaluate coarse correspondences
coarse_matching_result_dict = evaluate_sparse_correspondences(
ref_points_c, src_points_c, ref_node_corr_indices, src_node_corr_indices, gt_node_corr_indices
)
coarse_precision = coarse_matching_result_dict['precision']
coarse_matching_meter.update('scene_precision', coarse_precision)
coarse_matching_meter.update('scene_PMR>0', float(coarse_precision > 0))
coarse_matching_meter.update('scene_PMR>=0.1', float(coarse_precision >= 0.1))
coarse_matching_meter.update('scene_PMR>=0.3', float(coarse_precision >= 0.3))
coarse_matching_meter.update('scene_PMR>=0.5', float(coarse_precision >= 0.5))
# 1.2 evaluate fine correspondences
fine_matching_result_dict = evaluate_correspondences(
ref_corr_points, src_corr_points, transform, positive_radius=cfg.eval.acceptance_radius
)
inlier_ratio = fine_matching_result_dict['inlier_ratio']
overlap = fine_matching_result_dict['overlap']
fine_matching_meter.update('scene_inlier_ratio', inlier_ratio)
fine_matching_meter.update('scene_overlap', overlap)
fine_matching_meter.update('scene_recall', float(inlier_ratio >= cfg.eval.inlier_ratio_threshold))
message += ', c_PIR: {:.3f}'.format(coarse_precision)
message += ', f_IR: {:.3f}'.format(inlier_ratio)
message += ', f_OV: {:.3f}'.format(overlap)
message += ', f_RS: {:.3f}'.format(fine_matching_result_dict['residual'])
message += ', f_NU: {}'.format(fine_matching_result_dict['num_corr'])
# 2. evaluate registration
if args.method == 'lgr':
estimated_transform = data_dict['estimated_transform']
elif args.method == 'ransac':
estimated_transform = registration_with_ransac_from_correspondences(
src_corr_points,
ref_corr_points,
distance_threshold=cfg.ransac.distance_threshold,
ransac_n=cfg.ransac.num_points,
num_iterations=cfg.ransac.num_iterations,
)
elif args.method == 'svd':
with torch.no_grad():
ref_corr_points = torch.from_numpy(ref_corr_points).cuda()
src_corr_points = torch.from_numpy(src_corr_points).cuda()
corr_scores = torch.from_numpy(corr_scores).cuda()
estimated_transform = weighted_procrustes(
src_corr_points, ref_corr_points, corr_scores, return_transform=True
)
estimated_transform = estimated_transform.detach().cpu().numpy()
else:
raise ValueError(f'Unsupported registration method: {args.method}.')
rre, rte = compute_registration_error(transform, estimated_transform)
accepted = rre < cfg.eval.rre_threshold and rte < cfg.eval.rte_threshold
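            # assuming the usual isotropic error definitions inside
            # compute_registration_error: RRE = arccos((trace(R_gt^T R_est) - 1) / 2)
            # in degrees and RTE = ||t_gt - t_est|| in meters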
if accepted:
registration_meter.update('scene_rre', rre)
registration_meter.update('scene_rte', rte)
registration_meter.update('overall_rre', rre)
registration_meter.update('overall_rte', rte)
registration_meter.update('scene_recall', float(accepted))
registration_meter.update('overall_recall', float(accepted))
message += ', r_RRE: {:.3f}'.format(rre)
message += ', r_RTE: {:.3f}'.format(rte)
if args.verbose:
logger.info(message)
logger.info(f'Scene_name: {scene_name}')
# 1. print correspondence evaluation results (one scene)
# 1.1 coarse level statistics
coarse_precision = coarse_matching_meter.mean('scene_precision')
coarse_matching_recall_0 = coarse_matching_meter.mean('scene_PMR>0')
coarse_matching_recall_1 = coarse_matching_meter.mean('scene_PMR>=0.1')
coarse_matching_recall_3 = coarse_matching_meter.mean('scene_PMR>=0.3')
coarse_matching_recall_5 = coarse_matching_meter.mean('scene_PMR>=0.5')
coarse_matching_meter.update('precision', coarse_precision)
coarse_matching_meter.update('PMR>0', coarse_matching_recall_0)
coarse_matching_meter.update('PMR>=0.1', coarse_matching_recall_1)
coarse_matching_meter.update('PMR>=0.3', coarse_matching_recall_3)
coarse_matching_meter.update('PMR>=0.5', coarse_matching_recall_5)
scene_coarse_matching_result_dict[scene_abbr] = {
'precision': coarse_precision,
'PMR>0': coarse_matching_recall_0,
'PMR>=0.1': coarse_matching_recall_1,
'PMR>=0.3': coarse_matching_recall_3,
'PMR>=0.5': coarse_matching_recall_5,
}
# 1.2 fine level statistics
recall = fine_matching_meter.mean('scene_recall')
inlier_ratio = fine_matching_meter.mean('scene_inlier_ratio')
overlap = fine_matching_meter.mean('scene_overlap')
fine_matching_meter.update('recall', recall)
fine_matching_meter.update('inlier_ratio', inlier_ratio)
fine_matching_meter.update('overlap', overlap)
scene_fine_matching_result_dict[scene_abbr] = {'recall': recall, 'inlier_ratio': inlier_ratio}
        message = ' Correspondence'
message += ', c_PIR: {:.3f}'.format(coarse_precision)
message += ', c_PMR>0: {:.3f}'.format(coarse_matching_recall_0)
message += ', c_PMR>=0.1: {:.3f}'.format(coarse_matching_recall_1)
message += ', c_PMR>=0.3: {:.3f}'.format(coarse_matching_recall_3)
message += ', c_PMR>=0.5: {:.3f}'.format(coarse_matching_recall_5)
message += ', f_FMR: {:.3f}'.format(recall)
message += ', f_IR: {:.3f}'.format(inlier_ratio)
message += ', f_OV: {:.3f}'.format(overlap)
logger.info(message)
# 2. print registration evaluation results (one scene)
recall = registration_meter.mean('scene_recall')
rre = registration_meter.mean('scene_rre')
rte = registration_meter.mean('scene_rte')
registration_meter.update('recall', recall)
registration_meter.update('rre', rre)
registration_meter.update('rte', rte)
scene_registration_result_dict[scene_abbr] = {
'recall': recall,
'rre': rre,
'rte': rte,
}
message = ' Registration'
message += ', RR: {:.3f}'.format(recall)
message += ', RRE: {:.3f}'.format(rre)
message += ', RTE: {:.3f}'.format(rte)
logger.info(message)
logger.critical('Epoch {}'.format(args.test_epoch))
# 1. print correspondence evaluation results
    message = ' Coarse Matching'
message += ', PIR: {:.3f}'.format(coarse_matching_meter.mean('precision'))
message += ', PMR>0: {:.3f}'.format(coarse_matching_meter.mean('PMR>0'))
message += ', PMR>=0.1: {:.3f}'.format(coarse_matching_meter.mean('PMR>=0.1'))
message += ', PMR>=0.3: {:.3f}'.format(coarse_matching_meter.mean('PMR>=0.3'))
message += ', PMR>=0.5: {:.3f}'.format(coarse_matching_meter.mean('PMR>=0.5'))
logger.critical(message)
for scene_abbr, result_dict in scene_coarse_matching_result_dict.items():
message = ' {}'.format(scene_abbr)
message += ', PIR: {:.3f}'.format(result_dict['precision'])
message += ', PMR>0: {:.3f}'.format(result_dict['PMR>0'])
message += ', PMR>=0.1: {:.3f}'.format(result_dict['PMR>=0.1'])
message += ', PMR>=0.3: {:.3f}'.format(result_dict['PMR>=0.3'])
message += ', PMR>=0.5: {:.3f}'.format(result_dict['PMR>=0.5'])
logger.critical(message)
message = ' Fine Matching'
message += ', FMR: {:.3f}'.format(fine_matching_meter.mean('recall'))
message += ', IR: {:.3f}'.format(fine_matching_meter.mean('inlier_ratio'))
message += ', OV: {:.3f}'.format(fine_matching_meter.mean('overlap'))
message += ', std: {:.3f}'.format(fine_matching_meter.std('recall'))
logger.critical(message)
for scene_abbr, result_dict in scene_fine_matching_result_dict.items():
message = ' {}'.format(scene_abbr)
message += ', FMR: {:.3f}'.format(result_dict['recall'])
message += ', IR: {:.3f}'.format(result_dict['inlier_ratio'])
logger.critical(message)
# 2. print registration evaluation results
message = ' Registration'
message += ', RR: {:.3f}'.format(registration_meter.mean('overall_recall'))
message += ', RRE: {:.3f}'.format(registration_meter.mean('overall_rre'))
message += ', RTE: {:.3f}'.format(registration_meter.mean('overall_rte'))
message += ', mean_RR: {:.3f}'.format(registration_meter.mean('recall'))
message += ', mean_RRE: {:.3f}'.format(registration_meter.mean('rre'))
    message += ', mean_RTE: {:.3f}'.format(registration_meter.mean('rte'))
logger.critical(message)
for scene_abbr, result_dict in scene_registration_result_dict.items():
message = ' {}'.format(scene_abbr)
message += ', RR: {:.3f}'.format(result_dict['recall'])
message += ', RRE: {:.3f}'.format(result_dict['rre'])
message += ', RTE: {:.3f}'.format(result_dict['rte'])
logger.critical(message)
def main():
parser = make_parser()
args = parser.parse_args()
cfg = make_cfg()
log_file = osp.join(cfg.log_dir, 'eval-{}.log'.format(time.strftime('%Y%m%d-%H%M%S')))
logger = Logger(log_file=log_file)
message = 'Command executed: ' + ' '.join(sys.argv)
logger.info(message)
message = 'Configs:\n' + json.dumps(cfg, indent=4)
logger.info(message)
eval_one_epoch(args, cfg, logger)
if __name__ == '__main__':
main()
| 15,037 | 45.99375 | 112 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/loss.py | import torch
import torch.nn as nn
from geotransformer.modules.loss import WeightedCircleLoss
from geotransformer.modules.ops.transformation import apply_transform
from geotransformer.modules.registration.metrics import isotropic_transform_error
from geotransformer.modules.ops.pairwise_distance import pairwise_distance
class CoarseMatchingLoss(nn.Module):
def __init__(self, cfg):
super(CoarseMatchingLoss, self).__init__()
self.weighted_circle_loss = WeightedCircleLoss(
cfg.coarse_loss.positive_margin,
cfg.coarse_loss.negative_margin,
cfg.coarse_loss.positive_optimal,
cfg.coarse_loss.negative_optimal,
cfg.coarse_loss.log_scale,
)
self.positive_overlap = cfg.coarse_loss.positive_overlap
def forward(self, output_dict):
ref_feats = output_dict['ref_feats_c']
src_feats = output_dict['src_feats_c']
gt_node_corr_indices = output_dict['gt_node_corr_indices']
gt_node_corr_overlaps = output_dict['gt_node_corr_overlaps']
gt_ref_node_corr_indices = gt_node_corr_indices[:, 0]
gt_src_node_corr_indices = gt_node_corr_indices[:, 1]
feat_dists = torch.sqrt(pairwise_distance(ref_feats, src_feats, normalized=True))
overlaps = torch.zeros_like(feat_dists)
overlaps[gt_ref_node_corr_indices, gt_src_node_corr_indices] = gt_node_corr_overlaps
pos_masks = torch.gt(overlaps, self.positive_overlap)
neg_masks = torch.eq(overlaps, 0)
pos_scales = torch.sqrt(overlaps * pos_masks.float())
loss = self.weighted_circle_loss(pos_masks, neg_masks, feat_dists, pos_scales)
return loss
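# --- Illustrative note (an assumption about WeightedCircleLoss internals, based
# on the standard circle-loss formulation): for each anchor with positive
# feature distances d_p and negative distances d_n, the loss has the shape
#   L = softplus( logsumexp[s * w_p * (d_p - m_p)] + logsumexp[s * w_n * (m_n - d_n)] ) / s
# with detached ramp weights w_p = relu(d_p - o_p), w_n = relu(o_n - d_n),
# margins m_p/m_n, optima o_p/o_n, and log scale s; `pos_scales` above
# additionally reweights positives by sqrt(overlap). ---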
class FineMatchingLoss(nn.Module):
def __init__(self, cfg):
super(FineMatchingLoss, self).__init__()
self.positive_radius = cfg.fine_loss.positive_radius
def forward(self, output_dict, data_dict):
ref_node_corr_knn_points = output_dict['ref_node_corr_knn_points']
src_node_corr_knn_points = output_dict['src_node_corr_knn_points']
ref_node_corr_knn_masks = output_dict['ref_node_corr_knn_masks']
src_node_corr_knn_masks = output_dict['src_node_corr_knn_masks']
matching_scores = output_dict['matching_scores']
transform = data_dict['transform']
src_node_corr_knn_points = apply_transform(src_node_corr_knn_points, transform)
dists = pairwise_distance(ref_node_corr_knn_points, src_node_corr_knn_points) # (B, N, M)
gt_masks = torch.logical_and(ref_node_corr_knn_masks.unsqueeze(2), src_node_corr_knn_masks.unsqueeze(1))
gt_corr_map = torch.lt(dists, self.positive_radius ** 2)
gt_corr_map = torch.logical_and(gt_corr_map, gt_masks)
slack_row_labels = torch.logical_and(torch.eq(gt_corr_map.sum(2), 0), ref_node_corr_knn_masks)
slack_col_labels = torch.logical_and(torch.eq(gt_corr_map.sum(1), 0), src_node_corr_knn_masks)
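        # rows/columns with no ground-truth partner are labeled against the
        # slack (dustbin) row/column appended by the Sinkhorn optimal transport,
        # so unmatched points still receive supervision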
labels = torch.zeros_like(matching_scores, dtype=torch.bool)
labels[:, :-1, :-1] = gt_corr_map
labels[:, :-1, -1] = slack_row_labels
labels[:, -1, :-1] = slack_col_labels
loss = -matching_scores[labels].mean()
return loss
class OverallLoss(nn.Module):
def __init__(self, cfg):
super(OverallLoss, self).__init__()
self.coarse_loss = CoarseMatchingLoss(cfg)
self.fine_loss = FineMatchingLoss(cfg)
self.weight_coarse_loss = cfg.loss.weight_coarse_loss
self.weight_fine_loss = cfg.loss.weight_fine_loss
        self.criterion = nn.BCELoss(reduction="mean")  # classification loss
def forward(self, output_dict, data_dict):
coarse_loss = self.coarse_loss(output_dict)
fine_loss = self.fine_loss(output_dict, data_dict)
loss = self.criterion(output_dict['predict_results'], output_dict['classification_ground_truth'])
# loss = self.weight_coarse_loss * coarse_loss + self.weight_fine_loss * fine_loss
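        # only the classification BCE drives optimization here; the coarse/fine
        # matching losses are returned purely for logging, since the
        # GeoTransformer weights are frozen in model.py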
return {
'loss': loss,
'c_loss': coarse_loss,
'f_loss': fine_loss,
}
class Evaluator(nn.Module):
def __init__(self, cfg):
super(Evaluator, self).__init__()
self.acceptance_overlap = cfg.eval.acceptance_overlap
self.acceptance_radius = cfg.eval.acceptance_radius
self.acceptance_rmse = cfg.eval.rmse_threshold
@torch.no_grad()
def evaluate_coarse(self, output_dict):
ref_length_c = output_dict['ref_points_c'].shape[0]
src_length_c = output_dict['src_points_c'].shape[0]
gt_node_corr_overlaps = output_dict['gt_node_corr_overlaps']
gt_node_corr_indices = output_dict['gt_node_corr_indices']
masks = torch.gt(gt_node_corr_overlaps, self.acceptance_overlap)
gt_node_corr_indices = gt_node_corr_indices[masks]
gt_ref_node_corr_indices = gt_node_corr_indices[:, 0]
gt_src_node_corr_indices = gt_node_corr_indices[:, 1]
gt_node_corr_map = torch.zeros(ref_length_c, src_length_c).cuda()
gt_node_corr_map[gt_ref_node_corr_indices, gt_src_node_corr_indices] = 1.0
ref_node_corr_indices = output_dict['ref_node_corr_indices']
src_node_corr_indices = output_dict['src_node_corr_indices']
precision = gt_node_corr_map[ref_node_corr_indices, src_node_corr_indices].mean()
return precision
@torch.no_grad()
def evaluate_fine(self, output_dict, data_dict):
transform = data_dict['transform']
ref_corr_points = output_dict['ref_corr_points']
src_corr_points = output_dict['src_corr_points']
src_corr_points = apply_transform(src_corr_points, transform)
corr_distances = torch.linalg.norm(ref_corr_points - src_corr_points, dim=1)
precision = torch.lt(corr_distances, self.acceptance_radius).float().mean()
return precision
@torch.no_grad()
def evaluate_registration(self, output_dict, data_dict):
transform = data_dict['transform']
est_transform = output_dict['estimated_transform']
src_points = output_dict['src_points']
rre, rte = isotropic_transform_error(transform, est_transform)
realignment_transform = torch.matmul(torch.inverse(transform), est_transform)
realigned_src_points_f = apply_transform(src_points, realignment_transform)
rmse = torch.linalg.norm(realigned_src_points_f - src_points, dim=1).mean()
recall = torch.lt(rmse, self.acceptance_rmse).float()
return rre, rte, rmse, recall
def forward(self, output_dict, data_dict):
c_precision = self.evaluate_coarse(output_dict)
f_precision = self.evaluate_fine(output_dict, data_dict)
rre, rte, rmse, recall = self.evaluate_registration(output_dict, data_dict)
# predict_results = torch.gt(output_dict['predict_results'], 0.5).float()
        # predict_1_and_True_number = torch.eq(predict_results.cpu().clone().detach() + output_dict['classification_ground_truth'].cpu().clone().detach(), 2.0).float().sum()
        # predict_0_and_True_number = torch.eq(predict_results.cpu().clone().detach() + output_dict['classification_ground_truth'].cpu().clone().detach(), 0.0).float().sum()
        # predict_1_number = output_dict['predict_results'].cpu().clone().detach().sum()
return {
'PIR': c_precision,
'IR': f_precision,
'RRE': rre,
'RTE': rte,
'RMSE': rmse,
'RR': recall,
            # 'predict_1_number': predict_1_number,
            # 'ground_truth_1_number': output_dict['classification_ground_truth'].cpu().clone().detach().sum(),
            # 'predict_1_and_True_number': predict_1_and_True_number,
            # 'Classification_Acc': (predict_1_and_True_number + predict_0_and_True_number) / output_dict['classification_ground_truth'].cpu().clone().detach().shape[0],
            # 'Classification_Predict_IR': predict_1_and_True_number / predict_1_number
}
| 7,978 | 45.660819 | 172 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from IPython import embed
from geotransformer.modules.ops import point_to_node_partition, index_select
from geotransformer.modules.registration import get_node_correspondences
from geotransformer.modules.sinkhorn import LearnableLogOptimalTransport
from geotransformer.modules.geotransformer import (
GeometricTransformer,
SuperPointMatching,
SuperPointTargetGenerator,
LocalGlobalRegistration,
)
from classification.classification_model import classification_model
import random
from backbone import KPConvFPN
from torch.autograd import Variable
class GeoTransformer(nn.Module):
def __init__(self, cfg):
super(GeoTransformer, self).__init__()
self.num_points_in_patch = cfg.model.num_points_in_patch
self.matching_radius = cfg.model.ground_truth_matching_radius
self.backbone = KPConvFPN(
cfg.backbone.input_dim,
cfg.backbone.output_dim,
cfg.backbone.init_dim,
cfg.backbone.kernel_size,
cfg.backbone.init_radius,
cfg.backbone.init_sigma,
cfg.backbone.group_norm,
)
self.transformer = GeometricTransformer(
cfg.geotransformer.input_dim,
cfg.geotransformer.output_dim,
cfg.geotransformer.hidden_dim,
cfg.geotransformer.num_heads,
cfg.geotransformer.blocks,
cfg.geotransformer.sigma_d,
cfg.geotransformer.sigma_a,
cfg.geotransformer.angle_k,
reduction_a=cfg.geotransformer.reduction_a,
)
self.coarse_target = SuperPointTargetGenerator(
cfg.coarse_matching.num_targets, cfg.coarse_matching.overlap_threshold
)
self.coarse_matching = SuperPointMatching(
cfg.coarse_matching.num_correspondences, cfg.coarse_matching.dual_normalization
)
self.fine_matching = LocalGlobalRegistration(
cfg.fine_matching.topk,
cfg.fine_matching.acceptance_radius,
mutual=cfg.fine_matching.mutual,
confidence_threshold=cfg.fine_matching.confidence_threshold,
use_dustbin=cfg.fine_matching.use_dustbin,
use_global_score=cfg.fine_matching.use_global_score,
correspondence_threshold=cfg.fine_matching.correspondence_threshold,
correspondence_limit=cfg.fine_matching.correspondence_limit,
num_refinement_steps=cfg.fine_matching.num_refinement_steps,
)
self.optimal_transport = LearnableLogOptimalTransport(cfg.model.num_sinkhorn_iterations)
for p in self.parameters():
p.requires_grad=False
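        # freeze all pretrained GeoTransformer parameters; only the
        # classification head created below stays trainable (trainval.py filters
        # on requires_grad when building the optimizer)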
self.classification_model = classification_model()
def forward(self, data_dict):
output_dict = {}
# Downsample point clouds
feats = data_dict['features'].detach()
transform = data_dict['transform'].detach()
ref_length_c = data_dict['lengths'][-1][0].item()
ref_length_f = data_dict['lengths'][1][0].item()
ref_length = data_dict['lengths'][0][0].item()
points_c = data_dict['points'][-1].detach()
points_f = data_dict['points'][1].detach()
points = data_dict['points'][0].detach()
ref_points_c = points_c[:ref_length_c]
src_points_c = points_c[ref_length_c:]
ref_points_f = points_f[:ref_length_f]
src_points_f = points_f[ref_length_f:]
ref_points = points[:ref_length]
src_points = points[ref_length:]
output_dict['ref_points_c'] = ref_points_c
output_dict['src_points_c'] = src_points_c
output_dict['ref_points_f'] = ref_points_f
output_dict['src_points_f'] = src_points_f
output_dict['ref_points'] = ref_points
output_dict['src_points'] = src_points
# 1. Generate ground truth node correspondences
_, ref_node_masks, ref_node_knn_indices, ref_node_knn_masks = point_to_node_partition(
ref_points_f, ref_points_c, self.num_points_in_patch
)
_, src_node_masks, src_node_knn_indices, src_node_knn_masks = point_to_node_partition(
src_points_f, src_points_c, self.num_points_in_patch
)
ref_padded_points_f = torch.cat([ref_points_f, torch.zeros_like(ref_points_f[:1])], dim=0)
src_padded_points_f = torch.cat([src_points_f, torch.zeros_like(src_points_f[:1])], dim=0)
ref_node_knn_points = index_select(ref_padded_points_f, ref_node_knn_indices, dim=0)
src_node_knn_points = index_select(src_padded_points_f, src_node_knn_indices, dim=0)
gt_node_corr_indices, gt_node_corr_overlaps = get_node_correspondences(
ref_points_c,
src_points_c,
ref_node_knn_points,
src_node_knn_points,
transform,
self.matching_radius,
ref_masks=ref_node_masks,
src_masks=src_node_masks,
ref_knn_masks=ref_node_knn_masks,
src_knn_masks=src_node_knn_masks,
)
output_dict['gt_node_corr_indices'] = gt_node_corr_indices
output_dict['gt_node_corr_overlaps'] = gt_node_corr_overlaps
# 2. KPFCNN Encoder
feats_list = self.backbone(feats, data_dict)
feats_c = feats_list[-1]
feats_f = feats_list[0]
# print("feats_c:",feats_c.requires_grad)
# print("feats_c:",feats_c.is_leaf)
# 3. Conditional Transformer
ref_feats_c = feats_c[:ref_length_c]
src_feats_c = feats_c[ref_length_c:]
ref_feats_c, src_feats_c = self.transformer(
ref_points_c.unsqueeze(0),
src_points_c.unsqueeze(0),
ref_feats_c.unsqueeze(0),
src_feats_c.unsqueeze(0),
)
# print("ref_feats_c:",ref_feats_c.requires_grad)
# print("ref_feats_c:",ref_feats_c.is_leaf)
ref_feats_c_norm = F.normalize(ref_feats_c.squeeze(0), p=2, dim=1)
src_feats_c_norm = F.normalize(src_feats_c.squeeze(0), p=2, dim=1)
output_dict['ref_feats_c'] = ref_feats_c_norm
output_dict['src_feats_c'] = src_feats_c_norm
# 5. Head for fine level matching
ref_feats_f = feats_f[:ref_length_f]
src_feats_f = feats_f[ref_length_f:]
output_dict['ref_feats_f'] = ref_feats_f
output_dict['src_feats_f'] = src_feats_f
# 6. Select topk nearest node correspondences
with torch.no_grad():
ref_node_corr_indices, src_node_corr_indices, node_corr_scores = self.coarse_matching(
ref_feats_c_norm, src_feats_c_norm, ref_points_c, src_points_c, ref_node_masks, src_node_masks
)
output_dict['ref_node_corr_indices'] = ref_node_corr_indices
output_dict['src_node_corr_indices'] = src_node_corr_indices
# 7 Random select ground truth node correspondences during training
if self.training:
ref_node_corr_indices, src_node_corr_indices, node_corr_scores = self.coarse_target(
gt_node_corr_indices, gt_node_corr_overlaps
)
# classification data prepare
classification_one_data_dict = classification_data_prepare(self.training, ref_points_c.shape[0], src_points_c.shape[0], gt_node_corr_overlaps.detach(), gt_node_corr_indices.detach(), output_dict['ref_node_corr_indices'].detach(), output_dict['src_node_corr_indices'].detach(), ref_feats_c_norm.detach(), src_feats_c_norm.detach())
classification_inputs = Variable(classification_one_data_dict['corr_node_feat'],requires_grad=True).to('cuda')
classification_ground_truth = Variable(classification_one_data_dict['ground_truth']).to('cuda')
predict_results = self.classification_model(classification_inputs)
output_dict['predict_results'] = predict_results
output_dict['classification_ground_truth'] = classification_ground_truth.detach()
        if not self.training:
            # keep only the node correspondences the classifier scores
            # confidently; fall back to a looser threshold, and finally to the
            # unfiltered set, whenever too few (<= 20) survive
            confident_indices = torch.nonzero(torch.gt(predict_results.squeeze(), 0.80)).squeeze()
            output_dict['ref_node_corr_indices'] = ref_node_corr_indices[confident_indices]
            output_dict['src_node_corr_indices'] = src_node_corr_indices[confident_indices]
            if confident_indices.numel() <= 20:
                relaxed_indices = torch.nonzero(torch.gt(predict_results.squeeze(), 0.55)).squeeze()
                output_dict['ref_node_corr_indices'] = ref_node_corr_indices[relaxed_indices]
                output_dict['src_node_corr_indices'] = src_node_corr_indices[relaxed_indices]
                if relaxed_indices.numel() <= 20:
                    output_dict['ref_node_corr_indices'] = ref_node_corr_indices
                    output_dict['src_node_corr_indices'] = src_node_corr_indices
ref_node_corr_indices = output_dict['ref_node_corr_indices']
src_node_corr_indices = output_dict['src_node_corr_indices']
# 7.2 Generate batched node points & feats
ref_node_corr_knn_indices = ref_node_knn_indices[ref_node_corr_indices] # (P, K)
src_node_corr_knn_indices = src_node_knn_indices[src_node_corr_indices] # (P, K)
ref_node_corr_knn_masks = ref_node_knn_masks[ref_node_corr_indices] # (P, K)
src_node_corr_knn_masks = src_node_knn_masks[src_node_corr_indices] # (P, K)
ref_node_corr_knn_points = ref_node_knn_points[ref_node_corr_indices] # (P, K, 3)
src_node_corr_knn_points = src_node_knn_points[src_node_corr_indices] # (P, K, 3)
ref_padded_feats_f = torch.cat([ref_feats_f, torch.zeros_like(ref_feats_f[:1])], dim=0)
src_padded_feats_f = torch.cat([src_feats_f, torch.zeros_like(src_feats_f[:1])], dim=0)
ref_node_corr_knn_feats = index_select(ref_padded_feats_f, ref_node_corr_knn_indices, dim=0) # (P, K, C)
src_node_corr_knn_feats = index_select(src_padded_feats_f, src_node_corr_knn_indices, dim=0) # (P, K, C)
output_dict['ref_node_corr_knn_points'] = ref_node_corr_knn_points
output_dict['src_node_corr_knn_points'] = src_node_corr_knn_points
output_dict['ref_node_corr_knn_masks'] = ref_node_corr_knn_masks
output_dict['src_node_corr_knn_masks'] = src_node_corr_knn_masks
# 8. Optimal transport
matching_scores = torch.einsum('bnd,bmd->bnm', ref_node_corr_knn_feats, src_node_corr_knn_feats) # (P, K, K)
matching_scores = matching_scores / feats_f.shape[1] ** 0.5
matching_scores = self.optimal_transport(matching_scores, ref_node_corr_knn_masks, src_node_corr_knn_masks)
output_dict['matching_scores'] = matching_scores
# 9. Generate final correspondences during testing
with torch.no_grad():
if not self.fine_matching.use_dustbin:
matching_scores = matching_scores[:, :-1, :-1]
            ref_corr_points, src_corr_points, corr_scores, estimated_transform = self.fine_matching(
ref_node_corr_knn_points,
src_node_corr_knn_points,
ref_node_corr_knn_masks,
src_node_corr_knn_masks,
matching_scores,
node_corr_scores,
# ref_node_corr_knn_feats,
# src_node_corr_knn_feats,
# transform,
)
output_dict['ref_corr_points'] = ref_corr_points
output_dict['src_corr_points'] = src_corr_points
output_dict['corr_scores'] = corr_scores
output_dict['estimated_transform'] = estimated_transform
return output_dict
def classification_data_prepare(mode, ref_length_c, src_length_c, gt_node_corr_overlaps, gt_node_corr_indices, ref_node_corr_indices, src_node_corr_indices, ref_feats_c_norm, src_feats_c_norm):
# ref_length_c = data_dict['ref_points_c'].shape[0]
# src_length_c = data_dict['src_points_c'].shape[0]
# gt_node_corr_overlaps = data_dict['gt_node_corr_overlaps'].data
# gt_node_corr_indices = data_dict['gt_node_corr_indices'].data
one_data = {}
masks = torch.gt(gt_node_corr_overlaps, 0.0)
gt_node_corr_indices = gt_node_corr_indices[masks]
gt_ref_node_corr_indices = gt_node_corr_indices[:, 0]
gt_src_node_corr_indices = gt_node_corr_indices[:, 1]
gt_node_corr_map = torch.zeros(ref_length_c, src_length_c)
gt_node_corr_map[gt_ref_node_corr_indices, gt_src_node_corr_indices] = 1.0
# ref_node_corr_indices = data_dict['ref_node_corr_indices'].data
# src_node_corr_indices = data_dict['src_node_corr_indices'].data
corr_node_ground_truth = gt_node_corr_map[ref_node_corr_indices, src_node_corr_indices]
# if self.mode == 'train':
ground_truth_pos = torch.nonzero(gt_node_corr_map)
ground_truth_neg = torch.nonzero(torch.eq(gt_node_corr_map,0))
pos_index = [i for i in range(ground_truth_pos.shape[0])]
random.shuffle(pos_index)
ground_truth_pos = ground_truth_pos[pos_index[:2500],:]
neg_index = [i for i in range(ground_truth_neg.shape[0])]
random.shuffle(neg_index)
ground_truth_neg = ground_truth_neg[neg_index[:ground_truth_pos.shape[0]],:]
ground_truth_both = torch.cat((ground_truth_pos,ground_truth_neg),dim=0)
random_index = [i for i in range(ground_truth_both.shape[0])]
random.shuffle(random_index)
ground_truth_both = ground_truth_both[random_index]
if mode == 'training':
ref_node_corr_indices = ground_truth_both[:,0]
src_node_corr_indices = ground_truth_both[:,1]
corr_node_ground_truth = gt_node_corr_map[ref_node_corr_indices, src_node_corr_indices]
# ref_feats_c_norm = data_dict['ref_feats_c'].data
# src_feats_c_norm = data_dict['src_feats_c'].data
ref_corr_node_feats = ref_feats_c_norm[ref_node_corr_indices]
src_corr_node_feats = src_feats_c_norm[src_node_corr_indices]
mean = src_corr_node_feats.mean()
var = src_corr_node_feats.var()
src_corr_node_feats = (src_corr_node_feats - mean) / torch.pow(var + 1e-05,0.5)
mean = ref_corr_node_feats.mean()
var = ref_corr_node_feats.var()
ref_corr_node_feats = (ref_corr_node_feats - mean) / torch.pow(var + 1e-05,0.5)
# print("src_corr_node_feate:",src_corr_node_feate.shape)
# print("ref_corr_node_feats:",ref_corr_node_feats.shape)
# print("ref_corr_node_feats:",ref_corr_node_feats.shape)
corr_node_feat = torch.cat((ref_corr_node_feats.unsqueeze(0).transpose(0,1), src_corr_node_feats.unsqueeze(0).transpose(0,1)), dim=1)
# print("corr_node_feat:",corr_node_feat.shape)
corr_node_feat = corr_node_feat.repeat(1,1,2)
# print("corr_node_feat:",corr_node_feat.shape)
corr_node_feat = corr_node_feat.chunk(16,dim=2)
# print("corr_node_feat:",corr_node_feat.shape)
corr_node_feat = torch.cat((corr_node_feat),dim=1)
# print("corr_node_feat:",corr_node_feat.shape)
corr_node_feat = corr_node_feat.unsqueeze(1)
# print("corr_node_feat:",corr_node_feat.shape)
one_data['corr_node_feat'] = corr_node_feat
one_data['ground_truth'] = corr_node_ground_truth.unsqueeze(1)
return one_data
def create_model(config):
model = GeoTransformer(config)
return model
def main():
from config import make_cfg
cfg = make_cfg()
model = create_model(cfg)
print(model.state_dict().keys())
print(model)
if __name__ == '__main__':
main()
| 16,238 | 45.133523 | 338 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/test.py | import argparse
import os.path as osp
import time
import torch
import numpy as np
import random
from geotransformer.engine import SingleTester
from geotransformer.utils.torch import release_cuda
from geotransformer.utils.common import ensure_dir, get_log_string
from dataset import test_data_loader,train_valid_data_loader
from config import make_cfg
from model import create_model
from loss import Evaluator
from torch.autograd import Variable
import torch.nn as nn
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--benchmark', choices=['3DMatch', '3DLoMatch', 'val'], help='test benchmark')
return parser
class Tester(SingleTester):
def __init__(self, cfg):
super().__init__(cfg, parser=make_parser())
# dataloader
start_time = time.time()
data_loader, neighbor_limits = test_data_loader(cfg, self.args.benchmark)
# data_loader, neighbor_limits = train_valid_data_loader(cfg, self.args.benchmark)
loading_time = time.time() - start_time
message = f'Data loader created: {loading_time:.3f}s collapsed.'
self.logger.info(message)
message = f'Calibrate neighbors: {neighbor_limits}.'
self.logger.info(message)
self.register_loader(data_loader)
# model
model = create_model(cfg).cuda()
self.register_model(model)
# evaluator
self.evaluator = Evaluator(cfg).cuda()
# preparation
self.output_dir = osp.join(cfg.feature_dir, self.args.benchmark)
ensure_dir(self.output_dir)
def test_step(self, iteration, data_dict):
self.model.eval()
with torch.no_grad():
output_dict = self.model(data_dict)
return output_dict
def eval_step(self, iteration, data_dict, output_dict):
result_dict = self.evaluator(output_dict, data_dict)
return result_dict
def summary_string(self, iteration, data_dict, output_dict, result_dict):
scene_name = data_dict['scene_name']
ref_frame = data_dict['ref_frame']
src_frame = data_dict['src_frame']
message = f'{scene_name}, id0: {ref_frame}, id1: {src_frame}'
message += ', ' + get_log_string(result_dict=result_dict)
message += ', nCorr: {}'.format(output_dict['corr_scores'].shape[0])
return message
def after_test_step(self, iteration, data_dict, output_dict, result_dict):
scene_name = data_dict['scene_name']
ref_id = data_dict['ref_frame']
src_id = data_dict['src_frame']
ensure_dir(osp.join(self.output_dir, scene_name))
file_name = osp.join(self.output_dir, scene_name, f'{ref_id}_{src_id}.npz')
# with open("test_origin2.txt","a+") as f:
# f.write(file_name)
# f.write("\n")
# with open("geo_all_middle_text1.txt","a+") as f:
# f.write(file_name)
# f.write("\n")
np.savez_compressed(
file_name,
ref_points=release_cuda(output_dict['ref_points']),
src_points=release_cuda(output_dict['src_points']),
ref_points_f=release_cuda(output_dict['ref_points_f']),
src_points_f=release_cuda(output_dict['src_points_f']),
ref_points_c=release_cuda(output_dict['ref_points_c']),
src_points_c=release_cuda(output_dict['src_points_c']),
ref_feats_c=release_cuda(output_dict['ref_feats_c']),
src_feats_c=release_cuda(output_dict['src_feats_c']),
# corr_mat=release_cuda(output_dict['corr_mat']),
# ref_corr_points_feat=release_cuda(output_dict['ref_corr_points_feat']),
# src_corr_points_feat=release_cuda(output_dict['src_corr_points_feat']),
ref_node_corr_indices=release_cuda(output_dict['ref_node_corr_indices']),
src_node_corr_indices=release_cuda(output_dict['src_node_corr_indices']),
ref_corr_points=release_cuda(output_dict['ref_corr_points']),
src_corr_points=release_cuda(output_dict['src_corr_points']),
corr_scores=release_cuda(output_dict['corr_scores']),
gt_node_corr_indices=release_cuda(output_dict['gt_node_corr_indices']),
gt_node_corr_overlaps=release_cuda(output_dict['gt_node_corr_overlaps']),
estimated_transform=release_cuda(output_dict['estimated_transform']),
transform=release_cuda(data_dict['transform']),
overlap=data_dict['overlap'],
)
# def step_scheduler_and_save_model(self):
# self.scheduler.step()
# torch.save(self.classification_model, '/dssg/home/acct-eeyj/eeyj-user1/WPJ/GeoTransformer-nodecorr-classification/GeoTransformer/output/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/snapshots/node_classification.pth')
# def train_classification_model(self,output_dict):
# one_data_dict = self.classification_dataset_prepare(output_dict)
# inputs = Variable(one_data_dict['corr_node_feat'],requires_grad=True).to('cuda')
# target = Variable(one_data_dict['ground_truth'],requires_grad=True).to('cuda')
# print("inputs:",inputs.shape)
# print("target:",target.shape)
    # out = self.classification_model(inputs)  # forward pass
    # out = out.squeeze()
    # loss = self.criterion(out, target)  # compute the classification loss
    # loss.requires_grad_(True)
    # self.optimizer.zero_grad()  # zero the gradients
    # loss.backward()  # backward pass
    # self.optimizer.step()  # update the parameters
# predict = torch.gt(out, 0.5).float()
# Inliers_number = target.cpu().clone().detach().sum()
# Outliers_number = target.cpu().clone().detach().shape[0] - Inliers_number
    # predict_1_number = predict.cpu().clone().detach().sum()
    # predict_0_number = predict.cpu().clone().detach().shape[0] - predict_1_number
    # predict_1_and_True_number = torch.eq(predict.cpu().clone().detach() + target.cpu().clone().detach(),2).float().sum()
    # predict_1_and_False_number = predict_1_number - predict_1_and_True_number
    # predict_0_and_True_number = torch.eq(predict.cpu().clone().detach() + target.cpu().clone().detach(),0).float().sum()
    # predict_0_and_False_number = predict_0_number - predict_0_and_True_number
    # message = f'Loss: {loss.item()}.'
    # self.logger.info(message)
    # message = f'predict_Acc: {(predict_1_and_True_number + predict_0_and_True_number) / predict.shape[0]}.'
    # self.logger.info(message)
    # message = f'predict_IR: {predict_1_and_True_number / predict_1_number}.'
    # self.logger.info(message)
# def classification_dataset_prepare(self, output_dict):
# one_data = {}
# ref_length_c = output_dict['ref_points_c'].shape[0]
# src_length_c = output_dict['src_points_c'].shape[0]
# gt_node_corr_overlaps = output_dict['gt_node_corr_overlaps'].data
# gt_node_corr_indices = output_dict['gt_node_corr_indices'].data
# masks = torch.gt(gt_node_corr_overlaps, 0.0)
# gt_node_corr_indices = gt_node_corr_indices[masks]
# gt_ref_node_corr_indices = gt_node_corr_indices[:, 0]
# gt_src_node_corr_indices = gt_node_corr_indices[:, 1]
# gt_node_corr_map = torch.zeros(ref_length_c, src_length_c)
# gt_node_corr_map[gt_ref_node_corr_indices, gt_src_node_corr_indices] = 1.0
# ref_node_corr_indices = output_dict['ref_node_corr_indices'].data
# src_node_corr_indices = output_dict['src_node_corr_indices'].data
# corr_node_ground_truth = gt_node_corr_map[ref_node_corr_indices, src_node_corr_indices]
# # if self.mode != 'test':
# ground_truth_pos = torch.nonzero(gt_node_corr_map)
# ground_truth_neg = torch.nonzero(torch.eq(gt_node_corr_map,0))
# pos_index = [i for i in range(ground_truth_pos.shape[0])]
# random.shuffle(pos_index)
# ground_truth_pos = ground_truth_pos[pos_index[:2500],:]
# neg_index = [i for i in range(ground_truth_neg.shape[0])]
# random.shuffle(neg_index)
# ground_truth_neg = ground_truth_neg[neg_index[:ground_truth_pos.shape[0]],:]
# ground_truth_both = torch.cat((ground_truth_pos,ground_truth_neg),dim=0)
# random_index = [i for i in range(ground_truth_both.shape[0])]
# random.shuffle(random_index)
# ground_truth_both = ground_truth_both[random_index]
# ref_node_corr_indices = ground_truth_both[:,0]
# src_node_corr_indices = ground_truth_both[:,1]
# corr_node_ground_truth = gt_node_corr_map[ref_node_corr_indices, src_node_corr_indices]
# ref_feats_c_norm = output_dict['ref_feats_c'].data
# src_feats_c_norm = output_dict['src_feats_c'].data
# ref_corr_node_feats = ref_feats_c_norm[ref_node_corr_indices]
# src_corr_node_feats = src_feats_c_norm[src_node_corr_indices]
# mean = src_corr_node_feats.mean()
# var = src_corr_node_feats.var()
# src_corr_node_feats = (src_corr_node_feats - mean) / torch.pow(var + 1e-05,0.5)
# mean = ref_corr_node_feats.mean()
# var = ref_corr_node_feats.var()
# ref_corr_node_feats = (ref_corr_node_feats - mean) / torch.pow(var + 1e-05,0.5)
# corr_node_feat = torch.cat((ref_corr_node_feats.unsqueeze(0).transpose(0,1), src_corr_node_feats.unsqueeze(0).transpose(0,1)), dim=1)
# corr_node_feat = corr_node_feat.repeat(1,1,2)
# corr_node_feat = corr_node_feat.chunk(16,dim=2)
# corr_node_feat = torch.cat((corr_node_feat),dim=1)
# corr_node_feat = corr_node_feat.unsqueeze(1)
# # one_data['file_name'] = (os.path.splitext('/'.join(file_path.split('/')[-3:])))[0]
# # one_data['src_corr_node_feats'] = src_corr_node_feats
# # one_data['ref_corr_node_feats'] = ref_corr_node_feats
# # one_data['src_corr_points'] = src_corr_points
# # one_data['ref_corr_points'] = ref_corr_points
# one_data['corr_node_feat'] = corr_node_feat
# one_data['ground_truth'] = corr_node_ground_truth
# return one_data
def main():
cfg = make_cfg()
tester = Tester(cfg)
tester.run()
if __name__ == '__main__':
main()
| 10,320 | 46.344037 | 248 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/trainval.py | import argparse
import time
import torch.optim as optim
from geotransformer.engine import EpochBasedTrainer
from config import make_cfg
from dataset import train_valid_data_loader
from model import create_model
from loss import OverallLoss, Evaluator
class Trainer(EpochBasedTrainer):
def __init__(self, cfg):
super().__init__(cfg, max_epoch=cfg.optim.max_epoch)
# dataloader
start_time = time.time()
train_loader, val_loader, neighbor_limits = train_valid_data_loader(cfg, self.distributed)
loading_time = time.time() - start_time
        message = 'Data loader created: {:.3f}s elapsed.'.format(loading_time)
self.logger.info(message)
message = 'Calibrate neighbors: {}.'.format(neighbor_limits)
self.logger.info(message)
self.register_loader(train_loader, val_loader)
# model, optimizer, scheduler
model = create_model(cfg).cuda()
model = self.register_model(model)
        optimizer = optim.SGD(filter(lambda p: p.requires_grad, self.model.parameters()), lr=cfg.optim.lr, weight_decay=cfg.optim.weight_decay)
self.register_optimizer(optimizer)
scheduler = optim.lr_scheduler.StepLR(optimizer, cfg.optim.lr_decay_steps, gamma=cfg.optim.lr_decay)
self.register_scheduler(scheduler)
# loss function, evaluator
self.loss_func = OverallLoss(cfg).cuda()
self.evaluator = Evaluator(cfg).cuda()
def train_step(self, epoch, iteration, data_dict):
output_dict = self.model(data_dict)
loss_dict = self.loss_func(output_dict, data_dict)
result_dict = self.evaluator(output_dict, data_dict)
loss_dict.update(result_dict)
return output_dict, loss_dict
def val_step(self, epoch, iteration, data_dict):
output_dict = self.model(data_dict)
loss_dict = self.loss_func(output_dict, data_dict)
result_dict = self.evaluator(output_dict, data_dict)
loss_dict.update(result_dict)
return output_dict, loss_dict
def main():
cfg = make_cfg()
trainer = Trainer(cfg)
trainer.run()
if __name__ == '__main__':
main()
| 2,166 | 33.396825 | 141 | py |
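The SGD call above filters on requires_grad, so only parameters the model leaves trainable receive updates (the CROON model.py below freezes the registration trunk and trains just the classification head). A small helper to verify that, assuming any torch nn.Module:

import torch.nn as nn

def count_trainable(model: nn.Module) -> int:
    # parameters the filtered optimizer will actually update
    return sum(p.numel() for p in model.parameters() if p.requires_grad)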
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/classification/__init__.py | 0 | 0 | 0 | py |
|
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/classification/classification_model.py | import torch.nn as nn
import torch.nn.functional as F
# define model
class basic_block(nn.Module):
"""基本残差块,由两层卷积构成"""
def __init__(self,in_planes,planes,kernel_size=3,stride=1):
"""
        :param in_planes: input channels
        :param planes: output channels
        :param kernel_size: convolution kernel size
        :param stride: convolution stride
"""
super(basic_block, self).__init__()
self.conv1=nn.Conv2d(in_planes,planes,kernel_size=kernel_size,stride=stride,padding=1,bias=False)
self.bn1=nn.BatchNorm2d(planes)
self.relu=nn.ReLU()
self.conv2=nn.Conv2d(planes,planes,kernel_size=kernel_size,stride=1,padding=1,bias=False)
self.bn2=nn.BatchNorm2d(planes)
if stride!=1 or in_planes!=planes:
self.downsample=nn.Sequential(nn.Conv2d(in_planes,planes,kernel_size=1,stride=stride)
,nn.BatchNorm2d(planes))
else:
self.downsample=nn.Sequential()
def forward(self,inx):
x=self.relu(self.bn1(self.conv1(inx)))
x=self.bn2(self.conv2(x))
out=x+self.downsample(inx)
return F.relu(out)
class Resnet(nn.Module):
def __init__(self,basicBlock,blockNums,nb_classes):
super(Resnet, self).__init__()
self.in_planes=64
        # input layer
self.conv1=nn.Conv2d(1,self.in_planes,kernel_size=(3,3),stride=(1,1),padding=1,bias=False)
self.bn1=nn.BatchNorm2d(self.in_planes)
self.relu=nn.ReLU(inplace=True)
self.maxpool=nn.MaxPool2d(kernel_size=3,stride=2,padding=1)
self.layer1=self._make_layers(basicBlock,blockNums[0],64,1)
self.layer2=self._make_layers(basicBlock,blockNums[1],128,2)
self.layer3=self._make_layers(basicBlock,blockNums[2],256,2)
self.layer4=self._make_layers(basicBlock,blockNums[3],512,2)
self.avgpool=nn.AdaptiveAvgPool2d(output_size=(1,1))
self.fc=nn.Linear(512,nb_classes)
def _make_layers(self,basicBlock,blockNum,plane,stride):
"""
        :param basicBlock: basic residual block class
        :param blockNum: number of basic residual blocks in this layer (2 per layer for resnet18)
        :param plane: output channels
        :param stride: convolution stride
        :return:
"""
layers=[]
for i in range(blockNum):
if i==0:
layer=basicBlock(self.in_planes,plane,3,stride=stride)
else:
layer=basicBlock(plane,plane,3,stride=1)
layers.append(layer)
self.in_planes=plane
return nn.Sequential(*layers)
def forward(self,inx):
x=self.maxpool(self.relu(self.bn1(self.conv1(inx))))
x=self.layer1(x)
x=self.layer2(x)
x=self.layer3(x)
x=self.layer4(x)
x=self.avgpool(x)
x=x.view(x.shape[0],-1)
out=self.fc(x)
return out
class classification_model(nn.Module):
def __init__(self):
super(classification_model, self).__init__()
self.resnet18=Resnet(basic_block,[2,2,2,2],256)
self.linear1 = nn.Linear(256, 128)
self.linear2 = nn.Linear(128, 1)
self.activate1 = nn.ReLU()
self.dropout1 = nn.Dropout(p=0.5)
self.activate2 = nn.Sigmoid()
def forward(self, x):
x = self.resnet18(x)
x = self.linear1(x)
x = self.activate1(x)
x = self.dropout1(x)
x = self.linear2(x)
x = self.activate2(x)
return x | 3,374 | 33.438776 | 105 | py |
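For a quick shape check of this head: model.py folds each matched feature pair into a single-channel 32x32 'image', and the network maps it to one sigmoid probability per pair. A minimal sketch (the batch size 4 is arbitrary):

import torch

model = classification_model().eval()  # class defined above
x = torch.randn(4, 1, 32, 32)          # pair-feature images as built in model.py
with torch.no_grad():
    probs = model(x)
print(probs.shape)                     # torch.Size([4, 1]), values in (0, 1)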
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.CROON.stage5.gse.k3.max.oacl.stage2.sinkhorn/backbone.py | import torch
import torch.nn as nn
from geotransformer.modules.kpconv import ConvBlock, ResidualBlock, UnaryBlock, LastUnaryBlock, nearest_upsample
class KPConvFPN(nn.Module):
def __init__(self, input_dim, output_dim, init_dim, kernel_size, init_radius, init_sigma, group_norm):
super(KPConvFPN, self).__init__()
self.encoder1_1 = ConvBlock(input_dim, init_dim, kernel_size, init_radius, init_sigma, group_norm)
self.encoder1_2 = ResidualBlock(init_dim, init_dim * 2, kernel_size, init_radius, init_sigma, group_norm)
self.encoder2_1 = ResidualBlock(
init_dim * 2, init_dim * 2, kernel_size, init_radius, init_sigma, group_norm, strided=True
)
self.encoder2_2 = ResidualBlock(
init_dim * 2, init_dim * 4, kernel_size, init_radius * 2, init_sigma * 2, group_norm
)
self.encoder2_3 = ResidualBlock(
init_dim * 4, init_dim * 4, kernel_size, init_radius * 2, init_sigma * 2, group_norm
)
self.encoder3_1 = ResidualBlock(
init_dim * 4,
init_dim * 4,
kernel_size,
init_radius * 2,
init_sigma * 2,
group_norm,
strided=True,
)
self.encoder3_2 = ResidualBlock(
init_dim * 4, init_dim * 8, kernel_size, init_radius * 4, init_sigma * 4, group_norm
)
self.encoder3_3 = ResidualBlock(
init_dim * 8, init_dim * 8, kernel_size, init_radius * 4, init_sigma * 4, group_norm
)
self.encoder4_1 = ResidualBlock(
init_dim * 8,
init_dim * 8,
kernel_size,
init_radius * 4,
init_sigma * 4,
group_norm,
strided=True,
)
self.encoder4_2 = ResidualBlock(
init_dim * 8, init_dim * 16, kernel_size, init_radius * 8, init_sigma * 8, group_norm
)
self.encoder4_3 = ResidualBlock(
init_dim * 16, init_dim * 16, kernel_size, init_radius * 8, init_sigma * 8, group_norm
)
self.encoder5_1 = ResidualBlock(
init_dim * 16,
init_dim * 16,
kernel_size,
init_radius * 8,
init_sigma * 8,
group_norm,
strided=True,
)
self.encoder5_2 = ResidualBlock(
init_dim * 16, init_dim * 32, kernel_size, init_radius * 16, init_sigma * 16, group_norm
)
self.encoder5_3 = ResidualBlock(
init_dim * 32, init_dim * 32, kernel_size, init_radius * 16, init_sigma * 16, group_norm
)
self.decoder4 = UnaryBlock(init_dim * 48, init_dim * 16, group_norm)
self.decoder3 = UnaryBlock(init_dim * 24, init_dim * 8, group_norm)
self.decoder2 = LastUnaryBlock(init_dim * 12, output_dim)
def forward(self, feats, data_dict):
feats_list = []
points_list = data_dict['points']
neighbors_list = data_dict['neighbors']
subsampling_list = data_dict['subsampling']
upsampling_list = data_dict['upsampling']
feats_s1 = feats
feats_s1 = self.encoder1_1(feats_s1, points_list[0], points_list[0], neighbors_list[0])
feats_s1 = self.encoder1_2(feats_s1, points_list[0], points_list[0], neighbors_list[0])
feats_s2 = self.encoder2_1(feats_s1, points_list[1], points_list[0], subsampling_list[0])
feats_s2 = self.encoder2_2(feats_s2, points_list[1], points_list[1], neighbors_list[1])
feats_s2 = self.encoder2_3(feats_s2, points_list[1], points_list[1], neighbors_list[1])
feats_s3 = self.encoder3_1(feats_s2, points_list[2], points_list[1], subsampling_list[1])
feats_s3 = self.encoder3_2(feats_s3, points_list[2], points_list[2], neighbors_list[2])
feats_s3 = self.encoder3_3(feats_s3, points_list[2], points_list[2], neighbors_list[2])
feats_s4 = self.encoder4_1(feats_s3, points_list[3], points_list[2], subsampling_list[2])
feats_s4 = self.encoder4_2(feats_s4, points_list[3], points_list[3], neighbors_list[3])
feats_s4 = self.encoder4_3(feats_s4, points_list[3], points_list[3], neighbors_list[3])
feats_s5 = self.encoder5_1(feats_s4, points_list[4], points_list[3], subsampling_list[3])
feats_s5 = self.encoder5_2(feats_s5, points_list[4], points_list[4], neighbors_list[4])
feats_s5 = self.encoder5_3(feats_s5, points_list[4], points_list[4], neighbors_list[4])
latent_s5 = feats_s5
feats_list.append(feats_s5)
latent_s4 = nearest_upsample(latent_s5, upsampling_list[3])
latent_s4 = torch.cat([latent_s4, feats_s4], dim=1)
latent_s4 = self.decoder4(latent_s4)
feats_list.append(latent_s4)
latent_s3 = nearest_upsample(latent_s4, upsampling_list[2])
latent_s3 = torch.cat([latent_s3, feats_s3], dim=1)
latent_s3 = self.decoder3(latent_s3)
feats_list.append(latent_s3)
latent_s2 = nearest_upsample(latent_s3, upsampling_list[1])
latent_s2 = torch.cat([latent_s2, feats_s2], dim=1)
latent_s2 = self.decoder2(latent_s2)
feats_list.append(latent_s2)
feats_list.reverse()
return feats_list
| 5,227 | 40.824 | 113 | py |
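The decoder widths follow from concatenating the upsampled latent with the skip features of the matching encoder stage; a quick check of that channel arithmetic with init_dim = 64 as in config.py:

init_dim = 64
enc_out = {2: init_dim * 4, 3: init_dim * 8, 4: init_dim * 16, 5: init_dim * 32}
assert enc_out[5] + enc_out[4] == init_dim * 48     # decoder4 input
assert init_dim * 16 + enc_out[3] == init_dim * 24  # decoder3 input (decoder4 emits 16 * init_dim)
assert init_dim * 8 + enc_out[2] == init_dim * 12   # decoder2 input (decoder3 emits 8 * init_dim)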
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.CROON.stage5.gse.k3.max.oacl.stage2.sinkhorn/config.py | import argparse
import os
import os.path as osp
from easydict import EasyDict as edict
from geotransformer.utils.common import ensure_dir
_C = edict()
# random seed
_C.seed = 7351
# dirs
_C.working_dir = osp.dirname(osp.realpath(__file__))
_C.root_dir = osp.dirname(osp.dirname(_C.working_dir))
_C.exp_name = osp.basename(_C.working_dir)
_C.output_dir = osp.join(_C.root_dir, 'output', _C.exp_name)
_C.snapshot_dir = osp.join(_C.output_dir, 'snapshots')
_C.log_dir = osp.join(_C.output_dir, 'logs')
_C.event_dir = osp.join(_C.output_dir, 'events')
_C.feature_dir = osp.join(_C.output_dir, 'features')
ensure_dir(_C.output_dir)
ensure_dir(_C.snapshot_dir)
ensure_dir(_C.log_dir)
ensure_dir(_C.event_dir)
ensure_dir(_C.feature_dir)
# data
_C.data = edict()
_C.data.dataset_root = osp.join(_C.root_dir, 'data', 'Kitti')
# train data
_C.train = edict()
_C.train.batch_size = 1
_C.train.num_workers = 8
_C.train.point_limit = 30000
_C.train.use_augmentation = True
_C.train.augmentation_noise = 0.01
_C.train.augmentation_min_scale = 0.8
_C.train.augmentation_max_scale = 1.2
_C.train.augmentation_shift = 2.0
_C.train.augmentation_rotation = 1.0
# test config
_C.test = edict()
_C.test.batch_size = 1
_C.test.num_workers = 8
_C.test.point_limit = None
# eval config
_C.eval = edict()
_C.eval.acceptance_overlap = 0.0
_C.eval.acceptance_radius = 1.0
_C.eval.inlier_ratio_threshold = 0.05
_C.eval.rre_threshold = 5.0
_C.eval.rte_threshold = 2.0
# ransac
_C.ransac = edict()
_C.ransac.distance_threshold = 0.3
_C.ransac.num_points = 4
_C.ransac.num_iterations = 50000
# optim config
_C.optim = edict()
_C.optim.lr = 1e-4
_C.optim.lr_decay = 0.95
_C.optim.lr_decay_steps = 4
_C.optim.weight_decay = 1e-6
_C.optim.max_epoch = 139
_C.optim.grad_acc_steps = 1
# model - backbone
_C.backbone = edict()
_C.backbone.num_stages = 5
_C.backbone.init_voxel_size = 0.3
_C.backbone.kernel_size = 15
_C.backbone.base_radius = 4.25
_C.backbone.base_sigma = 2.0
_C.backbone.init_radius = _C.backbone.base_radius * _C.backbone.init_voxel_size
_C.backbone.init_sigma = _C.backbone.base_sigma * _C.backbone.init_voxel_size
_C.backbone.group_norm = 32
_C.backbone.input_dim = 1
_C.backbone.init_dim = 64
_C.backbone.output_dim = 256
# model - Global
_C.model = edict()
_C.model.ground_truth_matching_radius = 0.6
_C.model.num_points_in_patch = 128
_C.model.num_sinkhorn_iterations = 100
# model - Coarse Matching
_C.coarse_matching = edict()
_C.coarse_matching.num_targets = 128
_C.coarse_matching.overlap_threshold = 0.1
_C.coarse_matching.num_correspondences = 512  # was 256; keep the train and val settings the same
_C.coarse_matching.dual_normalization = True
# model - GeoTransformer
_C.geotransformer = edict()
_C.geotransformer.input_dim = 2048
_C.geotransformer.hidden_dim = 128
_C.geotransformer.output_dim = 256
_C.geotransformer.num_heads = 4
_C.geotransformer.blocks = ['self', 'cross', 'self', 'cross', 'self', 'cross']
_C.geotransformer.sigma_d = 4.8
_C.geotransformer.sigma_a = 15
_C.geotransformer.angle_k = 3
_C.geotransformer.reduction_a = 'max'
# model - Fine Matching
_C.fine_matching = edict()
_C.fine_matching.topk = 10
_C.fine_matching.acceptance_radius = 0.6
_C.fine_matching.mutual = True
_C.fine_matching.confidence_threshold = 0.05
_C.fine_matching.use_dustbin = False
_C.fine_matching.use_global_score = False
_C.fine_matching.correspondence_threshold = 3
_C.fine_matching.correspondence_limit = None
_C.fine_matching.num_refinement_steps = 5
# loss - Coarse level
_C.coarse_loss = edict()
_C.coarse_loss.positive_margin = 0.1
_C.coarse_loss.negative_margin = 1.4
_C.coarse_loss.positive_optimal = 0.1
_C.coarse_loss.negative_optimal = 1.4
_C.coarse_loss.log_scale = 40
_C.coarse_loss.positive_overlap = 0.1
# loss - Fine level
_C.fine_loss = edict()
_C.fine_loss.positive_radius = 0.6
# loss - Overall
_C.loss = edict()
_C.loss.weight_coarse_loss = 1.0
_C.loss.weight_fine_loss = 1.0
def make_cfg():
return _C
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--link_output', dest='link_output', action='store_true', help='link output dir')
args = parser.parse_args()
return args
def main():
cfg = make_cfg()
args = parse_args()
if args.link_output:
os.symlink(cfg.output_dir, 'output')
if __name__ == '__main__':
main()
| 4,299 | 24.903614 | 105 | py |
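init_radius and init_sigma above are derived values, and the backbone doubles the radius at every downsampling stage; with these settings:

base_radius, base_sigma, init_voxel_size = 4.25, 2.0, 0.3
init_radius = base_radius * init_voxel_size  # 1.275, as used at stage 1
init_sigma = base_sigma * init_voxel_size    # 0.6
stage_radii = [init_radius * 2 ** i for i in range(5)]  # [1.275, 2.55, 5.1, 10.2, 20.4]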
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.CROON.stage5.gse.k3.max.oacl.stage2.sinkhorn/dataset.py | from geotransformer.datasets.registration.CROON.dataset import Lidar2LidarDataset
from geotransformer.utils.data import (
registration_collate_fn_stack_mode,
calibrate_neighbors_stack_mode,
build_dataloader_stack_mode,
)
def train_valid_data_loader(cfg, distributed):
train_dataset = Lidar2LidarDataset(
cfg.data.dataset_root,
'train',
point_limit=cfg.train.point_limit,
use_augmentation=cfg.train.use_augmentation,
augmentation_noise=cfg.train.augmentation_noise,
augmentation_min_scale=cfg.train.augmentation_min_scale,
augmentation_max_scale=cfg.train.augmentation_max_scale,
augmentation_shift=cfg.train.augmentation_shift,
augmentation_rotation=cfg.train.augmentation_rotation,
)
neighbor_limits = calibrate_neighbors_stack_mode(
train_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
)
train_loader = build_dataloader_stack_mode(
train_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
neighbor_limits,
batch_size=cfg.train.batch_size,
num_workers=cfg.train.num_workers,
shuffle=True,
distributed=distributed,
)
valid_dataset = Lidar2LidarDataset(
cfg.data.dataset_root,
'val',
point_limit=cfg.test.point_limit,
use_augmentation=False,
)
valid_loader = build_dataloader_stack_mode(
valid_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
neighbor_limits,
batch_size=cfg.test.batch_size,
num_workers=cfg.test.num_workers,
shuffle=False,
distributed=distributed,
)
return train_loader, valid_loader, neighbor_limits
def test_data_loader(cfg):
train_dataset = Lidar2LidarDataset(
cfg.data.dataset_root,
'train',
point_limit=cfg.train.point_limit,
use_augmentation=cfg.train.use_augmentation,
augmentation_noise=cfg.train.augmentation_noise,
augmentation_min_scale=cfg.train.augmentation_min_scale,
augmentation_max_scale=cfg.train.augmentation_max_scale,
augmentation_shift=cfg.train.augmentation_shift,
augmentation_rotation=cfg.train.augmentation_rotation,
)
neighbor_limits = calibrate_neighbors_stack_mode(
train_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
)
test_dataset = Lidar2LidarDataset(
cfg.data.dataset_root,
'test',
point_limit=cfg.test.point_limit,
use_augmentation=False,
)
test_loader = build_dataloader_stack_mode(
test_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
neighbor_limits,
batch_size=cfg.test.batch_size,
num_workers=cfg.test.num_workers,
shuffle=False,
)
return test_loader, neighbor_limits
| 3,331 | 32.32 | 81 | py |
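A minimal sketch of how trainval.py consumes this module; it assumes the Kitti data root from config.py is populated, so it will not run without the dataset on disk:

from config import make_cfg

cfg = make_cfg()
train_loader, val_loader, neighbor_limits = train_valid_data_loader(cfg, distributed=False)
test_loader, _ = test_data_loader(cfg)  # note: recalibrates neighbor limits on the train split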
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.CROON.stage5.gse.k3.max.oacl.stage2.sinkhorn/eval.py | import sys
import json
import argparse
import glob
import os.path as osp
import time
import numpy as np
import torch
from config import make_cfg
from geotransformer.engine import Logger
from geotransformer.modules.registration import weighted_procrustes
from geotransformer.utils.summary_board import SummaryBoard
from geotransformer.utils.open3d import registration_with_ransac_from_correspondences
from geotransformer.utils.registration import (
evaluate_sparse_correspondences,
evaluate_correspondences,
compute_registration_error,
)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--test_epoch', default=None, type=int, help='test epoch')
parser.add_argument('--method', choices=['lgr', 'ransac', 'svd'], required=True, help='registration method')
parser.add_argument('--num_corr', type=int, default=None, help='number of correspondences for registration')
parser.add_argument('--verbose', action='store_true', help='verbose mode')
return parser
def eval_one_epoch(args, cfg, logger):
features_root = cfg.feature_dir
coarse_matching_meter = SummaryBoard()
coarse_matching_meter.register_meter('precision')
coarse_matching_meter.register_meter('PMR>0')
coarse_matching_meter.register_meter('PMR>=0.1')
coarse_matching_meter.register_meter('PMR>=0.3')
coarse_matching_meter.register_meter('PMR>=0.5')
fine_matching_meter = SummaryBoard()
fine_matching_meter.register_meter('recall')
fine_matching_meter.register_meter('inlier_ratio')
fine_matching_meter.register_meter('overlap')
registration_meter = SummaryBoard()
registration_meter.register_meter('recall')
registration_meter.register_meter('rre')
registration_meter.register_meter('rte')
file_names = sorted(
glob.glob(osp.join(features_root, '*.npz')),
key=lambda x: [int(i) for i in osp.splitext(osp.basename(x))[0].split('_')],
)
num_test_pairs = len(file_names)
for i, file_name in enumerate(file_names):
seq_id, src_frame, ref_frame = [int(x) for x in osp.splitext(osp.basename(file_name))[0].split('_')]
data_dict = np.load(file_name)
ref_nodes = data_dict['ref_points_c']
src_nodes = data_dict['src_points_c']
ref_node_corr_indices = data_dict['ref_node_corr_indices']
src_node_corr_indices = data_dict['src_node_corr_indices']
ref_corr_points = data_dict['ref_corr_points']
src_corr_points = data_dict['src_corr_points']
corr_scores = data_dict['corr_scores']
gt_node_corr_indices = data_dict['gt_node_corr_indices']
gt_transform = data_dict['transform']
if args.num_corr is not None and corr_scores.shape[0] > args.num_corr:
sel_indices = np.argsort(-corr_scores)[: args.num_corr]
ref_corr_points = ref_corr_points[sel_indices]
src_corr_points = src_corr_points[sel_indices]
corr_scores = corr_scores[sel_indices]
message = '{}/{}, seq_id: {}, id0: {}, id1: {}'.format(i + 1, num_test_pairs, seq_id, src_frame, ref_frame)
# 1. evaluate correspondences
# 1.1 evaluate coarse correspondences
coarse_matching_result_dict = evaluate_sparse_correspondences(
ref_nodes,
src_nodes,
ref_node_corr_indices,
src_node_corr_indices,
gt_node_corr_indices,
)
coarse_precision = coarse_matching_result_dict['precision']
coarse_matching_meter.update('precision', coarse_precision)
coarse_matching_meter.update('PMR>0', float(coarse_precision > 0))
coarse_matching_meter.update('PMR>=0.1', float(coarse_precision >= 0.1))
coarse_matching_meter.update('PMR>=0.3', float(coarse_precision >= 0.3))
coarse_matching_meter.update('PMR>=0.5', float(coarse_precision >= 0.5))
# 1.2 evaluate fine correspondences
fine_matching_result_dict = evaluate_correspondences(
ref_corr_points,
src_corr_points,
gt_transform,
positive_radius=cfg.eval.acceptance_radius,
)
inlier_ratio = fine_matching_result_dict['inlier_ratio']
overlap = fine_matching_result_dict['overlap']
fine_matching_meter.update('inlier_ratio', inlier_ratio)
fine_matching_meter.update('overlap', overlap)
fine_matching_meter.update('recall', float(inlier_ratio >= cfg.eval.inlier_ratio_threshold))
message += ', c_PIR: {:.3f}'.format(coarse_precision)
message += ', f_IR: {:.3f}'.format(inlier_ratio)
message += ', f_OV: {:.3f}'.format(overlap)
message += ', f_RS: {:.3f}'.format(fine_matching_result_dict['residual'])
message += ', f_NU: {}'.format(fine_matching_result_dict['num_corr'])
# 2. evaluate registration
if args.method == 'lgr':
est_transform = data_dict['estimated_transform']
elif args.method == 'ransac':
est_transform = registration_with_ransac_from_correspondences(
src_corr_points,
ref_corr_points,
distance_threshold=cfg.ransac.distance_threshold,
ransac_n=cfg.ransac.num_points,
num_iterations=cfg.ransac.num_iterations,
)
elif args.method == 'svd':
with torch.no_grad():
ref_corr_points = torch.from_numpy(ref_corr_points).cuda()
src_corr_points = torch.from_numpy(src_corr_points).cuda()
corr_scores = torch.from_numpy(corr_scores).cuda()
est_transform = weighted_procrustes(
src_corr_points, ref_corr_points, corr_scores, return_transform=True
)
est_transform = est_transform.detach().cpu().numpy()
else:
raise ValueError(f'Unsupported registration method: {args.method}.')
rre, rte = compute_registration_error(gt_transform, est_transform)
accepted = rre < cfg.eval.rre_threshold and rte < cfg.eval.rte_threshold
if accepted:
registration_meter.update('rre', rre)
registration_meter.update('rte', rte)
registration_meter.update('recall', float(accepted))
message += ', r_RRE: {:.3f}'.format(rre)
message += ', r_RTE: {:.3f}'.format(rte)
if args.verbose:
logger.info(message)
if args.test_epoch is not None:
logger.critical(f'Epoch {args.test_epoch}')
# 1. print correspondence evaluation results
message = ' Coarse Matching'
message += ', PIR: {:.3f}'.format(coarse_matching_meter.mean('precision'))
message += ', PMR>0: {:.3f}'.format(coarse_matching_meter.mean('PMR>0'))
message += ', PMR>=0.1: {:.3f}'.format(coarse_matching_meter.mean('PMR>=0.1'))
message += ', PMR>=0.3: {:.3f}'.format(coarse_matching_meter.mean('PMR>=0.3'))
message += ', PMR>=0.5: {:.3f}'.format(coarse_matching_meter.mean('PMR>=0.5'))
logger.critical(message)
message = ' Fine Matching'
message += ', FMR: {:.3f}'.format(fine_matching_meter.mean('recall'))
message += ', IR: {:.3f}'.format(fine_matching_meter.mean('inlier_ratio'))
message += ', OV: {:.3f}'.format(fine_matching_meter.mean('overlap'))
message += ', std: {:.3f}'.format(fine_matching_meter.std('recall'))
logger.critical(message)
# 2. print registration evaluation results
message = ' Registration'
message += ', RR: {:.3f}'.format(registration_meter.mean("recall"))
message += ', RRE: {:.3f}'.format(registration_meter.mean("rre"))
message += ', RTE: {:.3f}'.format(registration_meter.mean("rte"))
logger.critical(message)
def main():
parser = make_parser()
args = parser.parse_args()
cfg = make_cfg()
log_file = osp.join(cfg.log_dir, 'eval-{}.log'.format(time.strftime("%Y%m%d-%H%M%S")))
logger = Logger(log_file=log_file)
message = 'Command executed: ' + ' '.join(sys.argv)
logger.info(message)
message = 'Configs:\n' + json.dumps(cfg, indent=4)
logger.info(message)
eval_one_epoch(args, cfg, logger)
if __name__ == '__main__':
main()
| 8,169 | 39.646766 | 115 | py |
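eval_one_epoch expects feature files named <seq>_<src>_<ref>.npz with integer fields; a worked example of the parsing it performs (the path is hypothetical). Note that test.py in this directory derives frames from a dash-separated seq_id, so purely numeric names like this only arise for KITTI-style sequence ids:

import os.path as osp

name = '/path/to/features/8_15_30.npz'  # hypothetical: seq 8, src frame 15, ref frame 30
seq_id, src_frame, ref_frame = [int(x) for x in osp.splitext(osp.basename(name))[0].split('_')]
print(seq_id, src_frame, ref_frame)  # 8 15 30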
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.CROON.stage5.gse.k3.max.oacl.stage2.sinkhorn/eval.sh | if [ "$2" = "test" ]; then
python test.py --test_epoch=$1
fi
python eval.py --test_epoch=$1 --method=lgr
| 109 | 21 | 43 | sh |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.CROON.stage5.gse.k3.max.oacl.stage2.sinkhorn/loss.py | import torch
import torch.nn as nn
from geotransformer.modules.ops import apply_transform, pairwise_distance
from geotransformer.modules.registration.metrics import isotropic_transform_error
from geotransformer.modules.loss import WeightedCircleLoss
class CoarseMatchingLoss(nn.Module):
def __init__(self, cfg):
super(CoarseMatchingLoss, self).__init__()
self.weighted_circle_loss = WeightedCircleLoss(
cfg.coarse_loss.positive_margin,
cfg.coarse_loss.negative_margin,
cfg.coarse_loss.positive_optimal,
cfg.coarse_loss.negative_optimal,
cfg.coarse_loss.log_scale,
)
self.positive_overlap = cfg.coarse_loss.positive_overlap
def forward(self, output_dict):
ref_feats = output_dict['ref_feats_c']
src_feats = output_dict['src_feats_c']
gt_node_corr_indices = output_dict['gt_node_corr_indices']
gt_node_corr_overlaps = output_dict['gt_node_corr_overlaps']
gt_ref_node_corr_indices = gt_node_corr_indices[:, 0]
gt_src_node_corr_indices = gt_node_corr_indices[:, 1]
feat_dists = torch.sqrt(pairwise_distance(ref_feats, src_feats, normalized=True))
overlaps = torch.zeros_like(feat_dists)
overlaps[gt_ref_node_corr_indices, gt_src_node_corr_indices] = gt_node_corr_overlaps
pos_masks = torch.gt(overlaps, self.positive_overlap)
neg_masks = torch.eq(overlaps, 0)
pos_scales = torch.sqrt(overlaps * pos_masks.float())
loss = self.weighted_circle_loss(pos_masks, neg_masks, feat_dists, pos_scales)
return loss
class FineMatchingLoss(nn.Module):
def __init__(self, cfg):
super(FineMatchingLoss, self).__init__()
self.positive_radius = cfg.fine_loss.positive_radius
def forward(self, output_dict, data_dict):
ref_node_corr_knn_points = output_dict['ref_node_corr_knn_points']
src_node_corr_knn_points = output_dict['src_node_corr_knn_points']
ref_node_corr_knn_masks = output_dict['ref_node_corr_knn_masks']
src_node_corr_knn_masks = output_dict['src_node_corr_knn_masks']
matching_scores = output_dict['matching_scores']
transform = data_dict['transform']
src_node_corr_knn_points = apply_transform(src_node_corr_knn_points, transform)
dists = pairwise_distance(ref_node_corr_knn_points, src_node_corr_knn_points) # (B, N, M)
gt_masks = torch.logical_and(ref_node_corr_knn_masks.unsqueeze(2), src_node_corr_knn_masks.unsqueeze(1))
gt_corr_map = torch.lt(dists, self.positive_radius ** 2)
gt_corr_map = torch.logical_and(gt_corr_map, gt_masks)
slack_row_labels = torch.logical_and(torch.eq(gt_corr_map.sum(2), 0), ref_node_corr_knn_masks)
slack_col_labels = torch.logical_and(torch.eq(gt_corr_map.sum(1), 0), src_node_corr_knn_masks)
labels = torch.zeros_like(matching_scores, dtype=torch.bool)
labels[:, :-1, :-1] = gt_corr_map
labels[:, :-1, -1] = slack_row_labels
labels[:, -1, :-1] = slack_col_labels
loss = -matching_scores[labels].mean()
return loss
class OverallLoss(nn.Module):
def __init__(self, cfg):
super(OverallLoss, self).__init__()
self.coarse_loss = CoarseMatchingLoss(cfg)
self.fine_loss = FineMatchingLoss(cfg)
self.weight_coarse_loss = cfg.loss.weight_coarse_loss
self.weight_fine_loss = cfg.loss.weight_fine_loss
        self.criterion = nn.BCELoss(reduction="mean")  # classification loss
def forward(self, output_dict, data_dict):
coarse_loss = self.coarse_loss(output_dict)
fine_loss = self.fine_loss(output_dict, data_dict)
loss = self.criterion(output_dict['predict_results'], output_dict['classification_ground_truth'])
# loss = self.weight_coarse_loss * coarse_loss + self.weight_fine_loss * fine_loss
return {
'c_loss': coarse_loss,
'f_loss': fine_loss,
'loss': loss,
}
class Evaluator(nn.Module):
def __init__(self, cfg):
super(Evaluator, self).__init__()
self.acceptance_overlap = cfg.eval.acceptance_overlap
self.acceptance_radius = cfg.eval.acceptance_radius
self.rre_threshold = cfg.eval.rre_threshold
self.rte_threshold = cfg.eval.rte_threshold
@torch.no_grad()
def evaluate_coarse(self, output_dict):
ref_length_c = output_dict['ref_points_c'].shape[0]
src_length_c = output_dict['src_points_c'].shape[0]
gt_node_corr_overlaps = output_dict['gt_node_corr_overlaps']
gt_node_corr_indices = output_dict['gt_node_corr_indices']
masks = torch.gt(gt_node_corr_overlaps, self.acceptance_overlap)
gt_node_corr_indices = gt_node_corr_indices[masks]
gt_ref_node_corr_indices = gt_node_corr_indices[:, 0]
gt_src_node_corr_indices = gt_node_corr_indices[:, 1]
gt_node_corr_map = torch.zeros(size=(ref_length_c, src_length_c)).cuda()
gt_node_corr_map[gt_ref_node_corr_indices, gt_src_node_corr_indices] = 1.0
ref_node_corr_indices = output_dict['ref_node_corr_indices']
src_node_corr_indices = output_dict['src_node_corr_indices']
precision = gt_node_corr_map[ref_node_corr_indices, src_node_corr_indices].mean()
return precision
@torch.no_grad()
def evaluate_fine(self, output_dict, data_dict):
transform = data_dict['transform']
ref_corr_points = output_dict['ref_corr_points']
src_corr_points = output_dict['src_corr_points']
src_corr_points = apply_transform(src_corr_points, transform)
corr_distances = torch.linalg.norm(ref_corr_points - src_corr_points, dim=1)
if len(corr_distances)>0:
precision = torch.lt(corr_distances, self.acceptance_radius).float().mean()
else:
precision = 0
return precision
@torch.no_grad()
def evaluate_registration(self, output_dict, data_dict):
transform = data_dict['transform']
est_transform = output_dict['estimated_transform']
rre, rte = isotropic_transform_error(transform, est_transform)
recall = torch.logical_and(torch.lt(rre, self.rre_threshold), torch.lt(rte, self.rte_threshold)).float()
return rre, rte, recall
@torch.no_grad()
def evaluate_classification(self, output_dict, data_dict):
predict_label = output_dict['classification_result']
ground_truth_label = data_dict['classification_label']
        _, statistic_results = torch.max(predict_label, dim=0)
        return True if statistic_results == torch.tensor(ground_truth_label) else False
def forward(self, output_dict, data_dict):
c_precision = self.evaluate_coarse(output_dict)
f_precision = self.evaluate_fine(output_dict, data_dict)
rre, rte, recall = self.evaluate_registration(output_dict, data_dict)
return {
'PIR': c_precision,
'IR': f_precision,
'RRE': rre,
'RTE': rte,
'RR': recall,
}
| 7,104 | 42.323171 | 112 | py |
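OverallLoss currently returns only the BCE term as 'loss'; the weighted coarse+fine sum is computed but commented out. A shape sketch of what nn.BCELoss expects here, assuming (N, 1) sigmoid probabilities as produced by the classification head:

import torch
import torch.nn as nn

criterion = nn.BCELoss(reduction='mean')
pred = torch.sigmoid(torch.randn(8, 1))       # like output_dict['predict_results']
target = torch.randint(0, 2, (8, 1)).float()  # like output_dict['classification_ground_truth']
print(criterion(pred, target))                # scalar mean BCE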
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.CROON.stage5.gse.k3.max.oacl.stage2.sinkhorn/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from IPython import embed
from geotransformer.modules.ops import point_to_node_partition, index_select
from geotransformer.modules.registration import get_node_correspondences
from geotransformer.modules.sinkhorn import LearnableLogOptimalTransport
from geotransformer.modules.geotransformer import (
GeometricTransformer,
SuperPointMatching,
SuperPointTargetGenerator,
LocalGlobalRegistration,
)
from classification.classification_model import classification_model
from geotransformer.modules.kpconv.modules import GlobalAvgPool
from backbone import KPConvFPN
import random
from torch.autograd import Variable
class GeoTransformer(nn.Module):
def __init__(self, cfg):
super(GeoTransformer, self).__init__()
self.num_points_in_patch = cfg.model.num_points_in_patch
self.matching_radius = cfg.model.ground_truth_matching_radius
self.backbone = KPConvFPN(
cfg.backbone.input_dim,
cfg.backbone.output_dim,
cfg.backbone.init_dim,
cfg.backbone.kernel_size,
cfg.backbone.init_radius,
cfg.backbone.init_sigma,
cfg.backbone.group_norm,
)
self.transformer = GeometricTransformer(
cfg.geotransformer.input_dim,
cfg.geotransformer.output_dim,
cfg.geotransformer.hidden_dim,
cfg.geotransformer.num_heads,
cfg.geotransformer.blocks,
cfg.geotransformer.sigma_d,
cfg.geotransformer.sigma_a,
cfg.geotransformer.angle_k,
reduction_a=cfg.geotransformer.reduction_a,
)
self.coarse_target = SuperPointTargetGenerator(
cfg.coarse_matching.num_targets, cfg.coarse_matching.overlap_threshold
)
self.coarse_matching = SuperPointMatching(
cfg.coarse_matching.num_correspondences, cfg.coarse_matching.dual_normalization
)
self.fine_matching = LocalGlobalRegistration(
cfg.fine_matching.topk,
cfg.fine_matching.acceptance_radius,
mutual=cfg.fine_matching.mutual,
confidence_threshold=cfg.fine_matching.confidence_threshold,
use_dustbin=cfg.fine_matching.use_dustbin,
use_global_score=cfg.fine_matching.use_global_score,
correspondence_threshold=cfg.fine_matching.correspondence_threshold,
correspondence_limit=cfg.fine_matching.correspondence_limit,
num_refinement_steps=cfg.fine_matching.num_refinement_steps,
)
self.optimal_transport = LearnableLogOptimalTransport(cfg.model.num_sinkhorn_iterations)
for p in self.parameters():
p.requires_grad=False
self.classification_model = classification_model()
def forward(self, data_dict):
output_dict = {}
# Downsample point clouds
feats = data_dict['features'].detach()
transform = data_dict['transform'].detach()
ref_length_c = data_dict['lengths'][-1][0].item()
ref_length_f = data_dict['lengths'][1][0].item()
ref_length = data_dict['lengths'][0][0].item()
points_c = data_dict['points'][-1].detach()
points_f = data_dict['points'][1].detach()
points = data_dict['points'][0].detach()
ref_points_c = points_c[:ref_length_c]
src_points_c = points_c[ref_length_c:]
ref_points_f = points_f[:ref_length_f]
src_points_f = points_f[ref_length_f:]
ref_points = points[:ref_length]
src_points = points[ref_length:]
output_dict['ref_points_c'] = ref_points_c
output_dict['src_points_c'] = src_points_c
output_dict['ref_points_f'] = ref_points_f
output_dict['src_points_f'] = src_points_f
output_dict['ref_points'] = ref_points
output_dict['src_points'] = src_points
# 1. Generate ground truth node correspondences
_, ref_node_masks, ref_node_knn_indices, ref_node_knn_masks = point_to_node_partition(
ref_points_f, ref_points_c, self.num_points_in_patch
)
_, src_node_masks, src_node_knn_indices, src_node_knn_masks = point_to_node_partition(
src_points_f, src_points_c, self.num_points_in_patch
)
ref_padded_points_f = torch.cat([ref_points_f, torch.zeros_like(ref_points_f[:1])], dim=0)
src_padded_points_f = torch.cat([src_points_f, torch.zeros_like(src_points_f[:1])], dim=0)
ref_node_knn_points = index_select(ref_padded_points_f, ref_node_knn_indices, dim=0)
src_node_knn_points = index_select(src_padded_points_f, src_node_knn_indices, dim=0)
gt_node_corr_indices, gt_node_corr_overlaps = get_node_correspondences(
ref_points_c,
src_points_c,
ref_node_knn_points,
src_node_knn_points,
transform,
self.matching_radius,
ref_masks=ref_node_masks,
src_masks=src_node_masks,
ref_knn_masks=ref_node_knn_masks,
src_knn_masks=src_node_knn_masks,
)
output_dict['gt_node_corr_indices'] = gt_node_corr_indices
output_dict['gt_node_corr_overlaps'] = gt_node_corr_overlaps
# 2. KPFCNN Encoder
feats_list = self.backbone(feats, data_dict)
feats_c = feats_list[-1]
feats_f = feats_list[0]
# 3. Conditional Transformer
ref_feats_c = feats_c[:ref_length_c]
src_feats_c = feats_c[ref_length_c:]
ref_feats_c, src_feats_c = self.transformer(
ref_points_c.unsqueeze(0),
src_points_c.unsqueeze(0),
ref_feats_c.unsqueeze(0),
src_feats_c.unsqueeze(0),
)
ref_feats_c_norm = F.normalize(ref_feats_c.squeeze(0), p=2, dim=1)
src_feats_c_norm = F.normalize(src_feats_c.squeeze(0), p=2, dim=1)
output_dict['ref_feats_c'] = ref_feats_c_norm
output_dict['src_feats_c'] = src_feats_c_norm
# 5. Head for fine level matching
ref_feats_f = feats_f[:ref_length_f]
src_feats_f = feats_f[ref_length_f:]
output_dict['ref_feats_f'] = ref_feats_f
output_dict['src_feats_f'] = src_feats_f
# 6. Select topk nearest node correspondences
with torch.no_grad():
ref_node_corr_indices, src_node_corr_indices, node_corr_scores = self.coarse_matching(
ref_feats_c_norm, src_feats_c_norm, ref_points_c, src_points_c, ref_node_masks, src_node_masks
)
output_dict['ref_node_corr_indices'] = ref_node_corr_indices
output_dict['src_node_corr_indices'] = src_node_corr_indices
# 7 Random select ground truth node correspondences during training
if self.training:
ref_node_corr_indices, src_node_corr_indices, node_corr_scores = self.coarse_target(
gt_node_corr_indices, gt_node_corr_overlaps
)
# classification data prepare
classification_one_data_dict = classification_data_prepare(self.training, ref_points_c.shape[0], src_points_c.shape[0], gt_node_corr_overlaps.detach(), gt_node_corr_indices.detach(), output_dict['ref_node_corr_indices'].detach(), output_dict['src_node_corr_indices'].detach(), ref_feats_c_norm.detach(), src_feats_c_norm.detach())
classification_inputs = Variable(classification_one_data_dict['corr_node_feat'],requires_grad=True).to('cuda')
classification_ground_truth = Variable(classification_one_data_dict['ground_truth']).to('cuda')
predict_results = self.classification_model(classification_inputs)
output_dict['predict_results'] = predict_results
output_dict['classification_ground_truth'] = classification_ground_truth.detach()
if not self.training:
# sorted_values, sorted_indices = torch.sort(predict_results.squeeze(),descending=True)
predict_results1 = torch.gt(predict_results.squeeze(), 0.80)
predict_results1 = torch.nonzero(predict_results1).squeeze()
ref_node_corr_indices1 = ref_node_corr_indices[predict_results1]
src_node_corr_indices1 = src_node_corr_indices[predict_results1]
output_dict['ref_node_corr_indices'] = ref_node_corr_indices1
output_dict['src_node_corr_indices'] = src_node_corr_indices1
if predict_results1.numel() <= 20:
predict_results2 = torch.gt(predict_results.squeeze(), 0.55)
predict_results2 = torch.nonzero(predict_results2).squeeze()
ref_node_corr_indices2 = ref_node_corr_indices[predict_results2]
src_node_corr_indices2 = src_node_corr_indices[predict_results2]
output_dict['ref_node_corr_indices'] = ref_node_corr_indices2
output_dict['src_node_corr_indices'] = src_node_corr_indices2
ref_node_corr_indices = output_dict['ref_node_corr_indices']
src_node_corr_indices = output_dict['src_node_corr_indices']
# 7.2 Generate batched node points & feats
ref_node_corr_knn_indices = ref_node_knn_indices[ref_node_corr_indices] # (P, K)
src_node_corr_knn_indices = src_node_knn_indices[src_node_corr_indices] # (P, K)
ref_node_corr_knn_masks = ref_node_knn_masks[ref_node_corr_indices] # (P, K)
src_node_corr_knn_masks = src_node_knn_masks[src_node_corr_indices] # (P, K)
ref_node_corr_knn_points = ref_node_knn_points[ref_node_corr_indices] # (P, K, 3)
src_node_corr_knn_points = src_node_knn_points[src_node_corr_indices] # (P, K, 3)
ref_padded_feats_f = torch.cat([ref_feats_f, torch.zeros_like(ref_feats_f[:1])], dim=0)
src_padded_feats_f = torch.cat([src_feats_f, torch.zeros_like(src_feats_f[:1])], dim=0)
ref_node_corr_knn_feats = index_select(ref_padded_feats_f, ref_node_corr_knn_indices, dim=0) # (P, K, C)
src_node_corr_knn_feats = index_select(src_padded_feats_f, src_node_corr_knn_indices, dim=0) # (P, K, C)
output_dict['ref_node_corr_knn_points'] = ref_node_corr_knn_points
output_dict['src_node_corr_knn_points'] = src_node_corr_knn_points
output_dict['ref_node_corr_knn_masks'] = ref_node_corr_knn_masks
output_dict['src_node_corr_knn_masks'] = src_node_corr_knn_masks
# 8. Optimal transport
matching_scores = torch.einsum('bnd,bmd->bnm', ref_node_corr_knn_feats, src_node_corr_knn_feats) # (P, K, K)
matching_scores = matching_scores / feats_f.shape[1] ** 0.5
matching_scores = self.optimal_transport(matching_scores, ref_node_corr_knn_masks, src_node_corr_knn_masks)
output_dict['matching_scores'] = matching_scores
# 9. Generate final correspondences during testing
with torch.no_grad():
if not self.fine_matching.use_dustbin:
matching_scores = matching_scores[:, :-1, :-1]
ref_corr_points, src_corr_points, corr_scores, estimated_transform = self.fine_matching(
ref_node_corr_knn_points,
src_node_corr_knn_points,
ref_node_corr_knn_masks,
src_node_corr_knn_masks,
matching_scores,
node_corr_scores,
)
output_dict['ref_corr_points'] = ref_corr_points
output_dict['src_corr_points'] = src_corr_points
output_dict['corr_scores'] = corr_scores
output_dict['estimated_transform'] = estimated_transform
        output_dict['classification_result'] = 0  # placeholder; the head's probabilities live in output_dict['predict_results']
return output_dict
def classification_data_prepare(mode, ref_length_c, src_length_c, gt_node_corr_overlaps, gt_node_corr_indices, ref_node_corr_indices, src_node_corr_indices, ref_feats_c_norm, src_feats_c_norm):
# ref_length_c = data_dict['ref_points_c'].shape[0]
# src_length_c = data_dict['src_points_c'].shape[0]
# gt_node_corr_overlaps = data_dict['gt_node_corr_overlaps'].data
# gt_node_corr_indices = data_dict['gt_node_corr_indices'].data
one_data = {}
masks = torch.gt(gt_node_corr_overlaps, 0.0)
gt_node_corr_indices = gt_node_corr_indices[masks]
gt_ref_node_corr_indices = gt_node_corr_indices[:, 0]
gt_src_node_corr_indices = gt_node_corr_indices[:, 1]
gt_node_corr_map = torch.zeros(ref_length_c, src_length_c)
gt_node_corr_map[gt_ref_node_corr_indices, gt_src_node_corr_indices] = 1.0
corr_node_ground_truth = gt_node_corr_map[ref_node_corr_indices, src_node_corr_indices]
# if self.mode == 'train':
ground_truth_pos = torch.nonzero(gt_node_corr_map)
ground_truth_neg = torch.nonzero(torch.eq(gt_node_corr_map,0))
pos_index = [i for i in range(ground_truth_pos.shape[0])]
random.shuffle(pos_index)
ground_truth_pos = ground_truth_pos[pos_index[:2500],:]
neg_index = [i for i in range(ground_truth_neg.shape[0])]
random.shuffle(neg_index)
ground_truth_neg = ground_truth_neg[neg_index[:ground_truth_pos.shape[0]],:]
ground_truth_both = torch.cat((ground_truth_pos,ground_truth_neg),dim=0)
random_index = [i for i in range(ground_truth_both.shape[0])]
random.shuffle(random_index)
ground_truth_both = ground_truth_both[random_index]
    if mode:  # 'mode' carries self.training (a bool); comparing it to the string 'training' was always False
ref_node_corr_indices = ground_truth_both[:,0]
src_node_corr_indices = ground_truth_both[:,1]
corr_node_ground_truth = gt_node_corr_map[ref_node_corr_indices, src_node_corr_indices]
# ref_feats_c_norm = data_dict['ref_feats_c'].data
# src_feats_c_norm = data_dict['src_feats_c'].data
ref_corr_node_feats = ref_feats_c_norm[ref_node_corr_indices]
src_corr_node_feats = src_feats_c_norm[src_node_corr_indices]
mean = src_corr_node_feats.mean()
var = src_corr_node_feats.var()
src_corr_node_feats = (src_corr_node_feats - mean) / torch.pow(var + 1e-05,0.5)
mean = ref_corr_node_feats.mean()
var = ref_corr_node_feats.var()
ref_corr_node_feats = (ref_corr_node_feats - mean) / torch.pow(var + 1e-05,0.5)
# print("src_corr_node_feate:",src_corr_node_feate.shape)
# print("ref_corr_node_feats:",ref_corr_node_feats.shape)
corr_node_feat = torch.cat((ref_corr_node_feats.unsqueeze(0).transpose(0,1), src_corr_node_feats.unsqueeze(0).transpose(0,1)), dim=1)
corr_node_feat = corr_node_feat.repeat(1,1,2)
corr_node_feat = corr_node_feat.chunk(16,dim=2)
# print("corr_node_feat:",corr_node_feat.shape)
corr_node_feat = torch.cat((corr_node_feat),dim=1)
corr_node_feat = corr_node_feat.unsqueeze(1)
one_data['corr_node_feat'] = corr_node_feat
one_data['ground_truth'] = corr_node_ground_truth.unsqueeze(1)
return one_data
def create_model(cfg):
model = GeoTransformer(cfg)
return model
def main():
from config import make_cfg
cfg = make_cfg()
model = create_model(cfg)
print(model.state_dict().keys())
print(model)
if __name__ == '__main__':
main()
| 15,089 | 43.777448 | 338 | py |
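classification_data_prepare folds each (ref, src) feature pair into the single-channel 32x32 input expected by classification_model; a shape trace with a hypothetical N = 5 pairs:

import torch

ref = torch.randn(5, 256)  # normalized superpoint features (sizes illustrative)
src = torch.randn(5, 256)
feat = torch.cat((ref.unsqueeze(0).transpose(0, 1), src.unsqueeze(0).transpose(0, 1)), dim=1)  # (5, 2, 256)
feat = feat.repeat(1, 1, 2)                     # (5, 2, 512)
feat = torch.cat(feat.chunk(16, dim=2), dim=1)  # (5, 32, 32)
feat = feat.unsqueeze(1)                        # (5, 1, 32, 32)
print(feat.shape)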
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.CROON.stage5.gse.k3.max.oacl.stage2.sinkhorn/test.py | import argparse
import os.path as osp
import time
import numpy as np
from geotransformer.engine import SingleTester
from geotransformer.utils.common import ensure_dir, get_log_string
from geotransformer.utils.torch import release_cuda
from config import make_cfg
from dataset import test_data_loader
from loss import Evaluator
from model import create_model
class Tester(SingleTester):
def __init__(self, cfg):
super().__init__(cfg)
self.logger.debug('Tester init')
# dataloader
start_time = time.time()
data_loader, neighbor_limits = test_data_loader(cfg)
loading_time = time.time() - start_time
        message = f'Data loader created: {loading_time:.3f}s elapsed.'
self.logger.info(message)
message = f'Calibrate neighbors: {neighbor_limits}.'
self.logger.info(message)
self.register_loader(data_loader)
# model
model = create_model(cfg).cuda()
self.register_model(model)
# evaluator
self.evaluator = Evaluator(cfg).cuda()
# preparation
self.output_dir = osp.join(cfg.feature_dir)
ensure_dir(self.output_dir)
def test_step(self, iteration, data_dict):
output_dict = self.model(data_dict)
return output_dict
def eval_step(self, iteration, data_dict, output_dict):
result_dict = self.evaluator(output_dict, data_dict)
return result_dict
def summary_string(self, iteration, data_dict, output_dict, result_dict):
seq_id = data_dict['seq_id']
# ref_frame = 'top'
# src_frame = 'front'
ref_frame = seq_id.split('-')[1]
src_frame = seq_id.split('-')[2]
message = f'seq_id: {seq_id}, id0: {ref_frame}, id1: {src_frame}'
message += ', ' + get_log_string(result_dict=result_dict)
message += ', nCorr: {}'.format(output_dict['corr_scores'].shape[0])
return message
def after_test_step(self, iteration, data_dict, output_dict, result_dict):
seq_id = data_dict['seq_id']
# ref_frame = 'top'
# src_frame = 'front'
ref_frame = seq_id.split('-')[1]
src_frame = seq_id.split('-')[2]
file_name = osp.join(self.output_dir, f'{seq_id}_{src_frame}_{ref_frame}.npz')
# file_name = osp.join(self.output_dir, f'{seq_id[:-2]}.npz')
np.savez_compressed(
file_name,
ref_points=release_cuda(output_dict['ref_points']),
src_points=release_cuda(output_dict['src_points']),
ref_points_f=release_cuda(output_dict['ref_points_f']),
src_points_f=release_cuda(output_dict['src_points_f']),
ref_points_c=release_cuda(output_dict['ref_points_c']),
src_points_c=release_cuda(output_dict['src_points_c']),
ref_feats_c=release_cuda(output_dict['ref_feats_c']),
src_feats_c=release_cuda(output_dict['src_feats_c']),
ref_feats_f=release_cuda(output_dict['ref_feats_f']),
src_feats_f=release_cuda(output_dict['src_feats_f']),
ref_node_corr_indices=release_cuda(output_dict['ref_node_corr_indices']),
src_node_corr_indices=release_cuda(output_dict['src_node_corr_indices']),
ref_corr_points=release_cuda(output_dict['ref_corr_points']),
src_corr_points=release_cuda(output_dict['src_corr_points']),
corr_scores=release_cuda(output_dict['corr_scores']),
gt_node_corr_indices=release_cuda(output_dict['gt_node_corr_indices']),
gt_node_corr_overlaps=release_cuda(output_dict['gt_node_corr_overlaps']),
estimated_transform=release_cuda(output_dict['estimated_transform']),
transform=release_cuda(data_dict['transform']),
)
def main():
cfg = make_cfg()
tester = Tester(cfg)
tester.run()
if __name__ == '__main__':
main()
| 3,903 | 37.27451 | 86 | py |
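summary_string and after_test_step both assume a dash-separated seq_id whose second and third fields name the reference and source lidars; for example (the id is hypothetical):

seq_id = 'scene0-top-front'  # hypothetical
ref_frame = seq_id.split('-')[1]  # 'top'
src_frame = seq_id.split('-')[2]  # 'front'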
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.CROON.stage5.gse.k3.max.oacl.stage2.sinkhorn/trainval.py | import argparse
import time
import torch.optim as optim
from geotransformer.engine import EpochBasedTrainer
from config import make_cfg
from dataset import train_valid_data_loader
from model import create_model
from loss import OverallLoss, Evaluator
class Trainer(EpochBasedTrainer):
def __init__(self, cfg):
super().__init__(cfg, max_epoch=cfg.optim.max_epoch)
# dataloader
start_time = time.time()
train_loader, val_loader, neighbor_limits = train_valid_data_loader(cfg, self.distributed)
loading_time = time.time() - start_time
        message = 'Data loader created: {:.3f}s elapsed.'.format(loading_time)
self.logger.info(message)
message = 'Calibrate neighbors: {}.'.format(neighbor_limits)
self.logger.info(message)
self.register_loader(train_loader, val_loader)
# model, optimizer, scheduler
model = create_model(cfg).cuda()
model = self.register_model(model)
optimizer = optim.Adam(model.parameters(), lr=cfg.optim.lr, weight_decay=cfg.optim.weight_decay)
self.register_optimizer(optimizer)
scheduler = optim.lr_scheduler.StepLR(optimizer, cfg.optim.lr_decay_steps, gamma=cfg.optim.lr_decay)
self.register_scheduler(scheduler)
# loss function, evaluator
self.loss_func = OverallLoss(cfg).cuda()
self.evaluator = Evaluator(cfg).cuda()
def train_step(self, epoch, iteration, data_dict):
output_dict = self.model(data_dict)
loss_dict = self.loss_func(output_dict, data_dict)
result_dict = self.evaluator(output_dict, data_dict)
loss_dict.update(result_dict)
return output_dict, loss_dict
def val_step(self, epoch, iteration, data_dict):
output_dict = self.model(data_dict)
loss_dict = self.loss_func(output_dict, data_dict)
result_dict = self.evaluator(output_dict, data_dict)
loss_dict.update(result_dict)
return output_dict, loss_dict
def main():
cfg = make_cfg()
trainer = Trainer(cfg)
trainer.run()
if __name__ == '__main__':
main()
| 2,120 | 32.666667 | 108 | py |
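With the cfg.optim values above (lr 1e-4, gamma 0.95, step every 4 epochs), the StepLR schedule decays as lr * gamma ** (epoch // step); for instance:

lr, gamma, step = 1e-4, 0.95, 4
for epoch in (0, 20, 40, 139):
    print(epoch, lr * gamma ** (epoch // step))  # ~1.0e-4, ~7.7e-5, ~6.0e-5, ~1.7e-5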
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.CROON.stage5.gse.k3.max.oacl.stage2.sinkhorn/classification/__init__.py | 0 | 0 | 0 | py |
|
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.CROON.stage5.gse.k3.max.oacl.stage2.sinkhorn/classification/classification_model.py | import torch.nn as nn
import torch.nn.functional as F
# define model
class basic_block(nn.Module):
"""基本残差块,由两层卷积构成"""
def __init__(self,in_planes,planes,kernel_size=3,stride=1):
"""
        :param in_planes: input channels
        :param planes: output channels
        :param kernel_size: convolution kernel size
        :param stride: convolution stride
"""
super(basic_block, self).__init__()
self.conv1=nn.Conv2d(in_planes,planes,kernel_size=kernel_size,stride=stride,padding=1,bias=False)
self.bn1=nn.BatchNorm2d(planes)
self.relu=nn.ReLU()
self.conv2=nn.Conv2d(planes,planes,kernel_size=kernel_size,stride=1,padding=1,bias=False)
self.bn2=nn.BatchNorm2d(planes)
if stride!=1 or in_planes!=planes:
self.downsample=nn.Sequential(nn.Conv2d(in_planes,planes,kernel_size=1,stride=stride)
,nn.BatchNorm2d(planes))
else:
self.downsample=nn.Sequential()
def forward(self,inx):
x=self.relu(self.bn1(self.conv1(inx)))
x=self.bn2(self.conv2(x))
out=x+self.downsample(inx)
return F.relu(out)
class Resnet(nn.Module):
def __init__(self,basicBlock,blockNums,nb_classes):
super(Resnet, self).__init__()
self.in_planes=64
        # input layer
self.conv1=nn.Conv2d(1,self.in_planes,kernel_size=(3,3),stride=(1,1),padding=1,bias=False)
self.bn1=nn.BatchNorm2d(self.in_planes)
self.relu=nn.ReLU(inplace=True)
self.maxpool=nn.MaxPool2d(kernel_size=3,stride=2,padding=1)
self.layer1=self._make_layers(basicBlock,blockNums[0],64,1)
self.layer2=self._make_layers(basicBlock,blockNums[1],128,2)
self.layer3=self._make_layers(basicBlock,blockNums[2],256,2)
self.layer4=self._make_layers(basicBlock,blockNums[3],512,2)
self.avgpool=nn.AdaptiveAvgPool2d(output_size=(1,1))
self.fc=nn.Linear(512,nb_classes)
def _make_layers(self,basicBlock,blockNum,plane,stride):
"""
        :param basicBlock: basic residual block class
        :param blockNum: number of basic residual blocks in this layer (2 per layer for resnet18)
        :param plane: output channels
        :param stride: convolution stride
        :return:
"""
layers=[]
for i in range(blockNum):
if i==0:
layer=basicBlock(self.in_planes,plane,3,stride=stride)
else:
layer=basicBlock(plane,plane,3,stride=1)
layers.append(layer)
self.in_planes=plane
return nn.Sequential(*layers)
def forward(self,inx):
x=self.maxpool(self.relu(self.bn1(self.conv1(inx))))
x=self.layer1(x)
x=self.layer2(x)
x=self.layer3(x)
x=self.layer4(x)
x=self.avgpool(x)
x=x.view(x.shape[0],-1)
out=self.fc(x)
return out
class classification_model(nn.Module):
def __init__(self):
super(classification_model, self).__init__()
self.resnet18=Resnet(basic_block,[2,2,2,2],256)
self.linear1 = nn.Linear(256, 128)
self.linear2 = nn.Linear(128, 1)
self.activate1 = nn.ReLU()
self.dropout1 = nn.Dropout(p=0.5)
self.activate2 = nn.Sigmoid()
def forward(self, x):
x = self.resnet18(x)
x = self.linear1(x)
x = self.activate1(x)
x = self.dropout1(x)
x = self.linear2(x)
x = self.activate2(x)
return x | 3,374 | 33.438776 | 105 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.kitti.stage5.gse.k3.max.oacl.stage2.sinkhorn/backbone.py | import torch
import torch.nn as nn
from geotransformer.modules.kpconv import ConvBlock, ResidualBlock, UnaryBlock, LastUnaryBlock, nearest_upsample
class KPConvFPN(nn.Module):
def __init__(self, input_dim, output_dim, init_dim, kernel_size, init_radius, init_sigma, group_norm):
super(KPConvFPN, self).__init__()
self.encoder1_1 = ConvBlock(input_dim, init_dim, kernel_size, init_radius, init_sigma, group_norm)
self.encoder1_2 = ResidualBlock(init_dim, init_dim * 2, kernel_size, init_radius, init_sigma, group_norm)
self.encoder2_1 = ResidualBlock(
init_dim * 2, init_dim * 2, kernel_size, init_radius, init_sigma, group_norm, strided=True
)
self.encoder2_2 = ResidualBlock(
init_dim * 2, init_dim * 4, kernel_size, init_radius * 2, init_sigma * 2, group_norm
)
self.encoder2_3 = ResidualBlock(
init_dim * 4, init_dim * 4, kernel_size, init_radius * 2, init_sigma * 2, group_norm
)
self.encoder3_1 = ResidualBlock(
init_dim * 4,
init_dim * 4,
kernel_size,
init_radius * 2,
init_sigma * 2,
group_norm,
strided=True,
)
self.encoder3_2 = ResidualBlock(
init_dim * 4, init_dim * 8, kernel_size, init_radius * 4, init_sigma * 4, group_norm
)
self.encoder3_3 = ResidualBlock(
init_dim * 8, init_dim * 8, kernel_size, init_radius * 4, init_sigma * 4, group_norm
)
self.encoder4_1 = ResidualBlock(
init_dim * 8,
init_dim * 8,
kernel_size,
init_radius * 4,
init_sigma * 4,
group_norm,
strided=True,
)
self.encoder4_2 = ResidualBlock(
init_dim * 8, init_dim * 16, kernel_size, init_radius * 8, init_sigma * 8, group_norm
)
self.encoder4_3 = ResidualBlock(
init_dim * 16, init_dim * 16, kernel_size, init_radius * 8, init_sigma * 8, group_norm
)
self.encoder5_1 = ResidualBlock(
init_dim * 16,
init_dim * 16,
kernel_size,
init_radius * 8,
init_sigma * 8,
group_norm,
strided=True,
)
self.encoder5_2 = ResidualBlock(
init_dim * 16, init_dim * 32, kernel_size, init_radius * 16, init_sigma * 16, group_norm
)
self.encoder5_3 = ResidualBlock(
init_dim * 32, init_dim * 32, kernel_size, init_radius * 16, init_sigma * 16, group_norm
)
self.decoder4 = UnaryBlock(init_dim * 48, init_dim * 16, group_norm)
self.decoder3 = UnaryBlock(init_dim * 24, init_dim * 8, group_norm)
self.decoder2 = LastUnaryBlock(init_dim * 12, output_dim)
def forward(self, feats, data_dict):
feats_list = []
points_list = data_dict['points']
neighbors_list = data_dict['neighbors']
subsampling_list = data_dict['subsampling']
upsampling_list = data_dict['upsampling']
feats_s1 = feats
feats_s1 = self.encoder1_1(feats_s1, points_list[0], points_list[0], neighbors_list[0])
feats_s1 = self.encoder1_2(feats_s1, points_list[0], points_list[0], neighbors_list[0])
feats_s2 = self.encoder2_1(feats_s1, points_list[1], points_list[0], subsampling_list[0])
feats_s2 = self.encoder2_2(feats_s2, points_list[1], points_list[1], neighbors_list[1])
feats_s2 = self.encoder2_3(feats_s2, points_list[1], points_list[1], neighbors_list[1])
feats_s3 = self.encoder3_1(feats_s2, points_list[2], points_list[1], subsampling_list[1])
feats_s3 = self.encoder3_2(feats_s3, points_list[2], points_list[2], neighbors_list[2])
feats_s3 = self.encoder3_3(feats_s3, points_list[2], points_list[2], neighbors_list[2])
feats_s4 = self.encoder4_1(feats_s3, points_list[3], points_list[2], subsampling_list[2])
feats_s4 = self.encoder4_2(feats_s4, points_list[3], points_list[3], neighbors_list[3])
feats_s4 = self.encoder4_3(feats_s4, points_list[3], points_list[3], neighbors_list[3])
feats_s5 = self.encoder5_1(feats_s4, points_list[4], points_list[3], subsampling_list[3])
feats_s5 = self.encoder5_2(feats_s5, points_list[4], points_list[4], neighbors_list[4])
feats_s5 = self.encoder5_3(feats_s5, points_list[4], points_list[4], neighbors_list[4])
latent_s5 = feats_s5
feats_list.append(feats_s5)
latent_s4 = nearest_upsample(latent_s5, upsampling_list[3])
latent_s4 = torch.cat([latent_s4, feats_s4], dim=1)
latent_s4 = self.decoder4(latent_s4)
feats_list.append(latent_s4)
latent_s3 = nearest_upsample(latent_s4, upsampling_list[2])
latent_s3 = torch.cat([latent_s3, feats_s3], dim=1)
latent_s3 = self.decoder3(latent_s3)
feats_list.append(latent_s3)
latent_s2 = nearest_upsample(latent_s3, upsampling_list[1])
latent_s2 = torch.cat([latent_s2, feats_s2], dim=1)
latent_s2 = self.decoder2(latent_s2)
feats_list.append(latent_s2)
feats_list.reverse()
return feats_list
| 5,227 | 40.824 | 113 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.kitti.stage5.gse.k3.max.oacl.stage2.sinkhorn/config.py | import argparse
import os
import os.path as osp
from easydict import EasyDict as edict
from geotransformer.utils.common import ensure_dir
_C = edict()
# random seed
_C.seed = 7351
# dirs
_C.working_dir = osp.dirname(osp.realpath(__file__))
_C.root_dir = osp.dirname(osp.dirname(_C.working_dir))
_C.exp_name = osp.basename(_C.working_dir)
_C.output_dir = osp.join(_C.root_dir, 'output', _C.exp_name)
_C.snapshot_dir = osp.join(_C.output_dir, 'snapshots')
_C.log_dir = osp.join(_C.output_dir, 'logs')
_C.event_dir = osp.join(_C.output_dir, 'events')
_C.feature_dir = osp.join(_C.output_dir, 'features')
ensure_dir(_C.output_dir)
ensure_dir(_C.snapshot_dir)
ensure_dir(_C.log_dir)
ensure_dir(_C.event_dir)
ensure_dir(_C.feature_dir)
# data
_C.data = edict()
_C.data.dataset_root = osp.join(_C.root_dir, 'data', 'Kitti')
# train data
_C.train = edict()
_C.train.batch_size = 1
_C.train.num_workers = 8
_C.train.point_limit = 30000
_C.train.use_augmentation = True
_C.train.augmentation_noise = 0.01
_C.train.augmentation_min_scale = 0.8
_C.train.augmentation_max_scale = 1.2
_C.train.augmentation_shift = 2.0
_C.train.augmentation_rotation = 1.0
# test config
_C.test = edict()
_C.test.batch_size = 1
_C.test.num_workers = 8
_C.test.point_limit = None
# eval config
_C.eval = edict()
_C.eval.acceptance_overlap = 0.0
_C.eval.acceptance_radius = 1.0
_C.eval.inlier_ratio_threshold = 0.05
_C.eval.rre_threshold = 5.0
_C.eval.rte_threshold = 2.0
# ransac
_C.ransac = edict()
_C.ransac.distance_threshold = 0.3
_C.ransac.num_points = 4
_C.ransac.num_iterations = 50000
# optim config
_C.optim = edict()
_C.optim.lr = 1e-4
_C.optim.lr_decay = 0.95
_C.optim.lr_decay_steps = 4
_C.optim.weight_decay = 1e-6
_C.optim.max_epoch = 139
_C.optim.grad_acc_steps = 1
# model - backbone
_C.backbone = edict()
_C.backbone.num_stages = 5
_C.backbone.init_voxel_size = 0.3
_C.backbone.kernel_size = 15
_C.backbone.base_radius = 4.25
_C.backbone.base_sigma = 2.0
_C.backbone.init_radius = _C.backbone.base_radius * _C.backbone.init_voxel_size
_C.backbone.init_sigma = _C.backbone.base_sigma * _C.backbone.init_voxel_size
_C.backbone.group_norm = 32
_C.backbone.input_dim = 1
_C.backbone.init_dim = 64
_C.backbone.output_dim = 256
# model - Global
_C.model = edict()
_C.model.ground_truth_matching_radius = 0.6
_C.model.num_points_in_patch = 128
_C.model.num_sinkhorn_iterations = 100
# model - Coarse Matching
_C.coarse_matching = edict()
_C.coarse_matching.num_targets = 128
_C.coarse_matching.overlap_threshold = 0.1
_C.coarse_matching.num_correspondences = 512  # was 256; kept identical for train and val
_C.coarse_matching.dual_normalization = True
# model - GeoTransformer
_C.geotransformer = edict()
_C.geotransformer.input_dim = 2048
_C.geotransformer.hidden_dim = 128
_C.geotransformer.output_dim = 256
_C.geotransformer.num_heads = 4
_C.geotransformer.blocks = ['self', 'cross', 'self', 'cross', 'self', 'cross']
_C.geotransformer.sigma_d = 4.8
_C.geotransformer.sigma_a = 15
_C.geotransformer.angle_k = 3
_C.geotransformer.reduction_a = 'max'
# model - Fine Matching
_C.fine_matching = edict()
_C.fine_matching.topk = 10
_C.fine_matching.acceptance_radius = 0.6
_C.fine_matching.mutual = True
_C.fine_matching.confidence_threshold = 0.05
_C.fine_matching.use_dustbin = False
_C.fine_matching.use_global_score = False
_C.fine_matching.correspondence_threshold = 3
_C.fine_matching.correspondence_limit = None
_C.fine_matching.num_refinement_steps = 5
# loss - Coarse level
_C.coarse_loss = edict()
_C.coarse_loss.positive_margin = 0.1
_C.coarse_loss.negative_margin = 1.4
_C.coarse_loss.positive_optimal = 0.1
_C.coarse_loss.negative_optimal = 1.4
_C.coarse_loss.log_scale = 40
_C.coarse_loss.positive_overlap = 0.1
# loss - Fine level
_C.fine_loss = edict()
_C.fine_loss.positive_radius = 0.6
# loss - Overall
_C.loss = edict()
_C.loss.weight_coarse_loss = 1.0
_C.loss.weight_fine_loss = 1.0
def make_cfg():
return _C
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--link_output', dest='link_output', action='store_true', help='link output dir')
args = parser.parse_args()
return args
def main():
cfg = make_cfg()
args = parse_args()
if args.link_output:
os.symlink(cfg.output_dir, 'output')
if __name__ == '__main__':
main()
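# --- Hedged usage sketch (added; every attribute referenced exists in _C above) ---
def _example_usage():
    cfg = make_cfg()
    # e.g. inspect the derived KPConv radii and where snapshots/features land
    print(cfg.backbone.init_radius, cfg.backbone.init_sigma)
    print(cfg.snapshot_dir, cfg.feature_dir)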
| 4,299 | 24.903614 | 105 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.kitti.stage5.gse.k3.max.oacl.stage2.sinkhorn/dataset.py | from geotransformer.datasets.registration.kitti.dataset import OdometryKittiPairDataset
from geotransformer.utils.data import (
registration_collate_fn_stack_mode,
calibrate_neighbors_stack_mode,
build_dataloader_stack_mode,
)
def train_valid_data_loader(cfg, distributed):
train_dataset = OdometryKittiPairDataset(
cfg.data.dataset_root,
'train',
point_limit=cfg.train.point_limit,
use_augmentation=cfg.train.use_augmentation,
augmentation_noise=cfg.train.augmentation_noise,
augmentation_min_scale=cfg.train.augmentation_min_scale,
augmentation_max_scale=cfg.train.augmentation_max_scale,
augmentation_shift=cfg.train.augmentation_shift,
augmentation_rotation=cfg.train.augmentation_rotation,
)
neighbor_limits = calibrate_neighbors_stack_mode(
train_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
)
train_loader = build_dataloader_stack_mode(
train_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
neighbor_limits,
batch_size=cfg.train.batch_size,
num_workers=cfg.train.num_workers,
shuffle=True,
distributed=distributed,
)
valid_dataset = OdometryKittiPairDataset(
cfg.data.dataset_root,
'val',
point_limit=cfg.test.point_limit,
use_augmentation=False,
)
valid_loader = build_dataloader_stack_mode(
valid_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
neighbor_limits,
batch_size=cfg.test.batch_size,
num_workers=cfg.test.num_workers,
shuffle=False,
distributed=distributed,
)
return train_loader, valid_loader, neighbor_limits
def test_data_loader(cfg):
train_dataset = OdometryKittiPairDataset(
cfg.data.dataset_root,
'train',
point_limit=cfg.train.point_limit,
use_augmentation=cfg.train.use_augmentation,
augmentation_noise=cfg.train.augmentation_noise,
augmentation_min_scale=cfg.train.augmentation_min_scale,
augmentation_max_scale=cfg.train.augmentation_max_scale,
augmentation_shift=cfg.train.augmentation_shift,
augmentation_rotation=cfg.train.augmentation_rotation,
)
neighbor_limits = calibrate_neighbors_stack_mode(
train_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
)
test_dataset = OdometryKittiPairDataset(
cfg.data.dataset_root,
'test',
point_limit=cfg.test.point_limit,
use_augmentation=False,
)
test_loader = build_dataloader_stack_mode(
test_dataset,
registration_collate_fn_stack_mode,
cfg.backbone.num_stages,
cfg.backbone.init_voxel_size,
cfg.backbone.init_radius,
neighbor_limits,
batch_size=cfg.test.batch_size,
num_workers=cfg.test.num_workers,
shuffle=False,
)
return test_loader, neighbor_limits
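# --- Added note: typical call sites (see trainval.py / test.py) ---
# train_loader, valid_loader, neighbor_limits = train_valid_data_loader(cfg, distributed=False)
# test_loader, neighbor_limits = test_data_loader(cfg)
# Both build the train set first so neighbor limits are calibrated consistently.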
| 3,361 | 32.62 | 87 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.kitti.stage5.gse.k3.max.oacl.stage2.sinkhorn/eval.py | import sys
import json
import argparse
import glob
import os.path as osp
import time
import numpy as np
import torch
from config import make_cfg
from geotransformer.engine import Logger
from geotransformer.modules.registration import weighted_procrustes
from geotransformer.utils.summary_board import SummaryBoard
from geotransformer.utils.open3d import registration_with_ransac_from_correspondences
from geotransformer.utils.registration import (
evaluate_sparse_correspondences,
evaluate_correspondences,
compute_registration_error,
)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--test_epoch', default=None, type=int, help='test epoch')
parser.add_argument('--method', choices=['lgr', 'ransac', 'svd'], required=True, help='registration method')
parser.add_argument('--num_corr', type=int, default=None, help='number of correspondences for registration')
parser.add_argument('--verbose', action='store_true', help='verbose mode')
return parser
def eval_one_epoch(args, cfg, logger):
features_root = cfg.feature_dir
coarse_matching_meter = SummaryBoard()
coarse_matching_meter.register_meter('precision')
coarse_matching_meter.register_meter('PMR>0')
coarse_matching_meter.register_meter('PMR>=0.1')
coarse_matching_meter.register_meter('PMR>=0.3')
coarse_matching_meter.register_meter('PMR>=0.5')
fine_matching_meter = SummaryBoard()
fine_matching_meter.register_meter('recall')
fine_matching_meter.register_meter('inlier_ratio')
fine_matching_meter.register_meter('overlap')
registration_meter = SummaryBoard()
registration_meter.register_meter('recall')
registration_meter.register_meter('rre')
registration_meter.register_meter('rte')
file_names = sorted(
glob.glob(osp.join(features_root, '*.npz')),
key=lambda x: [int(i) for i in osp.splitext(osp.basename(x))[0].split('_')],
)
num_test_pairs = len(file_names)
for i, file_name in enumerate(file_names):
seq_id, src_frame, ref_frame = [int(x) for x in osp.splitext(osp.basename(file_name))[0].split('_')]
data_dict = np.load(file_name)
ref_nodes = data_dict['ref_points_c']
src_nodes = data_dict['src_points_c']
ref_node_corr_indices = data_dict['ref_node_corr_indices']
src_node_corr_indices = data_dict['src_node_corr_indices']
ref_corr_points = data_dict['ref_corr_points']
src_corr_points = data_dict['src_corr_points']
corr_scores = data_dict['corr_scores']
gt_node_corr_indices = data_dict['gt_node_corr_indices']
gt_transform = data_dict['transform']
if args.num_corr is not None and corr_scores.shape[0] > args.num_corr:
sel_indices = np.argsort(-corr_scores)[: args.num_corr]
ref_corr_points = ref_corr_points[sel_indices]
src_corr_points = src_corr_points[sel_indices]
corr_scores = corr_scores[sel_indices]
message = '{}/{}, seq_id: {}, id0: {}, id1: {}'.format(i + 1, num_test_pairs, seq_id, src_frame, ref_frame)
# 1. evaluate correspondences
# 1.1 evaluate coarse correspondences
coarse_matching_result_dict = evaluate_sparse_correspondences(
ref_nodes,
src_nodes,
ref_node_corr_indices,
src_node_corr_indices,
gt_node_corr_indices,
)
coarse_precision = coarse_matching_result_dict['precision']
coarse_matching_meter.update('precision', coarse_precision)
coarse_matching_meter.update('PMR>0', float(coarse_precision > 0))
coarse_matching_meter.update('PMR>=0.1', float(coarse_precision >= 0.1))
coarse_matching_meter.update('PMR>=0.3', float(coarse_precision >= 0.3))
coarse_matching_meter.update('PMR>=0.5', float(coarse_precision >= 0.5))
# 1.2 evaluate fine correspondences
fine_matching_result_dict = evaluate_correspondences(
ref_corr_points,
src_corr_points,
gt_transform,
positive_radius=cfg.eval.acceptance_radius,
)
inlier_ratio = fine_matching_result_dict['inlier_ratio']
overlap = fine_matching_result_dict['overlap']
fine_matching_meter.update('inlier_ratio', inlier_ratio)
fine_matching_meter.update('overlap', overlap)
fine_matching_meter.update('recall', float(inlier_ratio >= cfg.eval.inlier_ratio_threshold))
message += ', c_PIR: {:.3f}'.format(coarse_precision)
message += ', f_IR: {:.3f}'.format(inlier_ratio)
message += ', f_OV: {:.3f}'.format(overlap)
message += ', f_RS: {:.3f}'.format(fine_matching_result_dict['residual'])
message += ', f_NU: {}'.format(fine_matching_result_dict['num_corr'])
# 2. evaluate registration
if args.method == 'lgr':
est_transform = data_dict['estimated_transform']
elif args.method == 'ransac':
est_transform = registration_with_ransac_from_correspondences(
src_corr_points,
ref_corr_points,
distance_threshold=cfg.ransac.distance_threshold,
ransac_n=cfg.ransac.num_points,
num_iterations=cfg.ransac.num_iterations,
)
elif args.method == 'svd':
with torch.no_grad():
ref_corr_points = torch.from_numpy(ref_corr_points).cuda()
src_corr_points = torch.from_numpy(src_corr_points).cuda()
corr_scores = torch.from_numpy(corr_scores).cuda()
est_transform = weighted_procrustes(
src_corr_points, ref_corr_points, corr_scores, return_transform=True
)
est_transform = est_transform.detach().cpu().numpy()
else:
raise ValueError(f'Unsupported registration method: {args.method}.')
rre, rte = compute_registration_error(gt_transform, est_transform)
accepted = rre < cfg.eval.rre_threshold and rte < cfg.eval.rte_threshold
if accepted:
registration_meter.update('rre', rre)
registration_meter.update('rte', rte)
registration_meter.update('recall', float(accepted))
message += ', r_RRE: {:.3f}'.format(rre)
message += ', r_RTE: {:.3f}'.format(rte)
if args.verbose:
logger.info(message)
if args.test_epoch is not None:
logger.critical(f'Epoch {args.test_epoch}')
# 1. print correspondence evaluation results
message = ' Coarse Matching'
message += ', PIR: {:.3f}'.format(coarse_matching_meter.mean('precision'))
message += ', PMR>0: {:.3f}'.format(coarse_matching_meter.mean('PMR>0'))
message += ', PMR>=0.1: {:.3f}'.format(coarse_matching_meter.mean('PMR>=0.1'))
message += ', PMR>=0.3: {:.3f}'.format(coarse_matching_meter.mean('PMR>=0.3'))
message += ', PMR>=0.5: {:.3f}'.format(coarse_matching_meter.mean('PMR>=0.5'))
logger.critical(message)
message = ' Fine Matching'
message += ', FMR: {:.3f}'.format(fine_matching_meter.mean('recall'))
message += ', IR: {:.3f}'.format(fine_matching_meter.mean('inlier_ratio'))
message += ', OV: {:.3f}'.format(fine_matching_meter.mean('overlap'))
message += ', std: {:.3f}'.format(fine_matching_meter.std('recall'))
logger.critical(message)
# 2. print registration evaluation results
message = ' Registration'
    message += ', RR: {:.3f}'.format(registration_meter.mean('recall'))
    message += ', RRE: {:.3f}'.format(registration_meter.mean('rre'))
    message += ', RTE: {:.3f}'.format(registration_meter.mean('rte'))
logger.critical(message)
def main():
parser = make_parser()
args = parser.parse_args()
cfg = make_cfg()
log_file = osp.join(cfg.log_dir, 'eval-{}.log'.format(time.strftime("%Y%m%d-%H%M%S")))
logger = Logger(log_file=log_file)
message = 'Command executed: ' + ' '.join(sys.argv)
logger.info(message)
message = 'Configs:\n' + json.dumps(cfg, indent=4)
logger.info(message)
eval_one_epoch(args, cfg, logger)
if __name__ == '__main__':
main()
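# --- Example invocations (added; the epoch and correspondence count are hypothetical) ---
# python eval.py --method=lgr --verbose
# python eval.py --test_epoch=39 --method=ransac --num_corr=250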
| 8,169 | 39.646766 | 115 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.kitti.stage5.gse.k3.max.oacl.stage2.sinkhorn/eval.sh | if [ "$2" = "test" ]; then
python test.py --test_epoch=$1
fi
python eval.py --test_epoch=$1 --method=lgr
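# Usage (added note): bash eval.sh <epoch> [test]
# Passing "test" as the second argument re-runs test.py to dump features before evaluating.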
| 109 | 21 | 43 | sh |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.kitti.stage5.gse.k3.max.oacl.stage2.sinkhorn/loss.py | import torch
import torch.nn as nn
from geotransformer.modules.ops import apply_transform, pairwise_distance
from geotransformer.modules.registration.metrics import isotropic_transform_error
from geotransformer.modules.loss import WeightedCircleLoss
class CoarseMatchingLoss(nn.Module):
def __init__(self, cfg):
super(CoarseMatchingLoss, self).__init__()
self.weighted_circle_loss = WeightedCircleLoss(
cfg.coarse_loss.positive_margin,
cfg.coarse_loss.negative_margin,
cfg.coarse_loss.positive_optimal,
cfg.coarse_loss.negative_optimal,
cfg.coarse_loss.log_scale,
)
self.positive_overlap = cfg.coarse_loss.positive_overlap
def forward(self, output_dict):
ref_feats = output_dict['ref_feats_c']
src_feats = output_dict['src_feats_c']
gt_node_corr_indices = output_dict['gt_node_corr_indices']
gt_node_corr_overlaps = output_dict['gt_node_corr_overlaps']
gt_ref_node_corr_indices = gt_node_corr_indices[:, 0]
gt_src_node_corr_indices = gt_node_corr_indices[:, 1]
feat_dists = torch.sqrt(pairwise_distance(ref_feats, src_feats, normalized=True))
overlaps = torch.zeros_like(feat_dists)
overlaps[gt_ref_node_corr_indices, gt_src_node_corr_indices] = gt_node_corr_overlaps
pos_masks = torch.gt(overlaps, self.positive_overlap)
neg_masks = torch.eq(overlaps, 0)
pos_scales = torch.sqrt(overlaps * pos_masks.float())
loss = self.weighted_circle_loss(pos_masks, neg_masks, feat_dists, pos_scales)
return loss
class FineMatchingLoss(nn.Module):
def __init__(self, cfg):
super(FineMatchingLoss, self).__init__()
self.positive_radius = cfg.fine_loss.positive_radius
def forward(self, output_dict, data_dict):
ref_node_corr_knn_points = output_dict['ref_node_corr_knn_points']
src_node_corr_knn_points = output_dict['src_node_corr_knn_points']
ref_node_corr_knn_masks = output_dict['ref_node_corr_knn_masks']
src_node_corr_knn_masks = output_dict['src_node_corr_knn_masks']
matching_scores = output_dict['matching_scores']
transform = data_dict['transform']
src_node_corr_knn_points = apply_transform(src_node_corr_knn_points, transform)
dists = pairwise_distance(ref_node_corr_knn_points, src_node_corr_knn_points) # (B, N, M)
gt_masks = torch.logical_and(ref_node_corr_knn_masks.unsqueeze(2), src_node_corr_knn_masks.unsqueeze(1))
gt_corr_map = torch.lt(dists, self.positive_radius ** 2)
gt_corr_map = torch.logical_and(gt_corr_map, gt_masks)
slack_row_labels = torch.logical_and(torch.eq(gt_corr_map.sum(2), 0), ref_node_corr_knn_masks)
slack_col_labels = torch.logical_and(torch.eq(gt_corr_map.sum(1), 0), src_node_corr_knn_masks)
labels = torch.zeros_like(matching_scores, dtype=torch.bool)
labels[:, :-1, :-1] = gt_corr_map
labels[:, :-1, -1] = slack_row_labels
labels[:, -1, :-1] = slack_col_labels
loss = -matching_scores[labels].mean()
return loss
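# Added note on the label construction above: ground-truth rows/columns with no
# match inside `positive_radius` are routed to the slack (dustbin) row/column of
# the Sinkhorn matching matrix, so the negative log-likelihood in `forward`
# supervises non-matches as well as matches.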
class OverallLoss(nn.Module):
def __init__(self, cfg):
super(OverallLoss, self).__init__()
self.coarse_loss = CoarseMatchingLoss(cfg)
self.fine_loss = FineMatchingLoss(cfg)
self.weight_coarse_loss = cfg.loss.weight_coarse_loss
self.weight_fine_loss = cfg.loss.weight_fine_loss
# self.classify_loss = nn.CrossEntropyLoss()
        self.criterion = nn.BCELoss(reduction="mean")  # classification loss
def forward(self, output_dict, data_dict):
coarse_loss = self.coarse_loss(output_dict)
fine_loss = self.fine_loss(output_dict, data_dict)
loss = self.criterion(output_dict['predict_results'], output_dict['classification_ground_truth'])
# loss = self.weight_coarse_loss * coarse_loss + self.weight_fine_loss * fine_loss# + 0.1*classify_loss
return {
'c_loss': coarse_loss,
'f_loss': fine_loss,
# 'classify_loss': 0,#classify_loss,
'loss': loss,
}
class Evaluator(nn.Module):
def __init__(self, cfg):
super(Evaluator, self).__init__()
self.acceptance_overlap = cfg.eval.acceptance_overlap
self.acceptance_radius = cfg.eval.acceptance_radius
self.rre_threshold = cfg.eval.rre_threshold
self.rte_threshold = cfg.eval.rte_threshold
@torch.no_grad()
def evaluate_coarse(self, output_dict):
ref_length_c = output_dict['ref_points_c'].shape[0]
src_length_c = output_dict['src_points_c'].shape[0]
gt_node_corr_overlaps = output_dict['gt_node_corr_overlaps']
gt_node_corr_indices = output_dict['gt_node_corr_indices']
masks = torch.gt(gt_node_corr_overlaps, self.acceptance_overlap)
gt_node_corr_indices = gt_node_corr_indices[masks]
gt_ref_node_corr_indices = gt_node_corr_indices[:, 0]
gt_src_node_corr_indices = gt_node_corr_indices[:, 1]
gt_node_corr_map = torch.zeros(size=(ref_length_c, src_length_c)).cuda()
gt_node_corr_map[gt_ref_node_corr_indices, gt_src_node_corr_indices] = 1.0
ref_node_corr_indices = output_dict['ref_node_corr_indices']
src_node_corr_indices = output_dict['src_node_corr_indices']
precision = gt_node_corr_map[ref_node_corr_indices, src_node_corr_indices].mean()
return precision
@torch.no_grad()
def evaluate_fine(self, output_dict, data_dict):
transform = data_dict['transform']
ref_corr_points = output_dict['ref_corr_points']
src_corr_points = output_dict['src_corr_points']
src_corr_points = apply_transform(src_corr_points, transform)
corr_distances = torch.linalg.norm(ref_corr_points - src_corr_points, dim=1)
        if len(corr_distances) > 0:
            precision = torch.lt(corr_distances, self.acceptance_radius).float().mean()
        else:
            precision = 0  # no correspondences survived fine matching
return precision
@torch.no_grad()
def evaluate_registration(self, output_dict, data_dict):
transform = data_dict['transform']
est_transform = output_dict['estimated_transform']
rre, rte = isotropic_transform_error(transform, est_transform)
recall = torch.logical_and(torch.lt(rre, self.rre_threshold), torch.lt(rte, self.rte_threshold)).float()
return rre, rte, recall
@torch.no_grad()
def evaluate_classification(self, output_dict, data_dict):
predict_label = output_dict['classification_result']
ground_truth_label = data_dict['classification_label']
        _, statistic_results = torch.max(predict_label, dim=0)
        return bool(statistic_results == torch.tensor(ground_truth_label))
def forward(self, output_dict, data_dict):
c_precision = self.evaluate_coarse(output_dict)
f_precision = self.evaluate_fine(output_dict, data_dict)
rre, rte, recall = self.evaluate_registration(output_dict, data_dict)
# classification_precision = self.evaluate_classification(output_dict, data_dict)
return {
'PIR': c_precision,
'IR': f_precision,
'RRE': rre,
'RTE': rte,
'RR': recall,
}
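# --- Added note: metric keys returned by Evaluator.forward ---
# PIR = coarse (superpoint) matching precision, IR = fine inlier ratio,
# RRE / RTE = relative rotation / translation error, RR = registration recall
# (a pair counts as recalled when RRE < rre_threshold and RTE < rte_threshold).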
| 7,322 | 42.850299 | 112 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.kitti.stage5.gse.k3.max.oacl.stage2.sinkhorn/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from IPython import embed
from geotransformer.modules.ops import point_to_node_partition, index_select
from geotransformer.modules.registration import get_node_correspondences
from geotransformer.modules.sinkhorn import LearnableLogOptimalTransport
from geotransformer.modules.geotransformer import (
GeometricTransformer,
SuperPointMatching,
SuperPointTargetGenerator,
LocalGlobalRegistration,
)
from classification.classification_model import classification_model
from geotransformer.modules.kpconv.modules import GlobalAvgPool
from backbone import KPConvFPN
import random
from torch.autograd import Variable
class GeoTransformer(nn.Module):
def __init__(self, cfg):
super(GeoTransformer, self).__init__()
self.num_points_in_patch = cfg.model.num_points_in_patch
self.matching_radius = cfg.model.ground_truth_matching_radius
self.backbone = KPConvFPN(
cfg.backbone.input_dim,
cfg.backbone.output_dim,
cfg.backbone.init_dim,
cfg.backbone.kernel_size,
cfg.backbone.init_radius,
cfg.backbone.init_sigma,
cfg.backbone.group_norm,
)
self.transformer = GeometricTransformer(
cfg.geotransformer.input_dim,
cfg.geotransformer.output_dim,
cfg.geotransformer.hidden_dim,
cfg.geotransformer.num_heads,
cfg.geotransformer.blocks,
cfg.geotransformer.sigma_d,
cfg.geotransformer.sigma_a,
cfg.geotransformer.angle_k,
reduction_a=cfg.geotransformer.reduction_a,
)
self.coarse_target = SuperPointTargetGenerator(
cfg.coarse_matching.num_targets, cfg.coarse_matching.overlap_threshold
)
self.coarse_matching = SuperPointMatching(
cfg.coarse_matching.num_correspondences, cfg.coarse_matching.dual_normalization
)
self.fine_matching = LocalGlobalRegistration(
cfg.fine_matching.topk,
cfg.fine_matching.acceptance_radius,
mutual=cfg.fine_matching.mutual,
confidence_threshold=cfg.fine_matching.confidence_threshold,
use_dustbin=cfg.fine_matching.use_dustbin,
use_global_score=cfg.fine_matching.use_global_score,
correspondence_threshold=cfg.fine_matching.correspondence_threshold,
correspondence_limit=cfg.fine_matching.correspondence_limit,
num_refinement_steps=cfg.fine_matching.num_refinement_steps,
)
self.optimal_transport = LearnableLogOptimalTransport(cfg.model.num_sinkhorn_iterations)
        for p in self.parameters():
            p.requires_grad = False  # freeze the pretrained registration network; only the classifier below is trained
self.classification_model = classification_model()
def forward(self, data_dict):
output_dict = {}
# Downsample point clouds
feats = data_dict['features'].detach()
transform = data_dict['transform'].detach()
ref_length_c = data_dict['lengths'][-1][0].item()
ref_length_f = data_dict['lengths'][1][0].item()
ref_length = data_dict['lengths'][0][0].item()
points_c = data_dict['points'][-1].detach()
points_f = data_dict['points'][1].detach()
points = data_dict['points'][0].detach()
ref_points_c = points_c[:ref_length_c]
src_points_c = points_c[ref_length_c:]
ref_points_f = points_f[:ref_length_f]
src_points_f = points_f[ref_length_f:]
ref_points = points[:ref_length]
src_points = points[ref_length:]
output_dict['ref_points_c'] = ref_points_c
output_dict['src_points_c'] = src_points_c
output_dict['ref_points_f'] = ref_points_f
output_dict['src_points_f'] = src_points_f
output_dict['ref_points'] = ref_points
output_dict['src_points'] = src_points
# 1. Generate ground truth node correspondences
_, ref_node_masks, ref_node_knn_indices, ref_node_knn_masks = point_to_node_partition(
ref_points_f, ref_points_c, self.num_points_in_patch
)
_, src_node_masks, src_node_knn_indices, src_node_knn_masks = point_to_node_partition(
src_points_f, src_points_c, self.num_points_in_patch
)
ref_padded_points_f = torch.cat([ref_points_f, torch.zeros_like(ref_points_f[:1])], dim=0)
src_padded_points_f = torch.cat([src_points_f, torch.zeros_like(src_points_f[:1])], dim=0)
ref_node_knn_points = index_select(ref_padded_points_f, ref_node_knn_indices, dim=0)
src_node_knn_points = index_select(src_padded_points_f, src_node_knn_indices, dim=0)
gt_node_corr_indices, gt_node_corr_overlaps = get_node_correspondences(
ref_points_c,
src_points_c,
ref_node_knn_points,
src_node_knn_points,
transform,
self.matching_radius,
ref_masks=ref_node_masks,
src_masks=src_node_masks,
ref_knn_masks=ref_node_knn_masks,
src_knn_masks=src_node_knn_masks,
)
output_dict['gt_node_corr_indices'] = gt_node_corr_indices
output_dict['gt_node_corr_overlaps'] = gt_node_corr_overlaps
# 2. KPFCNN Encoder
feats_list = self.backbone(feats, data_dict)
feats_c = feats_list[-1]
feats_f = feats_list[0]
# 3. Conditional Transformer
ref_feats_c = feats_c[:ref_length_c]
src_feats_c = feats_c[ref_length_c:]
ref_feats_c, src_feats_c = self.transformer(
ref_points_c.unsqueeze(0),
src_points_c.unsqueeze(0),
ref_feats_c.unsqueeze(0),
src_feats_c.unsqueeze(0),
)
ref_feats_c_norm = F.normalize(ref_feats_c.squeeze(0), p=2, dim=1)
src_feats_c_norm = F.normalize(src_feats_c.squeeze(0), p=2, dim=1)
output_dict['ref_feats_c'] = ref_feats_c_norm
output_dict['src_feats_c'] = src_feats_c_norm
# 5. Head for fine level matching
ref_feats_f = feats_f[:ref_length_f]
src_feats_f = feats_f[ref_length_f:]
output_dict['ref_feats_f'] = ref_feats_f
output_dict['src_feats_f'] = src_feats_f
# 6. Select topk nearest node correspondences
with torch.no_grad():
ref_node_corr_indices, src_node_corr_indices, node_corr_scores = self.coarse_matching(
ref_feats_c_norm, src_feats_c_norm, ref_points_c, src_points_c, ref_node_masks, src_node_masks
)
output_dict['ref_node_corr_indices'] = ref_node_corr_indices
output_dict['src_node_corr_indices'] = src_node_corr_indices
# 7 Random select ground truth node correspondences during training
if self.training:
ref_node_corr_indices, src_node_corr_indices, node_corr_scores = self.coarse_target(
gt_node_corr_indices, gt_node_corr_overlaps
)
# classification data prepare
classification_one_data_dict = classification_data_prepare(self.training, ref_points_c.shape[0], src_points_c.shape[0], gt_node_corr_overlaps.detach(), gt_node_corr_indices.detach(), output_dict['ref_node_corr_indices'].detach(), output_dict['src_node_corr_indices'].detach(), ref_feats_c_norm.detach(), src_feats_c_norm.detach())
classification_inputs = Variable(classification_one_data_dict['corr_node_feat'],requires_grad=True).to('cuda')
classification_ground_truth = Variable(classification_one_data_dict['ground_truth']).to('cuda')
predict_results = self.classification_model(classification_inputs)
output_dict['predict_results'] = predict_results
output_dict['classification_ground_truth'] = classification_ground_truth.detach()
if not self.training:
# sorted_values, sorted_indices = torch.sort(predict_results.squeeze(),descending=True)
predict_results1 = torch.gt(predict_results.squeeze(), 0.80)
# print("predict_results:",predict_results)
predict_results1 = torch.nonzero(predict_results1).squeeze()
# print("predict_results1:",predict_results1.numel())
ref_node_corr_indices1 = ref_node_corr_indices[predict_results1]
src_node_corr_indices1 = src_node_corr_indices[predict_results1]
output_dict['ref_node_corr_indices'] = ref_node_corr_indices1
output_dict['src_node_corr_indices'] = src_node_corr_indices1
if predict_results1.numel() <= 20:
predict_results2 = torch.gt(predict_results.squeeze(), 0.55)
# print("predict_results:",predict_results)
predict_results2 = torch.nonzero(predict_results2).squeeze()
# print("predict_results:",predict_results)
ref_node_corr_indices2 = ref_node_corr_indices[predict_results2]
src_node_corr_indices2 = src_node_corr_indices[predict_results2]
output_dict['ref_node_corr_indices'] = ref_node_corr_indices2
output_dict['src_node_corr_indices'] = src_node_corr_indices2
ref_node_corr_indices = output_dict['ref_node_corr_indices']
src_node_corr_indices = output_dict['src_node_corr_indices']
# 7.2 Generate batched node points & feats
ref_node_corr_knn_indices = ref_node_knn_indices[ref_node_corr_indices] # (P, K)
src_node_corr_knn_indices = src_node_knn_indices[src_node_corr_indices] # (P, K)
ref_node_corr_knn_masks = ref_node_knn_masks[ref_node_corr_indices] # (P, K)
src_node_corr_knn_masks = src_node_knn_masks[src_node_corr_indices] # (P, K)
ref_node_corr_knn_points = ref_node_knn_points[ref_node_corr_indices] # (P, K, 3)
src_node_corr_knn_points = src_node_knn_points[src_node_corr_indices] # (P, K, 3)
ref_padded_feats_f = torch.cat([ref_feats_f, torch.zeros_like(ref_feats_f[:1])], dim=0)
src_padded_feats_f = torch.cat([src_feats_f, torch.zeros_like(src_feats_f[:1])], dim=0)
ref_node_corr_knn_feats = index_select(ref_padded_feats_f, ref_node_corr_knn_indices, dim=0) # (P, K, C)
src_node_corr_knn_feats = index_select(src_padded_feats_f, src_node_corr_knn_indices, dim=0) # (P, K, C)
output_dict['ref_node_corr_knn_points'] = ref_node_corr_knn_points
output_dict['src_node_corr_knn_points'] = src_node_corr_knn_points
output_dict['ref_node_corr_knn_masks'] = ref_node_corr_knn_masks
output_dict['src_node_corr_knn_masks'] = src_node_corr_knn_masks
# 8. Optimal transport
matching_scores = torch.einsum('bnd,bmd->bnm', ref_node_corr_knn_feats, src_node_corr_knn_feats) # (P, K, K)
matching_scores = matching_scores / feats_f.shape[1] ** 0.5
matching_scores = self.optimal_transport(matching_scores, ref_node_corr_knn_masks, src_node_corr_knn_masks)
output_dict['matching_scores'] = matching_scores
# 9. Generate final correspondences during testing
with torch.no_grad():
if not self.fine_matching.use_dustbin:
matching_scores = matching_scores[:, :-1, :-1]
ref_corr_points, src_corr_points, corr_scores, estimated_transform = self.fine_matching(
ref_node_corr_knn_points,
src_node_corr_knn_points,
ref_node_corr_knn_masks,
src_node_corr_knn_masks,
matching_scores,
node_corr_scores,
)
output_dict['ref_corr_points'] = ref_corr_points
output_dict['src_corr_points'] = src_corr_points
output_dict['corr_scores'] = corr_scores
output_dict['estimated_transform'] = estimated_transform
        output_dict['classification_result'] = 0  # placeholder; classification evaluation is disabled in Evaluator.forward
return output_dict
def classification_data_prepare(mode, ref_length_c, src_length_c, gt_node_corr_overlaps, gt_node_corr_indices, ref_node_corr_indices, src_node_corr_indices, ref_feats_c_norm, src_feats_c_norm):
# ref_length_c = data_dict['ref_points_c'].shape[0]
# src_length_c = data_dict['src_points_c'].shape[0]
# gt_node_corr_overlaps = data_dict['gt_node_corr_overlaps'].data
# gt_node_corr_indices = data_dict['gt_node_corr_indices'].data
one_data = {}
masks = torch.gt(gt_node_corr_overlaps, 0.0)
gt_node_corr_indices = gt_node_corr_indices[masks]
gt_ref_node_corr_indices = gt_node_corr_indices[:, 0]
gt_src_node_corr_indices = gt_node_corr_indices[:, 1]
gt_node_corr_map = torch.zeros(ref_length_c, src_length_c)
gt_node_corr_map[gt_ref_node_corr_indices, gt_src_node_corr_indices] = 1.0
# ref_node_corr_indices = data_dict['ref_node_corr_indices'].data
# src_node_corr_indices = data_dict['src_node_corr_indices'].data
corr_node_ground_truth = gt_node_corr_map[ref_node_corr_indices, src_node_corr_indices]
# if self.mode == 'train':
ground_truth_pos = torch.nonzero(gt_node_corr_map)
ground_truth_neg = torch.nonzero(torch.eq(gt_node_corr_map,0))
pos_index = [i for i in range(ground_truth_pos.shape[0])]
random.shuffle(pos_index)
ground_truth_pos = ground_truth_pos[pos_index[:2500],:]
neg_index = [i for i in range(ground_truth_neg.shape[0])]
random.shuffle(neg_index)
ground_truth_neg = ground_truth_neg[neg_index[:ground_truth_pos.shape[0]],:]
ground_truth_both = torch.cat((ground_truth_pos,ground_truth_neg),dim=0)
random_index = [i for i in range(ground_truth_both.shape[0])]
random.shuffle(random_index)
ground_truth_both = ground_truth_both[random_index]
if mode == 'training':
ref_node_corr_indices = ground_truth_both[:,0]
src_node_corr_indices = ground_truth_both[:,1]
corr_node_ground_truth = gt_node_corr_map[ref_node_corr_indices, src_node_corr_indices]
# ref_feats_c_norm = data_dict['ref_feats_c'].data
# src_feats_c_norm = data_dict['src_feats_c'].data
ref_corr_node_feats = ref_feats_c_norm[ref_node_corr_indices]
src_corr_node_feats = src_feats_c_norm[src_node_corr_indices]
mean = src_corr_node_feats.mean()
var = src_corr_node_feats.var()
src_corr_node_feats = (src_corr_node_feats - mean) / torch.pow(var + 1e-05,0.5)
mean = ref_corr_node_feats.mean()
var = ref_corr_node_feats.var()
ref_corr_node_feats = (ref_corr_node_feats - mean) / torch.pow(var + 1e-05,0.5)
# print("src_corr_node_feate:",src_corr_node_feate.shape)
# print("ref_corr_node_feats:",ref_corr_node_feats.shape)
    # Pack each correspondence's two 256-d node features into a (1, 32, 32)
    # "image" for the ResNet classifier (shapes below inferred from the code).
    corr_node_feat = torch.cat((ref_corr_node_feats.unsqueeze(0).transpose(0,1), src_corr_node_feats.unsqueeze(0).transpose(0,1)), dim=1)  # (N, 2, 256)
    corr_node_feat = corr_node_feat.repeat(1,1,2)  # (N, 2, 512)
    corr_node_feat = corr_node_feat.chunk(16,dim=2)  # 16 chunks of (N, 2, 32)
    # print("corr_node_feat:",corr_node_feat.shape)
    corr_node_feat = torch.cat((corr_node_feat),dim=1)  # (N, 32, 32)
    corr_node_feat = corr_node_feat.unsqueeze(1)  # (N, 1, 32, 32)
one_data['corr_node_feat'] = corr_node_feat
one_data['ground_truth'] = corr_node_ground_truth.unsqueeze(1)
return one_data
def create_model(cfg):
model = GeoTransformer(cfg)
return model
def main():
from config import make_cfg
cfg = make_cfg()
model = create_model(cfg)
print(model.state_dict().keys())
print(model)
if __name__ == '__main__':
main()
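# --- Added note on the eval-time filtering in GeoTransformer.forward (step 7) ---
# The ResNet classifier scores every coarse correspondence; pairs scoring > 0.80
# are kept, and if 20 or fewer survive, the threshold is relaxed to 0.55 before
# fine matching and pose estimation run.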
| 15,470 | 44.236842 | 338 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.kitti.stage5.gse.k3.max.oacl.stage2.sinkhorn/test.py | import argparse
import os.path as osp
import time
import numpy as np
from geotransformer.engine import SingleTester
from geotransformer.utils.common import ensure_dir, get_log_string
from geotransformer.utils.torch import release_cuda
from config import make_cfg
from dataset import test_data_loader
from loss import Evaluator
from model import create_model
class Tester(SingleTester):
def __init__(self, cfg):
super().__init__(cfg)
self.logger.debug('Tester init')
# dataloader
start_time = time.time()
data_loader, neighbor_limits = test_data_loader(cfg)
loading_time = time.time() - start_time
        message = f'Data loader created: {loading_time:.3f}s elapsed.'
self.logger.info(message)
message = f'Calibrate neighbors: {neighbor_limits}.'
self.logger.info(message)
self.register_loader(data_loader)
# model
model = create_model(cfg).cuda()
self.register_model(model)
# evaluator
self.evaluator = Evaluator(cfg).cuda()
# preparation
self.output_dir = osp.join(cfg.feature_dir)
ensure_dir(self.output_dir)
def test_step(self, iteration, data_dict):
output_dict = self.model(data_dict)
return output_dict
def eval_step(self, iteration, data_dict, output_dict):
result_dict = self.evaluator(output_dict, data_dict)
return result_dict
def summary_string(self, iteration, data_dict, output_dict, result_dict):
seq_id = data_dict['seq_id']
# ref_frame = 'top'
# src_frame = 'front'
ref_frame = data_dict['ref_frame']
src_frame = data_dict['src_frame']
message = f'seq_id: {seq_id}, id0: {ref_frame}, id1: {src_frame}'
message += ', ' + get_log_string(result_dict=result_dict)
message += ', nCorr: {}'.format(output_dict['corr_scores'].shape[0])
return message
def after_test_step(self, iteration, data_dict, output_dict, result_dict):
seq_id = data_dict['seq_id']
# ref_frame = 'top'
# src_frame = 'front'
ref_frame = data_dict['ref_frame']
src_frame = data_dict['src_frame']
# self.logger.debug(seq_id)
# self.logger.debug(data_dict['transform'])
# self.logger.debug(output_dict['estimated_transform'])
# self.logger.debug(output_dict['src_points_f'].shape)
# self.logger.debug(output_dict['ref_points_f'].shape)
# self.logger.debug(output_dict['src_feats_f'].shape)
# self.logger.debug(output_dict['ref_feats_f'].shape)
file_name = osp.join(self.output_dir, f'{seq_id}_{src_frame}_{ref_frame}.npz')
# file_name = osp.join(self.output_dir, f'{seq_id[:-2]}.npz')
np.savez_compressed(
file_name,
ref_points=release_cuda(output_dict['ref_points']),
src_points=release_cuda(output_dict['src_points']),
ref_points_f=release_cuda(output_dict['ref_points_f']),
src_points_f=release_cuda(output_dict['src_points_f']),
ref_points_c=release_cuda(output_dict['ref_points_c']),
src_points_c=release_cuda(output_dict['src_points_c']),
ref_feats_c=release_cuda(output_dict['ref_feats_c']),
src_feats_c=release_cuda(output_dict['src_feats_c']),
ref_feats_f=release_cuda(output_dict['ref_feats_f']),
src_feats_f=release_cuda(output_dict['src_feats_f']),
ref_node_corr_indices=release_cuda(output_dict['ref_node_corr_indices']),
src_node_corr_indices=release_cuda(output_dict['src_node_corr_indices']),
ref_corr_points=release_cuda(output_dict['ref_corr_points']),
src_corr_points=release_cuda(output_dict['src_corr_points']),
corr_scores=release_cuda(output_dict['corr_scores']),
gt_node_corr_indices=release_cuda(output_dict['gt_node_corr_indices']),
gt_node_corr_overlaps=release_cuda(output_dict['gt_node_corr_overlaps']),
estimated_transform=release_cuda(output_dict['estimated_transform']),
transform=release_cuda(data_dict['transform']),
)
def main():
cfg = make_cfg()
tester = Tester(cfg)
tester.run()
if __name__ == '__main__':
main()
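# Run (added note): `python test.py` dumps one <seq>_<src>_<ref>.npz per pair
# into cfg.feature_dir for the separate eval.py stage.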
| 4,316 | 38.605505 | 86 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.kitti.stage5.gse.k3.max.oacl.stage2.sinkhorn/trainval.py | import argparse
import time
import torch.optim as optim
from geotransformer.engine import EpochBasedTrainer
from config import make_cfg
from dataset import train_valid_data_loader
from model import create_model
from loss import OverallLoss, Evaluator
class Trainer(EpochBasedTrainer):
def __init__(self, cfg):
super().__init__(cfg, max_epoch=cfg.optim.max_epoch)
# dataloader
start_time = time.time()
train_loader, val_loader, neighbor_limits = train_valid_data_loader(cfg, self.distributed)
loading_time = time.time() - start_time
        message = 'Data loader created: {:.3f}s elapsed.'.format(loading_time)
self.logger.info(message)
message = 'Calibrate neighbors: {}.'.format(neighbor_limits)
self.logger.info(message)
self.register_loader(train_loader, val_loader)
# model, optimizer, scheduler
model = create_model(cfg).cuda()
model = self.register_model(model)
optimizer = optim.Adam(model.parameters(), lr=cfg.optim.lr, weight_decay=cfg.optim.weight_decay)
self.register_optimizer(optimizer)
scheduler = optim.lr_scheduler.StepLR(optimizer, cfg.optim.lr_decay_steps, gamma=cfg.optim.lr_decay)
self.register_scheduler(scheduler)
# loss function, evaluator
self.loss_func = OverallLoss(cfg).cuda()
self.evaluator = Evaluator(cfg).cuda()
def train_step(self, epoch, iteration, data_dict):
output_dict = self.model(data_dict)
loss_dict = self.loss_func(output_dict, data_dict)
result_dict = self.evaluator(output_dict, data_dict)
loss_dict.update(result_dict)
return output_dict, loss_dict
def val_step(self, epoch, iteration, data_dict):
output_dict = self.model(data_dict)
loss_dict = self.loss_func(output_dict, data_dict)
result_dict = self.evaluator(output_dict, data_dict)
loss_dict.update(result_dict)
return output_dict, loss_dict
def main():
cfg = make_cfg()
trainer = Trainer(cfg)
trainer.run()
if __name__ == '__main__':
main()
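# Run (added note): `python trainval.py`; EpochBasedTrainer drives the Adam +
# StepLR schedule configured above for cfg.optim.max_epoch epochs.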
| 2,120 | 32.666667 | 108 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.kitti.stage5.gse.k3.max.oacl.stage2.sinkhorn/classification/__init__.py | 0 | 0 | 0 | py |
|
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/experiments/geotransformer.kitti.stage5.gse.k3.max.oacl.stage2.sinkhorn/classification/classification_model.py | import torch.nn as nn
import torch.nn.functional as F
# define model
class basic_block(nn.Module):
"""基本残差块,由两层卷积构成"""
def __init__(self,in_planes,planes,kernel_size=3,stride=1):
"""
        :param in_planes: input channels
        :param planes: output channels
        :param kernel_size: convolution kernel size
        :param stride: convolution stride
"""
super(basic_block, self).__init__()
self.conv1=nn.Conv2d(in_planes,planes,kernel_size=kernel_size,stride=stride,padding=1,bias=False)
self.bn1=nn.BatchNorm2d(planes)
self.relu=nn.ReLU()
self.conv2=nn.Conv2d(planes,planes,kernel_size=kernel_size,stride=1,padding=1,bias=False)
self.bn2=nn.BatchNorm2d(planes)
if stride!=1 or in_planes!=planes:
self.downsample=nn.Sequential(nn.Conv2d(in_planes,planes,kernel_size=1,stride=stride)
,nn.BatchNorm2d(planes))
else:
self.downsample=nn.Sequential()
def forward(self,inx):
x=self.relu(self.bn1(self.conv1(inx)))
x=self.bn2(self.conv2(x))
out=x+self.downsample(inx)
return F.relu(out)
class Resnet(nn.Module):
def __init__(self,basicBlock,blockNums,nb_classes):
super(Resnet, self).__init__()
self.in_planes=64
        # input (stem) layer
self.conv1=nn.Conv2d(1,self.in_planes,kernel_size=(3,3),stride=(1,1),padding=1,bias=False)
self.bn1=nn.BatchNorm2d(self.in_planes)
self.relu=nn.ReLU(inplace=True)
self.maxpool=nn.MaxPool2d(kernel_size=3,stride=2,padding=1)
self.layer1=self._make_layers(basicBlock,blockNums[0],64,1)
self.layer2=self._make_layers(basicBlock,blockNums[1],128,2)
self.layer3=self._make_layers(basicBlock,blockNums[2],256,2)
self.layer4=self._make_layers(basicBlock,blockNums[3],512,2)
self.avgpool=nn.AdaptiveAvgPool2d(output_size=(1,1))
self.fc=nn.Linear(512,nb_classes)
def _make_layers(self,basicBlock,blockNum,plane,stride):
"""
        :param basicBlock: basic residual block class
        :param blockNum: number of residual blocks in this layer (2 per layer for ResNet-18)
        :param plane: output channels
        :param stride: convolution stride
:return:
"""
layers=[]
for i in range(blockNum):
if i==0:
layer=basicBlock(self.in_planes,plane,3,stride=stride)
else:
layer=basicBlock(plane,plane,3,stride=1)
layers.append(layer)
self.in_planes=plane
return nn.Sequential(*layers)
def forward(self,inx):
x=self.maxpool(self.relu(self.bn1(self.conv1(inx))))
x=self.layer1(x)
x=self.layer2(x)
x=self.layer3(x)
x=self.layer4(x)
x=self.avgpool(x)
x=x.view(x.shape[0],-1)
out=self.fc(x)
return out
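# --- Hedged usage sketch (added) ---
# Resnet(basic_block, [2, 2, 2, 2], nb_classes) mirrors the ResNet-18 layout.
# classification_model below wraps it with a 256 -> 128 -> 1 sigmoid head; the
# (N, 1, 32, 32) input shape is inferred from classification_data_prepare() in
# model.py, e.g. (requires `import torch`):
# scores = classification_model()(torch.randn(8, 1, 32, 32))  # -> (8, 1) in [0, 1]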
class classification_model(nn.Module):
def __init__(self):
super(classification_model, self).__init__()
self.resnet18=Resnet(basic_block,[2,2,2,2],256)
self.linear1 = nn.Linear(256, 128)
self.linear2 = nn.Linear(128, 1)
self.activate1 = nn.ReLU()
self.dropout1 = nn.Dropout(p=0.5)
self.activate2 = nn.Sigmoid()
def forward(self, x):
x = self.resnet18(x)
x = self.linear1(x)
x = self.activate1(x)
x = self.dropout1(x)
x = self.linear2(x)
x = self.activate2(x)
return x | 3,374 | 33.438776 | 105 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/__init__.py | 0 | 0 | 0 | py |
|
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/datasets/__init__.py | 0 | 0 | 0 | py |
|
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/datasets/registration/__init__.py | 0 | 0 | 0 | py |
|
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/datasets/registration/CROON/__init__.py | from geotransformer.datasets.registration.CROON.dataset import Lidar2LidarDataset
__all__ = [
'Lidar2LidarDataset',
]
| 124 | 16.857143 | 81 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/datasets/registration/CROON/dataset.py | import os.path as osp
import random
import csv
import pandas as pd
from secrets import choice
# from mathutils import Matrix, Vector, Quaternion, Euler
import numpy as np
import torch.utils.data
import open3d as o3d
from geotransformer.utils.common import load_pickle
from geotransformer.utils.pointcloud import (
random_sample_rotation,
get_transform_from_rotation_translation,
get_rotation_translation_from_transform,
eulerAnglesToRotationMatrix,
)
from geotransformer.utils.registration import get_correspondences
from geotransformer.modules.ops import (
apply_transform,
inverse_transform,
)
class Lidar2LidarDataset(torch.utils.data.Dataset):
def __init__(
self,
dataset_root,
subset,
point_limit=None,
use_augmentation=False,
augmentation_noise=0.005,
augmentation_min_scale=0.8,
augmentation_max_scale=1.2,
augmentation_shift=2.0,
augmentation_rotation=1.0,
return_corr_indices=False,
matching_radius=None,
max_t=10,
max_r=180.,
):
super(Lidar2LidarDataset, self).__init__()
        self.dataset_root = '/mnt/lustre/weipengjin/geotransformer/data/road_scene'  # NOTE: hard-coded path; the dataset_root argument above is ignored
self.subset = subset
self.point_limit = point_limit
self.use_augmentation = use_augmentation
self.augmentation_noise = augmentation_noise
self.augmentation_min_scale = augmentation_min_scale
self.augmentation_max_scale = augmentation_max_scale
self.augmentation_shift = augmentation_shift
self.augmentation_rotation = augmentation_rotation
self.return_corr_indices = return_corr_indices
self.matching_radius = matching_radius
self.max_r = max_r
self.max_t = max_t
data_file = open('../../data/road_scene/metadata/train.txt','r')
self.data_train = data_file.readlines()
data_file = open('../../data/road_scene/metadata/val.txt','r')
self.data_val = data_file.readlines()
data_file = open('../../data/road_scene/metadata/test.txt','r')
self.data_test = data_file.readlines()
self.lidar_type = 'back'
if self.return_corr_indices and self.matching_radius is None:
raise ValueError('"matching_radius" is None but "return_corr_indices" is set.')
# self.metadata = load_pickle(osp.join(self.dataset_root, 'metadata', f'{subset}.pkl'))
# load or genetate RT matrix
if subset == 'train':
self.train_RT = []
train_RT_file = osp.join(self.dataset_root, 'metadata',
f'Lidar2Lidar_{subset}_RT_{max_r:.2f}_{max_t:.2f}.csv')
if osp.exists(train_RT_file):
print(f'TRAIN SET: Using this file: {train_RT_file}')
df_train_RT = pd.read_csv(train_RT_file, sep=',')
for index, row in df_train_RT.iterrows():
self.train_RT.append(list(row))
else:
print(f'TRAIN SET - Not found: {train_RT_file}')
print("Generating a new one")
train_RT_file = open(train_RT_file, 'w')
train_RT_file = csv.writer(train_RT_file, delimiter=',')
train_RT_file.writerow(['id', 'pair', 'Yaw', 'Pitch', 'Roll', 'tx', 'ty', 'tz', 'classify_label'])
for i in range(len(self.data_train)):
                    data_seed = random.randint(1,10)  # intended: seeds > 5 give failed registrations (negatives), <= 5 successes (positives); the > 10 check below never fires, so all samples are labeled positive
if data_seed > 10 :
max_angle = self.max_r
rotz = np.random.uniform(*(choice([(-max_angle,-5),(5,max_angle)]))) * (3.141592 / 180.0)
roty = np.random.uniform(*(choice([(-max_angle,-5),(5,max_angle)]))) * (3.141592 / 180.0)
rotx = np.random.uniform(*(choice([(-max_angle,-5),(5,max_angle)]))) * (3.141592 / 180.0)
transl_x = np.random.uniform(*(choice([(-self.max_t,-2),(2,self.max_t)])))
transl_y = np.random.uniform(*(choice([(-self.max_t,-2),(2,self.max_t)])))
transl_z = np.random.uniform(*(choice([(-self.max_t,-2),(2,self.max_t)])))
label = 0
else:
rotz = np.random.uniform(-180, 180) * (3.141592 / 180.0)
roty = np.random.uniform(-180, 180) * (3.141592 / 180.0)
rotx = np.random.uniform(-180, 180) * (3.141592 / 180.0)
transl_x = np.random.uniform(-10, 10)
transl_y = np.random.uniform(-10, 10)
transl_z = np.random.uniform(-10, 10)
label = 1
train_RT_file.writerow([str(self.data_train[i][:6]),str(self.data_train[i][:-1]), rotx, roty, rotz, transl_x, transl_y, transl_z, label])
self.train_RT.append([str(self.data_train[i][:6]), str(self.data_train[i][:-1]), float(rotx), float(roty), float(rotz), float(transl_x), float(transl_y), float(transl_z), int(label)])
assert len(self.train_RT) == len(self.data_train), "Something wrong with train RTs"
self.metadata = self.data_train
self.val_RT = []
if subset == 'val':
# val_RT_file = os.path.join(dataset_dir, 'sequences',
# f'val_RT_seq{val_sequence}_{max_r:.2f}_{max_t:.2f}.csv')
val_RT_file = osp.join(self.dataset_root, 'metadata',
f'Lidar2Lidar_{subset}_RT_{max_r:.2f}_{max_t:.2f}.csv')
if osp.exists(val_RT_file):
print(f'{subset} SET: Using this file: {val_RT_file}')
df_test_RT = pd.read_csv(val_RT_file, sep=',')
for index, row in df_test_RT.iterrows():
self.val_RT.append(list(row))
else:
print(f'{subset} SET - Not found: {val_RT_file}')
print("Generating a new one")
val_RT_file = open(val_RT_file, 'w')
val_RT_file = csv.writer(val_RT_file, delimiter=',')
val_RT_file.writerow(['id', 'pair', 'Yaw', 'Pitch', 'Roll', 'tx', 'ty', 'tz', 'classify_label'])
for i in range(len(self.data_val)):
                    data_seed = random.randint(1,10)  # intended: seeds > 5 give failed registrations (negatives), <= 5 successes (positives); the > 10 check below never fires, so all samples are labeled positive
if data_seed > 10 :
max_angle = self.max_r
rotz = np.random.uniform(*(choice([(-max_angle,-5),(5,max_angle)]))) * (3.141592 / 180.0)
roty = np.random.uniform(*(choice([(-max_angle,-5),(5,max_angle)]))) * (3.141592 / 180.0)
rotx = np.random.uniform(*(choice([(-max_angle,-5),(5,max_angle)]))) * (3.141592 / 180.0)
transl_x = np.random.uniform(*(choice([(-self.max_t,-2),(2,self.max_t)])))
transl_y = np.random.uniform(*(choice([(-self.max_t,-2),(2,self.max_t)])))
transl_z = np.random.uniform(*(choice([(-self.max_t,-2),(2,self.max_t)])))
label = 0
else:
rotz = np.random.uniform(-180, 180) * (3.141592 / 180.0)
roty = np.random.uniform(-180, 180) * (3.141592 / 180.0)
rotx = np.random.uniform(-180, 180) * (3.141592 / 180.0)
transl_x = np.random.uniform(-10, 10)
transl_y = np.random.uniform(-10, 10)
transl_z = np.random.uniform(-10, 10)
label = 1
val_RT_file.writerow([str(self.data_val[i][:6]),str(self.data_val[i][:-1]), rotx, roty, rotz, transl_x, transl_y, transl_z, label])
self.val_RT.append([str(self.data_val[i][:6]), str(self.data_val[i][:-1]), float(rotx), float(roty), float(rotz), float(transl_x), float(transl_y), float(transl_z), int(label)])
assert len(self.val_RT) == len(self.data_val), "Something wrong with test RTs"
self.metadata = self.data_val
if subset == 'test':
self.test_RT = []
test_RT_file = osp.join(self.dataset_root, 'metadata',
f'Lidar2Lidar_{subset}_RT_{max_r:.2f}_{max_t:.2f}.csv')
if osp.exists(test_RT_file):
print(f'TEST SET: Using this file: {test_RT_file}')
df_test_RT = pd.read_csv(test_RT_file, sep=',')
for index, row in df_test_RT.iterrows():
self.test_RT.append(list(row))
else:
print(f'TEST SET - Not found: {test_RT_file}')
print("Generating a new one")
test_RT_file = open(test_RT_file, 'w')
test_RT_file = csv.writer(test_RT_file, delimiter=',')
test_RT_file.writerow(['id', 'pair', 'Yaw', 'Pitch', 'Roll', 'tx', 'ty', 'tz', 'classify_label'])
for i in range(len(self.data_test)):
                    data_seed = random.randint(1,10)  # intended: seeds > 5 give failed registrations (negatives), <= 5 successes (positives); the > 10 check below never fires, so all samples are labeled positive
if data_seed > 10 :
max_angle = self.max_r
rotz = np.random.uniform(*(choice([(-max_angle,-5),(5,max_angle)]))) * (3.141592 / 180.0)
roty = np.random.uniform(*(choice([(-max_angle,-5),(5,max_angle)]))) * (3.141592 / 180.0)
rotx = np.random.uniform(*(choice([(-max_angle,-5),(5,max_angle)]))) * (3.141592 / 180.0)
transl_x = np.random.uniform(*(choice([(-self.max_t,-2),(2,self.max_t)])))
transl_y = np.random.uniform(*(choice([(-self.max_t,-2),(2,self.max_t)])))
transl_z = np.random.uniform(*(choice([(-self.max_t,-2),(2,self.max_t)])))
label = 0
else:
rotz = np.random.uniform(-180, 180) * (3.141592 / 180.0)
roty = np.random.uniform(-180, 180) * (3.141592 / 180.0)
rotx = np.random.uniform(-180, 180) * (3.141592 / 180.0)
transl_x = np.random.uniform(-10, 10)
transl_y = np.random.uniform(-10, 10)
transl_z = np.random.uniform(-10, 10)
label = 1
test_RT_file.writerow([str(self.data_test[i][:6]),str(self.data_test[i][:-1]), rotx, roty, rotz, transl_x, transl_y, transl_z, label])
self.test_RT.append([str(self.data_test[i][:6]), str(self.data_test[i][:-1]), float(rotx), float(roty), float(rotz), float(transl_x), float(transl_y), float(transl_z), int(label)])
assert len(self.test_RT) == len(self.data_test), "Something wrong with train RTs"
self.metadata = self.data_test
def _augment_point_cloud(self, ref_points, src_points, transform):
rotation, translation = get_rotation_translation_from_transform(transform)
# add gaussian noise
ref_points = ref_points + (np.random.rand(ref_points.shape[0], 3) - 0.5) * self.augmentation_noise
src_points = src_points + (np.random.rand(src_points.shape[0], 3) - 0.5) * self.augmentation_noise
# random rotation
aug_rotation = random_sample_rotation(self.augmentation_rotation)
if random.random() > 0.5:
ref_points = np.matmul(ref_points, aug_rotation.T)
rotation = np.matmul(aug_rotation, rotation)
translation = np.matmul(aug_rotation, translation)
else:
src_points = np.matmul(src_points, aug_rotation.T)
rotation = np.matmul(rotation, aug_rotation.T)
# random scaling
scale = random.random()
scale = self.augmentation_min_scale + (self.augmentation_max_scale - self.augmentation_min_scale) * scale
ref_points = ref_points * scale
src_points = src_points * scale
translation = translation * scale
# random shift
ref_shift = np.random.uniform(-self.augmentation_shift, self.augmentation_shift, 3)
src_shift = np.random.uniform(-self.augmentation_shift, self.augmentation_shift, 3)
ref_points = ref_points + ref_shift
src_points = src_points + src_shift
translation = -np.matmul(src_shift[None, :], rotation.T) + translation + ref_shift
# compose transform from rotation and translation
transform = get_transform_from_rotation_translation(rotation, translation)
return ref_points, src_points, transform
def _load_point_cloud(self, file_name):
# points = np.load(file_name)
pcd = o3d.io.read_point_cloud(file_name)
points = np.asarray(pcd.points)
if self.point_limit is not None and points.shape[0] > self.point_limit:
indices = np.random.permutation(points.shape[0])[: self.point_limit]
points = points[indices]
        # Drop points inside the unit cube around the sensor and points outside
        # a +/-25 m box. The original per-point loop repeated the y-axis bound
        # where z was intended and skipped index 0 in the reverse delete; the
        # vectorized mask below fixes both while keeping the intent.
        near_mask = np.all(np.abs(points) < 1, axis=1)
        far_mask = np.any(np.abs(points) > 25, axis=1)
        points = points[~(near_mask | far_mask)]
return points
def __getitem__(self, index):
data_dict = {}
data_name = self.metadata[index]
data_order_number = data_name.split("-")[0]
self.lidar_type = data_name.split("-")[2][:-1]
data_dict['seq_id'] = str(data_name)
data_dict['ref_pcd'] = '../../data/road_scene/unreal_world/'+data_order_number+'/top-'+data_order_number+'.pcd'
data_dict['src_pcd'] = '../../data/road_scene/unreal_world/'+data_order_number+'/'+self.lidar_type+'-'+data_order_number+'.pcd'
# gt_R = o3d.geometry.get_rotation_matrix_from_zyx([gt_roll/180*3.1415926535, gt_yaw/180*3.1415926535 , gt_pitch/180*3.1415926535])
if str(self.lidar_type) == "front":
# front
gt_R = o3d.geometry.get_rotation_matrix_from_zyx([0/180*3.1415926535, 48/180*3.1415926535 , 0/180*3.1415926535])
gt_T = np.array([2.9, 0.15, -1.195])
elif self.lidar_type == 'back':
# back
gt_R = o3d.geometry.get_rotation_matrix_from_zyx([180/180*3.1415926535, 48/180*3.1415926535 , 0/180*3.1415926535])
gt_T = np.array([-1.8, 0, -0.82])
elif self.lidar_type == 'left':
# left
gt_R = o3d.geometry.get_rotation_matrix_from_zyx([-90/180*3.1415926535, 48/180*3.1415926535 , 0/180*3.1415926535])
gt_T = np.array([0.25, -0.85, -0.595])
elif self.lidar_type == 'right':
# right
gt_R = o3d.geometry.get_rotation_matrix_from_zyx([90/180*3.1415926535, 48/180*3.1415926535 , 0/180*3.1415926535])
gt_T = np.array([0.25, 0.85, -0.595])
gt_R = gt_R[np.newaxis,:]
gt_T = gt_T[np.newaxis,:]
gt_R = torch.tensor(gt_R)
gt_T = torch.tensor(gt_T)
gt_RT = get_transform_from_rotation_translation(gt_R, gt_T)
gt_RT = torch.tensor(gt_RT).double()
transform = gt_RT
ref_points = self._load_point_cloud(osp.join(self.dataset_root, data_dict['ref_pcd']))
src_points = self._load_point_cloud(osp.join(self.dataset_root, data_dict['src_pcd']))
# 1.apply transform
if self.subset == 'train':
initial_RT = self.train_RT[index]
rotz = initial_RT[7]
roty = initial_RT[6]
rotx = initial_RT[5]
transl_x = initial_RT[2]
transl_y = initial_RT[3]
transl_z = initial_RT[4]
label = initial_RT[8]
if self.subset == 'val':
initial_RT = self.val_RT[index]
rotz = initial_RT[7]
roty = initial_RT[6]
rotx = initial_RT[5]
transl_x = initial_RT[2]
transl_y = initial_RT[3]
transl_z = initial_RT[4]
label = initial_RT[8]
if self.subset == 'test':
initial_RT = self.test_RT[index]
rotz = initial_RT[7]
roty = initial_RT[6]
rotx = initial_RT[5]
transl_x = initial_RT[2]
transl_y = initial_RT[3]
transl_z = initial_RT[4]
label = initial_RT[8]
# 2. get rotation translation and matrix
R = eulerAnglesToRotationMatrix([rotx, roty, rotz])
T = np.array([transl_x, transl_y, transl_z])
R = R[np.newaxis,:]
T = T[np.newaxis,:]
R = torch.tensor(R)
T = torch.tensor(T)
RT = get_transform_from_rotation_translation(R, T)
RT = torch.tensor(RT).double()
# 3.apply "transform" matrix to registration two points cloud
src_points = torch.tensor(src_points).double()
src_points = apply_transform(src_points, transform)
# new_src_points = src_points.numpy()
# new_ref_points = ref_points
# new_calibrated = np.concatenate((new_src_points,new_ref_points),axis=0)
# pcd_src = o3d.geometry.PointCloud()
# pcd_src.points = o3d.utility.Vector3dVector(new_src_points)
# pcd_src.paint_uniform_color([0, 0, 1.0])
# pcd_ref = o3d.geometry.PointCloud()
# pcd_ref.points = o3d.utility.Vector3dVector(new_ref_points)
# pcd_ref.paint_uniform_color([0, 1, 0.0])
# pcd = o3d.geometry.PointCloud()
# # pcd.points = o3d.utility.Vector3dVector(new_calibrated)
# pcd += pcd_src
# pcd += pcd_ref
# o3d.io.write_point_cloud('/Download/GeoTransformer/GeoTransformer/data/Lidar2Lidar/metadata/results/wrong_'+data_name[:-1]+'.pcd', pcd)
# print("data_dict['src_pcd']:",data_dict['src_pcd'])
        # 4. generate a new (decalibrated) pose for the src point cloud
RT_inv = inverse_transform(RT).double()
src_points = apply_transform(src_points, RT_inv)
        transform = np.array(RT)
        src_points = np.array(src_points)
if self.use_augmentation:
ref_points, src_points, transform = self._augment_point_cloud(ref_points, src_points, transform)
if self.return_corr_indices:
corr_indices = get_correspondences(ref_points, src_points, transform, self.matching_radius)
data_dict['corr_indices'] = corr_indices
data_dict['ref_points'] = ref_points.astype(np.float32)
data_dict['src_points'] = src_points.astype(np.float32)
data_dict['ref_feats'] = np.ones((ref_points.shape[0], 1), dtype=np.float32)
data_dict['src_feats'] = np.ones((src_points.shape[0], 1), dtype=np.float32)
data_dict['transform'] = transform.astype(np.float32)
data_dict['classification_label'] = label
return data_dict
def __len__(self):
return len(self.metadata)
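    # Note on the flow above: gt_RT first registers the src sensor onto the
    # top (ref) sensor; the random RT read from {train,val,test}_RT then
    # decalibrates src via RT^-1, so the returned data_dict['transform'] (= RT)
    # maps the perturbed src cloud back onto the ref cloud.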
| 19,246 | 51.018919 | 203 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/datasets/registration/kitti/__init__.py | from geotransformer.datasets.registration.kitti.dataset import OdometryKittiPairDataset
__all__ = [
'OdometryKittiPairDataset',
]
| 136 | 18.571429 | 87 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/datasets/registration/kitti/dataset.py | import os.path as osp
import random
# from mathutils import Matrix, Vector, Quaternion, Euler
import numpy as np
import torch.utils.data
import open3d as o3d
from geotransformer.utils.common import load_pickle
from geotransformer.utils.pointcloud import (
random_sample_rotation,
get_transform_from_rotation_translation,
get_rotation_translation_from_transform,
eulerAnglesToRotationMatrix,
)
from geotransformer.utils.registration import get_correspondences
from geotransformer.modules.ops import (
apply_transform,
inverse_transform,
)
class OdometryKittiPairDataset(torch.utils.data.Dataset):
ODOMETRY_KITTI_DATA_SPLIT = {
'train': ['00', '01', '02', '03', '04', '05'],
'val': ['06', '07'],
'test': ['08', '09', '10'],
}
def __init__(
self,
dataset_root,
subset,
point_limit=None,
use_augmentation=False,
augmentation_noise=0.005,
augmentation_min_scale=0.8,
augmentation_max_scale=1.2,
augmentation_shift=2.0,
augmentation_rotation=1.0,
return_corr_indices=False,
matching_radius=None,
max_t=4,
max_r=20.,
):
super(OdometryKittiPairDataset, self).__init__()
self.dataset_root = dataset_root
self.subset = subset
self.point_limit = point_limit
self.use_augmentation = use_augmentation
self.augmentation_noise = augmentation_noise
self.augmentation_min_scale = augmentation_min_scale
self.augmentation_max_scale = augmentation_max_scale
self.augmentation_shift = augmentation_shift
self.augmentation_rotation = augmentation_rotation
self.return_corr_indices = return_corr_indices
self.matching_radius = matching_radius
self.max_r = max_r
self.max_t = max_t
if self.return_corr_indices and self.matching_radius is None:
raise ValueError('"matching_radius" is None but "return_corr_indices" is set.')
self.metadata = load_pickle(osp.join(self.dataset_root, 'metadata', f'{subset}.pkl'))
def _augment_point_cloud(self, ref_points, src_points, transform):
rotation, translation = get_rotation_translation_from_transform(transform)
        # add small uniform noise (np.random.rand is uniform on [0, 1))
ref_points = ref_points + (np.random.rand(ref_points.shape[0], 3) - 0.5) * self.augmentation_noise
src_points = src_points + (np.random.rand(src_points.shape[0], 3) - 0.5) * self.augmentation_noise
# random rotation
aug_rotation = random_sample_rotation(self.augmentation_rotation)
if random.random() > 0.5:
ref_points = np.matmul(ref_points, aug_rotation.T)
rotation = np.matmul(aug_rotation, rotation)
translation = np.matmul(aug_rotation, translation)
else:
src_points = np.matmul(src_points, aug_rotation.T)
rotation = np.matmul(rotation, aug_rotation.T)
# random scaling
scale = random.random()
scale = self.augmentation_min_scale + (self.augmentation_max_scale - self.augmentation_min_scale) * scale
ref_points = ref_points * scale
src_points = src_points * scale
translation = translation * scale
# random shift
ref_shift = np.random.uniform(-self.augmentation_shift, self.augmentation_shift, 3)
src_shift = np.random.uniform(-self.augmentation_shift, self.augmentation_shift, 3)
ref_points = ref_points + ref_shift
src_points = src_points + src_shift
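        # with ref = R @ src + t, shifting src by s and ref by r updates the
        # translation to t' = t + r - R @ s, computed on the next line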
translation = -np.matmul(src_shift[None, :], rotation.T) + translation + ref_shift
# compose transform from rotation and translation
transform = get_transform_from_rotation_translation(rotation, translation)
return ref_points, src_points, transform
def _load_point_cloud(self, file_name):
points = np.load(file_name)
if self.point_limit is not None and points.shape[0] > self.point_limit:
indices = np.random.permutation(points.shape[0])[: self.point_limit]
points = points[indices]
return points
def __getitem__(self, index):
data_dict = {}
metadata = self.metadata[index]
data_dict['seq_id'] = metadata['seq_id']
data_dict['ref_frame'] = metadata['frame0']
data_dict['src_frame'] = metadata['frame1']
ref_points = self._load_point_cloud(osp.join(self.dataset_root, metadata['pcd0']))
src_points = self._load_point_cloud(osp.join(self.dataset_root, metadata['pcd1']))
transform = metadata['transform']
if self.use_augmentation:
ref_points, src_points, transform = self._augment_point_cloud(ref_points, src_points, transform)
if self.return_corr_indices:
corr_indices = get_correspondences(ref_points, src_points, transform, self.matching_radius)
data_dict['corr_indices'] = corr_indices
data_dict['ref_points'] = ref_points.astype(np.float32)
data_dict['src_points'] = src_points.astype(np.float32)
data_dict['ref_feats'] = np.ones((ref_points.shape[0], 1), dtype=np.float32)
data_dict['src_feats'] = np.ones((src_points.shape[0], 1), dtype=np.float32)
data_dict['transform'] = transform.astype(np.float32)
# data_dict['classification_label'] = label
return data_dict
def __len__(self):
return len(self.metadata)
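# Minimal usage sketch (the dataset_root below is hypothetical; it must contain
# 'metadata/{subset}.pkl' as loaded in __init__ above):
#
#   dataset = OdometryKittiPairDataset('data/kitti', 'train', point_limit=30000)
#   sample = dataset[0]
#   sample['transform']  # 4x4 ground-truth transform from src to ref frame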
| 5,521 | 38.726619 | 113 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/datasets/registration/modelnet/__init__.py | from geotransformer.datasets.registration.modelnet.dataset import ModelNetPairDataset
| 86 | 42.5 | 85 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/datasets/registration/modelnet/dataset.py | import os.path as osp
from typing import Dict, Optional
import numpy as np
import torch.utils.data
import open3d as o3d
from IPython import embed
from geotransformer.utils.common import load_pickle
from geotransformer.utils.pointcloud import random_sample_transform, apply_transform, inverse_transform, regularize_normals
from geotransformer.utils.registration import compute_overlap
from geotransformer.utils.open3d import estimate_normals, voxel_downsample
from geotransformer.transforms.functional import (
normalize_points,
random_jitter_points,
random_shuffle_points,
random_sample_points,
random_crop_point_cloud_with_plane,
random_sample_viewpoint,
random_crop_point_cloud_with_point,
)
class ModelNetPairDataset(torch.utils.data.Dataset):
# fmt: off
ALL_CATEGORIES = [
'airplane', 'bathtub', 'bed', 'bench', 'bookshelf', 'bottle', 'bowl', 'car', 'chair', 'cone', 'cup', 'curtain',
'desk', 'door', 'dresser', 'flower_pot', 'glass_box', 'guitar', 'keyboard', 'lamp', 'laptop', 'mantel',
'monitor', 'night_stand', 'person', 'piano', 'plant', 'radio', 'range_hood', 'sink', 'sofa', 'stairs', 'stool',
'table', 'tent', 'toilet', 'tv_stand', 'vase', 'wardrobe', 'xbox'
]
ASYMMETRIC_CATEGORIES = [
'airplane', 'bathtub', 'bed', 'bench', 'bookshelf', 'car', 'chair', 'curtain', 'desk', 'door', 'dresser',
'glass_box', 'guitar', 'keyboard', 'laptop', 'mantel', 'monitor', 'night_stand', 'person', 'piano', 'plant',
'radio', 'range_hood', 'sink', 'sofa', 'stairs', 'stool', 'table', 'toilet', 'tv_stand', 'wardrobe', 'xbox'
]
ASYMMETRIC_INDICES = [
0, 1, 2, 3, 4, 7, 8, 11, 12, 13, 14, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 35, 36,
38, 39
]
# fmt: on
def __init__(
self,
dataset_root: str,
subset: str,
num_points: int = 1024,
voxel_size: Optional[float] = None,
rotation_magnitude: float = 45.0,
translation_magnitude: float = 0.5,
noise_magnitude: Optional[float] = None,
keep_ratio: float = 0.7,
crop_method: str = 'plane',
asymmetric: bool = True,
class_indices: str = 'all',
deterministic: bool = False,
twice_sample: bool = False,
twice_transform: bool = False,
return_normals: bool = True,
return_occupancy: bool = False,
min_overlap: Optional[float] = None,
max_overlap: Optional[float] = None,
estimate_normal: bool = False,
overfitting_index: Optional[int] = None,
):
super(ModelNetPairDataset, self).__init__()
assert subset in ['train', 'val', 'test']
assert crop_method in ['plane', 'point']
self.dataset_root = dataset_root
self.subset = subset
self.num_points = num_points
self.voxel_size = voxel_size
self.rotation_magnitude = rotation_magnitude
self.translation_magnitude = translation_magnitude
self.noise_magnitude = noise_magnitude
self.keep_ratio = keep_ratio
self.crop_method = crop_method
self.asymmetric = asymmetric
self.class_indices = self.get_class_indices(class_indices, asymmetric)
self.deterministic = deterministic
self.twice_sample = twice_sample
self.twice_transform = twice_transform
self.return_normals = return_normals
self.return_occupancy = return_occupancy
self.min_overlap = min_overlap
self.max_overlap = max_overlap
self.check_overlap = self.min_overlap is not None or self.max_overlap is not None
self.estimate_normal = estimate_normal
self.overfitting_index = overfitting_index
data_list = load_pickle(osp.join(dataset_root, f'{subset}.pkl'))
data_list = [x for x in data_list if x['label'] in self.class_indices]
if overfitting_index is not None and deterministic:
data_list = [data_list[overfitting_index]]
self.data_list = data_list
def get_class_indices(self, class_indices, asymmetric):
r"""Generate class indices.
'all' -> all 40 classes.
'seen' -> first 20 classes.
'unseen' -> last 20 classes.
list|tuple -> unchanged.
asymmetric -> remove symmetric classes.
"""
if isinstance(class_indices, str):
assert class_indices in ['all', 'seen', 'unseen']
if class_indices == 'all':
class_indices = list(range(40))
elif class_indices == 'seen':
class_indices = list(range(20))
else:
class_indices = list(range(20, 40))
if asymmetric:
class_indices = [x for x in class_indices if x in self.ASYMMETRIC_INDICES]
return class_indices
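    # e.g. get_class_indices('seen', asymmetric=True) keeps only the
    # asymmetric categories among classes 0-19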
def __getitem__(self, index):
if self.overfitting_index is not None:
index = self.overfitting_index
data_dict: Dict = self.data_list[index]
raw_points = data_dict['points'].copy()
raw_normals = data_dict['normals'].copy()
label = data_dict['label']
# set deterministic
if self.deterministic:
np.random.seed(index)
# normalize raw point cloud
raw_points = normalize_points(raw_points)
# once sample on raw point cloud
if not self.twice_sample:
raw_points, raw_normals = random_sample_points(raw_points, self.num_points, normals=raw_normals)
# split reference and source point cloud
ref_points = raw_points.copy()
ref_normals = raw_normals.copy()
# twice transform
if self.twice_transform:
transform = random_sample_transform(self.rotation_magnitude, self.translation_magnitude)
ref_points, ref_normals = apply_transform(ref_points, transform, normals=ref_normals)
src_points = ref_points.copy()
src_normals = ref_normals.copy()
# random transform to source point cloud
transform = random_sample_transform(self.rotation_magnitude, self.translation_magnitude)
inv_transform = inverse_transform(transform)
src_points, src_normals = apply_transform(src_points, inv_transform, normals=src_normals)
raw_ref_points = ref_points
raw_ref_normals = ref_normals
raw_src_points = src_points
raw_src_normals = src_normals
while True:
ref_points = raw_ref_points
ref_normals = raw_ref_normals
src_points = raw_src_points
src_normals = raw_src_normals
# crop
if self.keep_ratio is not None:
if self.crop_method == 'plane':
ref_points, ref_normals = random_crop_point_cloud_with_plane(
ref_points, keep_ratio=self.keep_ratio, normals=ref_normals
)
src_points, src_normals = random_crop_point_cloud_with_plane(
src_points, keep_ratio=self.keep_ratio, normals=src_normals
)
else:
viewpoint = random_sample_viewpoint()
ref_points, ref_normals = random_crop_point_cloud_with_point(
ref_points, viewpoint=viewpoint, keep_ratio=self.keep_ratio, normals=ref_normals
)
src_points, src_normals = random_crop_point_cloud_with_point(
src_points, viewpoint=viewpoint, keep_ratio=self.keep_ratio, normals=src_normals
)
# data check
is_available = True
# check overlap
if self.check_overlap:
overlap = compute_overlap(ref_points, src_points, transform, positive_radius=0.05)
if self.min_overlap is not None:
is_available = is_available and overlap >= self.min_overlap
if self.max_overlap is not None:
is_available = is_available and overlap <= self.max_overlap
if is_available:
break
if self.twice_sample:
# twice sample on both point clouds
ref_points, ref_normals = random_sample_points(ref_points, self.num_points, normals=ref_normals)
src_points, src_normals = random_sample_points(src_points, self.num_points, normals=src_normals)
# random jitter
if self.noise_magnitude is not None:
ref_points = random_jitter_points(ref_points, scale=0.01, noise_magnitude=self.noise_magnitude)
src_points = random_jitter_points(src_points, scale=0.01, noise_magnitude=self.noise_magnitude)
# random shuffle
ref_points, ref_normals = random_shuffle_points(ref_points, normals=ref_normals)
src_points, src_normals = random_shuffle_points(src_points, normals=src_normals)
if self.voxel_size is not None:
# voxel downsample reference point cloud
ref_points, ref_normals = voxel_downsample(ref_points, self.voxel_size, normals=ref_normals)
src_points, src_normals = voxel_downsample(src_points, self.voxel_size, normals=src_normals)
new_data_dict = {
'raw_points': raw_points.astype(np.float32),
'ref_points': ref_points.astype(np.float32),
'src_points': src_points.astype(np.float32),
'transform': transform.astype(np.float32),
'label': int(label),
'index': int(index),
}
if self.estimate_normal:
ref_normals = estimate_normals(ref_points)
ref_normals = regularize_normals(ref_points, ref_normals)
src_normals = estimate_normals(src_points)
src_normals = regularize_normals(src_points, src_normals)
if self.return_normals:
new_data_dict['raw_normals'] = raw_normals.astype(np.float32)
new_data_dict['ref_normals'] = ref_normals.astype(np.float32)
new_data_dict['src_normals'] = src_normals.astype(np.float32)
if self.return_occupancy:
new_data_dict['ref_feats'] = np.ones_like(ref_points[:, :1]).astype(np.float32)
new_data_dict['src_feats'] = np.ones_like(src_points[:, :1]).astype(np.float32)
return new_data_dict
def __len__(self):
return len(self.data_list)
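# Minimal usage sketch (hypothetical dataset_root; '{subset}.pkl' with
# 'points'/'normals'/'label' entries must exist under it, see __init__):
#
#   dataset = ModelNetPairDataset('data/modelnet40', 'train', num_points=1024)
#   pair = dataset[0]
#   pair['transform']  # 4x4 transform aligning src_points to ref_points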
| 10,405 | 41.647541 | 123 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/datasets/registration/threedmatch/__init__.py | from geotransformer.datasets.registration.threedmatch.dataset import ThreeDMatchPairDataset
# from geotransformer.datasets.registration.threedmatch.dataset_minkowski import ThreeDMatchPairMinkowskiDataset
__all__ = [
'ThreeDMatchPairDataset',
# 'ThreeDMatchPairMinkowskiDataset',
]
| 292 | 31.555556 | 112 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/datasets/registration/threedmatch/dataset.py | import os.path as osp
import pickle
import random
from typing import Dict
import numpy as np
import torch
import torch.utils.data
from geotransformer.utils.pointcloud import (
random_sample_rotation,
random_sample_rotation_v2,
get_transform_from_rotation_translation,
)
from geotransformer.utils.registration import get_correspondences
class ThreeDMatchPairDataset(torch.utils.data.Dataset):
def __init__(
self,
dataset_root,
subset,
point_limit=None,
use_augmentation=False,
augmentation_noise=0.005,
augmentation_rotation=1,
overlap_threshold=None,
return_corr_indices=False,
matching_radius=None,
rotated=False,
feature_dir=None,
):
super(ThreeDMatchPairDataset, self).__init__()
self.dataset_root = dataset_root
self.metadata_root = osp.join(self.dataset_root, 'metadata')
self.data_root = osp.join(self.dataset_root, 'data')
self.subset = subset
self.point_limit = point_limit
self.overlap_threshold = overlap_threshold
self.rotated = rotated
self.return_corr_indices = return_corr_indices
self.matching_radius = matching_radius
if self.return_corr_indices and self.matching_radius is None:
raise ValueError('"matching_radius" is None but "return_corr_indices" is set.')
self.use_augmentation = use_augmentation
self.aug_noise = augmentation_noise
self.aug_rotation = augmentation_rotation
with open(osp.join(self.metadata_root, f'{subset}.pkl'), 'rb') as f:
self.metadata_list = pickle.load(f)
if self.overlap_threshold is not None:
self.metadata_list = [x for x in self.metadata_list if x['overlap'] > self.overlap_threshold]
def __len__(self):
return len(self.metadata_list)
def _load_point_cloud(self, file_name):
points = torch.load(osp.join(self.data_root, file_name))
# NOTE: setting "point_limit" with "num_workers" > 1 will cause nondeterminism.
if self.point_limit is not None and points.shape[0] > self.point_limit:
indices = np.random.permutation(points.shape[0])[: self.point_limit]
points = points[indices]
return points
def _augment_point_cloud(self, ref_points, src_points, rotation, translation):
r"""Augment point clouds.
ref_points = src_points @ rotation.T + translation
1. Random rotation to one point cloud.
2. Random noise.
"""
aug_rotation = random_sample_rotation(self.aug_rotation)
if random.random() > 0.5:
ref_points = np.matmul(ref_points, aug_rotation.T)
rotation = np.matmul(aug_rotation, rotation)
translation = np.matmul(aug_rotation, translation)
else:
src_points = np.matmul(src_points, aug_rotation.T)
rotation = np.matmul(rotation, aug_rotation.T)
ref_points += (np.random.rand(ref_points.shape[0], 3) - 0.5) * self.aug_noise
src_points += (np.random.rand(src_points.shape[0], 3) - 0.5) * self.aug_noise
return ref_points, src_points, rotation, translation
def __getitem__(self, index):
data_dict = {}
# metadata
metadata: Dict = self.metadata_list[index]
data_dict['scene_name'] = metadata['scene_name']
data_dict['ref_frame'] = metadata['frag_id0']
data_dict['src_frame'] = metadata['frag_id1']
data_dict['overlap'] = metadata['overlap']
# get transformation
rotation = metadata['rotation']
translation = metadata['translation']
# get point cloud
ref_points = self._load_point_cloud(metadata['pcd0'])
src_points = self._load_point_cloud(metadata['pcd1'])
# augmentation
if self.use_augmentation:
ref_points, src_points, rotation, translation = self._augment_point_cloud(
ref_points, src_points, rotation, translation
)
if self.rotated:
ref_rotation = random_sample_rotation_v2()
ref_points = np.matmul(ref_points, ref_rotation.T)
rotation = np.matmul(ref_rotation, rotation)
translation = np.matmul(ref_rotation, translation)
src_rotation = random_sample_rotation_v2()
src_points = np.matmul(src_points, src_rotation.T)
rotation = np.matmul(rotation, src_rotation.T)
transform = get_transform_from_rotation_translation(rotation, translation)
# get correspondences
if self.return_corr_indices:
corr_indices = get_correspondences(ref_points, src_points, transform, self.matching_radius)
data_dict['corr_indices'] = corr_indices
data_dict['ref_points'] = ref_points.astype(np.float32)
data_dict['src_points'] = src_points.astype(np.float32)
data_dict['ref_feats'] = np.ones((ref_points.shape[0], 1), dtype=np.float32)
data_dict['src_feats'] = np.ones((src_points.shape[0], 1), dtype=np.float32)
data_dict['transform'] = transform.astype(np.float32)
return data_dict
| 5,212 | 36.503597 | 109 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/datasets/registration/threedmatch/utils.py | import os.path as osp
import numpy as np
from nibabel import quaternions as nq
from geotransformer.utils.common import ensure_dir
from geotransformer.utils.summary_board import SummaryBoard
from geotransformer.utils.pointcloud import (
apply_transform,
get_rotation_translation_from_transform,
get_nearest_neighbor,
)
from geotransformer.utils.registration import compute_overlap, compute_registration_error
_scene_name_to_num_fragments = {
'7-scenes-redkitchen': 60,
'sun3d-home_at-home_at_scan1_2013_jan_1': 60,
'sun3d-home_md-home_md_scan9_2012_sep_30': 60,
'sun3d-hotel_uc-scan3': 55,
'sun3d-hotel_umd-maryland_hotel1': 57,
'sun3d-hotel_umd-maryland_hotel3': 37,
'sun3d-mit_76_studyroom-76-1studyroom2': 66,
'sun3d-mit_lab_hj-lab_hj_tea_nov_2_2012_scan1_erika': 38,
}
_scene_name_to_abbr = {
'7-scenes-redkitchen': 'Kitchen',
'sun3d-home_at-home_at_scan1_2013_jan_1': 'Home_1',
'sun3d-home_md-home_md_scan9_2012_sep_30': 'Home_2',
'sun3d-hotel_uc-scan3': 'Hotel_1',
'sun3d-hotel_umd-maryland_hotel1': 'Hotel_2',
'sun3d-hotel_umd-maryland_hotel3': 'Hotel_3',
'sun3d-mit_76_studyroom-76-1studyroom2': 'Study',
'sun3d-mit_lab_hj-lab_hj_tea_nov_2_2012_scan1_erika': 'MIT_Lab',
}
def get_num_fragments(scene_name):
if scene_name not in _scene_name_to_num_fragments:
raise ValueError('Unsupported test scene name "{}".'.format(scene_name))
return _scene_name_to_num_fragments[scene_name]
def get_scene_abbr(scene_name):
if scene_name not in _scene_name_to_abbr:
return scene_name
else:
return _scene_name_to_abbr[scene_name]
def read_pose_file(file_name):
with open(file_name, 'r') as f:
lines = f.readlines()
lines = lines[1:]
pose = []
for line in lines:
pose_row = [float(x) for x in line.strip().split()]
pose.append(pose_row)
pose = np.stack(pose, axis=0)
return pose
def read_log_file(file_name):
with open(file_name) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
test_pairs = []
num_pairs = len(lines) // 5
for i in range(num_pairs):
line_id = i * 5
split_line = lines[line_id].split()
test_pair = [int(split_line[0]), int(split_line[1])]
num_fragments = int(split_line[2])
transform = []
for j in range(1, 5):
transform.append(lines[line_id + j].split())
# transform is the pose from test_pair[1] to test_pair[0]
transform = np.array(transform, dtype=np.float32)
test_pairs.append(dict(test_pair=test_pair, num_fragments=num_fragments, transform=transform))
return test_pairs
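# Each record of a 3DMatch-style .log file spans 5 lines: a header
# "frag_id0 frag_id1 num_fragments" followed by the 4 rows of the 4x4
# transform taking fragment frag_id1 onto fragment frag_id0.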
def read_info_file(file_name):
with open(file_name) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
test_pairs = []
num_pairs = len(lines) // 7
for i in range(num_pairs):
line_id = i * 7
split_line = lines[line_id].split()
test_pair = [int(split_line[0]), int(split_line[1])]
num_fragments = int(split_line[2])
info = []
for j in range(1, 7):
info.append(lines[line_id + j].split())
info = np.array(info, dtype=np.float32)
test_pairs.append(dict(test_pair=test_pair, num_fragments=num_fragments, covariance=info))
return test_pairs
def write_log_file(file_name, test_pairs):
ensure_dir(osp.dirname(file_name))
lines = []
for test_pair in test_pairs:
frag_id0, frag_id1 = test_pair['test_pair']
lines.append('{}\t{}\t{}\n'.format(frag_id0, frag_id1, test_pair['num_fragments']))
rows = test_pair['transform'].tolist()
for row in rows:
lines.append('{}\t{}\t{}\t{}\n'.format(row[0], row[1], row[2], row[3]))
with open(file_name, 'w') as f:
f.writelines(lines)
def get_gt_logs_and_infos(gt_root, num_fragments):
gt_logs = read_log_file(osp.join(gt_root, 'gt.log'))
gt_infos = read_info_file(osp.join(gt_root, 'gt.info'))
gt_indices = -np.ones((num_fragments, num_fragments), dtype=np.int32)
for i, gt_log in enumerate(gt_logs):
frag_id0, frag_id1 = gt_log['test_pair']
if frag_id1 > frag_id0 + 1:
gt_indices[frag_id0, frag_id1] = i
return gt_indices, gt_logs, gt_infos
def compute_transform_error(transform, covariance, estimated_transform):
relative_transform = np.matmul(np.linalg.inv(transform), estimated_transform)
R, t = get_rotation_translation_from_transform(relative_transform)
q = nq.mat2quat(R)
er = np.concatenate([t, q[1:]], axis=0)
p = er.reshape(1, 6) @ covariance @ er.reshape(6, 1) / covariance[0, 0]
return p.item()
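# The returned value is the covariance-weighted squared norm of the 6-DoF
# residual e = [t, q_xyz] (translation plus quaternion imaginary part),
# normalised by covariance[0, 0]; callers accept a pair when it is at most
# positive_threshold ** 2.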
def evaluate_registration_one_scene(gt_log_file, gt_info_file, result_file, positive_threshold=0.2):
registration_meter = SummaryBoard(['rre', 'rte'])
gt_logs = read_log_file(gt_log_file)
gt_infos = read_info_file(gt_info_file)
result_logs = read_log_file(result_file)
num_fragments = gt_logs[0]['num_fragments']
num_pos_pairs = 0
num_gt_pairs = 0
num_pred_pairs = 0
gt_indices = -np.ones((num_fragments, num_fragments), dtype=np.int32)
for i, gt_log in enumerate(gt_logs):
frag_id0, frag_id1 = gt_log['test_pair']
if frag_id1 > frag_id0 + 1:
gt_indices[frag_id0, frag_id1] = i
num_gt_pairs += 1
errors = []
for result_log in result_logs:
frag_id0, frag_id1 = result_log['test_pair']
estimated_transform = result_log['transform']
if gt_indices[frag_id0, frag_id1] != -1:
num_pred_pairs += 1
gt_index = gt_indices[frag_id0, frag_id1]
transform = gt_logs[gt_index]['transform']
covariance = gt_infos[gt_index]['covariance']
assert gt_infos[gt_index]['test_pair'][0] == frag_id0 and gt_infos[gt_index]['test_pair'][1] == frag_id1
error = compute_transform_error(transform, covariance, estimated_transform)
errors.append({'id0': frag_id0, 'id1': frag_id1, 'error': error})
if error <= positive_threshold ** 2:
num_pos_pairs += 1
rre, rte = compute_registration_error(transform, estimated_transform)
registration_meter.update('rre', rre)
registration_meter.update('rte', rte)
precision = num_pos_pairs / num_pred_pairs if num_pred_pairs > 0 else 0
recall = num_pos_pairs / num_gt_pairs
return {
'precision': precision,
'recall': recall,
'mean_rre': registration_meter.mean('rre'),
'mean_rte': registration_meter.mean('rte'),
'median_rre': registration_meter.median('rre'),
'median_rte': registration_meter.median('rte'),
'num_pos_pairs': num_pos_pairs,
'num_pred_pairs': num_pred_pairs,
'num_gt_pairs': num_gt_pairs,
'errors': errors,
}
def calibrate_ground_truth(ref_pcd, src_pcd, transform, voxel_size=0.006):
ref_pcd = ref_pcd.voxel_down_sample(0.01)
src_pcd = src_pcd.voxel_down_sample(0.01)
ref_points = np.asarray(ref_pcd.points)
src_points = np.asarray(src_pcd.points)
# compute overlap
overlap = compute_overlap(ref_points, src_points, transform, positive_radius=voxel_size * 5)
# compute info
src_points = apply_transform(src_points, transform)
nn_distances, nn_indices = get_nearest_neighbor(ref_points, src_points, return_index=True)
nn_indices = nn_indices[nn_distances < voxel_size]
if nn_indices.shape[0] > 5000:
nn_indices = np.random.choice(nn_indices, 5000, replace=False)
src_corr_points = src_points[nn_indices]
if src_corr_points.shape[0] > 0:
g = np.zeros([src_corr_points.shape[0], 3, 6])
g[:, :3, :3] = np.eye(3)
g[:, 0, 4] = src_corr_points[:, 2]
g[:, 0, 5] = -src_corr_points[:, 1]
g[:, 1, 3] = -src_corr_points[:, 2]
g[:, 1, 5] = src_corr_points[:, 0]
g[:, 2, 3] = src_corr_points[:, 1]
g[:, 2, 4] = -src_corr_points[:, 0]
gt = g.transpose([0, 2, 1])
gtg = np.matmul(gt, g)
cov_matrix = gtg.sum(0)
else:
cov_matrix = np.zeros((6, 6))
return overlap, cov_matrix
| 8,248 | 35.021834 | 116 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/engine/__init__.py | from geotransformer.engine.epoch_based_trainer import EpochBasedTrainer
from geotransformer.engine.iter_based_trainer import IterBasedTrainer
from geotransformer.engine.single_tester import SingleTester
from geotransformer.engine.logger import Logger
| 251 | 49.4 | 71 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/engine/base_tester.py | import sys
import argparse
import os.path as osp
import time
import json
import abc
import torch
import ipdb
from geotransformer.utils.torch import initialize
from geotransformer.engine.logger import Logger
def inject_default_parser(parser=None):
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument('--snapshot', default=None, help='load from snapshot')
parser.add_argument('--test_epoch', type=str, default=None, help='test epoch')
parser.add_argument('--test_iter', type=int, default=None, help='test iteration')
return parser
class BaseTester(abc.ABC):
def __init__(self, cfg, parser=None, cudnn_deterministic=True):
# parser
parser = inject_default_parser(parser)
self.args = parser.parse_args()
# logger
log_file = osp.join(cfg.log_dir, 'test-{}.log'.format(time.strftime('%Y%m%d-%H%M%S')))
self.logger = Logger(log_file=log_file)
# command executed
message = 'Command executed: ' + ' '.join(sys.argv)
self.logger.info(message)
self.logger.debug('BaseTester init')
# find snapshot
if self.args.snapshot is None:
if self.args.test_epoch is not None:
self.args.snapshot = osp.join(cfg.snapshot_dir, 'epoch-{}.pth.tar'.format(self.args.test_epoch))
elif self.args.test_iter is not None:
self.args.snapshot = osp.join(cfg.snapshot_dir, 'iter-{}.pth.tar'.format(self.args.test_iter))
if self.args.snapshot is None:
raise RuntimeError('Snapshot is not specified.')
# print config
message = 'Configs:\n' + json.dumps(cfg, indent=4)
self.logger.info(message)
# cuda and distributed
if not torch.cuda.is_available():
raise RuntimeError('No CUDA devices available.')
self.cudnn_deterministic = cudnn_deterministic
self.seed = cfg.seed
initialize(seed=self.seed, cudnn_deterministic=self.cudnn_deterministic)
# state
self.model = None
self.iteration = None
self.test_loader = None
self.saved_states = {}
def load_snapshot(self, snapshot):
self.logger.info('Loading from "{}".'.format(snapshot))
state_dict = torch.load(snapshot, map_location=torch.device('cpu'))
assert 'model' in state_dict, 'No model can be loaded.'
self.model.load_state_dict(state_dict['model'], strict=True)
self.logger.info('Model has been loaded.')
def register_model(self, model):
r"""Register model. DDP is automatically used."""
self.model = model
message = 'Model description:\n' + str(model)
self.logger.info(message)
return model
def register_loader(self, test_loader):
r"""Register data loader."""
self.test_loader = test_loader
@abc.abstractmethod
def run(self):
        raise NotImplementedError
| 2,952 | 32.556818 | 112 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/engine/base_trainer.py | import sys
import argparse
import os.path as osp
import time
import json
import abc
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
import ipdb
from geotransformer.utils.summary_board import SummaryBoard
from geotransformer.utils.timer import Timer
from geotransformer.utils.torch import all_reduce_tensors, release_cuda, initialize
from geotransformer.engine.logger import Logger
def inject_default_parser(parser=None):
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument('--resume', action='store_true', help='resume training')
parser.add_argument('--snapshot', default=None, help='load from snapshot')
parser.add_argument('--epoch', type=int, default=None, help='load epoch')
parser.add_argument('--log_steps', type=int, default=10, help='logging steps')
parser.add_argument('--local_rank', type=int, default=-1, help='local rank for ddp')
return parser
class BaseTrainer(abc.ABC):
def __init__(
self,
cfg,
parser=None,
cudnn_deterministic=True,
autograd_anomaly_detection=False,
save_all_snapshots=True,
run_grad_check=False,
grad_acc_steps=1,
):
# parser
parser = inject_default_parser(parser)
self.args = parser.parse_args()
# logger
log_file = osp.join(cfg.log_dir, 'train-{}.log'.format(time.strftime('%Y%m%d-%H%M%S')))
self.logger = Logger(log_file=log_file, local_rank=self.args.local_rank)
# command executed
message = 'Command executed: ' + ' '.join(sys.argv)
self.logger.info(message)
# print config
message = 'Configs:\n' + json.dumps(cfg, indent=4)
self.logger.info(message)
# tensorboard
self.writer = SummaryWriter(log_dir=cfg.event_dir)
self.logger.info(f'Tensorboard is enabled. Write events to {cfg.event_dir}.')
# cuda and distributed
if not torch.cuda.is_available():
raise RuntimeError('No CUDA devices available.')
self.distributed = self.args.local_rank != -1
if self.distributed:
torch.cuda.set_device(self.args.local_rank)
dist.init_process_group(backend='nccl')
self.world_size = dist.get_world_size()
self.local_rank = self.args.local_rank
self.logger.info(f'Using DistributedDataParallel mode (world_size: {self.world_size})')
else:
if torch.cuda.device_count() > 1:
self.logger.warning('DataParallel is deprecated. Use DistributedDataParallel instead.')
self.world_size = 1
self.local_rank = 0
self.logger.info('Using Single-GPU mode.')
self.cudnn_deterministic = cudnn_deterministic
self.autograd_anomaly_detection = autograd_anomaly_detection
self.seed = cfg.seed + self.local_rank
initialize(
seed=self.seed,
cudnn_deterministic=self.cudnn_deterministic,
autograd_anomaly_detection=self.autograd_anomaly_detection,
)
# basic config
self.snapshot_dir = cfg.snapshot_dir
self.log_steps = self.args.log_steps
self.run_grad_check = run_grad_check
self.save_all_snapshots = save_all_snapshots
# state
self.model = None
self.optimizer = None
self.scheduler = None
self.epoch = 0
self.iteration = 0
self.inner_iteration = 0
self.train_loader = None
self.val_loader = None
self.summary_board = SummaryBoard(last_n=self.log_steps, adaptive=True)
self.timer = Timer()
self.saved_states = {}
# training config
self.training = True
self.grad_acc_steps = grad_acc_steps
def save_snapshot(self, filename):
if self.local_rank != 0:
return
model_state_dict = self.model.state_dict()
# Remove '.module' prefix in DistributedDataParallel mode.
if self.distributed:
model_state_dict = OrderedDict([(key[7:], value) for key, value in model_state_dict.items()])
# save model
filename = osp.join(self.snapshot_dir, filename)
state_dict = {
'epoch': self.epoch,
'iteration': self.iteration,
'model': model_state_dict,
}
torch.save(state_dict, filename)
self.logger.info('Model saved to "{}"'.format(filename))
# save snapshot
snapshot_filename = osp.join(self.snapshot_dir, 'snapshot.pth.tar')
state_dict['optimizer'] = self.optimizer.state_dict()
if self.scheduler is not None:
state_dict['scheduler'] = self.scheduler.state_dict()
torch.save(state_dict, snapshot_filename)
self.logger.info('Snapshot saved to "{}"'.format(snapshot_filename))
def load_snapshot(self, snapshot, fix_prefix=True):
self.logger.info('Loading from "{}".'.format(snapshot))
state_dict = torch.load(snapshot, map_location=torch.device('cpu'))
# Load model
model_dict = state_dict['model']
if fix_prefix and self.distributed:
model_dict = OrderedDict([('module.' + key, value) for key, value in model_dict.items()])
self.model.load_state_dict(model_dict, strict=False)
# log missing keys and unexpected keys
snapshot_keys = set(model_dict.keys())
model_keys = set(self.model.state_dict().keys())
missing_keys = model_keys - snapshot_keys
unexpected_keys = snapshot_keys - model_keys
if self.distributed:
missing_keys = set([missing_key[7:] for missing_key in missing_keys])
unexpected_keys = set([unexpected_key[7:] for unexpected_key in unexpected_keys])
if len(missing_keys) > 0:
message = f'Missing keys: {missing_keys}'
self.logger.warning(message)
if len(unexpected_keys) > 0:
message = f'Unexpected keys: {unexpected_keys}'
self.logger.warning(message)
self.logger.info('Model has been loaded.')
# Load other attributes
if 'epoch' in state_dict:
self.epoch = state_dict['epoch']
self.logger.info('Epoch has been loaded: {}.'.format(self.epoch))
if 'iteration' in state_dict:
self.iteration = state_dict['iteration']
self.logger.info('Iteration has been loaded: {}.'.format(self.iteration))
if 'optimizer' in state_dict and self.optimizer is not None:
self.optimizer.load_state_dict(state_dict['optimizer'])
self.logger.info('Optimizer has been loaded.')
if 'scheduler' in state_dict and self.scheduler is not None:
self.scheduler.load_state_dict(state_dict['scheduler'])
self.logger.info('Scheduler has been loaded.')
def register_model(self, model):
r"""Register model. DDP is automatically used."""
if self.distributed:
local_rank = self.local_rank
model = nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
self.model = model
message = 'Model description:\n' + str(model)
self.logger.info(message)
return model
def register_optimizer(self, optimizer):
r"""Register optimizer. DDP is automatically used."""
if self.distributed:
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * self.world_size
self.optimizer = optimizer
def register_scheduler(self, scheduler):
r"""Register LR scheduler."""
self.scheduler = scheduler
def register_loader(self, train_loader, val_loader):
r"""Register data loader."""
self.train_loader = train_loader
self.val_loader = val_loader
def get_lr(self):
return self.optimizer.param_groups[0]['lr']
def optimizer_step(self, iteration):
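        # gradient accumulation: backward() runs every iteration, but the
        # optimizer only steps (and clears the grads) every grad_acc_steps calls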
if iteration % self.grad_acc_steps == 0:
self.optimizer.step()
self.optimizer.zero_grad()
def save_state(self, key, value):
self.saved_states[key] = release_cuda(value)
def read_state(self, key):
return self.saved_states[key]
def check_invalid_gradients(self):
        for param in self.model.parameters():
            if param.grad is None:
                # parameter did not receive a gradient this step
                continue
            if torch.isnan(param.grad).any():
                self.logger.error('NaN in gradients.')
                return False
            if torch.isinf(param.grad).any():
                self.logger.error('Inf in gradients.')
                return False
        return True
def release_tensors(self, result_dict):
r"""All reduce and release tensors."""
if self.distributed:
result_dict = all_reduce_tensors(result_dict, world_size=self.world_size)
result_dict = release_cuda(result_dict)
return result_dict
def set_train_mode(self):
self.training = True
self.model.train()
torch.set_grad_enabled(True)
def set_eval_mode(self):
self.training = False
self.model.eval()
torch.set_grad_enabled(False)
def write_event(self, phase, event_dict, index):
r"""Write TensorBoard event."""
if self.local_rank != 0:
return
for key, value in event_dict.items():
self.writer.add_scalar(f'{phase}/{key}', value, index)
@abc.abstractmethod
def run(self):
        raise NotImplementedError
| 9,586 | 36.449219 | 113 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/engine/epoch_based_trainer.py | import os
import os.path as osp
from typing import Tuple, Dict
import ipdb
import torch
import tqdm
from geotransformer.engine.base_trainer import BaseTrainer
from geotransformer.utils.torch import to_cuda
from geotransformer.utils.summary_board import SummaryBoard
from geotransformer.utils.timer import Timer
from geotransformer.utils.common import get_log_string
class EpochBasedTrainer(BaseTrainer):
def __init__(
self,
cfg,
max_epoch,
parser=None,
cudnn_deterministic=True,
autograd_anomaly_detection=False,
save_all_snapshots=True,
run_grad_check=False,
grad_acc_steps=1,
):
super().__init__(
cfg,
parser=parser,
cudnn_deterministic=cudnn_deterministic,
autograd_anomaly_detection=autograd_anomaly_detection,
save_all_snapshots=save_all_snapshots,
run_grad_check=run_grad_check,
grad_acc_steps=grad_acc_steps,
)
self.max_epoch = max_epoch
def before_train_step(self, epoch, iteration, data_dict) -> None:
pass
def before_val_step(self, epoch, iteration, data_dict) -> None:
pass
def after_train_step(self, epoch, iteration, data_dict, output_dict, result_dict) -> None:
pass
def after_val_step(self, epoch, iteration, data_dict, output_dict, result_dict) -> None:
pass
def before_train_epoch(self, epoch) -> None:
pass
def before_val_epoch(self, epoch) -> None:
pass
def after_train_epoch(self, epoch) -> None:
pass
def after_val_epoch(self, epoch) -> None:
pass
def train_step(self, epoch, iteration, data_dict) -> Tuple[Dict, Dict]:
pass
def val_step(self, epoch, iteration, data_dict) -> Tuple[Dict, Dict]:
pass
def after_backward(self, epoch, iteration, data_dict, output_dict, result_dict) -> None:
pass
def check_gradients(self, epoch, iteration, data_dict, output_dict, result_dict):
if not self.run_grad_check:
return
if not self.check_invalid_gradients():
self.logger.error('Epoch: {}, iter: {}, invalid gradients.'.format(epoch, iteration))
torch.save(data_dict, 'data.pth')
torch.save(self.model, 'model.pth')
self.logger.error('Data_dict and model snapshot saved.')
ipdb.set_trace()
def train_epoch(self):
if self.distributed:
self.train_loader.sampler.set_epoch(self.epoch)
self.before_train_epoch(self.epoch)
self.optimizer.zero_grad()
total_iterations = len(self.train_loader)
for iteration, data_dict in enumerate(self.train_loader):
self.inner_iteration = iteration + 1
self.iteration += 1
data_dict = to_cuda(data_dict)
self.before_train_step(self.epoch, self.inner_iteration, data_dict)
self.timer.add_prepare_time()
# forward
output_dict, result_dict = self.train_step(self.epoch, self.inner_iteration, data_dict)
# backward & optimization
result_dict['loss'].backward()
self.after_backward(self.epoch, self.inner_iteration, data_dict, output_dict, result_dict)
self.check_gradients(self.epoch, self.inner_iteration, data_dict, output_dict, result_dict)
self.optimizer_step(self.inner_iteration)
# after training
self.timer.add_process_time()
self.after_train_step(self.epoch, self.inner_iteration, data_dict, output_dict, result_dict)
result_dict = self.release_tensors(result_dict)
self.summary_board.update_from_result_dict(result_dict)
# logging
if self.inner_iteration % self.log_steps == 0:
summary_dict = self.summary_board.summary()
message = get_log_string(
result_dict=summary_dict,
epoch=self.epoch,
max_epoch=self.max_epoch,
iteration=self.inner_iteration,
max_iteration=total_iterations,
lr=self.get_lr(),
timer=self.timer,
)
self.logger.info(message)
self.write_event('train', summary_dict, self.iteration)
torch.cuda.empty_cache()
self.after_train_epoch(self.epoch)
message = get_log_string(self.summary_board.summary(), epoch=self.epoch, timer=self.timer)
self.logger.critical(message)
# scheduler
if self.scheduler is not None:
self.scheduler.step()
# snapshot
self.save_snapshot(f'epoch-{self.epoch}.pth.tar')
if not self.save_all_snapshots:
            last_snapshot = osp.join(self.snapshot_dir, f'epoch-{self.epoch - 1}.pth.tar')
            if osp.exists(last_snapshot):
                os.remove(last_snapshot)
def inference_epoch(self):
self.set_eval_mode()
self.before_val_epoch(self.epoch)
summary_board = SummaryBoard(adaptive=True)
timer = Timer()
total_iterations = len(self.val_loader)
pbar = tqdm.tqdm(enumerate(self.val_loader), total=total_iterations)
for iteration, data_dict in pbar:
self.inner_iteration = iteration + 1
data_dict = to_cuda(data_dict)
self.before_val_step(self.epoch, self.inner_iteration, data_dict)
timer.add_prepare_time()
output_dict, result_dict = self.val_step(self.epoch, self.inner_iteration, data_dict)
torch.cuda.synchronize()
timer.add_process_time()
self.after_val_step(self.epoch, self.inner_iteration, data_dict, output_dict, result_dict)
result_dict = self.release_tensors(result_dict)
summary_board.update_from_result_dict(result_dict)
message = get_log_string(
result_dict=summary_board.summary(),
epoch=self.epoch,
iteration=self.inner_iteration,
max_iteration=total_iterations,
timer=timer,
)
pbar.set_description(message)
torch.cuda.empty_cache()
self.after_val_epoch(self.epoch)
summary_dict = summary_board.summary()
message = '[Val] ' + get_log_string(summary_dict, epoch=self.epoch, timer=timer)
self.logger.critical(message)
self.write_event('val', summary_dict, self.epoch)
self.set_train_mode()
def run(self):
assert self.train_loader is not None
assert self.val_loader is not None
if self.args.resume:
self.load_snapshot(osp.join(self.snapshot_dir, 'snapshot.pth.tar'))
elif self.args.snapshot is not None:
self.load_snapshot(self.args.snapshot)
self.set_train_mode()
# state_dict = torch.load('/dssg/home/acct-eeyj/eeyj-user1/WPJ/GeoTransformer-nodecorr-classification3/GeoTransformer/weights/geotransformer-3dmatch.pth.tar', map_location=torch.device('cpu'))
# model_dict = state_dict['model']
# self.model.load_state_dict(model_dict, strict=False)
while self.epoch < self.max_epoch:
self.epoch += 1
self.train_epoch()
self.inference_epoch()
| 7,356 | 38.767568 | 200 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/engine/iter_based_trainer.py | import os
import os.path as osp
from typing import Tuple, Dict
import ipdb
import torch
import tqdm
from IPython import embed
from geotransformer.engine.base_trainer import BaseTrainer
from geotransformer.utils.torch import to_cuda
from geotransformer.utils.summary_board import SummaryBoard
from geotransformer.utils.timer import Timer
from geotransformer.utils.common import get_log_string
class CycleLoader(object):
def __init__(self, data_loader, epoch, distributed):
self.data_loader = data_loader
self.last_epoch = epoch
self.distributed = distributed
self.iterator = self.initialize_iterator()
def initialize_iterator(self):
if self.distributed:
self.data_loader.sampler.set_epoch(self.last_epoch + 1)
return iter(self.data_loader)
def __next__(self):
try:
data_dict = next(self.iterator)
except StopIteration:
self.last_epoch += 1
self.iterator = self.initialize_iterator()
data_dict = next(self.iterator)
return data_dict
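# CycleLoader wraps an epoch-based DataLoader into an endless iterator for
# iteration-based training; in distributed mode it re-seeds the sampler each
# time the loader is exhausted so shuffling differs between passes.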
class IterBasedTrainer(BaseTrainer):
def __init__(
self,
cfg,
max_iteration,
snapshot_steps,
parser=None,
cudnn_deterministic=True,
autograd_anomaly_detection=False,
save_all_snapshots=True,
run_grad_check=False,
grad_acc_steps=1,
):
super().__init__(
cfg,
parser=parser,
cudnn_deterministic=cudnn_deterministic,
autograd_anomaly_detection=autograd_anomaly_detection,
save_all_snapshots=save_all_snapshots,
run_grad_check=run_grad_check,
grad_acc_steps=grad_acc_steps,
)
self.max_iteration = max_iteration
self.snapshot_steps = snapshot_steps
def before_train(self) -> None:
pass
def after_train(self) -> None:
pass
def before_val(self) -> None:
pass
def after_val(self) -> None:
pass
def before_train_step(self, iteration, data_dict) -> None:
pass
def before_val_step(self, iteration, data_dict) -> None:
pass
def after_train_step(self, iteration, data_dict, output_dict, result_dict) -> None:
pass
def after_val_step(self, iteration, data_dict, output_dict, result_dict) -> None:
pass
def train_step(self, iteration, data_dict) -> Tuple[Dict, Dict]:
pass
def val_step(self, iteration, data_dict) -> Tuple[Dict, Dict]:
pass
def after_backward(self, iteration, data_dict, output_dict, result_dict) -> None:
pass
def check_gradients(self, iteration, data_dict, output_dict, result_dict):
if not self.run_grad_check:
return
if not self.check_invalid_gradients():
self.logger.error('Iter: {}, invalid gradients.'.format(iteration))
torch.save(data_dict, 'data.pth')
torch.save(self.model, 'model.pth')
self.logger.error('Data_dict and model snapshot saved.')
ipdb.set_trace()
def inference(self):
self.set_eval_mode()
self.before_val()
summary_board = SummaryBoard(adaptive=True)
timer = Timer()
total_iterations = len(self.val_loader)
pbar = tqdm.tqdm(enumerate(self.val_loader), total=total_iterations)
for iteration, data_dict in pbar:
self.inner_iteration = iteration + 1
data_dict = to_cuda(data_dict)
self.before_val_step(self.inner_iteration, data_dict)
timer.add_prepare_time()
output_dict, result_dict = self.val_step(self.inner_iteration, data_dict)
timer.add_process_time()
self.after_val_step(self.inner_iteration, data_dict, output_dict, result_dict)
result_dict = self.release_tensors(result_dict)
summary_board.update_from_result_dict(result_dict)
message = get_log_string(
result_dict=summary_board.summary(),
iteration=self.inner_iteration,
max_iteration=total_iterations,
timer=timer,
)
pbar.set_description(message)
torch.cuda.empty_cache()
self.after_val()
summary_dict = summary_board.summary()
message = '[Val] ' + get_log_string(summary_dict, iteration=self.iteration, timer=timer)
self.logger.critical(message)
self.write_event('val', summary_dict, self.iteration // self.snapshot_steps)
self.set_train_mode()
def run(self):
assert self.train_loader is not None
assert self.val_loader is not None
if self.args.resume:
self.load_snapshot(osp.join(self.snapshot_dir, 'snapshot.pth.tar'))
elif self.args.snapshot is not None:
self.load_snapshot(self.args.snapshot)
self.set_train_mode()
self.summary_board.reset_all()
self.timer.reset()
train_loader = CycleLoader(self.train_loader, self.epoch, self.distributed)
self.before_train()
self.optimizer.zero_grad()
while self.iteration < self.max_iteration:
self.iteration += 1
data_dict = next(train_loader)
data_dict = to_cuda(data_dict)
self.before_train_step(self.iteration, data_dict)
self.timer.add_prepare_time()
# forward
output_dict, result_dict = self.train_step(self.iteration, data_dict)
# backward & optimization
result_dict['loss'].backward()
self.after_backward(self.iteration, data_dict, output_dict, result_dict)
self.check_gradients(self.iteration, data_dict, output_dict, result_dict)
self.optimizer_step(self.iteration)
# after training
self.timer.add_process_time()
self.after_train_step(self.iteration, data_dict, output_dict, result_dict)
result_dict = self.release_tensors(result_dict)
self.summary_board.update_from_result_dict(result_dict)
# logging
if self.iteration % self.log_steps == 0:
summary_dict = self.summary_board.summary()
message = get_log_string(
result_dict=summary_dict,
iteration=self.iteration,
max_iteration=self.max_iteration,
lr=self.get_lr(),
timer=self.timer,
)
self.logger.info(message)
self.write_event('train', summary_dict, self.iteration)
# snapshot & validation
if self.iteration % self.snapshot_steps == 0:
self.epoch = train_loader.last_epoch
self.save_snapshot(f'iter-{self.iteration}.pth.tar')
if not self.save_all_snapshots:
                    last_snapshot = osp.join(self.snapshot_dir, f'iter-{self.iteration - self.snapshot_steps}.pth.tar')
if osp.exists(last_snapshot):
os.remove(last_snapshot)
self.inference()
# scheduler
if self.scheduler is not None and self.iteration % self.grad_acc_steps == 0:
self.scheduler.step()
torch.cuda.empty_cache()
self.after_train()
message = 'Training finished.'
self.logger.critical(message)
| 7,415 | 35.895522 | 96 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/engine/logger.py | import logging
import coloredlogs
def create_logger(log_file=None):
logger = logging.getLogger()
logger.handlers.clear()
logger.setLevel(level=logging.DEBUG)
logger.propagate = False
format_str = '[%(asctime)s] [%(levelname).4s] %(message)s'
stream_handler = logging.StreamHandler()
colored_formatter = coloredlogs.ColoredFormatter(format_str)
stream_handler.setFormatter(colored_formatter)
logger.addHandler(stream_handler)
if log_file is not None:
file_handler = logging.FileHandler(log_file)
formatter = logging.Formatter(format_str, datefmt='%Y-%m-%d %H:%M:%S')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
class Logger:
def __init__(self, log_file=None, local_rank=-1):
if local_rank == 0 or local_rank == -1:
self.logger = create_logger(log_file=log_file)
else:
self.logger = None
def debug(self, message):
if self.logger is not None:
self.logger.debug(message)
def info(self, message):
if self.logger is not None:
self.logger.info(message)
def warning(self, message):
if self.logger is not None:
self.logger.warning(message)
def error(self, message):
if self.logger is not None:
self.logger.error(message)
def critical(self, message):
if self.logger is not None:
self.logger.critical(message)
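# Minimal usage sketch (hypothetical file name); only rank 0, or single-GPU
# runs with local_rank == -1, actually emit messages:
#
#   logger = Logger(log_file='train.log', local_rank=-1)
#   logger.info('training started')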
| 1,492 | 26.648148 | 78 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/engine/single_tester.py | from typing import Dict
import torch
import ipdb
from tqdm import tqdm
from geotransformer.engine.base_tester import BaseTester
from geotransformer.utils.summary_board import SummaryBoard
from geotransformer.utils.timer import Timer
from geotransformer.utils.common import get_log_string
from geotransformer.utils.torch import release_cuda, to_cuda
class SingleTester(BaseTester):
def __init__(self, cfg, parser=None, cudnn_deterministic=True):
super().__init__(cfg, parser=parser, cudnn_deterministic=cudnn_deterministic)
def before_test_epoch(self):
pass
def before_test_step(self, iteration, data_dict):
pass
def test_step(self, iteration, data_dict) -> Dict:
pass
def eval_step(self, iteration, data_dict, output_dict) -> Dict:
pass
def after_test_step(self, iteration, data_dict, output_dict, result_dict):
pass
def after_test_epoch(self):
pass
def classification_dataset_prepare(self, output_dict):
pass
def train_classification_model(self,output_dict):
pass
def step_scheduler_and_save_model(self):
pass
def summary_string(self, iteration, data_dict, output_dict, result_dict):
return get_log_string(result_dict)
def run(self):
assert self.test_loader is not None
self.load_snapshot(self.args.snapshot)
self.model.eval()
torch.set_grad_enabled(False)
self.before_test_epoch()
summary_board = SummaryBoard(adaptive=True)
timer = Timer()
total_iterations = len(self.test_loader)
# pbar = tqdm(enumerate(self.test_loader), total=total_iterations)
self.step_scheduler_and_save_model()
pbar = tqdm(enumerate(self.test_loader), total=total_iterations)
for iteration, data_dict in pbar:
# on start
# if iteration < 1440:
# continue
self.iteration = iteration + 1
data_dict = to_cuda(data_dict)
self.before_test_step(self.iteration, data_dict)
# test step
torch.cuda.synchronize()
timer.add_prepare_time()
output_dict = self.test_step(self.iteration, data_dict)
torch.cuda.synchronize()
timer.add_process_time()
# eval step
result_dict = self.eval_step(self.iteration, data_dict, output_dict)
# after step
# self.train_classification_model(output_dict)
self.after_test_step(self.iteration, data_dict, output_dict, result_dict)
# logging
result_dict = release_cuda(result_dict)
summary_board.update_from_result_dict(result_dict)
message = self.summary_string(self.iteration, data_dict, output_dict, result_dict)
message += f', {timer.tostring()}'
pbar.set_description(message)
torch.cuda.empty_cache()
# self.step_scheduler_and_save_model()
self.after_test_epoch()
summary_dict = summary_board.summary()
message = get_log_string(result_dict=summary_dict, timer=timer)
self.logger.critical(message)
| 3,177 | 35.113636 | 94 | py |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/extensions/pybind.cpp | #include <torch/extension.h>
#include "cpu/radius_neighbors/radius_neighbors.h"
#include "cpu/grid_subsampling/grid_subsampling.h"
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
// CPU extensions
m.def(
"radius_neighbors",
&radius_neighbors,
"Radius neighbors (CPU)"
);
m.def(
"grid_subsampling",
&grid_subsampling,
"Grid subsampling (CPU)"
);
}
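// Python-side usage sketch. The module name comes from TORCH_EXTENSION_NAME at
// build time; "geotransformer.ext" is an assumption, not taken from this file:
//   from geotransformer import ext
//   s_points, s_lengths = ext.grid_subsampling(points, lengths, voxel_size)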
| 378 | 18.947368 | 50 | cpp |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/extensions/common/torch_helper.h | #pragma once
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#define CHECK_CUDA(x) \
TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CPU(x) \
TORCH_CHECK(!x.device().is_cuda(), #x " must be a CPU tensor")
#define CHECK_CONTIGUOUS(x) \
TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
#define CHECK_IS_INT(x) \
do { \
TORCH_CHECK(x.scalar_type() == at::ScalarType::Int, \
#x " must be an int tensor"); \
} while (0)
#define CHECK_IS_LONG(x) \
do { \
TORCH_CHECK(x.scalar_type() == at::ScalarType::Long, \
#x " must be an long tensor"); \
} while (0)
#define CHECK_IS_FLOAT(x) \
do { \
TORCH_CHECK(x.scalar_type() == at::ScalarType::Float, \
#x " must be a float tensor"); \
} while (0)
| 1,698 | 46.194444 | 79 | h |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/extensions/cpu/grid_subsampling/grid_subsampling.cpp | #include <cstring>
#include "grid_subsampling.h"
#include "grid_subsampling_cpu.h"
std::vector<at::Tensor> grid_subsampling(
at::Tensor points,
at::Tensor lengths,
float voxel_size
) {
CHECK_CPU(points);
CHECK_CPU(lengths);
CHECK_IS_FLOAT(points);
CHECK_IS_LONG(lengths);
CHECK_CONTIGUOUS(points);
CHECK_CONTIGUOUS(lengths);
std::size_t batch_size = lengths.size(0);
std::size_t total_points = points.size(0);
std::vector<PointXYZ> vec_points = std::vector<PointXYZ>(
reinterpret_cast<PointXYZ*>(points.data_ptr<float>()),
reinterpret_cast<PointXYZ*>(points.data_ptr<float>()) + total_points
);
std::vector<PointXYZ> vec_s_points;
std::vector<long> vec_lengths = std::vector<long>(
lengths.data_ptr<long>(),
lengths.data_ptr<long>() + batch_size
);
std::vector<long> vec_s_lengths;
grid_subsampling_cpu(
vec_points,
vec_s_points,
vec_lengths,
vec_s_lengths,
voxel_size
);
std::size_t total_s_points = vec_s_points.size();
at::Tensor s_points = torch::zeros(
{total_s_points, 3},
at::device(points.device()).dtype(at::ScalarType::Float)
);
at::Tensor s_lengths = torch::zeros(
    {static_cast<long>(batch_size)},
at::device(lengths.device()).dtype(at::ScalarType::Long)
);
std::memcpy(
s_points.data_ptr<float>(),
reinterpret_cast<float*>(vec_s_points.data()),
sizeof(float) * total_s_points * 3
);
std::memcpy(
s_lengths.data_ptr<long>(),
vec_s_lengths.data(),
sizeof(long) * batch_size
);
return {s_points, s_lengths};
}
| 1,542 | 23.492063 | 72 | cpp |
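The binding expects stacked batches: every cloud concatenated into one (N, 3) float32 CPU tensor, with an int64 vector giving each cloud's point count. A hedged call sketch with made-up tensor contents, assuming the extension is importable as ext:

import torch

points = torch.rand(1000, 3)                          # two clouds, stacked
lengths = torch.tensor([600, 400], dtype=torch.long)  # points per cloud
s_points, s_lengths = ext.grid_subsampling(points, lengths, 0.05)
# s_points: (M, 3) voxel centroids; s_lengths: (2,) subsampled counts per cloud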
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/extensions/cpu/grid_subsampling/grid_subsampling.h |
#pragma once
#include <vector>
#include "../../common/torch_helper.h"
std::vector<at::Tensor> grid_subsampling(
at::Tensor points,
at::Tensor lengths,
float voxel_size
);
| 179 | 15.363636 | 41 | h |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/extensions/cpu/grid_subsampling/grid_subsampling_cpu.cpp |
#include "grid_subsampling_cpu.h"
void single_grid_subsampling_cpu(
std::vector<PointXYZ>& points,
std::vector<PointXYZ>& s_points,
float voxel_size
) {
// float sub_scale = 1. / voxel_size;
PointXYZ minCorner = min_point(points);
PointXYZ maxCorner = max_point(points);
PointXYZ originCorner = floor(minCorner * (1. / voxel_size)) * voxel_size;
std::size_t sampleNX = static_cast<std::size_t>(
// floor((maxCorner.x - originCorner.x) * sub_scale) + 1
floor((maxCorner.x - originCorner.x) / voxel_size) + 1
);
std::size_t sampleNY = static_cast<std::size_t>(
// floor((maxCorner.y - originCorner.y) * sub_scale) + 1
floor((maxCorner.y - originCorner.y) / voxel_size) + 1
);
std::size_t iX = 0;
std::size_t iY = 0;
std::size_t iZ = 0;
std::size_t mapIdx = 0;
std::unordered_map<std::size_t, SampledData> data;
for (auto& p : points) {
// iX = static_cast<std::size_t>(floor((p.x - originCorner.x) * sub_scale));
// iY = static_cast<std::size_t>(floor((p.y - originCorner.y) * sub_scale));
// iZ = static_cast<std::size_t>(floor((p.z - originCorner.z) * sub_scale));
iX = static_cast<std::size_t>(floor((p.x - originCorner.x) / voxel_size));
iY = static_cast<std::size_t>(floor((p.y - originCorner.y) / voxel_size));
iZ = static_cast<std::size_t>(floor((p.z - originCorner.z) / voxel_size));
mapIdx = iX + sampleNX * iY + sampleNX * sampleNY * iZ;
if (!data.count(mapIdx)) {
data.emplace(mapIdx, SampledData());
}
data[mapIdx].update(p);
}
s_points.reserve(data.size());
for (auto& v : data) {
s_points.push_back(v.second.point * (1.0 / v.second.count));
}
}
void grid_subsampling_cpu(
std::vector<PointXYZ>& points,
std::vector<PointXYZ>& s_points,
std::vector<long>& lengths,
std::vector<long>& s_lengths,
float voxel_size
) {
std::size_t start_index = 0;
std::size_t batch_size = lengths.size();
for (std::size_t b = 0; b < batch_size; b++) {
std::vector<PointXYZ> cur_points = std::vector<PointXYZ>(
points.begin() + start_index,
points.begin() + start_index + lengths[b]
);
std::vector<PointXYZ> cur_s_points;
single_grid_subsampling_cpu(cur_points, cur_s_points, voxel_size);
s_points.insert(s_points.end(), cur_s_points.begin(), cur_s_points.end());
s_lengths.push_back(cur_s_points.size());
start_index += lengths[b];
}
return;
}
| 2,410 | 30.723684 | 79 | cpp |
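The voxel hash above flattens a cell index (iX, iY, iZ) into mapIdx = iX + sampleNX * iY + sampleNX * sampleNY * iZ, accumulates a running point sum per occupied cell, and emits each cell's centroid at the end. A NumPy restatement of the same algorithm, for illustration only:

import numpy as np

def grid_subsample_py(points, voxel_size):
    # Mirror of single_grid_subsampling_cpu: bucket points by voxel cell,
    # then replace each occupied cell with the centroid of its points.
    origin = np.floor(points.min(axis=0) / voxel_size) * voxel_size
    idx = np.floor((points - origin) / voxel_size).astype(np.int64)
    nx, ny = idx[:, 0].max() + 1, idx[:, 1].max() + 1
    keys = idx[:, 0] + nx * idx[:, 1] + nx * ny * idx[:, 2]
    cells = {}
    for key, p in zip(keys, points):
        total, count = cells.get(key, (np.zeros(3), 0))
        cells[key] = (total + p, count + 1)
    return np.stack([total / count for total, count in cells.values()])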
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/extensions/cpu/grid_subsampling/grid_subsampling_cpu.h |
#pragma once
#include <vector>
#include <unordered_map>
#include "../../extra/cloud/cloud.h"
class SampledData {
public:
int count;
PointXYZ point;
SampledData() {
count = 0;
point = PointXYZ();
}
void update(const PointXYZ& p) {
count += 1;
point += p;
}
};
void single_grid_subsampling_cpu(
std::vector<PointXYZ>& o_points,
std::vector<PointXYZ>& s_points,
float voxel_size
);
void grid_subsampling_cpu(
std::vector<PointXYZ>& o_points,
std::vector<PointXYZ>& s_points,
std::vector<long>& o_lengths,
std::vector<long>& s_lengths,
float voxel_size
);
| 603 | 15.324324 | 36 | h |
LiDAR2LiDAR | LiDAR2LiDAR-master/geotransformer/geotransformer/extensions/cpu/radius_neighbors/radius_neighbors.cpp |
#include <cstring>
#include "radius_neighbors.h"
#include "radius_neighbors_cpu.h"
at::Tensor radius_neighbors(
at::Tensor q_points,
at::Tensor s_points,
at::Tensor q_lengths,
at::Tensor s_lengths,
float radius
) {
CHECK_CPU(q_points);
CHECK_CPU(s_points);
CHECK_CPU(q_lengths);
CHECK_CPU(s_lengths);
CHECK_IS_FLOAT(q_points);
CHECK_IS_FLOAT(s_points);
CHECK_IS_LONG(q_lengths);
CHECK_IS_LONG(s_lengths);
CHECK_CONTIGUOUS(q_points);
CHECK_CONTIGUOUS(s_points);
CHECK_CONTIGUOUS(q_lengths);
CHECK_CONTIGUOUS(s_lengths);
std::size_t total_q_points = q_points.size(0);
std::size_t total_s_points = s_points.size(0);
std::size_t batch_size = q_lengths.size(0);
std::vector<PointXYZ> vec_q_points = std::vector<PointXYZ>(
reinterpret_cast<PointXYZ*>(q_points.data_ptr<float>()),
reinterpret_cast<PointXYZ*>(q_points.data_ptr<float>()) + total_q_points
);
std::vector<PointXYZ> vec_s_points = std::vector<PointXYZ>(
reinterpret_cast<PointXYZ*>(s_points.data_ptr<float>()),
reinterpret_cast<PointXYZ*>(s_points.data_ptr<float>()) + total_s_points
);
std::vector<long> vec_q_lengths = std::vector<long>(
q_lengths.data_ptr<long>(), q_lengths.data_ptr<long>() + batch_size
);
std::vector<long> vec_s_lengths = std::vector<long>(
s_lengths.data_ptr<long>(), s_lengths.data_ptr<long>() + batch_size
);
std::vector<long> vec_neighbor_indices;
radius_neighbors_cpu(
vec_q_points,
vec_s_points,
vec_q_lengths,
vec_s_lengths,
vec_neighbor_indices,
radius
);
std::size_t max_neighbors = vec_neighbor_indices.size() / total_q_points;
at::Tensor neighbor_indices = torch::zeros(
    {static_cast<long>(total_q_points), static_cast<long>(max_neighbors)},
at::device(q_points.device()).dtype(at::ScalarType::Long)
);
std::memcpy(
neighbor_indices.data_ptr<long>(),
vec_neighbor_indices.data(),
sizeof(long) * total_q_points * max_neighbors
);
return neighbor_indices;
}
| 1,958 | 27.391304 | 76 | cpp |
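radius_neighbors uses the same stacked-batch convention on both the query and support sides, and returns a (total_q_points, max_neighbors) int64 matrix, where max_neighbors is inferred from the flat result size; the padding value for queries with fewer neighbors is decided inside radius_neighbors_cpu, which is not shown here. A hedged call sketch, again assuming the extension is importable as ext:

import torch

q_points = torch.rand(500, 3)                         # stacked query points
s_points = torch.rand(1000, 3)                        # stacked support points
q_lengths = torch.tensor([300, 200], dtype=torch.long)
s_lengths = torch.tensor([600, 400], dtype=torch.long)
indices = ext.radius_neighbors(q_points, s_points, q_lengths, s_lengths, 0.1)
# indices: (500, max_neighbors) support-point indices within radius 0.1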